1/*
2 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
3 * Copyright (C) 2016 Jérémie Galarneau <jeremie.galarneau@efficios.com>
4 *
5 * SPDX-License-Identifier: GPL-2.0-only
6 *
7 */
8
9#define _LGPL_SOURCE
10#include "agent-thread.hpp"
11#include "agent.hpp"
12#include "buffer-registry.hpp"
13#include "channel.hpp"
14#include "cmd.hpp"
15#include "consumer-output.hpp"
16#include "consumer.hpp"
17#include "event-notifier-error-accounting.hpp"
18#include "event.hpp"
19#include "health-sessiond.hpp"
20#include "kernel-consumer.hpp"
21#include "kernel.hpp"
22#include "lttng-sessiond.hpp"
23#include "lttng-syscall.hpp"
24#include "notification-thread-commands.hpp"
25#include "notification-thread.hpp"
26#include "rotation-thread.hpp"
27#include "session.hpp"
28#include "timer.hpp"
29#include "tracker.hpp"
30#include "utils.hpp"
31
32#include <common/buffer-view.hpp>
33#include <common/common.hpp>
34#include <common/compat/string.hpp>
35#include <common/defaults.hpp>
36#include <common/dynamic-buffer.hpp>
37#include <common/kernel-ctl/kernel-ctl.hpp>
38#include <common/payload-view.hpp>
39#include <common/payload.hpp>
40#include <common/relayd/relayd.hpp>
41#include <common/sessiond-comm/sessiond-comm.hpp>
42#include <common/string-utils/string-utils.hpp>
43#include <common/trace-chunk.hpp>
44#include <common/urcu.hpp>
45#include <common/utils.hpp>
46
47#include <lttng/action/action-internal.hpp>
48#include <lttng/action/action.h>
49#include <lttng/channel-internal.hpp>
50#include <lttng/channel.h>
51#include <lttng/condition/condition-internal.hpp>
52#include <lttng/condition/condition.h>
53#include <lttng/condition/event-rule-matches-internal.hpp>
54#include <lttng/condition/event-rule-matches.h>
55#include <lttng/error-query-internal.hpp>
56#include <lttng/event-internal.hpp>
57#include <lttng/event-rule/event-rule-internal.hpp>
58#include <lttng/event-rule/event-rule.h>
59#include <lttng/kernel.h>
60#include <lttng/location-internal.hpp>
61#include <lttng/lttng-error.h>
62#include <lttng/rotate-internal.hpp>
63#include <lttng/session-descriptor-internal.hpp>
64#include <lttng/session-internal.hpp>
65#include <lttng/tracker.h>
66#include <lttng/trigger/trigger-internal.hpp>
67#include <lttng/userspace-probe-internal.hpp>
68
69#include <algorithm>
70#include <inttypes.h>
71#include <stdio.h>
72#include <sys/stat.h>
73#include <urcu/list.h>
74#include <urcu/uatomic.h>
75
76/* Sleep for 100ms between each check for the shm path's deletion. */
77#define SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US 100000
78
79namespace lsu = lttng::sessiond::ust;
80
81static enum lttng_error_code wait_on_path(void *path);
82
83namespace {
84struct cmd_destroy_session_reply_context {
85 int reply_sock_fd;
86 bool implicit_rotation_on_destroy;
87 /*
88 * Indicates whether or not an error occurred while launching the
89 * destruction of a session.
90 */
91 enum lttng_error_code destruction_status;
92};
93
94/*
95 * Command completion handler that is used by the destroy command
96 * when a session that has a non-default shm_path is being destroyed.
97 *
98 * See comment in cmd_destroy_session() for the rationale.
99 */
100struct destroy_completion_handler {
101 struct cmd_completion_handler handler;
102 char shm_path[member_sizeof(struct ltt_session, shm_path)];
103} destroy_completion_handler = {
104 .handler = { .run = wait_on_path, .data = destroy_completion_handler.shm_path },
105 .shm_path = { 0 },
106};
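/*
 * Illustrative sketch (not part of the original command logic): the
 * completion-handler pattern above pairs a callback with an opaque data
 * pointer so that work (here, waiting for the shm path to be deleted) can be
 * deferred until after the command reply has been sent. The names below are
 * hypothetical stand-ins meant to show the shape of the mechanism, not the
 * real cmd_completion_handler definition.
 */
struct example_completion_handler {
	enum lttng_error_code (*run)(void *data);
	void *data;
};

static inline enum lttng_error_code
example_run_completion(const example_completion_handler& handler)
{
	/* Invoked by the command layer once the reply has been queued. */
	return handler.run(handler.data);
}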
107
108/*
109 * Used to keep a unique index for each relayd socket created. This value is
110 * associated with streams on the consumer so that the consumer can match them
111 * to the right relayd to send to. It must be accessed with the relayd_net_seq_idx_lock
112 * held.
113 */
114pthread_mutex_t relayd_net_seq_idx_lock = PTHREAD_MUTEX_INITIALIZER;
115uint64_t relayd_net_seq_idx;
116} /* namespace */
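/*
 * Illustrative sketch (not part of the original file): allocating a new relayd
 * network sequence index is done with relayd_net_seq_idx_lock held, as the
 * comment above requires. This helper is a hypothetical example of the
 * pattern; the real allocation happens inline in send_consumer_relayd_socket().
 */
static inline uint64_t example_allocate_relayd_net_seq_idx()
{
	uint64_t allocated_idx;

	pthread_mutex_lock(&relayd_net_seq_idx_lock);
	allocated_idx = ++relayd_net_seq_idx;
	pthread_mutex_unlock(&relayd_net_seq_idx_lock);

	return allocated_idx;
}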
117
118static struct cmd_completion_handler *current_completion_handler;
119static int validate_ust_event_name(const char *);
120static int cmd_enable_event_internal(ltt_session::locked_ref& session,
121 const struct lttng_domain *domain,
122 char *channel_name,
123 struct lttng_event *event,
124 char *filter_expression,
125 struct lttng_bytecode *filter,
126 struct lttng_event_exclusion *exclusion,
127 int wpipe);
128static enum lttng_error_code cmd_enable_channel_internal(ltt_session::locked_ref& session,
129 const struct lttng_domain *domain,
130 const struct lttng_channel *_attr,
131 int wpipe);
132
133/*
134 * Create a session path used by list_lttng_sessions when the
135 * session consumer is on the network.
136 */
137static int
138build_network_session_path(char *dst, size_t size, const ltt_session::locked_ref& session)
139{
140 int ret, kdata_port, udata_port;
141 struct lttng_uri *kuri = nullptr, *uuri = nullptr, *uri = nullptr;
142 char tmp_uurl[PATH_MAX], tmp_urls[PATH_MAX];
143
144 LTTNG_ASSERT(dst);
145
146 memset(tmp_urls, 0, sizeof(tmp_urls));
147 memset(tmp_uurl, 0, sizeof(tmp_uurl));
148
149 kdata_port = udata_port = DEFAULT_NETWORK_DATA_PORT;
150
151 if (session->kernel_session && session->kernel_session->consumer) {
152 kuri = &session->kernel_session->consumer->dst.net.control;
153 kdata_port = session->kernel_session->consumer->dst.net.data.port;
154 }
155
156 if (session->ust_session && session->ust_session->consumer) {
157 uuri = &session->ust_session->consumer->dst.net.control;
158 udata_port = session->ust_session->consumer->dst.net.data.port;
159 }
160
161 if (uuri == nullptr && kuri == nullptr) {
162 uri = &session->consumer->dst.net.control;
163 kdata_port = session->consumer->dst.net.data.port;
164 } else if (kuri && uuri) {
165 ret = uri_compare(kuri, uuri);
166 if (ret) {
167 /* Not Equal */
168 uri = kuri;
169 /* Build uuri URL string */
170 ret = uri_to_str_url(uuri, tmp_uurl, sizeof(tmp_uurl));
171 if (ret < 0) {
172 goto error;
173 }
174 } else {
175 uri = kuri;
176 }
177 } else if (kuri && uuri == nullptr) {
178 uri = kuri;
179 } else if (uuri && kuri == nullptr) {
180 uri = uuri;
181 }
182
183 ret = uri_to_str_url(uri, tmp_urls, sizeof(tmp_urls));
184 if (ret < 0) {
185 goto error;
186 }
187
188 /*
189 * Do we have a UST URL set? If yes, this means we have both kernel and UST
190 * to print.
191 */
192 if (*tmp_uurl != '\0') {
193 ret = snprintf(dst,
194 size,
195 "[K]: %s [data: %d] -- [U]: %s [data: %d]",
196 tmp_urls,
197 kdata_port,
198 tmp_uurl,
199 udata_port);
200 } else {
201 int dport;
202 if (kuri || (!kuri && !uuri)) {
203 dport = kdata_port;
204 } else {
205 /* No kernel URI, use the UST port. */
206 dport = udata_port;
207 }
208 ret = snprintf(dst, size, "%s [data: %d]", tmp_urls, dport);
209 }
210
211error:
212 return ret;
213}
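/*
 * Illustrative sketch (not part of the original file): examples of the strings
 * produced by build_network_session_path() above. The relayd URLs and data
 * ports below are made-up values.
 */
static inline void example_network_session_path_formats()
{
	char dst[PATH_MAX];

	/* Distinct kernel and UST relay destinations. */
	(void) snprintf(dst,
			sizeof(dst),
			"[K]: %s [data: %d] -- [U]: %s [data: %d]",
			"net://relayd-a:5342/",
			5343,
			"net://relayd-b:5342/",
			5343);

	/* Single destination (or identical kernel/UST control URLs). */
	(void) snprintf(dst, sizeof(dst), "%s [data: %d]", "net://relayd-a:5342/", 5343);
}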
214
215/*
216 * Get run-time attributes if the session has been started (discarded events,
217 * lost packets).
218 */
219static int get_kernel_runtime_stats(const ltt_session::locked_ref& session,
220 struct ltt_kernel_channel *kchan,
221 uint64_t *discarded_events,
222 uint64_t *lost_packets)
223{
224 int ret;
225
226 if (!session->has_been_started) {
227 ret = 0;
228 *discarded_events = 0;
229 *lost_packets = 0;
230 goto end;
231 }
232
233 ret = consumer_get_discarded_events(
234 session->id, kchan->key, session->kernel_session->consumer, discarded_events);
235 if (ret < 0) {
236 goto end;
237 }
238
239 ret = consumer_get_lost_packets(
240 session->id, kchan->key, session->kernel_session->consumer, lost_packets);
241 if (ret < 0) {
242 goto end;
243 }
244
245end:
246 return ret;
247}
248
249/*
250 * Get run-time attributes if the session has been started (discarded events,
251 * lost packets).
252 */
253static int get_ust_runtime_stats(const ltt_session::locked_ref& session,
254 struct ltt_ust_channel *uchan,
255 uint64_t *discarded_events,
256 uint64_t *lost_packets)
257{
258 int ret;
259 struct ltt_ust_session *usess;
260
261 if (!discarded_events || !lost_packets) {
262 ret = -1;
263 goto end;
264 }
265
266 usess = session->ust_session;
267 LTTNG_ASSERT(discarded_events);
268 LTTNG_ASSERT(lost_packets);
269
270 if (!usess || !session->has_been_started) {
271 *discarded_events = 0;
272 *lost_packets = 0;
273 ret = 0;
274 goto end;
275 }
276
277 if (usess->buffer_type == LTTNG_BUFFER_PER_UID) {
278 ret = ust_app_uid_get_channel_runtime_stats(usess->id,
279 &usess->buffer_reg_uid_list,
280 usess->consumer,
281 uchan->id,
282 uchan->attr.overwrite,
283 discarded_events,
284 lost_packets);
285 } else if (usess->buffer_type == LTTNG_BUFFER_PER_PID) {
286 ret = ust_app_pid_get_channel_runtime_stats(usess,
287 uchan,
288 usess->consumer,
289 uchan->attr.overwrite,
290 discarded_events,
291 lost_packets);
292 if (ret < 0) {
293 goto end;
294 }
295 *discarded_events += uchan->per_pid_closed_app_discarded;
296 *lost_packets += uchan->per_pid_closed_app_lost;
297 } else {
298 ERR("Unsupported buffer type");
299 abort();
300 ret = -1;
301 goto end;
302 }
303
304end:
305 return ret;
306}
307
308/*
309 * Create a list of agent domain events.
310 *
311 * Return LTTNG_OK and set *nb_events on success, or an LTTng error code on failure.
312 */
313static enum lttng_error_code list_lttng_agent_events(struct agent *agt,
314 struct lttng_payload *reply_payload,
315 unsigned int *nb_events)
316{
317 enum lttng_error_code ret_code;
318 int ret = 0;
319 unsigned int local_nb_events = 0;
320 unsigned long agent_event_count;
321
322 assert(agt);
323 assert(reply_payload);
324
325 DBG3("Listing agent events");
326
327 agent_event_count = lttng_ht_get_count(agt->events);
328 if (agent_event_count == 0) {
329 /* Early exit. */
330 goto end;
331 }
332
333 if (agent_event_count > UINT_MAX) {
334 ret_code = LTTNG_ERR_OVERFLOW;
335 goto error;
336 }
337
338 local_nb_events = (unsigned int) agent_event_count;
339
340 for (auto *event :
341 lttng::urcu::lfht_iteration_adapter<agent_event,
342 decltype(agent_event::node),
343 &agent_event::node>(*agt->events->ht)) {
344 struct lttng_event *tmp_event = lttng_event_create();
345
346 if (!tmp_event) {
347 ret_code = LTTNG_ERR_NOMEM;
348 goto error;
349 }
350
351 if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
352 lttng_event_destroy(tmp_event);
353 ret_code = LTTNG_ERR_FATAL;
354 goto error;
355 }
356
357 tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
358 tmp_event->enabled = !!event->enabled_count;
359 tmp_event->loglevel = event->loglevel_value;
360 tmp_event->loglevel_type = event->loglevel_type;
361
362 ret = lttng_event_serialize(
363 tmp_event, 0, nullptr, event->filter_expression, 0, nullptr, reply_payload);
364 lttng_event_destroy(tmp_event);
365 if (ret) {
366 ret_code = LTTNG_ERR_FATAL;
367 goto error;
368 }
369 }
370end:
371 ret_code = LTTNG_OK;
372 *nb_events = local_nb_events;
373error:
374 return ret_code;
375}
376
377/*
378 * Create a list of ust global domain events.
379 */
380static enum lttng_error_code list_lttng_ust_global_events(char *channel_name,
381 struct ltt_ust_domain_global *ust_global,
382 struct lttng_payload *reply_payload,
383 unsigned int *nb_events)
384{
385 enum lttng_error_code ret_code;
386 int ret;
387 struct lttng_ht_iter iter;
388 struct lttng_ht_node_str *node;
389 struct ltt_ust_channel *uchan;
390 unsigned long channel_event_count;
391 unsigned int local_nb_events = 0;
392
393 assert(reply_payload);
394 assert(nb_events);
395
396 DBG("Listing UST global events for channel %s", channel_name);
397
398 const lttng::urcu::read_lock_guard read_lock;
399
400 lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
401 node = lttng_ht_iter_get_node<lttng_ht_node_str>(&iter);
402 if (node == nullptr) {
403 ret_code = LTTNG_ERR_UST_CHAN_NOT_FOUND;
404 goto error;
405 }
406
407 uchan = lttng::utils::container_of(node, &ltt_ust_channel::node);
408
409 channel_event_count = lttng_ht_get_count(uchan->events);
410 if (channel_event_count == 0) {
411 /* Early exit. */
412 ret_code = LTTNG_OK;
413 goto end;
414 }
415
416 if (channel_event_count > UINT_MAX) {
417 ret_code = LTTNG_ERR_OVERFLOW;
418 goto error;
419 }
420
421 local_nb_events = (unsigned int) channel_event_count;
422
423 DBG3("Listing UST global %d events", *nb_events);
424
425 for (auto *uevent :
426 lttng::urcu::lfht_iteration_adapter<ltt_ust_event,
427 decltype(ltt_ust_event::node),
428 &ltt_ust_event::node>(*uchan->events->ht)) {
429 struct lttng_event *tmp_event = nullptr;
430
431 if (uevent->internal) {
432 /* This event should remain hidden from clients */
433 local_nb_events--;
434 continue;
435 }
436
437 tmp_event = lttng_event_create();
438 if (!tmp_event) {
439 ret_code = LTTNG_ERR_NOMEM;
440 goto error;
441 }
442
443 if (lttng_strncpy(tmp_event->name, uevent->attr.name, LTTNG_SYMBOL_NAME_LEN)) {
444 ret_code = LTTNG_ERR_FATAL;
445 lttng_event_destroy(tmp_event);
446 goto error;
447 }
448
449 tmp_event->name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
450 tmp_event->enabled = uevent->enabled;
451
452 switch (uevent->attr.instrumentation) {
453 case LTTNG_UST_ABI_TRACEPOINT:
454 tmp_event->type = LTTNG_EVENT_TRACEPOINT;
455 break;
456 case LTTNG_UST_ABI_PROBE:
457 tmp_event->type = LTTNG_EVENT_PROBE;
458 break;
459 case LTTNG_UST_ABI_FUNCTION:
460 tmp_event->type = LTTNG_EVENT_FUNCTION;
461 break;
462 }
463
464 tmp_event->loglevel = uevent->attr.loglevel;
465 switch (uevent->attr.loglevel_type) {
466 case LTTNG_UST_ABI_LOGLEVEL_ALL:
467 tmp_event->loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
468 break;
469 case LTTNG_UST_ABI_LOGLEVEL_RANGE:
470 tmp_event->loglevel_type = LTTNG_EVENT_LOGLEVEL_RANGE;
471 break;
472 case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
473 tmp_event->loglevel_type = LTTNG_EVENT_LOGLEVEL_SINGLE;
474 break;
475 }
476 if (uevent->filter) {
477 tmp_event->filter = 1;
478 }
479 if (uevent->exclusion) {
480 tmp_event->exclusion = 1;
481 }
482
483 std::vector<const char *> exclusion_names;
484 if (uevent->exclusion) {
485 for (int i = 0; i < uevent->exclusion->count; i++) {
486 exclusion_names.emplace_back(
487 LTTNG_EVENT_EXCLUSION_NAME_AT(uevent->exclusion, i));
488 }
489 }
490
491 /*
492 * We do not care about the filter bytecode and the fd from the
493 * userspace_probe_location.
494 */
495 ret = lttng_event_serialize(tmp_event,
496 exclusion_names.size(),
497 exclusion_names.size() ? exclusion_names.data() :
498 nullptr,
499 uevent->filter_expression,
500 0,
501 nullptr,
502 reply_payload);
503 lttng_event_destroy(tmp_event);
504 if (ret) {
505 ret_code = LTTNG_ERR_FATAL;
506 goto error;
507 }
508 }
509
510end:
511 /* local_nb_events is final at this point; report it to the caller. */
512 ret_code = LTTNG_OK;
513 *nb_events = local_nb_events;
514error:
515 return ret_code;
516}
517
518/*
519 * Serialize all the kernel events of a channel into the reply payload.
520 */
521static enum lttng_error_code list_lttng_kernel_events(char *channel_name,
522 struct ltt_kernel_session *kernel_session,
523 struct lttng_payload *reply_payload,
524 unsigned int *nb_events)
525{
526 enum lttng_error_code ret_code;
527 int ret;
528 struct ltt_kernel_channel *kchan;
529
530 assert(reply_payload);
531
532 kchan = trace_kernel_get_channel_by_name(channel_name, kernel_session);
533 if (kchan == nullptr) {
534 ret_code = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
535 goto end;
536 }
537
538 *nb_events = kchan->event_count;
539
540 DBG("Listing events for channel %s", kchan->channel->name);
541
542 if (*nb_events == 0) {
543 ret_code = LTTNG_OK;
544 goto end;
545 }
546
547 /* Iterate over the kernel channel's events. */
548 for (auto event :
549 lttng::urcu::list_iteration_adapter<ltt_kernel_event, &ltt_kernel_event::list>(
550 kchan->events_list.head)) {
551 struct lttng_event *tmp_event = lttng_event_create();
552
553 if (!tmp_event) {
554 ret_code = LTTNG_ERR_NOMEM;
555 goto end;
556 }
557
558 if (lttng_strncpy(tmp_event->name, event->event->name, LTTNG_SYMBOL_NAME_LEN)) {
559 lttng_event_destroy(tmp_event);
560 ret_code = LTTNG_ERR_FATAL;
561 goto end;
562 }
563
564 tmp_event->name[LTTNG_SYMBOL_NAME_LEN - 1] = '\0';
565 tmp_event->enabled = event->enabled;
566 tmp_event->filter = (unsigned char) !!event->filter_expression;
567
568 switch (event->event->instrumentation) {
569 case LTTNG_KERNEL_ABI_TRACEPOINT:
570 tmp_event->type = LTTNG_EVENT_TRACEPOINT;
571 break;
572 case LTTNG_KERNEL_ABI_KRETPROBE:
573 tmp_event->type = LTTNG_EVENT_FUNCTION;
574 memcpy(&tmp_event->attr.probe,
575 &event->event->u.kprobe,
576 sizeof(struct lttng_kernel_abi_kprobe));
577 break;
578 case LTTNG_KERNEL_ABI_KPROBE:
579 tmp_event->type = LTTNG_EVENT_PROBE;
580 memcpy(&tmp_event->attr.probe,
581 &event->event->u.kprobe,
582 sizeof(struct lttng_kernel_abi_kprobe));
583 break;
584 case LTTNG_KERNEL_ABI_UPROBE:
585 tmp_event->type = LTTNG_EVENT_USERSPACE_PROBE;
586 break;
587 case LTTNG_KERNEL_ABI_FUNCTION:
588 tmp_event->type = LTTNG_EVENT_FUNCTION;
589 memcpy(&(tmp_event->attr.ftrace),
590 &event->event->u.ftrace,
591 sizeof(struct lttng_kernel_abi_function));
592 break;
593 case LTTNG_KERNEL_ABI_NOOP:
594 tmp_event->type = LTTNG_EVENT_NOOP;
595 break;
596 case LTTNG_KERNEL_ABI_SYSCALL:
597 tmp_event->type = LTTNG_EVENT_SYSCALL;
598 break;
599 case LTTNG_KERNEL_ABI_ALL:
600 /* fall-through. */
601 default:
602 abort();
603 break;
604 }
605
606 if (event->userspace_probe_location) {
607 struct lttng_userspace_probe_location *location_copy =
608 lttng_userspace_probe_location_copy(
609 event->userspace_probe_location);
610
611 if (!location_copy) {
612 lttng_event_destroy(tmp_event);
613 ret_code = LTTNG_ERR_NOMEM;
614 goto end;
615 }
616
617 ret = lttng_event_set_userspace_probe_location(tmp_event, location_copy);
618 if (ret) {
619 lttng_event_destroy(tmp_event);
620 lttng_userspace_probe_location_destroy(location_copy);
621 ret_code = LTTNG_ERR_INVALID;
622 goto end;
623 }
624 }
625
626 ret = lttng_event_serialize(
627 tmp_event, 0, nullptr, event->filter_expression, 0, nullptr, reply_payload);
628 lttng_event_destroy(tmp_event);
629 if (ret) {
630 ret_code = LTTNG_ERR_FATAL;
631 goto end;
632 }
633 }
634
635 ret_code = LTTNG_OK;
636end:
637 return ret_code;
638}
639
640/*
641 * Add URI to the consumer output object. Set the correct path depending on the
642 * domain, adding the default trace directory.
643 */
644static enum lttng_error_code add_uri_to_consumer(const ltt_session::locked_ref& session,
645 struct consumer_output *consumer,
646 struct lttng_uri *uri,
647 enum lttng_domain_type domain)
648{
649 int ret;
650 enum lttng_error_code ret_code = LTTNG_OK;
651
652 LTTNG_ASSERT(uri);
653
654 if (consumer == nullptr) {
655 DBG("No consumer detected. Don't add URI. Stopping.");
656 ret_code = LTTNG_ERR_NO_CONSUMER;
657 goto error;
658 }
659
660 switch (domain) {
661 case LTTNG_DOMAIN_KERNEL:
662 ret = lttng_strncpy(consumer->domain_subdir,
663 DEFAULT_KERNEL_TRACE_DIR,
664 sizeof(consumer->domain_subdir));
665 break;
666 case LTTNG_DOMAIN_UST:
667 ret = lttng_strncpy(consumer->domain_subdir,
668 DEFAULT_UST_TRACE_DIR,
669 sizeof(consumer->domain_subdir));
670 break;
671 default:
672 /*
673 * This case is possible if we try to add the URI to the global
674 * tracing session consumer object, in which case there is
675 * no subdir.
676 */
677 memset(consumer->domain_subdir, 0, sizeof(consumer->domain_subdir));
678 ret = 0;
679 }
680 if (ret) {
681 ERR("Failed to initialize consumer output domain subdirectory");
682 ret_code = LTTNG_ERR_FATAL;
683 goto error;
684 }
685
686 switch (uri->dtype) {
687 case LTTNG_DST_IPV4:
688 case LTTNG_DST_IPV6:
689 DBG2("Setting network URI to consumer");
690
691 if (consumer->type == CONSUMER_DST_NET) {
692 if ((uri->stype == LTTNG_STREAM_CONTROL &&
693 consumer->dst.net.control_isset) ||
694 (uri->stype == LTTNG_STREAM_DATA && consumer->dst.net.data_isset)) {
695 ret_code = LTTNG_ERR_URL_EXIST;
696 goto error;
697 }
698 } else {
699 memset(&consumer->dst, 0, sizeof(consumer->dst));
700 }
701
702 /* Set URI into consumer output object */
703 ret = consumer_set_network_uri(session, consumer, uri);
704 if (ret < 0) {
705 ret_code = (lttng_error_code) -ret;
706 goto error;
707 } else if (ret == 1) {
708 /*
709 * The URI was the same in the consumer, so we do not append the subdir
710 * again in order to not duplicate the output directory.
711 */
712 ret_code = LTTNG_OK;
713 goto error;
714 }
715 break;
716 case LTTNG_DST_PATH:
717 if (*uri->dst.path != '/' || strstr(uri->dst.path, "../")) {
718 ret_code = LTTNG_ERR_INVALID;
719 goto error;
720 }
721 DBG2("Setting trace directory path from URI to %s", uri->dst.path);
722 memset(&consumer->dst, 0, sizeof(consumer->dst));
723
724 ret = lttng_strncpy(consumer->dst.session_root_path,
725 uri->dst.path,
726 sizeof(consumer->dst.session_root_path));
727 if (ret) {
728 ret_code = LTTNG_ERR_FATAL;
729 goto error;
730 }
731 consumer->type = CONSUMER_DST_LOCAL;
732 break;
733 }
734
735 ret_code = LTTNG_OK;
736error:
737 return ret_code;
738}
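/*
 * Illustrative sketch (not part of the original file): the LTTNG_DST_PATH
 * branch of add_uri_to_consumer() above only accepts an absolute destination
 * path that contains no "../" component. The helper below restates that check
 * under a hypothetical name.
 */
static inline bool example_is_valid_local_output_path(const char *path)
{
	/* Absolute path required, and no parent-directory escape allowed. */
	return *path == '/' && !strstr(path, "../");
}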
739
740/*
741 * Init tracing by creating the trace directory and sending fds to the kernel consumer.
742 */
743static int init_kernel_tracing(struct ltt_kernel_session *session)
744{
745 int ret = 0;
746
747 LTTNG_ASSERT(session);
748
749 if (session->consumer_fds_sent == 0 && session->consumer != nullptr) {
750 for (auto *socket :
751 lttng::urcu::lfht_iteration_adapter<consumer_socket,
752 decltype(consumer_socket::node),
753 &consumer_socket::node>(
754 *session->consumer->socks->ht)) {
755 pthread_mutex_lock(socket->lock);
756 ret = kernel_consumer_send_session(socket, session);
757 pthread_mutex_unlock(socket->lock);
758 if (ret < 0) {
759 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
760 goto error;
761 }
762 }
763 }
764
765error:
766 return ret;
767}
768
769/*
770 * Create a socket to the relayd using the URI.
771 *
772 * On success, the relayd_sock pointer is set to the created socket.
773 * Else, it remains untouched and an LTTng error code is returned.
774 */
775static enum lttng_error_code create_connect_relayd(struct lttng_uri *uri,
776 struct lttcomm_relayd_sock **relayd_sock,
777 struct consumer_output *consumer)
778{
779 int ret;
780 enum lttng_error_code status = LTTNG_OK;
781 struct lttcomm_relayd_sock *rsock;
782
783 rsock = lttcomm_alloc_relayd_sock(
784 uri, RELAYD_VERSION_COMM_MAJOR, RELAYD_VERSION_COMM_MINOR);
785 if (!rsock) {
786 status = LTTNG_ERR_FATAL;
787 goto error;
788 }
789
790 /*
791 * Connect to the relayd so we can proceed with session creation. This call
792 * can possibly block for an arbitrary amount of time, so the health
793 * state is set to 'poll' for the duration of the connection attempt.
794 */
795 health_poll_entry();
796 ret = relayd_connect(rsock);
797 health_poll_exit();
798 if (ret < 0) {
799 ERR("Unable to reach lttng-relayd");
800 status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
801 goto free_sock;
802 }
803
804 /* Create socket for control stream. */
805 if (uri->stype == LTTNG_STREAM_CONTROL) {
806 uint64_t result_flags;
807
808 DBG3("Creating relayd stream socket from URI");
809
810 /* Check relayd version */
811 ret = relayd_version_check(rsock);
812 if (ret == LTTNG_ERR_RELAYD_VERSION_FAIL) {
813 status = LTTNG_ERR_RELAYD_VERSION_FAIL;
814 goto close_sock;
815 } else if (ret < 0) {
816 ERR("Unable to reach lttng-relayd");
817 status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
818 goto close_sock;
819 }
820 consumer->relay_major_version = rsock->major;
821 consumer->relay_minor_version = rsock->minor;
822 ret = relayd_get_configuration(rsock, 0, &result_flags);
823 if (ret < 0) {
824 ERR("Unable to get relayd configuration");
825 status = LTTNG_ERR_RELAYD_CONNECT_FAIL;
826 goto close_sock;
827 }
828 if (result_flags & LTTCOMM_RELAYD_CONFIGURATION_FLAG_CLEAR_ALLOWED) {
829 consumer->relay_allows_clear = true;
830 }
831 } else if (uri->stype == LTTNG_STREAM_DATA) {
832 DBG3("Creating relayd data socket from URI");
833 } else {
834 /* Command is not valid */
835 ERR("Relayd invalid stream type: %d", uri->stype);
836 status = LTTNG_ERR_INVALID;
837 goto close_sock;
838 }
839
840 *relayd_sock = rsock;
841
842 return status;
843
844close_sock:
845 /* The returned value is not useful since we are on an error path. */
846 (void) relayd_close(rsock);
847free_sock:
848 free(rsock);
849error:
850 return status;
851}
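/*
 * Illustrative sketch (not part of the original file): potentially blocking
 * calls made on this thread, such as relayd_connect() in
 * create_connect_relayd() above, are bracketed with health_poll_entry() and
 * health_poll_exit() so the health subsystem knows the thread is waiting
 * rather than stuck. The template below is a hypothetical wrapper showing the
 * pattern.
 */
template <typename BlockingOperation>
static inline int example_run_blocking_operation(BlockingOperation&& blocking_operation)
{
	health_poll_entry();
	const int ret = blocking_operation();
	health_poll_exit();

	return ret;
}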
852
853/*
854 * Connect to the relayd using URI and send the socket to the right consumer.
855 *
856 * The consumer socket lock must be held by the caller.
857 *
858 * Returns LTTNG_OK on success or an LTTng error code on failure.
859 */
860static enum lttng_error_code send_consumer_relayd_socket(unsigned int session_id,
861 struct lttng_uri *relayd_uri,
862 struct consumer_output *consumer,
863 struct consumer_socket *consumer_sock,
864 const char *session_name,
865 const char *hostname,
866 const char *base_path,
867 int session_live_timer,
868 const uint64_t *current_chunk_id,
869 time_t session_creation_time,
870 bool session_name_contains_creation_time)
871{
872 int ret;
873 struct lttcomm_relayd_sock *rsock = nullptr;
874 enum lttng_error_code status;
875
876 /* Connect to relayd and make version check if uri is the control. */
877 status = create_connect_relayd(relayd_uri, &rsock, consumer);
878 if (status != LTTNG_OK) {
879 goto relayd_comm_error;
880 }
881 LTTNG_ASSERT(rsock);
882
883 /* Set the network sequence index if not set. */
884 if (consumer->net_seq_index == (uint64_t) -1ULL) {
885 pthread_mutex_lock(&relayd_net_seq_idx_lock);
886 /*
887 * Increment net_seq_idx because we are about to transfer the
888 * new relayd socket to the consumer.
889 * Assign unique key so the consumer can match streams.
890 */
891 consumer->net_seq_index = ++relayd_net_seq_idx;
892 pthread_mutex_unlock(&relayd_net_seq_idx_lock);
893 }
894
895 /* Send relayd socket to consumer. */
896 ret = consumer_send_relayd_socket(consumer_sock,
897 rsock,
898 consumer,
899 relayd_uri->stype,
900 session_id,
901 session_name,
902 hostname,
903 base_path,
904 session_live_timer,
905 current_chunk_id,
906 session_creation_time,
907 session_name_contains_creation_time);
908 if (ret < 0) {
909 status = LTTNG_ERR_ENABLE_CONSUMER_FAIL;
910 goto close_sock;
911 }
912
913 /* Flag that the corresponding socket was sent. */
914 if (relayd_uri->stype == LTTNG_STREAM_CONTROL) {
915 consumer_sock->control_sock_sent = 1;
916 } else if (relayd_uri->stype == LTTNG_STREAM_DATA) {
917 consumer_sock->data_sock_sent = 1;
918 }
919
920 /*
921 * Close the socket, which was dup'd on the consumer side. The session daemon does
922 * NOT keep track of the relayd socket(s) once they are transferred to the consumer.
923 */
924
925close_sock:
926 if (status != LTTNG_OK) {
927 /*
928 * The consumer output for this session should not be used anymore
929 * since the relayd connection failed, thus making any tracing and/or
930 * streaming not usable.
931 */
932 consumer->enabled = false;
933 }
934 (void) relayd_close(rsock);
935 free(rsock);
936
937relayd_comm_error:
938 return status;
939}
940
941/*
942 * Send both relayd sockets to a specific consumer and domain. This is a
943 * helper function to facilitate sending the information to the consumer for a
944 * session.
945 *
946 * The consumer socket lock must be held by the caller.
947 *
948 * Returns LTTNG_OK, or an LTTng error code on failure.
949 */
950static enum lttng_error_code send_consumer_relayd_sockets(unsigned int session_id,
951 struct consumer_output *consumer,
952 struct consumer_socket *sock,
953 const char *session_name,
954 const char *hostname,
955 const char *base_path,
956 int session_live_timer,
957 const uint64_t *current_chunk_id,
958 time_t session_creation_time,
959 bool session_name_contains_creation_time)
960{
961 enum lttng_error_code status = LTTNG_OK;
962
963 LTTNG_ASSERT(consumer);
964 LTTNG_ASSERT(sock);
965
966 /* Sending control relayd socket. */
967 if (!sock->control_sock_sent) {
968 status = send_consumer_relayd_socket(session_id,
969 &consumer->dst.net.control,
970 consumer,
971 sock,
972 session_name,
973 hostname,
974 base_path,
975 session_live_timer,
976 current_chunk_id,
977 session_creation_time,
978 session_name_contains_creation_time);
979 if (status != LTTNG_OK) {
980 goto error;
981 }
982 }
983
984 /* Sending data relayd socket. */
985 if (!sock->data_sock_sent) {
986 status = send_consumer_relayd_socket(session_id,
987 &consumer->dst.net.data,
988 consumer,
989 sock,
990 session_name,
991 hostname,
992 base_path,
993 session_live_timer,
994 current_chunk_id,
995 session_creation_time,
996 session_name_contains_creation_time);
997 if (status != LTTNG_OK) {
998 goto error;
999 }
1000 }
1001
1002error:
1003 return status;
1004}
1005
1006/*
1007 * Set up relayd connections for a tracing session. First creates the sockets to
1008 * the relayd and sends them to the right domain consumer. Consumer type MUST be
1009 * network.
1010 */
1011int cmd_setup_relayd(const ltt_session::locked_ref& session)
1012{
1013 int ret = LTTNG_OK;
1014 struct ltt_ust_session *usess;
1015 struct ltt_kernel_session *ksess;
1016 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
1017
1018 usess = session->ust_session;
1019 ksess = session->kernel_session;
1020
1021 DBG("Setting relayd for session %s", session->name);
1022
1023 if (session->current_trace_chunk) {
1024 const lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
1025 session->current_trace_chunk, &current_chunk_id.value);
1026
1027 if (status == LTTNG_TRACE_CHUNK_STATUS_OK) {
1028 current_chunk_id.is_set = true;
1029 } else {
1030 ERR("Failed to get current trace chunk id");
1031 ret = LTTNG_ERR_UNK;
1032 goto error;
1033 }
1034 }
1035
1036 if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET &&
1037 usess->consumer->enabled) {
1038 /* For each consumer socket, send relayd sockets */
1039 for (auto *socket :
1040 lttng::urcu::lfht_iteration_adapter<consumer_socket,
1041 decltype(consumer_socket::node),
1042 &consumer_socket::node>(
1043 *usess->consumer->socks->ht)) {
1044 pthread_mutex_lock(socket->lock);
1045 ret = send_consumer_relayd_sockets(
1046 session->id,
1047 usess->consumer,
1048 socket,
1049 session->name,
1050 session->hostname,
1051 session->base_path,
1052 session->live_timer,
1053 current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
1054 session->creation_time,
1055 session->name_contains_creation_time);
1056 pthread_mutex_unlock(socket->lock);
1057 if (ret != LTTNG_OK) {
1058 goto error;
1059 }
1060 /* Session is now ready for network streaming. */
1061 session->net_handle = 1;
1062 }
1063
1064 session->consumer->relay_major_version = usess->consumer->relay_major_version;
1065 session->consumer->relay_minor_version = usess->consumer->relay_minor_version;
1066 session->consumer->relay_allows_clear = usess->consumer->relay_allows_clear;
1067 }
1068
1069 if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET &&
1070 ksess->consumer->enabled) {
1071 const lttng::urcu::read_lock_guard read_lock;
1072
1073 for (auto *socket :
1074 lttng::urcu::lfht_iteration_adapter<consumer_socket,
1075 decltype(consumer_socket::node),
1076 &consumer_socket::node>(
1077 *ksess->consumer->socks->ht)) {
1078 pthread_mutex_lock(socket->lock);
1079 ret = send_consumer_relayd_sockets(
1080 session->id,
1081 ksess->consumer,
1082 socket,
1083 session->name,
1084 session->hostname,
1085 session->base_path,
1086 session->live_timer,
1087 current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
1088 session->creation_time,
1089 session->name_contains_creation_time);
1090 pthread_mutex_unlock(socket->lock);
1091 if (ret != LTTNG_OK) {
1092 goto error;
1093 }
1094 /* Session is now ready for network streaming. */
1095 session->net_handle = 1;
1096 }
1097
1098 session->consumer->relay_major_version = ksess->consumer->relay_major_version;
1099 session->consumer->relay_minor_version = ksess->consumer->relay_minor_version;
1100 session->consumer->relay_allows_clear = ksess->consumer->relay_allows_clear;
1101 }
1102
1103error:
1104 return ret;
1105}
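/*
 * Illustrative sketch (not part of the original file): cmd_setup_relayd()
 * above forwards the current trace chunk id only when one is set, passing
 * either a pointer to the value or nullptr. The struct and helper below are
 * hypothetical stand-ins for that LTTNG_OPTIONAL is_set/value convention.
 */
struct example_optional_u64 {
	bool is_set = false;
	uint64_t value = 0;
};

static inline const uint64_t *example_optional_u64_ptr(const example_optional_u64& opt)
{
	return opt.is_set ? &opt.value : nullptr;
}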
1106
1107/*
1108 * Start a kernel session by opening all necessary streams.
1109 */
1110int start_kernel_session(struct ltt_kernel_session *ksess)
1111{
1112 int ret;
1113
1114 /* Open kernel metadata */
1115 if (ksess->metadata == nullptr && ksess->output_traces) {
1116 ret = kernel_open_metadata(ksess);
1117 if (ret < 0) {
1118 ret = LTTNG_ERR_KERN_META_FAIL;
1119 goto error;
1120 }
1121 }
1122
1123 /* Open kernel metadata stream */
1124 if (ksess->metadata && ksess->metadata_stream_fd < 0) {
1125 ret = kernel_open_metadata_stream(ksess);
1126 if (ret < 0) {
1127 ERR("Kernel create metadata stream failed");
1128 ret = LTTNG_ERR_KERN_STREAM_FAIL;
1129 goto error;
1130 }
1131 }
1132
1133 /* For each channel */
1134 for (auto kchan :
1135 lttng::urcu::list_iteration_adapter<ltt_kernel_channel, &ltt_kernel_channel::list>(
1136 ksess->channel_list.head)) {
1137 if (kchan->stream_count == 0) {
1138 ret = kernel_open_channel_stream(kchan);
1139 if (ret < 0) {
1140 ret = LTTNG_ERR_KERN_STREAM_FAIL;
1141 goto error;
1142 }
1143 /* Update the stream global counter */
1144 ksess->stream_count_global += ret;
1145 }
1146 }
1147
1148 /* Setup kernel consumer socket and send fds to it */
1149 ret = init_kernel_tracing(ksess);
1150 if (ret != 0) {
1151 ret = LTTNG_ERR_KERN_START_FAIL;
1152 goto error;
1153 }
1154
1155 /* This starts the kernel tracing. */
1156 ret = kernel_start_session(ksess);
1157 if (ret < 0) {
1158 ret = LTTNG_ERR_KERN_START_FAIL;
1159 goto error;
1160 }
1161
1162 /* Quiescent wait after starting trace */
1163 kernel_wait_quiescent();
1164
1165 ksess->active = true;
1166
1167 ret = LTTNG_OK;
1168
1169error:
1170 return ret;
1171}
1172
1173int stop_kernel_session(struct ltt_kernel_session *ksess)
1174{
1175 bool error_occurred = false;
1176 int ret;
1177
1178 if (!ksess || !ksess->active) {
1179 return LTTNG_OK;
1180 }
1181 DBG("Stopping kernel tracing");
1182
1183 ret = kernel_stop_session(ksess);
1184 if (ret < 0) {
1185 ret = LTTNG_ERR_KERN_STOP_FAIL;
1186 goto error;
1187 }
1188
1189 kernel_wait_quiescent();
1190
1191 /* Flush metadata after stopping (if exists) */
1192 if (ksess->metadata_stream_fd >= 0) {
1193 ret = kernel_metadata_flush_buffer(ksess->metadata_stream_fd);
1194 if (ret < 0) {
1195 ERR("Kernel metadata flush failed");
1196 error_occurred = true;
1197 }
1198 }
1199
1200 /* Flush all buffers after stopping */
1201 for (auto kchan :
1202 lttng::urcu::list_iteration_adapter<ltt_kernel_channel, &ltt_kernel_channel::list>(
1203 ksess->channel_list.head)) {
1204 ret = kernel_flush_buffer(kchan);
1205 if (ret < 0) {
1206 ERR("Kernel flush buffer error");
1207 error_occurred = true;
1208 }
1209 }
1210
1211 ksess->active = false;
1212 if (error_occurred) {
1213 ret = LTTNG_ERR_UNK;
1214 } else {
1215 ret = LTTNG_OK;
1216 }
1217error:
1218 return ret;
1219}
1220
1221/*
1222 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
1223 */
1224int cmd_disable_channel(const ltt_session::locked_ref& session,
1225 enum lttng_domain_type domain,
1226 char *channel_name)
1227{
1228 int ret;
1229 struct ltt_ust_session *usess;
1230
1231 usess = session->ust_session;
1232
1233 const lttng::urcu::read_lock_guard read_lock;
1234
1235 switch (domain) {
1236 case LTTNG_DOMAIN_KERNEL:
1237 {
1238 ret = channel_kernel_disable(session->kernel_session, channel_name);
1239 if (ret != LTTNG_OK) {
1240 goto error;
1241 }
1242
1243 kernel_wait_quiescent();
1244 break;
1245 }
1246 case LTTNG_DOMAIN_UST:
1247 {
1248 struct ltt_ust_channel *uchan;
1249 struct lttng_ht *chan_ht;
1250
1251 chan_ht = usess->domain_global.channels;
1252
1253 uchan = trace_ust_find_channel_by_name(chan_ht, channel_name);
1254 if (uchan == nullptr) {
1255 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1256 goto error;
1257 }
1258
1259 ret = channel_ust_disable(usess, uchan);
1260 if (ret != LTTNG_OK) {
1261 goto error;
1262 }
1263 break;
1264 }
1265 default:
1266 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
1267 goto error;
1268 }
1269
1270 ret = LTTNG_OK;
1271
1272error:
1273 return ret;
1274}
1275
1276/*
1277 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
1278 *
1279 * The wpipe argument is used as a notifier for the kernel thread.
1280 */
1281int cmd_enable_channel(command_ctx *cmd_ctx, ltt_session::locked_ref& session, int sock, int wpipe)
1282{
1283 int ret;
1284 size_t channel_len;
1285 ssize_t sock_recv_len;
1286 struct lttng_channel *channel = nullptr;
1287 struct lttng_buffer_view view;
1288 struct lttng_dynamic_buffer channel_buffer;
1289 const struct lttng_domain command_domain = cmd_ctx->lsm.domain;
1290
1291 lttng_dynamic_buffer_init(&channel_buffer);
1292 channel_len = (size_t) cmd_ctx->lsm.u.channel.length;
1293 ret = lttng_dynamic_buffer_set_size(&channel_buffer, channel_len);
1294 if (ret) {
1295 ret = LTTNG_ERR_NOMEM;
1296 goto end;
1297 }
1298
1299 sock_recv_len = lttcomm_recv_unix_sock(sock, channel_buffer.data, channel_len);
1300 if (sock_recv_len < 0 || sock_recv_len != channel_len) {
1301 ERR("Failed to receive \"enable channel\" command payload");
1302 ret = LTTNG_ERR_INVALID;
1303 goto end;
1304 }
1305
1306 view = lttng_buffer_view_from_dynamic_buffer(&channel_buffer, 0, channel_len);
1307 if (!lttng_buffer_view_is_valid(&view)) {
1308 ret = LTTNG_ERR_INVALID;
1309 goto end;
1310 }
1311
1312 if (lttng_channel_create_from_buffer(&view, &channel) != channel_len) {
1313 ERR("Invalid channel payload received in \"enable channel\" command");
1314 ret = LTTNG_ERR_INVALID;
1315 goto end;
1316 }
1317
1318 ret = cmd_enable_channel_internal(session, &command_domain, channel, wpipe);
1319
1320end:
1321 lttng_dynamic_buffer_reset(&channel_buffer);
1322 lttng_channel_destroy(channel);
1323 return ret;
1324}
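/*
 * Illustrative sketch (not part of the original file): cmd_enable_channel()
 * above receives a variable-length channel description by sizing a buffer
 * from the command header, reading exactly that many bytes from the client
 * socket, then deserializing from a validated view. The helper below shows
 * the "read exactly N bytes" part with hypothetical names and plain POSIX
 * read(); the real code uses lttcomm_recv_unix_sock() and lttng_buffer_view.
 */
#include <unistd.h>

#include <vector>

static inline bool example_recv_exact(int sock_fd, size_t payload_len, std::vector<char>& out)
{
	out.resize(payload_len);

	size_t received = 0;
	while (received < payload_len) {
		const ssize_t ret = read(sock_fd, out.data() + received, payload_len - received);
		if (ret <= 0) {
			/* Error, or the peer closed the connection early. */
			return false;
		}

		received += (size_t) ret;
	}

	return true;
}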
1325
1326static enum lttng_error_code cmd_enable_channel_internal(ltt_session::locked_ref& session,
1327 const struct lttng_domain *domain,
1328 const struct lttng_channel *_attr,
1329 int wpipe)
1330{
1331 enum lttng_error_code ret_code;
1332 struct ltt_ust_session *usess = session->ust_session;
1333 struct lttng_ht *chan_ht;
1334 size_t len;
1335 struct lttng_channel *attr = nullptr;
1336
1337 LTTNG_ASSERT(_attr);
1338 LTTNG_ASSERT(domain);
1339
1340 const lttng::urcu::read_lock_guard read_lock;
1341
1342 attr = lttng_channel_copy(_attr);
1343 if (!attr) {
1344 ret_code = LTTNG_ERR_NOMEM;
1345 goto end;
1346 }
1347
1348 len = lttng_strnlen(attr->name, sizeof(attr->name));
1349
1350 /* Validate channel name */
1351 if (attr->name[0] == '.' || memchr(attr->name, '/', len) != nullptr) {
1352 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1353 goto end;
1354 }
1355
1356 DBG("Enabling channel %s for session %s", attr->name, session->name);
1357
1358 /*
1359 * If the session is a live session, remove the switch timer; the
1360 * live timer does the same thing but also sends synchronisation
1361 * beacons for inactive streams.
1362 */
1363 if (session->live_timer > 0) {
1364 attr->attr.live_timer_interval = session->live_timer;
1365 attr->attr.switch_timer_interval = 0;
1366 }
1367
1368 /* Check for feature support */
1369 switch (domain->type) {
1370 case LTTNG_DOMAIN_KERNEL:
1371 {
1372 if (kernel_supports_ring_buffer_snapshot_sample_positions() != 1) {
1373 /* Sampling position of buffer is not supported */
1374 WARN("Kernel tracer does not support buffer monitoring. "
1375 "Setting the monitor interval timer to 0 "
1376 "(disabled) for channel '%s' of session '%s'",
1377 attr->name,
1378 session->name);
1379 lttng_channel_set_monitor_timer_interval(attr, 0);
1380 }
1381 break;
1382 }
1383 case LTTNG_DOMAIN_UST:
1384 break;
1385 case LTTNG_DOMAIN_JUL:
1386 case LTTNG_DOMAIN_LOG4J:
1387 case LTTNG_DOMAIN_LOG4J2:
1388 case LTTNG_DOMAIN_PYTHON:
1389 if (!agent_tracing_is_enabled()) {
1390 DBG("Attempted to enable a channel in an agent domain but the agent thread is not running");
1391 ret_code = LTTNG_ERR_AGENT_TRACING_DISABLED;
1392 goto error;
1393 }
1394 break;
1395 default:
1396 ret_code = LTTNG_ERR_UNKNOWN_DOMAIN;
1397 goto error;
1398 }
1399
1400 switch (domain->type) {
1401 case LTTNG_DOMAIN_KERNEL:
1402 {
1403 struct ltt_kernel_channel *kchan;
1404
1405 kchan = trace_kernel_get_channel_by_name(attr->name, session->kernel_session);
1406 if (kchan == nullptr) {
1407 /*
1408 * Don't try to create a channel if the session has been started at
1409 * some point in time before. The tracer does not allow it.
1410 */
1411 if (session->has_been_started) {
1412 ret_code = LTTNG_ERR_TRACE_ALREADY_STARTED;
1413 goto error;
1414 }
1415
1416 if (session->snapshot.nb_output > 0 || session->snapshot_mode) {
1417 /* Enforce mmap output for snapshot sessions. */
1418 attr->attr.output = LTTNG_EVENT_MMAP;
1419 }
1420 ret_code = channel_kernel_create(session->kernel_session, attr, wpipe);
1421 if (attr->name[0] != '\0') {
1422 session->kernel_session->has_non_default_channel = 1;
1423 }
1424 } else {
1425 ret_code = channel_kernel_enable(session->kernel_session, kchan);
1426 }
1427
1428 if (ret_code != LTTNG_OK) {
1429 goto error;
1430 }
1431
1432 kernel_wait_quiescent();
1433 break;
1434 }
1435 case LTTNG_DOMAIN_UST:
1436 case LTTNG_DOMAIN_JUL:
1437 case LTTNG_DOMAIN_LOG4J:
1438 case LTTNG_DOMAIN_LOG4J2:
1439 case LTTNG_DOMAIN_PYTHON:
1440 {
1441 struct ltt_ust_channel *uchan;
1442
1443 /*
1444 * FIXME
1445 *
1446 * Current agent implementation limitations force us to allow
1447 * only one channel at once in "agent" subdomains. Each
1448 * subdomain has a default channel name which must be strictly
1449 * adhered to.
1450 */
1451 if (domain->type == LTTNG_DOMAIN_JUL) {
1452 if (strncmp(attr->name,
1453 DEFAULT_JUL_CHANNEL_NAME,
1454 LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
1455 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1456 goto error;
1457 }
1458 } else if (domain->type == LTTNG_DOMAIN_LOG4J) {
1459 if (strncmp(attr->name,
1460 DEFAULT_LOG4J_CHANNEL_NAME,
1461 LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
1462 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1463 goto error;
1464 }
1465 } else if (domain->type == LTTNG_DOMAIN_LOG4J2) {
1466 if (strncmp(attr->name,
1467 DEFAULT_LOG4J2_CHANNEL_NAME,
1468 LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
1469 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1470 goto error;
1471 }
1472 } else if (domain->type == LTTNG_DOMAIN_PYTHON) {
1473 if (strncmp(attr->name,
1474 DEFAULT_PYTHON_CHANNEL_NAME,
1475 LTTNG_SYMBOL_NAME_LEN - 1) != 0) {
1476 ret_code = LTTNG_ERR_INVALID_CHANNEL_NAME;
1477 goto error;
1478 }
1479 }
1480
1481 chan_ht = usess->domain_global.channels;
1482
1483 uchan = trace_ust_find_channel_by_name(chan_ht, attr->name);
1484 if (uchan == nullptr) {
1485 /*
1486 * Don't try to create a channel if the session has been started at
1487 * some point in time before. The tracer does not allow it.
1488 */
1489 if (session->has_been_started) {
1490 ret_code = LTTNG_ERR_TRACE_ALREADY_STARTED;
1491 goto error;
1492 }
1493
1494 ret_code = channel_ust_create(usess, attr, domain->buf_type);
1495 if (attr->name[0] != '\0') {
1496 usess->has_non_default_channel = 1;
1497 }
1498 } else {
1499 ret_code = channel_ust_enable(usess, uchan);
1500 }
1501 break;
1502 }
1503 default:
1504 ret_code = LTTNG_ERR_UNKNOWN_DOMAIN;
1505 goto error;
1506 }
1507
1508 if (ret_code == LTTNG_OK && attr->attr.output != LTTNG_EVENT_MMAP) {
1509 session->has_non_mmap_channel = true;
1510 }
1511error:
1512end:
1513 lttng_channel_destroy(attr);
1514 return ret_code;
1515}
1516
1517enum lttng_error_code
1518cmd_process_attr_tracker_get_tracking_policy(const ltt_session::locked_ref& session,
1519 enum lttng_domain_type domain,
1520 enum lttng_process_attr process_attr,
1521 enum lttng_tracking_policy *policy)
1522{
1523 enum lttng_error_code ret_code = LTTNG_OK;
1524 const struct process_attr_tracker *tracker;
1525
1526 switch (domain) {
1527 case LTTNG_DOMAIN_KERNEL:
1528 if (!session->kernel_session) {
1529 ret_code = LTTNG_ERR_INVALID;
1530 goto end;
1531 }
1532 tracker = kernel_get_process_attr_tracker(session->kernel_session, process_attr);
1533 break;
1534 case LTTNG_DOMAIN_UST:
1535 if (!session->ust_session) {
1536 ret_code = LTTNG_ERR_INVALID;
1537 goto end;
1538 }
1539 tracker = trace_ust_get_process_attr_tracker(session->ust_session, process_attr);
1540 break;
1541 default:
1542 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1543 goto end;
1544 }
1545 if (tracker) {
1546 *policy = process_attr_tracker_get_tracking_policy(tracker);
1547 } else {
1548 ret_code = LTTNG_ERR_INVALID;
1549 }
1550end:
1551 return ret_code;
1552}
1553
1554enum lttng_error_code
1555cmd_process_attr_tracker_set_tracking_policy(const ltt_session::locked_ref& session,
1556 enum lttng_domain_type domain,
1557 enum lttng_process_attr process_attr,
1558 enum lttng_tracking_policy policy)
1559{
1560 enum lttng_error_code ret_code = LTTNG_OK;
1561
1562 switch (policy) {
1563 case LTTNG_TRACKING_POLICY_INCLUDE_SET:
1564 case LTTNG_TRACKING_POLICY_EXCLUDE_ALL:
1565 case LTTNG_TRACKING_POLICY_INCLUDE_ALL:
1566 break;
1567 default:
1568 ret_code = LTTNG_ERR_INVALID;
1569 goto end;
1570 }
1571
1572 switch (domain) {
1573 case LTTNG_DOMAIN_KERNEL:
1574 if (!session->kernel_session) {
1575 ret_code = LTTNG_ERR_INVALID;
1576 goto end;
1577 }
1578 ret_code = kernel_process_attr_tracker_set_tracking_policy(
1579 session->kernel_session, process_attr, policy);
1580 break;
1581 case LTTNG_DOMAIN_UST:
1582 if (!session->ust_session) {
1583 ret_code = LTTNG_ERR_INVALID;
1584 goto end;
1585 }
1586 ret_code = trace_ust_process_attr_tracker_set_tracking_policy(
1587 session->ust_session, process_attr, policy);
1588 break;
1589 default:
1590 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1591 break;
1592 }
1593end:
1594 return ret_code;
1595}
1596
1597enum lttng_error_code
1598cmd_process_attr_tracker_inclusion_set_add_value(const ltt_session::locked_ref& session,
1599 enum lttng_domain_type domain,
1600 enum lttng_process_attr process_attr,
1601 const struct process_attr_value *value)
1602{
1603 enum lttng_error_code ret_code = LTTNG_OK;
1604
1605 switch (domain) {
1606 case LTTNG_DOMAIN_KERNEL:
1607 if (!session->kernel_session) {
1608 ret_code = LTTNG_ERR_INVALID;
1609 goto end;
1610 }
1611 ret_code = kernel_process_attr_tracker_inclusion_set_add_value(
1612 session->kernel_session, process_attr, value);
1613 break;
1614 case LTTNG_DOMAIN_UST:
1615 if (!session->ust_session) {
1616 ret_code = LTTNG_ERR_INVALID;
1617 goto end;
1618 }
1619 ret_code = trace_ust_process_attr_tracker_inclusion_set_add_value(
1620 session->ust_session, process_attr, value);
1621 break;
1622 default:
1623 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1624 break;
1625 }
1626end:
1627 return ret_code;
1628}
1629
1630enum lttng_error_code
1631cmd_process_attr_tracker_inclusion_set_remove_value(const ltt_session::locked_ref& session,
1632 enum lttng_domain_type domain,
1633 enum lttng_process_attr process_attr,
1634 const struct process_attr_value *value)
1635{
1636 enum lttng_error_code ret_code = LTTNG_OK;
1637
1638 switch (domain) {
1639 case LTTNG_DOMAIN_KERNEL:
1640 if (!session->kernel_session) {
1641 ret_code = LTTNG_ERR_INVALID;
1642 goto end;
1643 }
1644 ret_code = kernel_process_attr_tracker_inclusion_set_remove_value(
1645 session->kernel_session, process_attr, value);
1646 break;
1647 case LTTNG_DOMAIN_UST:
1648 if (!session->ust_session) {
1649 ret_code = LTTNG_ERR_INVALID;
1650 goto end;
1651 }
1652 ret_code = trace_ust_process_attr_tracker_inclusion_set_remove_value(
1653 session->ust_session, process_attr, value);
1654 break;
1655 default:
1656 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1657 break;
1658 }
1659end:
1660 return ret_code;
1661}
1662
1663enum lttng_error_code
1664cmd_process_attr_tracker_get_inclusion_set(const ltt_session::locked_ref& session,
1665 enum lttng_domain_type domain,
1666 enum lttng_process_attr process_attr,
1667 struct lttng_process_attr_values **values)
1668{
1669 enum lttng_error_code ret_code = LTTNG_OK;
1670 const struct process_attr_tracker *tracker;
1671 enum process_attr_tracker_status status;
1672
1673 switch (domain) {
1674 case LTTNG_DOMAIN_KERNEL:
1675 if (!session->kernel_session) {
1676 ret_code = LTTNG_ERR_INVALID;
1677 goto end;
1678 }
1679 tracker = kernel_get_process_attr_tracker(session->kernel_session, process_attr);
1680 break;
1681 case LTTNG_DOMAIN_UST:
1682 if (!session->ust_session) {
1683 ret_code = LTTNG_ERR_INVALID;
1684 goto end;
1685 }
1686 tracker = trace_ust_get_process_attr_tracker(session->ust_session, process_attr);
1687 break;
1688 default:
1689 ret_code = LTTNG_ERR_UNSUPPORTED_DOMAIN;
1690 goto end;
1691 }
1692
1693 if (!tracker) {
1694 ret_code = LTTNG_ERR_INVALID;
1695 goto end;
1696 }
1697
1698 status = process_attr_tracker_get_inclusion_set(tracker, values);
1699 switch (status) {
1700 case PROCESS_ATTR_TRACKER_STATUS_OK:
1701 ret_code = LTTNG_OK;
1702 break;
1703 case PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY:
1704 ret_code = LTTNG_ERR_PROCESS_ATTR_TRACKER_INVALID_TRACKING_POLICY;
1705 break;
1706 case PROCESS_ATTR_TRACKER_STATUS_ERROR:
1707 ret_code = LTTNG_ERR_NOMEM;
1708 break;
1709 default:
1710 ret_code = LTTNG_ERR_UNK;
1711 break;
1712 }
1713
1714end:
1715 return ret_code;
1716}
1717
1718/*
1719 * Command LTTNG_DISABLE_EVENT processed by the client thread.
1720 */
1721int cmd_disable_event(struct command_ctx *cmd_ctx,
1722 ltt_session::locked_ref& locked_session,
1723 struct lttng_event *event,
1724 char *filter_expression,
1725 struct lttng_bytecode *bytecode,
1726 struct lttng_event_exclusion *exclusion)
1727{
1728 int ret;
1729 const ltt_session& session = *locked_session;
1730 const char *event_name;
1731 const char *channel_name = cmd_ctx->lsm.u.disable.channel_name;
1732 const enum lttng_domain_type domain = cmd_ctx->lsm.domain.type;
1733
1734 DBG("Disable event command for event \'%s\'", event->name);
1735
1736 /*
1737 * Filter and exclusions are simply not handled by the
1738 * disable event command at this time.
1739 *
1740 * FIXME
1741 */
1742 (void) filter_expression;
1743 (void) exclusion;
1744
1745 /* Ignore the presence of filter or exclusion for the event */
1746 event->filter = 0;
1747 event->exclusion = 0;
1748
1749 event_name = event->name;
1750
1751 const lttng::urcu::read_lock_guard read_lock;
1752
1753 /* Error out on unhandled search criteria */
1754 if (event->loglevel_type || event->loglevel != -1 || event->enabled || event->pid ||
1755 event->filter || event->exclusion) {
1756 ret = LTTNG_ERR_UNK;
1757 goto error;
1758 }
1759
1760 switch (domain) {
1761 case LTTNG_DOMAIN_KERNEL:
1762 {
1763 struct ltt_kernel_channel *kchan;
1764 struct ltt_kernel_session *ksess;
1765
1766 ksess = session.kernel_session;
1767
1768 /*
1769 * If a non-default channel has been created in the
1770 * session, explicitly require that -c chan_name needs
1771 * to be provided.
1772 */
1773 if (ksess->has_non_default_channel && channel_name[0] == '\0') {
1774 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
1775 goto error_unlock;
1776 }
1777
1778 kchan = trace_kernel_get_channel_by_name(channel_name, ksess);
1779 if (kchan == nullptr) {
1780 ret = LTTNG_ERR_KERN_CHAN_NOT_FOUND;
1781 goto error_unlock;
1782 }
1783
1784 switch (event->type) {
1785 case LTTNG_EVENT_ALL:
1786 case LTTNG_EVENT_TRACEPOINT:
1787 case LTTNG_EVENT_SYSCALL:
1788 case LTTNG_EVENT_PROBE:
1789 case LTTNG_EVENT_FUNCTION:
1790 case LTTNG_EVENT_FUNCTION_ENTRY: /* fall-through */
1791 if (event_name[0] == '\0') {
1792 ret = event_kernel_disable_event(kchan, nullptr, event->type);
1793 } else {
1794 ret = event_kernel_disable_event(kchan, event_name, event->type);
1795 }
1796 if (ret != LTTNG_OK) {
1797 goto error_unlock;
1798 }
1799 break;
1800 default:
1801 ret = LTTNG_ERR_UNK;
1802 goto error_unlock;
1803 }
1804
1805 kernel_wait_quiescent();
1806 break;
1807 }
1808 case LTTNG_DOMAIN_UST:
1809 {
1810 struct ltt_ust_channel *uchan;
1811 struct ltt_ust_session *usess;
1812
1813 usess = session.ust_session;
1814
1815 if (validate_ust_event_name(event_name)) {
1816 ret = LTTNG_ERR_INVALID_EVENT_NAME;
1817 goto error_unlock;
1818 }
1819
1820 /*
1821 * If a non-default channel has been created in the
1822 * session, explicitly require that -c chan_name needs
1823 * to be provided.
1824 */
1825 if (usess->has_non_default_channel && channel_name[0] == '\0') {
1826 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
1827 goto error_unlock;
1828 }
1829
1830 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels, channel_name);
1831 if (uchan == nullptr) {
1832 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1833 goto error_unlock;
1834 }
1835
1836 switch (event->type) {
1837 case LTTNG_EVENT_ALL:
1838 /*
1839 * An empty event name means that everything
1840 * should be disabled.
1841 */
1842 if (event->name[0] == '\0') {
1843 ret = event_ust_disable_all_tracepoints(usess, uchan);
1844 } else {
1845 ret = event_ust_disable_tracepoint(usess, uchan, event_name);
1846 }
1847 if (ret != LTTNG_OK) {
1848 goto error_unlock;
1849 }
1850 break;
1851 default:
1852 ret = LTTNG_ERR_UNK;
1853 goto error_unlock;
1854 }
1855
1856 DBG3("Disable UST event %s in channel %s completed", event_name, channel_name);
1857 break;
1858 }
1859 case LTTNG_DOMAIN_LOG4J:
1860 case LTTNG_DOMAIN_LOG4J2:
1861 case LTTNG_DOMAIN_JUL:
1862 case LTTNG_DOMAIN_PYTHON:
1863 {
1864 struct agent *agt;
1865 struct ltt_ust_session *usess = session.ust_session;
1866
1867 LTTNG_ASSERT(usess);
1868
1869 switch (event->type) {
1870 case LTTNG_EVENT_ALL:
1871 break;
1872 default:
1873 ret = LTTNG_ERR_UNK;
1874 goto error_unlock;
1875 }
1876
1877 agt = trace_ust_find_agent(usess, domain);
1878 if (!agt) {
1879 ret = -LTTNG_ERR_UST_EVENT_NOT_FOUND;
1880 goto error_unlock;
1881 }
1882 /*
1883 * An empty event name means that everything
1884 * should be disabled.
1885 */
1886 if (event->name[0] == '\0') {
1887 ret = event_agent_disable_all(usess, agt);
1888 } else {
1889 ret = event_agent_disable(usess, agt, event_name);
1890 }
1891 if (ret != LTTNG_OK) {
1892 goto error_unlock;
1893 }
1894
1895 break;
1896 }
1897 default:
1898 ret = LTTNG_ERR_UND;
1899 goto error_unlock;
1900 }
1901
1902 ret = LTTNG_OK;
1903
1904error_unlock:
1905error:
1906 free(exclusion);
1907 free(bytecode);
1908 free(filter_expression);
1909 return ret;
1910}
1911
1912/*
1913 * Command LTTNG_ADD_CONTEXT processed by the client thread.
1914 */
1915int cmd_add_context(struct command_ctx *cmd_ctx,
1916 ltt_session::locked_ref& locked_session,
1917 const struct lttng_event_context *event_context,
1918 int kwpipe)
1919{
1920 int ret, chan_kern_created = 0, chan_ust_created = 0;
1921 const enum lttng_domain_type domain = cmd_ctx->lsm.domain.type;
1922 const struct ltt_session& session = *locked_session;
1923 const char *channel_name = cmd_ctx->lsm.u.context.channel_name;
1924
1925 /*
1926 * Don't try to add a context if the session has been started at
1927 * some point in time before. The tracer does not allow it, as it would
1928 * result in a corrupted trace.
1929 */
1930 if (session.has_been_started) {
1931 ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
1932 goto end;
1933 }
1934
1935 switch (domain) {
1936 case LTTNG_DOMAIN_KERNEL:
1937 LTTNG_ASSERT(session.kernel_session);
1938
1939 if (session.kernel_session->channel_count == 0) {
1940 /* Create default channel */
1941 ret = channel_kernel_create(session.kernel_session, nullptr, kwpipe);
1942 if (ret != LTTNG_OK) {
1943 goto error;
1944 }
1945 chan_kern_created = 1;
1946 }
1947 /* Add kernel context to kernel tracer */
1948 ret = context_kernel_add(session.kernel_session, event_context, channel_name);
1949 if (ret != LTTNG_OK) {
1950 goto error;
1951 }
1952 break;
1953 case LTTNG_DOMAIN_JUL:
1954 case LTTNG_DOMAIN_LOG4J:
1955 case LTTNG_DOMAIN_LOG4J2:
1956 {
1957 /*
1958 * Validate channel name.
1959 * If no channel name is given and the domain is JUL or LOG4J,
1960 * set it to the appropriate domain-specific channel name. If
1961 * a name is provided but does not match the expected channel
1962 * name, return an error.
1963 */
1964 if (domain == LTTNG_DOMAIN_JUL && *channel_name &&
1965 strcmp(channel_name, DEFAULT_JUL_CHANNEL_NAME) != 0) {
1966 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1967 goto error;
1968 } else if (domain == LTTNG_DOMAIN_LOG4J && *channel_name &&
1969 strcmp(channel_name, DEFAULT_LOG4J_CHANNEL_NAME) != 0) {
1970 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1971 goto error;
1972 } else if (domain == LTTNG_DOMAIN_LOG4J2 && *channel_name &&
1973 strcmp(channel_name, DEFAULT_LOG4J2_CHANNEL_NAME) != 0) {
1974 ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
1975 goto error;
1976 }
1977 }
1978 /* fall through */
1979 case LTTNG_DOMAIN_UST:
1980 {
1981 struct ltt_ust_session *usess = session.ust_session;
1982 unsigned int chan_count;
1983
1984 LTTNG_ASSERT(usess);
1985
1986 chan_count = lttng_ht_get_count(usess->domain_global.channels);
1987 if (chan_count == 0) {
1988 struct lttng_channel *attr;
1989 /* Create default channel */
1990 attr = channel_new_default_attr(domain, usess->buffer_type);
1991 if (attr == nullptr) {
1992 ret = LTTNG_ERR_FATAL;
1993 goto error;
1994 }
1995
1996 ret = channel_ust_create(usess, attr, usess->buffer_type);
1997 if (ret != LTTNG_OK) {
1998 free(attr);
1999 goto error;
2000 }
2001 channel_attr_destroy(attr);
2002 chan_ust_created = 1;
2003 }
2004
2005 ret = context_ust_add(usess, domain, event_context, channel_name);
2006 if (ret != LTTNG_OK) {
2007 goto error;
2008 }
2009 break;
2010 }
2011 default:
2012 ret = LTTNG_ERR_UND;
2013 goto error;
2014 }
2015
2016 ret = LTTNG_OK;
2017 goto end;
2018
2019error:
2020 if (chan_kern_created) {
2021 struct ltt_kernel_channel *kchan = trace_kernel_get_channel_by_name(
2022 DEFAULT_CHANNEL_NAME, session.kernel_session);
2023 /* Created previously, this should NOT fail. */
2024 LTTNG_ASSERT(kchan);
2025 kernel_destroy_channel(kchan);
2026 }
2027
2028 if (chan_ust_created) {
2029 struct ltt_ust_channel *uchan = trace_ust_find_channel_by_name(
2030 session.ust_session->domain_global.channels, DEFAULT_CHANNEL_NAME);
2031 /* Created previously, this should NOT fail. */
2032 LTTNG_ASSERT(uchan);
2033 /* Remove from the channel list of the session. */
2034 trace_ust_delete_channel(session.ust_session->domain_global.channels, uchan);
2035 trace_ust_destroy_channel(uchan);
2036 }
2037end:
2038 return ret;
2039}
2040
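/*
 * Check whether 'name' starts with 'prefix', comparing at most
 * LTTNG_SYMBOL_NAME_LEN characters.
 */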
2041static inline bool name_starts_with(const char *name, const char *prefix)
2042{
2043 const size_t max_cmp_len = std::min(strlen(prefix), (size_t) LTTNG_SYMBOL_NAME_LEN);
2044
2045 return !strncmp(name, prefix, max_cmp_len);
2046}
2047
2048/* Perform userspace-specific event name validation */
2049static int validate_ust_event_name(const char *name)
2050{
2051 int ret = 0;
2052
2053 if (!name) {
2054 ret = -1;
2055 goto end;
2056 }
2057
2058 /*
2059 * Check name against all internal UST event component namespaces used
2060 * by the agents.
2061 */
2062 if (name_starts_with(name, DEFAULT_JUL_EVENT_COMPONENT) ||
2063 name_starts_with(name, DEFAULT_LOG4J_EVENT_COMPONENT) ||
2064 name_starts_with(name, DEFAULT_LOG4J2_EVENT_COMPONENT) ||
2065 name_starts_with(name, DEFAULT_PYTHON_EVENT_COMPONENT)) {
2066 ret = -1;
2067 }
2068
2069end:
2070 return ret;
2071}
2072
2073/*
2074 * Internal version of cmd_enable_event() with a supplemental
2075 * "internal_event" flag used to enable internal events that should be
2076 * hidden from clients. Such events are used in the agent implementation
2077 * to enable the events through which all "agent" events are funneled.
2078 */
2079static int _cmd_enable_event(ltt_session::locked_ref& locked_session,
2080 const struct lttng_domain *domain,
2081 char *channel_name,
2082 struct lttng_event *event,
2083 char *filter_expression,
2084 struct lttng_bytecode *filter,
2085 struct lttng_event_exclusion *exclusion,
2086 int wpipe,
2087 bool internal_event)
2088{
2089 int ret = 0, channel_created = 0;
2090 struct lttng_channel *attr = nullptr;
2091 const ltt_session& session = *locked_session;
2092
2093 LTTNG_ASSERT(event);
2094 LTTNG_ASSERT(channel_name);
2095
2096 /* If we have a filter, we must have its filter expression */
2097 LTTNG_ASSERT(!(!!filter_expression ^ !!filter));
2098
2099 /* Normalize event name as a globbing pattern */
2100 strutils_normalize_star_glob_pattern(event->name);
2101
2102 /* Normalize exclusion names as globbing patterns */
2103 if (exclusion) {
2104 size_t i;
2105
2106 for (i = 0; i < exclusion->count; i++) {
2107 char *name = LTTNG_EVENT_EXCLUSION_NAME_AT(exclusion, i);
2108
2109 strutils_normalize_star_glob_pattern(name);
2110 }
2111 }
2112
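/* Hold the RCU read-side lock across the channel and agent lookups below. */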
2113 const lttng::urcu::read_lock_guard read_lock;
2114
2115 switch (domain->type) {
2116 case LTTNG_DOMAIN_KERNEL:
2117 {
2118 struct ltt_kernel_channel *kchan;
2119
2120 /*
2121 * If a non-default channel has been created in the
2122 * session, explicitly require that -c chan_name be
2123 * provided.
2124 */
2125 if (session.kernel_session->has_non_default_channel && channel_name[0] == '\0') {
2126 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
2127 goto error;
2128 }
2129
2130 kchan = trace_kernel_get_channel_by_name(channel_name, session.kernel_session);
2131 if (kchan == nullptr) {
2132 attr = channel_new_default_attr(LTTNG_DOMAIN_KERNEL, LTTNG_BUFFER_GLOBAL);
2133 if (attr == nullptr) {
2134 ret = LTTNG_ERR_FATAL;
2135 goto error;
2136 }
2137 if (lttng_strncpy(attr->name, channel_name, sizeof(attr->name))) {
2138 ret = LTTNG_ERR_INVALID;
2139 goto error;
2140 }
2141
2142 ret = cmd_enable_channel_internal(locked_session, domain, attr, wpipe);
2143 if (ret != LTTNG_OK) {
2144 goto error;
2145 }
2146 channel_created = 1;
2147 }
2148
2149 /* Get the newly created kernel channel pointer */
2150 kchan = trace_kernel_get_channel_by_name(channel_name, session.kernel_session);
2151 if (kchan == nullptr) {
2152 /* This should not happen... */
2153 ret = LTTNG_ERR_FATAL;
2154 goto error;
2155 }
2156
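/*
 * Enable the event on the kernel tracer; LTTNG_EVENT_ALL is expanded
 * into both a tracepoint and a syscall enabling.
 */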
2157 switch (event->type) {
2158 case LTTNG_EVENT_ALL:
2159 {
2160 char *filter_expression_a = nullptr;
2161 struct lttng_bytecode *filter_a = nullptr;
2162
2163 /*
2164 * We need to duplicate filter_expression and filter
2165 * because their ownership is passed to the first
2166 * event-enabling call.
2167 */
2168 if (filter_expression) {
2169 filter_expression_a = strdup(filter_expression);
2170 if (!filter_expression_a) {
2171 ret = LTTNG_ERR_FATAL;
2172 goto error;
2173 }
2174 }
2175 if (filter) {
2176 filter_a = zmalloc<lttng_bytecode>(sizeof(*filter_a) + filter->len);
2177 if (!filter_a) {
2178 free(filter_expression_a);
2179 ret = LTTNG_ERR_FATAL;
2180 goto error;
2181 }
2182 memcpy(filter_a, filter, sizeof(*filter_a) + filter->len);
2183 }
2184 event->type = LTTNG_EVENT_TRACEPOINT; /* Hack */
2185 ret = event_kernel_enable_event(kchan, event, filter_expression, filter);
2186 /* We have passed ownership */
2187 filter_expression = nullptr;
2188 filter = nullptr;
2189 if (ret != LTTNG_OK) {
2190 if (channel_created) {
2191 /* Let's not leak a useless channel. */
2192 kernel_destroy_channel(kchan);
2193 }
2194 free(filter_expression_a);
2195 free(filter_a);
2196 goto error;
2197 }
2198 event->type = LTTNG_EVENT_SYSCALL; /* Hack */
2199 ret = event_kernel_enable_event(
2200 kchan, event, filter_expression_a, filter_a);
2201 /* We have passed ownership */
2202 filter_expression_a = nullptr;
2203 filter_a = nullptr;
2204 if (ret != LTTNG_OK) {
2205 goto error;
2206 }
2207 break;
2208 }
2209 case LTTNG_EVENT_PROBE:
2210 case LTTNG_EVENT_USERSPACE_PROBE:
2211 case LTTNG_EVENT_FUNCTION:
2212 case LTTNG_EVENT_FUNCTION_ENTRY:
2213 case LTTNG_EVENT_TRACEPOINT:
2214 ret = event_kernel_enable_event(kchan, event, filter_expression, filter);
2215 /* We have passed ownership */
2216 filter_expression = nullptr;
2217 filter = nullptr;
2218 if (ret != LTTNG_OK) {
2219 if (channel_created) {
2220 /* Let's not leak a useless channel. */
2221 kernel_destroy_channel(kchan);
2222 }
2223 goto error;
2224 }
2225 break;
2226 case LTTNG_EVENT_SYSCALL:
2227 ret = event_kernel_enable_event(kchan, event, filter_expression, filter);
2228 /* We have passed ownership */
2229 filter_expression = nullptr;
2230 filter = nullptr;
2231 if (ret != LTTNG_OK) {
2232 goto error;
2233 }
2234 break;
2235 default:
2236 ret = LTTNG_ERR_UNK;
2237 goto error;
2238 }
2239
2240 kernel_wait_quiescent();
2241 break;
2242 }
2243 case LTTNG_DOMAIN_UST:
2244 {
2245 struct ltt_ust_channel *uchan;
2246 struct ltt_ust_session *usess = session.ust_session;
2247
2248 LTTNG_ASSERT(usess);
2249
2250 /*
2251 * If a non-default channel has been created in the
2252 * session, explicitly require that -c chan_name be
2253 * provided.
2254 */
2255 if (usess->has_non_default_channel && channel_name[0] == '\0') {
2256 ret = LTTNG_ERR_NEED_CHANNEL_NAME;
2257 goto error;
2258 }
2259
2260 /* Get channel from global UST domain */
2261 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels, channel_name);
2262 if (uchan == nullptr) {
2263 /* Create default channel */
2264 attr = channel_new_default_attr(LTTNG_DOMAIN_UST, usess->buffer_type);
2265 if (attr == nullptr) {
2266 ret = LTTNG_ERR_FATAL;
2267 goto error;
2268 }
2269 if (lttng_strncpy(attr->name, channel_name, sizeof(attr->name))) {
2270 ret = LTTNG_ERR_INVALID;
2271 goto error;
2272 }
2273
2274 ret = cmd_enable_channel_internal(locked_session, domain, attr, wpipe);
2275 if (ret != LTTNG_OK) {
2276 goto error;
2277 }
2278
2279 /* Get the newly created channel reference back */
2280 uchan = trace_ust_find_channel_by_name(usess->domain_global.channels,
2281 channel_name);
2282 LTTNG_ASSERT(uchan);
2283 }
2284
2285 if (uchan->domain != LTTNG_DOMAIN_UST && !internal_event) {
2286 /*
2287 * Don't allow users to add UST events to channels which
2288 * are assigned to a userspace subdomain (JUL, Log4J,
2289 * Python, etc.).
2290 */
2291 ret = LTTNG_ERR_INVALID_CHANNEL_DOMAIN;
2292 goto error;
2293 }
2294
2295 if (!internal_event) {
2296 /*
2297 * Ensure the event name is not reserved for internal
2298 * use.
2299 */
2300 ret = validate_ust_event_name(event->name);
2301 if (ret) {
2302 WARN("Userspace event name %s failed validation.", event->name);
2303 ret = LTTNG_ERR_INVALID_EVENT_NAME;
2304 goto error;
2305 }
2306 }
2307
2308 /* At this point, the session and channel exist on the tracer */
2309 ret = event_ust_enable_tracepoint(
2310 usess, uchan, event, filter_expression, filter, exclusion, internal_event);
2311 /* We have passed ownership */
2312 filter_expression = nullptr;
2313 filter = nullptr;
2314 exclusion = nullptr;
2315 if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
2316 goto already_enabled;
2317 } else if (ret != LTTNG_OK) {
2318 goto error;
2319 }
2320 break;
2321 }
2322 case LTTNG_DOMAIN_LOG4J:
2323 case LTTNG_DOMAIN_LOG4J2:
2324 case LTTNG_DOMAIN_JUL:
2325 case LTTNG_DOMAIN_PYTHON:
2326 {
2327 const char *default_event_name, *default_chan_name;
2328 struct agent *agt;
2329 struct lttng_event uevent;
2330 struct lttng_domain tmp_dom;
2331 struct ltt_ust_session *usess = session.ust_session;
2332
2333 LTTNG_ASSERT(usess);
2334
2335 if (!agent_tracing_is_enabled()) {
2336 DBG("Attempted to enable an event in an agent domain but the agent thread is not running");
2337 ret = LTTNG_ERR_AGENT_TRACING_DISABLED;
2338 goto error;
2339 }
2340
2341 agt = trace_ust_find_agent(usess, domain->type);
2342 if (!agt) {
2343 agt = agent_create(domain->type);
2344 if (!agt) {
2345 ret = LTTNG_ERR_NOMEM;
2346 goto error;
2347 }
2348 agent_add(agt, usess->agents);
2349 }
2350
2351 /* Create the default tracepoint. */
2352 memset(&uevent, 0, sizeof(uevent));
2353 uevent.type = LTTNG_EVENT_TRACEPOINT;
2354 uevent.loglevel_type = LTTNG_EVENT_LOGLEVEL_ALL;
2355 uevent.loglevel = -1;
2356 default_event_name = event_get_default_agent_ust_name(domain->type);
2357 if (!default_event_name) {
2358 ret = LTTNG_ERR_FATAL;
2359 goto error;
2360 }
2361 strncpy(uevent.name, default_event_name, sizeof(uevent.name));
2362 uevent.name[sizeof(uevent.name) - 1] = '\0';
2363
2364 /*
2365 * The domain type is changed because the hardcoded default channel
2366 * and event of the agent domains (JUL, Log4j, Log4j2, Python) are
2367 * enabled in the UST domain.
2368 */
2369 memcpy(&tmp_dom, domain, sizeof(tmp_dom));
2370 tmp_dom.type = LTTNG_DOMAIN_UST;
2371
2372 switch (domain->type) {
2373 case LTTNG_DOMAIN_LOG4J:
2374 default_chan_name = DEFAULT_LOG4J_CHANNEL_NAME;
2375 break;
2376 case LTTNG_DOMAIN_LOG4J2:
2377 default_chan_name = DEFAULT_LOG4J2_CHANNEL_NAME;
2378 break;
2379 case LTTNG_DOMAIN_JUL:
2380 default_chan_name = DEFAULT_JUL_CHANNEL_NAME;
2381 break;
2382 case LTTNG_DOMAIN_PYTHON:
2383 default_chan_name = DEFAULT_PYTHON_CHANNEL_NAME;
2384 break;
2385 default:
2386 /* The switch/case we are in makes this impossible */
2387 abort();
2388 }
2389
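/*
 * The filter and its expression are duplicated because their ownership
 * is transferred to cmd_enable_event_internal() below, while the
 * originals are still needed to enable the agent events afterwards.
 */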
2390 {
2391 char *filter_expression_copy = nullptr;
2392 struct lttng_bytecode *filter_copy = nullptr;
2393
2394 if (filter) {
2395 const size_t filter_size =
2396 sizeof(struct lttng_bytecode) + filter->len;
2397
2398 filter_copy = zmalloc<lttng_bytecode>(filter_size);
2399 if (!filter_copy) {
2400 ret = LTTNG_ERR_NOMEM;
2401 goto error;
2402 }
2403 memcpy(filter_copy, filter, filter_size);
2404
2405 filter_expression_copy = strdup(filter_expression);
2406 if (!filter_expression_copy) {
2407 ret = LTTNG_ERR_NOMEM;
2408 }
2409
2410 if (!filter_expression_copy || !filter_copy) {
2411 free(filter_expression_copy);
2412 free(filter_copy);
2413 goto error;
2414 }
2415 }
2416
2417 ret = cmd_enable_event_internal(locked_session,
2418 &tmp_dom,
2419 (char *) default_chan_name,
2420 &uevent,
2421 filter_expression_copy,
2422 filter_copy,
2423 nullptr,
2424 wpipe);
2425 }
2426
2427 if (ret == LTTNG_ERR_UST_EVENT_ENABLED) {
2428 goto already_enabled;
2429 } else if (ret != LTTNG_OK) {
2430 goto error;
2431 }
2432
2433 /* The wild card * means that everything should be enabled. */
2434 if (strncmp(event->name, "*", 1) == 0 && strlen(event->name) == 1) {
2435 ret = event_agent_enable_all(usess, agt, event, filter, filter_expression);
2436 } else {
2437 ret = event_agent_enable(usess, agt, event, filter, filter_expression);
2438 }
2439 filter = nullptr;
2440 filter_expression = nullptr;
2441 if (ret != LTTNG_OK) {
2442 goto error;
2443 }
2444
2445 break;
2446 }
2447 default:
2448 ret = LTTNG_ERR_UND;
2449 goto error;
2450 }
2451
2452 ret = LTTNG_OK;
2453
2454already_enabled:
2455error:
2456 free(filter_expression);
2457 free(filter);
2458 free(exclusion);
2459 channel_attr_destroy(attr);
2460 return ret;
2461}
2462
2463/*
2464 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2465 * We own filter, exclusion, and filter_expression.
2466 */
2467int cmd_enable_event(struct command_ctx *cmd_ctx,
2468 ltt_session::locked_ref& locked_session,
2469 struct lttng_event *event,
2470 char *filter_expression,
2471 struct lttng_event_exclusion *exclusion,
2472 struct lttng_bytecode *bytecode,
2473 int wpipe)
2474{
2475 int ret;
2476 /*
2477 * Copied to ensure proper alignment since 'lsm' is a packed structure.
2478 */
2479 const lttng_domain command_domain = cmd_ctx->lsm.domain;
2480
2481 /*
2482 * The ownership of the following parameters is transferred to
2483 * _cmd_enable_event:
2484 *
2485 * - filter_expression,
2486 * - bytecode,
2487 * - exclusion
2488 */
2489 ret = _cmd_enable_event(locked_session,
2490 &command_domain,
2491 cmd_ctx->lsm.u.enable.channel_name,
2492 event,
2493 filter_expression,
2494 bytecode,
2495 exclusion,
2496 wpipe,
2497 false);
2498 filter_expression = nullptr;
2499 bytecode = nullptr;
2500 exclusion = nullptr;
2501 return ret;
2502}
2503
2504/*
2505 * Enable an event which is internal to LTTng. Internal events should
2506 * never be made visible to clients and are immune to checks such as
2507 * reserved names.
2508 */
2509static int cmd_enable_event_internal(ltt_session::locked_ref& locked_session,
2510 const struct lttng_domain *domain,
2511 char *channel_name,
2512 struct lttng_event *event,
2513 char *filter_expression,
2514 struct lttng_bytecode *filter,
2515 struct lttng_event_exclusion *exclusion,
2516 int wpipe)
2517{
2518 return _cmd_enable_event(locked_session,
2519 domain,
2520 channel_name,
2521 event,
2522 filter_expression,
2523 filter,
2524 exclusion,
2525 wpipe,
2526 true);
2527}
2528
2529/*
2530 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2531 */
2532enum lttng_error_code cmd_list_tracepoints(enum lttng_domain_type domain,
2533 struct lttng_payload *reply_payload)
2534{
2535 enum lttng_error_code ret_code;
2536 int ret;
2537 ssize_t i, nb_events = 0;
2538 struct lttng_event *events = nullptr;
2539 struct lttcomm_list_command_header reply_command_header = {};
2540 size_t reply_command_header_offset;
2541
2542 assert(reply_payload);
2543
2544 /* Reserve space for command reply header. */
2545 reply_command_header_offset = reply_payload->buffer.size;
2546 ret = lttng_dynamic_buffer_set_size(&reply_payload->buffer,
2547 reply_command_header_offset +
2548 sizeof(struct lttcomm_list_command_header));
2549 if (ret) {
2550 ret_code = LTTNG_ERR_NOMEM;
2551 goto error;
2552 }
2553
2554 switch (domain) {
2555 case LTTNG_DOMAIN_KERNEL:
2556 nb_events = kernel_list_events(&events);
2557 if (nb_events < 0) {
2558 ret_code = LTTNG_ERR_KERN_LIST_FAIL;
2559 goto error;
2560 }
2561 break;
2562 case LTTNG_DOMAIN_UST:
2563 nb_events = ust_app_list_events(&events);
2564 if (nb_events < 0) {
2565 ret_code = LTTNG_ERR_UST_LIST_FAIL;
2566 goto error;
2567 }
2568 break;
2569 case LTTNG_DOMAIN_LOG4J:
2570 case LTTNG_DOMAIN_LOG4J2:
2571 case LTTNG_DOMAIN_JUL:
2572 case LTTNG_DOMAIN_PYTHON:
2573 nb_events = agent_list_events(&events, domain);
2574 if (nb_events < 0) {
2575 ret_code = LTTNG_ERR_UST_LIST_FAIL;
2576 goto error;
2577 }
2578 break;
2579 default:
2580 ret_code = LTTNG_ERR_UND;
2581 goto error;
2582 }
2583
2584 for (i = 0; i < nb_events; i++) {
2585 ret = lttng_event_serialize(
2586 &events[i], 0, nullptr, nullptr, 0, nullptr, reply_payload);
2587 if (ret) {
2588 ret_code = LTTNG_ERR_NOMEM;
2589 goto error;
2590 }
2591 }
2592
2593 if (nb_events > UINT32_MAX) {
2594 ERR("Tracepoint count would overflow the tracepoint listing command's reply");
2595 ret_code = LTTNG_ERR_OVERFLOW;
2596 goto error;
2597 }
2598
2599 /* Update command reply header. */
2600 reply_command_header.count = (uint32_t) nb_events;
2601 memcpy(reply_payload->buffer.data + reply_command_header_offset,
2602 &reply_command_header,
2603 sizeof(reply_command_header));
2604
2605 ret_code = LTTNG_OK;
2606error:
2607 free(events);
2608 return ret_code;
2609}
2610
2611/*
2612 * Command LTTNG_LIST_TRACEPOINT_FIELDS processed by the client thread.
2613 */
2614enum lttng_error_code cmd_list_tracepoint_fields(enum lttng_domain_type domain,
2615 struct lttng_payload *reply)
2616{
2617 enum lttng_error_code ret_code;
2618 int ret;
2619 unsigned int i, nb_fields;
2620 struct lttng_event_field *fields = nullptr;
2621 struct lttcomm_list_command_header reply_command_header = {};
2622 size_t reply_command_header_offset;
2623
2624 assert(reply);
2625
2626 /* Reserve space for command reply header. */
2627 reply_command_header_offset = reply->buffer.size;
2628 ret = lttng_dynamic_buffer_set_size(&reply->buffer,
2629 reply_command_header_offset +
2630 sizeof(struct lttcomm_list_command_header));
2631 if (ret) {
2632 ret_code = LTTNG_ERR_NOMEM;
2633 goto error;
2634 }
2635
2636 switch (domain) {
2637 case LTTNG_DOMAIN_UST:
2638 ret = ust_app_list_event_fields(&fields);
2639 if (ret < 0) {
2640 ret_code = LTTNG_ERR_UST_LIST_FAIL;
2641 goto error;
2642 }
2643
2644 break;
2645 case LTTNG_DOMAIN_KERNEL:
2646 default: /* fall-through */
2647 ret_code = LTTNG_ERR_UND;
2648 goto error;
2649 }
2650
2651 nb_fields = ret;
2652
2653 for (i = 0; i < nb_fields; i++) {
2654 ret = lttng_event_field_serialize(&fields[i], reply);
2655 if (ret) {
2656 ret_code = LTTNG_ERR_NOMEM;
2657 goto error;
2658 }
2659 }
2660
2661 if (nb_fields > UINT32_MAX) {
2662 ERR("Tracepoint field count would overflow the tracepoint field listing command's reply");
2663 ret_code = LTTNG_ERR_OVERFLOW;
2664 goto error;
2665 }
2666
2667 /* Update command reply header. */
2668 reply_command_header.count = (uint32_t) nb_fields;
2669
2670 memcpy(reply->buffer.data + reply_command_header_offset,
2671 &reply_command_header,
2672 sizeof(reply_command_header));
2673
2674 ret_code = LTTNG_OK;
2675
2676error:
2677 free(fields);
2678 return ret_code;
2679}
2680
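/*
 * Command LTTNG_LIST_SYSCALLS processed by the client thread.
 */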
2681enum lttng_error_code cmd_list_syscalls(struct lttng_payload *reply_payload)
2682{
2683 enum lttng_error_code ret_code;
2684 ssize_t nb_events, i;
2685 int ret;
2686 struct lttng_event *events = nullptr;
2687 struct lttcomm_list_command_header reply_command_header = {};
2688 size_t reply_command_header_offset;
2689
2690 assert(reply_payload);
2691
2692 /* Reserve space for command reply header. */
2693 reply_command_header_offset = reply_payload->buffer.size;
2694 ret = lttng_dynamic_buffer_set_size(&reply_payload->buffer,
2695 reply_command_header_offset +
2696 sizeof(struct lttcomm_list_command_header));
2697 if (ret) {
2698 ret_code = LTTNG_ERR_NOMEM;
2699 goto end;
2700 }
2701
2702 nb_events = syscall_table_list(&events);
2703 if (nb_events < 0) {
2704 ret_code = (enum lttng_error_code) - nb_events;
2705 goto end;
2706 }
2707
2708 for (i = 0; i < nb_events; i++) {
2709 ret = lttng_event_serialize(
2710 &events[i], 0, nullptr, nullptr, 0, nullptr, reply_payload);
2711 if (ret) {
2712 ret_code = LTTNG_ERR_NOMEM;
2713 goto end;
2714 }
2715 }
2716
2717 if (nb_events > UINT32_MAX) {
2718 ERR("Syscall count would overflow the syscall listing command's reply");
2719 ret_code = LTTNG_ERR_OVERFLOW;
2720 goto end;
2721 }
2722
2723 /* Update command reply header. */
2724 reply_command_header.count = (uint32_t) nb_events;
2725 memcpy(reply_payload->buffer.data + reply_command_header_offset,
2726 &reply_command_header,
2727 sizeof(reply_command_header));
2728
2729 ret_code = LTTNG_OK;
2730end:
2731 free(events);
2732 return ret_code;
2733}
2734
2735/*
2736 * Command LTTNG_START_TRACE processed by the client thread.
2737 */
2738int cmd_start_trace(const ltt_session::locked_ref& session)
2739{
2740 enum lttng_error_code ret;
2741 unsigned long nb_chan = 0;
2742 struct ltt_kernel_session *ksession;
2743 struct ltt_ust_session *usess;
2744 const bool session_rotated_after_last_stop = session->rotated_after_last_stop;
2745 const bool session_cleared_after_last_stop = session->cleared_after_last_stop;
2746
2747 /* Ease our life a bit ;) */
2748 ksession = session->kernel_session;
2749 usess = session->ust_session;
2750
2751 /* Is the session already started? */
2752 if (session->active) {
2753 ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
2754 /* Perform nothing */
2755 goto end;
2756 }
2757
2758 if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING &&
2759 !session->current_trace_chunk) {
2760 /*
2761 * A rotation was launched while the session was stopped and
2762 * it has not been completed yet. It is not possible to start
2763 * the session since starting the session here would require a
2764 * rotation from "NULL" to a new trace chunk. That rotation
2765 * would overlap with the ongoing rotation, which is not
2766 * supported.
2767 */
2768 WARN("Refusing to start session \"%s\" as a rotation launched after the last \"stop\" is still ongoing",
2769 session->name);
2770 ret = LTTNG_ERR_ROTATION_PENDING;
2771 goto error;
2772 }
2773
2774 /*
2775 * Starting a session without any channel is useless since, once started,
2776 * no channel can be enabled anymore; inform the client.
2777 */
2778 if (usess && usess->domain_global.channels) {
2779 nb_chan += lttng_ht_get_count(usess->domain_global.channels);
2780 }
2781 if (ksession) {
2782 nb_chan += ksession->channel_count;
2783 }
2784 if (!nb_chan) {
2785 ret = LTTNG_ERR_NO_CHANNEL;
2786 goto error;
2787 }
2788
2789 session->active = true;
2790 session->rotated_after_last_stop = false;
2791 session->cleared_after_last_stop = false;
2792 if (session->output_traces && !session->current_trace_chunk) {
2793 if (!session->has_been_started) {
2794 struct lttng_trace_chunk *trace_chunk;
2795
2796 DBG("Creating initial trace chunk of session \"%s\"", session->name);
2797 trace_chunk =
2798 session_create_new_trace_chunk(session, nullptr, nullptr, nullptr);
2799 if (!trace_chunk) {
2800 ret = LTTNG_ERR_CREATE_DIR_FAIL;
2801 goto error;
2802 }
2803 LTTNG_ASSERT(!session->current_trace_chunk);
2804 ret = (lttng_error_code) session_set_trace_chunk(
2805 session, trace_chunk, nullptr);
2806 lttng_trace_chunk_put(trace_chunk);
2807 if (ret) {
2808 ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
2809 goto error;
2810 }
2811 } else {
2812 DBG("Rotating session \"%s\" from its current \"NULL\" trace chunk to a new chunk",
2813 session->name);
2814 /*
2815 * Rotate existing streams into the new chunk.
2816 * This is a "quiet" rotation as no client has
2817 * explicitly requested this operation.
2818 *
2819 * There is also no need to wait for the rotation
2820 * to complete as it will happen immediately. No data
2821 * was produced as the session was stopped, so the
2822 * rotation should happen on reception of the command.
2823 */
2824 ret = (lttng_error_code) cmd_rotate_session(
2825 session, nullptr, true, LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
2826 if (ret != LTTNG_OK) {
2827 goto error;
2828 }
2829 }
2830 }
2831
2832 /* Kernel tracing */
2833 if (ksession != nullptr) {
2834 DBG("Start kernel tracing session %s", session->name);
2835 ret = (lttng_error_code) start_kernel_session(ksession);
2836 if (ret != LTTNG_OK) {
2837 goto error;
2838 }
2839 }
2840
2841 /* Start user space tracing for all registered applications. */
2842 if (usess) {
2843 const int int_ret = ust_app_start_trace_all(usess);
2844
2845 if (int_ret < 0) {
2846 ret = LTTNG_ERR_UST_START_FAIL;
2847 goto error;
2848 }
2849 }
2850
2851 /*
2852 * Open a packet in every stream of the session to ensure that viewers
2853 * can correctly identify the boundaries of the periods during which
2854 * tracing was active for this session.
2855 */
2856 ret = session_open_packets(session);
2857 if (ret != LTTNG_OK) {
2858 goto error;
2859 }
2860
2861 /*
2862 * Clear the flag that indicates that a rotation was done while the
2863 * session was stopped.
2864 */
2865 session->rotated_after_last_stop = false;
2866
2867 if (session->rotate_timer_period && !session->rotation_schedule_timer_enabled) {
2868 const int int_ret = timer_session_rotation_schedule_timer_start(
2869 session, session->rotate_timer_period);
2870
2871 if (int_ret < 0) {
2872 ERR("Failed to enable rotate timer");
2873 ret = LTTNG_ERR_UNK;
2874 goto error;
2875 }
2876 }
2877
2878 ret = LTTNG_OK;
2879
2880error:
2881 if (ret == LTTNG_OK) {
2882 /* Flag this after a successful start. */
2883 session->has_been_started = true;
2884 } else {
2885 session->active = false;
2886 /* Restore initial state on error. */
2887 session->rotated_after_last_stop = session_rotated_after_last_stop;
2888 session->cleared_after_last_stop = session_cleared_after_last_stop;
2889 }
2890end:
2891 return ret;
2892}
2893
2894/*
2895 * Command LTTNG_STOP_TRACE processed by the client thread.
2896 */
2897int cmd_stop_trace(const ltt_session::locked_ref& session)
2898{
2899 int ret;
2900 struct ltt_kernel_session *ksession;
2901 struct ltt_ust_session *usess;
2902
2903 DBG("Begin stop session \"%s\" (id %" PRIu64 ")", session->name, session->id);
2904 /* Short cut */
2905 ksession = session->kernel_session;
2906 usess = session->ust_session;
2907
2908 /* Session is not active. Skip everything and inform the client. */
2909 if (!session->active) {
2910 ret = LTTNG_ERR_TRACE_ALREADY_STOPPED;
2911 goto error;
2912 }
2913
2914 ret = stop_kernel_session(ksession);
2915 if (ret != LTTNG_OK) {
2916 goto error;
2917 }
2918
2919 if (usess && usess->active) {
2920 ret = ust_app_stop_trace_all(usess);
2921 if (ret < 0) {
2922 ret = LTTNG_ERR_UST_STOP_FAIL;
2923 goto error;
2924 }
2925 }
2926
2927 DBG("Completed stop session \"%s\" (id %" PRIu64 ")", session->name, session->id);
2928 /* Flag inactive after a successful stop. */
2929 session->active = false;
2930 ret = LTTNG_OK;
2931
2932error:
2933 return ret;
2934}
2935
2936/*
2937 * Set the base_path of the session only if the subdir of a control URI is set.
2938 * Return LTTNG_OK on success, otherwise LTTNG_ERR_*.
2939 */
2940static int set_session_base_path_from_uris(const ltt_session::locked_ref& session,
2941 size_t nb_uri,
2942 struct lttng_uri *uris)
2943{
2944 int ret;
2945 size_t i;
2946
2947 for (i = 0; i < nb_uri; i++) {
2948 if (uris[i].stype != LTTNG_STREAM_CONTROL || uris[i].subdir[0] == '\0') {
2949 /* Not interested in these URIs */
2950 continue;
2951 }
2952
2953 if (session->base_path != nullptr) {
2954 free(session->base_path);
2955 session->base_path = nullptr;
2956 }
2957
2958 /* Set session base_path */
2959 session->base_path = strdup(uris[i].subdir);
2960 if (!session->base_path) {
2961 PERROR("Failed to copy base path \"%s\" to session \"%s\"",
2962 uris[i].subdir,
2963 session->name);
2964 ret = LTTNG_ERR_NOMEM;
2965 goto error;
2966 }
2967 DBG2("Setting base path \"%s\" for session \"%s\"",
2968 session->base_path,
2969 session->name);
2970 }
2971 ret = LTTNG_OK;
2972error:
2973 return ret;
2974}
2975
2976/*
2977 * Command LTTNG_SET_CONSUMER_URI processed by the client thread.
2978 */
2979int cmd_set_consumer_uri(const ltt_session::locked_ref& session,
2980 size_t nb_uri,
2981 struct lttng_uri *uris)
2982{
2983 int ret, i;
2984 struct ltt_kernel_session *ksess = session->kernel_session;
2985 struct ltt_ust_session *usess = session->ust_session;
2986
2987 LTTNG_ASSERT(uris);
2988 LTTNG_ASSERT(nb_uri > 0);
2989
2990 /* Can't set consumer URI if the session is active. */
2991 if (session->active) {
2992 ret = LTTNG_ERR_TRACE_ALREADY_STARTED;
2993 goto error;
2994 }
2995
2996 /*
2997 * Set the session base path, if any. This is done inside
2998 * cmd_set_consumer_uri to preserve the backward compatibility of the
2999 * previous session creation API with the session descriptor API.
3000 */
3001 ret = set_session_base_path_from_uris(session, nb_uri, uris);
3002 if (ret != LTTNG_OK) {
3003 goto error;
3004 }
3005
3006 /* Set the "global" consumer URIs */
3007 for (i = 0; i < nb_uri; i++) {
3008 ret = add_uri_to_consumer(session, session->consumer, &uris[i], LTTNG_DOMAIN_NONE);
3009 if (ret != LTTNG_OK) {
3010 goto error;
3011 }
3012 }
3013
3014 /* Set UST session URIs */
3015 if (session->ust_session) {
3016 for (i = 0; i < nb_uri; i++) {
3017 ret = add_uri_to_consumer(session,
3018 session->ust_session->consumer,
3019 &uris[i],
3020 LTTNG_DOMAIN_UST);
3021 if (ret != LTTNG_OK) {
3022 goto error;
3023 }
3024 }
3025 }
3026
3027 /* Set kernel session URIs */
3028 if (session->kernel_session) {
3029 for (i = 0; i < nb_uri; i++) {
3030 ret = add_uri_to_consumer(session,
3031 session->kernel_session->consumer,
3032 &uris[i],
3033 LTTNG_DOMAIN_KERNEL);
3034 if (ret != LTTNG_OK) {
3035 goto error;
3036 }
3037 }
3038 }
3039
3040 /*
3041 * Make sure to set the session in output mode after the URIs are set,
3042 * since a session can be created without a URL (and is thus flagged as having no output).
3043 */
3044 session->output_traces = 1;
3045 if (ksess) {
3046 ksess->output_traces = 1;
3047 }
3048
3049 if (usess) {
3050 usess->output_traces = 1;
3051 }
3052
3053 /* All good! */
3054 ret = LTTNG_OK;
3055
3056error:
3057 return ret;
3058}
3059
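/*
 * Apply the output configuration described by a session descriptor to a
 * session: register a snapshot output or set the consumer URIs, depending
 * on the descriptor's type.
 */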
3060static enum lttng_error_code
3061set_session_output_from_descriptor(const ltt_session::locked_ref& session,
3062 const struct lttng_session_descriptor *descriptor)
3063{
3064 int ret;
3065 enum lttng_error_code ret_code = LTTNG_OK;
3066 const lttng_session_descriptor_type session_type =
3067 lttng_session_descriptor_get_type(descriptor);
3068 const lttng_session_descriptor_output_type output_type =
3069 lttng_session_descriptor_get_output_type(descriptor);
3070 struct lttng_uri uris[2] = {};
3071 size_t uri_count = 0;
3072
3073 switch (output_type) {
3074 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NONE:
3075 goto end;
3076 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_LOCAL:
3077 lttng_session_descriptor_get_local_output_uri(descriptor, &uris[0]);
3078 uri_count = 1;
3079 break;
3080 case LTTNG_SESSION_DESCRIPTOR_OUTPUT_TYPE_NETWORK:
3081 lttng_session_descriptor_get_network_output_uris(descriptor, &uris[0], &uris[1]);
3082 uri_count = 2;
3083 break;
3084 default:
3085 ret_code = LTTNG_ERR_INVALID;
3086 goto end;
3087 }
3088
3089 switch (session_type) {
3090 case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
3091 {
3092 struct snapshot_output *new_output = nullptr;
3093
3094 new_output = snapshot_output_alloc();
3095 if (!new_output) {
3096 ret_code = LTTNG_ERR_NOMEM;
3097 goto end;
3098 }
3099
3100 ret = snapshot_output_init_with_uri(session,
3101 DEFAULT_SNAPSHOT_MAX_SIZE,
3102 nullptr,
3103 uris,
3104 uri_count,
3105 session->consumer,
3106 new_output,
3107 &session->snapshot);
3108 if (ret < 0) {
3109 ret_code = (ret == -ENOMEM) ? LTTNG_ERR_NOMEM : LTTNG_ERR_INVALID;
3110 snapshot_output_destroy(new_output);
3111 goto end;
3112 }
3113 snapshot_add_output(&session->snapshot, new_output);
3114 break;
3115 }
3116 case LTTNG_SESSION_DESCRIPTOR_TYPE_REGULAR:
3117 case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
3118 {
3119 ret_code = (lttng_error_code) cmd_set_consumer_uri(session, uri_count, uris);
3120 break;
3121 }
3122 default:
3123 ret_code = LTTNG_ERR_INVALID;
3124 goto end;
3125 }
3126end:
3127 return ret_code;
3128}
3129
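/*
 * Create a new session from a session descriptor, register it with the
 * notification thread, and configure its output. The descriptor's
 * auto-generated properties (name, default output) are completed as
 * needed.
 */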
3130static enum lttng_error_code
3131cmd_create_session_from_descriptor(struct lttng_session_descriptor *descriptor,
3132 const lttng_sock_cred *creds,
3133 const char *home_path)
3134{
3135 int ret;
3136 enum lttng_error_code ret_code;
3137 const char *session_name;
3138 struct ltt_session *new_session = nullptr;
3139 enum lttng_session_descriptor_status descriptor_status;
3140
3141 const auto list_lock = lttng::sessiond::lock_session_list();
3142 if (home_path) {
3143 if (*home_path != '/') {
3144 ERR("Home path provided by client is not absolute");
3145 ret_code = LTTNG_ERR_INVALID;
3146 goto end;
3147 }
3148 }
3149
3150 descriptor_status = lttng_session_descriptor_get_session_name(descriptor, &session_name);
3151 switch (descriptor_status) {
3152 case LTTNG_SESSION_DESCRIPTOR_STATUS_OK:
3153 break;
3154 case LTTNG_SESSION_DESCRIPTOR_STATUS_UNSET:
3155 session_name = nullptr;
3156 break;
3157 default:
3158 ret_code = LTTNG_ERR_INVALID;
3159 goto end;
3160 }
3161
3162 ret_code = session_create(session_name, creds->uid, creds->gid, &new_session);
3163 if (ret_code != LTTNG_OK) {
3164 goto end;
3165 }
3166
3167 ret_code = notification_thread_command_add_session(the_notification_thread_handle,
3168 new_session->id,
3169 new_session->name,
3170 new_session->uid,
3171 new_session->gid);
3172 if (ret_code != LTTNG_OK) {
3173 goto end;
3174 }
3175
3176 /* Announce the session's destruction to the notification thread when it is destroyed. */
3177 ret = session_add_destroy_notifier(
3178 [new_session]() {
3179 session_get(new_session);
3180 new_session->lock();
3181 return ltt_session::make_locked_ref(*new_session);
3182 }(),
3183 [](const ltt_session::locked_ref& session,
3184 void *user_data __attribute__((unused))) {
3185 (void) notification_thread_command_remove_session(
3186 the_notification_thread_handle, session->id);
3187 },
3188 nullptr);
3189 if (ret) {
3190 PERROR("Failed to add notification thread command to session's destroy notifiers: session name = %s",
3191 new_session->name);
3192 ret_code = LTTNG_ERR_NOMEM;
3193 goto end;
3194 }
3195
3196 if (!session_name) {
3197 ret = lttng_session_descriptor_set_session_name(descriptor, new_session->name);
3198 if (ret) {
3199 ret_code = LTTNG_ERR_SESSION_FAIL;
3200 goto end;
3201 }
3202 }
3203
3204 if (!lttng_session_descriptor_is_output_destination_initialized(descriptor)) {
3205 /*
3206 * Only include the session's creation time in the output
3207 * destination if the name of the session itself was
3208 * not auto-generated.
3209 */
3210 ret_code = lttng_session_descriptor_set_default_output(
3211 descriptor,
3212 session_name ? &new_session->creation_time : nullptr,
3213 home_path);
3214 if (ret_code != LTTNG_OK) {
3215 goto end;
3216 }
3217 } else {
3218 new_session->has_user_specified_directory =
3219 lttng_session_descriptor_has_output_directory(descriptor);
3220 }
3221
3222 switch (lttng_session_descriptor_get_type(descriptor)) {
3223 case LTTNG_SESSION_DESCRIPTOR_TYPE_SNAPSHOT:
3224 new_session->snapshot_mode = 1;
3225 break;
3226 case LTTNG_SESSION_DESCRIPTOR_TYPE_LIVE:
3227 new_session->live_timer =
3228 lttng_session_descriptor_live_get_timer_interval(descriptor);
3229 break;
3230 default:
3231 break;
3232 }
3233
3234 ret_code = set_session_output_from_descriptor(
3235 [new_session]() {
3236 session_get(new_session);
3237 new_session->lock();
3238 return ltt_session::make_locked_ref(*new_session);
3239 }(),
3240 descriptor);
3241 if (ret_code != LTTNG_OK) {
3242 goto end;
3243 }
3244 new_session->consumer->enabled = true;
3245 ret_code = LTTNG_OK;
3246end:
3247 /* Release reference provided by the session_create function. */
3248 session_put(new_session);
3249 if (ret_code != LTTNG_OK && new_session) {
3250 /* Release the global reference on error. */
3251 session_destroy(new_session);
3252 }
3253
3254 return ret_code;
3255}
3256
3257enum lttng_error_code cmd_create_session(struct command_ctx *cmd_ctx,
3258 int sock,
3259 struct lttng_session_descriptor **return_descriptor)
3260{
3261 int ret;
3262 size_t payload_size;
3263 struct lttng_dynamic_buffer payload;
3264 struct lttng_buffer_view home_dir_view;
3265 struct lttng_buffer_view session_descriptor_view;
3266 struct lttng_session_descriptor *session_descriptor = nullptr;
3267 enum lttng_error_code ret_code;
3268
3269 lttng_dynamic_buffer_init(&payload);
3270 if (cmd_ctx->lsm.u.create_session.home_dir_size >= LTTNG_PATH_MAX) {
3271 ret_code = LTTNG_ERR_INVALID;
3272 goto error;
3273 }
3274 if (cmd_ctx->lsm.u.create_session.session_descriptor_size >
3275 LTTNG_SESSION_DESCRIPTOR_MAX_LEN) {
3276 ret_code = LTTNG_ERR_INVALID;
3277 goto error;
3278 }
3279
3280 payload_size = cmd_ctx->lsm.u.create_session.home_dir_size +
3281 cmd_ctx->lsm.u.create_session.session_descriptor_size;
3282 ret = lttng_dynamic_buffer_set_size(&payload, payload_size);
3283 if (ret) {
3284 ret_code = LTTNG_ERR_NOMEM;
3285 goto error;
3286 }
3287
3288 ret = lttcomm_recv_unix_sock(sock, payload.data, payload.size);
3289 if (ret <= 0) {
3290 ERR("Reception of session descriptor failed, aborting.");
3291 ret_code = LTTNG_ERR_SESSION_FAIL;
3292 goto error;
3293 }
3294
3295 home_dir_view = lttng_buffer_view_from_dynamic_buffer(
3296 &payload, 0, cmd_ctx->lsm.u.create_session.home_dir_size);
3297 if (cmd_ctx->lsm.u.create_session.home_dir_size > 0 &&
3298 !lttng_buffer_view_is_valid(&home_dir_view)) {
3299 ERR("Invalid payload in \"create session\" command: buffer too short to contain home directory");
3300 ret_code = LTTNG_ERR_INVALID_PROTOCOL;
3301 goto error;
3302 }
3303
3304 session_descriptor_view = lttng_buffer_view_from_dynamic_buffer(
3305 &payload,
3306 cmd_ctx->lsm.u.create_session.home_dir_size,
3307 cmd_ctx->lsm.u.create_session.session_descriptor_size);
3308 if (!lttng_buffer_view_is_valid(&session_descriptor_view)) {
3309 ERR("Invalid payload in \"create session\" command: buffer too short to contain session descriptor");
3310 ret_code = LTTNG_ERR_INVALID_PROTOCOL;
3311 goto error;
3312 }
3313
3314 ret = lttng_session_descriptor_create_from_buffer(&session_descriptor_view,
3315 &session_descriptor);
3316 if (ret < 0) {
3317 ERR("Failed to create session descriptor from payload of \"create session\" command");
3318 ret_code = LTTNG_ERR_INVALID;
3319 goto error;
3320 }
3321
3322 /*
3323 * Sets the descriptor's auto-generated properties (name, output) if
3324 * needed.
3325 */
3326 ret_code = cmd_create_session_from_descriptor(session_descriptor,
3327 &cmd_ctx->creds,
3328 home_dir_view.size ? home_dir_view.data :
3329 nullptr);
3330 if (ret_code != LTTNG_OK) {
3331 goto error;
3332 }
3333
3334 ret_code = LTTNG_OK;
3335 *return_descriptor = session_descriptor;
3336 session_descriptor = nullptr;
3337error:
3338 lttng_dynamic_buffer_reset(&payload);
3339 lttng_session_descriptor_destroy(session_descriptor);
3340 return ret_code;
3341}
3342
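/*
 * Deferred reply to an LTTNG_DESTROY_SESSION command, sent to the client
 * once the destruction of the session has completed. The reply includes
 * the rotation state and, when an implicit final rotation completed, the
 * location of the resulting trace archive.
 */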
3343static void cmd_destroy_session_reply(const ltt_session::locked_ref& session, void *_reply_context)
3344{
3345 int ret;
3346 ssize_t comm_ret;
3347 const struct cmd_destroy_session_reply_context *reply_context =
3348 (cmd_destroy_session_reply_context *) _reply_context;
3349 struct lttng_dynamic_buffer payload;
3350 struct lttcomm_session_destroy_command_header cmd_header;
3351 struct lttng_trace_archive_location *location = nullptr;
3352 struct lttcomm_lttng_msg llm = {
3353 .cmd_type = LTTCOMM_SESSIOND_COMMAND_DESTROY_SESSION,
3354 .ret_code = reply_context->destruction_status,
3355 .pid = UINT32_MAX,
3356 .cmd_header_size = sizeof(struct lttcomm_session_destroy_command_header),
3357 .data_size = 0,
3358 .fd_count = 0,
3359 };
3360 size_t payload_size_before_location;
3361
3362 lttng_dynamic_buffer_init(&payload);
3363
3364 ret = lttng_dynamic_buffer_append(&payload, &llm, sizeof(llm));
3365 if (ret) {
3366 ERR("Failed to append session destruction message");
3367 goto error;
3368 }
3369
3370 cmd_header.rotation_state = (int32_t) (reply_context->implicit_rotation_on_destroy ?
3371 session->rotation_state :
3372 LTTNG_ROTATION_STATE_NO_ROTATION);
3373 ret = lttng_dynamic_buffer_append(&payload, &cmd_header, sizeof(cmd_header));
3374 if (ret) {
3375 ERR("Failed to append session destruction command header");
3376 goto error;
3377 }
3378
3379 if (!reply_context->implicit_rotation_on_destroy) {
3380 DBG("No implicit rotation performed during the destruction of session \"%s\", sending reply",
3381 session->name);
3382 goto send_reply;
3383 }
3384 if (session->rotation_state != LTTNG_ROTATION_STATE_COMPLETED) {
3385 DBG("Rotation state of session \"%s\" is not \"completed\", sending session destruction reply",
3386 session->name);
3387 goto send_reply;
3388 }
3389
3390 location = session_get_trace_archive_location(session);
3391 if (!location) {
3392 ERR("Failed to get the location of the trace archive produced during the destruction of session \"%s\"",
3393 session->name);
3394 goto error;
3395 }
3396
3397 payload_size_before_location = payload.size;
3398 comm_ret = lttng_trace_archive_location_serialize(location, &payload);
3399 lttng_trace_archive_location_put(location);
3400 if (comm_ret < 0) {
3401 ERR("Failed to serialize the location of the trace archive produced during the destruction of session \"%s\"",
3402 session->name);
3403 goto error;
3404 }
3405 /* Update the message to indicate the location's length. */
3406 ((struct lttcomm_lttng_msg *) payload.data)->data_size =
3407 payload.size - payload_size_before_location;
3408send_reply:
3409 comm_ret = lttcomm_send_unix_sock(reply_context->reply_sock_fd, payload.data, payload.size);
3410 if (comm_ret != (ssize_t) payload.size) {
3411 ERR("Failed to send result of the destruction of session \"%s\" to client",
3412 session->name);
3413 }
3414error:
3415 ret = close(reply_context->reply_sock_fd);
3416 if (ret) {
3417 PERROR("Failed to close client socket in deferred session destroy reply");
3418 }
3419 lttng_dynamic_buffer_reset(&payload);
3420 free(_reply_context);
3421}
3422
3423/*
3424 * Command LTTNG_DESTROY_SESSION processed by the client thread.
3425 *
3426 * Called with session lock held.
3427 */
3428int cmd_destroy_session(const ltt_session::locked_ref& session, int *sock_fd)
3429{
3430 int ret;
3431 enum lttng_error_code destruction_last_error = LTTNG_OK;
3432 struct cmd_destroy_session_reply_context *reply_context = nullptr;
3433
3434 if (sock_fd) {
3435 reply_context = zmalloc<cmd_destroy_session_reply_context>();
3436 if (!reply_context) {
3437 ret = LTTNG_ERR_NOMEM;
3438 goto end;
3439 }
3440
3441 reply_context->reply_sock_fd = *sock_fd;
3442 }
3443
3444 DBG("Begin destroy session %s (id %" PRIu64 ")", session->name, session->id);
3445 if (session->active) {
3446 DBG("Session \"%s\" is active, attempting to stop it before destroying it",
3447 session->name);
3448 ret = cmd_stop_trace(session);
3449 if (ret != LTTNG_OK && ret != LTTNG_ERR_TRACE_ALREADY_STOPPED) {
3450 /* Carry on with the destruction of the session. */
3451 ERR("Failed to stop session \"%s\" as part of its destruction: %s",
3452 session->name,
3453 lttng_strerror(-ret));
3454 destruction_last_error = (lttng_error_code) ret;
3455 }
3456 }
3457
3458 if (session->rotation_schedule_timer_enabled) {
3459 if (timer_session_rotation_schedule_timer_stop(session)) {
3460 ERR("Failed to stop the \"rotation schedule\" timer of session %s",
3461 session->name);
3462 destruction_last_error = LTTNG_ERR_TIMER_STOP_ERROR;
3463 }
3464 }
3465
3466 if (session->rotate_size) {
3467 try {
3468 the_rotation_thread_handle->unsubscribe_session_consumed_size_rotation(
3469 *session);
3470 } catch (const std::exception& e) {
3471 /* Continue the destruction of the session anyway. */
3472 ERR("Failed to unsubscribe rotation thread notification channel from consumed size condition during session destruction: %s",
3473 e.what());
3474 }
3475
3476 session->rotate_size = 0;
3477 }
3478
3479 if (session->rotated && session->current_trace_chunk && session->output_traces) {
3480 /*
3481 * Perform a last rotation on destruction if rotations have
3482 * occurred during the session's lifetime.
3483 */
3484 ret = cmd_rotate_session(
3485 session, nullptr, false, LTTNG_TRACE_CHUNK_COMMAND_TYPE_MOVE_TO_COMPLETED);
3486 if (ret != LTTNG_OK) {
3487 ERR("Failed to perform an implicit rotation as part of the destruction of session \"%s\": %s",
3488 session->name,
3489 lttng_strerror(-ret));
3490 destruction_last_error = (lttng_error_code) -ret;
3491 }
3492 if (reply_context) {
3493 reply_context->implicit_rotation_on_destroy = true;
3494 }
3495 } else if (session->has_been_started && session->current_trace_chunk) {
3496 /*
3497 * The user has not triggered a session rotation. However, to
3498 * ensure all data has been consumed, the session is rotated
3499 * to a 'null' trace chunk before it is destroyed.
3500 *
3501 * This is a "quiet" rotation meaning that no notification is
3502 * emitted and no renaming of the current trace chunk takes
3503 * place.
3504 */
3505 ret = cmd_rotate_session(
3506 session, nullptr, true, LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION);
3507 /*
3508 * Rotation operations may not be supported by the kernel
3509 * tracer. Hence, do not consider this implicit rotation as
3510 * a session destruction error. The library has already stopped
3511 * the session and waited for pending data; there is nothing
3512 * left to do but complete the destruction of the session.
3513 */
3514 if (ret != LTTNG_OK && ret != -LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL) {
3515 ERR("Failed to perform a quiet rotation as part of the destruction of session \"%s\": %s",
3516 session->name,
3517 lttng_strerror(ret));
3518 destruction_last_error = (lttng_error_code) -ret;
3519 }
3520 }
3521
3522 if (session->shm_path[0]) {
3523 /*
3524 * When a session is created with an explicit shm_path,
3525 * the consumer daemon will create its shared memory files
3526 * at that location and will *not* unlink them. This is normal
3527 * as the intention of that feature is to make it possible
3528 * to retrieve the content of those files should a crash occur.
3529 *
3530 * To ensure the content of those files can be used, the
3531 * sessiond daemon will replicate the content of the metadata
3532 * cache in a metadata file.
3533 *
3534 * On clean-up, it is expected that the consumer daemon will
3535 * unlink the shared memory files and that the session daemon
3536 * will unlink the metadata file. Then, the session's directory
3537 * in the shm path can be removed.
3538 *
3539 * Unfortunately, a flaw in the design of the sessiond's and
3540 * consumerd's tear down of channels makes it impossible to
3541 * determine when the sessiond _and_ the consumerd have both
3542 * destroyed their representation of a channel. For one, the
3543 * unlinking, close, and rmdir happen in deferred 'call_rcu'
3544 * callbacks in both daemons.
3545 *
3546 * However, it is also impossible for the sessiond to know when
3547 * the consumer daemon is done destroying its channel(s) since
3548 * it occurs as a reaction to the closing of the channel's file
3549 * descriptor. There is no resulting communication initiated
3550 * from the consumerd to the sessiond to confirm that the
3551 * operation is completed (and was successful).
3552 *
3553 * Until this is all fixed, the session daemon checks for the
3554 * removal of the session's shm path which makes it possible
3555 * to safely advertise a session as having been destroyed.
3556 *
3557 * Prior to this fix, it was not possible to reliably save
3558 * a session making use of the --shm-path option, destroy it,
3559 * and load it again. This is because the creation of the
3560 * session would fail upon seeing the session's shm path
3561 * already in existence.
3562 *
3563 * Note that none of the error paths in the check for the
3564 * directory's existence return an error. This is normal
3565 * as there isn't much that can be done. The session will
3566 * be destroyed properly, except that we can't offer the
3567 * guarantee that the same session can be re-created.
3568 */
3569 current_completion_handler = &destroy_completion_handler.handler;
3570 ret = lttng_strncpy(destroy_completion_handler.shm_path,
3571 session->shm_path,
3572 sizeof(destroy_completion_handler.shm_path));
3573 LTTNG_ASSERT(!ret);
3574 }
3575
3576 /*
3577 * The session is destroyed. However, note that the command context
3578 * still holds a reference to the session, thus delaying its destruction
3579 * _at least_ up to the point when that reference is released.
3580 */
3581 session_destroy(&session.get());
3582 if (reply_context) {
3583 reply_context->destruction_status = destruction_last_error;
3584 ret = session_add_destroy_notifier(
3585 session, cmd_destroy_session_reply, (void *) reply_context);
3586 if (ret) {
3587 ret = LTTNG_ERR_FATAL;
3588 goto end;
3589 } else {
3590 *sock_fd = -1;
3591 }
3592 }
3593 ret = LTTNG_OK;
3594end:
3595 return ret;
3596}
3597
3598/*
3599 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
3600 */
3601int cmd_register_consumer(const ltt_session::locked_ref& session,
3602 enum lttng_domain_type domain,
3603 const char *sock_path,
3604 struct consumer_data *cdata)
3605{
3606 int ret, sock;
3607 struct consumer_socket *socket = nullptr;
3608
3609 LTTNG_ASSERT(cdata);
3610 LTTNG_ASSERT(sock_path);
3611
3612 switch (domain) {
3613 case LTTNG_DOMAIN_KERNEL:
3614 {
3615 struct ltt_kernel_session *ksess = session->kernel_session;
3616
3617 LTTNG_ASSERT(ksess);
3618
3619 /* Can't register a consumer if there is already one */
3620 if (ksess->consumer_fds_sent != 0) {
3621 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
3622 goto error;
3623 }
3624
3625 sock = lttcomm_connect_unix_sock(sock_path);
3626 if (sock < 0) {
3627 ret = LTTNG_ERR_CONNECT_FAIL;
3628 goto error;
3629 }
3630 cdata->cmd_sock = sock;
3631
3632 socket = consumer_allocate_socket(&cdata->cmd_sock);
3633 if (socket == nullptr) {
3634 ret = close(sock);
3635 if (ret < 0) {
3636 PERROR("close register consumer");
3637 }
3638 cdata->cmd_sock = -1;
3639 ret = LTTNG_ERR_FATAL;
3640 goto error;
3641 }
3642
3643 socket->lock = zmalloc<pthread_mutex_t>();
3644 if (socket->lock == nullptr) {
3645 PERROR("zmalloc pthread mutex");
3646 ret = LTTNG_ERR_FATAL;
3647 goto error;
3648 }
3649
3650 pthread_mutex_init(socket->lock, nullptr);
3651 socket->registered = 1;
3652
3653 const lttng::urcu::read_lock_guard read_lock;
3654 consumer_add_socket(socket, ksess->consumer);
3655
3656 pthread_mutex_lock(&cdata->pid_mutex);
3657 cdata->pid = -1;
3658 pthread_mutex_unlock(&cdata->pid_mutex);
3659
3660 break;
3661 }
3662 default:
3663 /* TODO: Userspace tracing */
3664 ret = LTTNG_ERR_UND;
3665 goto error;
3666 }
3667
3668 return LTTNG_OK;
3669
3670error:
3671 if (socket) {
3672 consumer_destroy_socket(socket);
3673 }
3674 return ret;
3675}
3676
3677/*
3678 * Command LTTNG_LIST_DOMAINS processed by the client thread.
3679 */
3680ssize_t cmd_list_domains(const ltt_session::locked_ref& session, struct lttng_domain **domains)
3681{
3682 int ret, index = 0;
3683 ssize_t nb_dom = 0;
3684
3685 if (session->kernel_session != nullptr) {
3686 DBG3("Listing domains found kernel domain");
3687 nb_dom++;
3688 }
3689
3690 if (session->ust_session != nullptr) {
3691 DBG3("Listing domains found UST global domain");
3692 nb_dom++;
3693
3694 for (auto *agt :
3695 lttng::urcu::lfht_iteration_adapter<agent, decltype(agent::node), &agent::node>(
3696 *session->ust_session->agents->ht)) {
3697 if (agt->being_used) {
3698 nb_dom++;
3699 }
3700 }
3701 }
3702
3703 if (!nb_dom) {
3704 goto end;
3705 }
3706
3707 *domains = calloc<lttng_domain>(nb_dom);
3708 if (*domains == nullptr) {
3709 ret = LTTNG_ERR_FATAL;
3710 goto error;
3711 }
3712
3713 if (session->kernel_session != nullptr) {
3714 (*domains)[index].type = LTTNG_DOMAIN_KERNEL;
3715
3716 /* Kernel session buffer type is always GLOBAL */
3717 (*domains)[index].buf_type = LTTNG_BUFFER_GLOBAL;
3718
3719 index++;
3720 }
3721
3722 if (session->ust_session != nullptr) {
3723 (*domains)[index].type = LTTNG_DOMAIN_UST;
3724 (*domains)[index].buf_type = session->ust_session->buffer_type;
3725 index++;
3726
3727 {
3728 const lttng::urcu::read_lock_guard read_lock;
3729
3730 for (auto *agt : lttng::urcu::lfht_iteration_adapter<agent,
3731 decltype(agent::node),
3732 &agent::node>(
3733 *session->ust_session->agents->ht)) {
3734 if (agt->being_used) {
3735 (*domains)[index].type = agt->domain;
3736 (*domains)[index].buf_type =
3737 session->ust_session->buffer_type;
3738 index++;
3739 }
3740 }
3741 }
3742 }
3743end:
3744 return nb_dom;
3745
3746error:
3747 /* Return negative value to differentiate return code */
3748 return -ret;
3749}
3750
3751/*
3752 * Command LTTNG_LIST_CHANNELS processed by the client thread.
3753 */
3754enum lttng_error_code cmd_list_channels(enum lttng_domain_type domain,
3755 const ltt_session::locked_ref& session,
3756 struct lttng_payload *payload)
3757{
3758 int ret = 0;
3759 unsigned int i = 0;
3760 struct lttcomm_list_command_header cmd_header = {};
3761 size_t cmd_header_offset;
3762 enum lttng_error_code ret_code;
3763
3764 LTTNG_ASSERT(payload);
3765
3766 DBG("Listing channels for session %s", session->name);
3767
3768 cmd_header_offset = payload->buffer.size;
3769
3770 /* Reserve space for command reply header. */
3771 ret = lttng_dynamic_buffer_set_size(&payload->buffer,
3772 cmd_header_offset + sizeof(cmd_header));
3773 if (ret) {
3774 ret_code = LTTNG_ERR_NOMEM;
3775 goto end;
3776 }
3777
3778 switch (domain) {
3779 case LTTNG_DOMAIN_KERNEL:
3780 {
3781 /* Kernel channels */
3782 if (session->kernel_session != nullptr) {
3783 for (auto kchan :
3784 lttng::urcu::list_iteration_adapter<ltt_kernel_channel,
3785 &ltt_kernel_channel::list>(
3786 session->kernel_session->channel_list.head)) {
3787 uint64_t discarded_events, lost_packets;
3788 struct lttng_channel_extended *extended;
3789
3790 extended = (struct lttng_channel_extended *)
3791 kchan->channel->attr.extended.ptr;
3792
3793 ret = get_kernel_runtime_stats(
3794 session, kchan, &discarded_events, &lost_packets);
3795 if (ret < 0) {
3796 ret_code = LTTNG_ERR_UNK;
3797 goto end;
3798 }
3799
3800 /*
3801 * Update the discarded_events and lost_packets
3802 * count for the channel
3803 */
3804 extended->discarded_events = discarded_events;
3805 extended->lost_packets = lost_packets;
3806
3807 ret = lttng_channel_serialize(kchan->channel, &payload->buffer);
3808 if (ret) {
3809 ERR("Failed to serialize lttng_channel: channel name = '%s'",
3810 kchan->channel->name);
3811 ret_code = LTTNG_ERR_UNK;
3812 goto end;
3813 }
3814
3815 i++;
3816 }
3817 }
3818 break;
3819 }
3820 case LTTNG_DOMAIN_UST:
3821 {
3822 for (auto *uchan :
3823 lttng::urcu::lfht_iteration_adapter<ltt_ust_channel,
3824 decltype(ltt_ust_channel::node),
3825 &ltt_ust_channel::node>(
3826 *session->ust_session->domain_global.channels->ht)) {
3827 uint64_t discarded_events = 0, lost_packets = 0;
3828 struct lttng_channel *channel = nullptr;
3829 struct lttng_channel_extended *extended;
3830
3831 channel = trace_ust_channel_to_lttng_channel(uchan);
3832 if (!channel) {
3833 ret_code = LTTNG_ERR_NOMEM;
3834 goto end;
3835 }
3836
3837 extended = (struct lttng_channel_extended *) channel->attr.extended.ptr;
3838
3839 ret = get_ust_runtime_stats(
3840 session, uchan, &discarded_events, &lost_packets);
3841 if (ret < 0) {
3842 lttng_channel_destroy(channel);
3843 ret_code = LTTNG_ERR_UNK;
3844 goto end;
3845 }
3846
3847 extended->discarded_events = discarded_events;
3848 extended->lost_packets = lost_packets;
3849
3850 ret = lttng_channel_serialize(channel, &payload->buffer);
3851 if (ret) {
3852 ERR("Failed to serialize lttng_channel: channel name = '%s'",
3853 channel->name);
3854 lttng_channel_destroy(channel);
3855 ret_code = LTTNG_ERR_UNK;
3856 goto end;
3857 }
3858
3859 lttng_channel_destroy(channel);
3860 i++;
3861 }
3862
3863 break;
3864 }
3865 default:
3866 break;
3867 }
3868
3869 if (i > UINT32_MAX) {
3870 ERR("Channel count would overflow the channel listing command's reply");
3871 ret_code = LTTNG_ERR_OVERFLOW;
3872 goto end;
3873 }
3874
3875 /* Update command reply header. */
3876 cmd_header.count = (uint32_t) i;
3877 memcpy(payload->buffer.data + cmd_header_offset, &cmd_header, sizeof(cmd_header));
3878 ret_code = LTTNG_OK;
3879
3880end:
3881 return ret_code;
3882}
3883
3884/*
3885 * Command LTTNG_LIST_EVENTS processed by the client thread.
3886 */
3887enum lttng_error_code cmd_list_events(enum lttng_domain_type domain,
3888 const ltt_session::locked_ref& session,
3889 char *channel_name,
3890 struct lttng_payload *reply_payload)
3891{
3892 int buffer_resize_ret;
3893 enum lttng_error_code ret_code = LTTNG_OK;
3894 struct lttcomm_list_command_header reply_command_header = {};
3895 size_t reply_command_header_offset;
3896 unsigned int nb_events = 0;
3897
3898 assert(reply_payload);
3899
3900 /* Reserve space for command reply header. */
3901 reply_command_header_offset = reply_payload->buffer.size;
3902 buffer_resize_ret = lttng_dynamic_buffer_set_size(
3903 &reply_payload->buffer,
3904 reply_command_header_offset + sizeof(struct lttcomm_list_command_header));
3905 if (buffer_resize_ret) {
3906 ret_code = LTTNG_ERR_NOMEM;
3907 goto end;
3908 }
3909
3910 switch (domain) {
3911 case LTTNG_DOMAIN_KERNEL:
3912 if (session->kernel_session != nullptr) {
3913 ret_code = list_lttng_kernel_events(
3914 channel_name, session->kernel_session, reply_payload, &nb_events);
3915 }
3916
3917 break;
3918 case LTTNG_DOMAIN_UST:
3919 {
3920 if (session->ust_session != nullptr) {
3921 ret_code =
3922 list_lttng_ust_global_events(channel_name,
3923 &session->ust_session->domain_global,
3924 reply_payload,
3925 &nb_events);
3926 }
3927
3928 break;
3929 }
3930 case LTTNG_DOMAIN_LOG4J:
3931 case LTTNG_DOMAIN_LOG4J2:
3932 case LTTNG_DOMAIN_JUL:
3933 case LTTNG_DOMAIN_PYTHON:
3934 if (session->ust_session) {
3935 for (auto *agt : lttng::urcu::lfht_iteration_adapter<agent,
3936 decltype(agent::node),
3937 &agent::node>(
3938 *session->ust_session->agents->ht)) {
3939 if (agt->domain == domain) {
3940 ret_code = list_lttng_agent_events(
3941 agt, reply_payload, &nb_events);
3942 break;
3943 }
3944 }
3945 }
3946 break;
3947 default:
3948 ret_code = LTTNG_ERR_UND;
3949 break;
3950 }
3951
3952 if (nb_events > UINT32_MAX) {
3953 ret_code = LTTNG_ERR_OVERFLOW;
3954 goto end;
3955 }
3956
3957 /* Update command reply header. */
3958 reply_command_header.count = (uint32_t) nb_events;
3959 memcpy(reply_payload->buffer.data + reply_command_header_offset,
3960 &reply_command_header,
3961 sizeof(reply_command_header));
3962
3963end:
3964 return ret_code;
3965}
3966
3967/*
3968 * Using the session list, fill an lttng_session array to send back to the
3969 * client for session listing.
3970 *
3971 * The session list lock MUST be acquired before calling this function.
3972 */
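/*
 * Reply buffer layout sketch (illustrative, inferred from the pointer
 * arithmetic below; not a wire-format specification):
 *
 *   struct lttng_session          sessions[session_count];
 *   struct lttng_session_extended extended[session_count];
 *
 * The extended records are appended right after the base array, which is
 * why 'extended' is computed from &sessions[session_count] below.
 */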
3973void cmd_list_lttng_sessions(struct lttng_session *sessions,
3974 size_t session_count,
3975 uid_t uid,
3976 gid_t gid)
3977{
3978 int ret;
3979 unsigned int i = 0;
3980 struct ltt_session_list *list = session_get_list();
3981 struct lttng_session_extended *extended = (typeof(extended)) (&sessions[session_count]);
3982
3983 DBG("Getting all available session for UID %d GID %d", uid, gid);
3984 /*
3985 * Iterate over session list and append data after the control struct in
3986 * the buffer.
3987 */
3988 for (auto raw_session_ptr :
3989 lttng::urcu::list_iteration_adapter<ltt_session, &ltt_session::list>(list->head)) {
3990 auto session = [raw_session_ptr]() {
3991 session_get(raw_session_ptr);
3992 raw_session_ptr->lock();
3993 return ltt_session::make_locked_ref(*raw_session_ptr);
3994 }();
3995
3996 /*
3997 * Only list the sessions the user can control.
3998 */
3999 if (!session_access_ok(session, uid) || session->destroyed) {
4000 continue;
4001 }
4002
4003 struct ltt_kernel_session *ksess = session->kernel_session;
4004 struct ltt_ust_session *usess = session->ust_session;
4005
4006 if (session->consumer->type == CONSUMER_DST_NET ||
4007 (ksess && ksess->consumer->type == CONSUMER_DST_NET) ||
4008 (usess && usess->consumer->type == CONSUMER_DST_NET)) {
4009 ret = build_network_session_path(
4010 sessions[i].path, sizeof(sessions[i].path), session);
4011 } else {
4012 ret = snprintf(sessions[i].path,
4013 sizeof(sessions[i].path),
4014 "%s",
4015 session->consumer->dst.session_root_path);
4016 }
4017 if (ret < 0) {
4018 PERROR("snprintf session path");
4019 continue;
4020 }
4021
4022 strncpy(sessions[i].name, session->name, NAME_MAX);
4023 sessions[i].name[NAME_MAX - 1] = '\0';
4024 sessions[i].enabled = session->active;
4025 sessions[i].snapshot_mode = session->snapshot_mode;
4026 sessions[i].live_timer_interval = session->live_timer;
4027 extended[i].creation_time.value = (uint64_t) session->creation_time;
4028 extended[i].creation_time.is_set = 1;
4029 i++;
4030 }
4031}
4032
4033/*
4034 * Command LTTCOMM_SESSIOND_COMMAND_KERNEL_TRACER_STATUS
4035 */
4036enum lttng_error_code cmd_kernel_tracer_status(enum lttng_kernel_tracer_status *status)
4037{
4038 if (status == nullptr) {
4039 return LTTNG_ERR_INVALID;
4040 }
4041
4042 *status = get_kernel_tracer_status();
4043 return LTTNG_OK;
4044}
4045
4046/*
4047 * Command LTTNG_DATA_PENDING. Returns 0 if the data is NOT pending, meaning it
4048 * is ready for trace analysis (or any kind of reader), or else 1 for pending data.
4049 */
4050int cmd_data_pending(const ltt_session::locked_ref& session)
4051{
4052 int ret;
4053 struct ltt_kernel_session *ksess = session->kernel_session;
4054 struct ltt_ust_session *usess = session->ust_session;
4055
4056 DBG("Data pending for session %s", session->name);
4057
4058 /* Session MUST be stopped to ask for data availability. */
4059 if (session->active) {
4060 ret = LTTNG_ERR_SESSION_STARTED;
4061 goto error;
4062 } else {
4063 /*
4064 * If the session is stopped, make sure it was started at least once;
4065 * otherwise the consumer checks below would always report pending data.
4066 *
4067 * The consumer assumes that when it receives the data pending command,
4068 * the trace has been started beforehand; otherwise no output data is
4069 * written by the streams, which is a condition for data pending. It is
4070 * therefore *VERY* important not to query the consumer before the trace
4071 * has been started.
4072 */
4073 if (!session->has_been_started) {
4074 ret = 0;
4075 goto error;
4076 }
4077 }
4078
4079 /* A rotation is still pending, we have to wait. */
4080 if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
4081 DBG("Rotate still pending for session %s", session->name);
4082 ret = 1;
4083 goto error;
4084 }
4085
4086 if (ksess && ksess->consumer) {
4087 ret = consumer_is_data_pending(ksess->id, ksess->consumer);
4088 if (ret == 1) {
4089 /* Data is still being extracted for the kernel. */
4090 goto error;
4091 }
4092 }
4093
4094 if (usess && usess->consumer) {
4095 ret = consumer_is_data_pending(usess->id, usess->consumer);
4096 if (ret == 1) {
4097 /* Data is still being extracted for user space. */
4098 goto error;
4099 }
4100 }
4101
4102 /* Data is ready to be read by a viewer */
4103 ret = 0;
4104
4105error:
4106 return ret;
4107}
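/*
 * Client-side usage sketch (illustrative; assumes the public lttng-ctl
 * helpers lttng_stop_tracing_no_wait() and lttng_data_pending(), which
 * wrap this command):
 *
 *   lttng_stop_tracing_no_wait("my-session");
 *   while (lttng_data_pending("my-session") == 1) {
 *           usleep(100000); // data still being extracted, poll again
 *   }
 *   // 0 means all data is flushed and ready for a reader, < 0 is an error
 */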
4108
4109/*
4110 * Command LTTNG_SNAPSHOT_ADD_OUTPUT from the lttng ctl library.
4111 *
4112 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4113 */
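/*
 * Illustrative client-side equivalents (assumed lttng(1) CLI forms):
 *
 *   lttng snapshot add-output --session=my-session file:///tmp/snapshots
 *   lttng snapshot add-output --session=my-session \
 *           --ctrl-url=tcp://relayd:5342 --data-url=tcp://relayd:5343
 */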
4114int cmd_snapshot_add_output(const ltt_session::locked_ref& session,
4115 const struct lttng_snapshot_output *output,
4116 uint32_t *id)
4117{
4118 int ret;
4119 struct snapshot_output *new_output;
4120
4121 LTTNG_ASSERT(output);
4122
4123 DBG("Cmd snapshot add output for session %s", session->name);
4124
4125 /*
4126 * Can't create an output if the session is not set in no-output mode.
4127 */
4128 if (session->output_traces) {
4129 ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
4130 goto error;
4131 }
4132
4133 if (session->has_non_mmap_channel) {
4134 ret = LTTNG_ERR_SNAPSHOT_UNSUPPORTED;
4135 goto error;
4136 }
4137
4138 /* Only one output is allowed until we have the "tee" feature. */
4139 if (session->snapshot.nb_output == 1) {
4140 ret = LTTNG_ERR_SNAPSHOT_OUTPUT_EXIST;
4141 goto error;
4142 }
4143
4144 new_output = snapshot_output_alloc();
4145 if (!new_output) {
4146 ret = LTTNG_ERR_NOMEM;
4147 goto error;
4148 }
4149
4150 ret = snapshot_output_init(session,
4151 output->max_size,
4152 output->name,
4153 output->ctrl_url,
4154 output->data_url,
4155 session->consumer,
4156 new_output,
4157 &session->snapshot);
4158 if (ret < 0) {
4159 if (ret == -ENOMEM) {
4160 ret = LTTNG_ERR_NOMEM;
4161 } else {
4162 ret = LTTNG_ERR_INVALID;
4163 }
4164 goto free_error;
4165 }
4166
4167 snapshot_add_output(&session->snapshot, new_output);
4168 if (id) {
4169 *id = new_output->id;
4170 }
4171
4172 return LTTNG_OK;
4173
4174free_error:
4175 snapshot_output_destroy(new_output);
4176error:
4177 return ret;
4178}
4179
4180/*
4181 * Command LTTNG_SNAPSHOT_DEL_OUTPUT from lib lttng ctl.
4182 *
4183 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4184 */
4185int cmd_snapshot_del_output(const ltt_session::locked_ref& session,
4186 const struct lttng_snapshot_output *output)
4187{
4188 int ret;
4189 struct snapshot_output *sout = nullptr;
4190
4191 LTTNG_ASSERT(output);
4192
4193 const lttng::urcu::read_lock_guard read_lock;
4194
4195 /*
4196 * Permission denied to delete an output if the session is not
4197 * set in no-output mode.
4198 */
4199 if (session->output_traces) {
4200 ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
4201 goto error;
4202 }
4203
4204 if (output->id) {
4205 DBG("Cmd snapshot del output id %" PRIu32 " for session %s",
4206 output->id,
4207 session->name);
4208 sout = snapshot_find_output_by_id(output->id, &session->snapshot);
4209 } else if (*output->name != '\0') {
4210 DBG("Cmd snapshot del output name %s for session %s", output->name, session->name);
4211 sout = snapshot_find_output_by_name(output->name, &session->snapshot);
4212 }
4213 if (!sout) {
4214 ret = LTTNG_ERR_INVALID;
4215 goto error;
4216 }
4217
4218 snapshot_delete_output(&session->snapshot, sout);
4219 snapshot_output_destroy(sout);
4220 ret = LTTNG_OK;
4221
4222error:
4223 return ret;
4224}
4225
4226/*
4227 * Command LTTNG_SNAPSHOT_LIST_OUTPUT from lib lttng ctl.
4228 *
4229 * If no output is available, the 'outputs' parameter is left untouched and 0 is returned.
4230 *
4231 * Return the number of newly allocated outputs or a negative LTTNG_ERR code.
4232 */
4233ssize_t cmd_snapshot_list_outputs(const ltt_session::locked_ref& session,
4234 struct lttng_snapshot_output **outputs)
4235{
4236 int ret, idx = 0;
4237 struct lttng_snapshot_output *list = nullptr;
4238
4239 LTTNG_ASSERT(outputs);
4240
4241 DBG("Cmd snapshot list outputs for session %s", session->name);
4242
4243 /*
4244 * Permission denied to list the outputs if the session is not
4245 * set in no-output mode.
4246 */
4247 if (session->output_traces) {
4248 ret = -LTTNG_ERR_NOT_SNAPSHOT_SESSION;
4249 goto end;
4250 }
4251
4252 if (session->snapshot.nb_output == 0) {
4253 ret = 0;
4254 goto end;
4255 }
4256
4257 list = calloc<lttng_snapshot_output>(session->snapshot.nb_output);
4258 if (!list) {
4259 ret = -LTTNG_ERR_NOMEM;
4260 goto end;
4261 }
4262
4263 /* Copy list from session to the new list object. */
4264 for (auto *output : lttng::urcu::lfht_iteration_adapter<snapshot_output,
4265 decltype(snapshot_output::node),
4266 &snapshot_output::node>(
4267 *session->snapshot.output_ht->ht)) {
4268 LTTNG_ASSERT(output->consumer);
4269 list[idx].id = output->id;
4270 list[idx].max_size = output->max_size;
4271 if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
4272 ret = -LTTNG_ERR_INVALID;
4273 goto error;
4274 }
4275
4276 if (output->consumer->type == CONSUMER_DST_LOCAL) {
4277 if (lttng_strncpy(list[idx].ctrl_url,
4278 output->consumer->dst.session_root_path,
4279 sizeof(list[idx].ctrl_url))) {
4280 ret = -LTTNG_ERR_INVALID;
4281 goto error;
4282 }
4283 } else {
4284 /* Control URI. */
4285 ret = uri_to_str_url(&output->consumer->dst.net.control,
4286 list[idx].ctrl_url,
4287 sizeof(list[idx].ctrl_url));
4288 if (ret < 0) {
4289 ret = -LTTNG_ERR_NOMEM;
4290 goto error;
4291 }
4292
4293 /* Data URI. */
4294 ret = uri_to_str_url(&output->consumer->dst.net.data,
4295 list[idx].data_url,
4296 sizeof(list[idx].data_url));
4297 if (ret < 0) {
4298 ret = -LTTNG_ERR_NOMEM;
4299 goto error;
4300 }
4301 }
4302
4303 idx++;
4304 }
4305
4306 *outputs = list;
4307 list = nullptr;
4308 ret = session->snapshot.nb_output;
4309error:
4310 free(list);
4311end:
4312 return ret;
4313}
4314
4315/*
4316 * Check if we can regenerate the metadata for this session.
4317 * Only kernel, UST per-uid and non-live sessions are supported.
4318 *
4319 * Return 0 if the metadata can be generated, a LTTNG_ERR code otherwise.
4320 */
4321static int check_regenerate_metadata_support(const ltt_session::locked_ref& session)
4322{
4323 int ret;
4324
4325 if (session->live_timer != 0) {
4326 ret = LTTNG_ERR_LIVE_SESSION;
4327 goto end;
4328 }
4329 if (!session->active) {
4330 ret = LTTNG_ERR_SESSION_NOT_STARTED;
4331 goto end;
4332 }
4333 if (session->ust_session) {
4334 switch (session->ust_session->buffer_type) {
4335 case LTTNG_BUFFER_PER_UID:
4336 break;
4337 case LTTNG_BUFFER_PER_PID:
4338 ret = LTTNG_ERR_PER_PID_SESSION;
4339 goto end;
4340 default:
4341 abort();
4342 ret = LTTNG_ERR_UNK;
4343 goto end;
4344 }
4345 }
4346 if (session->consumer->type == CONSUMER_DST_NET &&
4347 session->consumer->relay_minor_version < 8) {
4348 ret = LTTNG_ERR_RELAYD_VERSION_FAIL;
4349 goto end;
4350 }
4351 ret = 0;
4352
4353end:
4354 return ret;
4355}
4356
4357/*
4358 * Command LTTNG_REGENERATE_METADATA from the lttng-ctl library.
4359 *
4360 * Ask the consumer to truncate the existing metadata file(s) and
4361 * then regenerate the metadata. Live and per-pid sessions are not
4362 * supported and return an error.
4363 *
4364 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4365 */
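/*
 * Illustrative client-side equivalent (assumed lttng(1) CLI form):
 *
 *   lttng regenerate metadata --session=my-session
 */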
4366int cmd_regenerate_metadata(const ltt_session::locked_ref& session)
4367{
4368 int ret;
4369
4370 ret = check_regenerate_metadata_support(session);
4371 if (ret) {
4372 goto end;
4373 }
4374
4375 if (session->kernel_session) {
4376 ret = kernctl_session_regenerate_metadata(session->kernel_session->fd);
4377 if (ret < 0) {
4378 ERR("Failed to regenerate the kernel metadata");
4379 goto end;
4380 }
4381 }
4382
4383 if (session->ust_session) {
4384 ret = trace_ust_regenerate_metadata(session->ust_session);
4385 if (ret < 0) {
4386 ERR("Failed to regenerate the UST metadata");
4387 goto end;
4388 }
4389 }
4390 DBG("Cmd metadata regenerate for session %s", session->name);
4391 ret = LTTNG_OK;
4392
4393end:
4394 return ret;
4395}
4396
4397/*
4398 * Command LTTNG_REGENERATE_STATEDUMP from the lttng-ctl library.
4399 *
4400 * Ask the tracer to regenerate a new statedump.
4401 *
4402 * Return LTTNG_OK on success or else a LTTNG_ERR code.
4403 */
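/*
 * Illustrative client-side equivalent (assumed lttng(1) CLI form):
 *
 *   lttng regenerate statedump --session=my-session
 */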
4404int cmd_regenerate_statedump(const ltt_session::locked_ref& session)
4405{
4406 int ret;
4407
4408 if (!session->active) {
4409 ret = LTTNG_ERR_SESSION_NOT_STARTED;
4410 goto end;
4411 }
4412
4413 if (session->kernel_session) {
4414 ret = kernctl_session_regenerate_statedump(session->kernel_session->fd);
4415 /*
4416 * Currently, the statedump in kernel can only fail if out
4417 * of memory.
4418 */
4419 if (ret < 0) {
4420 if (ret == -ENOMEM) {
4421 ret = LTTNG_ERR_REGEN_STATEDUMP_NOMEM;
4422 } else {
4423 ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
4424 }
4425 ERR("Failed to regenerate the kernel statedump");
4426 goto end;
4427 }
4428 }
4429
4430 if (session->ust_session) {
4431 ret = ust_app_regenerate_statedump_all(session->ust_session);
4432 /*
4433 * Currently, the statedump in UST always returns 0.
4434 */
4435 if (ret < 0) {
4436 ret = LTTNG_ERR_REGEN_STATEDUMP_FAIL;
4437 ERR("Failed to regenerate the UST statedump");
4438 goto end;
4439 }
4440 }
4441 DBG("Cmd regenerate statedump for session %s", session->name);
4442 ret = LTTNG_OK;
4443
4444end:
4445 return ret;
4446}
4447
4448static enum lttng_error_code
4449synchronize_tracer_notifier_register(struct notification_thread_handle *notification_thread,
4450 struct lttng_trigger *trigger,
4451 const struct lttng_credentials *cmd_creds)
4452{
4453 enum lttng_error_code ret_code;
4454 const struct lttng_condition *condition = lttng_trigger_get_const_condition(trigger);
4455 const char *trigger_name;
4456 uid_t trigger_owner;
4457 enum lttng_trigger_status trigger_status;
4458 const enum lttng_domain_type trigger_domain =
4459 lttng_trigger_get_underlying_domain_type_restriction(trigger);
4460
4461 trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
4462 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4463
4464 LTTNG_ASSERT(condition);
4465 LTTNG_ASSERT(lttng_condition_get_type(condition) ==
4466 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
4467
4468 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4469 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4470
4471 const auto list_lock = lttng::sessiond::lock_session_list();
4472 switch (trigger_domain) {
4473 case LTTNG_DOMAIN_KERNEL:
4474 {
4475 ret_code = kernel_register_event_notifier(trigger, cmd_creds);
4476 if (ret_code != LTTNG_OK) {
4477 enum lttng_error_code notif_thread_unregister_ret;
4478
4479 notif_thread_unregister_ret =
4480 notification_thread_command_unregister_trigger(notification_thread,
4481 trigger);
4482
4483 if (notif_thread_unregister_ret != LTTNG_OK) {
4484 /* Return the original error code. */
4485 ERR("Failed to unregister trigger from notification thread during error recovery: trigger name = '%s', trigger owner uid = %d, error code = %d",
4486 trigger_name,
4487 (int) trigger_owner,
4488 ret_code);
4489 }
4490
4491 return ret_code;
4492 }
4493 break;
4494 }
4495 case LTTNG_DOMAIN_UST:
4496 ust_app_global_update_all_event_notifier_rules();
4497 break;
4498 case LTTNG_DOMAIN_JUL:
4499 case LTTNG_DOMAIN_LOG4J:
4500 case LTTNG_DOMAIN_LOG4J2:
4501 case LTTNG_DOMAIN_PYTHON:
4502 {
4503 /* Agent domains. */
4504 struct agent *agt = agent_find_by_event_notifier_domain(trigger_domain);
4505
4506 if (!agt) {
4507 agt = agent_create(trigger_domain);
4508 if (!agt) {
4509 ret_code = LTTNG_ERR_NOMEM;
4510 return ret_code;
4511 }
4512
4513 agent_add(agt, the_trigger_agents_ht_by_domain);
4514 }
4515
4516 ret_code = (lttng_error_code) trigger_agent_enable(trigger, agt);
4517 if (ret_code != LTTNG_OK) {
4518 return ret_code;
4519 }
4520
4521 break;
4522 }
4523 case LTTNG_DOMAIN_NONE:
4524 default:
4525 abort();
4526 }
4527
4528 return LTTNG_OK;
4529}
4530
4531lttng::ctl::trigger cmd_register_trigger(const struct lttng_credentials *cmd_creds,
4532 struct lttng_trigger *trigger,
4533 bool is_trigger_anonymous,
4534 struct notification_thread_handle *notification_thread)
4535{
4536 enum lttng_error_code ret_code;
4537 const char *trigger_name;
4538 uid_t trigger_owner;
4539 enum lttng_trigger_status trigger_status;
4540
4541 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4542 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4543
4544 trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
4545 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4546
4547 DBG("Running register trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4548 trigger_name,
4549 (int) trigger_owner,
4550 (int) lttng_credentials_get_uid(cmd_creds));
4551
4552 /*
4553 * Validate the trigger credentials against the command credentials.
4554 * Only the root user can register a trigger with non-matching
4555 * credentials.
4556 */
4557 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(trigger), cmd_creds)) {
4558 if (lttng_credentials_get_uid(cmd_creds) != 0) {
4559 LTTNG_THROW_CTL(
4560 fmt::format(
4561 "Trigger credentials do not match the command credentials: trigger_name = `{}`, trigger_owner_uid={}, command_creds_uid={}",
4562 trigger_name,
4563 trigger_owner,
4564 lttng_credentials_get_uid(cmd_creds)),
4565 LTTNG_ERR_INVALID_TRIGGER);
4566 }
4567 }
4568
4569 /*
4570 * The bytecode generation also serves as a validation step for the
4571 * bytecode expressions.
4572 */
4573 ret_code = lttng_trigger_generate_bytecode(trigger, cmd_creds);
4574 if (ret_code != LTTNG_OK) {
4575 LTTNG_THROW_CTL(
4576 fmt::format(
4577 "Failed to generate bytecode of trigger: trigger_name=`{}`, trigger_owner_uid={}",
4578 trigger_name,
4579 trigger_owner),
4580 ret_code);
4581 }
4582
4583 /*
4584 * A reference to the trigger is acquired by the notification thread.
4585 * It is safe to return the same trigger to the caller since the
4586 * other user holds a reference.
4587 *
4588 * The trigger is modified during the execution of the
4589 * "register trigger" command. However, by the time the command returns,
4590 * it is safe to use without any locking as its properties are
4591 * immutable.
4592 */
4593 ret_code = notification_thread_command_register_trigger(
4594 notification_thread, trigger, is_trigger_anonymous);
4595 if (ret_code != LTTNG_OK) {
4596 LTTNG_THROW_CTL(
4597 fmt::format(
4598 "Failed to register trigger to notification thread: trigger_name=`{}`, trigger_owner_uid={}",
4599 trigger_name,
4600 trigger_owner),
4601 ret_code);
4602 }
4603
4604 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4605 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4606
4607 /*
4608 * Synchronize tracers if the trigger adds an event notifier.
4609 */
4610 if (lttng_trigger_needs_tracer_notifier(trigger)) {
4611 ret_code = synchronize_tracer_notifier_register(
4612 notification_thread, trigger, cmd_creds);
4613 if (ret_code != LTTNG_OK) {
4614 LTTNG_THROW_CTL("Failed to register tracer notifier", ret_code);
4615 }
4616 }
4617
4618 /*
4619 * Return an updated trigger to the client.
4620 *
4621 * Since a modified version of the same trigger is returned, acquire a
4622 * reference to the trigger so the caller doesn't have to care if those
4623 * are distinct instances or not.
4624 */
4625 LTTNG_ASSERT(ret_code == LTTNG_OK);
4626 lttng_trigger_get(trigger);
4627 return lttng::ctl::trigger(trigger);
4628}
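/*
 * Client-side registration sketch (illustrative; assumes the public
 * lttng-ctl trigger API, with condition and action construction omitted):
 *
 *   struct lttng_trigger *trigger = lttng_trigger_create(condition, action);
 *   if (!trigger || lttng_register_trigger(trigger) != 0) {
 *           // handle the error
 *   }
 *   lttng_trigger_destroy(trigger);
 */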
4629
4630static enum lttng_error_code
4631synchronize_tracer_notifier_unregister(const struct lttng_trigger *trigger)
4632{
4633 enum lttng_error_code ret_code;
4634 const struct lttng_condition *condition = lttng_trigger_get_const_condition(trigger);
4635 const enum lttng_domain_type trigger_domain =
4636 lttng_trigger_get_underlying_domain_type_restriction(trigger);
4637
4638 LTTNG_ASSERT(condition);
4639 LTTNG_ASSERT(lttng_condition_get_type(condition) ==
4640 LTTNG_CONDITION_TYPE_EVENT_RULE_MATCHES);
4641
4642 const auto list_lock = lttng::sessiond::lock_session_list();
4643 switch (trigger_domain) {
4644 case LTTNG_DOMAIN_KERNEL:
4645 ret_code = kernel_unregister_event_notifier(trigger);
4646 if (ret_code != LTTNG_OK) {
4647 return ret_code;
4648 }
4649
4650 break;
4651 case LTTNG_DOMAIN_UST:
4652 ust_app_global_update_all_event_notifier_rules();
4653 break;
4654 case LTTNG_DOMAIN_JUL:
4655 case LTTNG_DOMAIN_LOG4J:
4656 case LTTNG_DOMAIN_LOG4J2:
4657 case LTTNG_DOMAIN_PYTHON:
4658 {
4659 /* Agent domains. */
4660 struct agent *agt = agent_find_by_event_notifier_domain(trigger_domain);
4661
4662 /*
4663 * This trigger was never registered in the first place. Calling
4664 * this function under those circumstances is an internal error.
4665 */
4666 LTTNG_ASSERT(agt);
4667 ret_code = (lttng_error_code) trigger_agent_disable(trigger, agt);
4668 if (ret_code != LTTNG_OK) {
4669 return ret_code;
4670 }
4671
4672 break;
4673 }
4674 case LTTNG_DOMAIN_NONE:
4675 default:
4676 abort();
4677 }
4678
4679 return LTTNG_OK;
4680}
4681
4682enum lttng_error_code cmd_unregister_trigger(const struct lttng_credentials *cmd_creds,
4683 const struct lttng_trigger *trigger,
4684 struct notification_thread_handle *notification_thread)
4685{
4686 enum lttng_error_code ret_code;
4687 const char *trigger_name;
4688 uid_t trigger_owner;
4689 enum lttng_trigger_status trigger_status;
4690 struct lttng_trigger *sessiond_trigger = nullptr;
4691
4692 trigger_status = lttng_trigger_get_name(trigger, &trigger_name);
4693 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4694 trigger_status = lttng_trigger_get_owner_uid(trigger, &trigger_owner);
4695 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4696
4697 DBG("Running unregister trigger command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4698 trigger_name,
4699 (int) trigger_owner,
4700 (int) lttng_credentials_get_uid(cmd_creds));
4701
4702 /*
4703 * Validate the trigger credentials against the command credentials.
4704 * Only the root user can unregister a trigger with non-matching
4705 * credentials.
4706 */
4707 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(trigger), cmd_creds)) {
4708 if (lttng_credentials_get_uid(cmd_creds) != 0) {
4709 ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4710 trigger_name,
4711 (int) trigger_owner,
4712 (int) lttng_credentials_get_uid(cmd_creds));
4713 ret_code = LTTNG_ERR_INVALID_TRIGGER;
4714 goto end;
4715 }
4716 }
4717
4718 /* Fetch the sessiond side trigger object. */
4719 ret_code = notification_thread_command_get_trigger(
4720 notification_thread, trigger, &sessiond_trigger);
4721 if (ret_code != LTTNG_OK) {
4722 DBG("Failed to get trigger from notification thread during unregister: trigger name = '%s', trigger owner uid = %d, error code = %d",
4723 trigger_name,
4724 (int) trigger_owner,
4725 ret_code);
4726 goto end;
4727 }
4728
4729 LTTNG_ASSERT(sessiond_trigger);
4730
4731 /*
4732 * From this point on, no matter what, consider the trigger
4733 * unregistered.
4734 *
4735 * We set the unregistered state of the sessiond side trigger object in
4736 * the client thread since we want to minimize the possibility of the
4737 * notification thread being stalled due to a long execution of an
4738 * action that required the trigger lock.
4739 */
4740 lttng_trigger_set_as_unregistered(sessiond_trigger);
4741
4742 ret_code = notification_thread_command_unregister_trigger(notification_thread, trigger);
4743 if (ret_code != LTTNG_OK) {
4744 DBG("Failed to unregister trigger from notification thread: trigger name = '%s', trigger owner uid = %d, error code = %d",
4745 trigger_name,
4746 (int) trigger_owner,
4747 ret_code);
4748 goto end;
4749 }
4750
4751 /*
4752 * Synchronize tracers if the trigger removes an event notifier.
4753 * Do this even if the trigger unregistration failed to at least stop
4754 * the tracers from producing notifications associated with this
4755 * event notifier.
4756 */
4757 if (lttng_trigger_needs_tracer_notifier(trigger)) {
4758 ret_code = synchronize_tracer_notifier_unregister(trigger);
4759 if (ret_code != LTTNG_OK) {
4760 ERR("Error unregistering trigger from the tracer.");
4761 goto end;
4762 }
4763 }
4764
4765end:
4766 lttng_trigger_put(sessiond_trigger);
4767 return ret_code;
4768}
4769
4770enum lttng_error_code cmd_list_triggers(struct command_ctx *cmd_ctx,
4771 struct notification_thread_handle *notification_thread,
4772 struct lttng_triggers **return_triggers)
4773{
4774 int ret;
4775 enum lttng_error_code ret_code;
4776 struct lttng_triggers *triggers = nullptr;
4777
4778 /* Get the set of triggers from the notification thread. */
4779 ret_code = notification_thread_command_list_triggers(
4780 notification_thread, cmd_ctx->creds.uid, &triggers);
4781 if (ret_code != LTTNG_OK) {
4782 goto end;
4783 }
4784
4785 ret = lttng_triggers_remove_hidden_triggers(triggers);
4786 if (ret) {
4787 ret_code = LTTNG_ERR_UNK;
4788 goto end;
4789 }
4790
4791 *return_triggers = triggers;
4792 triggers = nullptr;
4793 ret_code = LTTNG_OK;
4794end:
4795 lttng_triggers_destroy(triggers);
4796 return ret_code;
4797}
4798
4799enum lttng_error_code
4800cmd_execute_error_query(const struct lttng_credentials *cmd_creds,
4801 const struct lttng_error_query *query,
4802 struct lttng_error_query_results **_results,
4803 struct notification_thread_handle *notification_thread)
4804{
4805 enum lttng_error_code ret_code;
4806 const struct lttng_trigger *query_target_trigger;
4807 const struct lttng_action *query_target_action = nullptr;
4808 struct lttng_trigger *matching_trigger = nullptr;
4809 const char *trigger_name;
4810 uid_t trigger_owner;
4811 enum lttng_trigger_status trigger_status;
4812 struct lttng_error_query_results *results = nullptr;
4813
4814 switch (lttng_error_query_get_target_type(query)) {
4815 case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
4816 query_target_trigger = lttng_error_query_trigger_borrow_target(query);
4817 break;
4818 case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
4819 query_target_trigger = lttng_error_query_condition_borrow_target(query);
4820 break;
4821 case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
4822 query_target_trigger = lttng_error_query_action_borrow_trigger_target(query);
4823 break;
4824 default:
4825 abort();
4826 }
4827
4828 LTTNG_ASSERT(query_target_trigger);
4829
4830 ret_code = notification_thread_command_get_trigger(
4831 notification_thread, query_target_trigger, &matching_trigger);
4832 if (ret_code != LTTNG_OK) {
4833 goto end;
4834 }
4835
4836 /* No longer needed. */
4837 query_target_trigger = nullptr;
4838
4839 if (lttng_error_query_get_target_type(query) == LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION) {
4840 /* Get the sessiond-side version of the target action. */
4841 query_target_action =
4842 lttng_error_query_action_borrow_action_target(query, matching_trigger);
4843 }
4844
4845 trigger_status = lttng_trigger_get_name(matching_trigger, &trigger_name);
4846 trigger_name = trigger_status == LTTNG_TRIGGER_STATUS_OK ? trigger_name : "(anonymous)";
4847 trigger_status = lttng_trigger_get_owner_uid(matching_trigger, &trigger_owner);
4848 LTTNG_ASSERT(trigger_status == LTTNG_TRIGGER_STATUS_OK);
4849
4850 results = lttng_error_query_results_create();
4851 if (!results) {
4852 ret_code = LTTNG_ERR_NOMEM;
4853 goto end;
4854 }
4855
4856 DBG("Running \"execute error query\" command: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4857 trigger_name,
4858 (int) trigger_owner,
4859 (int) lttng_credentials_get_uid(cmd_creds));
4860
4861 /*
4862 * Validate the trigger credentials against the command credentials.
4863 * Only the root user can target a trigger with non-matching
4864 * credentials.
4865 */
4866 if (!lttng_credentials_is_equal_uid(lttng_trigger_get_credentials(matching_trigger),
4867 cmd_creds)) {
4868 if (lttng_credentials_get_uid(cmd_creds) != 0) {
4869 ERR("Trigger credentials do not match the command credentials: trigger name = '%s', trigger owner uid = %d, command creds uid = %d",
4870 trigger_name,
4871 (int) trigger_owner,
4872 (int) lttng_credentials_get_uid(cmd_creds));
4873 ret_code = LTTNG_ERR_INVALID_TRIGGER;
4874 goto end;
4875 }
4876 }
4877
4878 switch (lttng_error_query_get_target_type(query)) {
4879 case LTTNG_ERROR_QUERY_TARGET_TYPE_TRIGGER:
4880 trigger_status = lttng_trigger_add_error_results(matching_trigger, results);
4881
4882 switch (trigger_status) {
4883 case LTTNG_TRIGGER_STATUS_OK:
4884 break;
4885 default:
4886 ret_code = LTTNG_ERR_UNK;
4887 goto end;
4888 }
4889
4890 break;
4891 case LTTNG_ERROR_QUERY_TARGET_TYPE_CONDITION:
4892 {
4893 trigger_status =
4894 lttng_trigger_condition_add_error_results(matching_trigger, results);
4895
4896 switch (trigger_status) {
4897 case LTTNG_TRIGGER_STATUS_OK:
4898 break;
4899 default:
4900 ret_code = LTTNG_ERR_UNK;
4901 goto end;
4902 }
4903
4904 break;
4905 }
4906 case LTTNG_ERROR_QUERY_TARGET_TYPE_ACTION:
4907 {
4908 const enum lttng_action_status action_status =
4909 lttng_action_add_error_query_results(query_target_action, results);
4910
4911 switch (action_status) {
4912 case LTTNG_ACTION_STATUS_OK:
4913 break;
4914 default:
4915 ret_code = LTTNG_ERR_UNK;
4916 goto end;
4917 }
4918
4919 break;
4920 }
4921 default:
4922 abort();
4923 break;
4924 }
4925
4926 *_results = results;
4927 results = nullptr;
4928 ret_code = LTTNG_OK;
4929end:
4930 lttng_trigger_put(matching_trigger);
4931 lttng_error_query_results_destroy(results);
4932 return ret_code;
4933}
4934
4935/*
4936 * Send relayd sockets from snapshot output to consumer. Ignore request if the
4937 * snapshot output does not have a remote (network) destination.
4938 *
4939 * Return LTTNG_OK on success or a LTTNG_ERR code.
4940 */
4941static enum lttng_error_code set_relayd_for_snapshot(struct consumer_output *output,
4942 const ltt_session::locked_ref& session)
4943{
4944 enum lttng_error_code status = LTTNG_OK;
4945 LTTNG_OPTIONAL(uint64_t) current_chunk_id = {};
4946 const char *base_path;
4947
4948 LTTNG_ASSERT(output);
4949
4950 DBG2("Set relayd object from snapshot output");
4951
4952 if (session->current_trace_chunk) {
4953 const lttng_trace_chunk_status chunk_status = lttng_trace_chunk_get_id(
4954 session->current_trace_chunk, &current_chunk_id.value);
4955
4956 if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK) {
4957 current_chunk_id.is_set = true;
4958 } else {
4959 ERR("Failed to get current trace chunk id");
4960 status = LTTNG_ERR_UNK;
4961 goto error;
4962 }
4963 }
4964
4965 /* Ignore if snapshot consumer output is not network. */
4966 if (output->type != CONSUMER_DST_NET) {
4967 goto error;
4968 }
4969
4970 /*
4971 * The snapshot record URI base path overrides the session
4972 * base path.
4973 */
4974 if (output->dst.net.control.subdir[0] != '\0') {
4975 base_path = output->dst.net.control.subdir;
4976 } else {
4977 base_path = session->base_path;
4978 }
4979
4980 /*
4981 * For each consumer socket, create and send the relayd object of the
4982 * snapshot output.
4983 */
4984 for (auto *socket :
4985 lttng::urcu::lfht_iteration_adapter<consumer_socket,
4986 decltype(consumer_socket::node),
4987 &consumer_socket::node>(*output->socks->ht)) {
4988 pthread_mutex_lock(socket->lock);
4989 status = send_consumer_relayd_sockets(
4990 session->id,
4991 output,
4992 socket,
4993 session->name,
4994 session->hostname,
4995 base_path,
4996 session->live_timer,
4997 current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
4998 session->creation_time,
4999 session->name_contains_creation_time);
5000 pthread_mutex_unlock(socket->lock);
5001 if (status != LTTNG_OK) {
5002 goto error;
5003 }
5004 }
5005
5006error:
5007 return status;
5008}
5009
5010/*
5011 * Record a kernel snapshot.
5012 *
5013 * Return LTTNG_OK on success or a LTTNG_ERR code.
5014 */
5015static enum lttng_error_code record_kernel_snapshot(struct ltt_kernel_session *ksess,
5016 const struct consumer_output *output,
5017 uint64_t nb_packets_per_stream)
5018{
5019 enum lttng_error_code status;
5020
5021 LTTNG_ASSERT(ksess);
5022 LTTNG_ASSERT(output);
5023
5024 status = kernel_snapshot_record(ksess, output, nb_packets_per_stream);
5025 return status;
5026}
5027
5028/*
5029 * Record a UST snapshot.
5030 *
5031 * Returns LTTNG_OK on success or a LTTNG_ERR error code.
5032 */
5033static enum lttng_error_code record_ust_snapshot(struct ltt_ust_session *usess,
5034 const struct consumer_output *output,
5035 uint64_t nb_packets_per_stream)
5036{
5037 enum lttng_error_code status;
5038
5039 LTTNG_ASSERT(usess);
5040 LTTNG_ASSERT(output);
5041
5042 status = ust_app_snapshot_record(usess, output, nb_packets_per_stream);
5043 return status;
5044}
5045
5046static uint64_t get_session_size_one_more_packet_per_stream(const ltt_session::locked_ref& session,
5047 uint64_t cur_nr_packets)
5048{
5049 uint64_t tot_size = 0;
5050
5051 if (session->kernel_session) {
5052 struct ltt_kernel_session *ksess = session->kernel_session;
5053
5054 for (auto chan : lttng::urcu::list_iteration_adapter<ltt_kernel_channel,
5055 &ltt_kernel_channel::list>(
5056 ksess->channel_list.head)) {
5057 if (cur_nr_packets >= chan->channel->attr.num_subbuf) {
5058 /*
5059 * Don't take the channel into account if we
5060 * already grab all of its packets.
5061 */
5062 continue;
5063 }
5064 tot_size += chan->channel->attr.subbuf_size * chan->stream_count;
5065 }
5066 }
5067
5068 if (session->ust_session) {
5069 const struct ltt_ust_session *usess = session->ust_session;
5070
5071 tot_size += ust_app_get_size_one_more_packet_per_stream(usess, cur_nr_packets);
5072 }
5073
5074 return tot_size;
5075}
5076
5077/*
5078 * Calculate the number of packets we can grab from each stream that
5079 * fits within the overall snapshot max size.
5080 *
5081 * Returns -1 on error, 0 means infinite number of packets, else > 0 is
5082 * the number of packets per stream.
5083 *
5084 * TODO: this approach is not perfect: we consider the worst case
5085 * (packets filling the sub-buffers) as an upper bound, but we could do
5086 * better if we do this calculation while we actually grab the packet
5087 * content: we would know how much padding we don't actually store into
5088 * the file.
5089 *
5090 * This algorithm is currently bounded by the number of packets per
5091 * stream.
5092 *
5093 * Since we call this algorithm before actually grabbing the data, it's
5094 * an approximation: for instance, applications could appear/disappear
5095 * in between this call and actually grabbing data.
5096 */
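/*
 * Worked example with illustrative numbers: given max_size = 4 MiB and two
 * streams whose sub-buffers are 1 MiB each, every iteration of the loop
 * below "costs" 2 MiB (one more packet per stream). Two iterations fit
 * within the budget, a third would exceed it, so 2 packets per stream is
 * returned. If even one packet per stream does not fit, -1 is returned.
 */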
5097static int64_t get_session_nb_packets_per_stream(const ltt_session::locked_ref& session,
5098 uint64_t max_size)
5099{
5100 int64_t size_left;
5101 uint64_t cur_nb_packets = 0;
5102
5103 if (!max_size) {
5104 return 0; /* Infinite */
5105 }
5106
5107 size_left = max_size;
5108 for (;;) {
5109 uint64_t one_more_packet_tot_size;
5110
5111 one_more_packet_tot_size =
5112 get_session_size_one_more_packet_per_stream(session, cur_nb_packets);
5113 if (!one_more_packet_tot_size) {
5114 /* We are already grabbing all packets. */
5115 break;
5116 }
5117 size_left -= one_more_packet_tot_size;
5118 if (size_left < 0) {
5119 break;
5120 }
5121 cur_nb_packets++;
5122 }
5123 if (!cur_nb_packets && size_left != max_size) {
5124 /* Not enough room to grab one packet of each stream, error. */
5125 return -1;
5126 }
5127 return cur_nb_packets;
5128}
5129
5130static enum lttng_error_code snapshot_record(const ltt_session::locked_ref& session,
5131 const struct snapshot_output *snapshot_output)
5132{
5133 int64_t nb_packets_per_stream;
5134 char snapshot_chunk_name[LTTNG_NAME_MAX];
5135 int ret;
5136 enum lttng_error_code ret_code = LTTNG_OK;
5137 struct lttng_trace_chunk *snapshot_trace_chunk;
5138 struct consumer_output *original_ust_consumer_output = nullptr;
5139 struct consumer_output *original_kernel_consumer_output = nullptr;
5140 struct consumer_output *snapshot_ust_consumer_output = nullptr;
5141 struct consumer_output *snapshot_kernel_consumer_output = nullptr;
5142
5143 ret = snprintf(snapshot_chunk_name,
5144 sizeof(snapshot_chunk_name),
5145 "%s-%s-%" PRIu64,
5146 snapshot_output->name,
5147 snapshot_output->datetime,
5148 snapshot_output->nb_snapshot);
5149 if (ret < 0 || ret >= sizeof(snapshot_chunk_name)) {
5150 ERR("Failed to format snapshot name");
5151 ret_code = LTTNG_ERR_INVALID;
5152 goto error;
5153 }
5154 DBG("Recording snapshot \"%s\" for session \"%s\" with chunk name \"%s\"",
5155 snapshot_output->name,
5156 session->name,
5157 snapshot_chunk_name);
5158 if (!session->kernel_session && !session->ust_session) {
5159 ERR("Failed to record snapshot as no channels exist");
5160 ret_code = LTTNG_ERR_NO_CHANNEL;
5161 goto error;
5162 }
5163
5164 if (session->kernel_session) {
5165 original_kernel_consumer_output = session->kernel_session->consumer;
5166 snapshot_kernel_consumer_output = consumer_copy_output(snapshot_output->consumer);
5167 strcpy(snapshot_kernel_consumer_output->chunk_path, snapshot_chunk_name);
5168
5169 /* Copy the original domain subdir. */
5170 strcpy(snapshot_kernel_consumer_output->domain_subdir,
5171 original_kernel_consumer_output->domain_subdir);
5172
5173 ret = consumer_copy_sockets(snapshot_kernel_consumer_output,
5174 original_kernel_consumer_output);
5175 if (ret < 0) {
5176 ERR("Failed to copy consumer sockets from snapshot output configuration");
5177 ret_code = LTTNG_ERR_NOMEM;
5178 goto error;
5179 }
5180 ret_code = set_relayd_for_snapshot(snapshot_kernel_consumer_output, session);
5181 if (ret_code != LTTNG_OK) {
5182 ERR("Failed to setup relay daemon for kernel tracer snapshot");
5183 goto error;
5184 }
5185 session->kernel_session->consumer = snapshot_kernel_consumer_output;
5186 }
5187 if (session->ust_session) {
5188 original_ust_consumer_output = session->ust_session->consumer;
5189 snapshot_ust_consumer_output = consumer_copy_output(snapshot_output->consumer);
5190 strcpy(snapshot_ust_consumer_output->chunk_path, snapshot_chunk_name);
5191
5192 /* Copy the original domain subdir. */
5193 strcpy(snapshot_ust_consumer_output->domain_subdir,
5194 original_ust_consumer_output->domain_subdir);
5195
5196 ret = consumer_copy_sockets(snapshot_ust_consumer_output,
5197 original_ust_consumer_output);
5198 if (ret < 0) {
5199 ERR("Failed to copy consumer sockets from snapshot output configuration");
5200 ret_code = LTTNG_ERR_NOMEM;
5201 goto error;
5202 }
5203 ret_code = set_relayd_for_snapshot(snapshot_ust_consumer_output, session);
5204 if (ret_code != LTTNG_OK) {
5205 ERR("Failed to setup relay daemon for userspace tracer snapshot");
5206 goto error;
5207 }
5208 session->ust_session->consumer = snapshot_ust_consumer_output;
5209 }
5210
5211 snapshot_trace_chunk = session_create_new_trace_chunk(
5212 session,
5213 snapshot_kernel_consumer_output ?: snapshot_ust_consumer_output,
5214 consumer_output_get_base_path(snapshot_output->consumer),
5215 snapshot_chunk_name);
5216 if (!snapshot_trace_chunk) {
5217 ERR("Failed to create temporary trace chunk to record a snapshot of session \"%s\"",
5218 session->name);
5219 ret_code = LTTNG_ERR_CREATE_DIR_FAIL;
5220 goto error;
5221 }
5222 LTTNG_ASSERT(!session->current_trace_chunk);
5223 ret = session_set_trace_chunk(session, snapshot_trace_chunk, nullptr);
5224 lttng_trace_chunk_put(snapshot_trace_chunk);
5225 snapshot_trace_chunk = nullptr;
5226 if (ret) {
5227 ERR("Failed to set temporary trace chunk to record a snapshot of session \"%s\"",
5228 session->name);
5229 ret_code = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
5230 goto error;
5231 }
5232
5233 nb_packets_per_stream =
5234 get_session_nb_packets_per_stream(session, snapshot_output->max_size);
5235 if (nb_packets_per_stream < 0) {
5236 ret_code = LTTNG_ERR_MAX_SIZE_INVALID;
5237 goto error_close_trace_chunk;
5238 }
5239
5240 if (session->kernel_session) {
5241 ret_code = record_kernel_snapshot(session->kernel_session,
5242 snapshot_kernel_consumer_output,
5243 nb_packets_per_stream);
5244 if (ret_code != LTTNG_OK) {
5245 goto error_close_trace_chunk;
5246 }
5247 }
5248
5249 if (session->ust_session) {
5250 ret_code = record_ust_snapshot(
5251 session->ust_session, snapshot_ust_consumer_output, nb_packets_per_stream);
5252 if (ret_code != LTTNG_OK) {
5253 goto error_close_trace_chunk;
5254 }
5255 }
5256
5257error_close_trace_chunk:
5258 if (session_set_trace_chunk(session, nullptr, &snapshot_trace_chunk)) {
5259 ERR("Failed to release the current trace chunk of session \"%s\"", session->name);
5260 ret_code = LTTNG_ERR_UNK;
5261 }
5262
5263 if (session_close_trace_chunk(session,
5264 snapshot_trace_chunk,
5265 LTTNG_TRACE_CHUNK_COMMAND_TYPE_NO_OPERATION,
5266 nullptr)) {
5267 /*
5268 * Don't goto end; make sure the chunk is closed for the session
5269 * to allow future snapshots.
5270 */
5271 ERR("Failed to close snapshot trace chunk of session \"%s\"", session->name);
5272 ret_code = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
5273 }
5274
5275 lttng_trace_chunk_put(snapshot_trace_chunk);
5276 snapshot_trace_chunk = nullptr;
5277error:
5278 if (original_ust_consumer_output) {
5279 session->ust_session->consumer = original_ust_consumer_output;
5280 }
5281 if (original_kernel_consumer_output) {
5282 session->kernel_session->consumer = original_kernel_consumer_output;
5283 }
5284 consumer_output_put(snapshot_ust_consumer_output);
5285 consumer_output_put(snapshot_kernel_consumer_output);
5286 return ret_code;
5287}
5288
5289/*
5290 * Command LTTNG_SNAPSHOT_RECORD from lib lttng ctl.
5291 *
5292 * The wait parameter is ignored, so this call always waits for the snapshot
5293 * to complete before returning.
5294 *
5295 * Return LTTNG_OK on success or else a LTTNG_ERR code.
5296 */
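/*
 * Illustrative client-side equivalent (assumed lttng(1) CLI form):
 *
 *   lttng snapshot record --session=my-session --max-size=16M
 */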
5297int cmd_snapshot_record(const ltt_session::locked_ref& session,
5298 const struct lttng_snapshot_output *output,
5299 int wait __attribute__((unused)))
5300{
5301 enum lttng_error_code cmd_ret = LTTNG_OK;
5302 int ret;
5303 unsigned int snapshot_success = 0;
5304 char datetime[16];
5305 struct snapshot_output *tmp_output = nullptr;
5306
5307 LTTNG_ASSERT(output);
5308
5309 DBG("Cmd snapshot record for session %s", session->name);
5310
5311 /* Get the datetime for the snapshot output directory. */
5312 ret = utils_get_current_time_str("%Y%m%d-%H%M%S", datetime, sizeof(datetime));
5313 if (!ret) {
5314 cmd_ret = LTTNG_ERR_INVALID;
5315 goto error;
5316 }
5317
5318 /*
5319 * Recording a snapshot is only permitted if the session is set
5320 * in no-output (snapshot) mode.
5321 */
5322 if (session->output_traces) {
5323 cmd_ret = LTTNG_ERR_NOT_SNAPSHOT_SESSION;
5324 goto error;
5325 }
5326
5327 /* The session needs to be started at least once. */
5328 if (!session->has_been_started) {
5329 cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
5330 goto error;
5331 }
5332
5333 /* Use temporary output for the session. */
5334 if (*output->ctrl_url != '\0') {
5335 tmp_output = snapshot_output_alloc();
5336 if (!tmp_output) {
5337 cmd_ret = LTTNG_ERR_NOMEM;
5338 goto error;
5339 }
5340
5341 ret = snapshot_output_init(session,
5342 output->max_size,
5343 output->name,
5344 output->ctrl_url,
5345 output->data_url,
5346 session->consumer,
5347 tmp_output,
5348 nullptr);
5349 if (ret < 0) {
5350 if (ret == -ENOMEM) {
5351 cmd_ret = LTTNG_ERR_NOMEM;
5352 } else {
5353 cmd_ret = LTTNG_ERR_INVALID;
5354 }
5355 goto error;
5356 }
5357 /* Use the global session count for the temporary snapshot. */
5358 tmp_output->nb_snapshot = session->snapshot.nb_snapshot;
5359
5360 /* Use the global datetime */
5361 memcpy(tmp_output->datetime, datetime, sizeof(datetime));
5362 cmd_ret = snapshot_record(session, tmp_output);
5363 if (cmd_ret != LTTNG_OK) {
5364 goto error;
5365 }
5366 snapshot_success = 1;
5367 } else {
5368 for (auto *sout :
5369 lttng::urcu::lfht_iteration_adapter<snapshot_output,
5370 decltype(snapshot_output::node),
5371 &snapshot_output::node>(
5372 *session->snapshot.output_ht->ht)) {
5373 struct snapshot_output output_copy;
5374
5375 /*
5376 * Make a local copy of the output and override output
5377 * parameters with those provided as part of the
5378 * command.
5379 */
5380 memcpy(&output_copy, sout, sizeof(output_copy));
5381
5382 if (output->max_size != (uint64_t) -1ULL) {
5383 output_copy.max_size = output->max_size;
5384 }
5385
5386 output_copy.nb_snapshot = session->snapshot.nb_snapshot;
5387 memcpy(output_copy.datetime, datetime, sizeof(datetime));
5388
5389 /* Use temporary name. */
5390 if (*output->name != '\0') {
5391 if (lttng_strncpy(output_copy.name,
5392 output->name,
5393 sizeof(output_copy.name))) {
5394 cmd_ret = LTTNG_ERR_INVALID;
5395 goto error;
5396 }
5397 }
5398
5399 cmd_ret = snapshot_record(session, &output_copy);
5400 if (cmd_ret != LTTNG_OK) {
5401 goto error;
5402 }
5403
5404 snapshot_success = 1;
5405 }
5406 }
5407
5408 if (snapshot_success) {
5409 session->snapshot.nb_snapshot++;
5410 } else {
5411 cmd_ret = LTTNG_ERR_SNAPSHOT_FAIL;
5412 }
5413
5414error:
5415 if (tmp_output) {
5416 snapshot_output_destroy(tmp_output);
5417 }
5418
5419 return cmd_ret;
5420}
5421
5422/*
5423 * Command LTTNG_SET_SESSION_SHM_PATH processed by the client thread.
5424 */
5425int cmd_set_session_shm_path(const ltt_session::locked_ref& session, const char *shm_path)
5426{
5427 /*
5428 * Can only set shm path before session is started.
5429 */
5430 if (session->has_been_started) {
5431 return LTTNG_ERR_SESSION_STARTED;
5432 }
5433
5434 /* Report an error if shm_path is too long or not null-terminated. */
5435 const auto copy_ret = lttng_strncpy(session->shm_path, shm_path, sizeof(session->shm_path));
5436 return copy_ret == 0 ? LTTNG_OK : LTTNG_ERR_INVALID;
5437}
5438
5439/*
5440 * Command LTTNG_ROTATE_SESSION from the lttng-ctl library.
5441 *
5442 * Ask the consumer to rotate the session output directory.
5443 * The session lock must be held.
5444 *
5445 * Returns LTTNG_OK on success or else a negative LTTng error code.
5446 */
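/*
 * Illustrative client-side equivalents (assumed forms): the "lttng rotate
 * my-session" CLI command, or lttng_rotate_session() from lttng-ctl
 * followed by polling lttng_rotation_handle_get_state() until the rotation
 * completes.
 */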
5447int cmd_rotate_session(const ltt_session::locked_ref& session,
5448 struct lttng_rotate_session_return *rotate_return,
5449 bool quiet_rotation,
5450 enum lttng_trace_chunk_command_type command)
5451{
5452 int ret;
5453 uint64_t ongoing_rotation_chunk_id;
5454 enum lttng_error_code cmd_ret = LTTNG_OK;
5455 struct lttng_trace_chunk *chunk_being_archived = nullptr;
5456 struct lttng_trace_chunk *new_trace_chunk = nullptr;
5457 enum lttng_trace_chunk_status chunk_status;
5458 bool failed_to_rotate = false;
5459 enum lttng_error_code rotation_fail_code = LTTNG_OK;
5460
5461 if (!session->has_been_started) {
5462 cmd_ret = LTTNG_ERR_START_SESSION_ONCE;
5463 goto end;
5464 }
5465
5466 /*
5467 * Explicit rotation is not supported for live sessions.
5468 * However, live sessions can perform a quiet rotation on
5469 * destroy.
5470 * Rotation is not supported for snapshot traces (no output).
5471 */
5472 if ((!quiet_rotation && session->live_timer) || !session->output_traces) {
5473 cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
5474 goto end;
5475 }
5476
5477 /* Unsupported feature in lttng-relayd before 2.11. */
5478 if (!quiet_rotation && session->consumer->type == CONSUMER_DST_NET &&
5479 (session->consumer->relay_major_version == 2 &&
5480 session->consumer->relay_minor_version < 11)) {
5481 cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_RELAY;
5482 goto end;
5483 }
5484
5485 /* Unsupported feature in lttng-modules before 2.8 (lack of sequence number). */
5486 if (session->kernel_session && !kernel_supports_ring_buffer_packet_sequence_number()) {
5487 cmd_ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE_KERNEL;
5488 goto end;
5489 }
5490
5491 if (session->rotation_state == LTTNG_ROTATION_STATE_ONGOING) {
5492 DBG("Refusing to launch a rotation; a rotation is already in progress for session %s",
5493 session->name);
5494 cmd_ret = LTTNG_ERR_ROTATION_PENDING;
5495 goto end;
5496 }
5497
5498 /*
5499 * After a stop, only one rotation is allowed; any further rotation
5500 * would be useless until a new start.
5501 */
5502 if (session->rotated_after_last_stop) {
5503 DBG("Session \"%s\" was already rotated after stop, refusing rotation",
5504 session->name);
5505 cmd_ret = LTTNG_ERR_ROTATION_MULTIPLE_AFTER_STOP;
5506 goto end;
5507 }
5508
5509 /*
5510 * After a stop followed by a clear, disallow further rotations as they
5511 * would generate empty chunks.
5512 */
5513 if (session->cleared_after_last_stop) {
5514 DBG("Session \"%s\" was already cleared after stop, refusing rotation",
5515 session->name);
5516 cmd_ret = LTTNG_ERR_ROTATION_AFTER_STOP_CLEAR;
5517 goto end;
5518 }
5519
5520 if (session->active) {
5521 new_trace_chunk =
5522 session_create_new_trace_chunk(session, nullptr, nullptr, nullptr);
5523 if (!new_trace_chunk) {
5524 cmd_ret = LTTNG_ERR_CREATE_DIR_FAIL;
5525 goto error;
5526 }
5527 }
5528
5529 /*
5530 * The current trace chunk becomes the chunk being archived.
5531 *
5532 * After this point, "chunk_being_archived" must absolutely
5533 * be closed on the consumer(s), otherwise it will never be
5534 * cleaned-up, which will result in a leak.
5535 */
5536 ret = session_set_trace_chunk(session, new_trace_chunk, &chunk_being_archived);
5537 if (ret) {
5538 cmd_ret = LTTNG_ERR_CREATE_TRACE_CHUNK_FAIL_CONSUMER;
5539 goto error;
5540 }
5541
5542 if (session->kernel_session) {
5543 cmd_ret = kernel_rotate_session(session);
5544 if (cmd_ret != LTTNG_OK) {
5545 failed_to_rotate = true;
5546 rotation_fail_code = cmd_ret;
5547 }
5548 }
5549 if (session->ust_session) {
5550 cmd_ret = ust_app_rotate_session(session);
5551 if (cmd_ret != LTTNG_OK) {
5552 failed_to_rotate = true;
5553 rotation_fail_code = cmd_ret;
5554 }
5555 }
5556
5557 if (!session->active) {
5558 session->rotated_after_last_stop = true;
5559 }
5560
5561 if (!chunk_being_archived) {
5562 DBG("Rotating session \"%s\" from a \"NULL\" trace chunk to a new trace chunk, skipping completion check",
5563 session->name);
5564 if (failed_to_rotate) {
5565 cmd_ret = rotation_fail_code;
5566 goto error;
5567 }
5568 cmd_ret = LTTNG_OK;
5569 goto end;
5570 }
5571
5572 session->rotation_state = LTTNG_ROTATION_STATE_ONGOING;
5573 chunk_status = lttng_trace_chunk_get_id(chunk_being_archived, &ongoing_rotation_chunk_id);
5574 LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
5575
5576 ret = session_close_trace_chunk(
5577 session, chunk_being_archived, command, session->last_chunk_path);
5578 if (ret) {
5579 cmd_ret = LTTNG_ERR_CLOSE_TRACE_CHUNK_FAIL_CONSUMER;
5580 goto error;
5581 }
5582
5583 if (failed_to_rotate) {
5584 cmd_ret = rotation_fail_code;
5585 goto error;
5586 }
5587
5588 session->quiet_rotation = quiet_rotation;
5589 ret = timer_session_rotation_pending_check_start(session, DEFAULT_ROTATE_PENDING_TIMER);
5590 if (ret) {
5591 cmd_ret = LTTNG_ERR_UNK;
5592 goto error;
5593 }
5594
5595 if (rotate_return) {
5596 rotate_return->rotation_id = ongoing_rotation_chunk_id;
5597 }
5598
5599 session->chunk_being_archived = chunk_being_archived;
5600 chunk_being_archived = nullptr;
5601 if (!quiet_rotation) {
5602 ret = notification_thread_command_session_rotation_ongoing(
5603 the_notification_thread_handle, session->id, ongoing_rotation_chunk_id);
5604 if (ret != LTTNG_OK) {
5605 ERR("Failed to notify notification thread that a session rotation is ongoing for session %s",
5606 session->name);
5607 cmd_ret = (lttng_error_code) ret;
5608 }
5609 }
5610
5611 DBG("Cmd rotate session %s, archive_id %" PRIu64 " sent",
5612 session->name,
5613 ongoing_rotation_chunk_id);
5614end:
5615 lttng_trace_chunk_put(new_trace_chunk);
5616 lttng_trace_chunk_put(chunk_being_archived);
5617 ret = (cmd_ret == LTTNG_OK) ? cmd_ret : -((int) cmd_ret);
5618 return ret;
5619error:
5620 if (session_reset_rotation_state(session, LTTNG_ROTATION_STATE_ERROR)) {
5621 ERR("Failed to reset rotation state of session \"%s\"", session->name);
5622 }
5623 goto end;
5624}
5625
5626/*
5627 * Command LTTNG_ROTATION_GET_INFO from the lttng-ctl library.
5628 *
5629 * Check if the session has finished its rotation.
5630 *
5631 * Return LTTNG_OK on success or else an LTTNG_ERR code.
5632 */
5633int cmd_rotate_get_info(const ltt_session::locked_ref& session,
5634 struct lttng_rotation_get_info_return *info_return,
5635 uint64_t rotation_id)
5636{
5637 enum lttng_error_code cmd_ret = LTTNG_OK;
5638 enum lttng_rotation_state rotation_state;
5639
5640 DBG("Cmd rotate_get_info session %s, rotation id %" PRIu64,
5641 session->name,
5642 session->most_recent_chunk_id.value);
5643
5644 if (session->chunk_being_archived) {
5645 enum lttng_trace_chunk_status chunk_status;
5646 uint64_t chunk_id;
5647
5648 chunk_status = lttng_trace_chunk_get_id(session->chunk_being_archived, &chunk_id);
5649 LTTNG_ASSERT(chunk_status == LTTNG_TRACE_CHUNK_STATUS_OK);
5650
5651 rotation_state = rotation_id == chunk_id ? LTTNG_ROTATION_STATE_ONGOING :
5652 LTTNG_ROTATION_STATE_EXPIRED;
5653 } else {
5654 if (session->last_archived_chunk_id.is_set &&
5655 rotation_id != session->last_archived_chunk_id.value) {
5656 rotation_state = LTTNG_ROTATION_STATE_EXPIRED;
5657 } else {
5658 rotation_state = session->rotation_state;
5659 }
5660 }
5661
5662 switch (rotation_state) {
5663 case LTTNG_ROTATION_STATE_NO_ROTATION:
5664 DBG("Reporting that no rotation has occurred within the lifetime of session \"%s\"",
5665 session->name);
5666 goto end;
5667 case LTTNG_ROTATION_STATE_EXPIRED:
5668 DBG("Reporting that the rotation state of rotation id %" PRIu64
5669 " of session \"%s\" has expired",
5670 rotation_id,
5671 session->name);
5672 break;
5673 case LTTNG_ROTATION_STATE_ONGOING:
5674 DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is still pending",
5675 rotation_id,
5676 session->name);
5677 break;
5678 case LTTNG_ROTATION_STATE_COMPLETED:
5679 {
5680 int fmt_ret;
5681 char *chunk_path;
5682 char *current_tracing_path_reply;
5683 size_t current_tracing_path_reply_len;
5684
5685 DBG("Reporting that rotation id %" PRIu64 " of session \"%s\" is completed",
5686 rotation_id,
5687 session->name);
5688
5689 switch (session_get_consumer_destination_type(session)) {
5690 case CONSUMER_DST_LOCAL:
5691 current_tracing_path_reply = info_return->location.local.absolute_path;
5692 current_tracing_path_reply_len =
5693 sizeof(info_return->location.local.absolute_path);
5694 info_return->location_type =
5695 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_LOCAL;
5696 fmt_ret = asprintf(&chunk_path,
5697 "%s/" DEFAULT_ARCHIVED_TRACE_CHUNKS_DIRECTORY "/%s",
5698 session_get_base_path(session),
5699 session->last_archived_chunk_name);
5700 if (fmt_ret == -1) {
5701 PERROR("Failed to format the path of the last archived trace chunk");
5702 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5703 cmd_ret = LTTNG_ERR_UNK;
5704 goto end;
5705 }
5706 break;
5707 case CONSUMER_DST_NET:
5708 {
5709 uint16_t ctrl_port, data_port;
5710
5711 current_tracing_path_reply = info_return->location.relay.relative_path;
5712 current_tracing_path_reply_len =
5713 sizeof(info_return->location.relay.relative_path);
5714 /* Currently the only supported relay protocol. */
5715 info_return->location.relay.protocol =
5716 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_RELAY_PROTOCOL_TYPE_TCP;
5717
5718 fmt_ret = lttng_strncpy(info_return->location.relay.host,
5719 session_get_net_consumer_hostname(session),
5720 sizeof(info_return->location.relay.host));
5721 if (fmt_ret) {
5722 ERR("Failed to copy host name to rotate_get_info reply");
5723 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5724 cmd_ret = LTTNG_ERR_SET_URL;
5725 goto end;
5726 }
5727
5728 session_get_net_consumer_ports(session, &ctrl_port, &data_port);
5729 info_return->location.relay.ports.control = ctrl_port;
5730 info_return->location.relay.ports.data = data_port;
5731 info_return->location_type =
5732 (int8_t) LTTNG_TRACE_ARCHIVE_LOCATION_TYPE_RELAY;
5733 chunk_path = strdup(session->last_chunk_path);
5734 if (!chunk_path) {
5735 ERR("Failed to allocate the path of the last archived trace chunk");
5736 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5737 cmd_ret = LTTNG_ERR_UNK;
5738 goto end;
5739 }
5740 break;
5741 }
5742 default:
5743 abort();
5744 }
5745
5746 fmt_ret = lttng_strncpy(
5747 current_tracing_path_reply, chunk_path, current_tracing_path_reply_len);
5748 free(chunk_path);
5749 if (fmt_ret) {
5750 ERR("Failed to copy path of the last archived trace chunk to rotate_get_info reply");
5751 info_return->status = LTTNG_ROTATION_STATUS_ERROR;
5752 cmd_ret = LTTNG_ERR_UNK;
5753 goto end;
5754 }
5755
5756 break;
5757 }
5758 case LTTNG_ROTATION_STATE_ERROR:
5759 DBG("Reporting that an error occurred during rotation %" PRIu64
5760 " of session \"%s\"",
5761 rotation_id,
5762 session->name);
5763 break;
5764 default:
5765 abort();
5766 }
5767
5768 cmd_ret = LTTNG_OK;
5769end:
5770 info_return->status = (int32_t) rotation_state;
5771 return cmd_ret;
5772}
5773
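/*
 * Illustrative sketch (not part of this file): how an lttng-ctl client could
 * poll the state reported by LTTNG_ROTATION_GET_INFO. This assumes the public
 * rotation API declared in <lttng/rotation.h> (lttng_rotate_session(),
 * lttng_rotation_handle_get_state(), lttng_rotation_handle_destroy());
 * error handling is abbreviated.
 *
 *   #include <lttng/lttng.h>
 *   #include <unistd.h>
 *
 *   static int wait_for_rotation_completion(const char *session_name)
 *   {
 *           struct lttng_rotation_handle *handle = nullptr;
 *           enum lttng_rotation_state state = LTTNG_ROTATION_STATE_ONGOING;
 *
 *           if (lttng_rotate_session(session_name, nullptr, &handle) != 0) {
 *                   return -1;
 *           }
 *
 *           // Each call results in an LTTNG_ROTATION_GET_INFO command handled above.
 *           while (state == LTTNG_ROTATION_STATE_ONGOING) {
 *                   if (lttng_rotation_handle_get_state(handle, &state) !=
 *                       LTTNG_ROTATION_STATUS_OK) {
 *                           break;
 *                   }
 *
 *                   usleep(100000);
 *           }
 *
 *           lttng_rotation_handle_destroy(handle);
 *           return state == LTTNG_ROTATION_STATE_COMPLETED ? 0 : -1;
 *   }
 */
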
5774/*
5775 * Command LTTNG_ROTATION_SET_SCHEDULE from the lttng-ctl library.
5776 *
5777 * Configure the automatic rotation parameters.
5778 * Setting 'activate' to true activates the rotation schedule of 'schedule_type'
5779 * with 'new_value'. Setting 'activate' to false deactivates the schedule and
5780 * validates that 'new_value' matches the currently active value.
5781 *
5782 * Return LTTNG_OK on success or else a positive LTTNG_ERR code.
5783 */
5784int cmd_rotation_set_schedule(const ltt_session::locked_ref& session,
5785 bool activate,
5786 enum lttng_rotation_schedule_type schedule_type,
5787 uint64_t new_value)
5788{
5789 int ret;
5790 uint64_t *parameter_value;
5791
5792 DBG("Cmd rotate set schedule session %s", session->name);
5793
5794 if (session->live_timer || !session->output_traces) {
5795 DBG("Failing ROTATION_SET_SCHEDULE command as the rotation feature is not available for this session");
5796 ret = LTTNG_ERR_ROTATION_NOT_AVAILABLE;
5797 goto end;
5798 }
5799
5800 switch (schedule_type) {
5801 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
5802 parameter_value = &session->rotate_size;
5803 break;
5804 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
5805 parameter_value = &session->rotate_timer_period;
5806 if (new_value >= UINT_MAX) {
5807 DBG("Failing ROTATION_SET_SCHEDULE command as the value requested for a periodic rotation schedule is invalid: %" PRIu64
5808			    " >= %u (UINT_MAX)",
5809 new_value,
5810 UINT_MAX);
5811 ret = LTTNG_ERR_INVALID;
5812 goto end;
5813 }
5814 break;
5815 default:
5816 WARN("Failing ROTATION_SET_SCHEDULE command on unknown schedule type");
5817 ret = LTTNG_ERR_INVALID;
5818 goto end;
5819 }
5820
5821 /* Improper use of the API. */
5822 if (new_value == -1ULL) {
5823 WARN("Failing ROTATION_SET_SCHEDULE command as the value requested is -1");
5824 ret = LTTNG_ERR_INVALID;
5825 goto end;
5826 }
5827
5828 /*
5829	 * As indicated in struct ltt_session's comments, a value of 0 means
5830	 * this rotation schedule type is not in use.
5831 *
5832 * Reject the command if we were asked to activate a schedule that was
5833 * already active.
5834 */
5835 if (activate && *parameter_value != 0) {
5836 DBG("Failing ROTATION_SET_SCHEDULE (activate) command as the schedule is already active");
5837 ret = LTTNG_ERR_ROTATION_SCHEDULE_SET;
5838 goto end;
5839 }
5840
5841 /*
5842 * Reject the command if we were asked to deactivate a schedule that was
5843 * not active.
5844 */
5845 if (!activate && *parameter_value == 0) {
5846 DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as the schedule is already inactive");
5847 ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
5848 goto end;
5849 }
5850
5851 /*
5852 * Reject the command if we were asked to deactivate a schedule that
5853 * doesn't exist.
5854 */
5855 if (!activate && *parameter_value != new_value) {
5856		DBG("Failing ROTATION_SET_SCHEDULE (deactivate) command as a nonexistent schedule was provided");
5857 ret = LTTNG_ERR_ROTATION_SCHEDULE_NOT_SET;
5858 goto end;
5859 }
5860
5861 *parameter_value = activate ? new_value : 0;
5862
5863 switch (schedule_type) {
5864 case LTTNG_ROTATION_SCHEDULE_TYPE_PERIODIC:
5865 if (activate && session->active) {
5866 /*
5867 * Only start the timer if the session is active,
5868 * otherwise it will be started when the session starts.
5869 */
5870 ret = timer_session_rotation_schedule_timer_start(session, new_value);
5871 if (ret) {
5872 ERR("Failed to enable session rotation timer in ROTATION_SET_SCHEDULE command");
5873 ret = LTTNG_ERR_UNK;
5874 goto end;
5875 }
5876 } else {
5877 ret = timer_session_rotation_schedule_timer_stop(session);
5878 if (ret) {
5879 ERR("Failed to disable session rotation timer in ROTATION_SET_SCHEDULE command");
5880 ret = LTTNG_ERR_UNK;
5881 goto end;
5882 }
5883 }
5884 break;
5885 case LTTNG_ROTATION_SCHEDULE_TYPE_SIZE_THRESHOLD:
5886 if (activate) {
5887 try {
5888 the_rotation_thread_handle->subscribe_session_consumed_size_rotation(
5889 *session, new_value);
5890 } catch (const std::exception& e) {
5891 ERR("Failed to enable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
5892 e.what());
5893 ret = LTTNG_ERR_UNK;
5894 goto end;
5895 }
5896 } else {
5897 try {
5898 the_rotation_thread_handle
5899 ->unsubscribe_session_consumed_size_rotation(*session);
5900 } catch (const std::exception& e) {
5901 ERR("Failed to disable consumed-size notification in ROTATION_SET_SCHEDULE command: %s",
5902 e.what());
5903 ret = LTTNG_ERR_UNK;
5904 goto end;
5905 }
5906 }
5907 break;
5908 default:
5909 /* Would have been caught before. */
5910 abort();
5911 }
5912
5913 ret = LTTNG_OK;
5914
5915 goto end;
5916
5917end:
5918 return ret;
5919}
5920
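/*
 * Illustrative sketch (not part of this file): the client-side calls that lead
 * to an LTTNG_ROTATION_SET_SCHEDULE command with 'activate' set to true. This
 * assumes the public schedule API declared in <lttng/rotation.h>
 * (lttng_rotation_schedule_periodic_create(),
 * lttng_session_add_rotation_schedule(), ...); error handling is abbreviated.
 *
 *   #include <lttng/lttng.h>
 *
 *   static int enable_periodic_rotation(const char *session_name, uint64_t period_us)
 *   {
 *           struct lttng_rotation_schedule *schedule =
 *                   lttng_rotation_schedule_periodic_create();
 *           enum lttng_rotation_status status;
 *
 *           if (!schedule) {
 *                   return -1;
 *           }
 *
 *           status = lttng_rotation_schedule_periodic_set_period(schedule, period_us);
 *           if (status == LTTNG_ROTATION_STATUS_OK) {
 *                   // Results in LTTNG_ROTATION_SET_SCHEDULE (activate = true).
 *                   status = lttng_session_add_rotation_schedule(session_name, schedule);
 *           }
 *
 *           lttng_rotation_schedule_destroy(schedule);
 *           return status == LTTNG_ROTATION_STATUS_OK ? 0 : -1;
 *   }
 *
 * The equivalent CLI invocation is `lttng enable-rotation --session=NAME
 * --timer=PERIOD` (or `--size=SIZE` for a size-threshold schedule).
 */
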
5921/* Wait for a given path to be removed before continuing. */
5922static enum lttng_error_code wait_on_path(void *path_data)
5923{
5924 const char *shm_path = (const char *) path_data;
5925
5926 DBG("Waiting for the shm path at %s to be removed before completing session destruction",
5927 shm_path);
5928 while (true) {
5929 int ret;
5930 struct stat st;
5931
5932 ret = stat(shm_path, &st);
5933 if (ret) {
5934 if (errno != ENOENT) {
5935 PERROR("stat() returned an error while checking for the existence of the shm path");
5936 } else {
5937 DBG("shm path no longer exists, completing the destruction of session");
5938 }
5939 break;
5940 } else {
5941 if (!S_ISDIR(st.st_mode)) {
5942 ERR("The type of shm path %s returned by stat() is not a directory; aborting the wait for shm path removal",
5943 shm_path);
5944 break;
5945 }
5946 }
5947 usleep(SESSION_DESTROY_SHM_PATH_CHECK_DELAY_US);
5948 }
5949 return LTTNG_OK;
5950}
5951
5952/*
5953 * Returns a pointer to a handler to run on completion of a command.
5954 * Returns NULL if no handler has to be run for the last command executed.
5955 */
5956const struct cmd_completion_handler *cmd_pop_completion_handler()
5957{
5958 struct cmd_completion_handler *handler = current_completion_handler;
5959
5960 current_completion_handler = nullptr;
5961 return handler;
5962}
5963
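/*
 * Illustrative sketch (not part of this file): a plausible way for the command
 * dispatching code to consume the popped handler once the command has been
 * processed; where it is actually invoked is up to the caller.
 *
 *   const struct cmd_completion_handler *handler = cmd_pop_completion_handler();
 *
 *   if (handler) {
 *           const enum lttng_error_code completion_code = handler->run(handler->data);
 *
 *           if (completion_code != LTTNG_OK) {
 *                   ERR("Command completion handler failed");
 *           }
 *   }
 */
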
5964/*
5965 * Init command subsystem.
5966 */
5967void cmd_init()
5968{
5969 /*
5970 * Set network sequence index to 1 for streams to match a relayd
5971 * socket on the consumer side.
5972 */
5973 pthread_mutex_lock(&relayd_net_seq_idx_lock);
5974 relayd_net_seq_idx = 1;
5975 pthread_mutex_unlock(&relayd_net_seq_idx_lock);
5976
5977 DBG("Command subsystem initialized");
5978}