/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
24 #include <common/compat/getenv.h>
25 #include <common/unix.h>
26 #include <common/utils.h>
27 #include <lttng/userspace-probe-internal.h>
28 #include <lttng/event-internal.h>
31 #include "lttng-sessiond.h"
35 #include "health-sessiond.h"
36 #include "testpoint.h"
/*
 * State of this (client) thread, shared with the thread that waits for it
 * to come up. Protected by its own lock; waiters block on the condition
 * variable until is_running is set.
 */
static struct thread_state {
	pthread_cond_t cond;
	pthread_mutex_t lock;
	bool is_running; /* Set to true once the thread is operational. */
} thread_state = {
	.cond = PTHREAD_COND_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.is_running = false
};
51 void set_thread_state_running(void)
53 pthread_mutex_lock(&thread_state
.lock
);
54 thread_state
.is_running
= true;
55 pthread_cond_broadcast(&thread_state
.cond
);
56 pthread_mutex_unlock(&thread_state
.lock
);
59 static void wait_thread_state_running(void)
61 pthread_mutex_lock(&thread_state
.lock
);
62 while (!thread_state
.is_running
) {
63 pthread_cond_wait(&thread_state
.cond
,
66 pthread_mutex_unlock(&thread_state
.lock
);
70 * Setup the outgoing data buffer for the response (llm) by allocating the
71 * right amount of memory and copying the original information from the lsm
74 * Return 0 on success, negative value on error.
76 static int setup_lttng_msg(struct command_ctx
*cmd_ctx
,
77 const void *payload_buf
, size_t payload_len
,
78 const void *cmd_header_buf
, size_t cmd_header_len
)
81 const size_t header_len
= sizeof(struct lttcomm_lttng_msg
);
82 const size_t cmd_header_offset
= header_len
;
83 const size_t payload_offset
= cmd_header_offset
+ cmd_header_len
;
84 const size_t total_msg_size
= header_len
+ cmd_header_len
+ payload_len
;
86 cmd_ctx
->llm
= zmalloc(total_msg_size
);
88 if (cmd_ctx
->llm
== NULL
) {
94 /* Copy common data */
95 cmd_ctx
->llm
->cmd_type
= cmd_ctx
->lsm
->cmd_type
;
96 cmd_ctx
->llm
->pid
= cmd_ctx
->lsm
->domain
.attr
.pid
;
97 cmd_ctx
->llm
->cmd_header_size
= cmd_header_len
;
98 cmd_ctx
->llm
->data_size
= payload_len
;
99 cmd_ctx
->lttng_msg_size
= total_msg_size
;
101 /* Copy command header */
102 if (cmd_header_len
) {
103 memcpy(((uint8_t *) cmd_ctx
->llm
) + cmd_header_offset
, cmd_header_buf
,
109 memcpy(((uint8_t *) cmd_ctx
->llm
) + payload_offset
, payload_buf
,
118 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
119 * exec or it will fails.
121 static int spawn_consumer_thread(struct consumer_data
*consumer_data
)
124 struct timespec timeout
;
127 * Make sure we set the readiness flag to 0 because we are NOT ready.
128 * This access to consumer_thread_is_ready does not need to be
129 * protected by consumer_data.cond_mutex (yet) since the consumer
130 * management thread has not been started at this point.
132 consumer_data
->consumer_thread_is_ready
= 0;
134 /* Setup pthread condition */
135 ret
= pthread_condattr_init(&consumer_data
->condattr
);
138 PERROR("pthread_condattr_init consumer data");
143 * Set the monotonic clock in order to make sure we DO NOT jump in time
144 * between the clock_gettime() call and the timedwait call. See bug #324
145 * for a more details and how we noticed it.
147 ret
= pthread_condattr_setclock(&consumer_data
->condattr
, CLOCK_MONOTONIC
);
150 PERROR("pthread_condattr_setclock consumer data");
154 ret
= pthread_cond_init(&consumer_data
->cond
, &consumer_data
->condattr
);
157 PERROR("pthread_cond_init consumer data");
161 ret
= pthread_create(&consumer_data
->thread
, default_pthread_attr(),
162 thread_manage_consumer
, consumer_data
);
165 PERROR("pthread_create consumer");
170 /* We are about to wait on a pthread condition */
171 pthread_mutex_lock(&consumer_data
->cond_mutex
);
173 /* Get time for sem_timedwait absolute timeout */
174 clock_ret
= lttng_clock_gettime(CLOCK_MONOTONIC
, &timeout
);
176 * Set the timeout for the condition timed wait even if the clock gettime
177 * call fails since we might loop on that call and we want to avoid to
178 * increment the timeout too many times.
180 timeout
.tv_sec
+= DEFAULT_SEM_WAIT_TIMEOUT
;
183 * The following loop COULD be skipped in some conditions so this is why we
184 * set ret to 0 in order to make sure at least one round of the loop is
190 * Loop until the condition is reached or when a timeout is reached. Note
191 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
192 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
193 * possible. This loop does not take any chances and works with both of
196 while (!consumer_data
->consumer_thread_is_ready
&& ret
!= ETIMEDOUT
) {
198 PERROR("clock_gettime spawn consumer");
199 /* Infinite wait for the consumerd thread to be ready */
200 ret
= pthread_cond_wait(&consumer_data
->cond
,
201 &consumer_data
->cond_mutex
);
203 ret
= pthread_cond_timedwait(&consumer_data
->cond
,
204 &consumer_data
->cond_mutex
, &timeout
);
208 /* Release the pthread condition */
209 pthread_mutex_unlock(&consumer_data
->cond_mutex
);
213 if (ret
== ETIMEDOUT
) {
217 * Call has timed out so we kill the kconsumerd_thread and return
220 ERR("Condition timed out. The consumer thread was never ready."
222 pth_ret
= pthread_cancel(consumer_data
->thread
);
224 PERROR("pthread_cancel consumer thread");
227 PERROR("pthread_cond_wait failed consumer thread");
229 /* Caller is expecting a negative value on failure. */
234 pthread_mutex_lock(&consumer_data
->pid_mutex
);
235 if (consumer_data
->pid
== 0) {
236 ERR("Consumerd did not start");
237 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
240 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
249 * Fork and exec a consumer daemon (consumerd).
251 * Return pid if successful else -1.
253 static pid_t
spawn_consumerd(struct consumer_data
*consumer_data
)
257 const char *consumer_to_use
;
258 const char *verbosity
;
261 DBG("Spawning consumerd");
268 if (config
.verbose_consumer
) {
269 verbosity
= "--verbose";
270 } else if (lttng_opt_quiet
) {
271 verbosity
= "--quiet";
276 switch (consumer_data
->type
) {
277 case LTTNG_CONSUMER_KERNEL
:
279 * Find out which consumerd to execute. We will first try the
280 * 64-bit path, then the sessiond's installation directory, and
281 * fallback on the 32-bit one,
283 DBG3("Looking for a kernel consumer at these locations:");
284 DBG3(" 1) %s", config
.consumerd64_bin_path
.value
? : "NULL");
285 DBG3(" 2) %s/%s", INSTALL_BIN_PATH
, DEFAULT_CONSUMERD_FILE
);
286 DBG3(" 3) %s", config
.consumerd32_bin_path
.value
? : "NULL");
287 if (stat(config
.consumerd64_bin_path
.value
, &st
) == 0) {
288 DBG3("Found location #1");
289 consumer_to_use
= config
.consumerd64_bin_path
.value
;
290 } else if (stat(INSTALL_BIN_PATH
"/" DEFAULT_CONSUMERD_FILE
, &st
) == 0) {
291 DBG3("Found location #2");
292 consumer_to_use
= INSTALL_BIN_PATH
"/" DEFAULT_CONSUMERD_FILE
;
293 } else if (config
.consumerd32_bin_path
.value
&&
294 stat(config
.consumerd32_bin_path
.value
, &st
) == 0) {
295 DBG3("Found location #3");
296 consumer_to_use
= config
.consumerd32_bin_path
.value
;
298 DBG("Could not find any valid consumerd executable");
302 DBG("Using kernel consumer at: %s", consumer_to_use
);
303 (void) execl(consumer_to_use
,
304 "lttng-consumerd", verbosity
, "-k",
305 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
306 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
307 "--group", config
.tracing_group_name
.value
,
310 case LTTNG_CONSUMER64_UST
:
312 if (config
.consumerd64_lib_dir
.value
) {
317 tmp
= lttng_secure_getenv("LD_LIBRARY_PATH");
321 tmplen
= strlen(config
.consumerd64_lib_dir
.value
) + 1 /* : */ + strlen(tmp
);
322 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
327 strcat(tmpnew
, config
.consumerd64_lib_dir
.value
);
328 if (tmp
[0] != '\0') {
332 ret
= setenv("LD_LIBRARY_PATH", tmpnew
, 1);
339 DBG("Using 64-bit UST consumer at: %s", config
.consumerd64_bin_path
.value
);
340 (void) execl(config
.consumerd64_bin_path
.value
, "lttng-consumerd", verbosity
, "-u",
341 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
342 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
343 "--group", config
.tracing_group_name
.value
,
347 case LTTNG_CONSUMER32_UST
:
349 if (config
.consumerd32_lib_dir
.value
) {
354 tmp
= lttng_secure_getenv("LD_LIBRARY_PATH");
358 tmplen
= strlen(config
.consumerd32_lib_dir
.value
) + 1 /* : */ + strlen(tmp
);
359 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
364 strcat(tmpnew
, config
.consumerd32_lib_dir
.value
);
365 if (tmp
[0] != '\0') {
369 ret
= setenv("LD_LIBRARY_PATH", tmpnew
, 1);
376 DBG("Using 32-bit UST consumer at: %s", config
.consumerd32_bin_path
.value
);
377 (void) execl(config
.consumerd32_bin_path
.value
, "lttng-consumerd", verbosity
, "-u",
378 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
379 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
380 "--group", config
.tracing_group_name
.value
,
385 ERR("unknown consumer type");
389 PERROR("Consumer execl()");
391 /* Reaching this point, we got a failure on our execl(). */
393 } else if (pid
> 0) {
396 PERROR("start consumer fork");
404 * Spawn the consumerd daemon and session daemon thread.
406 static int start_consumerd(struct consumer_data
*consumer_data
)
411 * Set the listen() state on the socket since there is a possible race
412 * between the exec() of the consumer daemon and this call if place in the
413 * consumer thread. See bug #366 for more details.
415 ret
= lttcomm_listen_unix_sock(consumer_data
->err_sock
);
420 pthread_mutex_lock(&consumer_data
->pid_mutex
);
421 if (consumer_data
->pid
!= 0) {
422 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
426 ret
= spawn_consumerd(consumer_data
);
428 ERR("Spawning consumerd failed");
429 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
433 /* Setting up the consumer_data pid */
434 consumer_data
->pid
= ret
;
435 DBG2("Consumer pid %d", consumer_data
->pid
);
436 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
438 DBG2("Spawning consumer control thread");
439 ret
= spawn_consumer_thread(consumer_data
);
441 ERR("Fatal error spawning consumer control thread");
449 /* Cleanup already created sockets on error. */
450 if (consumer_data
->err_sock
>= 0) {
453 err
= close(consumer_data
->err_sock
);
455 PERROR("close consumer data error socket");
462 * Copy consumer output from the tracing session to the domain session. The
463 * function also applies the right modification on a per domain basis for the
464 * trace files destination directory.
466 * Should *NOT* be called with RCU read-side lock held.
468 static int copy_session_consumer(int domain
, struct ltt_session
*session
)
471 const char *dir_name
;
472 struct consumer_output
*consumer
;
475 assert(session
->consumer
);
478 case LTTNG_DOMAIN_KERNEL
:
479 DBG3("Copying tracing session consumer output in kernel session");
481 * XXX: We should audit the session creation and what this function
482 * does "extra" in order to avoid a destroy since this function is used
483 * in the domain session creation (kernel and ust) only. Same for UST
486 if (session
->kernel_session
->consumer
) {
487 consumer_output_put(session
->kernel_session
->consumer
);
489 session
->kernel_session
->consumer
=
490 consumer_copy_output(session
->consumer
);
491 /* Ease our life a bit for the next part */
492 consumer
= session
->kernel_session
->consumer
;
493 dir_name
= DEFAULT_KERNEL_TRACE_DIR
;
495 case LTTNG_DOMAIN_JUL
:
496 case LTTNG_DOMAIN_LOG4J
:
497 case LTTNG_DOMAIN_PYTHON
:
498 case LTTNG_DOMAIN_UST
:
499 DBG3("Copying tracing session consumer output in UST session");
500 if (session
->ust_session
->consumer
) {
501 consumer_output_put(session
->ust_session
->consumer
);
503 session
->ust_session
->consumer
=
504 consumer_copy_output(session
->consumer
);
505 /* Ease our life a bit for the next part */
506 consumer
= session
->ust_session
->consumer
;
507 dir_name
= DEFAULT_UST_TRACE_DIR
;
510 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
514 /* Append correct directory to subdir */
515 strncat(consumer
->subdir
, dir_name
,
516 sizeof(consumer
->subdir
) - strlen(consumer
->subdir
) - 1);
517 DBG3("Copy session consumer subdir %s", consumer
->subdir
);
526 * Create an UST session and add it to the session ust list.
528 * Should *NOT* be called with RCU read-side lock held.
530 static int create_ust_session(struct ltt_session
*session
,
531 struct lttng_domain
*domain
)
534 struct ltt_ust_session
*lus
= NULL
;
538 assert(session
->consumer
);
540 switch (domain
->type
) {
541 case LTTNG_DOMAIN_JUL
:
542 case LTTNG_DOMAIN_LOG4J
:
543 case LTTNG_DOMAIN_PYTHON
:
544 case LTTNG_DOMAIN_UST
:
547 ERR("Unknown UST domain on create session %d", domain
->type
);
548 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
552 DBG("Creating UST session");
554 lus
= trace_ust_create_session(session
->id
);
556 ret
= LTTNG_ERR_UST_SESS_FAIL
;
560 lus
->uid
= session
->uid
;
561 lus
->gid
= session
->gid
;
562 lus
->output_traces
= session
->output_traces
;
563 lus
->snapshot_mode
= session
->snapshot_mode
;
564 lus
->live_timer_interval
= session
->live_timer
;
565 session
->ust_session
= lus
;
566 if (session
->shm_path
[0]) {
567 strncpy(lus
->root_shm_path
, session
->shm_path
,
568 sizeof(lus
->root_shm_path
));
569 lus
->root_shm_path
[sizeof(lus
->root_shm_path
) - 1] = '\0';
570 strncpy(lus
->shm_path
, session
->shm_path
,
571 sizeof(lus
->shm_path
));
572 lus
->shm_path
[sizeof(lus
->shm_path
) - 1] = '\0';
573 strncat(lus
->shm_path
, "/ust",
574 sizeof(lus
->shm_path
) - strlen(lus
->shm_path
) - 1);
576 /* Copy session output to the newly created UST session */
577 ret
= copy_session_consumer(domain
->type
, session
);
578 if (ret
!= LTTNG_OK
) {
586 session
->ust_session
= NULL
;
591 * Create a kernel tracer session then create the default channel.
593 static int create_kernel_session(struct ltt_session
*session
)
597 DBG("Creating kernel session");
599 ret
= kernel_create_session(session
, kernel_tracer_fd
);
601 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
605 /* Code flow safety */
606 assert(session
->kernel_session
);
608 /* Copy session output to the newly created Kernel session */
609 ret
= copy_session_consumer(LTTNG_DOMAIN_KERNEL
, session
);
610 if (ret
!= LTTNG_OK
) {
614 session
->kernel_session
->uid
= session
->uid
;
615 session
->kernel_session
->gid
= session
->gid
;
616 session
->kernel_session
->output_traces
= session
->output_traces
;
617 session
->kernel_session
->snapshot_mode
= session
->snapshot_mode
;
622 trace_kernel_destroy_session(session
->kernel_session
);
623 session
->kernel_session
= NULL
;
628 * Count number of session permitted by uid/gid.
630 static unsigned int lttng_sessions_count(uid_t uid
, gid_t gid
)
633 struct ltt_session
*session
;
634 const struct ltt_session_list
*session_list
= session_get_list();
636 DBG("Counting number of available session for UID %d GID %d",
638 cds_list_for_each_entry(session
, &session_list
->head
, list
) {
639 if (!session_get(session
)) {
642 session_lock(session
);
643 /* Only count the sessions the user can control. */
644 if (session_access_ok(session
, uid
, gid
) &&
645 !session
->destroyed
) {
648 session_unlock(session
);
649 session_put(session
);
654 static int receive_userspace_probe(struct command_ctx
*cmd_ctx
, int sock
,
655 int *sock_error
, struct lttng_event
*event
)
658 struct lttng_userspace_probe_location
*probe_location
;
659 const struct lttng_userspace_probe_location_lookup_method
*lookup
= NULL
;
660 struct lttng_dynamic_buffer probe_location_buffer
;
661 struct lttng_buffer_view buffer_view
;
664 * Create a buffer to store the serialized version of the probe
667 lttng_dynamic_buffer_init(&probe_location_buffer
);
668 ret
= lttng_dynamic_buffer_set_size(&probe_location_buffer
,
669 cmd_ctx
->lsm
->u
.enable
.userspace_probe_location_len
);
671 ret
= LTTNG_ERR_NOMEM
;
676 * Receive the probe location.
678 ret
= lttcomm_recv_unix_sock(sock
, probe_location_buffer
.data
,
679 probe_location_buffer
.size
);
681 DBG("Nothing recv() from client var len data... continuing");
683 lttng_dynamic_buffer_reset(&probe_location_buffer
);
684 ret
= LTTNG_ERR_PROBE_LOCATION_INVAL
;
688 buffer_view
= lttng_buffer_view_from_dynamic_buffer(
689 &probe_location_buffer
, 0, probe_location_buffer
.size
);
692 * Extract the probe location from the serialized version.
694 ret
= lttng_userspace_probe_location_create_from_buffer(
695 &buffer_view
, &probe_location
);
697 WARN("Failed to create a userspace probe location from the received buffer");
698 lttng_dynamic_buffer_reset( &probe_location_buffer
);
699 ret
= LTTNG_ERR_PROBE_LOCATION_INVAL
;
704 * Receive the file descriptor to the target binary from the client.
706 DBG("Receiving userspace probe target FD from client ...");
707 ret
= lttcomm_recv_fds_unix_sock(sock
, &fd
, 1);
709 DBG("Nothing recv() from client userspace probe fd... continuing");
711 ret
= LTTNG_ERR_PROBE_LOCATION_INVAL
;
716 * Set the file descriptor received from the client through the unix
717 * socket in the probe location.
719 lookup
= lttng_userspace_probe_location_get_lookup_method(probe_location
);
721 ret
= LTTNG_ERR_PROBE_LOCATION_INVAL
;
726 * From the kernel tracer's perspective, all userspace probe event types
727 * are all the same: a file and an offset.
729 switch (lttng_userspace_probe_location_lookup_method_get_type(lookup
)) {
730 case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF
:
731 ret
= lttng_userspace_probe_location_function_set_binary_fd(
734 case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT
:
735 ret
= lttng_userspace_probe_location_tracepoint_set_binary_fd(
739 ret
= LTTNG_ERR_PROBE_LOCATION_INVAL
;
744 ret
= LTTNG_ERR_PROBE_LOCATION_INVAL
;
748 /* Attach the probe location to the event. */
749 ret
= lttng_event_set_userspace_probe_location(event
, probe_location
);
751 ret
= LTTNG_ERR_PROBE_LOCATION_INVAL
;
755 lttng_dynamic_buffer_reset(&probe_location_buffer
);
761 * Join consumer thread
763 static int join_consumer_thread(struct consumer_data
*consumer_data
)
767 /* Consumer pid must be a real one. */
768 if (consumer_data
->pid
> 0) {
770 ret
= kill(consumer_data
->pid
, SIGTERM
);
772 PERROR("Error killing consumer daemon");
775 return pthread_join(consumer_data
->thread
, &status
);
/*
 * Version of setup_lttng_msg() without command header.
 *
 * Return 0 on success, negative value on error (see setup_lttng_msg()).
 */
static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
		void *payload_buf, size_t payload_len)
{
	return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
}
791 * Free memory of a command context structure.
793 static void clean_command_ctx(struct command_ctx
**cmd_ctx
)
795 DBG("Clean command context structure");
797 if ((*cmd_ctx
)->llm
) {
798 free((*cmd_ctx
)->llm
);
800 if ((*cmd_ctx
)->lsm
) {
801 free((*cmd_ctx
)->lsm
);
809 * Check if the current kernel tracer supports the session rotation feature.
810 * Return 1 if it does, 0 otherwise.
812 static int check_rotate_compatible(void)
816 if (kernel_tracer_version
.major
!= 2 || kernel_tracer_version
.minor
< 11) {
817 DBG("Kernel tracer version is not compatible with the rotation feature");
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code; -1 if the length is invalid.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
840 * Process the command requested by the lttng client within the command
841 * context structure. This function make sure that the return structure (llm)
842 * is set and ready for transmission before returning.
844 * Return any error encountered or 0 for success.
846 * "sock" is only used for special-case var. len data.
848 * Should *NOT* be called with RCU read-side lock held.
850 static int process_client_msg(struct command_ctx
*cmd_ctx
, int sock
,
854 int need_tracing_session
= 1;
857 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
859 assert(!rcu_read_ongoing());
863 switch (cmd_ctx
->lsm
->cmd_type
) {
864 case LTTNG_CREATE_SESSION
:
865 case LTTNG_CREATE_SESSION_SNAPSHOT
:
866 case LTTNG_CREATE_SESSION_LIVE
:
867 case LTTNG_DESTROY_SESSION
:
868 case LTTNG_LIST_SESSIONS
:
869 case LTTNG_LIST_DOMAINS
:
870 case LTTNG_START_TRACE
:
871 case LTTNG_STOP_TRACE
:
872 case LTTNG_DATA_PENDING
:
873 case LTTNG_SNAPSHOT_ADD_OUTPUT
:
874 case LTTNG_SNAPSHOT_DEL_OUTPUT
:
875 case LTTNG_SNAPSHOT_LIST_OUTPUT
:
876 case LTTNG_SNAPSHOT_RECORD
:
877 case LTTNG_SAVE_SESSION
:
878 case LTTNG_SET_SESSION_SHM_PATH
:
879 case LTTNG_REGENERATE_METADATA
:
880 case LTTNG_REGENERATE_STATEDUMP
:
881 case LTTNG_REGISTER_TRIGGER
:
882 case LTTNG_UNREGISTER_TRIGGER
:
883 case LTTNG_ROTATE_SESSION
:
884 case LTTNG_ROTATION_GET_INFO
:
885 case LTTNG_ROTATION_SET_SCHEDULE
:
886 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES
:
893 if (config
.no_kernel
&& need_domain
894 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
896 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
898 ret
= LTTNG_ERR_KERN_NA
;
903 /* Deny register consumer if we already have a spawned consumer. */
904 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_REGISTER_CONSUMER
) {
905 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
906 if (kconsumer_data
.pid
> 0) {
907 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
908 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
911 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
915 * Check for command that don't needs to allocate a returned payload. We do
916 * this here so we don't have to make the call for no payload at each
919 switch(cmd_ctx
->lsm
->cmd_type
) {
920 case LTTNG_LIST_SESSIONS
:
921 case LTTNG_LIST_TRACEPOINTS
:
922 case LTTNG_LIST_TRACEPOINT_FIELDS
:
923 case LTTNG_LIST_DOMAINS
:
924 case LTTNG_LIST_CHANNELS
:
925 case LTTNG_LIST_EVENTS
:
926 case LTTNG_LIST_SYSCALLS
:
927 case LTTNG_LIST_TRACKER_PIDS
:
928 case LTTNG_DATA_PENDING
:
929 case LTTNG_ROTATE_SESSION
:
930 case LTTNG_ROTATION_GET_INFO
:
931 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES
:
934 /* Setup lttng message with no payload */
935 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, NULL
, 0);
937 /* This label does not try to unlock the session */
938 goto init_setup_error
;
942 /* Commands that DO NOT need a session. */
943 switch (cmd_ctx
->lsm
->cmd_type
) {
944 case LTTNG_CREATE_SESSION
:
945 case LTTNG_CREATE_SESSION_SNAPSHOT
:
946 case LTTNG_CREATE_SESSION_LIVE
:
947 case LTTNG_LIST_SESSIONS
:
948 case LTTNG_LIST_TRACEPOINTS
:
949 case LTTNG_LIST_SYSCALLS
:
950 case LTTNG_LIST_TRACEPOINT_FIELDS
:
951 case LTTNG_SAVE_SESSION
:
952 case LTTNG_REGISTER_TRIGGER
:
953 case LTTNG_UNREGISTER_TRIGGER
:
954 need_tracing_session
= 0;
957 DBG("Getting session %s by name", cmd_ctx
->lsm
->session
.name
);
959 * We keep the session list lock across _all_ commands
960 * for now, because the per-session lock does not
961 * handle teardown properly.
964 cmd_ctx
->session
= session_find_by_name(cmd_ctx
->lsm
->session
.name
);
965 if (cmd_ctx
->session
== NULL
) {
966 ret
= LTTNG_ERR_SESS_NOT_FOUND
;
969 /* Acquire lock for the session */
970 session_lock(cmd_ctx
->session
);
976 * Commands that need a valid session but should NOT create one if none
977 * exists. Instead of creating one and destroying it when the command is
978 * handled, process that right before so we save some round trip in useless
981 switch (cmd_ctx
->lsm
->cmd_type
) {
982 case LTTNG_DISABLE_CHANNEL
:
983 case LTTNG_DISABLE_EVENT
:
984 switch (cmd_ctx
->lsm
->domain
.type
) {
985 case LTTNG_DOMAIN_KERNEL
:
986 if (!cmd_ctx
->session
->kernel_session
) {
987 ret
= LTTNG_ERR_NO_CHANNEL
;
991 case LTTNG_DOMAIN_JUL
:
992 case LTTNG_DOMAIN_LOG4J
:
993 case LTTNG_DOMAIN_PYTHON
:
994 case LTTNG_DOMAIN_UST
:
995 if (!cmd_ctx
->session
->ust_session
) {
996 ret
= LTTNG_ERR_NO_CHANNEL
;
1001 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1013 * Check domain type for specific "pre-action".
1015 switch (cmd_ctx
->lsm
->domain
.type
) {
1016 case LTTNG_DOMAIN_KERNEL
:
1018 ret
= LTTNG_ERR_NEED_ROOT_SESSIOND
;
1022 /* Consumer is in an ERROR state. Report back to client */
1023 if (uatomic_read(&kernel_consumerd_state
) == CONSUMER_ERROR
) {
1024 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
1028 /* Need a session for kernel command */
1029 if (need_tracing_session
) {
1030 if (cmd_ctx
->session
->kernel_session
== NULL
) {
1031 ret
= create_kernel_session(cmd_ctx
->session
);
1033 ret
= LTTNG_ERR_KERN_SESS_FAIL
;
1038 /* Start the kernel consumer daemon */
1039 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
1040 if (kconsumer_data
.pid
== 0 &&
1041 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
1042 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
1043 ret
= start_consumerd(&kconsumer_data
);
1045 ret
= LTTNG_ERR_KERN_CONSUMER_FAIL
;
1048 uatomic_set(&kernel_consumerd_state
, CONSUMER_STARTED
);
1050 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
1054 * The consumer was just spawned so we need to add the socket to
1055 * the consumer output of the session if exist.
1057 ret
= consumer_create_socket(&kconsumer_data
,
1058 cmd_ctx
->session
->kernel_session
->consumer
);
1065 case LTTNG_DOMAIN_JUL
:
1066 case LTTNG_DOMAIN_LOG4J
:
1067 case LTTNG_DOMAIN_PYTHON
:
1068 case LTTNG_DOMAIN_UST
:
1070 if (!ust_app_supported()) {
1071 ret
= LTTNG_ERR_NO_UST
;
1074 /* Consumer is in an ERROR state. Report back to client */
1075 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
1076 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
1080 if (need_tracing_session
) {
1081 /* Create UST session if none exist. */
1082 if (cmd_ctx
->session
->ust_session
== NULL
) {
1083 ret
= create_ust_session(cmd_ctx
->session
,
1084 &cmd_ctx
->lsm
->domain
);
1085 if (ret
!= LTTNG_OK
) {
1090 /* Start the UST consumer daemons */
1092 pthread_mutex_lock(&ustconsumer64_data
.pid_mutex
);
1093 if (config
.consumerd64_bin_path
.value
&&
1094 ustconsumer64_data
.pid
== 0 &&
1095 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
1096 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
1097 ret
= start_consumerd(&ustconsumer64_data
);
1099 ret
= LTTNG_ERR_UST_CONSUMER64_FAIL
;
1100 uatomic_set(&ust_consumerd64_fd
, -EINVAL
);
1104 uatomic_set(&ust_consumerd64_fd
, ustconsumer64_data
.cmd_sock
);
1105 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
1107 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
1111 * Setup socket for consumer 64 bit. No need for atomic access
1112 * since it was set above and can ONLY be set in this thread.
1114 ret
= consumer_create_socket(&ustconsumer64_data
,
1115 cmd_ctx
->session
->ust_session
->consumer
);
1121 pthread_mutex_lock(&ustconsumer32_data
.pid_mutex
);
1122 if (config
.consumerd32_bin_path
.value
&&
1123 ustconsumer32_data
.pid
== 0 &&
1124 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
1125 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
1126 ret
= start_consumerd(&ustconsumer32_data
);
1128 ret
= LTTNG_ERR_UST_CONSUMER32_FAIL
;
1129 uatomic_set(&ust_consumerd32_fd
, -EINVAL
);
1133 uatomic_set(&ust_consumerd32_fd
, ustconsumer32_data
.cmd_sock
);
1134 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
1136 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
1140 * Setup socket for consumer 32 bit. No need for atomic access
1141 * since it was set above and can ONLY be set in this thread.
1143 ret
= consumer_create_socket(&ustconsumer32_data
,
1144 cmd_ctx
->session
->ust_session
->consumer
);
1156 /* Validate consumer daemon state when start/stop trace command */
1157 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_START_TRACE
||
1158 cmd_ctx
->lsm
->cmd_type
== LTTNG_STOP_TRACE
) {
1159 switch (cmd_ctx
->lsm
->domain
.type
) {
1160 case LTTNG_DOMAIN_NONE
:
1162 case LTTNG_DOMAIN_JUL
:
1163 case LTTNG_DOMAIN_LOG4J
:
1164 case LTTNG_DOMAIN_PYTHON
:
1165 case LTTNG_DOMAIN_UST
:
1166 if (uatomic_read(&ust_consumerd_state
) != CONSUMER_STARTED
) {
1167 ret
= LTTNG_ERR_NO_USTCONSUMERD
;
1171 case LTTNG_DOMAIN_KERNEL
:
1172 if (uatomic_read(&kernel_consumerd_state
) != CONSUMER_STARTED
) {
1173 ret
= LTTNG_ERR_NO_KERNCONSUMERD
;
1178 ret
= LTTNG_ERR_UNKNOWN_DOMAIN
;
1184 * Check that the UID or GID match that of the tracing session.
1185 * The root user can interact with all sessions.
1187 if (need_tracing_session
) {
1188 if (!session_access_ok(cmd_ctx
->session
,
1189 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
1190 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
)) ||
1191 cmd_ctx
->session
->destroyed
) {
1192 ret
= LTTNG_ERR_EPERM
;
1198 * Send relayd information to consumer as soon as we have a domain and a
1201 if (cmd_ctx
->session
&& need_domain
) {
1203 * Setup relayd if not done yet. If the relayd information was already
1204 * sent to the consumer, this call will gracefully return.
1206 ret
= cmd_setup_relayd(cmd_ctx
->session
);
1207 if (ret
!= LTTNG_OK
) {
1212 /* Process by command type */
1213 switch (cmd_ctx
->lsm
->cmd_type
) {
1214 case LTTNG_ADD_CONTEXT
:
1217 * An LTTNG_ADD_CONTEXT command might have a supplementary
1218 * payload if the context being added is an application context.
1220 if (cmd_ctx
->lsm
->u
.context
.ctx
.ctx
==
1221 LTTNG_EVENT_CONTEXT_APP_CONTEXT
) {
1222 char *provider_name
= NULL
, *context_name
= NULL
;
1223 size_t provider_name_len
=
1224 cmd_ctx
->lsm
->u
.context
.provider_name_len
;
1225 size_t context_name_len
=
1226 cmd_ctx
->lsm
->u
.context
.context_name_len
;
1228 if (provider_name_len
== 0 || context_name_len
== 0) {
1230 * Application provider and context names MUST
1233 ret
= -LTTNG_ERR_INVALID
;
1237 provider_name
= zmalloc(provider_name_len
+ 1);
1238 if (!provider_name
) {
1239 ret
= -LTTNG_ERR_NOMEM
;
1242 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.provider_name
=
1245 context_name
= zmalloc(context_name_len
+ 1);
1246 if (!context_name
) {
1247 ret
= -LTTNG_ERR_NOMEM
;
1248 goto error_add_context
;
1250 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.ctx_name
=
1253 ret
= lttcomm_recv_unix_sock(sock
, provider_name
,
1256 goto error_add_context
;
1259 ret
= lttcomm_recv_unix_sock(sock
, context_name
,
1262 goto error_add_context
;
1267 * cmd_add_context assumes ownership of the provider and context
1270 ret
= cmd_add_context(cmd_ctx
->session
,
1271 cmd_ctx
->lsm
->domain
.type
,
1272 cmd_ctx
->lsm
->u
.context
.channel_name
,
1273 &cmd_ctx
->lsm
->u
.context
.ctx
,
1274 kernel_poll_pipe
[1]);
1276 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.provider_name
= NULL
;
1277 cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.ctx_name
= NULL
;
1279 free(cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.provider_name
);
1280 free(cmd_ctx
->lsm
->u
.context
.ctx
.u
.app_ctx
.ctx_name
);
1286 case LTTNG_DISABLE_CHANNEL
:
1288 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
1289 cmd_ctx
->lsm
->u
.disable
.channel_name
);
1292 case LTTNG_DISABLE_EVENT
:
1296 * FIXME: handle filter; for now we just receive the filter's
1297 * bytecode along with the filter expression which are sent by
1298 * liblttng-ctl and discard them.
1300 * This fixes an issue where the client may block while sending
1301 * the filter payload and encounter an error because the session
1302 * daemon closes the socket without ever handling this data.
1304 size_t count
= cmd_ctx
->lsm
->u
.disable
.expression_len
+
1305 cmd_ctx
->lsm
->u
.disable
.bytecode_len
;
1308 char data
[LTTNG_FILTER_MAX_LEN
];
1310 DBG("Discarding disable event command payload of size %zu", count
);
1312 ret
= lttcomm_recv_unix_sock(sock
, data
,
1313 count
> sizeof(data
) ? sizeof(data
) : count
);
1318 count
-= (size_t) ret
;
1321 /* FIXME: passing packed structure to non-packed pointer */
1322 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
1323 cmd_ctx
->lsm
->u
.disable
.channel_name
,
1324 &cmd_ctx
->lsm
->u
.disable
.event
);
1327 case LTTNG_ENABLE_CHANNEL
:
1329 cmd_ctx
->lsm
->u
.channel
.chan
.attr
.extended
.ptr
=
1330 (struct lttng_channel_extended
*) &cmd_ctx
->lsm
->u
.channel
.extended
;
1331 ret
= cmd_enable_channel(cmd_ctx
->session
, &cmd_ctx
->lsm
->domain
,
1332 &cmd_ctx
->lsm
->u
.channel
.chan
,
1333 kernel_poll_pipe
[1]);
1336 case LTTNG_TRACK_PID
:
1338 ret
= cmd_track_pid(cmd_ctx
->session
,
1339 cmd_ctx
->lsm
->domain
.type
,
1340 cmd_ctx
->lsm
->u
.pid_tracker
.pid
);
1343 case LTTNG_UNTRACK_PID
:
1345 ret
= cmd_untrack_pid(cmd_ctx
->session
,
1346 cmd_ctx
->lsm
->domain
.type
,
1347 cmd_ctx
->lsm
->u
.pid_tracker
.pid
);
1350 case LTTNG_ENABLE_EVENT
:
1352 struct lttng_event
*ev
= NULL
;
1353 struct lttng_event_exclusion
*exclusion
= NULL
;
1354 struct lttng_filter_bytecode
*bytecode
= NULL
;
1355 char *filter_expression
= NULL
;
1357 /* Handle exclusion events and receive it from the client. */
1358 if (cmd_ctx
->lsm
->u
.enable
.exclusion_count
> 0) {
1359 size_t count
= cmd_ctx
->lsm
->u
.enable
.exclusion_count
;
1361 exclusion
= zmalloc(sizeof(struct lttng_event_exclusion
) +
1362 (count
* LTTNG_SYMBOL_NAME_LEN
));
1364 ret
= LTTNG_ERR_EXCLUSION_NOMEM
;
1368 DBG("Receiving var len exclusion event list from client ...");
1369 exclusion
->count
= count
;
1370 ret
= lttcomm_recv_unix_sock(sock
, exclusion
->names
,
1371 count
* LTTNG_SYMBOL_NAME_LEN
);
1373 DBG("Nothing recv() from client var len data... continuing");
1376 ret
= LTTNG_ERR_EXCLUSION_INVAL
;
1381 /* Get filter expression from client. */
1382 if (cmd_ctx
->lsm
->u
.enable
.expression_len
> 0) {
1383 size_t expression_len
=
1384 cmd_ctx
->lsm
->u
.enable
.expression_len
;
1386 if (expression_len
> LTTNG_FILTER_MAX_LEN
) {
1387 ret
= LTTNG_ERR_FILTER_INVAL
;
1392 filter_expression
= zmalloc(expression_len
);
1393 if (!filter_expression
) {
1395 ret
= LTTNG_ERR_FILTER_NOMEM
;
1399 /* Receive var. len. data */
1400 DBG("Receiving var len filter's expression from client ...");
1401 ret
= lttcomm_recv_unix_sock(sock
, filter_expression
,
1404 DBG("Nothing recv() from client var len data... continuing");
1406 free(filter_expression
);
1408 ret
= LTTNG_ERR_FILTER_INVAL
;
1413 /* Handle filter and get bytecode from client. */
1414 if (cmd_ctx
->lsm
->u
.enable
.bytecode_len
> 0) {
1415 size_t bytecode_len
= cmd_ctx
->lsm
->u
.enable
.bytecode_len
;
1417 if (bytecode_len
> LTTNG_FILTER_MAX_LEN
) {
1418 ret
= LTTNG_ERR_FILTER_INVAL
;
1419 free(filter_expression
);
1424 bytecode
= zmalloc(bytecode_len
);
1426 free(filter_expression
);
1428 ret
= LTTNG_ERR_FILTER_NOMEM
;
1432 /* Receive var. len. data */
1433 DBG("Receiving var len filter's bytecode from client ...");
1434 ret
= lttcomm_recv_unix_sock(sock
, bytecode
, bytecode_len
);
1436 DBG("Nothing recv() from client var len data... continuing");
1438 free(filter_expression
);
1441 ret
= LTTNG_ERR_FILTER_INVAL
;
1445 if ((bytecode
->len
+ sizeof(*bytecode
)) != bytecode_len
) {
1446 free(filter_expression
);
1449 ret
= LTTNG_ERR_FILTER_INVAL
;
1454 ev
= lttng_event_copy(&cmd_ctx
->lsm
->u
.enable
.event
);
1456 DBG("Failed to copy event: %s",
1457 cmd_ctx
->lsm
->u
.enable
.event
.name
);
1458 free(filter_expression
);
1461 ret
= LTTNG_ERR_NOMEM
;
1466 if (cmd_ctx
->lsm
->u
.enable
.userspace_probe_location_len
> 0) {
1467 /* Expect a userspace probe description. */
1468 ret
= receive_userspace_probe(cmd_ctx
, sock
, sock_error
, ev
);
1470 free(filter_expression
);
1473 lttng_event_destroy(ev
);
1478 ret
= cmd_enable_event(cmd_ctx
->session
, &cmd_ctx
->lsm
->domain
,
1479 cmd_ctx
->lsm
->u
.enable
.channel_name
,
1481 filter_expression
, bytecode
, exclusion
,
1482 kernel_poll_pipe
[1]);
1483 lttng_event_destroy(ev
);
1486 case LTTNG_LIST_TRACEPOINTS
:
1488 struct lttng_event
*events
;
1491 session_lock_list();
1492 nb_events
= cmd_list_tracepoints(cmd_ctx
->lsm
->domain
.type
, &events
);
1493 session_unlock_list();
1494 if (nb_events
< 0) {
1495 /* Return value is a negative lttng_error_code. */
1501 * Setup lttng message with payload size set to the event list size in
1502 * bytes and then copy list into the llm payload.
1504 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, events
,
1505 sizeof(struct lttng_event
) * nb_events
);
1515 case LTTNG_LIST_TRACEPOINT_FIELDS
:
1517 struct lttng_event_field
*fields
;
1520 session_lock_list();
1521 nb_fields
= cmd_list_tracepoint_fields(cmd_ctx
->lsm
->domain
.type
,
1523 session_unlock_list();
1524 if (nb_fields
< 0) {
1525 /* Return value is a negative lttng_error_code. */
1531 * Setup lttng message with payload size set to the event list size in
1532 * bytes and then copy list into the llm payload.
1534 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, fields
,
1535 sizeof(struct lttng_event_field
) * nb_fields
);
1545 case LTTNG_LIST_SYSCALLS
:
1547 struct lttng_event
*events
;
1550 nb_events
= cmd_list_syscalls(&events
);
1551 if (nb_events
< 0) {
1552 /* Return value is a negative lttng_error_code. */
1558 * Setup lttng message with payload size set to the event list size in
1559 * bytes and then copy list into the llm payload.
1561 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, events
,
1562 sizeof(struct lttng_event
) * nb_events
);
1572 case LTTNG_LIST_TRACKER_PIDS
:
1574 int32_t *pids
= NULL
;
1577 nr_pids
= cmd_list_tracker_pids(cmd_ctx
->session
,
1578 cmd_ctx
->lsm
->domain
.type
, &pids
);
1580 /* Return value is a negative lttng_error_code. */
1586 * Setup lttng message with payload size set to the event list size in
1587 * bytes and then copy list into the llm payload.
1589 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, pids
,
1590 sizeof(int32_t) * nr_pids
);
1600 case LTTNG_SET_CONSUMER_URI
:
1603 struct lttng_uri
*uris
;
1605 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
1606 len
= nb_uri
* sizeof(struct lttng_uri
);
1609 ret
= LTTNG_ERR_INVALID
;
1613 uris
= zmalloc(len
);
1615 ret
= LTTNG_ERR_FATAL
;
1619 /* Receive variable len data */
1620 DBG("Receiving %zu URI(s) from client ...", nb_uri
);
1621 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
1623 DBG("No URIs received from client... continuing");
1625 ret
= LTTNG_ERR_SESSION_FAIL
;
1630 ret
= cmd_set_consumer_uri(cmd_ctx
->session
, nb_uri
, uris
);
1632 if (ret
!= LTTNG_OK
) {
1639 case LTTNG_START_TRACE
:
1642 * On the first start, if we have a kernel session and we have
1643 * enabled time or size-based rotations, we have to make sure
1644 * the kernel tracer supports it.
1646 if (!cmd_ctx
->session
->has_been_started
&& \
1647 cmd_ctx
->session
->kernel_session
&& \
1648 (cmd_ctx
->session
->rotate_timer_period
|| \
1649 cmd_ctx
->session
->rotate_size
) && \
1650 !check_rotate_compatible()) {
1651 DBG("Kernel tracer version is not compatible with the rotation feature");
1652 ret
= LTTNG_ERR_ROTATION_WRONG_VERSION
;
1655 ret
= cmd_start_trace(cmd_ctx
->session
);
1658 case LTTNG_STOP_TRACE
:
1660 ret
= cmd_stop_trace(cmd_ctx
->session
);
1663 case LTTNG_CREATE_SESSION
:
1666 struct lttng_uri
*uris
= NULL
;
1668 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
1669 len
= nb_uri
* sizeof(struct lttng_uri
);
1672 uris
= zmalloc(len
);
1674 ret
= LTTNG_ERR_FATAL
;
1678 /* Receive variable len data */
1679 DBG("Waiting for %zu URIs from client ...", nb_uri
);
1680 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
1682 DBG("No URIs received from client... continuing");
1684 ret
= LTTNG_ERR_SESSION_FAIL
;
1689 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
1690 DBG("Creating session with ONE network URI is a bad call");
1691 ret
= LTTNG_ERR_SESSION_FAIL
;
1697 ret
= cmd_create_session_uri(cmd_ctx
->lsm
->session
.name
, uris
, nb_uri
,
1698 &cmd_ctx
->creds
, 0);
1704 case LTTNG_DESTROY_SESSION
:
1706 ret
= cmd_destroy_session(cmd_ctx
->session
,
1707 notification_thread_handle
);
1710 case LTTNG_LIST_DOMAINS
:
1713 struct lttng_domain
*domains
= NULL
;
1715 nb_dom
= cmd_list_domains(cmd_ctx
->session
, &domains
);
1717 /* Return value is a negative lttng_error_code. */
1722 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, domains
,
1723 nb_dom
* sizeof(struct lttng_domain
));
1733 case LTTNG_LIST_CHANNELS
:
1735 ssize_t payload_size
;
1736 struct lttng_channel
*channels
= NULL
;
1738 payload_size
= cmd_list_channels(cmd_ctx
->lsm
->domain
.type
,
1739 cmd_ctx
->session
, &channels
);
1740 if (payload_size
< 0) {
1741 /* Return value is a negative lttng_error_code. */
1742 ret
= -payload_size
;
1746 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, channels
,
1757 case LTTNG_LIST_EVENTS
:
1760 struct lttng_event
*events
= NULL
;
1761 struct lttcomm_event_command_header cmd_header
;
1764 memset(&cmd_header
, 0, sizeof(cmd_header
));
1765 /* Extended infos are included at the end of events */
1766 nb_event
= cmd_list_events(cmd_ctx
->lsm
->domain
.type
,
1767 cmd_ctx
->session
, cmd_ctx
->lsm
->u
.list
.channel_name
,
1768 &events
, &total_size
);
1771 /* Return value is a negative lttng_error_code. */
1776 cmd_header
.nb_events
= nb_event
;
1777 ret
= setup_lttng_msg(cmd_ctx
, events
, total_size
,
1778 &cmd_header
, sizeof(cmd_header
));
1788 case LTTNG_LIST_SESSIONS
:
1790 unsigned int nr_sessions
;
1791 void *sessions_payload
;
1794 session_lock_list();
1795 nr_sessions
= lttng_sessions_count(
1796 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
1797 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
1798 payload_len
= sizeof(struct lttng_session
) * nr_sessions
;
1799 sessions_payload
= zmalloc(payload_len
);
1801 if (!sessions_payload
) {
1802 session_unlock_list();
1807 cmd_list_lttng_sessions(sessions_payload
,
1808 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
1809 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
1810 session_unlock_list();
1812 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, sessions_payload
,
1814 free(sessions_payload
);
1823 case LTTNG_REGISTER_CONSUMER
:
1825 struct consumer_data
*cdata
;
1827 switch (cmd_ctx
->lsm
->domain
.type
) {
1828 case LTTNG_DOMAIN_KERNEL
:
1829 cdata
= &kconsumer_data
;
1832 ret
= LTTNG_ERR_UND
;
1836 ret
= cmd_register_consumer(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
1837 cmd_ctx
->lsm
->u
.reg
.path
, cdata
);
1840 case LTTNG_DATA_PENDING
:
1843 uint8_t pending_ret_byte
;
1845 pending_ret
= cmd_data_pending(cmd_ctx
->session
);
1850 * This function may returns 0 or 1 to indicate whether or not
1851 * there is data pending. In case of error, it should return an
1852 * LTTNG_ERR code. However, some code paths may still return
1853 * a nondescript error code, which we handle by returning an
1856 if (pending_ret
== 0 || pending_ret
== 1) {
1858 * ret will be set to LTTNG_OK at the end of
1861 } else if (pending_ret
< 0) {
1862 ret
= LTTNG_ERR_UNK
;
1869 pending_ret_byte
= (uint8_t) pending_ret
;
1871 /* 1 byte to return whether or not data is pending */
1872 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
,
1873 &pending_ret_byte
, 1);
1882 case LTTNG_SNAPSHOT_ADD_OUTPUT
:
1884 struct lttcomm_lttng_output_id reply
;
1886 ret
= cmd_snapshot_add_output(cmd_ctx
->session
,
1887 &cmd_ctx
->lsm
->u
.snapshot_output
.output
, &reply
.id
);
1888 if (ret
!= LTTNG_OK
) {
1892 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, &reply
,
1898 /* Copy output list into message payload */
1902 case LTTNG_SNAPSHOT_DEL_OUTPUT
:
1904 ret
= cmd_snapshot_del_output(cmd_ctx
->session
,
1905 &cmd_ctx
->lsm
->u
.snapshot_output
.output
);
1908 case LTTNG_SNAPSHOT_LIST_OUTPUT
:
1911 struct lttng_snapshot_output
*outputs
= NULL
;
1913 nb_output
= cmd_snapshot_list_outputs(cmd_ctx
->session
, &outputs
);
1914 if (nb_output
< 0) {
1919 assert((nb_output
> 0 && outputs
) || nb_output
== 0);
1920 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, outputs
,
1921 nb_output
* sizeof(struct lttng_snapshot_output
));
1931 case LTTNG_SNAPSHOT_RECORD
:
1933 ret
= cmd_snapshot_record(cmd_ctx
->session
,
1934 &cmd_ctx
->lsm
->u
.snapshot_record
.output
,
1935 cmd_ctx
->lsm
->u
.snapshot_record
.wait
);
1938 case LTTNG_CREATE_SESSION_SNAPSHOT
:
1941 struct lttng_uri
*uris
= NULL
;
1943 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
1944 len
= nb_uri
* sizeof(struct lttng_uri
);
1947 uris
= zmalloc(len
);
1949 ret
= LTTNG_ERR_FATAL
;
1953 /* Receive variable len data */
1954 DBG("Waiting for %zu URIs from client ...", nb_uri
);
1955 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
1957 DBG("No URIs received from client... continuing");
1959 ret
= LTTNG_ERR_SESSION_FAIL
;
1964 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
1965 DBG("Creating session with ONE network URI is a bad call");
1966 ret
= LTTNG_ERR_SESSION_FAIL
;
1972 ret
= cmd_create_session_snapshot(cmd_ctx
->lsm
->session
.name
, uris
,
1973 nb_uri
, &cmd_ctx
->creds
);
1977 case LTTNG_CREATE_SESSION_LIVE
:
1980 struct lttng_uri
*uris
= NULL
;
1982 nb_uri
= cmd_ctx
->lsm
->u
.uri
.size
;
1983 len
= nb_uri
* sizeof(struct lttng_uri
);
1986 uris
= zmalloc(len
);
1988 ret
= LTTNG_ERR_FATAL
;
1992 /* Receive variable len data */
1993 DBG("Waiting for %zu URIs from client ...", nb_uri
);
1994 ret
= lttcomm_recv_unix_sock(sock
, uris
, len
);
1996 DBG("No URIs received from client... continuing");
1998 ret
= LTTNG_ERR_SESSION_FAIL
;
2003 if (nb_uri
== 1 && uris
[0].dtype
!= LTTNG_DST_PATH
) {
2004 DBG("Creating session with ONE network URI is a bad call");
2005 ret
= LTTNG_ERR_SESSION_FAIL
;
2011 ret
= cmd_create_session_uri(cmd_ctx
->lsm
->session
.name
, uris
,
2012 nb_uri
, &cmd_ctx
->creds
, cmd_ctx
->lsm
->u
.session_live
.timer_interval
);
2016 case LTTNG_SAVE_SESSION
:
2018 ret
= cmd_save_sessions(&cmd_ctx
->lsm
->u
.save_session
.attr
,
2022 case LTTNG_SET_SESSION_SHM_PATH
:
2024 ret
= cmd_set_session_shm_path(cmd_ctx
->session
,
2025 cmd_ctx
->lsm
->u
.set_shm_path
.shm_path
);
2028 case LTTNG_REGENERATE_METADATA
:
2030 ret
= cmd_regenerate_metadata(cmd_ctx
->session
);
2033 case LTTNG_REGENERATE_STATEDUMP
:
2035 ret
= cmd_regenerate_statedump(cmd_ctx
->session
);
2038 case LTTNG_REGISTER_TRIGGER
:
2040 ret
= cmd_register_trigger(cmd_ctx
, sock
,
2041 notification_thread_handle
);
2044 case LTTNG_UNREGISTER_TRIGGER
:
2046 ret
= cmd_unregister_trigger(cmd_ctx
, sock
,
2047 notification_thread_handle
);
2050 case LTTNG_ROTATE_SESSION
:
2052 struct lttng_rotate_session_return rotate_return
;
2054 DBG("Client rotate session \"%s\"", cmd_ctx
->session
->name
);
2056 memset(&rotate_return
, 0, sizeof(rotate_return
));
2057 if (cmd_ctx
->session
->kernel_session
&& !check_rotate_compatible()) {
2058 DBG("Kernel tracer version is not compatible with the rotation feature");
2059 ret
= LTTNG_ERR_ROTATION_WRONG_VERSION
;
2063 ret
= cmd_rotate_session(cmd_ctx
->session
, &rotate_return
);
2069 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, &rotate_return
,
2070 sizeof(rotate_return
));
2079 case LTTNG_ROTATION_GET_INFO
:
2081 struct lttng_rotation_get_info_return get_info_return
;
2083 memset(&get_info_return
, 0, sizeof(get_info_return
));
2084 ret
= cmd_rotate_get_info(cmd_ctx
->session
, &get_info_return
,
2085 cmd_ctx
->lsm
->u
.get_rotation_info
.rotation_id
);
2091 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, &get_info_return
,
2092 sizeof(get_info_return
));
2101 case LTTNG_ROTATION_SET_SCHEDULE
:
2104 enum lttng_rotation_schedule_type schedule_type
;
2107 if (cmd_ctx
->session
->kernel_session
&& !check_rotate_compatible()) {
2108 DBG("Kernel tracer version does not support session rotations");
2109 ret
= LTTNG_ERR_ROTATION_WRONG_VERSION
;
2113 set_schedule
= cmd_ctx
->lsm
->u
.rotation_set_schedule
.set
== 1;
2114 schedule_type
= (enum lttng_rotation_schedule_type
) cmd_ctx
->lsm
->u
.rotation_set_schedule
.type
;
2115 value
= cmd_ctx
->lsm
->u
.rotation_set_schedule
.value
;
2117 ret
= cmd_rotation_set_schedule(cmd_ctx
->session
,
2121 notification_thread_handle
);
2122 if (ret
!= LTTNG_OK
) {
2128 case LTTNG_SESSION_LIST_ROTATION_SCHEDULES
:
2130 struct lttng_session_list_schedules_return schedules
= {
2131 .periodic
.set
= !!cmd_ctx
->session
->rotate_timer_period
,
2132 .periodic
.value
= cmd_ctx
->session
->rotate_timer_period
,
2133 .size
.set
= !!cmd_ctx
->session
->rotate_size
,
2134 .size
.value
= cmd_ctx
->session
->rotate_size
,
2137 ret
= setup_lttng_msg_no_cmd_header(cmd_ctx
, &schedules
,
2148 ret
= LTTNG_ERR_UND
;
2153 if (cmd_ctx
->llm
== NULL
) {
2154 DBG("Missing llm structure. Allocating one.");
2155 if (setup_lttng_msg_no_cmd_header(cmd_ctx
, NULL
, 0) < 0) {
2159 /* Set return code */
2160 cmd_ctx
->llm
->ret_code
= ret
;
2162 if (cmd_ctx
->session
) {
2163 session_unlock(cmd_ctx
->session
);
2164 session_put(cmd_ctx
->session
);
2166 if (need_tracing_session
) {
2167 session_unlock_list();
2170 assert(!rcu_read_ongoing());
/*
 * Create and configure the client command unix socket.
 *
 * NOTE(review): this chunk is a mangled extraction — the original file's
 * line numbers are fused into the text and several lines were dropped
 * (the body of the two fatal error branches, the umask restoration and
 * the final return are not visible; original numbering jumps 2182->2187
 * and 2198->2202). Only the visible statements are documented here.
 *
 * Visible behavior: creates a unix socket at
 * config.client_unix_sock_path.value, sets FD_CLOEXEC on it (failure is
 * only logged, not fatal), and chmods the socket file to 0660. The saved
 * old_umask strongly suggests the umask is restored on a dropped line —
 * TODO confirm against the full source.
 */
2174 static int create_client_sock(void)
2176 int ret
, client_sock
;
/* Clear the umask so the chmod below fully controls the file mode. */
2177 const mode_t old_umask
= umask(0);
2179 /* Create client tool unix socket */
2180 client_sock
= lttcomm_create_unix_sock(config
.client_unix_sock_path
.value
);
/* Fatal: without the client socket the command thread cannot serve. */
2181 if (client_sock
< 0) {
2182 ERR("Create unix sock failed: %s", config
.client_unix_sock_path
.value
);
2187 /* Set the cloexec flag */
2188 ret
= utils_set_fd_cloexec(client_sock
);
/* Non-fatal: only warn that children may inherit the fd across exec(). */
2190 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
2191 "Continuing but note that the consumer daemon will have a "
2192 "reference to this socket on exec()", client_sock
);
2195 /* File permission MUST be 660 */
2196 ret
= chmod(config
.client_unix_sock_path
.value
, S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
2198 ERR("Set file permissions failed: %s", config
.client_unix_sock_path
.value
);
2202 DBG("Created client socket (fd = %i)", client_sock
);
/*
 * Thread cleanup callback for the client thread: destroy the quit pipe
 * received as the opaque thread data. Registered as the cleanup handler
 * in launch_client_thread() via lttng_thread_create().
 *
 * NOTE(review): the enclosing braces were dropped by the extraction;
 * the visible statements are reproduced verbatim.
 */
2209 static void cleanup_client_thread(void *data
)
2211 struct lttng_pipe
*quit_pipe
= data
;
/* lttng_pipe_destroy() closes both ends and frees the pipe object. */
2213 lttng_pipe_destroy(quit_pipe
);
2217 * This thread manage all clients request using the unix client socket for
/*
 * Client command thread entry point.
 *
 * NOTE(review): this chunk is a mangled extraction — the original file's
 * line numbers are fused into the text and many lines were dropped
 * (braces, goto targets, error labels, several loop/exit conditions).
 * Comments below only describe what the visible statements establish.
 *
 * Visible flow: create and listen on the client unix socket, register
 * with the health subsystem, build a 2-fd poll set (client socket +
 * thread quit pipe), signal readiness via set_thread_state_running(),
 * then loop: poll, accept a connection, receive a struct
 * lttcomm_session_msg along with peer credentials, dispatch it through
 * process_client_msg() inside an RCU online section, run any queued
 * command completion handler, and send the lttcomm_lttng_msg response
 * back to the client. Teardown (labels dropped) cleans the poll set and
 * command context, unlinks/closes the socket, unregisters from health
 * and RCU, and joins the kernel/ust32/ust64 consumer threads.
 */
2220 static void *thread_manage_clients(void *data
)
2222 int sock
= -1, ret
, i
, pollfd
, err
= -1;
2224 uint32_t revents
, nb_fd
;
2225 struct command_ctx
*cmd_ctx
= NULL
;
2226 struct lttng_poll_event events
;
2227 int client_sock
= -1;
/* The opaque thread data is the quit pipe created by the launcher. */
2228 struct lttng_pipe
*quit_pipe
= data
;
2229 const int thread_quit_pipe_fd
= lttng_pipe_get_readfd(quit_pipe
);
2231 DBG("[thread] Manage client started");
/* Record whether the daemon runs as root (global read elsewhere). */
2233 is_root
= (getuid() == 0);
2235 client_sock
= create_client_sock();
2236 if (client_sock
< 0) {
2240 rcu_register_thread();
2242 health_register(health_sessiond
, HEALTH_SESSIOND_TYPE_CMD
);
2244 health_code_update();
2246 ret
= lttcomm_listen_unix_sock(client_sock
);
2252 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
2253 * more will be added to this poll set.
2255 ret
= lttng_poll_create(&events
, 2, LTTNG_CLOEXEC
);
2257 goto error_create_poll
;
2260 /* Add the application registration socket */
2261 ret
= lttng_poll_add(&events
, client_sock
, LPOLLIN
| LPOLLPRI
);
2266 /* Add thread quit pipe */
2267 ret
= lttng_poll_add(&events
, thread_quit_pipe_fd
, LPOLLIN
| LPOLLERR
);
2272 /* This testpoint is after we signal readiness to the parent. */
2273 if (testpoint(sessiond_thread_manage_clients
)) {
2277 if (testpoint(sessiond_thread_manage_clients_before_loop
)) {
2281 health_code_update();
2283 /* Set state as running. */
/* Wakes launch_client_thread(), which blocks in wait_thread_state_running(). */
2284 set_thread_state_running();
2287 const struct cmd_completion_handler
*cmd_completion_handler
;
2289 DBG("Accepting client command ...");
2291 /* Infinite blocking call, waiting for transmission */
/* Bracket the blocking poll with health poll entry/exit accounting. */
2293 health_poll_entry();
2294 ret
= lttng_poll_wait(&events
, -1);
2298 * Restart interrupted system call.
2300 if (errno
== EINTR
) {
/* Walk every fd the poll reported; nb_fd set on a dropped line. */
2308 for (i
= 0; i
< nb_fd
; i
++) {
2309 revents
= LTTNG_POLL_GETEV(&events
, i
);
2310 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
2312 health_code_update();
2315 /* No activity for this FD (poll implementation). */
/* Activity on the quit pipe means thread shutdown was requested. */
2319 if (pollfd
== thread_quit_pipe_fd
) {
2323 /* Event on the registration socket */
2324 if (revents
& LPOLLIN
) {
2326 } else if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
2327 ERR("Client socket poll error");
2330 ERR("Unexpected poll events %u for sock %d", revents
, pollfd
);
2336 DBG("Wait for client response");
2338 health_code_update();
/* Accept the pending client connection on the listening socket. */
2340 sock
= lttcomm_accept_unix_sock(client_sock
);
2346 * Set the CLOEXEC flag. Return code is useless because either way, the
2349 (void) utils_set_fd_cloexec(sock
);
2351 /* Set socket option for credentials retrieval */
/* Needed so lttcomm_recv_creds_unix_sock() below can read peer creds. */
2352 ret
= lttcomm_setsockopt_creds_unix_sock(sock
);
2357 /* Allocate context command to process the client request */
2358 cmd_ctx
= zmalloc(sizeof(struct command_ctx
));
2359 if (cmd_ctx
== NULL
) {
2360 PERROR("zmalloc cmd_ctx");
2364 /* Allocate data buffer for reception */
2365 cmd_ctx
->lsm
= zmalloc(sizeof(struct lttcomm_session_msg
));
2366 if (cmd_ctx
->lsm
== NULL
) {
2367 PERROR("zmalloc cmd_ctx->lsm");
/* llm (the reply) and session are filled in later by the dispatcher. */
2371 cmd_ctx
->llm
= NULL
;
2372 cmd_ctx
->session
= NULL
;
2374 health_code_update();
2377 * Data is received from the lttng client. The struct
2378 * lttcomm_session_msg (lsm) contains the command and data request of
2381 DBG("Receiving data from client ...");
2382 ret
= lttcomm_recv_creds_unix_sock(sock
, cmd_ctx
->lsm
,
2383 sizeof(struct lttcomm_session_msg
), &cmd_ctx
->creds
);
2385 DBG("Nothing recv() from client... continuing");
2391 clean_command_ctx(&cmd_ctx
);
2395 health_code_update();
2397 // TODO: Validate cmd_ctx including sanity check for
2398 // security purpose.
/* Dispatch runs under an RCU read-side online section. */
2400 rcu_thread_online();
2402 * This function dispatches the work to the kernel or userspace tracer
2403 * libs and fills the lttcomm_lttng_msg data structure with all the
2404 * information needed by the client. The command context struct contains
2405 * everything this function may need.
2407 ret
= process_client_msg(cmd_ctx
, sock
, &sock_error
);
2408 rcu_thread_offline();
2416 * TODO: Inform client somehow of the fatal error. At
2417 * this point, ret < 0 means that a zmalloc failed
2418 * (ENOMEM). Error detected but still accept
2419 * command, unless a socket error has been
2422 clean_command_ctx(&cmd_ctx
);
/* Run any completion handler the command queued (e.g. deferred work). */
2426 cmd_completion_handler
= cmd_pop_completion_handler();
2427 if (cmd_completion_handler
) {
2428 enum lttng_error_code completion_code
;
2430 completion_code
= cmd_completion_handler
->run(
2431 cmd_completion_handler
->data
);
2432 if (completion_code
!= LTTNG_OK
) {
2433 clean_command_ctx(&cmd_ctx
);
2438 health_code_update();
2440 DBG("Sending response (size: %d, retcode: %s (%d))",
2441 cmd_ctx
->lttng_msg_size
,
2442 lttng_strerror(-cmd_ctx
->llm
->ret_code
),
2443 cmd_ctx
->llm
->ret_code
);
/* Send the prepared lttcomm_lttng_msg reply back to the client. */
2444 ret
= send_unix_sock(sock
, cmd_ctx
->llm
, cmd_ctx
->lttng_msg_size
);
2446 ERR("Failed to send data back to client");
2449 /* End of transmission */
2456 clean_command_ctx(&cmd_ctx
);
2458 health_code_update();
/* Teardown path (labels dropped by the extraction). */
2470 lttng_poll_clean(&events
);
2471 clean_command_ctx(&cmd_ctx
);
2475 unlink(config
.client_unix_sock_path
.value
);
2476 if (client_sock
>= 0) {
2477 ret
= close(client_sock
);
2485 ERR("Health error occurred in %s", __func__
);
2488 health_unregister(health_sessiond
);
2490 DBG("Client thread dying");
2492 rcu_unregister_thread();
2495 * Since we are creating the consumer threads, we own them, so we need
2496 * to join them before our thread exits.
2498 ret
= join_consumer_thread(&kconsumer_data
);
2501 PERROR("join_consumer");
2504 ret
= join_consumer_thread(&ustconsumer32_data
);
2507 PERROR("join_consumer ust32");
2510 ret
= join_consumer_thread(&ustconsumer64_data
);
2513 PERROR("join_consumer ust64");
/*
 * Shutdown callback for the client thread: wake thread_manage_clients()
 * by writing one notification byte to the quit pipe's write end.
 * Registered in launch_client_thread() via lttng_thread_create().
 *
 * Returns true when exactly one byte was queued
 * (notify_thread_pipe() == 1), false otherwise.
 *
 * NOTE(review): enclosing braces were dropped by the extraction; the
 * visible statements are reproduced verbatim.
 */
2519 bool shutdown_client_thread(void *thread_data
)
2521 struct lttng_pipe
*client_quit_pipe
= thread_data
;
2522 const int write_fd
= lttng_pipe_get_writefd(client_quit_pipe
);
2524 return notify_thread_pipe(write_fd
) == 1;
2527 struct lttng_thread
*launch_client_thread(void)
2529 struct lttng_pipe
*client_quit_pipe
;
2530 struct lttng_thread
*thread
;
2532 client_quit_pipe
= lttng_pipe_open(FD_CLOEXEC
);
2533 if (!client_quit_pipe
) {
2537 thread
= lttng_thread_create("Client management",
2538 thread_manage_clients
,
2539 shutdown_client_thread
,
2540 cleanup_client_thread
,
2547 * This thread is part of the threads that need to be fully
2548 * initialized before the session daemon is marked as "ready".
2550 wait_thread_state_running();
2554 cleanup_client_thread(client_quit_pipe
);