/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/compat/getenv.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/session-config.h>
#include <common/dynamic-buffer.h>
#include <lttng/userspace-probe-internal.h>
#include <lttng/event-internal.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "agent-thread.h"
#include "load-session-thread.h"
#include "notification-thread.h"
#include "notification-thread-commands.h"
#include "rotation-thread.h"
#include "lttng-syscall.h"
#include "ht-cleanup.h"
#include "sessiond-config.h"
static const char *help_msg =
#ifdef LTTNG_EMBED_HELP
#include <lttng-sessiond.8.h>
#else
NULL
#endif
;
static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;

static struct lttng_kernel_tracer_version kernel_tracer_version;
static struct lttng_kernel_tracer_abi_version kernel_tracer_abi_version;
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_sock = -1,
	.cmd_sock = -1,
	.channel_monitor_pipe = -1,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", required_argument, 0, 'c' },
	{ "apps-sock", required_argument, 0, 'a' },
	{ "kconsumerd-cmd-sock", required_argument, 0, '\0' },
	{ "kconsumerd-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd32-err-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-cmd-sock", required_argument, 0, '\0' },
	{ "ustconsumerd64-err-sock", required_argument, 0, '\0' },
	{ "consumerd32-path", required_argument, 0, '\0' },
	{ "consumerd32-libdir", required_argument, 0, '\0' },
	{ "consumerd64-path", required_argument, 0, '\0' },
	{ "consumerd64-libdir", required_argument, 0, '\0' },
	{ "daemonize", no_argument, 0, 'd' },
	{ "background", no_argument, 0, 'b' },
	{ "sig-parent", no_argument, 0, 'S' },
	{ "help", no_argument, 0, 'h' },
	{ "group", required_argument, 0, 'g' },
	{ "version", no_argument, 0, 'V' },
	{ "quiet", no_argument, 0, 'q' },
	{ "verbose", no_argument, 0, 'v' },
	{ "verbose-consumer", no_argument, 0, '\0' },
	{ "no-kernel", no_argument, 0, '\0' },
	{ "pidfile", required_argument, 0, 'p' },
	{ "agent-tcp-port", required_argument, 0, '\0' },
	{ "config", required_argument, 0, 'f' },
	{ "load", required_argument, 0, 'l' },
	{ "kmod-probes", required_argument, 0, '\0' },
	{ "extra-kmod-probes", required_argument, 0, '\0' },
	{ NULL, 0, 0, 0 },
};

/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };
/* Shared between threads */
static int dispatch_thread_exit;

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t agent_reg_thread;
static pthread_t load_session_thread;
static pthread_t notification_thread;
static pthread_t rotation_thread;
static pthread_t timer_thread;
/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. The thread_manage_apps
 * receives down the line new application sockets and monitors them for any
 * I/O error or clean close that triggers an unregistration of the
 * application.
 */
static struct ust_cmd_queue ust_cmd_queue;

static const char *module_proc_lttng = "/proc/lttng";
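/*
 * Illustrative sketch (not part of the original source): how the two sides
 * of ust_cmd_queue pair up through the futex helpers used later in this
 * file (futex_nto1_prepare()/futex_nto1_wait() on the single-waiter side,
 * futex_nto1_wake() on the waker side):
 *
 *   dispatch thread (1 waiter)             registration thread (N wakers)
 *   --------------------------             ------------------------------
 *   futex_nto1_prepare(&q.futex);          cds_wfcq_enqueue(&q.head,
 *   drain queue with                                &q.tail, &cmd->node);
 *       cds_wfcq_dequeue_blocking();       futex_nto1_wake(&q.futex);
 *   futex_nto1_wait(&q.futex);
 */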
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd arrives
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
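/*
 * Usage sketch (illustrative only, not from the original source): a client
 * command validates the consumerd state before trying to reach the
 * consumer, along the lines of:
 *
 *   if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
 *           return an error to the client;
 *   }
 *
 * As described above, this check is only a best-effort guard: the consumer
 * can still die between the validation and the actual command.
 */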
/* Load session thread information to operate. */
static struct load_session_thread_data *load_info;

/*
 * Section name to look for in the daemon configuration file.
 */
static const char * const config_section_name = "sessiond";

/* Am I root or not. Set to 1 if the daemon is running as root */
static int is_root;

/* Rotation thread handle. */
static struct rotation_thread_handle *rotation_thread_handle;
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = sessiond_notify_quit_pipe();
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
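/*
 * Note: the CMM_STORE_SHARED() in stop_threads() pairs with the
 * CMM_LOAD_SHARED(dispatch_thread_exit) check in
 * thread_dispatch_ust_registration(); the futex wake that follows it
 * ensures the dispatch thread is not parked in futex_nto1_wait() when the
 * exit flag is raised.
 */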
/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
	if (kconsumer_data.channel_monitor_pipe >= 0) {
		ret = close(kconsumer_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("kernel consumer channel monitor pipe close");
		}
	}
	if (ustconsumer32_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer32_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd32 channel monitor pipe close");
		}
	}
	if (ustconsumer64_data.channel_monitor_pipe >= 0) {
		ret = close(ustconsumer64_data.channel_monitor_pipe);
		if (ret < 0) {
			PERROR("UST consumerd64 channel monitor pipe close");
		}
	}
}
/*
 * Wait on consumer process termination.
 *
 * Must be called with the consumer data lock held or from a context
 * ensuring no concurrent access to data (e.g: cleanup).
 */
static void wait_consumer(struct consumer_data *consumer_data)
{
	pid_t ret;
	int status;

	if (consumer_data->pid <= 0) {
		return;
	}

	DBG("Waiting for complete teardown of consumerd (PID: %d)",
			consumer_data->pid);
	ret = waitpid(consumer_data->pid, &status, 0);
	if (ret == -1) {
		PERROR("consumerd waitpid pid: %d", consumer_data->pid);
	} else if (!WIFEXITED(status)) {
		ERR("consumerd termination with error: %d",
				WEXITSTATUS(status));
	}
	consumer_data->pid = 0;
}
/*
 * Cleanup the session daemon's data structures.
 */
static void sessiond_cleanup(void)
{
	int ret;
	struct ltt_session_list *session_list = session_get_list();

	DBG("Cleanup sessiond");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	sessiond_close_quit_pipe();

	ret = remove(config.pid_file_path.value);
	if (ret < 0) {
		PERROR("remove pidfile %s", config.pid_file_path.value);
	}

	DBG("Removing sessiond and consumerd content of directory %s",
			config.rundir.value);

	/* sessiond */
	DBG("Removing %s", config.pid_file_path.value);
	(void) unlink(config.pid_file_path.value);

	DBG("Removing %s", config.agent_port_file_path.value);
	(void) unlink(config.agent_port_file_path.value);

	/* kconsumerd */
	DBG("Removing %s", kconsumer_data.err_unix_sock_path);
	(void) unlink(kconsumer_data.err_unix_sock_path);

	DBG("Removing directory %s", config.kconsumerd_path.value);
	(void) rmdir(config.kconsumerd_path.value);

	/* ust consumerd 32 */
	DBG("Removing %s", config.consumerd32_err_unix_sock_path.value);
	(void) unlink(config.consumerd32_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd32_path.value);
	(void) rmdir(config.consumerd32_path.value);

	/* ust consumerd 64 */
	DBG("Removing %s", config.consumerd64_err_unix_sock_path.value);
	(void) unlink(config.consumerd64_err_unix_sock_path.value);

	DBG("Removing directory %s", config.consumerd64_path.value);
	(void) rmdir(config.consumerd64_path.value);

	pthread_mutex_destroy(&session_list->lock);

	wait_consumer(&kconsumer_data);
	wait_consumer(&ustconsumer64_data);
	wait_consumer(&ustconsumer32_data);

	DBG("Cleaning up all agent apps");
	agent_app_ht_clean();

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !config.no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	if (load_info) {
		load_session_destroy_data(load_info);
		free(load_info);
	}

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */
}
/*
 * Cleanup the daemon's option data structures.
 */
static void sessiond_cleanup_options(void)
{
	DBG("Cleaning up options");

	sessiond_config_fini(&config);

	run_as_destroy_worker();
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(config.wait_shm_path.value, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return 0 on success, negative value on error.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx,
	const void *payload_buf, size_t payload_len,
	const void *cmd_header_buf, size_t cmd_header_len)
{
	int ret = 0;
	const size_t header_len = sizeof(struct lttcomm_lttng_msg);
	const size_t cmd_header_offset = header_len;
	const size_t payload_offset = cmd_header_offset + cmd_header_len;
	const size_t total_msg_size = header_len + cmd_header_len + payload_len;

	cmd_ctx->llm = zmalloc(total_msg_size);

	if (cmd_ctx->llm == NULL) {
		PERROR("zmalloc");
		ret = -ENOMEM;
		goto end;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
	cmd_ctx->llm->cmd_header_size = cmd_header_len;
	cmd_ctx->llm->data_size = payload_len;
	cmd_ctx->lttng_msg_size = total_msg_size;

	/* Copy command header */
	if (cmd_header_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + cmd_header_offset, cmd_header_buf,
				cmd_header_len);
	}

	/* Copy payload */
	if (payload_len) {
		memcpy(((uint8_t *) cmd_ctx->llm) + payload_offset, payload_buf,
				payload_len);
	}

end:
	return ret;
}
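/*
 * Resulting buffer layout (derived from the offsets computed above):
 *
 *   +--------------------------+----------------------+------------------+
 *   | struct lttcomm_lttng_msg | command header       | payload          |
 *   | (header_len bytes)       | (cmd_header_len)     | (payload_len)    |
 *   +--------------------------+----------------------+------------------+
 *   ^ cmd_ctx->llm             ^ cmd_header_offset    ^ payload_offset
 */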
/*
 * Version of setup_lttng_msg() without command header.
 */
static int setup_lttng_msg_no_cmd_header(struct command_ctx *cmd_ctx,
	void *payload_buf, size_t payload_len)
{
	return setup_lttng_msg(cmd_ctx, payload_buf, payload_len, NULL, 0);
}
/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_kernel_channel *channel;
	struct ltt_session *session;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				session_put(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing is started so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_streams(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_put(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;
	const struct ltt_session_list *session_list = session_get_list();

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list->head, list) {
		struct ust_app *app;

		if (!session_get(sess)) {
			continue;
		}
		session_lock(sess);
		if (!sess->ust_session) {
			goto unlock_session;
		}

		rcu_read_lock();
		assert(app_sock >= 0);
		app = ust_app_find_by_sock(app_sock);
		if (app == NULL) {
			/*
			 * Application can be unregistered before so
			 * this is possible hence simply stopping the
			 * update.
			 */
			DBG3("UST app update failed to find app sock %d",
					app_sock);
			goto unlock_rcu;
		}
		ust_app_global_update(sess->ust_session, app);

	unlock_rcu:
		rcu_read_unlock();
	unlock_session:
		session_unlock(sess);
		session_put(sess);
	}
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (revents & LPOLLIN) {
				if (pollfd == kernel_poll_pipe[0]) {
					(void) lttng_read(kernel_poll_pipe[0],
							&tmp, 1);
					/*
					 * Ret value is useless here, if this pipe gets any actions an
					 * update is required anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Adding kernel stream to
					 * kernel session and updating the kernel consumer
					 */
					ret = update_kernel_stream(pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal pthread condition of the consumer data that the thread is ready.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
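/*
 * Waiter-side sketch (see spawn_consumer_thread() further below for the
 * real implementation): the spawning thread blocks on the same condition
 * variable until the state is signalled:
 *
 *   pthread_mutex_lock(&data->cond_mutex);
 *   while (!data->consumer_thread_is_ready && ret != ETIMEDOUT) {
 *           ret = pthread_cond_timedwait(&data->cond,
 *                           &data->cond_mutex, &timeout);
 *   }
 *   pthread_mutex_unlock(&data->cond_mutex);
 */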
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;
	struct consumer_socket *cmd_socket_wrapper = NULL;

	DBG("[thread] Manage consumer started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & LPOLLIN) {
				continue;
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code != LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Connect both command and metadata sockets. */
	consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	consumer_data->metadata_fd =
			lttcomm_connect_unix_sock(
				consumer_data->cmd_unix_sock_path);
	if (consumer_data->cmd_sock < 0 || consumer_data->metadata_fd < 0) {
		PERROR("consumer connect cmd socket");
		/* On error, signal condition and quit. */
		signal_consumer_condition(consumer_data, -1);
		goto error;
	}

	consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;

	/* Create metadata socket lock. */
	consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
	if (consumer_data->metadata_sock.lock == NULL) {
		PERROR("zmalloc pthread mutex");
		goto error;
	}
	pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

	DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
	DBG("Consumer metadata socket ready (fd: %d)",
			consumer_data->metadata_fd);

	/*
	 * Remove the consumerd error sock since we've established a connection.
	 */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/*
	 * Transfer the write-end of the channel monitoring and rotate pipe
	 * to the consumer by issuing a SET_CHANNEL_MONITOR_PIPE command.
	 */
	cmd_socket_wrapper = consumer_allocate_socket(&consumer_data->cmd_sock);
	if (!cmd_socket_wrapper) {
		goto error;
	}
	cmd_socket_wrapper->lock = &consumer_data->lock;

	ret = consumer_send_channel_monitor_pipe(cmd_socket_wrapper,
			consumer_data->channel_monitor_pipe);
	if (ret) {
		goto error;
	}

	/* Discard the socket wrapper as it is no longer needed. */
	consumer_destroy_socket(cmd_socket_wrapper);
	cmd_socket_wrapper = NULL;

	/* The thread is completely initialized, signal that it is ready. */
	signal_consumer_condition(consumer_data, 1);

	/* Infinite blocking call, waiting for transmission */
	while (1) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

	restart_poll:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/*
			 * Thread quit pipe has been triggered, flag that we should stop
			 * but continue the current loop to handle potential data from
			 * the consumer.
			 */
			should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)
						&& !(revents & LPOLLIN)) {
					ERR("consumer err metadata socket second poll error");
					goto error;
				}
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch all FDs are tested prior. */
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them so get exclusive access which will abort all
	 * other consumer command by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
			*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);

	if (cmd_socket_wrapper) {
		consumer_destroy_socket(cmd_socket_wrapper);
	}
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	rcu_thread_offline();
	rcu_unregister_thread();

	return NULL;
}
/*
 * This thread receives application command sockets (FDs) on the
 * apps_cmd_pipe and waits (polls) on them until they are closed
 * or an error occurs.
 *
 * At that point, it flushes the data (tracing and metadata) associated
 * with this application and tears down ust app sessions and other
 * associated data structures through ust_app_unregister().
 *
 * Note that this thread never sends commands to the applications
 * through the command sockets; it merely listens for hang-ups
 * and errors on those sockets and cleans-up as they occur.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Apps thread return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & LPOLLIN) {
					int sock;

					/* Empty pipe */
					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * Since this is a command socket (write then read),
					 * we only monitor the error events of the socket.
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else {
					ERR("Unknown poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0 else a negative value being the errno message of the
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function being a shutdown state of the thread.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;
error:
	return ret;
}
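/*
 * Illustrative usage (matching the calls made by the dispatch thread
 * further below): both the command and notify pipes transport a plain file
 * descriptor by value:
 *
 *   ret = send_socket_to_thread(apps_cmd_notify_pipe[1], app->notify_sock);
 *   ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
 */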
/*
 * Sanitize the wait queue of the dispatch registration thread meaning removing
 * invalid nodes from it. This is to avoid memory leaks for the case the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);

				/*
				 * Silence warning of use-after-free in
				 * cds_list_for_each_entry_safe which uses
				 * __typeof__(*wait_node).
				 */
				tmp_wait_node = NULL;
				break;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfcq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	for (;;) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		if (CMM_LOAD_SHARED(dispatch_thread_exit)) {
			break;
		}

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify socket that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}
				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * point.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

	/* Empty command queue. */
	for (;;) {
		/* Dequeue command for registration */
		node = cds_wfcq_dequeue_blocking(&ust_cmd_queue.head, &ust_cmd_queue.tail);
		if (node == NULL) {
			break;
		}
		ust_cmd = caa_container_of(node, struct ust_command, node);
		ret = close(ust_cmd->sock);
		if (ret < 0) {
			PERROR("close ust sock exit dispatch %d", ust_cmd->sock);
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
		free(ust_cmd);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);

	if (testpoint(sessiond_thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problems for already\n"
				"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and ending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					if (config.app_socket_timeout >= 0) {
						(void) lttcomm_setsockopt_rcv_timeout(sock,
								config.app_socket_timeout * 1000);
						(void) lttcomm_setsockopt_snd_timeout(sock,
								config.app_socket_timeout * 1000);
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfcq_enqueue(&ust_cmd_queue.head, &ust_cmd_queue.tail, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfcq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(config.apps_unix_sock_path.value);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/*
	 * Make sure we set the readiness flag to 0 because we are NOT ready.
	 * This access to consumer_thread_is_ready does not need to be
	 * protected by consumer_data.cond_mutex (yet) since the consumer
	 * management thread has not been started at this point.
	 */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret) {
		errno = ret;
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret) {
		errno = ret;
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret) {
		errno = ret;
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, default_pthread_attr(),
			thread_manage_consumer, consumer_data);
	if (ret) {
		errno = ret;
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = lttng_clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle the pthread cond wait return values. */
	if (ret != 0) {
		errno = ret;
		if (ret == ETIMEDOUT) {
			int pth_ret;

			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready."
					" Killing it");
			pth_ret = pthread_cancel(consumer_data->thread);
			if (pth_ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		/* Caller is expecting a negative value on failure. */
		ret = -1;
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		int ret;

		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			PERROR("Error killing consumer daemon");
			return ret;
		}
		return pthread_join(consumer_data->thread, &status);
	} else {
		return 0;
	}
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (config.verbose_consumer) {
			verbosity = "--verbose";
		} else if (lttng_opt_quiet) {
			verbosity = "--quiet";
		} else {
			verbosity = "";
		}

		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fall back on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", config.consumerd64_bin_path.value ? : "NULL");
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, DEFAULT_CONSUMERD_FILE);
			DBG3("	3) %s", config.consumerd32_bin_path.value ? : "NULL");
			if (stat(config.consumerd64_bin_path.value, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = config.consumerd64_bin_path.value;
			} else if (stat(INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" DEFAULT_CONSUMERD_FILE;
			} else if (config.consumerd32_bin_path.value &&
					stat(config.consumerd32_bin_path.value, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = config.consumerd32_bin_path.value;
			} else {
				DBG("Could not find any valid consumerd executable");
				ret = -EINVAL;
				goto error;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			(void) execl(consumer_to_use,
				"lttng-consumerd", verbosity, "-k",
				"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
				"--consumerd-err-sock", consumer_data->err_unix_sock_path,
				"--group", config.tracing_group_name.value,
				NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			if (config.consumerd64_lib_dir.value) {
				const char *tmp;
				size_t tmplen;
				char *tmpnew;

				tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen(config.consumerd64_lib_dir.value) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcat(tmpnew, config.consumerd64_lib_dir.value);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
				free(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", config.consumerd64_bin_path.value);
			(void) execl(config.consumerd64_bin_path.value, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", config.tracing_group_name.value,
					NULL);
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			if (config.consumerd32_lib_dir.value) {
				const char *tmp;
				size_t tmplen;
				char *tmpnew;

				tmp = lttng_secure_getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen(config.consumerd32_lib_dir.value) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcat(tmpnew, config.consumerd32_lib_dir.value);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = setenv("LD_LIBRARY_PATH", tmpnew, 1);
				free(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", config.consumerd32_bin_path.value);
			(void) execl(config.consumerd32_bin_path.value, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", config.tracing_group_name.value,
					NULL);
			break;
		}
		default:
			ERR("unknown consumer type");
			errno = 0;
		}
		if (errno != 0) {
			PERROR("Consumer execl()");
		}
		/* Reaching this point, we got a failure on our execl(). */
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created sockets on error. */
	if (consumer_data->err_sock >= 0) {
		int err;

		err = close(consumer_data->err_sock);
		if (err) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
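/*
 * Consumer daemon startup ordering, as implemented by the functions above
 * (summary sketch):
 *
 *   start_consumerd()
 *     -> lttcomm_listen_unix_sock(err_sock)    listen() before the exec to
 *     -> spawn_consumerd()                     avoid the connect()/listen()
 *     -> spawn_consumer_thread()               race (bug #366)
 *          -> pthread_create(thread_manage_consumer)
 *          -> wait on consumer_data->cond until consumer_thread_is_ready
 */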
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd, &kernel_tracer_version,
			&kernel_tracer_abi_version);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	ret = kernel_supports_ring_buffer_snapshot_sample_positions(
			kernel_tracer_fd);
	if (ret < 0) {
		goto error_modules;
	}
	if (ret < 1) {
		WARN("Kernel tracer does not support buffer monitoring. "
			"The monitoring timer of channels in the kernel domain "
			"will be set to 0 (disabled).");
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
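/*
 * Note: the initialization sequence above is: modprobe the lttng
 * control modules, open the proc interface, validate the tracer's
 * version/ABI, then modprobe the data (probe) modules. A failure at
 * any step unwinds the previous ones before reporting the error.
 */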
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session);
	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_output_put(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_output_put(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
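/*
 * Note: after copy_session_consumer(), the domain session holds its
 * own copy of the session's consumer output, with the per-domain trace
 * directory (kernel or UST) appended to the subdir, so kernel and UST
 * traces never share a destination directory.
 */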
/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(domain);
	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->id);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	lus->output_traces = session->output_traces;
	lus->snapshot_mode = session->snapshot_mode;
	lus->live_timer_interval = session->live_timer;
	session->ust_session = lus;
	if (session->shm_path[0]) {
		strncpy(lus->root_shm_path, session->shm_path,
			sizeof(lus->root_shm_path));
		lus->root_shm_path[sizeof(lus->root_shm_path) - 1] = '\0';
		strncpy(lus->shm_path, session->shm_path,
			sizeof(lus->shm_path));
		lus->shm_path[sizeof(lus->shm_path) - 1] = '\0';
		strncat(lus->shm_path, "/ust",
			sizeof(lus->shm_path) - strlen(lus->shm_path) - 1);
	}
	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
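/*
 * Note: when the session has an explicit shm_path, the UST session
 * keeps the original path in root_shm_path and appends "/ust" to its
 * own shm_path copy, giving the domain a dedicated shared-memory
 * sub-directory.
 */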
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;
	session->kernel_session->output_traces = session->output_traces;
	session->kernel_session->snapshot_mode = session->snapshot_mode;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Count the number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		/* Only count the sessions the user can control. */
		if (session_access_ok(session, uid, gid) &&
				!session->destroyed) {
			i++;
		}
		session_unlock(session);
		session_put(session);
	}
	return i;
}
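/*
 * Note: each session is session_get()'d before being inspected and
 * session_put() afterwards, so a session in the middle of teardown
 * cannot be freed from under the iteration; destroyed sessions are
 * additionally excluded from the count.
 */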
static int receive_userspace_probe(struct command_ctx *cmd_ctx, int sock,
		int *sock_error, struct lttng_event *event)
{
	int fd, ret;
	struct lttng_userspace_probe_location *probe_location;
	const struct lttng_userspace_probe_location_lookup_method *lookup = NULL;
	struct lttng_dynamic_buffer probe_location_buffer;
	struct lttng_buffer_view buffer_view;

	/*
	 * Create a buffer to store the serialized version of the probe
	 * location.
	 */
	lttng_dynamic_buffer_init(&probe_location_buffer);
	ret = lttng_dynamic_buffer_set_size(&probe_location_buffer,
			cmd_ctx->lsm->u.enable.userspace_probe_location_len);
	if (ret) {
		ret = LTTNG_ERR_NOMEM;
		goto error;
	}

	/*
	 * Receive the probe location.
	 */
	ret = lttcomm_recv_unix_sock(sock, probe_location_buffer.data,
			probe_location_buffer.size);
	if (ret <= 0) {
		DBG("Nothing recv() from client var len data... continuing");
		*sock_error = 1;
		lttng_dynamic_buffer_reset(&probe_location_buffer);
		ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
		goto error;
	}

	buffer_view = lttng_buffer_view_from_dynamic_buffer(
			&probe_location_buffer, 0, probe_location_buffer.size);

	/*
	 * Extract the probe location from the serialized version.
	 */
	ret = lttng_userspace_probe_location_create_from_buffer(
			&buffer_view, &probe_location);
	if (ret < 0) {
		WARN("Failed to create a userspace probe location from the received buffer");
		lttng_dynamic_buffer_reset(&probe_location_buffer);
		ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
		goto error;
	}

	/*
	 * Receive the file descriptor to the target binary from the client.
	 */
	DBG("Receiving userspace probe target FD from client ...");
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret <= 0) {
		DBG("Nothing recv() from client userspace probe fd... continuing");
		*sock_error = 1;
		ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
		goto error;
	}

	/*
	 * Set the file descriptor received from the client through the unix
	 * socket in the probe location.
	 */
	lookup = lttng_userspace_probe_location_get_lookup_method(probe_location);
	if (!lookup) {
		ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
		goto error;
	}

	/*
	 * From the kernel tracer's perspective, all userspace probe event types
	 * are the same: a file and an offset.
	 */
	switch (lttng_userspace_probe_location_lookup_method_get_type(lookup)) {
	case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_FUNCTION_ELF:
		ret = lttng_userspace_probe_location_function_set_binary_fd(
				probe_location, fd);
		break;
	case LTTNG_USERSPACE_PROBE_LOCATION_LOOKUP_METHOD_TYPE_TRACEPOINT_SDT:
		ret = lttng_userspace_probe_location_tracepoint_set_binary_fd(
				probe_location, fd);
		break;
	default:
		ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
		goto error;
	}

	if (ret) {
		ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
		goto error;
	}

	/* Attach the probe location to the event. */
	ret = lttng_event_set_userspace_probe_location(event, probe_location);
	if (ret) {
		ret = LTTNG_ERR_PROBE_LOCATION_INVAL;
		goto error;
	}

error:
	lttng_dynamic_buffer_reset(&probe_location_buffer);
	return ret;
}
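/*
 * Note on the wire exchange above: the client first sends
 * userspace_probe_location_len bytes holding the serialized probe
 * location, then passes the target binary's file descriptor over the
 * unix socket. Both lookup methods (ELF function and SDT tracepoint)
 * end up attaching that fd to the probe location.
 */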
/*
 * Check if the current kernel tracer supports the session rotation feature.
 * Return 1 if it does, 0 otherwise.
 */
static int check_rotate_compatible(void)
{
	int ret = 1;

	if (kernel_tracer_version.major != 2 || kernel_tracer_version.minor < 11) {
		DBG("Kernel tracer version is not compatible with the rotation feature");
		ret = 0;
	}

	return ret;
}
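/*
 * Note: session rotation thus requires a 2.x kernel tracer with a
 * minor version of at least 11; any other major version is rejected
 * outright.
 */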
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
		int *sock_error)
{
	int ret = LTTNG_OK;
	int need_tracing_session = 1;
	int need_domain;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	assert(!rcu_read_ongoing());

	*sock_error = 0;

	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_DESTROY_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_START_TRACE:
	case LTTNG_STOP_TRACE:
	case LTTNG_DATA_PENDING:
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	case LTTNG_SNAPSHOT_RECORD:
	case LTTNG_SAVE_SESSION:
	case LTTNG_SET_SESSION_SHM_PATH:
	case LTTNG_REGENERATE_METADATA:
	case LTTNG_REGENERATE_STATEDUMP:
	case LTTNG_REGISTER_TRIGGER:
	case LTTNG_UNREGISTER_TRIGGER:
	case LTTNG_ROTATE_SESSION:
	case LTTNG_ROTATION_GET_INFO:
	case LTTNG_ROTATION_SET_SCHEDULE:
	case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
		need_domain = 0;
		break;
	default:
		need_domain = 1;
	}

	if (config.no_kernel && need_domain
			&& cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
		} else {
			ret = LTTNG_ERR_KERN_NA;
		}
		goto error;
	}
	/* Deny register consumer if we already have a spawned consumer. */
	if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
		pthread_mutex_lock(&kconsumer_data.pid_mutex);
		if (kconsumer_data.pid > 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			goto error;
		}
		pthread_mutex_unlock(&kconsumer_data.pid_mutex);
	}
	/*
	 * Check for commands that don't need to allocate a returned payload.
	 * We do this here so we don't have to make the call for no payload at
	 * each command.
	 */
	switch(cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
	case LTTNG_LIST_SYSCALLS:
	case LTTNG_LIST_TRACKER_PIDS:
	case LTTNG_DATA_PENDING:
	case LTTNG_ROTATE_SESSION:
	case LTTNG_ROTATION_GET_INFO:
	case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}
	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	case LTTNG_CREATE_SESSION_LIVE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_SYSCALLS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_SAVE_SESSION:
	case LTTNG_REGISTER_TRIGGER:
	case LTTNG_UNREGISTER_TRIGGER:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			ret = LTTNG_ERR_SESS_NOT_FOUND;
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}
	/*
	 * Commands that need a valid session but should NOT create one if none
	 * exists. Instead of creating one and destroying it when the command is
	 * handled, process that right before so we save some round trip in useless
	 * code path.
	 */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_DISABLE_CHANNEL:
	case LTTNG_DISABLE_EVENT:
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			if (!cmd_ctx->session->kernel_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (!cmd_ctx->session->ust_session) {
				ret = LTTNG_ERR_NO_CHANNEL;
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
		break;
	default:
		break;
	}

	if (!need_domain) {
		goto skip_domain;
	}
	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned so we need to add the socket to
			 * the consumer output of the session if it exists.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	case LTTNG_DOMAIN_JUL:
	case LTTNG_DOMAIN_LOG4J:
	case LTTNG_DOMAIN_PYTHON:
	case LTTNG_DOMAIN_UST:
	{
		if (!ust_app_supported()) {
			ret = LTTNG_ERR_NO_UST;
			goto error;
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (config.consumerd64_bin_path.value &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (config.consumerd32_bin_path.value &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}

skip_domain:
	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_NONE:
			break;
		case LTTNG_DOMAIN_JUL:
		case LTTNG_DOMAIN_LOG4J:
		case LTTNG_DOMAIN_PYTHON:
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		default:
			ret = LTTNG_ERR_UNKNOWN_DOMAIN;
			goto error;
		}
	}
	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds)) ||
				cmd_ctx->session->destroyed) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}
	/*
	 * Send relayd information to consumer as soon as we have a domain and a
	 * session defined.
	 */
	if (cmd_ctx->session && need_domain) {
		/*
		 * Setup relayd if not done yet. If the relayd information was already
		 * sent to the consumer, this call will gracefully return.
		 */
		ret = cmd_setup_relayd(cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}
	}
	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		/*
		 * An LTTNG_ADD_CONTEXT command might have a supplementary
		 * payload if the context being added is an application context.
		 */
		if (cmd_ctx->lsm->u.context.ctx.ctx ==
				LTTNG_EVENT_CONTEXT_APP_CONTEXT) {
			char *provider_name = NULL, *context_name = NULL;
			size_t provider_name_len =
					cmd_ctx->lsm->u.context.provider_name_len;
			size_t context_name_len =
					cmd_ctx->lsm->u.context.context_name_len;

			if (provider_name_len == 0 || context_name_len == 0) {
				/*
				 * Application provider and context names MUST
				 * be provided.
				 */
				ret = -LTTNG_ERR_INVALID;
				goto error_add_context;
			}

			provider_name = zmalloc(provider_name_len + 1);
			if (!provider_name) {
				ret = -LTTNG_ERR_NOMEM;
				goto error_add_context;
			}
			cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name =
					provider_name;

			context_name = zmalloc(context_name_len + 1);
			if (!context_name) {
				ret = -LTTNG_ERR_NOMEM;
				goto error_add_context;
			}
			cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name =
					context_name;

			ret = lttcomm_recv_unix_sock(sock, provider_name,
					provider_name_len);
			if (ret < 0) {
				goto error_add_context;
			}

			ret = lttcomm_recv_unix_sock(sock, context_name,
					context_name_len);
			if (ret < 0) {
				goto error_add_context;
			}
		}

		/*
		 * cmd_add_context assumes ownership of the provider and context
		 * names.
		 */
		ret = cmd_add_context(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				&cmd_ctx->lsm->u.context.ctx,
				kernel_poll_pipe[1]);

		cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name = NULL;
		cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name = NULL;
error_add_context:
		free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.provider_name);
		free(cmd_ctx->lsm->u.context.ctx.u.app_ctx.ctx_name);
		break;
	}
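	/*
	 * Note: on success, cmd_add_context owns provider_name and
	 * context_name; the lsm pointers are nulled before falling through
	 * to the frees (freeing NULL is a no-op), while on the error path
	 * the partially received names are still owned here and released.
	 */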
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{
		/*
		 * FIXME: handle filter; for now we just receive the filter's
		 * bytecode along with the filter expression which are sent by
		 * liblttng-ctl and discard them.
		 *
		 * This fixes an issue where the client may block while sending
		 * the filter payload and encounter an error because the session
		 * daemon closes the socket without ever handling this data.
		 */
		size_t count = cmd_ctx->lsm->u.disable.expression_len +
				cmd_ctx->lsm->u.disable.bytecode_len;

		if (count) {
			char data[LTTNG_FILTER_MAX_LEN];

			DBG("Discarding disable event command payload of size %zu", count);
			while (count) {
				ret = lttcomm_recv_unix_sock(sock, data,
						count > sizeof(data) ? sizeof(data) : count);
				if (ret < 0) {
					goto error;
				}

				count -= (size_t) ret;
			}
		}
		/* FIXME: passing packed structure to non-packed pointer */
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				&cmd_ctx->lsm->u.disable.event);
		break;
	}
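	/*
	 * Note: the discard loop above drains the unused filter payload in
	 * LTTNG_FILTER_MAX_LEN-sized chunks, so a client that already sent
	 * the payload never blocks on a write the session daemon would
	 * otherwise never read.
	 */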
	case LTTNG_ENABLE_CHANNEL:
	{
		cmd_ctx->lsm->u.channel.chan.attr.extended.ptr =
				(struct lttng_channel_extended *) &cmd_ctx->lsm->u.channel.extended;
		ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
				&cmd_ctx->lsm->u.channel.chan,
				kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_TRACK_PID:
	{
		ret = cmd_track_pid(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.pid_tracker.pid);
		break;
	}
	case LTTNG_UNTRACK_PID:
	{
		ret = cmd_untrack_pid(cmd_ctx->session,
				cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.pid_tracker.pid);
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		struct lttng_event *ev = NULL;
		struct lttng_event_exclusion *exclusion = NULL;
		struct lttng_filter_bytecode *bytecode = NULL;
		char *filter_expression = NULL;

		/* Handle exclusion events and receive it from the client. */
		if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
			size_t count = cmd_ctx->lsm->u.enable.exclusion_count;

			exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
					(count * LTTNG_SYMBOL_NAME_LEN));
			if (!exclusion) {
				ret = LTTNG_ERR_EXCLUSION_NOMEM;
				goto error;
			}

			DBG("Receiving var len exclusion event list from client ...");
			exclusion->count = count;
			ret = lttcomm_recv_unix_sock(sock, exclusion->names,
					count * LTTNG_SYMBOL_NAME_LEN);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(exclusion);
				ret = LTTNG_ERR_EXCLUSION_INVAL;
				goto error;
			}
		}

		/* Get filter expression from client. */
		if (cmd_ctx->lsm->u.enable.expression_len > 0) {
			size_t expression_len =
					cmd_ctx->lsm->u.enable.expression_len;

			if (expression_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(exclusion);
				goto error;
			}

			filter_expression = zmalloc(expression_len);
			if (!filter_expression) {
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's expression from client ...");
			ret = lttcomm_recv_unix_sock(sock, filter_expression,
					expression_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		/* Handle filter and get bytecode from client. */
		if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
			size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;

			if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
				ret = LTTNG_ERR_FILTER_INVAL;
				free(filter_expression);
				free(exclusion);
				goto error;
			}

			bytecode = zmalloc(bytecode_len);
			if (!bytecode) {
				free(filter_expression);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_NOMEM;
				goto error;
			}

			/* Receive var. len. data */
			DBG("Receiving var len filter's bytecode from client ...");
			ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
			if (ret <= 0) {
				DBG("Nothing recv() from client var len data... continuing");
				*sock_error = 1;
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}

			if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				ret = LTTNG_ERR_FILTER_INVAL;
				goto error;
			}
		}

		ev = lttng_event_copy(&cmd_ctx->lsm->u.enable.event);
		if (!ev) {
			DBG("Failed to copy event: %s",
					cmd_ctx->lsm->u.enable.event.name);
			free(filter_expression);
			free(bytecode);
			free(exclusion);
			ret = LTTNG_ERR_NOMEM;
			goto error;
		}

		if (cmd_ctx->lsm->u.enable.userspace_probe_location_len > 0) {
			/* Expect a userspace probe description. */
			ret = receive_userspace_probe(cmd_ctx, sock, sock_error, ev);
			if (ret) {
				free(filter_expression);
				free(bytecode);
				free(exclusion);
				lttng_event_destroy(ev);
				goto error;
			}
		}

		ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
				cmd_ctx->lsm->u.enable.channel_name,
				ev,
				filter_expression, bytecode, exclusion,
				kernel_poll_pipe[1]);
		lttng_event_destroy(ev);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		session_lock_list();
		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		session_unlock_list();
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
				sizeof(struct lttng_event) * nb_events);
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		session_lock_list();
		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		session_unlock_list();
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, fields,
				sizeof(struct lttng_event_field) * nb_fields);
		free(fields);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SYSCALLS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_syscalls(&events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, events,
				sizeof(struct lttng_event) * nb_events);
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_TRACKER_PIDS:
	{
		int32_t *pids = NULL;
		ssize_t nr_pids;

		nr_pids = cmd_list_tracker_pids(cmd_ctx->session,
				cmd_ctx->lsm->domain.type, &pids);
		if (nr_pids < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nr_pids;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, pids,
				sizeof(int32_t) * nr_pids);
		free(pids);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
		free(uris);
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	case LTTNG_START_TRACE:
	{
		/*
		 * On the first start, if we have a kernel session and we have
		 * enabled time or size-based rotations, we have to make sure
		 * the kernel tracer supports it.
		 */
		if (!cmd_ctx->session->has_been_started && \
				cmd_ctx->session->kernel_session && \
				(cmd_ctx->session->rotate_timer_period || \
					cmd_ctx->session->rotate_size) && \
				!check_rotate_compatible()) {
			DBG("Kernel tracer version is not compatible with the rotation feature");
			ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
			goto error;
		}
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds, 0);
		free(uris);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session,
				notification_thread_handle);
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains = NULL;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, domains,
				nb_dom * sizeof(struct lttng_domain));
		free(domains);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		ssize_t payload_size;
		struct lttng_channel *channels = NULL;

		payload_size = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (payload_size < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -payload_size;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, channels,
				payload_size);
		free(channels);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;
		struct lttcomm_event_command_header cmd_header;
		size_t total_size;

		memset(&cmd_header, 0, sizeof(cmd_header));
		/* Extended infos are included at the end of events */
		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, cmd_ctx->lsm->u.list.channel_name,
				&events, &total_size);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		cmd_header.nb_events = nb_event;
		ret = setup_lttng_msg(cmd_ctx, events, total_size,
				&cmd_header, sizeof(cmd_header));
		free(events);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;
		void *sessions_payload;
		size_t payload_len;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
		payload_len = sizeof(struct lttng_session) * nr_sessions;
		sessions_payload = zmalloc(payload_len);

		if (!sessions_payload) {
			session_unlock_list();
			ret = -ENOMEM;
			goto setup_error;
		}

		cmd_list_lttng_sessions(sessions_payload,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
		session_unlock_list();

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, sessions_payload,
				payload_len);
		free(sessions_payload);

		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		int pending_ret;
		uint8_t pending_ret_byte;

		pending_ret = cmd_data_pending(cmd_ctx->session);

		/*
		 * This function may return 0 or 1 to indicate whether or not
		 * there is data pending. In case of error, it should return an
		 * LTTNG_ERR code. However, some code paths may still return
		 * a nondescript error code, which we handle by returning an
		 * "unknown" error.
		 */
		if (pending_ret == 0 || pending_ret == 1) {
			/*
			 * ret will be set to LTTNG_OK at the end of
			 * this function.
			 */
		} else if (pending_ret < 0) {
			ret = LTTNG_ERR_UNK;
			goto setup_error;
		} else {
			ret = pending_ret;
			goto setup_error;
		}

		pending_ret_byte = (uint8_t) pending_ret;

		/* 1 byte to return whether or not data is pending */
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx,
				&pending_ret_byte, 1);
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
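	/*
	 * Note: the pending state travels back to the client as a
	 * single-byte payload (0 = no data pending, 1 = data pending),
	 * separate from the command's return code.
	 */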
	case LTTNG_SNAPSHOT_ADD_OUTPUT:
	{
		struct lttcomm_lttng_output_id reply;

		ret = cmd_snapshot_add_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
		if (ret != LTTNG_OK) {
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &reply,
				sizeof(reply));
		if (ret < 0) {
			goto setup_error;
		}

		/* Copy output list into message payload */
		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_DEL_OUTPUT:
	{
		ret = cmd_snapshot_del_output(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_output.output);
		break;
	}
	case LTTNG_SNAPSHOT_LIST_OUTPUT:
	{
		ssize_t nb_output;
		struct lttng_snapshot_output *outputs = NULL;

		nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
		if (nb_output < 0) {
			ret = -nb_output;
			goto error;
		}

		assert((nb_output > 0 && outputs) || nb_output == 0);
		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, outputs,
				nb_output * sizeof(struct lttng_snapshot_output));
		free(outputs);

		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SNAPSHOT_RECORD:
	{
		ret = cmd_snapshot_record(cmd_ctx->session,
				&cmd_ctx->lsm->u.snapshot_record.output,
				cmd_ctx->lsm->u.snapshot_record.wait);
		break;
	}
	case LTTNG_CREATE_SESSION_SNAPSHOT:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds);
		free(uris);
		break;
	}
	case LTTNG_CREATE_SESSION_LIVE:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
				nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
		free(uris);
		break;
	}
	case LTTNG_SAVE_SESSION:
	{
		ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
				&cmd_ctx->creds);
		break;
	}
	case LTTNG_SET_SESSION_SHM_PATH:
	{
		ret = cmd_set_session_shm_path(cmd_ctx->session,
				cmd_ctx->lsm->u.set_shm_path.shm_path);
		break;
	}
	case LTTNG_REGENERATE_METADATA:
	{
		ret = cmd_regenerate_metadata(cmd_ctx->session);
		break;
	}
	case LTTNG_REGENERATE_STATEDUMP:
	{
		ret = cmd_regenerate_statedump(cmd_ctx->session);
		break;
	}
	case LTTNG_REGISTER_TRIGGER:
	{
		ret = cmd_register_trigger(cmd_ctx, sock,
				notification_thread_handle);
		break;
	}
	case LTTNG_UNREGISTER_TRIGGER:
	{
		ret = cmd_unregister_trigger(cmd_ctx, sock,
				notification_thread_handle);
		break;
	}
	case LTTNG_ROTATE_SESSION:
	{
		struct lttng_rotate_session_return rotate_return;

		DBG("Client rotate session \"%s\"", cmd_ctx->session->name);

		memset(&rotate_return, 0, sizeof(rotate_return));
		if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
			DBG("Kernel tracer version is not compatible with the rotation feature");
			ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
			goto error;
		}

		ret = cmd_rotate_session(cmd_ctx->session, &rotate_return);
		if (ret < 0) {
			ret = -ret;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &rotate_return,
				sizeof(rotate_return));
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_ROTATION_GET_INFO:
	{
		struct lttng_rotation_get_info_return get_info_return;

		memset(&get_info_return, 0, sizeof(get_info_return));
		ret = cmd_rotate_get_info(cmd_ctx->session, &get_info_return,
				cmd_ctx->lsm->u.get_rotation_info.rotation_id);
		if (ret < 0) {
			ret = -ret;
			goto error;
		}

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &get_info_return,
				sizeof(get_info_return));
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_ROTATION_SET_SCHEDULE:
	{
		bool set_schedule;
		enum lttng_rotation_schedule_type schedule_type;
		uint64_t value;

		if (cmd_ctx->session->kernel_session && !check_rotate_compatible()) {
			DBG("Kernel tracer version does not support session rotations");
			ret = LTTNG_ERR_ROTATION_WRONG_VERSION;
			goto error;
		}

		set_schedule = cmd_ctx->lsm->u.rotation_set_schedule.set == 1;
		schedule_type = (enum lttng_rotation_schedule_type) cmd_ctx->lsm->u.rotation_set_schedule.type;
		value = cmd_ctx->lsm->u.rotation_set_schedule.value;

		ret = cmd_rotation_set_schedule(cmd_ctx->session,
				set_schedule,
				schedule_type,
				value,
				notification_thread_handle);
		if (ret != LTTNG_OK) {
			goto error;
		}

		break;
	}
	case LTTNG_SESSION_LIST_ROTATION_SCHEDULES:
	{
		struct lttng_session_list_schedules_return schedules = {
			.periodic.set = !!cmd_ctx->session->rotate_timer_period,
			.periodic.value = cmd_ctx->session->rotate_timer_period,
			.size.set = !!cmd_ctx->session->rotate_size,
			.size.value = cmd_ctx->session->rotate_size,
		};

		ret = setup_lttng_msg_no_cmd_header(cmd_ctx, &schedules,
				sizeof(schedules));
		if (ret < 0) {
			goto setup_error;
		}

		ret = LTTNG_OK;
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}
error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg_no_cmd_header(cmd_ctx, NULL, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
		session_put(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}

init_setup_error:
	assert(!rcu_read_ongoing());
	return ret;
}
/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* We might hit an error path before this is created. */
	lttng_poll_init(&events);

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/* lttng health client socket path permissions */
		ret = chown(config.health_unix_sock_path.value, 0,
				utils_get_group_id(config.tracing_group_name.value));
		if (ret < 0) {
			ERR("Unable to set group on %s", config.health_unix_sock_path.value);
			goto error;
		}

		ret = chmod(config.health_unix_sock_path.value,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", config.health_unix_sock_path.value);
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	sessiond_notify_ready();

	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return NULL;
}
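/*
 * Note: the health reply above packs one bit per sessiond thread type:
 * bit i of reply.ret_code is set when health_check_state() flags
 * thread type i as unhealthy (it returns 0 on bad health), so a reply
 * of zero means every registered thread passed its check.
 */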
/*
 * This thread manages all client requests using the unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	rcu_register_thread();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);

	health_code_update();

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	ret = sem_post(&load_info->message_thread_ready);
	if (ret) {
		PERROR("sem_post message_thread_ready");
		goto error;
	}

	/*
	 * Wait until all support threads are initialized before accepting
	 * commands.
	 */
	while (uatomic_read(&lttng_sessiond_ready) != 0) {
		/*
		 * If a support thread failed to launch, it may signal that
		 * we must exit and the sessiond would never be marked as
		 * ready.
		 *
		 * The timeout is set to 1ms, which serves as a way to
		 * pace down this check.
		 */
		ret = sessiond_wait_for_quit_pipe(1000);
		if (ret > 0) {
			goto exit;
		}
	}
	/*
	 * This barrier is paired with the one in sessiond_notify_ready() to
	 * ensure that loads accessing data initialized by the other threads,
	 * on which this thread was waiting, are not performed before this point.
	 *
	 * Note that this could be a 'read' memory barrier, but a full barrier
	 * is used in case the code changes. The performance implications of
	 * this choice are minimal since this is a slow path.
	 */
	cmm_smp_mb();

	/* This testpoint is after we signal readiness to the parent. */
	if (testpoint(sessiond_thread_manage_clients)) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_clients_before_loop)) {
		goto error;
	}

	health_code_update();

	/* Set state as running. */
	sessiond_set_client_thread_state(true);

	while (1) {
		const struct cmd_completion_handler *cmd_completion_handler;

		DBG("Accepting client command ...");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & LPOLLIN) {
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update();

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update();

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update();

		// TODO: Validate cmd_ctx including sanity check for
		// security purposes.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * needed information for the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		cmd_completion_handler = cmd_pop_completion_handler();
		if (cmd_completion_handler) {
			enum lttng_error_code completion_code;

			completion_code = cmd_completion_handler->run(
					cmd_completion_handler->data);
			if (completion_code != LTTNG_OK) {
				clean_command_ctx(&cmd_ctx);
				continue;
			}
		}

		health_code_update();

		DBG("Sending response (size: %d, retcode: %s (%d))",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code),
				cmd_ctx->llm->ret_code);
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update();
	}

exit:
error:
	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
	unlink(config.client_unix_sock_path.value);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}

	if (err) {
		ERR("Health error occurred in %s", __func__);
	}

	health_unregister(health_sessiond);

	DBG("Client thread dying");

	rcu_unregister_thread();

	/*
	 * Since we are creating the consumer threads, we own them, so we need
	 * to join them before our thread exits.
	 */
	ret = join_consumer_thread(&kconsumer_data);
	if (ret) {
		PERROR("join_consumer");
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret) {
		PERROR("join_consumer ust32");
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret) {
		PERROR("join_consumer ust64");
	}

	/* Set state as non-running. */
	sessiond_set_client_thread_state(false);
	return NULL;
}
static int string_match(const char *str1, const char *str2)
{
	return (str1 && str2) && !strcmp(str1, str2);
}
/*
 * Take an option from the getopt output and set it in the right variable to be
 * used later.
 *
 * Return 0 on success else a negative value.
 */
static int set_option(int opt, const char *arg, const char *optname)
{
	int ret = 0;

	if (string_match(optname, "client-sock") || opt == 'c') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-c, --client-sock");
		} else {
			config_string_set(&config.client_unix_sock_path,
					strdup(arg));
			if (!config.client_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "apps-sock") || opt == 'a') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-a, --apps-sock");
		} else {
			config_string_set(&config.apps_unix_sock_path,
					strdup(arg));
			if (!config.apps_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "daemonize") || opt == 'd') {
		config.daemonize = true;
	} else if (string_match(optname, "background") || opt == 'b') {
		config.background = true;
	} else if (string_match(optname, "group") || opt == 'g') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-g, --group");
		} else {
			config_string_set(&config.tracing_group_name,
					strdup(arg));
			if (!config.tracing_group_name.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "help") || opt == 'h') {
		ret = utils_show_help(8, "lttng-sessiond", help_msg);
		if (ret) {
			ERR("Cannot show --help for `lttng-sessiond`");
		}
		exit(ret ? EXIT_FAILURE : EXIT_SUCCESS);
	} else if (string_match(optname, "version") || opt == 'V') {
		fprintf(stdout, "%s\n", VERSION);
		exit(EXIT_SUCCESS);
	} else if (string_match(optname, "sig-parent") || opt == 'S') {
		config.sig_parent = true;
	} else if (string_match(optname, "kconsumerd-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--kconsumerd-err-sock");
		} else {
			config_string_set(&config.kconsumerd_err_unix_sock_path,
					strdup(arg));
			if (!config.kconsumerd_err_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "kconsumerd-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--kconsumerd-cmd-sock");
		} else {
			config_string_set(&config.kconsumerd_cmd_unix_sock_path,
					strdup(arg));
			if (!config.kconsumerd_cmd_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd64-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd64-err-sock");
		} else {
			config_string_set(&config.consumerd64_err_unix_sock_path,
					strdup(arg));
			if (!config.consumerd64_err_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd64-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd64-cmd-sock");
		} else {
			config_string_set(&config.consumerd64_cmd_unix_sock_path,
					strdup(arg));
			if (!config.consumerd64_cmd_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd32-err-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd32-err-sock");
		} else {
			config_string_set(&config.consumerd32_err_unix_sock_path,
					strdup(arg));
			if (!config.consumerd32_err_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "ustconsumerd32-cmd-sock")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--ustconsumerd32-cmd-sock");
		} else {
			config_string_set(&config.consumerd32_cmd_unix_sock_path,
					strdup(arg));
			if (!config.consumerd32_cmd_unix_sock_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "no-kernel")) {
		config.no_kernel = true;
	} else if (string_match(optname, "quiet") || opt == 'q') {
		config.quiet = true;
	} else if (string_match(optname, "verbose") || opt == 'v') {
		/* Verbose level can increase using multiple -v */
		if (arg) {
			/* Value obtained from config file */
			config.verbose = config_parse_value(arg);
		} else {
			/* -v used on command line */
			config.verbose++;
		}
		/* Clamp value to [0, 3] */
		config.verbose = config.verbose < 0 ? 0 :
			(config.verbose <= 3 ? config.verbose : 3);
	} else if (string_match(optname, "verbose-consumer")) {
		if (arg) {
			config.verbose_consumer = config_parse_value(arg);
		} else {
			config.verbose_consumer++;
		}
	} else if (string_match(optname, "consumerd32-path")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd32-path");
		} else {
			config_string_set(&config.consumerd32_bin_path,
					strdup(arg));
			if (!config.consumerd32_bin_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "consumerd32-libdir")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd32-libdir");
		} else {
			config_string_set(&config.consumerd32_lib_dir,
					strdup(arg));
			if (!config.consumerd32_lib_dir.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "consumerd64-path")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd64-path");
		} else {
			config_string_set(&config.consumerd64_bin_path,
					strdup(arg));
			if (!config.consumerd64_bin_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "consumerd64-libdir")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--consumerd64-libdir");
		} else {
			config_string_set(&config.consumerd64_lib_dir,
					strdup(arg));
			if (!config.consumerd64_lib_dir.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "pidfile") || opt == 'p') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-p, --pidfile");
		} else {
			config_string_set(&config.pid_file_path, strdup(arg));
			if (!config.pid_file_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "agent-tcp-port")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--agent-tcp-port");
		} else {
			unsigned long v;

			errno = 0;
			v = strtoul(arg, NULL, 0);
			if (errno != 0 || !isdigit(arg[0])) {
				ERR("Wrong value in --agent-tcp-port parameter: %s", arg);
				return -1;
			}
			if (v == 0 || v >= 65535) {
				ERR("Port overflow in --agent-tcp-port parameter: %s", arg);
				return -1;
			}
			config.agent_tcp_port.begin = config.agent_tcp_port.end = (int) v;
			DBG3("Agent TCP port set to non default: %i", (int) v);
		}
	} else if (string_match(optname, "load") || opt == 'l') {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"-l, --load");
		} else {
			config_string_set(&config.load_session_path, strdup(arg));
			if (!config.load_session_path.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "kmod-probes")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--kmod-probes");
		} else {
			config_string_set(&config.kmod_probes_list, strdup(arg));
			if (!config.kmod_probes_list.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "extra-kmod-probes")) {
		if (!arg || *arg == '\0') {
			ret = -EINVAL;
			goto end;
		}
		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
					"--extra-kmod-probes");
		} else {
			config_string_set(&config.kmod_extra_probes_list,
					strdup(arg));
			if (!config.kmod_extra_probes_list.value) {
				ret = -ENOMEM;
				PERROR("strdup");
			}
		}
	} else if (string_match(optname, "config") || opt == 'f') {
		/* This is handled in set_options() thus silent skip. */
	} else {
		/* Unknown option or other error.
		 * Error is printed by getopt, just return */
		ret = -1;
	}

end:
	if (ret == -EINVAL) {
		const char *opt_name = "unknown";
		int i;

		for (i = 0; i < sizeof(long_options) / sizeof(struct option);
				i++) {
			if (opt == long_options[i].val) {
				opt_name = long_options[i].name;
				break;
			}
		}

		WARN("Invalid argument provided for option \"%s\", using default value.",
				opt_name);
	}

	return ret;
}
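/*
 * Note: every path or name option above is ignored, with a warning,
 * when the binary runs setuid/setgid; an unprivileged invoker can
 * therefore not redirect the daemon's sockets, consumer binaries or
 * probe lists through the command line.
 */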
/*
 * config_entry_handler_cb used to handle options read from a config file.
 * See config_entry_handler_cb comment in common/config/session-config.h for
 * the return value conventions.
 */
static int config_entry_handler(const struct config_entry *entry, void *unused)
{
	int ret = 0, i;

	if (!entry || !entry->name || !entry->value) {
		ret = -EINVAL;
		goto end;
	}

	/* Check if the option is to be ignored */
	for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
		if (!strcmp(entry->name, config_ignore_options[i])) {
			goto end;
		}
	}

	for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
			i++) {

		/* Ignore if not fully matched. */
		if (strcmp(entry->name, long_options[i].name)) {
			continue;
		}

		/*
		 * If the option takes no argument on the command line,
		 * we have to check if the value is "true". We support
		 * non-zero numeric values, true, on and yes.
		 */
		if (!long_options[i].has_arg) {
			ret = config_parse_value(entry->value);
			if (ret <= 0) {
				if (ret) {
					WARN("Invalid configuration value \"%s\" for option %s",
							entry->value, entry->name);
				}
				/* False, skip boolean config option. */
				goto end;
			}
		}

		ret = set_option(long_options[i].val, entry->value, entry->name);
		goto end;
	}

	WARN("Unrecognized option \"%s\" in daemon configuration file.",
			entry->name);

end:
	return ret;
}
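
/*
 * For illustration (assumed file syntax, consistent with the handler above):
 * configuration entries use the long option names as keys, and a boolean
 * option (has_arg == 0) goes through config_parse_value(), so "1", "yes",
 * "true" or "on" enables it. For example:
 *
 *	[sessiond]
 *	daemonize=yes
 *	agent-tcp-port=5345
 *
 * would be dispatched to set_option() exactly as --daemonize and
 * --agent-tcp-port=5345 would be on the command line.
 */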
/*
 * daemon configuration loading and argument parsing
 */
static int set_options(int argc, char **argv)
{
	int ret = 0, c = 0, option_index = 0;
	int orig_optopt = optopt, orig_optind = optind;
	char *optstring;
	const char *config_path = NULL;

	optstring = utils_generate_optstring(long_options,
			sizeof(long_options) / sizeof(struct option));
	if (!optstring) {
		ret = -ENOMEM;
		goto end;
	}

	/* Check for the --config option */
	while ((c = getopt_long(argc, argv, optstring, long_options,
					&option_index)) != -1) {
		if (c == '?') {
			ret = -EINVAL;
			goto end;
		} else if (c != 'f') {
			/* if not equal to --config option. */
			continue;
		}

		if (lttng_is_setuid_setgid()) {
			WARN("Getting '%s' argument from setuid/setgid binary refused for security reasons.",
				"-f, --config");
		} else {
			config_path = utils_expand_path(optarg);
			if (!config_path) {
				ERR("Failed to resolve path: %s", optarg);
			}
		}
	}

	ret = config_get_section_entries(config_path, config_section_name,
			config_entry_handler, NULL);
	if (ret) {
		if (ret > 0) {
			ERR("Invalid configuration option at line %i", ret);
			ret = -1;
		}
		goto end;
	}

	/* Reset getopt's global state */
	optopt = orig_optopt;
	optind = orig_optind;
	while (1) {
		option_index = -1;
		/*
		 * getopt_long() will not set option_index if it encounters a
		 * short option.
		 */
		c = getopt_long(argc, argv, optstring, long_options,
				&option_index);
		if (c == -1) {
			break;
		}

		/*
		 * Pass NULL as the long option name if getopt_long() left the
		 * index unset.
		 */
		ret = set_option(c, optarg,
				option_index < 0 ? NULL :
					long_options[option_index].name);
		if (ret < 0) {
			break;
		}
	}

end:
	free(optstring);
	return ret;
}
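
/*
 * Net effect of the two passes above (illustrative): the configuration file
 * is parsed first, then getopt state is reset and the command line is parsed,
 * so command-line arguments win over file entries, which were themselves
 * applied on top of the environment (sessiond_config_apply_env_config() runs
 * before set_options() in main()). For example, assuming a file that sets
 * agent-tcp-port=5345:
 *
 *	lttng-sessiond --config /path/to/file --agent-tcp-port=6000
 *
 * ends up with the agent TCP port set to 6000.
 */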
/*
 * Create the two sockets needed by the daemon.
 *	apps_sock - The communication socket for all UST apps.
 *	client_sock - The communication socket for the CLI tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(config.client_unix_sock_path.value);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", config.client_unix_sock_path.value);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(config.client_unix_sock_path.value,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", config.client_unix_sock_path.value);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(config.apps_unix_sock_path.value);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", config.apps_unix_sock_path.value);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(config.apps_unix_sock_path.value,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", config.apps_unix_sock_path.value);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	(void) umask(old_umask);
	return ret;
}
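
/*
 * Note on the modes used above: the symbolic constants map to the usual
 * octal modes, e.g.
 *
 *	S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP                     == 0660
 *	S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH == 0666
 *
 * The umask is cleared first so that the chmod() calls, not the inherited
 * umask, decide the effective permissions of the freshly-bound sockets.
 */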
/*
 * Create lockfile using the rundir and return its fd.
 */
static int create_lockfile(void)
{
	return utils_create_lock_file(config.lock_file_path.value);
}
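
/*
 * utils_create_lock_file() lives in common/utils.c. As a rough sketch of the
 * technique (assumptions: an open()+fcntl() write-lock scheme; the real
 * helper may differ in details), holding a lock file boils down to:
 */
#if 0
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>

static int create_lock_file_sketch(const char *path)
{
	int fd;
	struct flock lock = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
	};

	fd = open(path, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		return -1;
	}
	/* Fails if another process already holds the write lock. */
	if (fcntl(fd, F_SETLK, &lock) < 0) {
		(void) close(fd);
		return -1;
	}
	/* Keep fd open: the lock is released on close(). */
	return fd;
}
#endif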
/*
 * Check if the global socket is available and if a daemon is answering at the
 * other side. If so, an error is returned.
 *
 * Also attempts to create and hold the lock file.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	lockfile_fd = create_lockfile();
	if (lockfile_fd < 0) {
		return -EEXIST;
	}
	return 0;
}
static void sessiond_cleanup_lock_file(void)
{
	int ret;

	/*
	 * Cleanup lock file by deleting it and finally closing it, which
	 * releases the file system lock.
	 */
	if (lockfile_fd >= 0) {
		ret = remove(config.lock_file_path.value);
		if (ret < 0) {
			PERROR("remove lock file");
		}
		ret = close(lockfile_fd);
		if (ret < 0) {
			PERROR("close lock file");
		}
	}
}
/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	gid = utils_get_group_id(config.tracing_group_name.value);

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/*
	 * Ensure all applications and tracing group can search the run
	 * dir. Allow everyone to read the directory, since it does not
	 * buy us anything to hide its content.
	 */
	ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(config.client_unix_sock_path.value, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", config.client_unix_sock_path.value);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer compat32 error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

	return ret;
}
/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(void)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", config.rundir.value);

	ret = mkdir(config.rundir.value, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", config.rundir.value);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}
/*
 * Setup sockets and directory needed by the consumerds' communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data)
{
	int ret;
	char *path = NULL;

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		path = config.kconsumerd_path.value;
		break;
	case LTTNG_CONSUMER64_UST:
		path = config.consumerd64_path.value;
		break;
	case LTTNG_CONSUMER32_UST:
		path = config.consumerd32_path.value;
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}
	assert(path);

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
	if (ret < 0 && errno != EEXIST) {
		PERROR("mkdir");
		ERR("Failed to create %s", path);
		goto error;
	}
	if (is_root) {
		ret = chown(path, 0,
				utils_get_group_id(config.tracing_group_name.value));
		if (ret < 0) {
			ERR("Unable to set group on %s", path);
			PERROR("chown");
			goto error;
		}
	}

	/* Create the consumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	ret = utils_set_fd_cloexec(consumer_data->err_sock);
	if (ret < 0) {
		PERROR("utils_set_fd_cloexec");
		/* continue anyway */
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}
/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	case SIGUSR1:
		CMM_STORE_SHARED(recv_child_signal, 1);
		break;
	default:
		break;
	}
}

/*
 * Setup signal handler for:
 *	SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_mask = sigset;
	sa.sa_flags = 0;

	sa.sa_handler = sighandler;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	sa.sa_handler = SIG_IGN;
	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");

	return ret;
}
/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
static int write_pidfile(void)
{
	return utils_create_pid_file(getpid(), config.pid_file_path.value);
}
static int set_clock_plugin_env(void)
{
	int ret = 0;
	char *env_value = NULL;

	if (!config.lttng_ust_clock_plugin.value) {
		goto end;
	}

	ret = asprintf(&env_value, "LTTNG_UST_CLOCK_PLUGIN=%s",
			config.lttng_ust_clock_plugin.value);
	if (ret < 0) {
		PERROR("asprintf");
		goto end;
	}

	ret = putenv(env_value);
	if (ret) {
		free(env_value);
		PERROR("putenv of LTTNG_UST_CLOCK_PLUGIN");
		goto end;
	}

	DBG("Updated LTTNG_UST_CLOCK_PLUGIN environment variable to \"%s\"",
			config.lttng_ust_clock_plugin.value);
end:
	return ret;
}
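
/*
 * Note: putenv() keeps the given string in the environment rather than
 * copying it, which is why env_value is intentionally not freed on the
 * success path above. A setenv()-based variant, which does copy, would be
 * (illustrative, not compiled):
 */
#if 0
#include <stdlib.h>

static int set_clock_plugin_env_setenv_variant(void)
{
	if (!config.lttng_ust_clock_plugin.value) {
		return 0;
	}
	/* setenv() copies the string; no allocation to manage. */
	return setenv("LTTNG_UST_CLOCK_PLUGIN",
			config.lttng_ust_clock_plugin.value, 1 /* overwrite */);
}
#endif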
static void destroy_all_sessions_and_wait(void)
{
	struct ltt_session *session, *tmp;
	struct ltt_session_list *session_list;

	session_list = session_get_list();
	DBG("Initiating destruction of all sessions");

	if (!session_list) {
		return;
	}

	/*
	 * Ensure that the client thread is no longer accepting new commands,
	 * which could cause new sessions to be created.
	 */
	sessiond_wait_client_thread_stopped();

	session_lock_list();
	/* Initiate the destruction of all sessions. */
	cds_list_for_each_entry_safe(session, tmp,
			&session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}

		session_lock(session);
		if (session->destroyed) {
			goto unlock_session;
		}
		(void) cmd_destroy_session(session,
				notification_thread_handle);
	unlock_session:
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();

	/* Wait for the destruction of all sessions to complete. */
	DBG("Waiting for the destruction of all sessions to complete");
	session_list_wait_empty();
	DBG("Destruction of all sessions completed");
}
int main(int argc, char **argv)
{
	int ret = 0, retval = 0;
	void *status;
	const char *env_app_timeout;
	struct lttng_pipe *ust32_channel_monitor_pipe = NULL,
			*ust64_channel_monitor_pipe = NULL,
			*kernel_channel_monitor_pipe = NULL;
	bool notification_thread_launched = false;
	bool rotation_thread_launched = false;
	bool timer_thread_launched = false;
	struct lttng_thread *ht_cleanup_thread = NULL;
	struct timer_thread_parameters timer_thread_ctx;
	/* Queue of rotation jobs populated by the sessiond-timer. */
	struct rotation_thread_timer_queue *rotation_timer_queue = NULL;
	sem_t notification_thread_ready;

	init_kernel_workarounds();

	rcu_register_thread();

	if (set_signal_handler()) {
		retval = -1;
		goto exit_set_signal_handler;
	}

	if (timer_signal_init()) {
		retval = -1;
		goto exit_set_signal_handler;
	}

	page_size = sysconf(_SC_PAGESIZE);
	if (page_size < 0) {
		PERROR("sysconf _SC_PAGESIZE");
		page_size = LONG_MAX;
		WARN("Fallback page size to %ld", page_size);
	}

	ret = sessiond_config_init(&config);
	if (ret) {
		retval = -1;
		goto exit_set_signal_handler;
	}

	/*
	 * Init config from environment variables.
	 * Command line options override the environment, as documented, so
	 * the environment is applied first.
	 */
	sessiond_config_apply_env_config(&config);

	/*
	 * Parse arguments and load the daemon configuration file.
	 *
	 * We have an exit_options exit path to free memory reserved by
	 * set_options. This is needed because the rest of sessiond_cleanup()
	 * depends on ht_cleanup_thread, which depends on lttng_daemonize, which
	 * depends on set_options.
	 */
	if (set_options(argc, argv)) {
		retval = -1;
		goto exit_options;
	}

	/*
	 * Resolve all paths received as arguments, configuration option, or
	 * through environment variable as absolute paths. This is necessary
	 * since daemonizing changes the sessiond's current working directory
	 * to '/'.
	 */
	ret = sessiond_config_resolve_paths(&config);
	if (ret) {
		goto exit_options;
	}

	/* Apply config. */
	lttng_opt_verbose = config.verbose;
	lttng_opt_quiet = config.quiet;
	kconsumer_data.err_unix_sock_path =
			config.kconsumerd_err_unix_sock_path.value;
	kconsumer_data.cmd_unix_sock_path =
			config.kconsumerd_cmd_unix_sock_path.value;
	ustconsumer32_data.err_unix_sock_path =
			config.consumerd32_err_unix_sock_path.value;
	ustconsumer32_data.cmd_unix_sock_path =
			config.consumerd32_cmd_unix_sock_path.value;
	ustconsumer64_data.err_unix_sock_path =
			config.consumerd64_err_unix_sock_path.value;
	ustconsumer64_data.cmd_unix_sock_path =
			config.consumerd64_cmd_unix_sock_path.value;
	set_clock_plugin_env();

	sessiond_config_log(&config);

	if (create_lttng_rundir()) {
		retval = -1;
		goto exit_options;
	}

	/* Abort launch if a session daemon is already running. */
	if (check_existing_daemon()) {
		ERR("A session daemon is already running.");
		retval = -1;
		goto exit_options;
	}

	/* Daemonize */
	if (config.daemonize || config.background) {
		int i;

		ret = lttng_daemonize(&child_ppid, &recv_child_signal,
			!config.background);
		if (ret < 0) {
			retval = -1;
			goto exit_options;
		}

		/*
		 * We are in the child. Make sure all other file descriptors are
		 * closed, in case we are called with more opened file
		 * descriptors than the standard ones and the lock file.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			if (i == lockfile_fd) {
				continue;
			}
			(void) close(i);
		}
	}

	if (run_as_create_worker(argv[0]) < 0) {
		goto exit_create_run_as_worker_cleanup;
	}

	/*
	 * Starting from here, we can create threads. This needs to be after
	 * lttng_daemonize due to RCU.
	 */

	/*
	 * Initialize the health check subsystem. This call should set the
	 * appropriate time values.
	 */
	health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
	if (!health_sessiond) {
		PERROR("health_app_create error");
		retval = -1;
		goto exit_health_sessiond_cleanup;
	}

	/* Create thread to clean up RCU hash tables */
	ht_cleanup_thread = launch_ht_cleanup_thread();
	if (!ht_cleanup_thread) {
		retval = -1;
		goto exit_ht_cleanup;
	}

	/* Create thread quit pipe */
	if (sessiond_init_thread_quit_pipe()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();
	if (is_root) {
		/* Create global run dir with root access */

		kernel_channel_monitor_pipe = lttng_pipe_open(0);
		if (!kernel_channel_monitor_pipe) {
			ERR("Failed to create kernel consumer channel monitor pipe");
			retval = -1;
			goto exit_init_data;
		}
		kconsumer_data.channel_monitor_pipe =
				lttng_pipe_release_writefd(
					kernel_channel_monitor_pipe);
		if (kconsumer_data.channel_monitor_pipe < 0) {
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	ust32_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust32_channel_monitor_pipe) {
		ERR("Failed to create 32-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer32_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust32_channel_monitor_pipe);
	if (ustconsumer32_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * The rotation_thread_timer_queue structure is shared between the
	 * sessiond timer thread and the rotation thread. The main thread keeps
	 * its ownership and destroys it when both threads have been joined.
	 */
	rotation_timer_queue = rotation_thread_timer_queue_create();
	if (!rotation_timer_queue) {
		retval = -1;
		goto exit_init_data;
	}
	timer_thread_ctx.rotation_thread_job_queue = rotation_timer_queue;

	ust64_channel_monitor_pipe = lttng_pipe_open(0);
	if (!ust64_channel_monitor_pipe) {
		ERR("Failed to create 64-bit user space consumer channel monitor pipe");
		retval = -1;
		goto exit_init_data;
	}
	ustconsumer64_data.channel_monitor_pipe = lttng_pipe_release_writefd(
			ust64_channel_monitor_pipe);
	if (ustconsumer64_data.channel_monitor_pipe < 0) {
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	if (ust_app_ht_alloc()) {
		ERR("Failed to allocate UST app hash table");
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * Initialize agent app hash table. We allocate the hash table here
	 * since cleanup() can get called after this point.
	 */
	if (agent_app_ht_alloc()) {
		ERR("Failed to allocate Agent app hash table");
		retval = -1;
		goto exit_init_data;
	}

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		if (set_consumer_sockets(&kconsumer_data)) {
			retval = -1;
			goto exit_init_data;
		}

		/* Setup kernel tracer */
		if (!config.no_kernel) {
			init_kernel_tracer();
			if (kernel_tracer_fd >= 0) {
				ret = syscall_init_table();
				if (ret < 0) {
					ERR("Unable to populate syscall table. "
						"Syscall tracing won't work "
						"for this session daemon.");
				}
			}
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	if (set_consumer_sockets(&ustconsumer64_data)) {
		retval = -1;
		goto exit_init_data;
	}

	if (set_consumer_sockets(&ustconsumer32_data)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the needed unix socket */
	if (init_daemon_socket()) {
		retval = -1;
		goto exit_init_data;
	}

	/* Set credentials to socket */
	if (is_root && set_permissions(config.rundir.value)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (config.sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !config.no_kernel) {
		if (utils_create_pipe_cloexec(kernel_poll_pipe)) {
			retval = -1;
			goto exit_init_data;
		}
	}

	/* Setup the thread apps communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe)) {
		retval = -1;
		goto exit_init_data;
	}

	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfcq_init(&ust_cmd_queue.head, &ust_cmd_queue.tail);

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		config.app_socket_timeout = atoi(env_app_timeout);
	} else {
		config.app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	ret = write_pidfile();
	if (ret) {
		ERR("Error in write_pidfile");
		retval = -1;
		goto exit_init_data;
	}

	/* Initialize communication library */
	lttcomm_init();
	/* Initialize TCP timeout values */
	lttcomm_inet_init();

	if (load_session_init_data(&load_info) < 0) {
		retval = -1;
		goto exit_init_data;
	}
	load_info->path = config.load_session_path.value;

	/* Create health-check thread. */
	ret = pthread_create(&health_thread, default_pthread_attr(),
			thread_manage_health, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create health");
		retval = -1;
		goto exit_health;
	}

	/*
	 * The rotation thread needs the notification thread to be ready before
	 * creating the rotate_notification_channel, so we use this semaphore as
	 * a rendez-vous point.
	 */
	sem_init(&notification_thread_ready, 0, 0);
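
	/*
	 * Rendez-vous sketch (illustrative, based on the comment above): the
	 * notification thread is expected to sem_post() on
	 * notification_thread_ready once its channel is operational, while
	 * the rotation thread blocks in sem_wait() on the same semaphore
	 * before opening rotate_notification_channel. Initializing the count
	 * to 0 makes that first sem_wait() block until the post happens.
	 */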
	/* notification_thread_data acquires the pipes' read side. */
	notification_thread_handle = notification_thread_handle_create(
			ust32_channel_monitor_pipe,
			ust64_channel_monitor_pipe,
			kernel_channel_monitor_pipe,
			&notification_thread_ready);
	if (!notification_thread_handle) {
		retval = -1;
		ERR("Failed to create notification thread shared data");
		stop_threads();
		goto exit_notification;
	}

	/* Create notification thread. */
	ret = pthread_create(&notification_thread, default_pthread_attr(),
			thread_notification, notification_thread_handle);
	if (ret) {
		errno = ret;
		PERROR("pthread_create notification");
		retval = -1;
		stop_threads();
		goto exit_notification;
	}
	notification_thread_launched = true;

	/* Create timer thread. */
	ret = pthread_create(&timer_thread, default_pthread_attr(),
			timer_thread_func, &timer_thread_ctx);
	if (ret) {
		errno = ret;
		PERROR("pthread_create timer");
		retval = -1;
		stop_threads();
		goto exit_notification;
	}
	timer_thread_launched = true;

	/* rotation_thread_data acquires the pipes' read side. */
	rotation_thread_handle = rotation_thread_handle_create(
			rotation_timer_queue,
			notification_thread_handle,
			&notification_thread_ready);
	if (!rotation_thread_handle) {
		retval = -1;
		ERR("Failed to create rotation thread shared data");
		stop_threads();
		goto exit_rotation;
	}

	/* Create rotation thread. */
	ret = pthread_create(&rotation_thread, default_pthread_attr(),
			thread_rotation, rotation_thread_handle);
	if (ret) {
		errno = ret;
		PERROR("pthread_create rotation");
		retval = -1;
		stop_threads();
		goto exit_rotation;
	}
	rotation_thread_launched = true;

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, default_pthread_attr(),
			thread_manage_clients, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create clients");
		retval = -1;
		stop_threads();
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, default_pthread_attr(),
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create dispatch");
		retval = -1;
		stop_threads();
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, default_pthread_attr(),
			thread_registration_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create registration");
		retval = -1;
		stop_threads();
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, default_pthread_attr(),
			thread_manage_apps, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create apps");
		retval = -1;
		stop_threads();
		goto exit_apps;
	}

	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, default_pthread_attr(),
			ust_thread_manage_notify, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create notify");
		retval = -1;
		stop_threads();
		goto exit_apps_notify;
	}

	/* Create agent registration thread. */
	ret = pthread_create(&agent_reg_thread, default_pthread_attr(),
			agent_thread_manage_registration, (void *) NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create agent");
		retval = -1;
		stop_threads();
		goto exit_agent_reg;
	}

	/* Don't start this thread if kernel tracing is not requested or not root */
	if (is_root && !config.no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, default_pthread_attr(),
				thread_manage_kernel, (void *) NULL);
		if (ret) {
			errno = ret;
			PERROR("pthread_create kernel");
			retval = -1;
			stop_threads();
			goto exit_kernel;
		}
	}

	/* Create session loading thread. */
	ret = pthread_create(&load_session_thread, default_pthread_attr(),
			thread_load_session, load_info);
	if (ret) {
		errno = ret;
		PERROR("pthread_create load_session_thread");
		retval = -1;
		stop_threads();
		goto exit_load_session;
	}

	/*
	 * This is where we start awaiting program completion (e.g. through
	 * signal that asks threads to teardown).
	 */

	ret = pthread_join(load_session_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join load_session_thread");
		retval = -1;
	}

	/* Initiate teardown once activity occurs on the quit pipe. */
	sessiond_wait_for_quit_pipe(-1U);
	destroy_all_sessions_and_wait();
exit_load_session:

	if (is_root && !config.no_kernel) {
		ret = pthread_join(kernel_thread, &status);
		if (ret) {
			errno = ret;
			PERROR("pthread_join");
			retval = -1;
		}
	}
exit_kernel:

	ret = pthread_join(agent_reg_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join agent");
		retval = -1;
	}
exit_agent_reg:

	ret = pthread_join(apps_notify_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps notify");
		retval = -1;
	}
exit_apps_notify:

	ret = pthread_join(apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join apps");
		retval = -1;
	}
exit_apps:

	ret = pthread_join(reg_apps_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_reg_apps:

	/*
	 * Join dispatch thread after joining reg_apps_thread to ensure
	 * we don't leak applications in the queue.
	 */
	ret = pthread_join(dispatch_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_dispatch:

	ret = pthread_join(client_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join");
		retval = -1;
	}
exit_client:
exit_rotation:
exit_notification:
	sem_destroy(&notification_thread_ready);
	ret = pthread_join(health_thread, &status);
	if (ret) {
		errno = ret;
		PERROR("pthread_join health thread");
		retval = -1;
	}
exit_health:

	lttng_thread_list_shutdown_orphans();
exit_init_data:
	/*
	 * Wait for all pending call_rcu work to complete before tearing
	 * down data structures. call_rcu worker may be trying to
	 * perform lookups in those structures.
	 */
	rcu_barrier();

	/*
	 * sessiond_cleanup() is called when no other thread is running, except
	 * the ht_cleanup thread, which is needed to destroy the hash tables.
	 */
	rcu_thread_online();
	sessiond_cleanup();

	/*
	 * Ensure all prior call_rcu are done. call_rcu callbacks may push
	 * hash tables to the ht_cleanup thread. Therefore, we ensure that
	 * the queue is empty before shutting down the clean-up thread.
	 */
	rcu_barrier();

	/*
	 * The teardown of the notification system is performed after the
	 * session daemon's teardown in order to allow it to be notified
	 * of the active sessions and channels at the moment of the teardown.
	 */
	if (notification_thread_handle) {
		if (notification_thread_launched) {
			notification_thread_command_quit(
					notification_thread_handle);
			ret = pthread_join(notification_thread, &status);
			if (ret) {
				errno = ret;
				PERROR("pthread_join notification thread");
				retval = -1;
			}
		}
		notification_thread_handle_destroy(notification_thread_handle);
	}

	if (rotation_thread_handle) {
		if (rotation_thread_launched) {
			ret = pthread_join(rotation_thread, &status);
			if (ret) {
				errno = ret;
				PERROR("pthread_join rotation thread");
				retval = -1;
			}
		}
		rotation_thread_handle_destroy(rotation_thread_handle);
	}

	if (timer_thread_launched) {
		timer_exit();
		ret = pthread_join(timer_thread, &status);
		if (ret) {
			errno = ret;
			PERROR("pthread_join timer thread");
			retval = -1;
		}
	}

	if (ht_cleanup_thread) {
		lttng_thread_shutdown(ht_cleanup_thread);
		lttng_thread_put(ht_cleanup_thread);
	}

	/*
	 * After the rotation and timer threads have quit, we can safely
	 * destroy the rotation_timer_queue.
	 */
	rotation_thread_timer_queue_destroy(rotation_timer_queue);

	rcu_thread_offline();
	rcu_unregister_thread();

	lttng_pipe_destroy(ust32_channel_monitor_pipe);
	lttng_pipe_destroy(ust64_channel_monitor_pipe);
	lttng_pipe_destroy(kernel_channel_monitor_pipe);
exit_ht_cleanup:

	health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:
exit_create_run_as_worker_cleanup:

exit_options:
	sessiond_cleanup_lock_file();
	sessiond_cleanup_options();

exit_set_signal_handler:
	if (!retval) {
		exit(EXIT_SUCCESS);
	} else {
		exit(EXIT_FAILURE);
	}
}