/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>
#include <common/daemonize.h>
#include <common/config/config.h>

#include "lttng-sessiond.h"
#include "buffer-registry.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "health-sessiond.h"
#include "testpoint.h"
#include "ust-thread.h"
#include "jul-thread.h"
#include "load-session-thread.h"

#define CONSUMERD_FILE	"lttng-consumerd"
static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
static int tracing_group_name_override;
static char *opt_pidfile;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon, opt_background;
static int opt_no_kernel;
static char *opt_load_session_path;
static pid_t ppid;		/* Parent PID for --sig-parent option */
static pid_t child_ppid;	/* Internal parent PID use with daemonize. */

static int lockfile_fd = -1;

/* Set to 1 when a SIGUSR1 signal is received. */
static int recv_child_signal;
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Command line options */
static const struct option long_options[] = {
	{ "client-sock", 1, 0, 'c' },
	{ "apps-sock", 1, 0, 'a' },
	{ "kconsumerd-cmd-sock", 1, 0, 'C' },
	{ "kconsumerd-err-sock", 1, 0, 'E' },
	{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
	{ "ustconsumerd32-err-sock", 1, 0, 'H' },
	{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
	{ "ustconsumerd64-err-sock", 1, 0, 'F' },
	{ "consumerd32-path", 1, 0, 'u' },
	{ "consumerd32-libdir", 1, 0, 'U' },
	{ "consumerd64-path", 1, 0, 't' },
	{ "consumerd64-libdir", 1, 0, 'T' },
	{ "daemonize", 0, 0, 'd' },
	{ "background", 0, 0, 'b' },
	{ "sig-parent", 0, 0, 'S' },
	{ "help", 0, 0, 'h' },
	{ "group", 1, 0, 'g' },
	{ "version", 0, 0, 'V' },
	{ "quiet", 0, 0, 'q' },
	{ "verbose", 0, 0, 'v' },
	{ "verbose-consumer", 0, 0, 'Z' },
	{ "no-kernel", 0, 0, 'N' },
	{ "pidfile", 1, 0, 'p' },
	{ "jul-tcp-port", 1, 0, 'J' },
	{ "config", 1, 0, 'f' },
	{ "load", 1, 0, 'l' },
	{ "kmod-probes", 1, 0, 'P' },
	{ NULL, 0, 0, 0 }
};

/* Command line options to ignore from configuration file */
static const char *config_ignore_options[] = { "help", "version", "config" };
/* Shared between threads */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];

/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

int apps_cmd_notify_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t apps_notify_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
static pthread_t ht_cleanup_thread;
static pthread_t jul_reg_thread;
static pthread_t load_session_thread;
/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N wakers / 1 waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_registration_apps and thread_dispatch_ust_registration threads
 * use this queue along with the wait/wake scheme. The thread_manage_apps
 * receives down the line new application sockets and monitors them for any
 * I/O error or clean close that triggers an unregistration of the
 * application.
 */
static struct ust_cmd_queue ust_cmd_queue;

/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
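
/*
 * Typical safe traversal of the session list, as done by the update helpers
 * further down in this file:
 *
 *	session_lock_list();
 *	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
 *		session_lock(session);
 *		...
 *		session_unlock(session);
 *	}
 *	session_unlock_list();
 */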
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
static int consumerd32_bin_override;
static int consumerd64_bin_override;
static int consumerd32_libdir_override;
static int consumerd64_libdir_override;

static const char *module_proc_lttng = "/proc/lttng";

/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd try to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;

/* Set in main() with the current page size. */
long page_size;

/* Application health monitoring */
struct health_app *health_sessiond;

/* JUL TCP port for registration. Used by the JUL thread. */
unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;

/* Am I root or not. */
int is_root;			/* Set to 1 if the daemon is running as root */

const char * const config_section_name = "sessiond";

/* Load session thread information to operate. */
struct load_session_thread_data *load_info;

/*
 * Whether sessiond is ready for commands/health check requests.
 * NR_LTTNG_SESSIOND_READY must match the number of calls to
 * sessiond_notify_ready().
 */
#define NR_LTTNG_SESSIOND_READY		3
int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
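
/*
 * The readiness flag works as a countdown: it starts at
 * NR_LTTNG_SESSIOND_READY and each critical thread calls
 * sessiond_notify_ready() exactly once when it becomes operational. The
 * thread whose call brings the counter to zero (via the atomic decrement
 * below) is the one that signals the waiting parent process.
 */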
/* Notify parents that we are ready for cmd and health check */
void sessiond_notify_ready(void)
{
	if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
		/*
		 * Notify parent pid that we are ready to accept command
		 * for client side. This ppid is the one from the
		 * external process that spawned us.
		 */
		if (opt_sig_parent) {
			kill(ppid, SIGUSR1);
		}

		/*
		 * Notify the parent of the fork() process that we are
		 * ready.
		 */
		if (opt_daemon || opt_background) {
			kill(child_ppid, SIGUSR1);
		}
	}
}
static void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
{
	int ret;

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
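
/*
 * Every worker thread below is built on the same poll-loop skeleton using
 * the two helpers above, roughly:
 *
 *	ret = sessiond_set_thread_pollset(&events, size);
 *	for (;;) {
 *		ret = lttng_poll_wait(&events, -1);
 *		for (i = 0; i < ret; i++) {
 *			if (sessiond_check_thread_quit_pipe(pollfd, revents))
 *				goto exit;
 *			... handle the thread's own FDs ...
 *		}
 *	}
 *
 * which gives all threads a single, uniform cancellation point.
 */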
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}

/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}

/*
 * Close every consumer socket.
 */
static void close_consumer_sockets(void)
{
	int ret;

	if (kconsumer_data.err_sock >= 0) {
		ret = close(kconsumer_data.err_sock);
		if (ret < 0) {
			PERROR("kernel consumer err_sock close");
		}
	}
	if (ustconsumer32_data.err_sock >= 0) {
		ret = close(ustconsumer32_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 err_sock close");
		}
	}
	if (ustconsumer64_data.err_sock >= 0) {
		ret = close(ustconsumer64_data.err_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 err_sock close");
		}
	}
	if (kconsumer_data.cmd_sock >= 0) {
		ret = close(kconsumer_data.cmd_sock);
		if (ret < 0) {
			PERROR("kernel consumer cmd_sock close");
		}
	}
	if (ustconsumer32_data.cmd_sock >= 0) {
		ret = close(ustconsumer32_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd32 cmd_sock close");
		}
	}
	if (ustconsumer64_data.cmd_sock >= 0) {
		ret = close(ustconsumer64_data.cmd_sock);
		if (ret < 0) {
			PERROR("UST consumerd64 cmd_sock close");
		}
	}
}

/*
 * Generate the full lock file path using the rundir.
 *
 * Return the snprintf() return value thus a negative value is an error.
 */
static int generate_lock_file_path(char *path, size_t len)
{
	int ret;

	/* Build lockfile path from rundir. */
	ret = snprintf(path, len, "%s/" DEFAULT_LTTNG_SESSIOND_LOCKFILE, rundir);
	if (ret < 0) {
		PERROR("snprintf lockfile path");
	}

	return ret;
}
/*
 * Cleanup the daemon
 */
static void cleanup(void)
{
	int ret;
	char path[PATH_MAX];
	struct ltt_session *sess, *stmp;

	DBG("Cleaning up");

	/*
	 * Close the thread quit pipe. It has already done its job,
	 * since we are now called.
	 */
	utils_close_pipe(thread_quit_pipe);

	/*
	 * If opt_pidfile is undefined, the default file will be wiped when
	 * removing the rundir.
	 */
	if (opt_pidfile) {
		ret = remove(opt_pidfile);
		if (ret < 0) {
			PERROR("remove pidfile %s", opt_pidfile);
		}
	}

	DBG("Removing sessiond and consumerd content of directory %s", rundir);

	/* sessiond */
	snprintf(path, PATH_MAX, "%s/%s",
			rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX, "%s/%s", rundir,
			DEFAULT_LTTNG_SESSIOND_JULPORT_FILE);
	DBG("Removing %s", path);
	(void) unlink(path);

	/* kconsumerd */
	snprintf(path, PATH_MAX,
			DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_KCONSUMERD_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 32 */
	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	/* ust consumerd 64 */
	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
			rundir);
	DBG("Removing %s", path);
	(void) unlink(path);

	snprintf(path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_PATH,
			rundir);
	DBG("Removing directory %s", path);
	(void) rmdir(path);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();
	buffer_reg_destroy_registries();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	close_consumer_sockets();

	/*
	 * If the override option is set, the pointer points to a *non* const thus
	 * freeing it even though the variable type is set to const.
	 */
	if (tracing_group_name_override) {
		free((void *) tracing_group_name);
	}
	if (consumerd32_bin_override) {
		free((void *) consumerd32_bin);
	}
	if (consumerd64_bin_override) {
		free((void *) consumerd64_bin);
	}
	if (consumerd32_libdir_override) {
		free((void *) consumerd32_libdir);
	}
	if (consumerd64_libdir_override) {
		free((void *) consumerd64_libdir);
	}

	if (opt_load_session_path) {
		free(opt_load_session_path);
	}

	if (load_info) {
		load_session_destroy_data(load_info);
		free(load_info);
	}

	/*
	 * Cleanup lock file by deleting it and finally closing it which will
	 * release the file system lock.
	 */
	if (lockfile_fd >= 0) {
		char lockfile_path[PATH_MAX];

		ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
		if (ret > 0) {
			ret = remove(lockfile_path);
			if (ret < 0) {
				PERROR("remove lock file");
			}
			ret = close(lockfile_fd);
			if (ret < 0) {
				PERROR("close lock file");
			}
		}
	}

	/*
	 * We do NOT rmdir rundir because there are other processes
	 * using it, for instance lttng-relayd, which can start in
	 * parallel with this teardown.
	 */

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}

/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
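
/*
 * The wait shm is the rendezvous point with instrumented applications:
 * liblttng-ust blocks on the futex word at the start of that mapping until a
 * session daemon becomes available. futex_wait_update() stores the new state
 * and wakes the waiters, so the single call above is enough to release the
 * applications currently blocked on registration.
 */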
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
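
/*
 * Resulting reply buffer layout, with lttng_msg_size covering both parts:
 *
 *	+--------------------------+---------------------------+
 *	| struct lttcomm_lttng_msg | payload (data_size bytes) |
 *	+--------------------------+---------------------------+
 */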
/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}
				/* Update the stream global counter */
				ksess->stream_count_global += ret;

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					rcu_read_lock();
					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket,
								channel, ksess,
								session->output_traces ? 1 : 0);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							rcu_read_unlock();
							goto error;
						}
					}
					rcu_read_unlock();
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}

/*
 * For each tracing session, update newly registered apps. The session list
 * lock MUST be acquired before calling this.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	/* Consumer is in an ERROR state. Stop any application update. */
	if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
		/* Stop the update process since the consumer is dead. */
		return;
	}

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * This first step of the while is to clean this structure which could free
	 * non NULL pointers so initialize it before the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = sessiond_set_thread_pollset(&events, 2);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channel if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Poll infinite value of time */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				(void) lttng_read(kernel_poll_pipe[0], &tmp, 1);
				/*
				 * Ret value is useless here, if this pipe gets any actions an
				 * update is required anyway.
				 */
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(kernel_poll_pipe);
	kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the pthread condition of the consumer data that the thread state has
 * changed.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread startup.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
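
/*
 * The matching waiter sits in spawn_consumer_thread() below: holding
 * cond_mutex, it loops on pthread_cond_timedwait() until
 * consumer_thread_is_ready changes, then interprets the tri-state value
 * documented above (0 = not started, 1 = ready, negative = startup error).
 */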
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);

	health_code_update();

	/*
	 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
	 * metadata_sock. Nothing more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 3);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_entry();

	if (testpoint(sessiond_thread_manage_consumer)) {
		goto error;
	}

	ret = lttng_poll_wait(&events, -1);
	health_poll_exit();
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update();

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		/* Thread quit pipe has been closed. Killing thread. */
		ret = sessiond_check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update();

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update();
	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		/* Connect both socket, command and metadata. */
		consumer_data->cmd_sock =
				lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		consumer_data->metadata_fd =
				lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0
				|| consumer_data->metadata_fd < 0) {
			PERROR("consumer connect cmd socket");
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			goto error;
		}
		consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
		/* Create metadata socket lock. */
		consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
		if (consumer_data->metadata_sock.lock == NULL) {
			PERROR("zmalloc pthread mutex");
			ret = -1;
			goto error;
		}
		pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);

		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
		DBG("Consumer metadata socket ready (fd: %d)",
				consumer_data->metadata_fd);
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the consumerd error sock since we've established a connexion */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	/* Add new accepted error socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	/* Add metadata socket that is successfully connected. */
	ret = lttng_poll_add(&events, consumer_data->metadata_fd,
			LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update();

	/* Infinite blocking call, waiting for transmission */
	while (1) {
		health_code_update();

		/* Exit the thread because the thread quit pipe has been triggered. */
		if (should_quit) {
			/* Not a health error. */
			err = 0;
			goto exit;
		}

restart_poll:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart_poll;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/*
			 * Thread quit pipe has been triggered, flag that we should stop
			 * but continue the current loop to handle potential data from
			 * consumer.
			 */
			should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);

			if (pollfd == sock) {
				/* Event on the consumerd socket */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("consumer err socket second poll error");
					goto error;
				}
				health_code_update();
				/* Wait for any kconsumerd error */
				ret = lttcomm_recv_unix_sock(sock, &code,
						sizeof(enum lttcomm_return_code));
				if (ret <= 0) {
					ERR("consumer closed the command socket");
					goto error;
				}

				ERR("consumer return code : %s",
						lttcomm_get_readable_code(-code));

				goto exit;
			} else if (pollfd == consumer_data->metadata_fd) {
				/* UST metadata requests */
				ret = ust_consumer_metadata_request(
						&consumer_data->metadata_sock);
				if (ret < 0) {
					ERR("Handling metadata request");
					goto error;
				}
			}
			/* No need for an else branch all FDs are tested prior. */
		}
		health_code_update();
	}

exit:
error:
	/*
	 * We lock here because we are about to close the sockets and some other
	 * thread might be using them so get exclusive access which will abort all
	 * other consumer command by other threads.
	 */
	pthread_mutex_lock(&consumer_data->lock);

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->err_sock = -1;
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
		if (ret) {
			PERROR("close");
		}
		consumer_data->cmd_sock = -1;
	}
	if (consumer_data->metadata_sock.fd_ptr &&
			*consumer_data->metadata_sock.fd_ptr >= 0) {
		ret = close(*consumer_data->metadata_sock.fd_ptr);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;
	pthread_mutex_unlock(&consumer_data->lock);

	/* Cleanup metadata socket mutex. */
	if (consumer_data->metadata_sock.lock) {
		pthread_mutex_destroy(consumer_data->metadata_sock.lock);
		free(consumer_data->metadata_sock.lock);
	}
	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	ssize_t size_ret;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	rcu_register_thread();
	rcu_thread_online();

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);

	if (testpoint(sessiond_thread_manage_apps)) {
		goto error_testpoint;
	}

	health_code_update();

	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	if (testpoint(sessiond_thread_manage_apps_before_loop)) {
		goto error;
	}

	health_code_update();

	while (1) {
		DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					int sock;

					size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
					if (size_ret < sizeof(sock)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update();

					/*
					 * We only monitor the error events of the socket. This
					 * thread does not handle any incoming data from UST
					 * (as opposed to the notify thread).
					 */
					ret = lttng_poll_add(&events, sock,
							LPOLLERR | LPOLLHUP | LPOLLRDHUP);
					if (ret < 0) {
						goto error;
					}

					DBG("Apps with sock %d added to poll set", sock);
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
				}
			}

			health_code_update();
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	utils_close_pipe(apps_cmd_pipe);
	apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;

	/*
	 * We don't clean the UST app hash table here since already registered
	 * applications can still be controlled so let them be until the session
	 * daemon dies or the applications stop.
	 */

	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Send a socket to a thread. This is called from the dispatch UST registration
 * thread once all sockets are set for the application.
 *
 * The sock value can be invalid, we don't really care, the thread will handle
 * it and make the necessary cleanup if so.
 *
 * On success, return 0 else a negative value being the errno message of the
 * write().
 */
static int send_socket_to_thread(int fd, int sock)
{
	ssize_t ret;

	/*
	 * It's possible that the FD is set as invalid with -1 concurrently just
	 * before calling this function being a shutdown state of the thread.
	 */
	if (fd < 0) {
		ret = -EBADF;
		goto error;
	}

	ret = lttng_write(fd, &sock, sizeof(sock));
	if (ret < sizeof(sock)) {
		PERROR("write apps pipe %d", fd);
		if (ret < 0) {
			ret = -errno;
		}
		goto error;
	}

	/* All good. Don't send back the write positive ret value. */
	ret = 0;

error:
	return (int) ret;
}
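
/*
 * The FD travels between threads as a plain int written to a pipe. Since
 * sizeof(int) is well below PIPE_BUF, POSIX guarantees such writes are
 * atomic, so readers can never observe a partially written value even if
 * multiple writers shared the pipe.
 */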
/*
 * Sanitize the wait queue of the dispatch registration thread meaning removing
 * invalid nodes from it. This is to avoid memory leaks for the case the UST
 * notify socket is never received.
 */
static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
{
	int ret, nb_fd = 0, i;
	unsigned int fd_added = 0;
	struct lttng_poll_event events;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;

	assert(wait_queue);

	lttng_poll_init(&events);

	/* Just skip everything for an empty queue. */
	if (!wait_queue->count) {
		goto end;
	}

	ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error_create;
	}

	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue->head, head) {
		assert(wait_node->app);
		ret = lttng_poll_add(&events, wait_node->app->sock,
				LPOLLHUP | LPOLLERR);
		if (ret < 0) {
			goto error;
		}

		fd_added = 1;
	}

	if (!fd_added) {
		goto end;
	}

	/*
	 * Poll but don't block so we can quickly identify the faulty events and
	 * clean them afterwards from the wait queue.
	 */
	ret = lttng_poll_wait(&events, 0);
	if (ret < 0) {
		goto error;
	}
	nb_fd = ret;

	for (i = 0; i < nb_fd; i++) {
		/* Get faulty FD. */
		uint32_t revents = LTTNG_POLL_GETEV(&events, i);
		int pollfd = LTTNG_POLL_GETFD(&events, i);

		if (!revents) {
			/* No activity for this FD (poll implementation). */
			continue;
		}

		cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
				&wait_queue->head, head) {
			if (pollfd == wait_node->app->sock &&
					(revents & (LPOLLHUP | LPOLLERR))) {
				cds_list_del(&wait_node->head);
				wait_queue->count--;
				ust_app_destroy(wait_node->app);
				free(wait_node);
				break;
			}
		}
	}

	if (nb_fd > 0) {
		DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
	}

end:
	lttng_poll_clean(&events);
	return;

error:
	lttng_poll_clean(&events);
error_create:
	ERR("Unable to sanitize wait queue");
	return;
}
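
/*
 * Passing a zero timeout to lttng_poll_wait() above makes the call a
 * non-blocking probe: it reports only sockets already hung up or in error,
 * which is exactly the set of applications that died between registration
 * and the arrival of their notify socket.
 */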
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret, err = -1;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;
	struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
	struct ust_reg_wait_queue wait_queue = {
		.count = 0,
	};

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);

	if (testpoint(sessiond_thread_app_reg_dispatch)) {
		goto error_testpoint;
	}

	health_code_update();

	CDS_INIT_LIST_HEAD(&wait_queue.head);

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		health_code_update();

		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			struct ust_app *app = NULL;
			ust_cmd = NULL;

			/*
			 * Make sure we don't have node(s) that have hung up before receiving
			 * the notify socket. This is to clean the list in order to avoid
			 * memory leaks from notify socket that are never seen.
			 */
			sanitize_wait_queue(&wait_queue);

			health_code_update();
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
				wait_node = zmalloc(sizeof(*wait_node));
				if (!wait_node) {
					PERROR("zmalloc wait_node dispatch");
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(ust_cmd);
					goto error;
				}
				CDS_INIT_LIST_HEAD(&wait_node->head);

				/* Create application object if socket is CMD. */
				wait_node->app = ust_app_create(&ust_cmd->reg_msg,
						ust_cmd->sock);
				if (!wait_node->app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
					free(wait_node);
					free(ust_cmd);
					continue;
				}

				/*
				 * Add application to the wait queue so we can set the notify
				 * socket before putting this object in the global ht.
				 */
				cds_list_add(&wait_node->head, &wait_queue.head);
				wait_queue.count++;

				free(ust_cmd);
				/*
				 * We have to continue here since we don't have the notify
				 * socket and the application MUST be added to the hash table
				 * only at that moment.
				 */
				continue;
			} else {
				/*
				 * Look for the application in the local wait queue and set the
				 * notify socket if found.
				 */
				cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
						&wait_queue.head, head) {
					health_code_update();
					if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
						wait_node->app->notify_sock = ust_cmd->sock;
						cds_list_del(&wait_node->head);
						wait_queue.count--;
						app = wait_node->app;
						free(wait_node);
						DBG3("UST app notify socket %d is set", ust_cmd->sock);
						break;
					}
				}

				/*
				 * With no application at this stage the received socket is
				 * basically useless so close it before we free the cmd data
				 * structure for good.
				 */
				if (!app) {
					ret = close(ust_cmd->sock);
					if (ret < 0) {
						PERROR("close ust sock dispatch %d", ust_cmd->sock);
					}
					lttng_fd_put(LTTNG_FD_APPS, 1);
				}
				free(ust_cmd);
			}

			if (app) {
				/*
				 * @session_lock_list
				 *
				 * Lock the global session list so from the register up to the
				 * registration done message, no thread can see the application
				 * and change its state.
				 */
				session_lock_list();
				rcu_read_lock();

				/*
				 * Add application to the global hash table. This needs to be
				 * done before the update to the UST registry can locate the
				 * application.
				 */
				ust_app_add(app);

				/* Set app version. This call will print an error if needed. */
				(void) ust_app_version(app);

				/* Send notify socket through the notify pipe. */
				ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
						app->notify_sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No notify thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				/*
				 * Update newly registered application with the tracing
				 * registry info already enabled information.
				 */
				update_ust_app(app->sock);

				/*
				 * Don't care about return value. Let the manage apps threads
				 * handle app unregistration upon socket close.
				 */
				(void) ust_app_register_done(app->sock);

				/*
				 * Even if the application socket has been closed, send the app
				 * to the thread and unregistration will take place at that
				 * place.
				 */
				ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
				if (ret < 0) {
					rcu_read_unlock();
					session_unlock_list();
					/*
					 * No apps. thread, stop the UST tracing. However, this is
					 * not an internal error of this thread thus setting
					 * the health error code to a normal exit.
					 */
					err = 0;
					goto error;
				}

				rcu_read_unlock();
				session_unlock_list();
			}
		} while (node != NULL);

		health_poll_entry();
		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
		health_poll_exit();
	}
	/* Normal exit, no error */
	err = 0;

error:
	/* Clean up wait queue. */
	cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
			&wait_queue.head, head) {
		cds_list_del(&wait_node->head);
		wait_queue.count--;
		free(wait_node);
	}

error_testpoint:
	DBG("Dispatch thread dying");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Gets allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);

	if (testpoint(sessiond_thread_registration_apps)) {
		goto error_testpoint;
	}

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = sessiond_set_thread_pollset(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problem for already\n"
				"running applications that wishes to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			/* Thread quit pipe has been closed. Killing thread. */
			ret = sessiond_check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set socket timeout for both receiving and sending.
					 * app_socket_timeout is in seconds, whereas
					 * lttcomm_setsockopt_rcv_timeout and
					 * lttcomm_setsockopt_snd_timeout expect msec as
					 * parameter.
					 */
					(void) lttcomm_setsockopt_rcv_timeout(sock,
							app_socket_timeout * 1000);
					(void) lttcomm_setsockopt_snd_timeout(sock,
							app_socket_timeout * 1000);

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						sock = -1;
						continue;
					}

					health_code_update();
					ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
					if (ret < 0) {
						free(ust_cmd);
						/* Close socket of the application. */
						ret = close(sock);
						if (ret) {
							PERROR("close");
						}
						lttng_fd_put(LTTNG_FD_APPS, 1);
						sock = -1;
						continue;
					}
					health_code_update();

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This apps will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
error_testpoint:
	DBG("UST Registration thread cleanup complete");
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_sessiond);

	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid to
	 * increment the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;

	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	/* Handle the fatal errors */
	if (ret == ETIMEDOUT) {
		int pth_ret;

		/*
		 * Call has timed out so we kill the kconsumerd_thread and return
		 * an error.
		 */
		ERR("Condition timed out. The consumer thread was never ready."
				" Killing it");
		pth_ret = pthread_cancel(consumer_data->thread);
		if (pth_ret < 0) {
			PERROR("pthread_cancel consumer thread");
		}
	} else if (ret != 0) {
		PERROR("pthread_cond_wait failed consumer thread");
	}

	if (ret != 0) {
		/* Caller is expecting a negative value on failure. */
		ret = -1;
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
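
/*
 * Using CLOCK_MONOTONIC for both clock_gettime() and the condition attribute
 * keeps the timeout immune to wall-clock adjustments: with the default
 * CLOCK_REALTIME, an NTP or manual clock jump between the clock_gettime()
 * call and pthread_cond_timedwait() could make the wait expire immediately
 * or stall far beyond DEFAULT_SEM_WAIT_TIMEOUT.
 */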
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;
	int ret;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}
	return pthread_join(consumer_data->thread, &status);
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else if (lttng_opt_quiet) {
			verbosity = "--quiet";
		} else {
			verbosity = "";
		}

		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				ret = -EINVAL;
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			ret = execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
						+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
						+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					free(tmpnew);
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					"--group", tracing_group_name,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (ret != 0) {
			PERROR("Consumer execl()");
		}
		/* Reaching this point, we got a failure on our execl(). */
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
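
/*
 * The LD_LIBRARY_PATH handling above prepends the per-bitness libdir to
 * whatever the environment already holds. For example (illustrative paths),
 * an existing "/usr/lib" combined with a 64-bit libdir of "/opt/lttng/lib"
 * yields "LD_LIBRARY_PATH=/opt/lttng/lib:/usr/lib" before the execl().
 */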
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if place in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created sockets on error. */
	if (consumer_data->err_sock >= 0) {
		int err;

		err = close(consumer_data->err_sock);
		if (err < 0) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}

/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
    int ret;

    /* Modprobe lttng kernel modules */
    ret = modprobe_lttng_control();
    if (ret < 0) {
        goto error;
    }

    /* Open debugfs lttng */
    kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
    if (kernel_tracer_fd < 0) {
        DBG("Failed to open %s", module_proc_lttng);
        ret = -1;
        goto error_open;
    }

    /* Validate kernel version */
    ret = kernel_validate_version(kernel_tracer_fd);
    if (ret < 0) {
        goto error_version;
    }

    ret = modprobe_lttng_data();
    if (ret < 0) {
        goto error_modules;
    }

    DBG("Kernel tracer fd %d", kernel_tracer_fd);
    return 0;

error_version:
    modprobe_remove_lttng_control();
    ret = close(kernel_tracer_fd);
    if (ret) {
        PERROR("close");
    }
    kernel_tracer_fd = -1;
    return LTTNG_ERR_KERN_VERSION;

error_modules:
    ret = close(kernel_tracer_fd);
    if (ret) {
        PERROR("close");
    }

error_open:
    modprobe_remove_lttng_control();

error:
    WARN("No kernel tracer available");
    kernel_tracer_fd = -1;
    if (!is_root) {
        return LTTNG_ERR_NEED_ROOT_SESSIOND;
    } else {
        return LTTNG_ERR_KERN_NA;
    }
}
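
/*
 * For reference: modprobe_lttng_control() loads the LTTng control kernel
 * module and module_proc_lttng points at the proc file it exposes
 * (typically /proc/lttng); the exact module set and path depend on the
 * installed lttng-modules version.
 */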

/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
    int ret;
    const char *dir_name;
    struct consumer_output *consumer;

    assert(session);
    assert(session->consumer);

    switch (domain) {
    case LTTNG_DOMAIN_KERNEL:
        DBG3("Copying tracing session consumer output in kernel session");
        /*
         * XXX: We should audit the session creation and what this function
         * does "extra" in order to avoid a destroy since this function is used
         * in the domain session creation (kernel and ust) only. Same for UST
         * domain.
         */
        if (session->kernel_session->consumer) {
            consumer_destroy_output(session->kernel_session->consumer);
        }
        session->kernel_session->consumer =
            consumer_copy_output(session->consumer);
        /* Ease our life a bit for the next part */
        consumer = session->kernel_session->consumer;
        dir_name = DEFAULT_KERNEL_TRACE_DIR;
        break;
    case LTTNG_DOMAIN_JUL:
    case LTTNG_DOMAIN_UST:
        DBG3("Copying tracing session consumer output in UST session");
        if (session->ust_session->consumer) {
            consumer_destroy_output(session->ust_session->consumer);
        }
        session->ust_session->consumer =
            consumer_copy_output(session->consumer);
        /* Ease our life a bit for the next part */
        consumer = session->ust_session->consumer;
        dir_name = DEFAULT_UST_TRACE_DIR;
        break;
    default:
        ret = LTTNG_ERR_UNKNOWN_DOMAIN;
        goto error;
    }

    /* Append correct directory to subdir */
    strncat(consumer->subdir, dir_name,
            sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
    DBG3("Copy session consumer subdir %s", consumer->subdir);

    ret = LTTNG_OK;

error:
    return ret;
}
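
/*
 * Example of the subdir rewriting above: for a session writing locally to
 * $HOME/lttng-traces/mysess, the kernel domain ends up under
 * .../mysess/kernel and the UST domain under .../mysess/ust, assuming the
 * usual "/kernel" and "/ust" values of DEFAULT_KERNEL_TRACE_DIR and
 * DEFAULT_UST_TRACE_DIR.
 */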

/*
 * Create an UST session and add it to the session ust list.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int create_ust_session(struct ltt_session *session,
        struct lttng_domain *domain)
{
    int ret;
    struct ltt_ust_session *lus = NULL;

    assert(session);
    assert(domain);
    assert(session->consumer);

    switch (domain->type) {
    case LTTNG_DOMAIN_JUL:
    case LTTNG_DOMAIN_UST:
        break;
    default:
        ERR("Unknown UST domain on create session %d", domain->type);
        ret = LTTNG_ERR_UNKNOWN_DOMAIN;
        goto error;
    }

    DBG("Creating UST session");

    lus = trace_ust_create_session(session->id);
    if (lus == NULL) {
        ret = LTTNG_ERR_UST_SESS_FAIL;
        goto error;
    }

    lus->uid = session->uid;
    lus->gid = session->gid;
    lus->output_traces = session->output_traces;
    lus->snapshot_mode = session->snapshot_mode;
    lus->live_timer_interval = session->live_timer;
    session->ust_session = lus;

    /* Copy session output to the newly created UST session */
    ret = copy_session_consumer(domain->type, session);
    if (ret != LTTNG_OK) {
        goto error;
    }

    return LTTNG_OK;

error:
    free(lus);
    session->ust_session = NULL;
    return ret;
}

/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
    int ret;

    DBG("Creating kernel session");

    ret = kernel_create_session(session, kernel_tracer_fd);
    if (ret < 0) {
        ret = LTTNG_ERR_KERN_SESS_FAIL;
        goto error;
    }

    /* Code flow safety */
    assert(session->kernel_session);

    /* Copy session output to the newly created Kernel session */
    ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
    if (ret != LTTNG_OK) {
        goto error;
    }

    /* Create directory(ies) on local filesystem. */
    if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
            strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
        ret = run_as_mkdir_recursive(
                session->kernel_session->consumer->dst.trace_path,
                S_IRWXU | S_IRWXG, session->uid, session->gid);
        if (ret < 0) {
            if (ret != -EEXIST) {
                ERR("Trace directory creation error");
                goto error;
            }
        }
    }

    session->kernel_session->uid = session->uid;
    session->kernel_session->gid = session->gid;
    session->kernel_session->output_traces = session->output_traces;
    session->kernel_session->snapshot_mode = session->snapshot_mode;

    return LTTNG_OK;

error:
    trace_kernel_destroy_session(session->kernel_session);
    session->kernel_session = NULL;
    return ret;
}

/*
 * Count the number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
    unsigned int i = 0;
    struct ltt_session *session;

    DBG("Counting number of available session for UID %d GID %d",
            uid, gid);
    cds_list_for_each_entry(session, &session_list_ptr->head, list) {
        /*
         * Only list the sessions the user can control.
         */
        if (!session_access_ok(session, uid, gid)) {
            continue;
        }
        i++;
    }
    return i;
}
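
/*
 * The count computed here sizes the LTTNG_LIST_SESSIONS reply payload in
 * process_client_msg() before cmd_list_lttng_sessions() fills it, so both
 * are expected to apply the same session_access_ok() filtering.
 */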

/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 *
 * Should *NOT* be called with RCU read-side lock held.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
        int *sock_error)
{
    int ret = LTTNG_OK;
    int need_tracing_session = 1;
    int need_domain;

    DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

    *sock_error = 0;

    switch (cmd_ctx->lsm->cmd_type) {
    case LTTNG_CREATE_SESSION:
    case LTTNG_CREATE_SESSION_SNAPSHOT:
    case LTTNG_CREATE_SESSION_LIVE:
    case LTTNG_DESTROY_SESSION:
    case LTTNG_LIST_SESSIONS:
    case LTTNG_LIST_DOMAINS:
    case LTTNG_START_TRACE:
    case LTTNG_STOP_TRACE:
    case LTTNG_DATA_PENDING:
    case LTTNG_SNAPSHOT_ADD_OUTPUT:
    case LTTNG_SNAPSHOT_DEL_OUTPUT:
    case LTTNG_SNAPSHOT_LIST_OUTPUT:
    case LTTNG_SNAPSHOT_RECORD:
    case LTTNG_SAVE_SESSION:
        need_domain = 0;
        break;
    default:
        need_domain = 1;
    }

    if (opt_no_kernel && need_domain
            && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
        if (!is_root) {
            ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
        } else {
            ret = LTTNG_ERR_KERN_NA;
        }
        goto error;
    }

    /* Deny register consumer if we already have a spawned consumer. */
    if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
        pthread_mutex_lock(&kconsumer_data.pid_mutex);
        if (kconsumer_data.pid > 0) {
            ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
            pthread_mutex_unlock(&kconsumer_data.pid_mutex);
            goto error;
        }
        pthread_mutex_unlock(&kconsumer_data.pid_mutex);
    }

    /*
     * Check for commands that don't need to allocate a returned payload. We do
     * this here so we don't have to make the call for no payload at each
     * command.
     */
    switch (cmd_ctx->lsm->cmd_type) {
    case LTTNG_LIST_SESSIONS:
    case LTTNG_LIST_TRACEPOINTS:
    case LTTNG_LIST_TRACEPOINT_FIELDS:
    case LTTNG_LIST_DOMAINS:
    case LTTNG_LIST_CHANNELS:
    case LTTNG_LIST_EVENTS:
        break;
    default:
        /* Setup lttng message with no payload */
        ret = setup_lttng_msg(cmd_ctx, 0);
        if (ret < 0) {
            /* This label does not try to unlock the session */
            goto init_setup_error;
        }
        break;
    }
    /* Commands that DO NOT need a session. */
    switch (cmd_ctx->lsm->cmd_type) {
    case LTTNG_CREATE_SESSION:
    case LTTNG_CREATE_SESSION_SNAPSHOT:
    case LTTNG_CREATE_SESSION_LIVE:
    case LTTNG_CALIBRATE:
    case LTTNG_LIST_SESSIONS:
    case LTTNG_LIST_TRACEPOINTS:
    case LTTNG_LIST_TRACEPOINT_FIELDS:
    case LTTNG_SAVE_SESSION:
        need_tracing_session = 0;
        break;
    default:
        DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
        /*
         * We keep the session list lock across _all_ commands
         * for now, because the per-session lock does not
         * handle teardown properly.
         */
        session_lock_list();
        cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
        if (cmd_ctx->session == NULL) {
            ret = LTTNG_ERR_SESS_NOT_FOUND;
            goto error;
        } else {
            /* Acquire lock for the session */
            session_lock(cmd_ctx->session);
        }
        break;
    }

    /*
     * Commands that need a valid session but should NOT create one if none
     * exists. Instead of creating one and destroying it when the command is
     * handled, process that right before so we save some round trip in useless
     * code path.
     */
    switch (cmd_ctx->lsm->cmd_type) {
    case LTTNG_DISABLE_CHANNEL:
    case LTTNG_DISABLE_EVENT:
    case LTTNG_DISABLE_ALL_EVENT:
        switch (cmd_ctx->lsm->domain.type) {
        case LTTNG_DOMAIN_KERNEL:
            if (!cmd_ctx->session->kernel_session) {
                ret = LTTNG_ERR_NO_CHANNEL;
                goto error;
            }
            break;
        case LTTNG_DOMAIN_JUL:
        case LTTNG_DOMAIN_UST:
            if (!cmd_ctx->session->ust_session) {
                ret = LTTNG_ERR_NO_CHANNEL;
                goto error;
            }
            break;
        default:
            ret = LTTNG_ERR_UNKNOWN_DOMAIN;
            goto error;
        }
        break;
    default:
        break;
    }

    if (!need_domain) {
        goto skip_domain;
    }
    /*
     * Check domain type for specific "pre-action".
     */
    switch (cmd_ctx->lsm->domain.type) {
    case LTTNG_DOMAIN_KERNEL:
        if (!is_root) {
            ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
            goto error;
        }

        /* Kernel tracer check */
        if (kernel_tracer_fd == -1) {
            /* Basically, load kernel tracer modules */
            ret = init_kernel_tracer();
            if (ret != 0) {
                goto error;
            }
        }

        /* Consumer is in an ERROR state. Report back to client */
        if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
            ret = LTTNG_ERR_NO_KERNCONSUMERD;
            goto error;
        }

        /* Need a session for kernel command */
        if (need_tracing_session) {
            if (cmd_ctx->session->kernel_session == NULL) {
                ret = create_kernel_session(cmd_ctx->session);
                if (ret < 0) {
                    ret = LTTNG_ERR_KERN_SESS_FAIL;
                    goto error;
                }
            }

            /* Start the kernel consumer daemon */
            pthread_mutex_lock(&kconsumer_data.pid_mutex);
            if (kconsumer_data.pid == 0 &&
                    cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                pthread_mutex_unlock(&kconsumer_data.pid_mutex);
                ret = start_consumerd(&kconsumer_data);
                if (ret < 0) {
                    ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
                    goto error;
                }
                uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
            } else {
                pthread_mutex_unlock(&kconsumer_data.pid_mutex);
            }

            /*
             * The consumer was just spawned, so we need to add the socket to
             * the consumer output of the session if it exists.
             */
            ret = consumer_create_socket(&kconsumer_data,
                    cmd_ctx->session->kernel_session->consumer);
            if (ret < 0) {
                goto error;
            }
        }
        break;
    case LTTNG_DOMAIN_JUL:
    case LTTNG_DOMAIN_UST:
    {
        if (!ust_app_supported()) {
            ret = LTTNG_ERR_NO_UST;
            goto error;
        }

        /* Consumer is in an ERROR state. Report back to client */
        if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
            ret = LTTNG_ERR_NO_USTCONSUMERD;
            goto error;
        }

        if (need_tracing_session) {
            /* Create UST session if none exist. */
            if (cmd_ctx->session->ust_session == NULL) {
                ret = create_ust_session(cmd_ctx->session,
                        &cmd_ctx->lsm->domain);
                if (ret != LTTNG_OK) {
                    goto error;
                }
            }

            /* Start the UST consumer daemons */
            /* 64-bit */
            pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
            if (consumerd64_bin[0] != '\0' &&
                    ustconsumer64_data.pid == 0 &&
                    cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
                ret = start_consumerd(&ustconsumer64_data);
                if (ret < 0) {
                    ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
                    uatomic_set(&ust_consumerd64_fd, -EINVAL);
                    goto error;
                }

                uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
                uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
            } else {
                pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
            }

            /*
             * Setup socket for consumer 64 bit. No need for atomic access
             * since it was set above and can ONLY be set in this thread.
             */
            ret = consumer_create_socket(&ustconsumer64_data,
                    cmd_ctx->session->ust_session->consumer);
            if (ret < 0) {
                goto error;
            }

            /* 32-bit */
            pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
            if (consumerd32_bin[0] != '\0' &&
                    ustconsumer32_data.pid == 0 &&
                    cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
                pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
                ret = start_consumerd(&ustconsumer32_data);
                if (ret < 0) {
                    ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
                    uatomic_set(&ust_consumerd32_fd, -EINVAL);
                    goto error;
                }

                uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
                uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
            } else {
                pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
            }

            /*
             * Setup socket for consumer 32 bit. No need for atomic access
             * since it was set above and can ONLY be set in this thread.
             */
            ret = consumer_create_socket(&ustconsumer32_data,
                    cmd_ctx->session->ust_session->consumer);
            if (ret < 0) {
                goto error;
            }
        }
        break;
    }
    default:
        break;
    }

skip_domain:
    /* Validate consumer daemon state when start/stop trace command */
    if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
            cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
        switch (cmd_ctx->lsm->domain.type) {
        case LTTNG_DOMAIN_JUL:
        case LTTNG_DOMAIN_UST:
            if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
                ret = LTTNG_ERR_NO_USTCONSUMERD;
                goto error;
            }
            break;
        case LTTNG_DOMAIN_KERNEL:
            if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
                ret = LTTNG_ERR_NO_KERNCONSUMERD;
                goto error;
            }
            break;
        }
    }

    /*
     * Check that the UID or GID match that of the tracing session.
     * The root user can interact with all sessions.
     */
    if (need_tracing_session) {
        if (!session_access_ok(cmd_ctx->session,
                LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
                LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
            ret = LTTNG_ERR_EPERM;
            goto error;
        }
    }

    /*
     * Send relayd information to consumer as soon as we have a domain and a
     * session defined.
     */
    if (cmd_ctx->session && need_domain) {
        /*
         * Setup relayd if not done yet. If the relayd information was already
         * sent to the consumer, this call will gracefully return.
         */
        ret = cmd_setup_relayd(cmd_ctx->session);
        if (ret != LTTNG_OK) {
            goto error;
        }
    }
    /* Process by command type */
    switch (cmd_ctx->lsm->cmd_type) {
    case LTTNG_ADD_CONTEXT:
    {
        ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
                cmd_ctx->lsm->u.context.channel_name,
                &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
        break;
    }
    case LTTNG_DISABLE_CHANNEL:
    {
        ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
                cmd_ctx->lsm->u.disable.channel_name);
        break;
    }
    case LTTNG_DISABLE_EVENT:
    {
        ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
                cmd_ctx->lsm->u.disable.channel_name,
                cmd_ctx->lsm->u.disable.name);
        break;
    }
    case LTTNG_DISABLE_ALL_EVENT:
    {
        DBG("Disabling all events");

        ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
                cmd_ctx->lsm->u.disable.channel_name);
        break;
    }
    case LTTNG_ENABLE_CHANNEL:
    {
        ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
                &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
        break;
    }
    case LTTNG_ENABLE_EVENT:
    {
        struct lttng_event_exclusion *exclusion = NULL;
        struct lttng_filter_bytecode *bytecode = NULL;
        char *filter_expression = NULL;

        /* Handle exclusion events and receive it from the client. */
        if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
            size_t count = cmd_ctx->lsm->u.enable.exclusion_count;

            exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
                    (count * LTTNG_SYMBOL_NAME_LEN));
            if (!exclusion) {
                ret = LTTNG_ERR_EXCLUSION_NOMEM;
                goto error;
            }

            DBG("Receiving var len exclusion event list from client ...");
            exclusion->count = count;
            ret = lttcomm_recv_unix_sock(sock, exclusion->names,
                    count * LTTNG_SYMBOL_NAME_LEN);
            if (ret <= 0) {
                DBG("Nothing recv() from client var len data... continuing");
                *sock_error = 1;
                free(exclusion);
                ret = LTTNG_ERR_EXCLUSION_INVAL;
                goto error;
            }
        }

        /* Get filter expression from client. */
        if (cmd_ctx->lsm->u.enable.expression_len > 0) {
            size_t expression_len =
                cmd_ctx->lsm->u.enable.expression_len;

            if (expression_len > LTTNG_FILTER_MAX_LEN) {
                ret = LTTNG_ERR_FILTER_INVAL;
                free(exclusion);
                goto error;
            }

            filter_expression = zmalloc(expression_len);
            if (!filter_expression) {
                free(exclusion);
                ret = LTTNG_ERR_FILTER_NOMEM;
                goto error;
            }

            /* Receive var. len. data */
            DBG("Receiving var len filter's expression from client ...");
            ret = lttcomm_recv_unix_sock(sock, filter_expression,
                    expression_len);
            if (ret <= 0) {
                DBG("Nothing recv() from client var len data... continuing");
                *sock_error = 1;
                free(filter_expression);
                free(exclusion);
                ret = LTTNG_ERR_FILTER_INVAL;
                goto error;
            }
        }

        /* Handle filter and get bytecode from client. */
        if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
            size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;

            if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
                ret = LTTNG_ERR_FILTER_INVAL;
                free(filter_expression);
                free(exclusion);
                goto error;
            }

            bytecode = zmalloc(bytecode_len);
            if (!bytecode) {
                free(filter_expression);
                free(exclusion);
                ret = LTTNG_ERR_FILTER_NOMEM;
                goto error;
            }

            /* Receive var. len. data */
            DBG("Receiving var len filter's bytecode from client ...");
            ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
            if (ret <= 0) {
                DBG("Nothing recv() from client var len data... continuing");
                *sock_error = 1;
                free(filter_expression);
                free(bytecode);
                free(exclusion);
                ret = LTTNG_ERR_FILTER_INVAL;
                goto error;
            }

            if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
                free(filter_expression);
                free(bytecode);
                free(exclusion);
                ret = LTTNG_ERR_FILTER_INVAL;
                goto error;
            }
        }

        ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
                cmd_ctx->lsm->u.enable.channel_name,
                &cmd_ctx->lsm->u.enable.event,
                filter_expression, bytecode, exclusion,
                kernel_poll_pipe[1]);
        break;
    }
    case LTTNG_ENABLE_ALL_EVENT:
    {
        DBG("Enabling all events");

        ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
                cmd_ctx->lsm->u.enable.channel_name,
                cmd_ctx->lsm->u.enable.event.type, NULL, NULL,
                kernel_poll_pipe[1]);
        break;
    }
    case LTTNG_LIST_TRACEPOINTS:
    {
        struct lttng_event *events;
        ssize_t nb_events;

        session_lock_list();
        nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
        session_unlock_list();
        if (nb_events < 0) {
            /* Return value is a negative lttng_error_code. */
            ret = -nb_events;
            goto error;
        }

        /*
         * Setup lttng message with payload size set to the event list size in
         * bytes and then copy list into the llm payload.
         */
        ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
        if (ret < 0) {
            free(events);
            goto setup_error;
        }

        /* Copy event list into message payload */
        memcpy(cmd_ctx->llm->payload, events,
                sizeof(struct lttng_event) * nb_events);

        free(events);

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_LIST_TRACEPOINT_FIELDS:
    {
        struct lttng_event_field *fields;
        ssize_t nb_fields;

        session_lock_list();
        nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
                &fields);
        session_unlock_list();
        if (nb_fields < 0) {
            /* Return value is a negative lttng_error_code. */
            ret = -nb_fields;
            goto error;
        }

        /*
         * Setup lttng message with payload size set to the field list size in
         * bytes and then copy list into the llm payload.
         */
        ret = setup_lttng_msg(cmd_ctx,
                sizeof(struct lttng_event_field) * nb_fields);
        if (ret < 0) {
            free(fields);
            goto setup_error;
        }

        /* Copy field list into message payload */
        memcpy(cmd_ctx->llm->payload, fields,
                sizeof(struct lttng_event_field) * nb_fields);

        free(fields);

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_SET_CONSUMER_URI:
    {
        size_t nb_uri, len;
        struct lttng_uri *uris;

        nb_uri = cmd_ctx->lsm->u.uri.size;
        len = nb_uri * sizeof(struct lttng_uri);

        if (nb_uri == 0) {
            ret = LTTNG_ERR_INVALID;
            goto error;
        }

        uris = zmalloc(len);
        if (uris == NULL) {
            ret = LTTNG_ERR_FATAL;
            goto error;
        }

        /* Receive variable len data */
        DBG("Receiving %zu URI(s) from client ...", nb_uri);
        ret = lttcomm_recv_unix_sock(sock, uris, len);
        if (ret <= 0) {
            DBG("No URIs received from client... continuing");
            *sock_error = 1;
            ret = LTTNG_ERR_SESSION_FAIL;
            free(uris);
            goto error;
        }

        ret = cmd_set_consumer_uri(cmd_ctx->session, nb_uri, uris);
        free(uris);
        if (ret != LTTNG_OK) {
            goto error;
        }

        break;
    }
    case LTTNG_START_TRACE:
    {
        ret = cmd_start_trace(cmd_ctx->session);
        break;
    }
    case LTTNG_STOP_TRACE:
    {
        ret = cmd_stop_trace(cmd_ctx->session);
        break;
    }
    case LTTNG_CREATE_SESSION:
    {
        size_t nb_uri, len;
        struct lttng_uri *uris = NULL;

        nb_uri = cmd_ctx->lsm->u.uri.size;
        len = nb_uri * sizeof(struct lttng_uri);

        if (nb_uri > 0) {
            uris = zmalloc(len);
            if (uris == NULL) {
                ret = LTTNG_ERR_FATAL;
                goto error;
            }

            /* Receive variable len data */
            DBG("Waiting for %zu URIs from client ...", nb_uri);
            ret = lttcomm_recv_unix_sock(sock, uris, len);
            if (ret <= 0) {
                DBG("No URIs received from client... continuing");
                *sock_error = 1;
                ret = LTTNG_ERR_SESSION_FAIL;
                free(uris);
                goto error;
            }

            if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
                DBG("Creating session with ONE network URI is a bad call");
                ret = LTTNG_ERR_SESSION_FAIL;
                free(uris);
                goto error;
            }
        }

        ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
                &cmd_ctx->creds, 0);

        free(uris);

        break;
    }
    case LTTNG_DESTROY_SESSION:
    {
        ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

        /* Set session to NULL so we do not unlock it after free. */
        cmd_ctx->session = NULL;
        break;
    }
    case LTTNG_LIST_DOMAINS:
    {
        ssize_t nb_dom;
        struct lttng_domain *domains;

        nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
        if (nb_dom < 0) {
            /* Return value is a negative lttng_error_code. */
            ret = -nb_dom;
            goto error;
        }

        ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
        if (ret < 0) {
            free(domains);
            goto setup_error;
        }

        /* Copy domain list into message payload */
        memcpy(cmd_ctx->llm->payload, domains,
                nb_dom * sizeof(struct lttng_domain));

        free(domains);

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_LIST_CHANNELS:
    {
        ssize_t nb_chan;
        struct lttng_channel *channels = NULL;

        nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
                cmd_ctx->session, &channels);
        if (nb_chan < 0) {
            /* Return value is a negative lttng_error_code. */
            ret = -nb_chan;
            goto error;
        }

        ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
        if (ret < 0) {
            free(channels);
            goto setup_error;
        }

        /* Copy channel list into message payload */
        memcpy(cmd_ctx->llm->payload, channels,
                nb_chan * sizeof(struct lttng_channel));

        free(channels);

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_LIST_EVENTS:
    {
        ssize_t nb_event;
        struct lttng_event *events = NULL;

        nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
                cmd_ctx->lsm->u.list.channel_name, &events);
        if (nb_event < 0) {
            /* Return value is a negative lttng_error_code. */
            ret = -nb_event;
            goto error;
        }

        ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
        if (ret < 0) {
            free(events);
            goto setup_error;
        }

        /* Copy event list into message payload */
        memcpy(cmd_ctx->llm->payload, events,
                nb_event * sizeof(struct lttng_event));

        free(events);

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_LIST_SESSIONS:
    {
        unsigned int nr_sessions;

        session_lock_list();
        nr_sessions = lttng_sessions_count(
                LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
                LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

        ret = setup_lttng_msg(cmd_ctx,
                sizeof(struct lttng_session) * nr_sessions);
        if (ret < 0) {
            session_unlock_list();
            goto setup_error;
        }

        /* Fill the session array */
        cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
                LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
                LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

        session_unlock_list();

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_CALIBRATE:
    {
        ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
                &cmd_ctx->lsm->u.calibrate);
        break;
    }
    case LTTNG_REGISTER_CONSUMER:
    {
        struct consumer_data *cdata;

        switch (cmd_ctx->lsm->domain.type) {
        case LTTNG_DOMAIN_KERNEL:
            cdata = &kconsumer_data;
            break;
        default:
            ret = LTTNG_ERR_UND;
            goto error;
        }

        ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
                cmd_ctx->lsm->u.reg.path, cdata);
        break;
    }
    case LTTNG_DATA_PENDING:
    {
        ret = cmd_data_pending(cmd_ctx->session);
        break;
    }
    case LTTNG_SNAPSHOT_ADD_OUTPUT:
    {
        struct lttcomm_lttng_output_id reply;

        ret = cmd_snapshot_add_output(cmd_ctx->session,
                &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
        if (ret != LTTNG_OK) {
            goto error;
        }

        ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
        if (ret < 0) {
            goto setup_error;
        }

        /* Copy output id into message payload */
        memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_SNAPSHOT_DEL_OUTPUT:
    {
        ret = cmd_snapshot_del_output(cmd_ctx->session,
                &cmd_ctx->lsm->u.snapshot_output.output);
        break;
    }
    case LTTNG_SNAPSHOT_LIST_OUTPUT:
    {
        ssize_t nb_output;
        struct lttng_snapshot_output *outputs = NULL;

        nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
        if (nb_output < 0) {
            /* Return value is a negative lttng_error_code. */
            ret = -nb_output;
            goto error;
        }

        ret = setup_lttng_msg(cmd_ctx,
                nb_output * sizeof(struct lttng_snapshot_output));
        if (ret < 0) {
            free(outputs);
            goto setup_error;
        }

        if (outputs) {
            /* Copy output list into message payload */
            memcpy(cmd_ctx->llm->payload, outputs,
                    nb_output * sizeof(struct lttng_snapshot_output));
            free(outputs);
        }

        ret = LTTNG_OK;
        break;
    }
    case LTTNG_SNAPSHOT_RECORD:
    {
        ret = cmd_snapshot_record(cmd_ctx->session,
                &cmd_ctx->lsm->u.snapshot_record.output,
                cmd_ctx->lsm->u.snapshot_record.wait);
        break;
    }
    case LTTNG_CREATE_SESSION_SNAPSHOT:
    {
        size_t nb_uri, len;
        struct lttng_uri *uris = NULL;

        nb_uri = cmd_ctx->lsm->u.uri.size;
        len = nb_uri * sizeof(struct lttng_uri);

        if (nb_uri > 0) {
            uris = zmalloc(len);
            if (uris == NULL) {
                ret = LTTNG_ERR_FATAL;
                goto error;
            }

            /* Receive variable len data */
            DBG("Waiting for %zu URIs from client ...", nb_uri);
            ret = lttcomm_recv_unix_sock(sock, uris, len);
            if (ret <= 0) {
                DBG("No URIs received from client... continuing");
                *sock_error = 1;
                ret = LTTNG_ERR_SESSION_FAIL;
                free(uris);
                goto error;
            }

            if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
                DBG("Creating session with ONE network URI is a bad call");
                ret = LTTNG_ERR_SESSION_FAIL;
                free(uris);
                goto error;
            }
        }

        ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
                nb_uri, &cmd_ctx->creds);

        free(uris);

        break;
    }
    case LTTNG_CREATE_SESSION_LIVE:
    {
        size_t nb_uri, len;
        struct lttng_uri *uris = NULL;

        nb_uri = cmd_ctx->lsm->u.uri.size;
        len = nb_uri * sizeof(struct lttng_uri);

        if (nb_uri > 0) {
            uris = zmalloc(len);
            if (uris == NULL) {
                ret = LTTNG_ERR_FATAL;
                goto error;
            }

            /* Receive variable len data */
            DBG("Waiting for %zu URIs from client ...", nb_uri);
            ret = lttcomm_recv_unix_sock(sock, uris, len);
            if (ret <= 0) {
                DBG("No URIs received from client... continuing");
                *sock_error = 1;
                ret = LTTNG_ERR_SESSION_FAIL;
                free(uris);
                goto error;
            }

            if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
                DBG("Creating session with ONE network URI is a bad call");
                ret = LTTNG_ERR_SESSION_FAIL;
                free(uris);
                goto error;
            }
        }

        ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
                nb_uri, &cmd_ctx->creds,
                cmd_ctx->lsm->u.session_live.timer_interval);

        free(uris);

        break;
    }
    case LTTNG_SAVE_SESSION:
    {
        ret = cmd_save_sessions(&cmd_ctx->lsm->u.save_session.attr,
                &cmd_ctx->creds);
        break;
    }
    default:
        ret = LTTNG_ERR_UND;
        break;
    }

error:
    if (cmd_ctx->llm == NULL) {
        DBG("Missing llm structure. Allocating one.");
        if (setup_lttng_msg(cmd_ctx, 0) < 0) {
            goto setup_error;
        }
    }
    /* Set return code */
    cmd_ctx->llm->ret_code = ret;

setup_error:
    if (cmd_ctx->session) {
        session_unlock(cmd_ctx->session);
    }
    if (need_tracing_session) {
        session_unlock_list();
    }

init_setup_error:
    return ret;
}
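
/*
 * Summary of the flow above: (1) classify the command (domain needed?
 * session needed?), (2) allocate the reply (llm) up front for commands
 * without a payload, (3) run domain "pre-actions" (lazy kernel tracer
 * init, lazy consumerd spawn), (4) enforce per-session UID/GID access,
 * then (5) dispatch to the matching cmd_*() handler. Whatever happens,
 * the error path guarantees an llm carrying ret_code exists before
 * returning to the client thread.
 */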

/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
    int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
    uint32_t revents, nb_fd;
    struct lttng_poll_event events;
    struct health_comm_msg msg;
    struct health_comm_reply reply;

    DBG("[thread] Manage health check started");

    rcu_register_thread();

    /* We might hit an error path before this is created. */
    lttng_poll_init(&events);

    /* Create unix socket */
    sock = lttcomm_create_unix_sock(health_unix_sock_path);
    if (sock < 0) {
        ERR("Unable to create health check Unix socket");
        ret = -1;
        goto error;
    }

    if (is_root) {
        /* lttng health client socket path permissions */
        ret = chown(health_unix_sock_path, 0,
                utils_get_group_id(tracing_group_name));
        if (ret < 0) {
            ERR("Unable to set group on %s", health_unix_sock_path);
            PERROR("chown");
            ret = -1;
            goto error;
        }

        ret = chmod(health_unix_sock_path,
                S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
        if (ret < 0) {
            ERR("Unable to set permissions on %s", health_unix_sock_path);
            PERROR("chmod");
            ret = -1;
            goto error;
        }
    }

    /*
     * Set the CLOEXEC flag. Return code is useless because either way, the
     * show must go on.
     */
    (void) utils_set_fd_cloexec(sock);

    ret = lttcomm_listen_unix_sock(sock);
    if (ret < 0) {
        goto error;
    }

    /*
     * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
     * more will be added to this poll set.
     */
    ret = sessiond_set_thread_pollset(&events, 2);
    if (ret < 0) {
        goto error;
    }

    /* Add the application registration socket */
    ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
    if (ret < 0) {
        goto error;
    }

    sessiond_notify_ready();

    while (1) {
        DBG("Health check ready");

        /* Infinite blocking call, waiting for transmission */
restart:
        ret = lttng_poll_wait(&events, -1);
        if (ret < 0) {
            /*
             * Restart interrupted system call.
             */
            if (errno == EINTR) {
                goto restart;
            }
            goto error;
        }

        nb_fd = ret;

        for (i = 0; i < nb_fd; i++) {
            /* Fetch once the poll data */
            revents = LTTNG_POLL_GETEV(&events, i);
            pollfd = LTTNG_POLL_GETFD(&events, i);

            if (!revents) {
                /* No activity for this FD (poll implementation). */
                continue;
            }

            /* Thread quit pipe has been closed. Killing thread. */
            ret = sessiond_check_thread_quit_pipe(pollfd, revents);
            if (ret) {
                err = 0;
                goto exit;
            }

            /* Event on the registration socket */
            if (pollfd == sock) {
                if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
                    ERR("Health socket poll error");
                    goto error;
                }
            }
        }

        new_sock = lttcomm_accept_unix_sock(sock);
        if (new_sock < 0) {
            goto error;
        }

        /*
         * Set the CLOEXEC flag. Return code is useless because either way, the
         * show must go on.
         */
        (void) utils_set_fd_cloexec(new_sock);

        DBG("Receiving data from client for health...");
        ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
        if (ret <= 0) {
            DBG("Nothing recv() from client... continuing");
            ret = close(new_sock);
            if (ret) {
                PERROR("close");
            }
            new_sock = -1;
            continue;
        }

        rcu_thread_online();

        memset(&reply, 0, sizeof(reply));
        for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
            /*
             * health_check_state returns 0 if health is
             * bad.
             */
            if (!health_check_state(health_sessiond, i)) {
                reply.ret_code |= 1ULL << i;
            }
        }

        DBG2("Health check return value %" PRIx64, reply.ret_code);

        ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
        if (ret < 0) {
            ERR("Failed to send health data back to client");
        }

        /* End of transmission */
        ret = close(new_sock);
        if (ret) {
            PERROR("close");
        }
        new_sock = -1;
    }

exit:
error:
    if (err) {
        ERR("Health error occurred in %s", __func__);
    }
    DBG("Health check thread dying");
    unlink(health_unix_sock_path);
    if (sock >= 0) {
        ret = close(sock);
        if (ret) {
            PERROR("close");
        }
    }

    lttng_poll_clean(&events);

    rcu_unregister_thread();
    return NULL;
}
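
/*
 * The health reply is a bitmask: bit i of reply.ret_code is set when
 * thread type i failed its liveness check. A client would typically test
 * it along these lines (illustrative only):
 *
 *     if (reply.ret_code & (1ULL << HEALTH_SESSIOND_TYPE_CMD)) {
 *         // the client command thread is unhealthy
 *     }
 */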

/*
 * This thread manages all client requests using the unix client socket for
 * communication.
 */
static void *thread_manage_clients(void *data)
{
    int sock = -1, ret, i, pollfd, err = -1;
    int sock_error;
    uint32_t revents, nb_fd;
    struct command_ctx *cmd_ctx = NULL;
    struct lttng_poll_event events;

    DBG("[thread] Manage client started");

    rcu_register_thread();

    health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);

    health_code_update();

    ret = lttcomm_listen_unix_sock(client_sock);
    if (ret < 0) {
        goto error_listen;
    }

    /*
     * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
     * more will be added to this poll set.
     */
    ret = sessiond_set_thread_pollset(&events, 2);
    if (ret < 0) {
        goto error_create_poll;
    }

    /* Add the application registration socket */
    ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
    if (ret < 0) {
        goto error;
    }

    sessiond_notify_ready();
    ret = sem_post(&load_info->message_thread_ready);
    if (ret) {
        PERROR("sem_post message_thread_ready");
        goto error;
    }

    /* This testpoint is after we signal readiness to the parent. */
    if (testpoint(sessiond_thread_manage_clients)) {
        goto error;
    }

    if (testpoint(sessiond_thread_manage_clients_before_loop)) {
        goto error;
    }

    health_code_update();

    while (1) {
        DBG("Accepting client command ...");

        /* Infinite blocking call, waiting for transmission */
restart:
        health_poll_entry();
        ret = lttng_poll_wait(&events, -1);
        health_poll_exit();
        if (ret < 0) {
            /*
             * Restart interrupted system call.
             */
            if (errno == EINTR) {
                goto restart;
            }
            goto error;
        }

        nb_fd = ret;

        for (i = 0; i < nb_fd; i++) {
            /* Fetch once the poll data */
            revents = LTTNG_POLL_GETEV(&events, i);
            pollfd = LTTNG_POLL_GETFD(&events, i);

            health_code_update();

            if (!revents) {
                /* No activity for this FD (poll implementation). */
                continue;
            }

            /* Thread quit pipe has been closed. Killing thread. */
            ret = sessiond_check_thread_quit_pipe(pollfd, revents);
            if (ret) {
                err = 0;
                goto exit;
            }

            /* Event on the registration socket */
            if (pollfd == client_sock) {
                if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
                    ERR("Client socket poll error");
                    goto error;
                }
            }
        }

        DBG("Wait for client response");

        health_code_update();

        sock = lttcomm_accept_unix_sock(client_sock);
        if (sock < 0) {
            goto error;
        }

        /*
         * Set the CLOEXEC flag. Return code is useless because either way, the
         * show must go on.
         */
        (void) utils_set_fd_cloexec(sock);

        /* Set socket option for credentials retrieval */
        ret = lttcomm_setsockopt_creds_unix_sock(sock);
        if (ret < 0) {
            goto error;
        }

        /* Allocate context command to process the client request */
        cmd_ctx = zmalloc(sizeof(struct command_ctx));
        if (cmd_ctx == NULL) {
            PERROR("zmalloc cmd_ctx");
            goto error;
        }

        /* Allocate data buffer for reception */
        cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
        if (cmd_ctx->lsm == NULL) {
            PERROR("zmalloc cmd_ctx->lsm");
            goto error;
        }

        cmd_ctx->llm = NULL;
        cmd_ctx->session = NULL;

        health_code_update();

        /*
         * Data is received from the lttng client. The struct
         * lttcomm_session_msg (lsm) contains the command and data request of
         * the client.
         */
        DBG("Receiving data from client ...");
        ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
                sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
        if (ret <= 0) {
            DBG("Nothing recv() from client... continuing");
            ret = close(sock);
            if (ret) {
                PERROR("close");
            }
            sock = -1;
            clean_command_ctx(&cmd_ctx);
            continue;
        }

        health_code_update();

        // TODO: Validate cmd_ctx including sanity check for
        // security purpose.

        rcu_thread_online();
        /*
         * This function dispatches the work to the kernel or userspace tracer
         * libs and fills the lttcomm_lttng_msg data structure with all the
         * information needed by the client. The command context struct
         * contains everything this function may need.
         */
        ret = process_client_msg(cmd_ctx, sock, &sock_error);
        rcu_thread_offline();
        if (ret < 0) {
            ret = close(sock);
            if (ret) {
                PERROR("close");
            }
            sock = -1;
            /*
             * TODO: Inform client somehow of the fatal error. At
             * this point, ret < 0 means that a zmalloc failed
             * (ENOMEM). Error detected but still accept
             * command, unless a socket error has been
             * detected.
             */
            clean_command_ctx(&cmd_ctx);
            continue;
        }

        health_code_update();

        DBG("Sending response (size: %d, retcode: %s)",
                cmd_ctx->lttng_msg_size,
                lttng_strerror(-cmd_ctx->llm->ret_code));
        ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
        if (ret < 0) {
            ERR("Failed to send data back to client");
        }

        /* End of transmission */
        ret = close(sock);
        if (ret) {
            PERROR("close");
        }
        sock = -1;

        clean_command_ctx(&cmd_ctx);

        health_code_update();
    }

exit:
error:
    if (sock >= 0) {
        ret = close(sock);
        if (ret) {
            PERROR("close");
        }
    }

    lttng_poll_clean(&events);
    clean_command_ctx(&cmd_ctx);

error_listen:
error_create_poll:
    unlink(client_unix_sock_path);
    if (client_sock >= 0) {
        ret = close(client_sock);
        if (ret) {
            PERROR("close");
        }
    }

    if (err) {
        health_error();
        ERR("Health error occurred in %s", __func__);
    }

    health_unregister(health_sessiond);

    DBG("Client thread dying");

    rcu_unregister_thread();
    return NULL;
}
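
/*
 * Per-connection lifecycle of the loop above: accept, set CLOEXEC and
 * credential passing, recv the fixed-size lsm header (plus any
 * command-specific variable-length data inside process_client_msg()),
 * send back the llm reply, close. sock_error lets process_client_msg()
 * distinguish socket-level failures from command failures so a broken
 * client cannot wedge the thread.
 */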

/*
 * usage function on stderr
 */
static void usage(void)
{
    fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
    fprintf(stderr, "  -h, --help                         Display this usage.\n");
    fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
    fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
    fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
    fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
    fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
    fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
    fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
    fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
    fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
    fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
    fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
    fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
    fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
    fprintf(stderr, "  -b, --background                   Start as a daemon, keeping console open.\n");
    fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
    fprintf(stderr, "  -V, --version                      Show version number.\n");
    fprintf(stderr, "  -S, --sig-parent                   Send SIGUSR1 to parent pid to notify readiness.\n");
    fprintf(stderr, "  -q, --quiet                        No output at all.\n");
    fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
    fprintf(stderr, "  -p, --pidfile FILE                 Write a pid to FILE name overriding the default value.\n");
    fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
    fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
    fprintf(stderr, "      --jul-tcp-port                 JUL application registration TCP port\n");
    fprintf(stderr, "  -f  --config PATH                  Load daemon configuration file\n");
    fprintf(stderr, "  -l  --load PATH                    Load session configuration\n");
    fprintf(stderr, "      --kmod-probes                  Specify kernel module probes to load\n");
}

/*
 * Take an option from the getopt output and set it in the right variable to be
 * used later.
 *
 * Return 0 on success else a negative value.
 */
static int set_option(int opt, const char *arg, const char *optname)
{
    int ret = 0;

    if (arg && arg[0] == '\0') {
        /*
         * This only happens if the value is read from daemon config
         * file. This means the option requires an argument and the
         * configuration file contains a line such as:
         * my_option =
         */
        ret = -EINVAL;
        goto end;
    }

    switch (opt) {
    case 0:
        fprintf(stderr, "option %s", optname);
        if (arg) {
            fprintf(stderr, " with arg %s\n", arg);
        }
        break;
    case 'c':
        snprintf(client_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'a':
        snprintf(apps_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'd':
        opt_daemon = 1;
        break;
    case 'b':
        opt_background = 1;
        break;
    case 'g':
        /*
         * If the override option is set, the pointer points to a
         * *non* const thus freeing it even though the variable type is
         * set to const.
         */
        if (tracing_group_name_override) {
            free((void *) tracing_group_name);
        }
        tracing_group_name = strdup(arg);
        if (!tracing_group_name) {
            perror("strdup");
            ret = -ENOMEM;
        }
        tracing_group_name_override = 1;
        break;
    case 'h':
        usage();
        exit(EXIT_FAILURE);
    case 'V':
        fprintf(stdout, "%s\n", VERSION);
        exit(EXIT_SUCCESS);
    case 'S':
        opt_sig_parent = 1;
        break;
    case 'E':
        snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'C':
        snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'F':
        snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'D':
        snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'H':
        snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'G':
        snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", arg);
        break;
    case 'N':
        opt_no_kernel = 1;
        break;
    case 'q':
        lttng_opt_quiet = 1;
        break;
    case 'v':
        /* Verbose level can increase using multiple -v */
        if (arg) {
            /* Value obtained from config file */
            lttng_opt_verbose = config_parse_value(arg);
        } else {
            /* -v used on command line */
            lttng_opt_verbose++;
        }
        /* Clamp value to [0, 3] */
        lttng_opt_verbose = lttng_opt_verbose < 0 ? 0 :
            (lttng_opt_verbose <= 3 ? lttng_opt_verbose : 3);
        break;
    case 'Z':
        if (arg) {
            opt_verbose_consumer = config_parse_value(arg);
        } else {
            opt_verbose_consumer += 1;
        }
        break;
    case 'u':
        if (consumerd32_bin_override) {
            free((void *) consumerd32_bin);
        }
        consumerd32_bin = strdup(arg);
        if (!consumerd32_bin) {
            perror("strdup");
            ret = -ENOMEM;
        }
        consumerd32_bin_override = 1;
        break;
    case 'U':
        if (consumerd32_libdir_override) {
            free((void *) consumerd32_libdir);
        }
        consumerd32_libdir = strdup(arg);
        if (!consumerd32_libdir) {
            perror("strdup");
            ret = -ENOMEM;
        }
        consumerd32_libdir_override = 1;
        break;
    case 't':
        if (consumerd64_bin_override) {
            free((void *) consumerd64_bin);
        }
        consumerd64_bin = strdup(arg);
        if (!consumerd64_bin) {
            perror("strdup");
            ret = -ENOMEM;
        }
        consumerd64_bin_override = 1;
        break;
    case 'T':
        if (consumerd64_libdir_override) {
            free((void *) consumerd64_libdir);
        }
        consumerd64_libdir = strdup(arg);
        if (!consumerd64_libdir) {
            perror("strdup");
            ret = -ENOMEM;
        }
        consumerd64_libdir_override = 1;
        break;
    case 'p':
        free(opt_pidfile);
        opt_pidfile = strdup(arg);
        if (!opt_pidfile) {
            perror("strdup");
            ret = -ENOMEM;
        }
        break;
    case 'J': /* JUL TCP port. */
    {
        unsigned long v;

        errno = 0;
        v = strtoul(arg, NULL, 0);
        if (errno != 0 || !isdigit(arg[0])) {
            ERR("Wrong value in --jul-tcp-port parameter: %s", arg);
            return -1;
        }
        if (v == 0 || v >= 65535) {
            ERR("Port overflow in --jul-tcp-port parameter: %s", arg);
            return -1;
        }
        jul_tcp_port = (uint32_t) v;
        DBG3("JUL TCP port set to non default: %u", jul_tcp_port);
        break;
    }
    case 'l':
        free(opt_load_session_path);
        opt_load_session_path = strdup(arg);
        if (!opt_load_session_path) {
            perror("strdup");
            ret = -ENOMEM;
        }
        break;
    case 'P': /* probe modules list */
        free(kmod_probes_list);
        kmod_probes_list = strdup(arg);
        if (!kmod_probes_list) {
            perror("strdup");
            ret = -ENOMEM;
        }
        break;
    case 'f':
        /* This is handled in set_options() thus silent break. */
        break;
    default:
        /* Unknown option or other error.
         * Error is printed by getopt, just return */
        ret = -1;
    }

end:
    if (ret == -EINVAL) {
        const char *opt_name = "unknown";
        int i;

        for (i = 0; i < sizeof(long_options) / sizeof(struct option);
                i++) {
            if (opt == long_options[i].val) {
                opt_name = long_options[i].name;
                break;
            }
        }

        WARN("Invalid argument provided for option \"%s\", using default value.",
                opt_name);
    }

    return ret;
}
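
/*
 * set_option() is shared by both input paths: getopt results from the
 * command line and entries read from the daemon configuration file (via
 * config_entry_handler() below). That is why it must tolerate a NULL arg
 * for flag-style options and an empty string for config lines that give
 * no value.
 */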

/*
 * config_entry_handler_cb used to handle options read from a config file.
 * See config_entry_handler_cb comment in common/config/config.h for the
 * return value conventions.
 */
static int config_entry_handler(const struct config_entry *entry, void *unused)
{
    int ret = 0, i;

    if (!entry || !entry->name || !entry->value) {
        ret = -EINVAL;
        goto end;
    }

    /* Check if the option is to be ignored */
    for (i = 0; i < sizeof(config_ignore_options) / sizeof(char *); i++) {
        if (!strcmp(entry->name, config_ignore_options[i])) {
            goto end;
        }
    }

    for (i = 0; i < (sizeof(long_options) / sizeof(struct option)) - 1;
            i++) {

        /* Ignore if not fully matched. */
        if (strcmp(entry->name, long_options[i].name)) {
            continue;
        }

        /*
         * If the option takes no argument on the command line, we have to
         * check if the value is "true". We support non-zero numeric values,
         * true, on and yes.
         */
        if (!long_options[i].has_arg) {
            ret = config_parse_value(entry->value);
            if (ret <= 0) {
                if (ret) {
                    WARN("Invalid configuration value \"%s\" for option %s",
                            entry->value, entry->name);
                }
                /* False, skip boolean config option. */
                goto end;
            }
        }

        ret = set_option(long_options[i].val, entry->value, entry->name);
        goto end;
    }

    WARN("Unrecognized option \"%s\" in daemon configuration file.",
            entry->name);

end:
    return ret;
}

/*
 * daemon configuration loading and argument parsing
 */
static int set_options(int argc, char **argv)
{
    int ret = 0, c = 0, option_index = 0;
    int orig_optopt = optopt, orig_optind = optind;
    char *optstring;
    const char *config_path = NULL;

    optstring = utils_generate_optstring(long_options,
            sizeof(long_options) / sizeof(struct option));
    if (!optstring) {
        ret = -ENOMEM;
        goto end;
    }

    /* Check for the --config option */
    while ((c = getopt_long(argc, argv, optstring, long_options,
                    &option_index)) != -1) {
        if (c == '?') {
            ret = -EINVAL;
            goto end;
        } else if (c != 'f') {
            /* if not equal to --config option. */
            continue;
        }

        config_path = utils_expand_path(optarg);
        if (!config_path) {
            ERR("Failed to resolve path: %s", optarg);
        }
    }

    ret = config_get_section_entries(config_path, config_section_name,
            config_entry_handler, NULL);
    if (ret) {
        if (ret > 0) {
            ERR("Invalid configuration option at line %i", ret);
            ret = -1;
        }
        goto end;
    }

    /* Reset getopt's global state */
    optopt = orig_optopt;
    optind = orig_optind;
    while (1) {
        c = getopt_long(argc, argv, optstring, long_options, &option_index);
        if (c == -1) {
            break;
        }

        ret = set_option(c, optarg, long_options[option_index].name);
        if (ret < 0) {
            break;
        }
    }

end:
    free(optstring);
    return ret;
}
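
/*
 * Note the two-pass parsing above: the first getopt_long() pass only
 * looks for --config and applies the file's entries, then the getopt
 * state is reset so the second pass can apply command-line options on
 * top. Net effect: command-line arguments override configuration file
 * values.
 */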

/*
 * Create the two sockets needed by the daemon:
 * apps_sock - The communication socket for all UST apps.
 * client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
    int ret = 0;
    mode_t old_umask;

    old_umask = umask(0);

    /* Create client tool unix socket */
    client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
    if (client_sock < 0) {
        ERR("Create unix sock failed: %s", client_unix_sock_path);
        ret = -1;
        goto end;
    }

    /* Set the cloexec flag */
    ret = utils_set_fd_cloexec(client_sock);
    if (ret < 0) {
        ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
                "Continuing but note that the consumer daemon will have a "
                "reference to this socket on exec()", client_sock);
    }

    /* File permission MUST be 660 */
    ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
    if (ret < 0) {
        ERR("Set file permissions failed: %s", client_unix_sock_path);
        PERROR("chmod");
        goto end;
    }

    /* Create the application unix socket */
    apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
    if (apps_sock < 0) {
        ERR("Create unix sock failed: %s", apps_unix_sock_path);
        ret = -1;
        goto end;
    }

    /* Set the cloexec flag */
    ret = utils_set_fd_cloexec(apps_sock);
    if (ret < 0) {
        ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
                "Continuing but note that the consumer daemon will have a "
                "reference to this socket on exec()", apps_sock);
    }

    /* File permission MUST be 666 */
    ret = chmod(apps_unix_sock_path,
            S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
    if (ret < 0) {
        ERR("Set file permissions failed: %s", apps_unix_sock_path);
        PERROR("chmod");
        goto end;
    }

    DBG3("Session daemon client socket %d and application socket %d created",
            client_sock, apps_sock);

end:
    umask(old_umask);
    return ret;
}

/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
    /* Is there anybody out there ? */
    if (lttng_session_daemon_alive()) {
        return -EEXIST;
    }

    return 0;
}

/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
    int ret;
    gid_t gid;

    gid = utils_get_group_id(tracing_group_name);

    /* Set lttng run dir */
    ret = chown(rundir, 0, gid);
    if (ret < 0) {
        ERR("Unable to set group on %s", rundir);
        PERROR("chown");
    }

    /*
     * Ensure all applications and tracing group can search the run
     * dir. Allow everyone to read the directory, since it does not
     * buy us anything to hide its content.
     */
    ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
    if (ret < 0) {
        ERR("Unable to set permissions on %s", rundir);
        PERROR("chmod");
    }

    /* lttng client socket path */
    ret = chown(client_unix_sock_path, 0, gid);
    if (ret < 0) {
        ERR("Unable to set group on %s", client_unix_sock_path);
        PERROR("chown");
    }

    /* kconsumer error socket path */
    ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
    if (ret < 0) {
        ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
        PERROR("chown");
    }

    /* 64-bit ustconsumer error socket path */
    ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
    if (ret < 0) {
        ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
        PERROR("chown");
    }

    /* 32-bit ustconsumer compat32 error socket path */
    ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
    if (ret < 0) {
        ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
        PERROR("chown");
    }

    DBG("All permissions are set");

    return ret;
}

/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
    int ret;

    DBG3("Creating LTTng run directory: %s", rundir);

    ret = mkdir(rundir, S_IRWXU);
    if (ret < 0) {
        if (errno != EEXIST) {
            ERR("Unable to create %s", rundir);
            goto error;
        } else {
            ret = 0;
        }
    }

error:
    return ret;
}

/*
 * Setup sockets and directory needed by the kconsumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
        const char *rundir)
{
    int ret;
    char path[PATH_MAX];

    switch (consumer_data->type) {
    case LTTNG_CONSUMER_KERNEL:
        snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
        break;
    case LTTNG_CONSUMER64_UST:
        snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
        break;
    case LTTNG_CONSUMER32_UST:
        snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
        break;
    default:
        ERR("Consumer type unknown");
        ret = -EINVAL;
        goto error;
    }

    DBG2("Creating consumer directory: %s", path);

    ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
    if (ret < 0) {
        if (errno != EEXIST) {
            PERROR("mkdir");
            ERR("Failed to create %s", path);
            goto error;
        }
        ret = -1;
    }
    if (is_root) {
        ret = chown(path, 0, utils_get_group_id(tracing_group_name));
        if (ret < 0) {
            ERR("Unable to set group on %s", path);
            PERROR("chown");
            ret = -1;
            goto error;
        }
    }

    /* Create the kconsumerd error unix socket */
    consumer_data->err_sock =
        lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
    if (consumer_data->err_sock < 0) {
        ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
        ret = -1;
        goto error;
    }

    /*
     * Set the CLOEXEC flag. Return code is useless because either way, the
     * show must go on.
     */
    ret = utils_set_fd_cloexec(consumer_data->err_sock);
    if (ret < 0) {
        PERROR("utils_set_fd_cloexec");
        /* continue anyway */
    }

    /* File permission MUST be 660 */
    ret = chmod(consumer_data->err_unix_sock_path,
            S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
    if (ret < 0) {
        ERR("Set file permissions failed: %s",
                consumer_data->err_unix_sock_path);
        PERROR("chmod");
        goto error;
    }

error:
    return ret;
}

/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
    switch (sig) {
    case SIGPIPE:
        DBG("SIGPIPE caught");
        return;
    case SIGINT:
        DBG("SIGINT caught");
        stop_threads();
        break;
    case SIGTERM:
        DBG("SIGTERM caught");
        stop_threads();
        break;
    case SIGUSR1:
        CMM_STORE_SHARED(recv_child_signal, 1);
        break;
    default:
        break;
    }
}

/*
 * Setup signal handler for :
 *      SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
    int ret = 0;
    struct sigaction sa;
    sigset_t sigset;

    if ((ret = sigemptyset(&sigset)) < 0) {
        PERROR("sigemptyset");
        return ret;
    }

    sa.sa_handler = sighandler;
    sa.sa_mask = sigset;
    sa.sa_flags = 0;
    if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
        PERROR("sigaction");
        return ret;
    }

    if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
        PERROR("sigaction");
        return ret;
    }

    if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
        PERROR("sigaction");
        return ret;
    }

    if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
        PERROR("sigaction");
        return ret;
    }

    DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");

    return ret;
}

/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
    int ret;
    struct rlimit lim;

    /* The kernel does not allow an infinite limit for open files */
    lim.rlim_cur = 65535;
    lim.rlim_max = 65535;

    ret = setrlimit(RLIMIT_NOFILE, &lim);
    if (ret < 0) {
        PERROR("failed to set open files limit");
    }
}

/*
 * Write pidfile using the rundir and opt_pidfile.
 */
static void write_pidfile(void)
{
    int ret;
    char pidfile_path[PATH_MAX];

    assert(rundir);

    if (opt_pidfile) {
        strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path));
    } else {
        /* Build pidfile path from rundir and opt_pidfile. */
        ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
                DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
        if (ret < 0) {
            PERROR("snprintf pidfile path");
            goto error;
        }
    }

    /*
     * Create pid file in rundir. Return value is of no importance. The
     * execution will continue even though we are not able to write the file.
     */
    (void) utils_create_pid_file(getpid(), pidfile_path);

error:
    return;
}

/*
 * Create lockfile using the rundir and return its fd.
 */
static int create_lockfile(void)
{
    int ret;
    char lockfile_path[PATH_MAX];

    ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
    if (ret < 0) {
        goto error;
    }

    ret = utils_create_lock_file(lockfile_path);

error:
    return ret;
}

/*
 * Write JUL TCP port using the rundir.
 */
static void write_julport(void)
{
    int ret;
    char path[PATH_MAX];

    assert(rundir);

    ret = snprintf(path, sizeof(path), "%s/"
            DEFAULT_LTTNG_SESSIOND_JULPORT_FILE, rundir);
    if (ret < 0) {
        PERROR("snprintf julport path");
        goto error;
    }

    /*
     * Create TCP JUL port file in rundir. Return value is of no importance.
     * The execution will continue even though we are not able to write the
     * file.
     */
    (void) utils_create_pid_file(jul_tcp_port, path);

error:
    return;
}

/*
 * Start the load session thread and detach from it so the main thread can
 * continue. This does not return a value since whatever the outcome, the main
 * thread will continue.
 */
static void start_load_session_thread(void)
{
    int ret;

    /* Create session loading thread. */
    ret = pthread_create(&load_session_thread, NULL, thread_load_session,
            load_info);
    if (ret != 0) {
        PERROR("pthread_create load_session_thread");
        goto error_create;
    }

    ret = pthread_detach(load_session_thread);
    if (ret) {
        PERROR("pthread_detach load_session_thread");
    }

    /* Everything went well so don't cleanup anything. */

error_create:
    /* The cleanup() function will destroy the load_info data. */
    return;
}

/*
 * main
 */
int main(int argc, char **argv)
{
    int ret = 0;
    const char *home_path, *env_app_timeout;

    init_kernel_workarounds();

    rcu_register_thread();

    if ((ret = set_signal_handler()) < 0) {
        goto error;
    }

    setup_consumerd_path();

    page_size = sysconf(_SC_PAGESIZE);
    if (page_size < 0) {
        PERROR("sysconf _SC_PAGESIZE");
        page_size = LONG_MAX;
        WARN("Fallback page size to %ld", page_size);
    }

    /* Parse arguments and load the daemon configuration file */
    if ((ret = set_options(argc, argv)) < 0) {
        goto error;
    }

    /* Daemonize */
    if (opt_daemon || opt_background) {
        int i;

        ret = lttng_daemonize(&child_ppid, &recv_child_signal,
                !opt_background);
        if (ret < 0) {
            goto error;
        }

        /*
         * We are in the child. Make sure all other file descriptors are
         * closed, in case we are called with more opened file descriptors than
         * the standard ones.
         */
        for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
            (void) close(i);
        }
    }

    /* Create thread quit pipe */
    if ((ret = init_thread_quit_pipe()) < 0) {
        goto error;
    }

    /* Check if daemon is UID = 0 */
    is_root = !getuid();

    if (is_root) {
        rundir = strdup(DEFAULT_LTTNG_RUNDIR);
        if (!rundir) {
            ret = -ENOMEM;
            goto error;
        }

        /* Create global run dir with root access */
        ret = create_lttng_rundir(rundir);
        if (ret < 0) {
            goto error;
        }

        if (strlen(apps_unix_sock_path) == 0) {
            snprintf(apps_unix_sock_path, PATH_MAX,
                    DEFAULT_GLOBAL_APPS_UNIX_SOCK);
        }

        if (strlen(client_unix_sock_path) == 0) {
            snprintf(client_unix_sock_path, PATH_MAX,
                    DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
        }

        /* Set global SHM for ust */
        if (strlen(wait_shm_path) == 0) {
            snprintf(wait_shm_path, PATH_MAX,
                    DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
        }

        if (strlen(health_unix_sock_path) == 0) {
            snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
                    DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
        }

        /* Setup kernel consumerd path */
        snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
                DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
        snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
                DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

        DBG2("Kernel consumer err path: %s",
                kconsumer_data.err_unix_sock_path);
        DBG2("Kernel consumer cmd path: %s",
                kconsumer_data.cmd_unix_sock_path);
    } else {
        home_path = utils_get_home_dir();
        if (home_path == NULL) {
            /* TODO: Add --socket PATH option */
            ERR("Can't get HOME directory for sockets creation.");
            ret = -EPERM;
            goto error;
        }

        /*
         * Create rundir from home path. This will create something like
         * $HOME/.lttng
         */
        ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
        if (ret < 0) {
            ret = -ENOMEM;
            goto error;
        }

        ret = create_lttng_rundir(rundir);
        if (ret < 0) {
            goto error;
        }

        if (strlen(apps_unix_sock_path) == 0) {
            snprintf(apps_unix_sock_path, PATH_MAX,
                    DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
        }

        /* Set the cli tool unix socket path */
        if (strlen(client_unix_sock_path) == 0) {
            snprintf(client_unix_sock_path, PATH_MAX,
                    DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
        }

        /* Set global SHM for ust */
        if (strlen(wait_shm_path) == 0) {
            snprintf(wait_shm_path, PATH_MAX,
                    DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
        }

        /* Set health check Unix path */
        if (strlen(health_unix_sock_path) == 0) {
            snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
                    DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
        }
    }

    lockfile_fd = create_lockfile();
    if (lockfile_fd < 0) {
        goto error;
    }

    /* Set consumer initial state */
    kernel_consumerd_state = CONSUMER_STOPPED;
    ust_consumerd_state = CONSUMER_STOPPED;

    DBG("Client socket path %s", client_unix_sock_path);
    DBG("Application socket path %s", apps_unix_sock_path);
    DBG("Application wait path %s", wait_shm_path);
    DBG("LTTng run directory path: %s", rundir);

    /* 32 bits consumerd path setup */
    snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
            DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
    snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
            DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

    DBG2("UST consumer 32 bits err path: %s",
            ustconsumer32_data.err_unix_sock_path);
    DBG2("UST consumer 32 bits cmd path: %s",
            ustconsumer32_data.cmd_unix_sock_path);

    /* 64 bits consumerd path setup */
    snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
            DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
    snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
            DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

    DBG2("UST consumer 64 bits err path: %s",
            ustconsumer64_data.err_unix_sock_path);
    DBG2("UST consumer 64 bits cmd path: %s",
            ustconsumer64_data.cmd_unix_sock_path);

    /*
     * See if daemon already exist.
     */
    if ((ret = check_existing_daemon()) < 0) {
        ERR("Already running daemon.\n");
        /*
         * We do not goto exit because we must not cleanup()
         * because a daemon is already running.
         */
        goto error;
    }

    /*
     * Init UST app hash table. Alloc hash table before this point since
     * cleanup() can get called after that point.
     */
    ust_app_ht_alloc();

    /* Initialize JUL domain subsystem. */
    if ((ret = jul_init()) < 0) {
        /* ENOMEM at this point. */
        goto error;
    }

    /* After this point, we can safely call cleanup() with "goto exit" */

    /*
     * These actions must be executed as root. We do that *after* setting up
     * the sockets path because we MUST make the check for another daemon using
     * those paths *before* trying to set the kernel consumer sockets and init
     * kernel tracer.
     */
    if (is_root) {
        ret = set_consumer_sockets(&kconsumer_data, rundir);
        if (ret < 0) {
            goto exit;
        }

        /* Setup kernel tracer */
        if (!opt_no_kernel) {
            init_kernel_tracer();
        }

        /* Set ulimit for open files */
        set_ulimit();
    }
    /* init lttng_fd tracking must be done after set_ulimit. */
    lttng_fd_init();

    ret = set_consumer_sockets(&ustconsumer64_data, rundir);
    if (ret < 0) {
        goto exit;
    }

    ret = set_consumer_sockets(&ustconsumer32_data, rundir);
    if (ret < 0) {
        goto exit;
    }

    /* Setup the needed unix socket */
    if ((ret = init_daemon_socket()) < 0) {
        goto exit;
    }

    /* Set credentials to socket */
    if (is_root && ((ret = set_permissions(rundir)) < 0)) {
        goto exit;
    }

    /* Get parent pid if -S, --sig-parent is specified. */
    if (opt_sig_parent) {
        ppid = getppid();
    }

    /* Setup the kernel pipe for waking up the kernel thread */
    if (is_root && !opt_no_kernel) {
        if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
            goto exit;
        }
    }

    /* Setup the thread ht_cleanup communication pipe. */
    if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
        goto exit;
    }

    /* Setup the thread apps communication pipe. */
    if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
        goto exit;
    }

    /* Setup the thread apps notify communication pipe. */
    if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
        goto exit;
    }

    /* Initialize global buffer per UID and PID registry. */
    buffer_reg_init_uid_registry();
    buffer_reg_init_pid_registry();

    /* Init UST command queue. */
    cds_wfq_init(&ust_cmd_queue.queue);

    /*
     * Get session list pointer. This pointer MUST NOT be free(). This list is
     * statically declared in session.c
     */
    session_list_ptr = session_get_list();

    /* Set up max poll set size */
    lttng_poll_set_max_size();

    cmd_init();

    /* Check for the application socket timeout env variable. */
    env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
    if (env_app_timeout) {
        app_socket_timeout = atoi(env_app_timeout);
    } else {
        app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
    }

    write_pidfile();
    write_julport();

    /* Initialize communication library */
    lttcomm_init();
    /* This is to get the TCP timeout value. */
    lttcomm_inet_init();

    if (load_session_init_data(&load_info) < 0) {
        goto exit;
    }
    load_info->path = opt_load_session_path;

    /*
     * Initialize the health check subsystem. This call should set the
     * appropriate time values.
     */
    health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
    if (!health_sessiond) {
        PERROR("health_app_create error");
        goto exit_health_sessiond_cleanup;
    }

    /* Create thread to clean up RCU hash tables */
    ret = pthread_create(&ht_cleanup_thread, NULL,
            thread_ht_cleanup, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create ht_cleanup");
        goto exit_ht_cleanup;
    }

    /* Create health-check thread */
    ret = pthread_create(&health_thread, NULL,
            thread_manage_health, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create health");
        goto exit_health;
    }

    /* Create thread to manage the client socket */
    ret = pthread_create(&client_thread, NULL,
            thread_manage_clients, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create clients");
        goto exit_client;
    }

    /* Create thread to dispatch registration */
    ret = pthread_create(&dispatch_thread, NULL,
            thread_dispatch_ust_registration, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create dispatch");
        goto exit_dispatch;
    }

    /* Create thread to manage application registration. */
    ret = pthread_create(&reg_apps_thread, NULL,
            thread_registration_apps, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create registration");
        goto exit_reg_apps;
    }

    /* Create thread to manage application socket */
    ret = pthread_create(&apps_thread, NULL,
            thread_manage_apps, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create apps");
        goto exit_apps;
    }

    /* Create thread to manage application notify socket */
    ret = pthread_create(&apps_notify_thread, NULL,
            ust_thread_manage_notify, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create notify");
        goto exit_apps_notify;
    }

    /* Create JUL registration thread. */
    ret = pthread_create(&jul_reg_thread, NULL,
            jul_thread_manage_registration, (void *) NULL);
    if (ret != 0) {
        PERROR("pthread_create JUL");
        goto exit_jul_reg;
    }

    /* Don't start this thread if kernel tracing is not requested nor root */
= asprintf(&rundir
, DEFAULT_LTTNG_HOME_RUNDIR
, home_path
);
5157 ret
= create_lttng_rundir(rundir
);
5162 if (strlen(apps_unix_sock_path
) == 0) {
5163 snprintf(apps_unix_sock_path
, PATH_MAX
,
5164 DEFAULT_HOME_APPS_UNIX_SOCK
, home_path
);
5167 /* Set the cli tool unix socket path */
5168 if (strlen(client_unix_sock_path
) == 0) {
5169 snprintf(client_unix_sock_path
, PATH_MAX
,
5170 DEFAULT_HOME_CLIENT_UNIX_SOCK
, home_path
);
5173 /* Set global SHM for ust */
5174 if (strlen(wait_shm_path
) == 0) {
5175 snprintf(wait_shm_path
, PATH_MAX
,
5176 DEFAULT_HOME_APPS_WAIT_SHM_PATH
, getuid());
5179 /* Set health check Unix path */
5180 if (strlen(health_unix_sock_path
) == 0) {
5181 snprintf(health_unix_sock_path
, sizeof(health_unix_sock_path
),
5182 DEFAULT_HOME_HEALTH_UNIX_SOCK
, home_path
);
5186 lockfile_fd
= create_lockfile();
5187 if (lockfile_fd
< 0) {
5191 /* Set consumer initial state */
5192 kernel_consumerd_state
= CONSUMER_STOPPED
;
5193 ust_consumerd_state
= CONSUMER_STOPPED
;
5195 DBG("Client socket path %s", client_unix_sock_path
);
5196 DBG("Application socket path %s", apps_unix_sock_path
);
5197 DBG("Application wait path %s", wait_shm_path
);
5198 DBG("LTTng run directory path: %s", rundir
);
5200 /* 32 bits consumerd path setup */
5201 snprintf(ustconsumer32_data
.err_unix_sock_path
, PATH_MAX
,
5202 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH
, rundir
);
5203 snprintf(ustconsumer32_data
.cmd_unix_sock_path
, PATH_MAX
,
5204 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH
, rundir
);
5206 DBG2("UST consumer 32 bits err path: %s",
5207 ustconsumer32_data
.err_unix_sock_path
);
5208 DBG2("UST consumer 32 bits cmd path: %s",
5209 ustconsumer32_data
.cmd_unix_sock_path
);
5211 /* 64 bits consumerd path setup */
5212 snprintf(ustconsumer64_data
.err_unix_sock_path
, PATH_MAX
,
5213 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH
, rundir
);
5214 snprintf(ustconsumer64_data
.cmd_unix_sock_path
, PATH_MAX
,
5215 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH
, rundir
);
5217 DBG2("UST consumer 64 bits err path: %s",
5218 ustconsumer64_data
.err_unix_sock_path
);
5219 DBG2("UST consumer 64 bits cmd path: %s",
5220 ustconsumer64_data
.cmd_unix_sock_path
);
5223 * See if daemon already exist.
5225 if ((ret
= check_existing_daemon()) < 0) {
5226 ERR("Already running daemon.\n");
5228 * We do not goto exit because we must not cleanup()
5229 * because a daemon is already running.
5235 * Init UST app hash table. Alloc hash table before this point since
5236 * cleanup() can get called after that point.
5240 /* Initialize JUL domain subsystem. */
5241 if ((ret
= jul_init()) < 0) {
5242 /* ENOMEM at this point. */
5246 /* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon
	 * using those paths *before* trying to set the kernel consumer sockets
	 * and init the kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if (is_root && !opt_no_kernel) {
		if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
			goto exit;
		}
	}

	/* Setup the thread ht_cleanup communication pipe. */
	if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
		goto exit;
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Setup the thread apps notify communication pipe. */
	if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
		goto exit;
	}
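
	/*
	 * All of these pipes are created close-on-exec so they are not leaked
	 * into the consumer daemons spawned later; they exist purely to wake
	 * up and hand work to the corresponding sessiond threads.
	 */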

	/* Initialize global buffer per UID and PID registry. */
	buffer_reg_init_uid_registry();
	buffer_reg_init_pid_registry();

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);
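
	/*
	 * ust_cmd_queue is a userspace-RCU wait-free queue. As a sketch of the
	 * producer/consumer contract (the threads are created further down):
	 * the registration thread enqueues each incoming application
	 * registration with cds_wfq_enqueue() and wakes the dispatch thread
	 * through a futex, and the dispatch thread drains the queue with
	 * cds_wfq_dequeue_blocking().
	 */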

	/*
	 * Get the session list pointer. This pointer MUST NOT be free'd; the
	 * list is statically declared in session.c.
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}
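
	/*
	 * Keep in mind that atoi() silently returns 0 on non-numeric input, so
	 * a malformed environment value yields a timeout of 0 rather than
	 * falling back to DEFAULT_APP_SOCKET_RW_TIMEOUT; strtol() with
	 * validation would be the stricter alternative if this ever matters.
	 */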

	/* Initialize communication library */
	lttcomm_init();
	/* This is to get the TCP timeout value. */
	lttcomm_inet_init();

	if (load_session_init_data(&load_info) < 0) {
		goto exit;
	}
	load_info->path = opt_load_session_path;

	/*
	 * Initialize the health check subsystem. This call should set the
	 * appropriate time values.
	 */
	health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
	if (!health_sessiond) {
		PERROR("health_app_create error");
		goto exit_health_sessiond_cleanup;
	}
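
	/*
	 * Every long-lived thread created below registers with health_sessiond
	 * and must report liveness periodically; the health thread then
	 * exposes that state over health_unix_sock_path so external tools can
	 * detect a stuck daemon.
	 */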

	/* Create thread to clean up RCU hash tables */
	ret = pthread_create(&ht_cleanup_thread, NULL,
			thread_ht_cleanup, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create ht_cleanup");
		goto exit_ht_cleanup;
	}

	/* Create health-check thread */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit_health;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Create thread to manage application notify socket */
	ret = pthread_create(&apps_notify_thread, NULL,
			ust_thread_manage_notify, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create notify");
		goto exit_apps_notify;
	}

	/* Create JUL registration thread. */
	ret = pthread_create(&jul_reg_thread, NULL,
			jul_thread_manage_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create JUL");
		goto exit_jul_reg;
	}

	/* Don't start this thread if kernel tracing is not requested or we are not root. */
	if (is_root && !opt_no_kernel) {
		/* Create kernel thread to manage kernel event */
		ret = pthread_create(&kernel_thread, NULL,
				thread_manage_kernel, (void *) NULL);
		if (ret != 0) {
			PERROR("pthread_create kernel");
			goto exit_kernel;
		}
	}

	/* Load possible session(s). */
	start_load_session_thread();

	if (is_root && !opt_no_kernel) {
		ret = pthread_join(kernel_thread, &status);
		if (ret != 0) {
			PERROR("pthread_join");
			goto error;	/* join error, exit without cleanup */
		}
	}
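
	/*
	 * From here down, the exit_* labels fall through into one another so
	 * that the joins always run in reverse creation order, whether we get
	 * here from a successful startup or from a pthread_create failure
	 * above.
	 */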
exit_kernel:
	ret = pthread_join(jul_reg_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join JUL");
		goto error;	/* join error, exit without cleanup */
	}

exit_jul_reg:
	ret = pthread_join(apps_notify_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join apps notify");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps_notify:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join apps");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

exit_client:
	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_health:
	ret = pthread_join(ht_cleanup_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join ht cleanup thread");
		goto error;	/* join error, exit without cleanup */
	}
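
	/*
	 * A failed join above deliberately exits without running cleanup():
	 * the target thread may still be alive, and cleanup() is only safe
	 * once every other thread has stopped.
	 */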

exit_ht_cleanup:
	health_app_destroy(health_sessiond);
exit_health_sessiond_cleanup:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
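
	/*
	 * Note that cleanup() above must run with this thread flagged
	 * RCU-online, since it tears down RCU-protected structures (sessions,
	 * hash tables); only then can the main thread go offline and
	 * unregister from the RCU library.
	 */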