/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/mount.h>
#include <sys/resource.h>
#include <sys/socket.h>
#include <sys/types.h>

#include <urcu/uatomic.h>

#include <common/common.h>
#include <common/compat/poll.h>
#include <common/compat/socket.h>
#include <common/defaults.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/futex.h>
#include <common/relayd/relayd.h>
#include <common/utils.h>

#include "lttng-sessiond.h"
#include "kernel-consumer.h"
#include "ust-consumer.h"
#include "testpoint.h"
#define CONSUMERD_FILE	"lttng-consumerd"
const char default_home_dir[] = DEFAULT_HOME_DIR;
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;
const char *opt_tracing_group;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;	/* Set to 1 if the daemon is running as root */
static pid_t ppid;	/* Parent PID for --sig-parent option */
/*
 * Consumer daemon specific control data. Every value not initialized here is
 * set to 0 by the static definition.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
	.pid_mutex = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.cond = PTHREAD_COND_INITIALIZER,
	.cond_mutex = PTHREAD_MUTEX_INITIALIZER,
};
/* Shared between threads */
static int dispatch_thread_exit;
/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* Global wait shm path for UST */
static char wait_shm_path[PATH_MAX];
/* Global health check unix path */
static char health_unix_sock_path[PATH_MAX];
/* Sockets and FDs */
static int client_sock = -1;
static int apps_sock = -1;
int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };
/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };
/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };
/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;
static pthread_t health_thread;
/*
 * UST registration command queue. This queue is tied with a futex and uses an
 * N-wakers / 1-waiter scheme implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration interact with
 * this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;
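
/*
 * Sketch of the wait/wake interaction around this queue, based on the two
 * threads below (see futex.c for the N-wakers / 1-waiter primitives):
 *
 *   registration thread (waker):         dispatch thread (waiter):
 *     cds_wfq_enqueue(&queue, &node);      futex_nto1_prepare(&futex);
 *     futex_nto1_wake(&futex);             ... drain queue ...
 *                                          futex_nto1_wait(&futex);
 */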
/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;
static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;

static const char *module_proc_lttng = "/proc/lttng";
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	CONSUMER_ERROR   = 3,
};
/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 *                                    client cmd checks state -> still OK
 * consumer thread exit, sets error
 *                                    client cmd tries to talk to consumer
 *                                    ...
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
/* Used for the health monitoring of the session daemon. See health.h */
struct health_state health_thread_cmd;
struct health_state health_thread_app_manage;
struct health_state health_thread_app_reg;
struct health_state health_thread_kernel;

/*
 * Socket timeout for receiving and sending in seconds.
 */
static int app_socket_timeout;
static void setup_consumerd_path(void)
{
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
	}
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	}
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
	}
#else
#error "Unknown bitness"
#endif

	/*
	 * Runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	if (bin) {
		consumerd32_bin = bin;
	}
	bin = getenv("LTTNG_CONSUMERD64_BIN");
	if (bin) {
		consumerd64_bin = bin;
	}
	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	if (libdir) {
		consumerd32_libdir = libdir;
	}
	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	if (libdir) {
		consumerd64_libdir = libdir;
	}
}
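
/*
 * Example of the runtime override above (paths are illustrative only):
 *
 *   $ LTTNG_CONSUMERD64_BIN=/opt/lttng/libexec/lttng-consumerd \
 *     LTTNG_CONSUMERD64_LIBDIR=/opt/lttng/lib \
 *     lttng-sessiond
 *
 * Any of the four variables may be set independently; unset ones keep the
 * build-time defaults computed above.
 */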
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 */
static int create_thread_poll_set(struct lttng_poll_event *events,
		int size)
{
	int ret;

	if (events == NULL || size == 0) {
		ret = -1;
		goto error;
	}

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Add quit pipe */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	return 0;

error:
	return ret;
}
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
{
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
		return 1;
	}

	return 0;
}
/*
 * Return group ID of the tracing group or -1 if not found.
 */
static gid_t allowed_group(void)
{
	struct group *grp;

	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	} else {
		grp = getgrnam(default_tracing_group);
	}
	if (!grp) {
		return -1;
	} else {
		return grp->gr_gid;
	}
}
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 */
static int init_thread_quit_pipe(void)
{
	int ret, i;

	ret = pipe(thread_quit_pipe);
	if (ret < 0) {
		PERROR("thread quit pipe");
		goto error;
	}

	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error;
		}
	}

error:
	return ret;
}
/*
 * Stop all threads by closing the thread quit pipe.
 */
static void stop_threads(void)
{
	int ret;

	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
	if (ret < 0) {
		ERR("write error on thread quit pipe");
	}

	/* Dispatch thread */
	CMM_STORE_SHARED(dispatch_thread_exit, 1);
	futex_nto1_wake(&ust_cmd_queue.futex);
}
/*
 * Cleanup the daemon
 */
static void cleanup(void)
{
	int ret;
	char *cmd;
	struct ltt_session *sess, *stmp;

	DBG("Cleaning up");

	/* First thing first, stop all threads */
	utils_close_pipe(thread_quit_pipe);

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
	if (ret < 0) {
		ERR("asprintf failed. Something is really wrong!");
	}

	/* Remove lttng run directory */
	ret = system(cmd);
	if (ret < 0) {
		ERR("Unable to clean %s", rundir);
	}

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			cmd_destroy_session(sess, kernel_poll_pipe[1]);
		}
	}

	DBG("Closing all UST sockets");
	ust_app_clean_list();

	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);
			if (ret) {
				PERROR("close");
			}
		}
		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();
	}

	utils_close_pipe(kernel_poll_pipe);
	utils_close_pipe(apps_cmd_pipe);

	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
}
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
{
	/* Check valid length */
	if (len == 0) {
		return -1;
	}

	return lttcomm_send_unix_sock(sock, buf, len);
}
/*
 * Free memory of a command context structure.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
{
	DBG("Clean command context structure");
	if (*cmd_ctx) {
		if ((*cmd_ctx)->llm) {
			free((*cmd_ctx)->llm);
		}
		if ((*cmd_ctx)->lsm) {
			free((*cmd_ctx)->lsm);
		}
		free(*cmd_ctx);
		*cmd_ctx = NULL;
	}
}
/*
 * Notify UST applications using the shm mmap futex.
 */
static int notify_ust_apps(int active)
{
	char *wait_shm_mmap;

	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {
		goto error;
	}

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
	return 0;

error:
	return -1;
}
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed to by buf.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
{
	int ret, buf_size;

	buf_size = size;

	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;

	return buf_size;

error:
	return ret;
}
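
/*
 * Resulting llm buffer layout (sketch):
 *
 *   +---------------------------+---------------------------+
 *   | struct lttcomm_lttng_msg  | payload (buf_size bytes)  |
 *   +---------------------------+---------------------------+
 *   <------------- cmd_ctx->lttng_msg_size ----------------->
 *
 * Commands that return data (e.g. LTTNG_LIST_EVENTS below) memcpy their
 * list into the payload area right after this call.
 */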
/*
 * Update the kernel poll set of all channel fds available over all tracing
 * sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}
/*
 * Find the channel fd from 'fd' over all tracing sessions. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				ret = kernel_open_channel_stream(channel);
				if (ret < 0) {
					goto error;
				}

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
					struct lttng_ht_iter iter;
					struct consumer_socket *socket;

					cds_lfht_for_each_entry(ksess->consumer->socks->ht,
							&iter.iter, socket, node.node) {
						/* Code flow error */
						assert(socket->fd >= 0);

						pthread_mutex_lock(socket->lock);
						ret = kernel_consumer_send_channel_stream(socket->fd,
								channel, ksess);
						pthread_mutex_unlock(socket->lock);
						if (ret < 0) {
							goto error;
						}
					}
				}
				goto error;
			}
		}
		session_unlock(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_unlock_list();
	return ret;
}
/*
 * For each tracing session, update newly registered apps.
 */
static void update_ust_app(int app_sock)
{
	struct ltt_session *sess, *stmp;

	session_lock_list();

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		session_lock(sess);
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);
		}
		session_unlock(sess);
	}

	session_unlock_list();
}
/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    - CPU hotplug
 */
static void *thread_manage_kernel(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;

	DBG("Thread manage kernel started");

	testpoint(thread_manage_kernel);

	health_code_update(&health_thread_kernel);

	testpoint(thread_manage_kernel_before_loop);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		health_code_update(&health_thread_kernel);

		if (update_poll_flag == 1) {
			/*
			 * Reset number of fd in the poll set. Always 2 since there is the thread
			 * quit pipe and the kernel pipe.
			 */
			events.nb_fd = 2;

			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Thread kernel polling on %d fds", nb_fd);

		/* Zeroed the poll events */
		lttng_poll_reset(&events);

		/* Poll infinite value of time */
	restart:
		health_poll_update(&health_thread_kernel);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_kernel);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_kernel);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
				ret = read(kernel_poll_pipe[0], &tmp, 1);
				update_poll_flag = 1;
				continue;
			} else {
				/*
				 * New CPU detected by the kernel. Adding kernel stream to
				 * kernel session and updating the kernel consumer
				 */
				if (revents & LPOLLIN) {
					ret = update_kernel_stream(&kconsumer_data, pollfd);
					if (ret < 0) {
						goto error;
					}
					break;
				}

				/*
				 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
				 * and unregister kernel stream at this point.
				 */
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	if (err) {
		health_error(&health_thread_kernel);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&health_thread_kernel);
	DBG("Kernel thread dying");
	return NULL;
}
/*
 * Signal the consumer-data pthread condition that the thread state changed.
 */
static void signal_consumer_condition(struct consumer_data *data, int state)
{
	pthread_mutex_lock(&data->cond_mutex);

	/*
	 * The state is set before signaling. It can be any value, it's the waiter
	 * job to correctly interpret this condition variable associated to the
	 * consumer pthread_cond.
	 *
	 * A value of 0 means that the corresponding thread of the consumer data
	 * was not started. 1 indicates that the thread has started and is ready
	 * for action. A negative value means that there was an error during the
	 * thread bootstrap.
	 */
	data->consumer_thread_is_ready = state;
	(void) pthread_cond_signal(&data->cond);

	pthread_mutex_unlock(&data->cond_mutex);
}
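
/*
 * Note: the waiter side of this condition lives in spawn_consumer_thread()
 * below, which blocks on consumer_data->cond until this function stores a
 * non-zero state (1 on success, -1 on error) from thread_manage_consumer().
 */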
/*
 * This thread manages the consumer error sent back to the session daemon.
 */
static void *thread_manage_consumer(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	/*
	 * Since the consumer thread can be spawned at any moment in time, we init
	 * the health to a poll status (1, which is a valid health over time).
	 * When the thread starts, we update here the health to a "code" path being
	 * an even value so this thread, when reaching a poll wait, does not
	 * trigger an error with an even value.
	 *
	 * Here is the use case we avoid.
	 *
	 * +1: the first poll update during initialization (main())
	 * +2 * x: multiple code update once in this thread.
	 * +1: poll wait in this thread (being a good health state).
	 * == even number which after the wait period shows as a bad health.
	 *
	 * In a nutshell, the following poll update to the health state brings back
	 * the state to an even value meaning a code path.
	 */
	health_poll_update(&consumer_data->health);
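
	/*
	 * Worked example of the counter scheme laid out above: main()'s poll
	 * update leaves the counter at 1; the extra poll update just above
	 * brings it to 2 (a "code" value); the code updates in this thread
	 * (+2 each) keep it even; and the poll wait below adds 1, leaving an
	 * odd value for the whole wait period, which the health check reads
	 * as a good state.
	 */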
	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll;
	}

	/*
	 * The error socket here is already in a listening state which was done
	 * just before spawning this thread to avoid a race between the consumer
	 * daemon exec trying to connect and the listen() call.
	 */
	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	nb_fd = LTTNG_POLL_GETNB(&events);

	health_code_update(&consumer_data->health);

	/* Infinite blocking call, waiting for transmission */
restart:
	health_poll_update(&consumer_data->health);

	testpoint(thread_manage_consumer);

	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");
				goto error;
			}
		}
	}

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
	if (sock < 0) {
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	health_code_update(&consumer_data->health);

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* On error, signal condition and quit. */
			signal_consumer_condition(consumer_data, -1);
			PERROR("consumer connect");
			goto error;
		}
		signal_consumer_condition(consumer_data, 1);
		DBG("Consumer command socket ready");
	} else {
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));
		goto error;
	}

	/* Remove the kconsumerd error sock since we've established a connection */
	ret = lttng_poll_del(&events, consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	health_code_update(&consumer_data->health);

	/* Update number of fd */
	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Infinite blocking call, waiting for transmission */
restart_poll:
	health_poll_update(&consumer_data->health);
	ret = lttng_poll_wait(&events, -1);
	health_poll_update(&consumer_data->health);
	if (ret < 0) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart_poll;
		}
		goto error;
	}

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		health_code_update(&consumer_data->health);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);
		if (ret) {
			err = 0;
			goto exit;
		}

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");
				goto error;
			}
		}
	}

	health_code_update(&consumer_data->health);

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
	if (ret <= 0) {
		ERR("consumer closed the command socket");
		goto error;
	}

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

exit:
error:
	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
	} else {
		/* Code flow error... */
		assert(0);
	}

	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
	}
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);
	}
	if (sock >= 0) {
		ret = close(sock);
	}

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);
error_poll:
	if (err) {
		health_error(&consumer_data->health);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&consumer_data->health);
	DBG("consumer thread cleanup completed");

	return NULL;
}
/*
 * This thread manages application communication.
 */
static void *thread_manage_apps(void *data)
{
	int i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	testpoint(thread_manage_apps);

	rcu_register_thread();
	rcu_thread_online();

	health_code_update(&health_thread_app_manage);

	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_poll_create;
	}

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error;
	}

	testpoint(thread_manage_apps_before_loop);

	health_code_update(&health_thread_app_manage);

	while (1) {
		/* Zeroed the events structure */
		lttng_poll_reset(&events);

		nb_fd = LTTNG_POLL_GETNB(&events);

		DBG("Apps thread polling on %d fds", nb_fd);

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_manage);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_manage);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_app_manage);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Inspect the apps cmd pipe */
			if (pollfd == apps_cmd_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Apps command pipe error");
					goto error;
				} else if (revents & LPOLLIN) {
					/* Empty pipe */
					ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
					if (ret < 0 || ret < sizeof(ust_cmd)) {
						PERROR("read apps cmd pipe");
						goto error;
					}

					health_code_update(&health_thread_app_manage);

					/* Register application to the session daemon */
					ret = ust_app_register(&ust_cmd.reg_msg,
							ust_cmd.sock);
					if (ret == -ENOMEM) {
						goto error;
					} else if (ret < 0) {
						break;
					}

					health_code_update(&health_thread_app_manage);

					/*
					 * Validate UST version compatibility.
					 */
					ret = ust_app_validate_version(ust_cmd.sock);
					if (ret >= 0) {
						/*
						 * Add channel(s) and event(s) to newly registered apps
						 * from lttng global UST domain.
						 */
						update_ust_app(ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					ret = ust_app_register_done(ust_cmd.sock);
					if (ret < 0) {
						/*
						 * If the registration is not possible, we simply
						 * unregister the apps and continue
						 */
						ust_app_unregister(ust_cmd.sock);
					} else {
						/*
						 * We only monitor the error events of the socket. This
						 * thread does not handle any incoming data from UST
						 * (POLLIN).
						 */
						ret = lttng_poll_add(&events, ust_cmd.sock,
								LPOLLERR | LPOLLHUP | LPOLLRDHUP);
						if (ret < 0) {
							goto error;
						}

						/* Set socket timeout for both receiving and sending */
						(void) lttcomm_setsockopt_rcv_timeout(ust_cmd.sock,
								app_socket_timeout);
						(void) lttcomm_setsockopt_snd_timeout(ust_cmd.sock,
								app_socket_timeout);

						DBG("Apps with sock %d added to poll set",
								ust_cmd.sock);
					}

					health_code_update(&health_thread_app_manage);

					break;
				}
			} else {
				/*
				 * At this point, we know that a registered application made
				 * the event at poll_wait.
				 */
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					/* Removing from the poll set */
					ret = lttng_poll_del(&events, pollfd);
					if (ret < 0) {
						goto error;
					}

					/* Socket closed on remote end. */
					ust_app_unregister(pollfd);
					break;
				}
			}

			health_code_update(&health_thread_app_manage);
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
	if (err) {
		health_error(&health_thread_app_manage);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&health_thread_app_manage);
	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
	return NULL;
}
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 */
static void *thread_dispatch_ust_registration(void *data)
{
	int ret;
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

		do {
			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
			if (node == NULL) {
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */
				break;
			}

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			ret = write(apps_cmd_pipe[1], ust_cmd,
					sizeof(struct ust_command));
			if (ret < 0) {
				PERROR("write apps cmd pipe");
				if (errno == EBADF) {
					/*
					 * We can't inform the application thread to process
					 * registration. We will exit or else application
					 * registration will not occur and tracing will never
					 * start.
					 */
					goto error;
				}
			}
			free(ust_cmd);
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);
	}

error:
	DBG("Dispatch thread dying");
	return NULL;
}
/*
 * This thread manages application registration.
 */
static void *thread_registration_apps(void *data)
{
	int sock = -1, i, ret, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Get allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	testpoint(thread_registration_apps);

	ret = lttcomm_listen_unix_sock(apps_sock);
	if (ret < 0) {
		goto error_listen;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error_create_poll;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
	if (ret < 0) {
		goto error_poll_add;
	}

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
	if (ret < 0) {
		ERR("Failed to notify applications or create the wait shared memory.\n"
			"Execution continues but there might be a problem for already\n"
			"running applications that wish to register.");
	}

	while (1) {
		DBG("Accepting application registration");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
	restart:
		health_poll_update(&health_thread_app_reg);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_app_reg);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			health_code_update(&health_thread_app_reg);

			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == apps_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Register apps socket poll error");
					goto error;
				} else if (revents & LPOLLIN) {
					sock = lttcomm_accept_unix_sock(apps_sock);
					if (sock < 0) {
						goto error;
					}

					/*
					 * Set the CLOEXEC flag. Return code is useless because
					 * either way, the show must go on.
					 */
					(void) utils_set_fd_cloexec(sock);

					/* Create UST registration command for enqueuing */
					ust_cmd = zmalloc(sizeof(struct ust_command));
					if (ust_cmd == NULL) {
						PERROR("ust command zmalloc");
						goto error;
					}

					/*
					 * Using message-based transmissions to ensure we don't
					 * have to deal with partially received messages.
					 */
					ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					if (ret < 0) {
						ERR("Exhausted file descriptors allowed for applications.");
						free(ust_cmd);
						ret = close(sock);
						sock = -1;
						continue;
					}
					health_code_update(&health_thread_app_reg);
					ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
							sizeof(struct ust_register_msg));
					if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						if (ret < 0) {
							PERROR("lttcomm_recv_unix_sock register apps");
						} else {
							ERR("Wrong size received on apps register");
						}
						free(ust_cmd);
						ret = close(sock);
						sock = -1;
						lttng_fd_put(LTTNG_FD_APPS, 1);
						continue;
					}
					health_code_update(&health_thread_app_reg);

					ust_cmd->sock = sock;
					sock = -1;

					DBG("UST registration received with pid:%d ppid:%d uid:%d"
							" gid:%d sock:%d name:%s (version %d.%d)",
							ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
							ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
							ust_cmd->sock, ust_cmd->reg_msg.name,
							ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

					/*
					 * Lock free enqueue the registration request. The red pill
					 * has been taken! This app will be part of the *system*.
					 */
					cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

					/*
					 * Wake the registration queue futex. Implicit memory
					 * barrier with the exchange in cds_wfq_enqueue.
					 */
					futex_nto1_wake(&ust_cmd_queue.futex);
				}
			}
		}
	}

exit:
error:
	if (err) {
		health_error(&health_thread_app_reg);
		ERR("Health error occurred in %s", __func__);
	}
	health_exit(&health_thread_app_reg);

	/* Notify that the registration thread is gone */
	notify_ust_apps(0);

	if (apps_sock >= 0) {
		ret = close(apps_sock);
	}
	if (sock >= 0) {
		ret = close(sock);
		lttng_fd_put(LTTNG_FD_APPS, 1);
	}
	unlink(apps_unix_sock_path);

error_poll_add:
	lttng_poll_clean(&events);
error_listen:
error_create_poll:
	DBG("UST Registration thread cleanup complete");

	return NULL;
}
/*
 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
 * exec or it will fail.
 */
static int spawn_consumer_thread(struct consumer_data *consumer_data)
{
	int ret, clock_ret;
	struct timespec timeout;

	/* Make sure we set the readiness flag to 0 because we are NOT ready */
	consumer_data->consumer_thread_is_ready = 0;

	/* Setup pthread condition */
	ret = pthread_condattr_init(&consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_init consumer data");
		goto error;
	}

	/*
	 * Set the monotonic clock in order to make sure we DO NOT jump in time
	 * between the clock_gettime() call and the timedwait call. See bug #324
	 * for more details and how we noticed it.
	 */
	ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_condattr_setclock consumer data");
		goto error;
	}

	ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
	if (ret != 0) {
		errno = ret;
		PERROR("pthread_cond_init consumer data");
		goto error;
	}

	ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
			consumer_data);
	if (ret != 0) {
		PERROR("pthread_create consumer");
		ret = -1;
		goto error;
	}

	/* We are about to wait on a pthread condition */
	pthread_mutex_lock(&consumer_data->cond_mutex);

	/* Get time for sem_timedwait absolute timeout */
	clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
	/*
	 * Set the timeout for the condition timed wait even if the clock gettime
	 * call fails since we might loop on that call and we want to avoid
	 * incrementing the timeout too many times.
	 */
	timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
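
	/*
	 * Illustration of the absolute timeout computed above (values assumed
	 * for the example only): with clock_gettime() returning tv_sec = 1000
	 * and DEFAULT_SEM_WAIT_TIMEOUT = 30, pthread_cond_timedwait() below
	 * gives up once the monotonic clock passes t = 1030, no matter how
	 * many times the wait loop restarts.
	 */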
	/*
	 * The following loop COULD be skipped in some conditions so this is why we
	 * set ret to 0 in order to make sure at least one round of the loop is
	 * done.
	 */
	ret = 0;

	/*
	 * Loop until the condition is reached or when a timeout is reached. Note
	 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
	 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
	 * possible. This loop does not take any chances and works with both of
	 * them.
	 */
	while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
		if (clock_ret < 0) {
			PERROR("clock_gettime spawn consumer");
			/* Infinite wait for the consumerd thread to be ready */
			ret = pthread_cond_wait(&consumer_data->cond,
					&consumer_data->cond_mutex);
		} else {
			ret = pthread_cond_timedwait(&consumer_data->cond,
					&consumer_data->cond_mutex, &timeout);
		}
	}

	/* Release the pthread condition */
	pthread_mutex_unlock(&consumer_data->cond_mutex);

	if (ret != 0) {
		errno = ret;
		if (ret == ETIMEDOUT) {
			/*
			 * Call has timed out so we kill the kconsumerd_thread and return
			 * an error.
			 */
			ERR("Condition timed out. The consumer thread was never ready."
					" Killing it");
			ret = pthread_cancel(consumer_data->thread);
			if (ret < 0) {
				PERROR("pthread_cancel consumer thread");
			}
		} else {
			PERROR("pthread_cond_wait failed consumer thread");
		}
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid == 0) {
		ERR("Consumerd did not start");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	return 0;

error:
	return ret;
}
/*
 * Join consumer thread
 */
static int join_consumer_thread(struct consumer_data *consumer_data)
{
	void *status;
	int ret;

	/* Consumer pid must be a real one. */
	if (consumer_data->pid > 0) {
		ret = kill(consumer_data->pid, SIGTERM);
		if (ret) {
			ERR("Error killing consumer daemon");
			return ret;
		}
	}

	return pthread_join(consumer_data->thread, &status);
}
/*
 * Fork and exec a consumer daemon (consumerd).
 *
 * Return pid if successful else -1.
 */
static pid_t spawn_consumerd(struct consumer_data *consumer_data)
{
	int ret;
	pid_t pid;
	const char *consumer_to_use;
	const char *verbosity;
	struct stat st;

	DBG("Spawning consumerd");

	pid = fork();
	if (pid == 0) {
		/*
		 * Exec consumerd.
		 */
		if (opt_verbose_consumer) {
			verbosity = "--verbose";
		} else {
			verbosity = "--quiet";
		}
		switch (consumer_data->type) {
		case LTTNG_CONSUMER_KERNEL:
			/*
			 * Find out which consumerd to execute. We will first try the
			 * 64-bit path, then the sessiond's installation directory, and
			 * fallback on the 32-bit one.
			 */
			DBG3("Looking for a kernel consumer at these locations:");
			DBG3("	1) %s", consumerd64_bin);
			DBG3("	2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
			DBG3("	3) %s", consumerd32_bin);
			if (stat(consumerd64_bin, &st) == 0) {
				DBG3("Found location #1");
				consumer_to_use = consumerd64_bin;
			} else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
				DBG3("Found location #2");
				consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
			} else if (stat(consumerd32_bin, &st) == 0) {
				DBG3("Found location #3");
				consumer_to_use = consumerd32_bin;
			} else {
				DBG("Could not find any valid consumerd executable");
				break;
			}
			DBG("Using kernel consumer at: %s", consumer_to_use);
			execl(consumer_to_use,
					"lttng-consumerd", verbosity, "-k",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			break;
		case LTTNG_CONSUMER64_UST:
		{
			char *tmpnew = NULL;

			if (consumerd64_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd64_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
			ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd64_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		case LTTNG_CONSUMER32_UST:
		{
			char *tmpnew = NULL;

			if (consumerd32_libdir[0] != '\0') {
				char *tmp;
				size_t tmplen;

				tmp = getenv("LD_LIBRARY_PATH");
				if (!tmp) {
					tmp = "";
				}
				tmplen = strlen("LD_LIBRARY_PATH=")
					+ strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
				tmpnew = zmalloc(tmplen + 1 /* \0 */);
				if (!tmpnew) {
					ret = -ENOMEM;
					goto error;
				}
				strcpy(tmpnew, "LD_LIBRARY_PATH=");
				strcat(tmpnew, consumerd32_libdir);
				if (tmp[0] != '\0') {
					strcat(tmpnew, ":");
					strcat(tmpnew, tmp);
				}
				ret = putenv(tmpnew);
				if (ret) {
					ret = -errno;
					goto error;
				}
			}
			DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
			ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
					"--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
					"--consumerd-err-sock", consumer_data->err_unix_sock_path,
					NULL);
			if (consumerd32_libdir[0] != '\0') {
				free(tmpnew);
			}
			if (ret) {
				goto error;
			}
			break;
		}
		default:
			PERROR("unknown consumer type");
			exit(EXIT_FAILURE);
		}
		if (errno != 0) {
			PERROR("kernel start consumer exec");
		}
		exit(EXIT_FAILURE);
	} else if (pid > 0) {
		ret = pid;
	} else {
		PERROR("start consumer fork");
		ret = -errno;
	}
error:
	return ret;
}
/*
 * Spawn the consumerd daemon and session daemon thread.
 */
static int start_consumerd(struct consumer_data *consumer_data)
{
	int ret, err;

	/*
	 * Set the listen() state on the socket since there is a possible race
	 * between the exec() of the consumer daemon and this call if placed in the
	 * consumer thread. See bug #366 for more details.
	 */
	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
	if (ret < 0) {
		goto error;
	}

	pthread_mutex_lock(&consumer_data->pid_mutex);
	if (consumer_data->pid != 0) {
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto end;
	}

	ret = spawn_consumerd(consumer_data);
	if (ret < 0) {
		ERR("Spawning consumerd failed");
		pthread_mutex_unlock(&consumer_data->pid_mutex);
		goto error;
	}

	/* Setting up the consumer_data pid */
	consumer_data->pid = ret;
	DBG2("Consumer pid %d", consumer_data->pid);
	pthread_mutex_unlock(&consumer_data->pid_mutex);

	DBG2("Spawning consumer control thread");
	ret = spawn_consumer_thread(consumer_data);
	if (ret < 0) {
		ERR("Fatal error spawning consumer control thread");
		goto error;
	}

end:
	return 0;

error:
	/* Cleanup already created socket on error. */
	if (consumer_data->err_sock >= 0) {
		err = close(consumer_data->err_sock);
		if (err < 0) {
			PERROR("close consumer data error socket");
		}
	}
	return ret;
}
/*
 * Compute health status of each consumer. If one of them is zero (bad
 * state), we return 0.
 */
static int check_consumer_health(void)
{
	int ret;

	ret = health_check_state(&kconsumer_data.health) &&
		health_check_state(&ustconsumer32_data.health) &&
		health_check_state(&ustconsumer64_data.health);

	DBG3("Health consumer check %d", ret);

	return ret;
}
/*
 * Setup necessary data for kernel tracer action.
 */
static int init_kernel_tracer(void)
{
	int ret;

	/* Modprobe lttng kernel modules */
	ret = modprobe_lttng_control();
	if (ret < 0) {
		goto error;
	}

	/* Open debugfs lttng */
	kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
	if (kernel_tracer_fd < 0) {
		DBG("Failed to open %s", module_proc_lttng);
		ret = -1;
		goto error_open;
	}

	/* Validate kernel version */
	ret = kernel_validate_version(kernel_tracer_fd);
	if (ret < 0) {
		goto error_version;
	}

	ret = modprobe_lttng_data();
	if (ret < 0) {
		goto error_modules;
	}

	DBG("Kernel tracer fd %d", kernel_tracer_fd);
	return 0;

error_version:
	modprobe_remove_lttng_control();
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}
	kernel_tracer_fd = -1;
	return LTTNG_ERR_KERN_VERSION;

error_modules:
	ret = close(kernel_tracer_fd);
	if (ret) {
		PERROR("close");
	}

error_open:
	modprobe_remove_lttng_control();

error:
	WARN("No kernel tracer available");
	kernel_tracer_fd = -1;
	if (!is_root) {
		return LTTNG_ERR_NEED_ROOT_SESSIOND;
	} else {
		return LTTNG_ERR_KERN_NA;
	}
}
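
/*
 * Note on the error paths above: a version or module failure unwinds the
 * earlier steps in reverse (close the /proc/lttng fd, remove the control
 * modules) so a half-initialized kernel tracer is never left behind.
 */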
/*
 * Copy consumer output from the tracing session to the domain session. The
 * function also applies the right modification on a per domain basis for the
 * trace files destination directory.
 */
static int copy_session_consumer(int domain, struct ltt_session *session)
{
	int ret;
	const char *dir_name;
	struct consumer_output *consumer;

	assert(session);
	assert(session->consumer);

	switch (domain) {
	case LTTNG_DOMAIN_KERNEL:
		DBG3("Copying tracing session consumer output in kernel session");
		/*
		 * XXX: We should audit the session creation and what this function
		 * does "extra" in order to avoid a destroy since this function is used
		 * in the domain session creation (kernel and ust) only. Same for UST
		 * domain.
		 */
		if (session->kernel_session->consumer) {
			consumer_destroy_output(session->kernel_session->consumer);
		}
		session->kernel_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->kernel_session->consumer;
		dir_name = DEFAULT_KERNEL_TRACE_DIR;
		break;
	case LTTNG_DOMAIN_UST:
		DBG3("Copying tracing session consumer output in UST session");
		if (session->ust_session->consumer) {
			consumer_destroy_output(session->ust_session->consumer);
		}
		session->ust_session->consumer =
			consumer_copy_output(session->consumer);
		/* Ease our life a bit for the next part */
		consumer = session->ust_session->consumer;
		dir_name = DEFAULT_UST_TRACE_DIR;
		break;
	default:
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	/* Append correct directory to subdir */
	strncat(consumer->subdir, dir_name,
			sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
	DBG3("Copy session consumer subdir %s", consumer->subdir);

	ret = LTTNG_OK;

error:
	return ret;
}
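
/*
 * For instance (sketch, assuming the usual defaults where
 * DEFAULT_KERNEL_TRACE_DIR and DEFAULT_UST_TRACE_DIR name the "kernel" and
 * "ust" subdirectories): a session whose consumer subdir is
 * "auto-20120101-120000" ends up with ".../kernel" for the kernel domain and
 * ".../ust" for the UST domain after this call.
 */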
/*
 * Create an UST session and add it to the session ust list.
 */
static int create_ust_session(struct ltt_session *session,
		struct lttng_domain *domain)
{
	int ret;
	struct ltt_ust_session *lus = NULL;

	assert(session);
	assert(domain);
	assert(session->consumer);

	switch (domain->type) {
	case LTTNG_DOMAIN_UST:
		break;
	default:
		ERR("Unknown UST domain on create session %d", domain->type);
		ret = LTTNG_ERR_UNKNOWN_DOMAIN;
		goto error;
	}

	DBG("Creating UST session");

	lus = trace_ust_create_session(session->path, session->id, domain);
	if (lus == NULL) {
		ret = LTTNG_ERR_UST_SESS_FAIL;
		goto error;
	}

	lus->uid = session->uid;
	lus->gid = session->gid;
	session->ust_session = lus;

	/* Copy session output to the newly created UST session */
	ret = copy_session_consumer(domain->type, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	return LTTNG_OK;

error:
	free(lus);
	session->ust_session = NULL;
	return ret;
}
/*
 * Create a kernel tracer session then create the default channel.
 */
static int create_kernel_session(struct ltt_session *session)
{
	int ret;

	DBG("Creating kernel session");

	ret = kernel_create_session(session, kernel_tracer_fd);
	if (ret < 0) {
		ret = LTTNG_ERR_KERN_SESS_FAIL;
		goto error;
	}

	/* Code flow safety */
	assert(session->kernel_session);

	/* Copy session output to the newly created Kernel session */
	ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
	if (ret != LTTNG_OK) {
		goto error;
	}

	/* Create directory(ies) on local filesystem. */
	if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
			strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
		ret = run_as_mkdir_recursive(
				session->kernel_session->consumer->dst.trace_path,
				S_IRWXU | S_IRWXG, session->uid, session->gid);
		if (ret < 0) {
			if (ret != -EEXIST) {
				ERR("Trace directory creation error");
				goto error;
			}
		}
	}

	session->kernel_session->uid = session->uid;
	session->kernel_session->gid = session->gid;

	return LTTNG_OK;

error:
	trace_kernel_destroy_session(session->kernel_session);
	session->kernel_session = NULL;
	return ret;
}
/*
 * Count the number of sessions permitted by uid/gid.
 */
static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
{
	unsigned int i = 0;
	struct ltt_session *session;

	DBG("Counting number of available session for UID %d GID %d",
			uid, gid);
	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		/*
		 * Only list the sessions the user can control.
		 */
		if (!session_access_ok(session, uid, gid)) {
			continue;
		}
		i++;
	}
	return i;
}
/*
 * Process the command requested by the lttng client within the command
 * context structure. This function makes sure that the return structure (llm)
 * is set and ready for transmission before returning.
 *
 * Return any error encountered or 0 for success.
 *
 * "sock" is only used for special-case var. len data.
 */
static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
		int *sock_error)
{
	int ret = LTTNG_OK;
	int need_tracing_session = 1;
	int need_domain;

	DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);

	*sock_error = 0;

	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_DESTROY_SESSION:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_START_TRACE:
	case LTTNG_STOP_TRACE:
	case LTTNG_DATA_PENDING:
		need_domain = 0;
		break;
	default:
		need_domain = 1;
	}

	if (opt_no_kernel && need_domain
			&& cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
		} else {
			ret = LTTNG_ERR_KERN_NA;
		}
		goto error;
	}

	/* Deny register consumer if we already have a spawned consumer. */
	if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
		pthread_mutex_lock(&kconsumer_data.pid_mutex);
		if (kconsumer_data.pid > 0) {
			ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
			pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			goto error;
		}
		pthread_mutex_unlock(&kconsumer_data.pid_mutex);
	}
	/*
	 * Check for commands that don't need to allocate a returned payload. We do
	 * this here so we don't have to make the call for no payload at each
	 * command.
	 */
	switch(cmd_ctx->lsm->cmd_type) {
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	case LTTNG_LIST_DOMAINS:
	case LTTNG_LIST_CHANNELS:
	case LTTNG_LIST_EVENTS:
		break;
	default:
		/* Setup lttng message with no payload */
		ret = setup_lttng_msg(cmd_ctx, 0);
		if (ret < 0) {
			/* This label does not try to unlock the session */
			goto init_setup_error;
		}
	}
	/* Commands that DO NOT need a session. */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_CREATE_SESSION:
	case LTTNG_CALIBRATE:
	case LTTNG_LIST_SESSIONS:
	case LTTNG_LIST_TRACEPOINTS:
	case LTTNG_LIST_TRACEPOINT_FIELDS:
		need_tracing_session = 0;
		break;
	default:
		DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
		/*
		 * We keep the session list lock across _all_ commands
		 * for now, because the per-session lock does not
		 * handle teardown properly.
		 */
		session_lock_list();
		cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
		if (cmd_ctx->session == NULL) {
			if (cmd_ctx->lsm->session.name != NULL) {
				ret = LTTNG_ERR_SESS_NOT_FOUND;
			} else {
				/* If no session name specified */
				ret = LTTNG_ERR_SELECT_SESS;
			}
			goto error;
		} else {
			/* Acquire lock for the session */
			session_lock(cmd_ctx->session);
		}
		break;
	}

	if (!need_domain) {
		goto skip_domain;
	}
	/*
	 * Check domain type for specific "pre-action".
	 */
	switch (cmd_ctx->lsm->domain.type) {
	case LTTNG_DOMAIN_KERNEL:
		if (!is_root) {
			ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
			goto error;
		}

		/* Kernel tracer check */
		if (kernel_tracer_fd == -1) {
			/* Basically, load kernel tracer modules */
			ret = init_kernel_tracer();
			if (ret != 0) {
				goto error;
			}
		}

		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_KERNCONSUMERD;
			goto error;
		}

		/* Need a session for kernel command */
		if (need_tracing_session) {
			if (cmd_ctx->session->kernel_session == NULL) {
				ret = create_kernel_session(cmd_ctx->session);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_SESS_FAIL;
					goto error;
				}
			}

			/* Start the kernel consumer daemon */
			pthread_mutex_lock(&kconsumer_data.pid_mutex);
			if (kconsumer_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
					cmd_ctx->session->start_consumer) {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
				ret = start_consumerd(&kconsumer_data);
				if (ret < 0) {
					ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
					goto error;
				}
				uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&kconsumer_data.pid_mutex);
			}

			/*
			 * The consumer was just spawned so we need to add the socket to
			 * the consumer output of the session if it exists.
			 */
			ret = consumer_create_socket(&kconsumer_data,
					cmd_ctx->session->kernel_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}

		break;
	case LTTNG_DOMAIN_UST:
	{
		/* Consumer is in an ERROR state. Report back to client */
		if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
			ret = LTTNG_ERR_NO_USTCONSUMERD;
			goto error;
		}

		if (need_tracing_session) {
			/* Create UST session if none exist. */
			if (cmd_ctx->session->ust_session == NULL) {
				ret = create_ust_session(cmd_ctx->session,
						&cmd_ctx->lsm->domain);
				if (ret != LTTNG_OK) {
					goto error;
				}
			}

			/* Start the UST consumer daemons */
			/* 64-bit */
			pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
			if (consumerd64_bin[0] != '\0' &&
					ustconsumer64_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
					cmd_ctx->session->start_consumer) {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
				ret = start_consumerd(&ustconsumer64_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
					uatomic_set(&ust_consumerd64_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 64 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer64_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}

			/* 32-bit */
			pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
			if (consumerd32_bin[0] != '\0' &&
					ustconsumer32_data.pid == 0 &&
					cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER &&
					cmd_ctx->session->start_consumer) {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
				ret = start_consumerd(&ustconsumer32_data);
				if (ret < 0) {
					ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
					uatomic_set(&ust_consumerd32_fd, -EINVAL);
					goto error;
				}

				uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
				uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
			} else {
				pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
			}

			/*
			 * Setup socket for consumer 32 bit. No need for atomic access
			 * since it was set above and can ONLY be set in this thread.
			 */
			ret = consumer_create_socket(&ustconsumer32_data,
					cmd_ctx->session->ust_session->consumer);
			if (ret < 0) {
				goto error;
			}
		}
		break;
	}
	default:
		break;
	}
skip_domain:
	/* Validate consumer daemon state when start/stop trace command */
	if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
			cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_UST:
			if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_USTCONSUMERD;
				goto error;
			}
			break;
		case LTTNG_DOMAIN_KERNEL:
			if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
				ret = LTTNG_ERR_NO_KERNCONSUMERD;
				goto error;
			}
			break;
		}
	}

	/*
	 * Check that the UID or GID match that of the tracing session.
	 * The root user can interact with all sessions.
	 */
	if (need_tracing_session) {
		if (!session_access_ok(cmd_ctx->session,
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
			ret = LTTNG_ERR_EPERM;
			goto error;
		}
	}
	/* Process by command type */
	switch (cmd_ctx->lsm->cmd_type) {
	case LTTNG_ADD_CONTEXT:
	{
		ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.context.channel_name,
				cmd_ctx->lsm->u.context.event_name,
				&cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_DISABLE_CHANNEL:
	{
		ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_EVENT:
	{
		ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name,
				cmd_ctx->lsm->u.disable.name);
		break;
	}
	case LTTNG_DISABLE_ALL_EVENT:
	{
		DBG("Disabling all events");

		ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.disable.channel_name);
		break;
	}
	case LTTNG_DISABLE_CONSUMER:
	{
		ret = cmd_disable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
		break;
	}
	case LTTNG_ENABLE_CHANNEL:
	{
		ret = cmd_enable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_CONSUMER:
	{
		/*
		 * XXX: 0 means that this URI should be applied on the session. Should
		 * be a DOMAIN enum.
		 */
		ret = cmd_enable_consumer(cmd_ctx->lsm->domain.type, cmd_ctx->session);
		if (ret != LTTNG_OK) {
			goto error;
		}

		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_UST, cmd_ctx->session);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_enable_consumer(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session);
			}
		}
		break;
	}
	case LTTNG_ENABLE_EVENT:
	{
		ret = cmd_enable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				&cmd_ctx->lsm->u.enable.event, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_ENABLE_ALL_EVENT:
	{
		DBG("Enabling all events");

		ret = cmd_enable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.enable.channel_name,
				cmd_ctx->lsm->u.enable.event.type, kernel_poll_pipe[1]);
		break;
	}
	case LTTNG_LIST_TRACEPOINTS:
	{
		struct lttng_event *events;
		ssize_t nb_events;

		nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
		if (nb_events < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_events;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				sizeof(struct lttng_event) * nb_events);

		free(events);

		ret = LTTNG_OK;
		break;
	}
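
	/*
	 * Note on the list commands in this switch: they all follow the same
	 * reply pattern. The count returned by the cmd_list_*() helper sizes
	 * the lttng message payload via setup_lttng_msg(), the array is then
	 * memcpy'd into cmd_ctx->llm->payload, and the client decodes it as a
	 * flat array of fixed-size structs.
	 */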
	case LTTNG_LIST_TRACEPOINT_FIELDS:
	{
		struct lttng_event_field *fields;
		ssize_t nb_fields;

		nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
				&fields);
		if (nb_fields < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_fields;
			goto error;
		}

		/*
		 * Setup lttng message with payload size set to the event list size in
		 * bytes and then copy list into the llm payload.
		 */
		ret = setup_lttng_msg(cmd_ctx,
				sizeof(struct lttng_event_field) * nb_fields);
		if (ret < 0) {
			free(fields);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, fields,
				sizeof(struct lttng_event_field) * nb_fields);

		free(fields);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_SET_CONSUMER_URI:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri == 0) {
			ret = LTTNG_ERR_INVALID;
			goto error;
		}

		uris = zmalloc(len);
		if (uris == NULL) {
			ret = LTTNG_ERR_FATAL;
			goto error;
		}

		/* Receive variable len data */
		DBG("Receiving %zu URI(s) from client ...", nb_uri);
		ret = lttcomm_recv_unix_sock(sock, uris, len);
		if (ret <= 0) {
			DBG("No URIs received from client... continuing");
			*sock_error = 1;
			ret = LTTNG_ERR_SESSION_FAIL;
			free(uris);
			goto error;
		}

		ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				nb_uri, uris);
		if (ret != LTTNG_OK) {
			free(uris);
			goto error;
		}

		/*
		 * XXX: 0 means that this URI should be applied on the session. Should
		 * be a DOMAIN enum.
		 */
		if (cmd_ctx->lsm->domain.type == 0) {
			/* Add the URI for the UST session if a consumer is present. */
			if (cmd_ctx->session->ust_session &&
					cmd_ctx->session->ust_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
						nb_uri, uris);
			} else if (cmd_ctx->session->kernel_session &&
					cmd_ctx->session->kernel_session->consumer) {
				ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
						cmd_ctx->session, nb_uri, uris);
			}
		}

		free(uris);
		break;
	}
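
	/*
	 * Note on the variable-length commands above and below: the fixed-size
	 * lttcomm_session_msg only announces how many lttng_uri entries follow
	 * (u.uri.size); the entries themselves are received in a second
	 * lttcomm_recv_unix_sock() on the same client socket, so a short or
	 * failed read must abort the command and flag the socket as bad.
	 */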
	case LTTNG_START_TRACE:
	{
		ret = cmd_start_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_STOP_TRACE:
	{
		ret = cmd_stop_trace(cmd_ctx->session);
		break;
	}
	case LTTNG_CREATE_SESSION:
	{
		size_t nb_uri, len;
		struct lttng_uri *uris = NULL;

		nb_uri = cmd_ctx->lsm->u.uri.size;
		len = nb_uri * sizeof(struct lttng_uri);

		if (nb_uri > 0) {
			uris = zmalloc(len);
			if (uris == NULL) {
				ret = LTTNG_ERR_FATAL;
				goto error;
			}

			/* Receive variable len data */
			DBG("Waiting for %zu URIs from client ...", nb_uri);
			ret = lttcomm_recv_unix_sock(sock, uris, len);
			if (ret <= 0) {
				DBG("No URIs received from client... continuing");
				*sock_error = 1;
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}

			if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
				DBG("Creating session with ONE network URI is a bad call");
				ret = LTTNG_ERR_SESSION_FAIL;
				free(uris);
				goto error;
			}
		}

		ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
				&cmd_ctx->creds);

		free(uris);
		break;
	}
	case LTTNG_DESTROY_SESSION:
	{
		ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);

		/* Set session to NULL so we do not unlock it after free. */
		cmd_ctx->session = NULL;
		break;
	}
	case LTTNG_LIST_DOMAINS:
	{
		ssize_t nb_dom;
		struct lttng_domain *domains;

		nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
		if (nb_dom < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_dom;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
		if (ret < 0) {
			free(domains);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, domains,
				nb_dom * sizeof(struct lttng_domain));

		free(domains);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_CHANNELS:
	{
		int nb_chan;
		struct lttng_channel *channels;

		nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
				cmd_ctx->session, &channels);
		if (nb_chan < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_chan;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
		if (ret < 0) {
			free(channels);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, channels,
				nb_chan * sizeof(struct lttng_channel));

		free(channels);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_EVENTS:
	{
		ssize_t nb_event;
		struct lttng_event *events = NULL;

		nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
				cmd_ctx->lsm->u.list.channel_name, &events);
		if (nb_event < 0) {
			/* Return value is a negative lttng_error_code. */
			ret = -nb_event;
			goto error;
		}

		ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
		if (ret < 0) {
			free(events);
			goto setup_error;
		}

		/* Copy event list into message payload */
		memcpy(cmd_ctx->llm->payload, events,
				nb_event * sizeof(struct lttng_event));

		free(events);

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_LIST_SESSIONS:
	{
		unsigned int nr_sessions;

		session_lock_list();
		nr_sessions = lttng_sessions_count(
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
		if (ret < 0) {
			session_unlock_list();
			goto setup_error;
		}

		/* Fill the session array */
		cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
				LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
				LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));

		session_unlock_list();

		ret = LTTNG_OK;
		break;
	}
	case LTTNG_CALIBRATE:
	{
		ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
				&cmd_ctx->lsm->u.calibrate);
		break;
	}
	case LTTNG_REGISTER_CONSUMER:
	{
		struct consumer_data *cdata;

		switch (cmd_ctx->lsm->domain.type) {
		case LTTNG_DOMAIN_KERNEL:
			cdata = &kconsumer_data;
			break;
		default:
			ret = LTTNG_ERR_UND;
			goto error;
		}

		ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.reg.path, cdata);
		break;
	}
	case LTTNG_SET_FILTER:
	{
		struct lttng_filter_bytecode *bytecode;

		if (cmd_ctx->lsm->u.filter.bytecode_len > LTTNG_FILTER_MAX_LEN) {
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		bytecode = zmalloc(cmd_ctx->lsm->u.filter.bytecode_len);
		if (bytecode == NULL) {
			ret = LTTNG_ERR_FILTER_NOMEM;
			goto error;
		}

		/* Receive var. len. data */
		DBG("Receiving var len data from client ...");
		ret = lttcomm_recv_unix_sock(sock, bytecode,
				cmd_ctx->lsm->u.filter.bytecode_len);
		if (ret <= 0) {
			DBG("Nothing recv() from client var len data... continuing");
			*sock_error = 1;
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		if (bytecode->len + sizeof(*bytecode)
				!= cmd_ctx->lsm->u.filter.bytecode_len) {
			free(bytecode);
			ret = LTTNG_ERR_FILTER_INVAL;
			goto error;
		}

		ret = cmd_set_filter(cmd_ctx->session, cmd_ctx->lsm->domain.type,
				cmd_ctx->lsm->u.filter.channel_name,
				cmd_ctx->lsm->u.filter.event_name,
				bytecode);
		break;
	}
	case LTTNG_DATA_PENDING:
	{
		ret = cmd_data_pending(cmd_ctx->session);
		break;
	}
	default:
		ret = LTTNG_ERR_UND;
		break;
	}

error:
	if (cmd_ctx->llm == NULL) {
		DBG("Missing llm structure. Allocating one.");
		if (setup_lttng_msg(cmd_ctx, 0) < 0) {
			goto setup_error;
		}
	}
	/* Set return code */
	cmd_ctx->llm->ret_code = ret;

setup_error:
	if (cmd_ctx->session) {
		session_unlock(cmd_ctx->session);
	}
	if (need_tracing_session) {
		session_unlock_list();
	}
	return ret;
}
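
#if 0
/*
 * For illustration only: the client side of the command protocol handled
 * by process_client_msg(), reduced to its two blocking steps. The helper
 * name and the use of a bare reply header are assumptions for this
 * sketch, not the liblttng-ctl implementation.
 */
static int example_client_round_trip(int sock)
{
	struct lttcomm_session_msg lsm;
	struct lttcomm_lttng_msg llm;
	int ret;

	memset(&lsm, 0, sizeof(lsm));
	lsm.cmd_type = LTTNG_LIST_SESSIONS;

	/* Send the fixed-size command header... */
	ret = lttcomm_send_unix_sock(sock, &lsm, sizeof(lsm));
	if (ret < 0) {
		return ret;
	}

	/* ... then read back the reply header carrying ret_code. */
	ret = lttcomm_recv_unix_sock(sock, &llm, sizeof(llm));
	if (ret <= 0) {
		return -1;
	}

	return llm.ret_code;
}
#endif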

/*
 * Thread managing health check socket.
 */
static void *thread_manage_health(void *data)
{
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(health_unix_sock_path);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		ret = -1;
		goto error;
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	while (1) {
		DBG("Health check ready");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				}
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			new_sock = -1;
			continue;
		}

		rcu_thread_online();

		switch (msg.component) {
		case LTTNG_HEALTH_CMD:
			reply.ret_code = health_check_state(&health_thread_cmd);
			break;
		case LTTNG_HEALTH_APP_MANAGE:
			reply.ret_code = health_check_state(&health_thread_app_manage);
			break;
		case LTTNG_HEALTH_APP_REG:
			reply.ret_code = health_check_state(&health_thread_app_reg);
			break;
		case LTTNG_HEALTH_KERNEL:
			reply.ret_code = health_check_state(&health_thread_kernel);
			break;
		case LTTNG_HEALTH_CONSUMER:
			reply.ret_code = check_consumer_health();
			break;
		case LTTNG_HEALTH_ALL:
			reply.ret_code =
					health_check_state(&health_thread_app_manage) &&
					health_check_state(&health_thread_app_reg) &&
					health_check_state(&health_thread_cmd) &&
					health_check_state(&health_thread_kernel) &&
					check_consumer_health();
			break;
		default:
			reply.ret_code = LTTNG_ERR_UND;
			break;
		}

		/*
		 * Flip ret value since 0 is a success and 1 indicates a bad health for
		 * the client, whereas in the sessiond it is the opposite. Again, this
		 * is just to make things easier for us poor developers who enjoy a lot
		 * of pain.
		 */
		if (reply.ret_code == 0 || reply.ret_code == 1) {
			reply.ret_code = !reply.ret_code;
		}

		DBG2("Health check return value %d", reply.ret_code);

		ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
		new_sock = -1;

		rcu_thread_offline();
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	unlink(health_unix_sock_path);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (new_sock >= 0) {
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);

	rcu_unregister_thread();
	return NULL;
}
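
#if 0
/*
 * For illustration only: the query a client would send to the health
 * socket served by thread_manage_health(). This assumes the message and
 * reply layouts used above (a component selector going out, a flipped
 * ret_code coming back); the overall flow is a sketch, not lttng's
 * health API.
 */
static int example_health_query(const char *path)
{
	struct lttcomm_health_msg msg;
	struct lttcomm_health_data reply;
	int sock, ret;

	sock = lttcomm_connect_unix_sock(path);
	if (sock < 0) {
		return -1;
	}

	memset(&msg, 0, sizeof(msg));
	msg.component = LTTNG_HEALTH_ALL;

	ret = lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
	if (ret >= 0) {
		ret = lttcomm_recv_unix_sock(sock, &reply, sizeof(reply));
	}
	(void) close(sock);

	/* After the flip in the thread above, 0 means everything is healthy. */
	return ret <= 0 ? -1 : reply.ret_code;
}
#endif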

/*
 * This thread manages all client requests using the unix client socket
 * for communication.
 */
static void *thread_manage_clients(void *data)
{
	int sock = -1, ret, i, pollfd, err = -1;
	int sock_error;
	uint32_t revents, nb_fd;
	struct command_ctx *cmd_ctx = NULL;
	struct lttng_poll_event events;

	DBG("[thread] Manage client started");

	testpoint(thread_manage_clients);

	rcu_register_thread();

	health_code_update(&health_thread_cmd);

	ret = lttcomm_listen_unix_sock(client_sock);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
	if (ret < 0) {
		goto error;
	}

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/*
	 * Notify parent pid that we are ready to accept command for client side.
	 */
	if (opt_sig_parent) {
		kill(ppid, SIGUSR1);
	}

	testpoint(thread_manage_clients_before_loop);

	health_code_update(&health_thread_cmd);

	while (1) {
		DBG("Accepting client command ...");

		nb_fd = LTTNG_POLL_GETNB(&events);

		/* Infinite blocking call, waiting for transmission */
restart:
		health_poll_update(&health_thread_cmd);
		ret = lttng_poll_wait(&events, -1);
		health_poll_update(&health_thread_cmd);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update(&health_thread_cmd);

			/* Thread quit pipe has been closed. Killing thread. */
			ret = check_thread_quit_pipe(pollfd, revents);
			if (ret) {
				err = 0;
				goto exit;
			}

			/* Event on the registration socket */
			if (pollfd == client_sock) {
				if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Client socket poll error");
					goto error;
				}
			}
		}

		DBG("Wait for client response");

		health_code_update(&health_thread_cmd);

		sock = lttcomm_accept_unix_sock(client_sock);
		if (sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(sock);

		/* Set socket option for credentials retrieval */
		ret = lttcomm_setsockopt_creds_unix_sock(sock);
		if (ret < 0) {
			goto error;
		}

		/* Allocate context command to process the client request */
		cmd_ctx = zmalloc(sizeof(struct command_ctx));
		if (cmd_ctx == NULL) {
			PERROR("zmalloc cmd_ctx");
			goto error;
		}

		/* Allocate data buffer for reception */
		cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
		if (cmd_ctx->lsm == NULL) {
			PERROR("zmalloc cmd_ctx->lsm");
			goto error;
		}

		cmd_ctx->llm = NULL;
		cmd_ctx->session = NULL;

		health_code_update(&health_thread_cmd);

		/*
		 * Data is received from the lttng client. The struct
		 * lttcomm_session_msg (lsm) contains the command and data request of
		 * the client.
		 */
		DBG("Receiving data from client ...");
		ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
				sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
		if (ret <= 0) {
			DBG("Nothing recv() from client... continuing");
			ret = close(sock);
			if (ret) {
				PERROR("close");
			}
			sock = -1;
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		// TODO: Validate cmd_ctx including sanity check for
		// security purpose.

		rcu_thread_online();
		/*
		 * This function dispatches the work to the kernel or userspace tracer
		 * libs and fills the lttcomm_lttng_msg data structure with all the
		 * information needed by the client. The command context struct
		 * contains everything this function may need.
		 */
		ret = process_client_msg(cmd_ctx, sock, &sock_error);
		rcu_thread_offline();
		if (ret < 0) {
			if (sock_error) {
				ret = close(sock);
				if (ret) {
					PERROR("close");
				}
				sock = -1;
			}
			/*
			 * TODO: Inform client somehow of the fatal error. At
			 * this point, ret < 0 means that a zmalloc failed
			 * (ENOMEM). Error detected but still accept
			 * command, unless a socket error has been
			 * detected.
			 */
			clean_command_ctx(&cmd_ctx);
			continue;
		}

		health_code_update(&health_thread_cmd);

		DBG("Sending response (size: %d, retcode: %s)",
				cmd_ctx->lttng_msg_size,
				lttng_strerror(-cmd_ctx->llm->ret_code));
		ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
		if (ret < 0) {
			ERR("Failed to send data back to client");
		}

		/* End of transmission */
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
		sock = -1;

		clean_command_ctx(&cmd_ctx);

		health_code_update(&health_thread_cmd);
	}

exit:
error:
	if (err) {
		health_error(&health_thread_cmd);
		ERR("Health error occurred in %s", __func__);
	}

	health_exit(&health_thread_cmd);

	DBG("Client thread dying");
	unlink(client_unix_sock_path);
	if (client_sock >= 0) {
		ret = close(client_sock);
		if (ret) {
			PERROR("close");
		}
	}
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	clean_command_ctx(&cmd_ctx);

	rcu_unregister_thread();
	return NULL;
}

/*
 * usage function on stderr
 */
static void usage(void)
{
	fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
	fprintf(stderr, "  -h, --help                         Display this usage.\n");
	fprintf(stderr, "  -c, --client-sock PATH             Specify path for the client unix socket\n");
	fprintf(stderr, "  -a, --apps-sock PATH               Specify path for apps unix socket\n");
	fprintf(stderr, "      --kconsumerd-err-sock PATH     Specify path for the kernel consumer error socket\n");
	fprintf(stderr, "      --kconsumerd-cmd-sock PATH     Specify path for the kernel consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
	fprintf(stderr, "      --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
	fprintf(stderr, "      --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
	fprintf(stderr, "      --consumerd32-path PATH        Specify path for the 32-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd32-libdir PATH      Specify path for the 32-bit UST consumer daemon libraries\n");
	fprintf(stderr, "      --consumerd64-path PATH        Specify path for the 64-bit UST consumer daemon binary\n");
	fprintf(stderr, "      --consumerd64-libdir PATH      Specify path for the 64-bit UST consumer daemon libraries\n");
	fprintf(stderr, "  -d, --daemonize                    Start as a daemon.\n");
	fprintf(stderr, "  -g, --group NAME                   Specify the tracing group name. (default: tracing)\n");
	fprintf(stderr, "  -V, --version                      Show version number.\n");
	fprintf(stderr, "  -S, --sig-parent                   Send SIGUSR1 to parent pid to notify readiness.\n");
	fprintf(stderr, "  -q, --quiet                        No output at all.\n");
	fprintf(stderr, "  -v, --verbose                      Verbose mode. Activate DBG() macro.\n");
	fprintf(stderr, "      --verbose-consumer             Verbose mode for consumer. Activate DBG() macro.\n");
	fprintf(stderr, "      --no-kernel                    Disable kernel tracer\n");
}

/*
 * daemon argument parsing
 */
static int parse_args(int argc, char **argv)
{
	int c;

	static struct option long_options[] = {
		{ "client-sock", 1, 0, 'c' },
		{ "apps-sock", 1, 0, 'a' },
		{ "kconsumerd-cmd-sock", 1, 0, 'C' },
		{ "kconsumerd-err-sock", 1, 0, 'E' },
		{ "ustconsumerd32-cmd-sock", 1, 0, 'G' },
		{ "ustconsumerd32-err-sock", 1, 0, 'H' },
		{ "ustconsumerd64-cmd-sock", 1, 0, 'D' },
		{ "ustconsumerd64-err-sock", 1, 0, 'F' },
		{ "consumerd32-path", 1, 0, 'u' },
		{ "consumerd32-libdir", 1, 0, 'U' },
		{ "consumerd64-path", 1, 0, 't' },
		{ "consumerd64-libdir", 1, 0, 'T' },
		{ "daemonize", 0, 0, 'd' },
		{ "sig-parent", 0, 0, 'S' },
		{ "help", 0, 0, 'h' },
		{ "group", 1, 0, 'g' },
		{ "version", 0, 0, 'V' },
		{ "quiet", 0, 0, 'q' },
		{ "verbose", 0, 0, 'v' },
		{ "verbose-consumer", 0, 0, 'Z' },
		{ "no-kernel", 0, 0, 'N' },
		{ NULL, 0, 0, 0 }
	};

	while (1) {
		int option_index = 0;
		c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t",
				long_options, &option_index);
		if (c == -1) {
			break;
		}

		switch (c) {
		case 0:
			fprintf(stderr, "option %s", long_options[option_index].name);
			if (optarg) {
				fprintf(stderr, " with arg %s\n", optarg);
			}
			break;
		case 'c':
			snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'a':
			snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'd':
			opt_daemon = 1;
			break;
		case 'g':
			opt_tracing_group = optarg;
			break;
		case 'h':
			usage();
			exit(EXIT_FAILURE);
		case 'V':
			fprintf(stdout, "%s\n", VERSION);
			exit(EXIT_SUCCESS);
		case 'S':
			opt_sig_parent = 1;
			break;
		case 'E':
			snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'C':
			snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'F':
			snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'D':
			snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'H':
			snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'G':
			snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
			break;
		case 'N':
			opt_no_kernel = 1;
			break;
		case 'q':
			lttng_opt_quiet = 1;
			break;
		case 'v':
			/* Verbose level can increase using multiple -v */
			lttng_opt_verbose += 1;
			break;
		case 'Z':
			opt_verbose_consumer += 1;
			break;
		case 'u':
			consumerd32_bin = optarg;
			break;
		case 'U':
			consumerd32_libdir = optarg;
			break;
		case 't':
			consumerd64_bin = optarg;
			break;
		case 'T':
			consumerd64_libdir = optarg;
			break;
		default:
			/*
			 * Unknown option or other error.
			 * Error is printed by getopt, just return.
			 */
			return -1;
		}
	}

	return 0;
}
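
/*
 * Typical invocations (illustrative, paths are examples only):
 *
 *   lttng-sessiond --daemonize
 *   lttng-sessiond -vvv --verbose-consumer
 *   lttng-sessiond --no-kernel --client-sock /tmp/my-client.sock
 *
 * Repeating -v raises the verbosity level, as handled in the 'v' case
 * above.
 */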

/*
 * Create the two sockets needed by the daemon:
 *		apps_sock - The communication socket for all UST apps.
 *		client_sock - The communication socket of the cli tool (lttng).
 */
static int init_daemon_socket(void)
{
	int ret = 0;
	mode_t old_umask;

	old_umask = umask(0);

	/* Create client tool unix socket */
	client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
	if (client_sock < 0) {
		ERR("Create unix sock failed: %s", client_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(client_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", client_sock);
	}

	/* File permission MUST be 660 */
	ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", client_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	/* Create the application unix socket */
	apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
	if (apps_sock < 0) {
		ERR("Create unix sock failed: %s", apps_unix_sock_path);
		ret = -1;
		goto end;
	}

	/* Set the cloexec flag */
	ret = utils_set_fd_cloexec(apps_sock);
	if (ret < 0) {
		ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
				"Continuing but note that the consumer daemon will have a "
				"reference to this socket on exec()", apps_sock);
	}

	/* File permission MUST be 666 */
	ret = chmod(apps_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", apps_unix_sock_path);
		PERROR("chmod");
		goto end;
	}

	DBG3("Session daemon client socket %d and application socket %d created",
			client_sock, apps_sock);

end:
	umask(old_umask);
	return ret;
}
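
/*
 * Note on the modes used above: the umask is cleared first so chmod()
 * fully controls the result. The client socket stays at 660 (owner and
 * tracing group only), while the application socket must be 666 so that
 * any instrumented process, whatever its user, can register with the
 * session daemon.
 */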

/*
 * Check if the global socket is available, and if a daemon is answering at the
 * other side. If yes, error is returned.
 */
static int check_existing_daemon(void)
{
	/* Is there anybody out there? */
	if (lttng_session_daemon_alive()) {
		return -EEXIST;
	}

	return 0;
}

/*
 * Set the tracing group gid onto the client socket.
 *
 * Race window between mkdir and chown is OK because we are going from more
 * permissive (root.root) to less permissive (root.tracing).
 */
static int set_permissions(char *rundir)
{
	int ret;
	gid_t gid;

	ret = allowed_group();
	if (ret < 0) {
		WARN("No tracing group detected");
		ret = 0;
		goto end;
	}

	gid = ret;

	/* Set lttng run dir */
	ret = chown(rundir, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", rundir);
		PERROR("chown");
	}

	/* Ensure tracing group can search the run dir */
	ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
	if (ret < 0) {
		ERR("Unable to set permissions on %s", rundir);
		PERROR("chmod");
	}

	/* lttng client socket path */
	ret = chown(client_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", client_unix_sock_path);
		PERROR("chown");
	}

	/* kconsumer error socket path */
	ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 64-bit ustconsumer error socket path */
	ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
		PERROR("chown");
	}

	/* 32-bit ustconsumer error socket path */
	ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
	if (ret < 0) {
		ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
		PERROR("chown");
	}

	DBG("All permissions are set");

end:
	return ret;
}

/*
 * Create the lttng run directory needed for all global sockets and pipe.
 */
static int create_lttng_rundir(const char *rundir)
{
	int ret;

	DBG3("Creating LTTng run directory: %s", rundir);

	ret = mkdir(rundir, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			ERR("Unable to create %s", rundir);
			goto error;
		} else {
			ret = 0;
		}
	}

error:
	return ret;
}

/*
 * Setup sockets and directory needed by the kconsumerd communication with the
 * session daemon.
 */
static int set_consumer_sockets(struct consumer_data *consumer_data,
		const char *rundir)
{
	int ret;
	char path[PATH_MAX];

	switch (consumer_data->type) {
	case LTTNG_CONSUMER_KERNEL:
		snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
		break;
	case LTTNG_CONSUMER64_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
		break;
	case LTTNG_CONSUMER32_UST:
		snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
		break;
	default:
		ERR("Consumer type unknown");
		ret = -EINVAL;
		goto error;
	}

	DBG2("Creating consumer directory: %s", path);

	ret = mkdir(path, S_IRWXU);
	if (ret < 0) {
		if (errno != EEXIST) {
			PERROR("mkdir");
			ERR("Failed to create %s", path);
			goto error;
		}
	}

	/* Create the kconsumerd error unix socket */
	consumer_data->err_sock =
		lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
	if (consumer_data->err_sock < 0) {
		ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
		ret = -1;
		goto error;
	}

	/* File permission MUST be 660 */
	ret = chmod(consumer_data->err_unix_sock_path,
			S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
	if (ret < 0) {
		ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
		PERROR("chmod");
		goto error;
	}

error:
	return ret;
}

/*
 * Signal handler for the daemon
 *
 * Simply stop all worker threads, leaving main() return gracefully after
 * joining all threads and calling cleanup().
 */
static void sighandler(int sig)
{
	switch (sig) {
	case SIGPIPE:
		DBG("SIGPIPE caught");
		return;
	case SIGINT:
		DBG("SIGINT caught");
		stop_threads();
		break;
	case SIGTERM:
		DBG("SIGTERM caught");
		stop_threads();
		break;
	default:
		break;
	}
}

/*
 * Setup signal handler for:
 *		SIGINT, SIGTERM, SIGPIPE
 */
static int set_signal_handler(void)
{
	int ret = 0;
	struct sigaction sa;
	sigset_t sigset;

	if ((ret = sigemptyset(&sigset)) < 0) {
		PERROR("sigemptyset");
		return ret;
	}

	sa.sa_handler = sighandler;
	sa.sa_mask = sigset;
	sa.sa_flags = 0;
	if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
		PERROR("sigaction");
		return ret;
	}

	DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");

	return ret;
}
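
/*
 * Note: SIGPIPE is handled (and effectively ignored by the handler above)
 * so that a write to a socket whose peer has gone away fails with EPIPE
 * instead of killing the daemon; SIGINT and SIGTERM both funnel into the
 * same graceful stop path.
 */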

/*
 * Set open files limit to unlimited. This daemon can open a large number of
 * file descriptors in order to consume multiple kernel traces.
 */
static void set_ulimit(void)
{
	int ret;
	struct rlimit lim;

	/* The kernel does not allow an infinite limit for open files */
	lim.rlim_cur = 65535;
	lim.rlim_max = 65535;

	ret = setrlimit(RLIMIT_NOFILE, &lim);
	if (ret < 0) {
		PERROR("failed to set open files limit");
	}
}
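
#if 0
/*
 * For illustration only: verifying the limit that set_ulimit() installed,
 * using plain POSIX getrlimit(). This is a sketch, not part of the daemon.
 */
static void example_check_open_files_limit(void)
{
	struct rlimit lim;

	if (getrlimit(RLIMIT_NOFILE, &lim) == 0) {
		DBG("open files limit: soft %lu hard %lu",
				(unsigned long) lim.rlim_cur,
				(unsigned long) lim.rlim_max);
	}
}
#endif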

/*
 * main
 */
int main(int argc, char **argv)
{
	int ret = 0;
	void *status;
	const char *home_path, *env_app_timeout;

	init_kernel_workarounds();

	rcu_register_thread();

	setup_consumerd_path();

	/* Parse arguments */
	progname = argv[0];
	if ((ret = parse_args(argc, argv)) < 0) {
		goto error;
	}

	/* Daemonize */
	if (opt_daemon) {
		int i;

		/*
		 * fork
		 * child: setsid, close FD 0, 1, 2, chdir /
		 * parent: exit (if fork is successful)
		 */
		ret = daemon(0, 0);
		if (ret < 0) {
			PERROR("daemon");
			goto error;
		}
		/*
		 * We are in the child. Make sure all other file
		 * descriptors are closed, in case we are called with
		 * more opened file descriptors than the standard ones.
		 */
		for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
			(void) close(i);
		}
	}

	/* Create thread quit pipe */
	if ((ret = init_thread_quit_pipe()) < 0) {
		goto error;
	}

	/* Check if daemon is UID = 0 */
	is_root = !getuid();

	if (is_root) {
		rundir = strdup(DEFAULT_LTTNG_RUNDIR);

		/* Create global run dir with root access */
		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_UNIX_SOCK);
		}

		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
		}

		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
		}

		/* Setup kernel consumerd path */
		snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
		snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
				DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);

		DBG2("Kernel consumer err path: %s",
				kconsumer_data.err_unix_sock_path);
		DBG2("Kernel consumer cmd path: %s",
				kconsumer_data.cmd_unix_sock_path);
	} else {
		home_path = get_home_dir();
		if (home_path == NULL) {
			/* TODO: Add --socket PATH option */
			ERR("Can't get HOME directory for sockets creation.");
			ret = -EPERM;
			goto error;
		}

		/*
		 * Create rundir from home path. This will create something like
		 * $HOME/.lttng
		 */
		ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
		if (ret < 0) {
			ret = -ENOMEM;
			goto error;
		}

		ret = create_lttng_rundir(rundir);
		if (ret < 0) {
			goto error;
		}

		if (strlen(apps_unix_sock_path) == 0) {
			snprintf(apps_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
		}

		/* Set the cli tool unix socket path */
		if (strlen(client_unix_sock_path) == 0) {
			snprintf(client_unix_sock_path, PATH_MAX,
					DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
		}

		/* Set global SHM for ust */
		if (strlen(wait_shm_path) == 0) {
			snprintf(wait_shm_path, PATH_MAX,
					DEFAULT_HOME_APPS_WAIT_SHM_PATH, geteuid());
		}

		/* Set health check Unix path */
		if (strlen(health_unix_sock_path) == 0) {
			snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
					DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
		}
	}

	/* Set consumer initial state */
	kernel_consumerd_state = CONSUMER_STOPPED;
	ust_consumerd_state = CONSUMER_STOPPED;

	DBG("Client socket path %s", client_unix_sock_path);
	DBG("Application socket path %s", apps_unix_sock_path);
	DBG("LTTng run directory path: %s", rundir);

	/* 32 bits consumerd path setup */
	snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 32 bits err path: %s",
			ustconsumer32_data.err_unix_sock_path);
	DBG2("UST consumer 32 bits cmd path: %s",
			ustconsumer32_data.cmd_unix_sock_path);

	/* 64 bits consumerd path setup */
	snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
	snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
			DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);

	DBG2("UST consumer 64 bits err path: %s",
			ustconsumer64_data.err_unix_sock_path);
	DBG2("UST consumer 64 bits cmd path: %s",
			ustconsumer64_data.cmd_unix_sock_path);

	/*
	 * See if daemon already exist.
	 */
	if ((ret = check_existing_daemon()) < 0) {
		ERR("Already running daemon.\n");
		/*
		 * We do not goto exit because we must not cleanup()
		 * because a daemon is already running.
		 */
		goto error;
	}

	/*
	 * Init UST app hash table. Alloc hash table before this point since
	 * cleanup() can get called after that point.
	 */
	ust_app_ht_alloc();

	/* After this point, we can safely call cleanup() with "goto exit" */

	/*
	 * These actions must be executed as root. We do that *after* setting up
	 * the sockets path because we MUST make the check for another daemon using
	 * those paths *before* trying to set the kernel consumer sockets and init
	 * kernel tracer.
	 */
	if (is_root) {
		ret = set_consumer_sockets(&kconsumer_data, rundir);
		if (ret < 0) {
			goto exit;
		}

		/* Setup kernel tracer */
		if (!opt_no_kernel) {
			init_kernel_tracer();
		}

		/* Set ulimit for open files */
		set_ulimit();
	}
	/* init lttng_fd tracking must be done after set_ulimit. */
	lttng_fd_init();

	ret = set_consumer_sockets(&ustconsumer64_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	ret = set_consumer_sockets(&ustconsumer32_data, rundir);
	if (ret < 0) {
		goto exit;
	}

	if ((ret = set_signal_handler()) < 0) {
		goto exit;
	}

	/* Setup the needed unix socket */
	if ((ret = init_daemon_socket()) < 0) {
		goto exit;
	}

	/* Set credentials to socket */
	if (is_root && ((ret = set_permissions(rundir)) < 0)) {
		goto exit;
	}

	/* Get parent pid if -S, --sig-parent is specified. */
	if (opt_sig_parent) {
		ppid = getppid();
	}

	/* Setup the kernel pipe for waking up the kernel thread */
	if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
		goto exit;
	}

	/* Setup the thread apps communication pipe. */
	if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
		goto exit;
	}

	/* Init UST command queue. */
	cds_wfq_init(&ust_cmd_queue.queue);

	/*
	 * Get session list pointer. This pointer MUST NOT be free(). This list is
	 * statically declared in session.c
	 */
	session_list_ptr = session_get_list();

	/* Set up max poll set size */
	lttng_poll_set_max_size();

	/* Init all health thread counters. */
	health_init(&health_thread_cmd);
	health_init(&health_thread_kernel);
	health_init(&health_thread_app_manage);
	health_init(&health_thread_app_reg);

	/*
	 * Init health counters of the consumer thread. We do a quick hack here so
	 * that the state of the consumer health is fine even if the thread is not
	 * started. Once the thread starts, the health state is updated with a poll
	 * value to set a health code path. This is simply to ease our life and has
	 * no cost whatsoever.
	 */
	health_init(&kconsumer_data.health);
	health_poll_update(&kconsumer_data.health);
	health_init(&ustconsumer32_data.health);
	health_poll_update(&ustconsumer32_data.health);
	health_init(&ustconsumer64_data.health);
	health_poll_update(&ustconsumer64_data.health);

	/* Check for the application socket timeout env variable. */
	env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
	if (env_app_timeout) {
		app_socket_timeout = atoi(env_app_timeout);
	} else {
		app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
	}

	/* Create thread to manage the health check socket */
	ret = pthread_create(&health_thread, NULL,
			thread_manage_health, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create health");
		goto exit_health;
	}

	/* Create thread to manage the client socket */
	ret = pthread_create(&client_thread, NULL,
			thread_manage_clients, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create clients");
		goto exit_client;
	}

	/* Create thread to dispatch registration */
	ret = pthread_create(&dispatch_thread, NULL,
			thread_dispatch_ust_registration, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create dispatch");
		goto exit_dispatch;
	}

	/* Create thread to manage application registration. */
	ret = pthread_create(&reg_apps_thread, NULL,
			thread_registration_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create registration");
		goto exit_reg_apps;
	}

	/* Create thread to manage application socket */
	ret = pthread_create(&apps_thread, NULL,
			thread_manage_apps, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create apps");
		goto exit_apps;
	}

	/* Create kernel thread to manage kernel event */
	ret = pthread_create(&kernel_thread, NULL,
			thread_manage_kernel, (void *) NULL);
	if (ret != 0) {
		PERROR("pthread_create kernel");
		goto exit_kernel;
	}

	ret = pthread_join(kernel_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_kernel:
	ret = pthread_join(apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_apps:
	ret = pthread_join(reg_apps_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_reg_apps:
	ret = pthread_join(dispatch_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

exit_dispatch:
	ret = pthread_join(client_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&kconsumer_data);
	if (ret != 0) {
		PERROR("join_consumer");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer32_data);
	if (ret != 0) {
		PERROR("join_consumer ust32");
		goto error;	/* join error, exit without cleanup */
	}

	ret = join_consumer_thread(&ustconsumer64_data);
	if (ret != 0) {
		PERROR("join_consumer ust64");
		goto error;	/* join error, exit without cleanup */
	}

exit_client:
	ret = pthread_join(health_thread, &status);
	if (ret != 0) {
		PERROR("pthread_join health thread");
		goto error;	/* join error, exit without cleanup */
	}

exit_health:
exit:
	/*
	 * cleanup() is called when no other thread is running.
	 */
	rcu_thread_online();
	cleanup();
	rcu_thread_offline();
	rcu_unregister_thread();
	if (!ret) {
		exit(EXIT_SUCCESS);
	}
error:
	exit(EXIT_FAILURE);
}
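
/*
 * Note on the shutdown path above: the exit_* labels are ordered as the
 * reverse of thread creation, so a pthread_create() failure at any stage
 * falls through the joins of every thread that was already started before
 * reaching cleanup() in the exit path.
 */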