/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
24 #include <semaphore.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
34 #include <sys/types.h>
36 #include <urcu/uatomic.h>
40 #include <common/common.h>
41 #include <common/compat/poll.h>
42 #include <common/compat/socket.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/ust-consumer/ust-consumer.h>
46 #include <common/futex.h>
48 #include "lttng-sessiond.h"
59 #define CONSUMERD_FILE "lttng-consumerd"
/*
 * Control data for one consumer daemon instance (kernel, 32-bit UST or
 * 64-bit UST flavour).
 *
 * NOTE(review): extraction-damaged paste — members used by code below
 * (err_sock, cmd_sock, pid, sem, ...) and the closing "};" were dropped
 * and must be restored before this compiles.
 */
struct consumer_data {
	enum lttng_consumer_type type;	/* Which consumer flavour this instance is */

	pthread_t thread;	/* Worker thread interacting with the consumer */

	/* Mutex to control consumerd pid assignation */
	pthread_mutex_t pid_mutex;

	/* consumer error and command Unix socket path */
	char err_unix_sock_path[PATH_MAX];
	char cmd_unix_sock_path[PATH_MAX];
/* Default path strings, values come from common/defaults.h */
const char default_home_dir[] = DEFAULT_HOME_DIR;
const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
const char default_ust_sock_dir[] = DEFAULT_UST_SOCK_DIR;
const char default_global_apps_pipe[] = DEFAULT_GLOBAL_APPS_PIPE;

/* Option state — presumably set from command-line parsing (not in this chunk) */
const char *opt_tracing_group;
static int opt_sig_parent;
static int opt_verbose_consumer;
static int opt_daemon;
static int opt_no_kernel;
static int is_root;	/* Set to 1 if the daemon is running as root */
static pid_t ppid;	/* Parent PID for --sig-parent option */

/*
 * Consumer daemon specific control data.
 *
 * NOTE(review): each initializer below was truncated by the extraction —
 * trailing designators and the closing "};" are missing.
 */
static struct consumer_data kconsumer_data = {
	.type = LTTNG_CONSUMER_KERNEL,
	.err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,

static struct consumer_data ustconsumer64_data = {
	.type = LTTNG_CONSUMER64_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,

static struct consumer_data ustconsumer32_data = {
	.type = LTTNG_CONSUMER32_UST,
	.err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
	.cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
/* Set non-zero by stop_threads() to make the dispatch thread exit its loop */
static int dispatch_thread_exit;

/* Global application Unix socket path */
static char apps_unix_sock_path[PATH_MAX];
/* Global client Unix socket path */
static char client_unix_sock_path[PATH_MAX];
/* global wait shm path for UST */
static char wait_shm_path[PATH_MAX];

/* Sockets and FDs; -1 means "not open" */
static int client_sock = -1;
static int apps_sock = -1;
static int kernel_tracer_fd = -1;
static int kernel_poll_pipe[2] = { -1, -1 };

/*
 * Quit pipe for all threads. This permits a single cancellation point
 * for all threads when receiving an event on the pipe.
 */
static int thread_quit_pipe[2] = { -1, -1 };

/*
 * This pipe is used to inform the thread managing application communication
 * that a command is queued and ready to be processed.
 */
static int apps_cmd_pipe[2] = { -1, -1 };

/* Pthread, Mutexes and Semaphores */
static pthread_t apps_thread;
static pthread_t reg_apps_thread;
static pthread_t client_thread;
static pthread_t kernel_thread;
static pthread_t dispatch_thread;

/*
 * UST registration command queue. This queue is tied with a futex and uses a N
 * wakers / 1 waiter implemented and detailed in futex.c/.h
 *
 * The thread_manage_apps and thread_dispatch_ust_registration interact with
 * this queue and the wait/wake scheme.
 */
static struct ust_cmd_queue ust_cmd_queue;

/*
 * Pointer initialized before thread creation.
 *
 * This points to the tracing session list containing the session count and a
 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
 * MUST NOT be taken if you call a public function in session.c.
 *
 * The lock is nested inside the structure: session_list_ptr->lock. Please use
 * session_lock_list and session_unlock_list for lock acquisition.
 */
static struct ltt_session_list *session_list_ptr;

/* Per-bitness UST consumerd fds; -1 until set */
int ust_consumerd64_fd = -1;
int ust_consumerd32_fd = -1;

/* Build-time defaults for consumerd binaries/libdirs; overridable at runtime
 * via environment variables in setup_consumerd_path(). */
static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
/*
 * Consumer daemon state which is changed when spawning it, killing it or in
 * case of a fatal error.
 */
enum consumerd_state {
	CONSUMER_STARTED = 1,
	CONSUMER_STOPPED = 2,
	/*
	 * NOTE(review): extraction dropped at least one enumerator here —
	 * CONSUMER_ERROR is used by thread_manage_consumer() below — and the
	 * closing "};". Restore before compiling.
	 */

/*
 * This consumer daemon state is used to validate if a client command will be
 * able to reach the consumer. If not, the client is informed. For instance,
 * doing a "lttng start" when the consumer state is set to ERROR will return an
 * error to the client.
 *
 * The following example shows a possible race condition of this scheme:
 *
 * consumer thread error happens
 * client cmd checks state -> still OK
 * consumer thread exit, sets error
 * client cmd try to talk to consumer
 *
 * However, since the consumer is a different daemon, we have no way of making
 * sure the command will reach it safely even with this state flag. This is why
 * we consider that up to the state validation during command processing, the
 * command is safe. After that, we can not guarantee the correctness of the
 * client request vis-a-vis the consumer.
 */
static enum consumerd_state ust_consumerd_state;
static enum consumerd_state kernel_consumerd_state;
/*
 * Select consumerd binary/library paths for this build's bitness, then let
 * LTTNG_CONSUMERD{32,64}_{BIN,LIBDIR} environment variables override them.
 *
 * NOTE(review): extraction-damaged — the opening/closing braces, the
 * "#else"/"#endif" lines and (presumably) the NULL checks guarding the
 * getenv() results are missing from this paste; restore before compiling.
 */
void setup_consumerd_path(void)
	const char *bin, *libdir;

	/*
	 * Allow INSTALL_BIN_PATH to be used as a target path for the
	 * native architecture size consumer if CONFIG_CONSUMER*_PATH
	 * has not been defined.
	 */
#if (CAA_BITS_PER_LONG == 32)
	if (!consumerd32_bin[0]) {
		consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	if (!consumerd32_libdir[0]) {
		consumerd32_libdir = INSTALL_LIB_PATH;
#elif (CAA_BITS_PER_LONG == 64)
	if (!consumerd64_bin[0]) {
		consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
	if (!consumerd64_libdir[0]) {
		consumerd64_libdir = INSTALL_LIB_PATH;
#error "Unknown bitness"

	/*
	 * runtime env. var. overrides the build default.
	 */
	bin = getenv("LTTNG_CONSUMERD32_BIN");
	consumerd32_bin = bin;

	bin = getenv("LTTNG_CONSUMERD64_BIN");
	consumerd64_bin = bin;

	libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
	consumerd32_libdir = libdir;

	libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
	consumerd64_libdir = libdir;
/*
 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
 *
 * NOTE(review): extraction-damaged — the second parameter (size), the body
 * braces, the error checks and the return are missing from this paste.
 */
static int create_thread_poll_set(struct lttng_poll_event *events,
	/* Reject a NULL event set or a zero-sized set up front. */
	if (events == NULL || size == 0) {

	ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);

	/* Every thread's poll set watches the quit pipe's read end. */
	ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN);
/*
 * Check if the thread quit pipe was triggered.
 *
 * Return 1 if it was triggered else 0;
 *
 * NOTE(review): extraction-damaged — braces and the return statements are
 * missing from this paste.
 */
static int check_thread_quit_pipe(int fd, uint32_t events)
	/* Quit pipe read end became readable -> daemon is shutting down. */
	if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
/*
 * Return group ID of the tracing group or -1 if not found.
 *
 * NOTE(review): extraction-damaged — the declaration of grp, braces and the
 * return path are missing from this paste.
 */
static gid_t allowed_group(void)
	/* Prefer the group given on the command line over the default one. */
	if (opt_tracing_group) {
		grp = getgrnam(opt_tracing_group);
	grp = getgrnam(default_tracing_group);
/*
 * Init thread quit pipe.
 *
 * Return -1 on error or 0 if all pipes are created.
 *
 * NOTE(review): extraction-damaged — declarations (ret, i), braces and the
 * return statements are missing from this paste.
 */
static int init_thread_quit_pipe(void)
	ret = pipe(thread_quit_pipe);
		PERROR("thread quit pipe");

	/* Mark both ends close-on-exec so spawned children don't inherit them. */
	for (i = 0; i < 2; i++) {
		ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
/*
 * Complete teardown of a kernel session. This free all data structure related
 * to a kernel session and update counter.
 *
 * NOTE(review): extraction-damaged — closing braces and the early-return of
 * the no-session branch are missing from this paste.
 */
static void teardown_kernel_session(struct ltt_session *session)
	if (!session->kernel_session) {
		DBG3("No kernel session when tearing down session");

	DBG("Tearing down kernel session");

	/*
	 * If a custom kernel consumer was registered, close the socket before
	 * tearing down the complete kernel session structure
	 */
	if (kconsumer_data.cmd_sock >= 0 &&
			session->kernel_session->consumer_fd != kconsumer_data.cmd_sock) {
		lttcomm_close_unix_sock(session->kernel_session->consumer_fd);

	trace_kernel_destroy_session(session->kernel_session);
/*
 * Complete teardown of all UST sessions. This will free everything on his path
 * and destroy the core essence of all ust sessions :)
 *
 * NOTE(review): extraction-damaged — declarations (ret), braces and the
 * early-return of the no-session branch are missing from this paste.
 */
static void teardown_ust_session(struct ltt_session *session)
	if (!session->ust_session) {
		DBG3("No UST session when tearing down session");

	DBG("Tearing down UST session(s)");

	/* Ask every registered app to stop tracing for this session. */
	ret = ust_app_destroy_trace_all(session->ust_session);
		ERR("Error in ust_app_destroy_trace_all");

	trace_ust_destroy_session(session->ust_session);
/*
 * Stop all threads by closing the thread quit pipe.
 *
 * NOTE(review): extraction-damaged — the declaration of ret and the error
 * check braces are missing from this paste.
 */
static void stop_threads(void)
	/* Stopping all threads */
	DBG("Terminating all threads");
	ret = notify_thread_pipe(thread_quit_pipe[1]);
		ERR("write error on thread quit pipe");

	/* Dispatch thread: set its exit flag, then wake its futex wait. */
	dispatch_thread_exit = 1;
	futex_nto1_wake(&ust_cmd_queue.futex);
/*
 * Global daemon cleanup: remove the run directory, destroy every tracing
 * session, release kernel tracer resources and close inter-thread pipes.
 *
 * NOTE(review): extraction-damaged — declarations (ret, i, cmd, rundir),
 * many braces and several statements (e.g. the system()/remove of rundir)
 * are missing from this paste.
 */
static void cleanup(void)
	struct ltt_session *sess, *stmp;

	DBG("Removing %s directory", rundir);
	ret = asprintf(&cmd, "rm -rf %s", rundir);
		ERR("asprintf failed. Something is really wrong!");

	/* Remove lttng run directory */
		ERR("Unable to clean %s", rundir);

	DBG("Cleaning up all sessions");

	/* Destroy session list mutex */
	if (session_list_ptr != NULL) {
		pthread_mutex_destroy(&session_list_ptr->lock);

		/* Cleanup ALL session */
		cds_list_for_each_entry_safe(sess, stmp,
				&session_list_ptr->head, list) {
			teardown_kernel_session(sess);
			teardown_ust_session(sess);

	DBG("Closing all UST sockets");
	ust_app_clean_list();

	pthread_mutex_destroy(&kconsumer_data.pid_mutex);

	/* Kernel tracer teardown only applies when running as root. */
	if (is_root && !opt_no_kernel) {
		DBG2("Closing kernel fd");
		if (kernel_tracer_fd >= 0) {
			ret = close(kernel_tracer_fd);

		DBG("Unloading kernel modules");
		modprobe_remove_lttng_all();

	/*
	 * Closing all pipes used for communication between threads.
	 */
	for (i = 0; i < 2; i++) {
		if (kernel_poll_pipe[i] >= 0) {
			ret = close(kernel_poll_pipe[i]);

	for (i = 0; i < 2; i++) {
		if (thread_quit_pipe[i] >= 0) {
			ret = close(thread_quit_pipe[i]);

	for (i = 0; i < 2; i++) {
		if (apps_cmd_pipe[i] >= 0) {
			ret = close(apps_cmd_pipe[i]);

	/* Debug-build easter egg printed with ANSI colour escape codes. */
	DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
			"Matthew, BEET driven development works!%c[%dm",
			27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
/*
 * Send data on a unix socket using the liblttsessiondcomm API.
 *
 * Return lttcomm error code.
 *
 * NOTE(review): extraction-damaged — the opening brace and the length
 * validation code under "Check valid length" are missing from this paste.
 */
static int send_unix_sock(int sock, void *buf, size_t len)
	/* Check valid length */

	return lttcomm_send_unix_sock(sock, buf, len);
/*
 * Free memory of a command context structure.
 *
 * NOTE(review): extraction-damaged — braces and the trailing statements
 * (presumably freeing/NULL-ing *cmd_ctx itself) are missing from this paste.
 */
static void clean_command_ctx(struct command_ctx **cmd_ctx)
	DBG("Clean command context structure");

	/* Free the reply (llm) and request (lsm) messages when present. */
	if ((*cmd_ctx)->llm) {
		free((*cmd_ctx)->llm);
	if ((*cmd_ctx)->lsm) {
		free((*cmd_ctx)->lsm);
/*
 * Send all stream fds of kernel channel to the consumer.
 *
 * First sends one ADD_CHANNEL command describing the channel, then one
 * ADD_STREAM command per stream, passing each stream fd over the socket
 * as ancillary data.
 *
 * NOTE(review): extraction-damaged — the declaration of ret, braces,
 * error-handling branches after each send and the return are missing from
 * this paste.
 */
static int send_kconsumer_channel_streams(struct consumer_data *consumer_data,
		int sock, struct ltt_kernel_channel *channel,
		uid_t uid, gid_t gid)
	struct ltt_kernel_stream *stream;
	struct lttcomm_consumer_msg lkm;

	DBG("Sending streams of channel %s to kernel consumer",
			channel->channel->name);

	/* Describe the channel itself to the consumer. */
	lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
	lkm.u.channel.channel_key = channel->fd;
	lkm.u.channel.max_sb_size = channel->channel->attr.subbuf_size;
	lkm.u.channel.mmap_len = 0; /* for kernel */
	DBG("Sending channel %d to consumer", lkm.u.channel.channel_key);
	ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
		PERROR("send consumer channel");

	/* One ADD_STREAM message + one fd per stream of this channel. */
	cds_list_for_each_entry(stream, &channel->stream_list.head, list) {
		lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
		lkm.u.stream.channel_key = channel->fd;
		lkm.u.stream.stream_key = stream->fd;
		lkm.u.stream.state = stream->state;
		lkm.u.stream.output = channel->channel->attr.output;
		lkm.u.stream.mmap_len = 0; /* for kernel */
		lkm.u.stream.uid = uid;
		lkm.u.stream.gid = gid;
		/* Bounded copy; NUL-termination is forced on the next line. */
		strncpy(lkm.u.stream.path_name, stream->pathname, PATH_MAX - 1);
		lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
		DBG("Sending stream %d to consumer", lkm.u.stream.stream_key);
		ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
			PERROR("send consumer stream");

		/* Pass the stream file descriptor itself over the socket. */
		ret = lttcomm_send_fds_unix_sock(sock, &stream->fd, 1);
			PERROR("send consumer stream ancillary data");

	DBG("consumer channel streams sent");
/*
 * Send all stream fds of the kernel session to the consumer.
 *
 * Sends the metadata channel/stream first (when a metadata stream fd
 * exists), then delegates every data channel to
 * send_kconsumer_channel_streams().
 *
 * NOTE(review): extraction-damaged — the declaration of ret, braces,
 * error-handling branches and the return are missing from this paste.
 */
static int send_kconsumer_session_streams(struct consumer_data *consumer_data,
		struct ltt_kernel_session *session)
	struct ltt_kernel_channel *chan;
	struct lttcomm_consumer_msg lkm;
	int sock = session->consumer_fd;

	DBG("Sending metadata stream fd");

	/* Extra protection. It's NOT supposed to be set to -1 at this point */
	if (session->consumer_fd < 0) {
		session->consumer_fd = consumer_data->cmd_sock;

	if (session->metadata_stream_fd >= 0) {
		/* Send metadata channel fd */
		lkm.cmd_type = LTTNG_CONSUMER_ADD_CHANNEL;
		lkm.u.channel.channel_key = session->metadata->fd;
		lkm.u.channel.max_sb_size = session->metadata->conf->attr.subbuf_size;
		lkm.u.channel.mmap_len = 0; /* for kernel */
		DBG("Sending metadata channel %d to consumer", lkm.u.channel.channel_key);
		ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
			PERROR("send consumer channel");

		/* Send metadata stream fd */
		lkm.cmd_type = LTTNG_CONSUMER_ADD_STREAM;
		lkm.u.stream.channel_key = session->metadata->fd;
		lkm.u.stream.stream_key = session->metadata_stream_fd;
		lkm.u.stream.state = LTTNG_CONSUMER_ACTIVE_STREAM;
		lkm.u.stream.output = DEFAULT_KERNEL_CHANNEL_OUTPUT;
		lkm.u.stream.mmap_len = 0; /* for kernel */
		lkm.u.stream.uid = session->uid;
		lkm.u.stream.gid = session->gid;
		/* Bounded copy; NUL-termination is forced on the next line. */
		strncpy(lkm.u.stream.path_name, session->metadata->pathname, PATH_MAX - 1);
		lkm.u.stream.path_name[PATH_MAX - 1] = '\0';
		DBG("Sending metadata stream %d to consumer", lkm.u.stream.stream_key);
		ret = lttcomm_send_unix_sock(sock, &lkm, sizeof(lkm));
			PERROR("send consumer stream");

		/* Metadata stream fd travels as ancillary data. */
		ret = lttcomm_send_fds_unix_sock(sock, &session->metadata_stream_fd, 1);
			PERROR("send consumer stream");

	/* Now send every data channel's streams. */
	cds_list_for_each_entry(chan, &session->channel_list.head, list) {
		ret = send_kconsumer_channel_streams(consumer_data, sock, chan,
				session->uid, session->gid);

	DBG("consumer fds (metadata and channel streams) sent");
/*
 * Notify UST applications using the shm mmap futex.
 *
 * NOTE(review): extraction-damaged — the declaration of wait_shm_mmap,
 * braces and return statements are missing from this paste.
 */
static int notify_ust_apps(int active)
	DBG("Notifying applications of session daemon state: %d", active);

	/* See shm.c for this call implying mmap, shm and futex calls */
	wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
	if (wait_shm_mmap == NULL) {

	/* Wake waiting process */
	futex_wait_update((int32_t *) wait_shm_mmap, active);

	/* Apps notified successfully */
/*
 * Setup the outgoing data buffer for the response (llm) by allocating the
 * right amount of memory and copying the original information from the lsm
 * structure.
 *
 * Return total size of the buffer pointed by buf.
 *
 * NOTE(review): extraction-damaged — the computation of buf_size, braces,
 * error handling and the return are missing from this paste.
 */
static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
	/* Reply message = fixed header + "size" bytes of payload. */
	cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
	if (cmd_ctx->llm == NULL) {

	/* Copy common data */
	cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
	cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;

	cmd_ctx->llm->data_size = size;
	cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
/*
 * Update the kernel poll set of all channel fd available over all tracing
 * session. Add the wakeup pipe at the end of the set.
 *
 * NOTE(review): extraction-damaged — the declaration of ret, the
 * session_lock_list() call, braces, "continue"/goto statements and the
 * return paths are missing; the two session_unlock_list() calls at the end
 * are presumably the success and error exits respectively.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel poll set");

	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		/* Skip sessions without a kernel side. */
		if (session->kernel_session == NULL) {
			session_unlock(session);

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
				session_unlock(session);

			DBG("Channel fd %d added to kernel set", channel->fd);

		session_unlock(session);

	session_unlock_list();

	session_unlock_list();
/*
 * Find the channel fd from 'fd' over all tracing session. When found, check
 * for new channel stream and send those stream fds to the kernel consumer.
 *
 * Useful for CPU hotplug feature.
 *
 * NOTE(review): extraction-damaged — the declaration of ret, the
 * session_lock_list() call, braces, gotos and the return paths are missing
 * from this paste.
 */
static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
	struct ltt_session *session;
	struct ltt_kernel_channel *channel;

	DBG("Updating kernel streams for channel fd %d", fd);

	cds_list_for_each_entry(session, &session_list_ptr->head, list) {
		session_lock(session);
		/* Skip sessions without a kernel side. */
		if (session->kernel_session == NULL) {
			session_unlock(session);

		/* This is not suppose to be -1 but this is an extra security check */
		if (session->kernel_session->consumer_fd < 0) {
			session->kernel_session->consumer_fd = consumer_data->cmd_sock;

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			if (channel->fd == fd) {
				DBG("Channel found, updating kernel streams");
				/* Open a stream for the newly hotplugged CPU. */
				ret = kernel_open_channel_stream(channel);

				/*
				 * Have we already sent fds to the consumer? If yes, it means
				 * that tracing is started so it is safe to send our updated
				 * stream fds.
				 */
				if (session->kernel_session->consumer_fds_sent == 1) {
					ret = send_kconsumer_channel_streams(consumer_data,
							session->kernel_session->consumer_fd, channel,
							session->uid, session->gid);

		session_unlock(session);

	session_unlock_list();

	session_unlock(session);
	session_unlock_list();
/*
 * For each tracing session, update newly registered apps.
 *
 * NOTE(review): extraction-damaged — the session_lock_list()/session_lock()
 * calls and closing braces are missing from this paste (session_unlock() is
 * visible, so its lock counterpart was presumably dropped).
 */
static void update_ust_app(int app_sock)
	struct ltt_session *sess, *stmp;

	/* For all tracing session(s) */
	cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
		/* Push the global UST config to the newly registered app. */
		if (sess->ust_session) {
			ust_app_global_update(sess->ust_session, app_sock);

		session_unlock(sess);

	session_unlock_list();
/*
 * This thread manage event coming from the kernel.
 *
 * Features supported in this thread:
 * (feature list truncated by extraction)
 *
 * NOTE(review): extraction-damaged — the declaration of the read buffer
 * (tmp), the main loop construct, braces, goto labels, "continue"
 * statements and the return are missing from this paste.
 */
static void *thread_manage_kernel(void *data)
	int ret, i, pollfd, update_poll_flag = 1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;

	DBG("Thread manage kernel started");

	/* Poll set = quit pipe + kernel poll pipe. */
	ret = create_thread_poll_set(&events, 2);
		goto error_poll_create;

	ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);

	if (update_poll_flag == 1) {
		/*
		 * Reset number of fd in the poll set. Always 2 since there is the thread
		 * quit pipe and the kernel pipe.
		 */
		ret = update_kernel_poll(&events);
		update_poll_flag = 0;

	nb_fd = LTTNG_POLL_GETNB(&events);

	DBG("Thread kernel polling on %d fds", nb_fd);

	/* Zeroed the poll events */
	lttng_poll_reset(&events);

	/* Poll infinite value of time */
	ret = lttng_poll_wait(&events, -1);
	/*
	 * Restart interrupted system call.
	 */
	if (errno == EINTR) {
	} else if (ret == 0) {
		/* Should not happen since timeout is infinite */
		ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);

		/* Check for data on kernel pipe */
		if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
			/* Drain the wake-up byte and refresh the poll set next turn. */
			ret = read(kernel_poll_pipe[0], &tmp, 1);
			update_poll_flag = 1;
			/*
			 * New CPU detected by the kernel. Adding kernel stream to
			 * kernel session and updating the kernel consumer
			 */
			if (revents & LPOLLIN) {
				ret = update_kernel_stream(&kconsumer_data, pollfd);
			/*
			 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
			 * and unregister kernel stream at this point.
			 */

	lttng_poll_clean(&events);

	DBG("Kernel thread dying");
/*
 * This thread manage the consumer error sent back to the session daemon.
 *
 * Phase 1: wait for the consumerd to report CONSUMERD_COMMAND_SOCK_READY on
 * its error socket and connect the command socket (signalled via the
 * consumer_data semaphore). Phase 2: keep watching the accepted error socket
 * and, on a fatal consumer error, record CONSUMER_ERROR in the matching
 * global *_consumerd_state.
 *
 * NOTE(review): extraction-damaged — braces, goto labels/statements,
 * "continue"s, several error checks and the return are missing from this
 * paste.
 */
static void *thread_manage_consumer(void *data)
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	enum lttcomm_return_code code;
	struct lttng_poll_event events;
	struct consumer_data *consumer_data = data;

	DBG("[thread] Manage consumer started");

	ret = lttcomm_listen_unix_sock(consumer_data->err_sock);

	/*
	 * Pass 2 as size here for the thread quit pipe and kconsumerd_err_sock.
	 * Nothing more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);

	ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);

	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Inifinite blocking call, waiting for transmission */
	ret = lttng_poll_wait(&events, -1);
	/*
	 * Restart interrupted system call.
	 */
	if (errno == EINTR) {

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);

		/* Event on the registration socket */
		if (pollfd == consumer_data->err_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket poll error");

	sock = lttcomm_accept_unix_sock(consumer_data->err_sock);

	DBG2("Receiving code from consumer err_sock");

	/* Getting status code from kconsumerd */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));

	if (code == CONSUMERD_COMMAND_SOCK_READY) {
		consumer_data->cmd_sock =
			lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
		if (consumer_data->cmd_sock < 0) {
			/* Post even on failure so the waiter is not stuck forever. */
			sem_post(&consumer_data->sem);
			PERROR("consumer connect");

		/* Signal condition to tell that the kconsumerd is ready */
		sem_post(&consumer_data->sem);
		DBG("consumer command socket ready");
		ERR("consumer error when waiting for SOCK_READY : %s",
				lttcomm_get_readable_code(-code));

	/* Remove the kconsumerd error sock since we've established a connexion */
	ret = lttng_poll_del(&events, consumer_data->err_sock);

	/* From here on, watch the accepted error socket instead. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);

	/* Update number of fd */
	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Inifinite blocking call, waiting for transmission */
	ret = lttng_poll_wait(&events, -1);
	/*
	 * Restart interrupted system call.
	 */
	if (errno == EINTR) {

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);

		/* Event on the kconsumerd socket */
		if (pollfd == sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("consumer err socket second poll error");

	/* Wait for any kconsumerd error */
	ret = lttcomm_recv_unix_sock(sock, &code,
			sizeof(enum lttcomm_return_code));
		ERR("consumer closed the command socket");

	ERR("consumer return code : %s", lttcomm_get_readable_code(-code));

	/* Immediately set the consumerd state to stopped */
	if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
		uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
	} else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
			consumer_data->type == LTTNG_CONSUMER32_UST) {
		uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
		/* Code flow error... */

	/* Cleanup: close both sockets, remove socket files, clear pid. */
	if (consumer_data->err_sock >= 0) {
		ret = close(consumer_data->err_sock);
	if (consumer_data->cmd_sock >= 0) {
		ret = close(consumer_data->cmd_sock);

	unlink(consumer_data->err_unix_sock_path);
	unlink(consumer_data->cmd_unix_sock_path);
	consumer_data->pid = 0;

	lttng_poll_clean(&events);

	DBG("consumer thread cleanup completed");
/*
 * This thread manage application communication.
 *
 * Reads ust_command records pushed through apps_cmd_pipe by the dispatch
 * thread, registers each application, then monitors the app socket for
 * hangup to unregister it.
 *
 * NOTE(review): extraction-damaged — declarations (i, ret, pollfd), the
 * main loop construct, braces, goto labels, "continue"s and the return are
 * missing; ust_app_register()'s argument list is also cut short.
 */
static void *thread_manage_apps(void *data)
	uint32_t revents, nb_fd;
	struct ust_command ust_cmd;
	struct lttng_poll_event events;

	DBG("[thread] Manage application started");

	/* This thread reads RCU-protected app data structures. */
	rcu_register_thread();
	rcu_thread_online();

	ret = create_thread_poll_set(&events, 2);
		goto error_poll_create;

	ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);

	/* Zeroed the events structure */
	lttng_poll_reset(&events);

	nb_fd = LTTNG_POLL_GETNB(&events);

	DBG("Apps thread polling on %d fds", nb_fd);

	/* Inifinite blocking call, waiting for transmission */
	ret = lttng_poll_wait(&events, -1);
	/*
	 * Restart interrupted system call.
	 */
	if (errno == EINTR) {

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);

		/* Inspect the apps cmd pipe */
		if (pollfd == apps_cmd_pipe[0]) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("Apps command pipe error");
			} else if (revents & LPOLLIN) {
				/* NOTE(review): ret is ssize_t vs sizeof (unsigned); the
				 * "ret < 0" test runs first so the mixed-sign comparison
				 * is safe here, but it trips -Wsign-compare. */
				ret = read(apps_cmd_pipe[0], &ust_cmd, sizeof(ust_cmd));
				if (ret < 0 || ret < sizeof(ust_cmd)) {
					PERROR("read apps cmd pipe");

				/* Register applicaton to the session daemon */
				ret = ust_app_register(&ust_cmd.reg_msg,
				if (ret == -ENOMEM) {
				} else if (ret < 0) {

				/*
				 * Validate UST version compatibility.
				 */
				ret = ust_app_validate_version(ust_cmd.sock);

				/*
				 * Add channel(s) and event(s) to newly registered apps
				 * from lttng global UST domain.
				 */
				update_ust_app(ust_cmd.sock);

				ret = ust_app_register_done(ust_cmd.sock);
					/*
					 * If the registration is not possible, we simply
					 * unregister the apps and continue
					 */
					ust_app_unregister(ust_cmd.sock);

				/*
				 * We just need here to monitor the close of the UST
				 * socket and poll set monitor those by default.
				 * Listen on POLLIN (even if we never expect any
				 * data) to ensure that hangup wakes us.
				 */
				ret = lttng_poll_add(&events, ust_cmd.sock, LPOLLIN);

				DBG("Apps with sock %d added to poll set",

			/*
			 * At this point, we know that a registered application made
			 * the event at poll_wait.
			 */
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				/* Removing from the poll set */
				ret = lttng_poll_del(&events, pollfd);

				/* Socket closed on remote end. */
				ust_app_unregister(pollfd);

	lttng_poll_clean(&events);

	DBG("Application communication apps thread cleanup complete");
	rcu_thread_offline();
	rcu_unregister_thread();
/*
 * Dispatch request from the registration threads to the application
 * communication thread.
 *
 * Drains the lock-free ust_cmd_queue and forwards each ust_command into
 * apps_cmd_pipe; sleeps on the queue futex when the queue is empty.
 *
 * NOTE(review): extraction-damaged — the declaration of ret, the opening of
 * the inner do { } loop, braces, the free() of the dequeued ust_cmd and the
 * return are missing from this paste.
 */
static void *thread_dispatch_ust_registration(void *data)
	struct cds_wfq_node *node;
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Dispatch UST command started");

	while (!dispatch_thread_exit) {
		/* Atomically prepare the queue futex */
		futex_nto1_prepare(&ust_cmd_queue.futex);

			/* Dequeue command for registration */
			node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
				DBG("Woken up but nothing in the UST command queue");
				/* Continue thread execution */

			ust_cmd = caa_container_of(node, struct ust_command, node);

			DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
					" gid:%d sock:%d name:%s (version %d.%d)",
					ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
					ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
					ust_cmd->sock, ust_cmd->reg_msg.name,
					ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
			/*
			 * Inform apps thread of the new application registration. This
			 * call is blocking so we can be assured that the data will be read
			 * at some point in time or wait to the end of the world :)
			 */
			ret = write(apps_cmd_pipe[1], ust_cmd,
					sizeof(struct ust_command));
				PERROR("write apps cmd pipe");
				if (errno == EBADF) {
					/*
					 * We can't inform the application thread to process
					 * registration. We will exit or else application
					 * registration will not occur and tracing will never
					 * (comment truncated by extraction)
					 */
		} while (node != NULL);

		/* Futex wait on queue. Blocking call on futex() */
		futex_nto1_wait(&ust_cmd_queue.futex);

	DBG("Dispatch thread dying");
/*
 * This thread manage application registration.
 *
 * Accepts connections on the global apps socket, reads one
 * ust_register_msg per connection, and enqueues a heap-allocated
 * ust_command on ust_cmd_queue for the dispatch thread (which owns the
 * free; see the note on ust_cmd below).
 *
 * NOTE(review): extraction-damaged — the main loop construct, braces,
 * goto labels/statements, "continue"s, several error checks and the
 * return are missing from this paste.
 */
static void *thread_registration_apps(void *data)
	int sock = -1, i, ret, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	/*
	 * Get allocated in this thread, enqueued to a global queue, dequeued and
	 * freed in the manage apps thread.
	 */
	struct ust_command *ust_cmd = NULL;

	DBG("[thread] Manage application registration started");

	ret = lttcomm_listen_unix_sock(apps_sock);

	/*
	 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
	 * more will be added to this poll set.
	 */
	ret = create_thread_poll_set(&events, 2);
		goto error_create_poll;

	/* Add the application registration socket */
	ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
		goto error_poll_add;

	/* Notify all applications to register */
	ret = notify_ust_apps(1);
		ERR("Failed to notify applications or create the wait shared memory.\n"
				"Execution continues but there might be problem for already\n"
				"running applications that wishes to register.");

	DBG("Accepting application registration");

	nb_fd = LTTNG_POLL_GETNB(&events);

	/* Inifinite blocking call, waiting for transmission */
	ret = lttng_poll_wait(&events, -1);
	/*
	 * Restart interrupted system call.
	 */
	if (errno == EINTR) {

	for (i = 0; i < nb_fd; i++) {
		/* Fetch once the poll data */
		revents = LTTNG_POLL_GETEV(&events, i);
		pollfd = LTTNG_POLL_GETFD(&events, i);

		/* Thread quit pipe has been closed. Killing thread. */
		ret = check_thread_quit_pipe(pollfd, revents);

		/* Event on the registration socket */
		if (pollfd == apps_sock) {
			if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				ERR("Register apps socket poll error");
			} else if (revents & LPOLLIN) {
				sock = lttcomm_accept_unix_sock(apps_sock);

				/* Create UST registration command for enqueuing */
				ust_cmd = zmalloc(sizeof(struct ust_command));
				if (ust_cmd == NULL) {
					PERROR("ust command zmalloc");

				/*
				 * Using message-based transmissions to ensure we don't
				 * have to deal with partially received messages.
				 */
				ret = lttng_fd_get(LTTNG_FD_APPS, 1);
					ERR("Exhausted file descriptors allowed for applications.");

				/* NOTE(review): same benign ssize_t-vs-size_t comparison
				 * as in thread_manage_apps; "ret < 0" runs first. */
				ret = lttcomm_recv_unix_sock(sock, &ust_cmd->reg_msg,
						sizeof(struct ust_register_msg));
				if (ret < 0 || ret < sizeof(struct ust_register_msg)) {
						PERROR("lttcomm_recv_unix_sock register apps");
						ERR("Wrong size received on apps register");

					lttng_fd_put(LTTNG_FD_APPS, 1);

				ust_cmd->sock = sock;

				DBG("UST registration received with pid:%d ppid:%d uid:%d"
						" gid:%d sock:%d name:%s (version %d.%d)",
						ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
						ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
						ust_cmd->sock, ust_cmd->reg_msg.name,
						ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);

				/*
				 * Lock free enqueue the registration request. The red pill
				 * has been taken! This apps will be part of the *system*.
				 */
				cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);

				/*
				 * Wake the registration queue futex. Implicit memory
				 * barrier with the exchange in cds_wfq_enqueue.
				 */
				futex_nto1_wake(&ust_cmd_queue.futex);

	/* Notify that the registration thread is gone */

	if (apps_sock >= 0) {
		ret = close(apps_sock);

	lttng_fd_put(LTTNG_FD_APPS, 1);

	unlink(apps_unix_sock_path);

	lttng_poll_clean(&events);

	DBG("UST Registration thread cleanup complete");
1556 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1557 * exec or it will fail.
1559 static int spawn_consumer_thread(struct consumer_data
*consumer_data
)
1562 struct timespec timeout
;
1564 timeout
.tv_sec
= DEFAULT_SEM_WAIT_TIMEOUT
;
1565 timeout
.tv_nsec
= 0;
1567 /* Setup semaphore */
1568 ret
= sem_init(&consumer_data
->sem
, 0, 0);
1570 PERROR("sem_init consumer semaphore");
1574 ret
= pthread_create(&consumer_data
->thread
, NULL
,
1575 thread_manage_consumer
, consumer_data
);
1577 PERROR("pthread_create consumer");
1582 /* Get time for sem_timedwait absolute timeout */
1583 ret
= clock_gettime(CLOCK_REALTIME
, &timeout
);
1585 PERROR("clock_gettime spawn consumer");
1586 /* Infinite wait for the kconsumerd thread to be ready */
1587 ret
= sem_wait(&consumer_data
->sem
);
1589 /* Normal timeout if the gettime was successful */
1590 timeout
.tv_sec
+= DEFAULT_SEM_WAIT_TIMEOUT
;
1591 ret
= sem_timedwait(&consumer_data
->sem
, &timeout
);
1595 if (errno
== ETIMEDOUT
) {
1597 * Call has timed out so we kill the kconsumerd_thread and return
1600 ERR("The consumer thread was never ready. Killing it");
1601 ret
= pthread_cancel(consumer_data
->thread
);
1603 PERROR("pthread_cancel consumer thread");
1606 PERROR("semaphore wait failed consumer thread");
1611 pthread_mutex_lock(&consumer_data
->pid_mutex
);
1612 if (consumer_data
->pid
== 0) {
1613 ERR("Kconsumerd did not start");
1614 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1617 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1626 * Join consumer thread
1628 static int join_consumer_thread(struct consumer_data
*consumer_data
)
1633 if (consumer_data
->pid
!= 0) {
1634 ret
= kill(consumer_data
->pid
, SIGTERM
);
1636 ERR("Error killing consumer daemon");
1639 return pthread_join(consumer_data
->thread
, &status
);
1646 * Fork and exec a consumer daemon (consumerd).
1648 * Return pid if successful else -1.
1650 static pid_t
spawn_consumerd(struct consumer_data
*consumer_data
)
1654 const char *consumer_to_use
;
1655 const char *verbosity
;
1658 DBG("Spawning consumerd");
1665 if (opt_verbose_consumer
) {
1666 verbosity
= "--verbose";
1668 verbosity
= "--quiet";
1670 switch (consumer_data
->type
) {
1671 case LTTNG_CONSUMER_KERNEL
:
1673 * Find out which consumerd to execute. We will first try the
1674 * 64-bit path, then the sessiond's installation directory, and
1675 * fallback on the 32-bit one,
1677 DBG3("Looking for a kernel consumer at these locations:");
1678 DBG3(" 1) %s", consumerd64_bin
);
1679 DBG3(" 2) %s/%s", INSTALL_BIN_PATH
, CONSUMERD_FILE
);
1680 DBG3(" 3) %s", consumerd32_bin
);
1681 if (stat(consumerd64_bin
, &st
) == 0) {
1682 DBG3("Found location #1");
1683 consumer_to_use
= consumerd64_bin
;
1684 } else if (stat(INSTALL_BIN_PATH
"/" CONSUMERD_FILE
, &st
) == 0) {
1685 DBG3("Found location #2");
1686 consumer_to_use
= INSTALL_BIN_PATH
"/" CONSUMERD_FILE
;
1687 } else if (stat(consumerd32_bin
, &st
) == 0) {
1688 DBG3("Found location #3");
1689 consumer_to_use
= consumerd32_bin
;
1691 DBG("Could not find any valid consumerd executable");
1694 DBG("Using kernel consumer at: %s", consumer_to_use
);
1695 execl(consumer_to_use
,
1696 "lttng-consumerd", verbosity
, "-k",
1697 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
1698 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
1701 case LTTNG_CONSUMER64_UST
:
1703 char *tmpnew
= NULL
;
1705 if (consumerd64_libdir
[0] != '\0') {
1709 tmp
= getenv("LD_LIBRARY_PATH");
1713 tmplen
= strlen("LD_LIBRARY_PATH=")
1714 + strlen(consumerd64_libdir
) + 1 /* : */ + strlen(tmp
);
1715 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
1720 strcpy(tmpnew
, "LD_LIBRARY_PATH=");
1721 strcat(tmpnew
, consumerd64_libdir
);
1722 if (tmp
[0] != '\0') {
1723 strcat(tmpnew
, ":");
1724 strcat(tmpnew
, tmp
);
1726 ret
= putenv(tmpnew
);
1732 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin
);
1733 ret
= execl(consumerd64_bin
, "lttng-consumerd", verbosity
, "-u",
1734 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
1735 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
1737 if (consumerd64_libdir
[0] != '\0') {
1745 case LTTNG_CONSUMER32_UST
:
1747 char *tmpnew
= NULL
;
1749 if (consumerd32_libdir
[0] != '\0') {
1753 tmp
= getenv("LD_LIBRARY_PATH");
1757 tmplen
= strlen("LD_LIBRARY_PATH=")
1758 + strlen(consumerd32_libdir
) + 1 /* : */ + strlen(tmp
);
1759 tmpnew
= zmalloc(tmplen
+ 1 /* \0 */);
1764 strcpy(tmpnew
, "LD_LIBRARY_PATH=");
1765 strcat(tmpnew
, consumerd32_libdir
);
1766 if (tmp
[0] != '\0') {
1767 strcat(tmpnew
, ":");
1768 strcat(tmpnew
, tmp
);
1770 ret
= putenv(tmpnew
);
1776 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin
);
1777 ret
= execl(consumerd32_bin
, "lttng-consumerd", verbosity
, "-u",
1778 "--consumerd-cmd-sock", consumer_data
->cmd_unix_sock_path
,
1779 "--consumerd-err-sock", consumer_data
->err_unix_sock_path
,
1781 if (consumerd32_libdir
[0] != '\0') {
1790 PERROR("unknown consumer type");
1794 PERROR("kernel start consumer exec");
1797 } else if (pid
> 0) {
1800 PERROR("start consumer fork");
1808 * Spawn the consumerd daemon and session daemon thread.
1810 static int start_consumerd(struct consumer_data
*consumer_data
)
1814 pthread_mutex_lock(&consumer_data
->pid_mutex
);
1815 if (consumer_data
->pid
!= 0) {
1816 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1820 ret
= spawn_consumerd(consumer_data
);
1822 ERR("Spawning consumerd failed");
1823 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1827 /* Setting up the consumer_data pid */
1828 consumer_data
->pid
= ret
;
1829 DBG2("Consumer pid %d", consumer_data
->pid
);
1830 pthread_mutex_unlock(&consumer_data
->pid_mutex
);
1832 DBG2("Spawning consumer control thread");
1833 ret
= spawn_consumer_thread(consumer_data
);
1835 ERR("Fatal error spawning consumer control thread");
1847 * Check version of the lttng-modules.
1849 static int validate_lttng_modules_version(void)
1851 return kernel_validate_version(kernel_tracer_fd
);
1855 * Setup necessary data for kernel tracer action.
1857 static int init_kernel_tracer(void)
1861 /* Modprobe lttng kernel modules */
1862 ret
= modprobe_lttng_control();
1867 /* Open debugfs lttng */
1868 kernel_tracer_fd
= open(module_proc_lttng
, O_RDWR
);
1869 if (kernel_tracer_fd
< 0) {
1870 DBG("Failed to open %s", module_proc_lttng
);
1875 /* Validate kernel version */
1876 ret
= validate_lttng_modules_version();
1881 ret
= modprobe_lttng_data();
1886 DBG("Kernel tracer fd %d", kernel_tracer_fd
);
1890 modprobe_remove_lttng_control();
1891 ret
= close(kernel_tracer_fd
);
1895 kernel_tracer_fd
= -1;
1896 return LTTCOMM_KERN_VERSION
;
1899 ret
= close(kernel_tracer_fd
);
1905 modprobe_remove_lttng_control();
1908 WARN("No kernel tracer available");
1909 kernel_tracer_fd
= -1;
1911 return LTTCOMM_NEED_ROOT_SESSIOND
;
1913 return LTTCOMM_KERN_NA
;
1918 * Init tracing by creating trace directory and sending fds kernel consumer.
1920 static int init_kernel_tracing(struct ltt_kernel_session
*session
)
1924 if (session
->consumer_fds_sent
== 0) {
1926 * Assign default kernel consumer socket if no consumer assigned to the
1927 * kernel session. At this point, it's NOT supposed to be -1 but this is
1928 * an extra security check.
1930 if (session
->consumer_fd
< 0) {
1931 session
->consumer_fd
= kconsumer_data
.cmd_sock
;
1934 ret
= send_kconsumer_session_streams(&kconsumer_data
, session
);
1936 ret
= LTTCOMM_KERN_CONSUMER_FAIL
;
1940 session
->consumer_fds_sent
= 1;
1948 * Create an UST session and add it to the session ust list.
1950 static int create_ust_session(struct ltt_session
*session
,
1951 struct lttng_domain
*domain
)
1953 struct ltt_ust_session
*lus
= NULL
;
1956 switch (domain
->type
) {
1957 case LTTNG_DOMAIN_UST
:
1960 ret
= LTTCOMM_UNKNOWN_DOMAIN
;
1964 DBG("Creating UST session");
1966 lus
= trace_ust_create_session(session
->path
, session
->id
, domain
);
1968 ret
= LTTCOMM_UST_SESS_FAIL
;
1972 ret
= run_as_mkdir_recursive(lus
->pathname
, S_IRWXU
| S_IRWXG
,
1973 session
->uid
, session
->gid
);
1975 if (ret
!= -EEXIST
) {
1976 ERR("Trace directory creation error");
1977 ret
= LTTCOMM_UST_SESS_FAIL
;
1982 /* The domain type dictate different actions on session creation */
1983 switch (domain
->type
) {
1984 case LTTNG_DOMAIN_UST
:
1985 /* No ustctl for the global UST domain */
1988 ERR("Unknown UST domain on create session %d", domain
->type
);
1991 lus
->uid
= session
->uid
;
1992 lus
->gid
= session
->gid
;
1993 session
->ust_session
= lus
;
2003 * Create a kernel tracer session then create the default channel.
2005 static int create_kernel_session(struct ltt_session
*session
)
2009 DBG("Creating kernel session");
2011 ret
= kernel_create_session(session
, kernel_tracer_fd
);
2013 ret
= LTTCOMM_KERN_SESS_FAIL
;
2017 /* Set kernel consumer socket fd */
2018 if (kconsumer_data
.cmd_sock
>= 0) {
2019 session
->kernel_session
->consumer_fd
= kconsumer_data
.cmd_sock
;
2022 ret
= run_as_mkdir_recursive(session
->kernel_session
->trace_path
,
2023 S_IRWXU
| S_IRWXG
, session
->uid
, session
->gid
);
2025 if (ret
!= -EEXIST
) {
2026 ERR("Trace directory creation error");
2030 session
->kernel_session
->uid
= session
->uid
;
2031 session
->kernel_session
->gid
= session
->gid
;
2038 * Check if the UID or GID match the session. Root user has access to all
2041 static int session_access_ok(struct ltt_session
*session
, uid_t uid
, gid_t gid
)
2043 if (uid
!= session
->uid
&& gid
!= session
->gid
&& uid
!= 0) {
2050 static unsigned int lttng_sessions_count(uid_t uid
, gid_t gid
)
2053 struct ltt_session
*session
;
2055 DBG("Counting number of available session for UID %d GID %d",
2057 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
2059 * Only list the sessions the user can control.
2061 if (!session_access_ok(session
, uid
, gid
)) {
2070 * Using the session list, fill a lttng_session array to send back to the
2071 * client for session listing.
2073 * The session list lock MUST be acquired before calling this function. Use
2074 * session_lock_list() and session_unlock_list().
2076 static void list_lttng_sessions(struct lttng_session
*sessions
, uid_t uid
,
2080 struct ltt_session
*session
;
2082 DBG("Getting all available session for UID %d GID %d",
2085 * Iterate over session list and append data after the control struct in
2088 cds_list_for_each_entry(session
, &session_list_ptr
->head
, list
) {
2090 * Only list the sessions the user can control.
2092 if (!session_access_ok(session
, uid
, gid
)) {
2095 strncpy(sessions
[i
].path
, session
->path
, PATH_MAX
);
2096 sessions
[i
].path
[PATH_MAX
- 1] = '\0';
2097 strncpy(sessions
[i
].name
, session
->name
, NAME_MAX
);
2098 sessions
[i
].name
[NAME_MAX
- 1] = '\0';
2099 sessions
[i
].enabled
= session
->enabled
;
2105 * Fill lttng_channel array of all channels.
2107 static void list_lttng_channels(int domain
, struct ltt_session
*session
,
2108 struct lttng_channel
*channels
)
2111 struct ltt_kernel_channel
*kchan
;
2113 DBG("Listing channels for session %s", session
->name
);
2116 case LTTNG_DOMAIN_KERNEL
:
2117 /* Kernel channels */
2118 if (session
->kernel_session
!= NULL
) {
2119 cds_list_for_each_entry(kchan
,
2120 &session
->kernel_session
->channel_list
.head
, list
) {
2121 /* Copy lttng_channel struct to array */
2122 memcpy(&channels
[i
], kchan
->channel
, sizeof(struct lttng_channel
));
2123 channels
[i
].enabled
= kchan
->enabled
;
2128 case LTTNG_DOMAIN_UST
:
2130 struct lttng_ht_iter iter
;
2131 struct ltt_ust_channel
*uchan
;
2133 cds_lfht_for_each_entry(session
->ust_session
->domain_global
.channels
->ht
,
2134 &iter
.iter
, uchan
, node
.node
) {
2135 strncpy(channels
[i
].name
, uchan
->name
, LTTNG_SYMBOL_NAME_LEN
);
2136 channels
[i
].attr
.overwrite
= uchan
->attr
.overwrite
;
2137 channels
[i
].attr
.subbuf_size
= uchan
->attr
.subbuf_size
;
2138 channels
[i
].attr
.num_subbuf
= uchan
->attr
.num_subbuf
;
2139 channels
[i
].attr
.switch_timer_interval
=
2140 uchan
->attr
.switch_timer_interval
;
2141 channels
[i
].attr
.read_timer_interval
=
2142 uchan
->attr
.read_timer_interval
;
2143 channels
[i
].enabled
= uchan
->enabled
;
2144 switch (uchan
->attr
.output
) {
2145 case LTTNG_UST_MMAP
:
2147 channels
[i
].attr
.output
= LTTNG_EVENT_MMAP
;
2160 * Create a list of ust global domain events.
2162 static int list_lttng_ust_global_events(char *channel_name
,
2163 struct ltt_ust_domain_global
*ust_global
, struct lttng_event
**events
)
2166 unsigned int nb_event
= 0;
2167 struct lttng_ht_iter iter
;
2168 struct lttng_ht_node_str
*node
;
2169 struct ltt_ust_channel
*uchan
;
2170 struct ltt_ust_event
*uevent
;
2171 struct lttng_event
*tmp
;
2173 DBG("Listing UST global events for channel %s", channel_name
);
2177 lttng_ht_lookup(ust_global
->channels
, (void *)channel_name
, &iter
);
2178 node
= lttng_ht_iter_get_node_str(&iter
);
2180 ret
= -LTTCOMM_UST_CHAN_NOT_FOUND
;
2184 uchan
= caa_container_of(&node
->node
, struct ltt_ust_channel
, node
.node
);
2186 nb_event
+= lttng_ht_get_count(uchan
->events
);
2188 if (nb_event
== 0) {
2193 DBG3("Listing UST global %d events", nb_event
);
2195 tmp
= zmalloc(nb_event
* sizeof(struct lttng_event
));
2197 ret
= -LTTCOMM_FATAL
;
2201 cds_lfht_for_each_entry(uchan
->events
->ht
, &iter
.iter
, uevent
, node
.node
) {
2202 strncpy(tmp
[i
].name
, uevent
->attr
.name
, LTTNG_SYMBOL_NAME_LEN
);
2203 tmp
[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
2204 tmp
[i
].enabled
= uevent
->enabled
;
2205 switch (uevent
->attr
.instrumentation
) {
2206 case LTTNG_UST_TRACEPOINT
:
2207 tmp
[i
].type
= LTTNG_EVENT_TRACEPOINT
;
2209 case LTTNG_UST_PROBE
:
2210 tmp
[i
].type
= LTTNG_EVENT_PROBE
;
2212 case LTTNG_UST_FUNCTION
:
2213 tmp
[i
].type
= LTTNG_EVENT_FUNCTION
;
2216 tmp
[i
].loglevel
= uevent
->attr
.loglevel
;
2217 switch (uevent
->attr
.loglevel_type
) {
2218 case LTTNG_UST_LOGLEVEL_ALL
:
2219 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_ALL
;
2221 case LTTNG_UST_LOGLEVEL_RANGE
:
2222 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_RANGE
;
2224 case LTTNG_UST_LOGLEVEL_SINGLE
:
2225 tmp
[i
].loglevel_type
= LTTNG_EVENT_LOGLEVEL_SINGLE
;
2240 * Fill lttng_event array of all kernel events in the channel.
2242 static int list_lttng_kernel_events(char *channel_name
,
2243 struct ltt_kernel_session
*kernel_session
, struct lttng_event
**events
)
2246 unsigned int nb_event
;
2247 struct ltt_kernel_event
*event
;
2248 struct ltt_kernel_channel
*kchan
;
2250 kchan
= trace_kernel_get_channel_by_name(channel_name
, kernel_session
);
2251 if (kchan
== NULL
) {
2252 ret
= LTTCOMM_KERN_CHAN_NOT_FOUND
;
2256 nb_event
= kchan
->event_count
;
2258 DBG("Listing events for channel %s", kchan
->channel
->name
);
2260 if (nb_event
== 0) {
2265 *events
= zmalloc(nb_event
* sizeof(struct lttng_event
));
2266 if (*events
== NULL
) {
2267 ret
= LTTCOMM_FATAL
;
2271 /* Kernel channels */
2272 cds_list_for_each_entry(event
, &kchan
->events_list
.head
, list
) {
2273 strncpy((*events
)[i
].name
, event
->event
->name
, LTTNG_SYMBOL_NAME_LEN
);
2274 (*events
)[i
].name
[LTTNG_SYMBOL_NAME_LEN
- 1] = '\0';
2275 (*events
)[i
].enabled
= event
->enabled
;
2276 switch (event
->event
->instrumentation
) {
2277 case LTTNG_KERNEL_TRACEPOINT
:
2278 (*events
)[i
].type
= LTTNG_EVENT_TRACEPOINT
;
2280 case LTTNG_KERNEL_KPROBE
:
2281 case LTTNG_KERNEL_KRETPROBE
:
2282 (*events
)[i
].type
= LTTNG_EVENT_PROBE
;
2283 memcpy(&(*events
)[i
].attr
.probe
, &event
->event
->u
.kprobe
,
2284 sizeof(struct lttng_kernel_kprobe
));
2286 case LTTNG_KERNEL_FUNCTION
:
2287 (*events
)[i
].type
= LTTNG_EVENT_FUNCTION
;
2288 memcpy(&((*events
)[i
].attr
.ftrace
), &event
->event
->u
.ftrace
,
2289 sizeof(struct lttng_kernel_function
));
2291 case LTTNG_KERNEL_NOOP
:
2292 (*events
)[i
].type
= LTTNG_EVENT_NOOP
;
2294 case LTTNG_KERNEL_SYSCALL
:
2295 (*events
)[i
].type
= LTTNG_EVENT_SYSCALL
;
2297 case LTTNG_KERNEL_ALL
:
2311 * Command LTTNG_DISABLE_CHANNEL processed by the client thread.
2313 static int cmd_disable_channel(struct ltt_session
*session
,
2314 int domain
, char *channel_name
)
2317 struct ltt_ust_session
*usess
;
2319 usess
= session
->ust_session
;
2322 case LTTNG_DOMAIN_KERNEL
:
2324 ret
= channel_kernel_disable(session
->kernel_session
,
2326 if (ret
!= LTTCOMM_OK
) {
2330 kernel_wait_quiescent(kernel_tracer_fd
);
2333 case LTTNG_DOMAIN_UST
:
2335 struct ltt_ust_channel
*uchan
;
2336 struct lttng_ht
*chan_ht
;
2338 chan_ht
= usess
->domain_global
.channels
;
2340 uchan
= trace_ust_find_channel_by_name(chan_ht
, channel_name
);
2341 if (uchan
== NULL
) {
2342 ret
= LTTCOMM_UST_CHAN_NOT_FOUND
;
2346 ret
= channel_ust_disable(usess
, domain
, uchan
);
2347 if (ret
!= LTTCOMM_OK
) {
2353 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2354 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2355 case LTTNG_DOMAIN_UST_PID
:
2358 ret
= LTTCOMM_UNKNOWN_DOMAIN
;
2369 * Command LTTNG_ENABLE_CHANNEL processed by the client thread.
2371 static int cmd_enable_channel(struct ltt_session
*session
,
2372 int domain
, struct lttng_channel
*attr
)
2375 struct ltt_ust_session
*usess
= session
->ust_session
;
2376 struct lttng_ht
*chan_ht
;
2378 DBG("Enabling channel %s for session %s", attr
->name
, session
->name
);
2381 case LTTNG_DOMAIN_KERNEL
:
2383 struct ltt_kernel_channel
*kchan
;
2385 kchan
= trace_kernel_get_channel_by_name(attr
->name
,
2386 session
->kernel_session
);
2387 if (kchan
== NULL
) {
2388 ret
= channel_kernel_create(session
->kernel_session
,
2389 attr
, kernel_poll_pipe
[1]);
2391 ret
= channel_kernel_enable(session
->kernel_session
, kchan
);
2394 if (ret
!= LTTCOMM_OK
) {
2398 kernel_wait_quiescent(kernel_tracer_fd
);
2401 case LTTNG_DOMAIN_UST
:
2403 struct ltt_ust_channel
*uchan
;
2405 chan_ht
= usess
->domain_global
.channels
;
2407 uchan
= trace_ust_find_channel_by_name(chan_ht
, attr
->name
);
2408 if (uchan
== NULL
) {
2409 ret
= channel_ust_create(usess
, domain
, attr
);
2411 ret
= channel_ust_enable(usess
, domain
, uchan
);
2416 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2417 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2418 case LTTNG_DOMAIN_UST_PID
:
2421 ret
= LTTCOMM_UNKNOWN_DOMAIN
;
2430 * Command LTTNG_DISABLE_EVENT processed by the client thread.
2432 static int cmd_disable_event(struct ltt_session
*session
, int domain
,
2433 char *channel_name
, char *event_name
)
2438 case LTTNG_DOMAIN_KERNEL
:
2440 struct ltt_kernel_channel
*kchan
;
2441 struct ltt_kernel_session
*ksess
;
2443 ksess
= session
->kernel_session
;
2445 kchan
= trace_kernel_get_channel_by_name(channel_name
, ksess
);
2446 if (kchan
== NULL
) {
2447 ret
= LTTCOMM_KERN_CHAN_NOT_FOUND
;
2451 ret
= event_kernel_disable_tracepoint(ksess
, kchan
, event_name
);
2452 if (ret
!= LTTCOMM_OK
) {
2456 kernel_wait_quiescent(kernel_tracer_fd
);
2459 case LTTNG_DOMAIN_UST
:
2461 struct ltt_ust_channel
*uchan
;
2462 struct ltt_ust_session
*usess
;
2464 usess
= session
->ust_session
;
2466 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2468 if (uchan
== NULL
) {
2469 ret
= LTTCOMM_UST_CHAN_NOT_FOUND
;
2473 ret
= event_ust_disable_tracepoint(usess
, domain
, uchan
, event_name
);
2474 if (ret
!= LTTCOMM_OK
) {
2478 DBG3("Disable UST event %s in channel %s completed", event_name
,
2483 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2484 case LTTNG_DOMAIN_UST_PID
:
2485 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2499 * Command LTTNG_DISABLE_ALL_EVENT processed by the client thread.
2501 static int cmd_disable_event_all(struct ltt_session
*session
, int domain
,
2507 case LTTNG_DOMAIN_KERNEL
:
2509 struct ltt_kernel_session
*ksess
;
2510 struct ltt_kernel_channel
*kchan
;
2512 ksess
= session
->kernel_session
;
2514 kchan
= trace_kernel_get_channel_by_name(channel_name
, ksess
);
2515 if (kchan
== NULL
) {
2516 ret
= LTTCOMM_KERN_CHAN_NOT_FOUND
;
2520 ret
= event_kernel_disable_all(ksess
, kchan
);
2521 if (ret
!= LTTCOMM_OK
) {
2525 kernel_wait_quiescent(kernel_tracer_fd
);
2528 case LTTNG_DOMAIN_UST
:
2530 struct ltt_ust_session
*usess
;
2531 struct ltt_ust_channel
*uchan
;
2533 usess
= session
->ust_session
;
2535 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2537 if (uchan
== NULL
) {
2538 ret
= LTTCOMM_UST_CHAN_NOT_FOUND
;
2542 ret
= event_ust_disable_all_tracepoints(usess
, domain
, uchan
);
2547 DBG3("Disable all UST events in channel %s completed", channel_name
);
2552 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2553 case LTTNG_DOMAIN_UST_PID
:
2554 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2568 * Command LTTNG_ADD_CONTEXT processed by the client thread.
2570 static int cmd_add_context(struct ltt_session
*session
, int domain
,
2571 char *channel_name
, char *event_name
, struct lttng_event_context
*ctx
)
2576 case LTTNG_DOMAIN_KERNEL
:
2577 /* Add kernel context to kernel tracer */
2578 ret
= context_kernel_add(session
->kernel_session
, ctx
,
2579 event_name
, channel_name
);
2580 if (ret
!= LTTCOMM_OK
) {
2584 case LTTNG_DOMAIN_UST
:
2586 struct ltt_ust_session
*usess
= session
->ust_session
;
2588 ret
= context_ust_add(usess
, domain
, ctx
, event_name
, channel_name
);
2589 if (ret
!= LTTCOMM_OK
) {
2595 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2596 case LTTNG_DOMAIN_UST_PID
:
2597 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2611 * Command LTTNG_ENABLE_EVENT processed by the client thread.
2613 static int cmd_enable_event(struct ltt_session
*session
, int domain
,
2614 char *channel_name
, struct lttng_event
*event
)
2617 struct lttng_channel
*attr
;
2618 struct ltt_ust_session
*usess
= session
->ust_session
;
2621 case LTTNG_DOMAIN_KERNEL
:
2623 struct ltt_kernel_channel
*kchan
;
2625 kchan
= trace_kernel_get_channel_by_name(channel_name
,
2626 session
->kernel_session
);
2627 if (kchan
== NULL
) {
2628 attr
= channel_new_default_attr(domain
);
2630 ret
= LTTCOMM_FATAL
;
2633 snprintf(attr
->name
, NAME_MAX
, "%s", channel_name
);
2635 /* This call will notify the kernel thread */
2636 ret
= channel_kernel_create(session
->kernel_session
,
2637 attr
, kernel_poll_pipe
[1]);
2638 if (ret
!= LTTCOMM_OK
) {
2645 /* Get the newly created kernel channel pointer */
2646 kchan
= trace_kernel_get_channel_by_name(channel_name
,
2647 session
->kernel_session
);
2648 if (kchan
== NULL
) {
2649 /* This sould not happen... */
2650 ret
= LTTCOMM_FATAL
;
2654 ret
= event_kernel_enable_tracepoint(session
->kernel_session
, kchan
,
2656 if (ret
!= LTTCOMM_OK
) {
2660 kernel_wait_quiescent(kernel_tracer_fd
);
2663 case LTTNG_DOMAIN_UST
:
2665 struct lttng_channel
*attr
;
2666 struct ltt_ust_channel
*uchan
;
2668 /* Get channel from global UST domain */
2669 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2671 if (uchan
== NULL
) {
2672 /* Create default channel */
2673 attr
= channel_new_default_attr(domain
);
2675 ret
= LTTCOMM_FATAL
;
2678 snprintf(attr
->name
, NAME_MAX
, "%s", channel_name
);
2679 attr
->name
[NAME_MAX
- 1] = '\0';
2681 ret
= channel_ust_create(usess
, domain
, attr
);
2682 if (ret
!= LTTCOMM_OK
) {
2688 /* Get the newly created channel reference back */
2689 uchan
= trace_ust_find_channel_by_name(
2690 usess
->domain_global
.channels
, channel_name
);
2691 if (uchan
== NULL
) {
2692 /* Something is really wrong */
2693 ret
= LTTCOMM_FATAL
;
2698 /* At this point, the session and channel exist on the tracer */
2699 ret
= event_ust_enable_tracepoint(usess
, domain
, uchan
, event
);
2700 if (ret
!= LTTCOMM_OK
) {
2706 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2707 case LTTNG_DOMAIN_UST_PID
:
2708 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2722 * Command LTTNG_ENABLE_ALL_EVENT processed by the client thread.
2724 static int cmd_enable_event_all(struct ltt_session
*session
, int domain
,
2725 char *channel_name
, int event_type
)
2728 struct ltt_kernel_channel
*kchan
;
2731 case LTTNG_DOMAIN_KERNEL
:
2732 kchan
= trace_kernel_get_channel_by_name(channel_name
,
2733 session
->kernel_session
);
2734 if (kchan
== NULL
) {
2735 /* This call will notify the kernel thread */
2736 ret
= channel_kernel_create(session
->kernel_session
, NULL
,
2737 kernel_poll_pipe
[1]);
2738 if (ret
!= LTTCOMM_OK
) {
2742 /* Get the newly created kernel channel pointer */
2743 kchan
= trace_kernel_get_channel_by_name(channel_name
,
2744 session
->kernel_session
);
2745 if (kchan
== NULL
) {
2746 /* This sould not happen... */
2747 ret
= LTTCOMM_FATAL
;
2753 switch (event_type
) {
2754 case LTTNG_EVENT_SYSCALL
:
2755 ret
= event_kernel_enable_all_syscalls(session
->kernel_session
,
2756 kchan
, kernel_tracer_fd
);
2758 case LTTNG_EVENT_TRACEPOINT
:
2760 * This call enables all LTTNG_KERNEL_TRACEPOINTS and
2761 * events already registered to the channel.
2763 ret
= event_kernel_enable_all_tracepoints(session
->kernel_session
,
2764 kchan
, kernel_tracer_fd
);
2766 case LTTNG_EVENT_ALL
:
2767 /* Enable syscalls and tracepoints */
2768 ret
= event_kernel_enable_all(session
->kernel_session
,
2769 kchan
, kernel_tracer_fd
);
2772 ret
= LTTCOMM_KERN_ENABLE_FAIL
;
2776 /* Manage return value */
2777 if (ret
!= LTTCOMM_OK
) {
2781 kernel_wait_quiescent(kernel_tracer_fd
);
2783 case LTTNG_DOMAIN_UST
:
2785 struct lttng_channel
*attr
;
2786 struct ltt_ust_channel
*uchan
;
2787 struct ltt_ust_session
*usess
= session
->ust_session
;
2789 /* Get channel from global UST domain */
2790 uchan
= trace_ust_find_channel_by_name(usess
->domain_global
.channels
,
2792 if (uchan
== NULL
) {
2793 /* Create default channel */
2794 attr
= channel_new_default_attr(domain
);
2796 ret
= LTTCOMM_FATAL
;
2799 snprintf(attr
->name
, NAME_MAX
, "%s", channel_name
);
2800 attr
->name
[NAME_MAX
- 1] = '\0';
2802 /* Use the internal command enable channel */
2803 ret
= channel_ust_create(usess
, domain
, attr
);
2804 if (ret
!= LTTCOMM_OK
) {
2810 /* Get the newly created channel reference back */
2811 uchan
= trace_ust_find_channel_by_name(
2812 usess
->domain_global
.channels
, channel_name
);
2813 if (uchan
== NULL
) {
2814 /* Something is really wrong */
2815 ret
= LTTCOMM_FATAL
;
2820 /* At this point, the session and channel exist on the tracer */
2822 switch (event_type
) {
2823 case LTTNG_EVENT_ALL
:
2824 case LTTNG_EVENT_TRACEPOINT
:
2825 ret
= event_ust_enable_all_tracepoints(usess
, domain
, uchan
);
2826 if (ret
!= LTTCOMM_OK
) {
2831 ret
= LTTCOMM_UST_ENABLE_FAIL
;
2835 /* Manage return value */
2836 if (ret
!= LTTCOMM_OK
) {
2843 case LTTNG_DOMAIN_UST_EXEC_NAME
:
2844 case LTTNG_DOMAIN_UST_PID
:
2845 case LTTNG_DOMAIN_UST_PID_FOLLOW_CHILDREN
:
2859 * Command LTTNG_LIST_TRACEPOINTS processed by the client thread.
2861 static ssize_t
cmd_list_tracepoints(int domain
, struct lttng_event
**events
)
2864 ssize_t nb_events
= 0;
2867 case LTTNG_DOMAIN_KERNEL
:
2868 nb_events
= kernel_list_events(kernel_tracer_fd
, events
);
2869 if (nb_events
< 0) {
2870 ret
= LTTCOMM_KERN_LIST_FAIL
;
2874 case LTTNG_DOMAIN_UST
:
2875 nb_events
= ust_app_list_events(events
);
2876 if (nb_events
< 0) {
2877 ret
= LTTCOMM_UST_LIST_FAIL
;
2889 /* Return negative value to differentiate return code */
2894 * Command LTTNG_START_TRACE processed by the client thread.
2896 static int cmd_start_trace(struct ltt_session
*session
)
2899 struct ltt_kernel_session
*ksession
;
2900 struct ltt_ust_session
*usess
;
2903 ksession
= session
->kernel_session
;
2904 usess
= session
->ust_session
;
2906 if (session
->enabled
) {
2907 /* Already started. */
2908 ret
= LTTCOMM_TRACE_ALREADY_STARTED
;
2912 session
->enabled
= 1;
2914 /* Kernel tracing */
2915 if (ksession
!= NULL
) {
2916 struct ltt_kernel_channel
*kchan
;
2918 /* Open kernel metadata */
2919 if (ksession
->metadata
== NULL
) {
2920 ret
= kernel_open_metadata(ksession
, ksession
->trace_path
);
2922 ret
= LTTCOMM_KERN_META_FAIL
;
2927 /* Open kernel metadata stream */
2928 if (ksession
->metadata_stream_fd
< 0) {
2929 ret
= kernel_open_metadata_stream(ksession
);
2931 ERR("Kernel create metadata stream failed");
2932 ret
= LTTCOMM_KERN_STREAM_FAIL
;
2937 /* For each channel */
2938 cds_list_for_each_entry(kchan
, &ksession
->channel_list
.head
, list
) {
2939 if (kchan
->stream_count
== 0) {
2940 ret
= kernel_open_channel_stream(kchan
);
2942 ret
= LTTCOMM_KERN_STREAM_FAIL
;
2945 /* Update the stream global counter */
2946 ksession
->stream_count_global
+= ret
;
2950 /* Setup kernel consumer socket and send fds to it */
2951 ret
= init_kernel_tracing(ksession
);
2953 ret
= LTTCOMM_KERN_START_FAIL
;
2957 /* This start the kernel tracing */
2958 ret
= kernel_start_session(ksession
);
2960 ret
= LTTCOMM_KERN_START_FAIL
;
2964 /* Quiescent wait after starting trace */
2965 kernel_wait_quiescent(kernel_tracer_fd
);
2968 /* Flag session that trace should start automatically */
2970 usess
->start_trace
= 1;
2972 ret
= ust_app_start_trace_all(usess
);
2974 ret
= LTTCOMM_UST_START_FAIL
;
2986 * Command LTTNG_STOP_TRACE processed by the client thread.
2988 static int cmd_stop_trace(struct ltt_session
*session
)
2991 struct ltt_kernel_channel
*kchan
;
2992 struct ltt_kernel_session
*ksession
;
2993 struct ltt_ust_session
*usess
;
2996 ksession
= session
->kernel_session
;
2997 usess
= session
->ust_session
;
2999 if (!session
->enabled
) {
3000 ret
= LTTCOMM_TRACE_ALREADY_STOPPED
;
3004 session
->enabled
= 0;
3007 if (ksession
!= NULL
) {
3008 DBG("Stop kernel tracing");
3010 /* Flush all buffers before stopping */
3011 ret
= kernel_metadata_flush_buffer(ksession
->metadata_stream_fd
);
3013 ERR("Kernel metadata flush failed");
3016 cds_list_for_each_entry(kchan
, &ksession
->channel_list
.head
, list
) {
3017 ret
= kernel_flush_buffer(kchan
);
3019 ERR("Kernel flush buffer error");
3023 ret
= kernel_stop_session(ksession
);
3025 ret
= LTTCOMM_KERN_STOP_FAIL
;
3029 kernel_wait_quiescent(kernel_tracer_fd
);
3033 usess
->start_trace
= 0;
3035 ret
= ust_app_stop_trace_all(usess
);
3037 ret
= LTTCOMM_UST_STOP_FAIL
;
3049 * Command LTTNG_CREATE_SESSION processed by the client thread.
3051 static int cmd_create_session(char *name
, char *path
, lttng_sock_cred
*creds
)
3055 ret
= session_create(name
, path
, LTTNG_SOCK_GET_UID_CRED(creds
),
3056 LTTNG_SOCK_GET_GID_CRED(creds
));
3057 if (ret
!= LTTCOMM_OK
) {
3068 * Command LTTNG_DESTROY_SESSION processed by the client thread.
3070 static int cmd_destroy_session(struct ltt_session
*session
, char *name
)
3074 /* Clean kernel session teardown */
3075 teardown_kernel_session(session
);
3076 /* UST session teardown */
3077 teardown_ust_session(session
);
3080 * Must notify the kernel thread here to update it's poll setin order
3081 * to remove the channel(s)' fd just destroyed.
3083 ret
= notify_thread_pipe(kernel_poll_pipe
[1]);
3085 PERROR("write kernel poll pipe");
3088 ret
= session_destroy(session
);
3094 * Command LTTNG_CALIBRATE processed by the client thread.
3096 static int cmd_calibrate(int domain
, struct lttng_calibrate
*calibrate
)
3101 case LTTNG_DOMAIN_KERNEL
:
3103 struct lttng_kernel_calibrate kcalibrate
;
3105 kcalibrate
.type
= calibrate
->type
;
3106 ret
= kernel_calibrate(kernel_tracer_fd
, &kcalibrate
);
3108 ret
= LTTCOMM_KERN_ENABLE_FAIL
;
3113 case LTTNG_DOMAIN_UST
:
3115 struct lttng_ust_calibrate ucalibrate
;
3117 ucalibrate
.type
= calibrate
->type
;
3118 ret
= ust_app_calibrate_glb(&ucalibrate
);
3120 ret
= LTTCOMM_UST_CALIBRATE_FAIL
;
3137 * Command LTTNG_REGISTER_CONSUMER processed by the client thread.
3139 static int cmd_register_consumer(struct ltt_session
*session
, int domain
,
3145 case LTTNG_DOMAIN_KERNEL
:
3146 /* Can't register a consumer if there is already one */
3147 if (session
->kernel_session
->consumer_fds_sent
!= 0) {
3148 ret
= LTTCOMM_KERN_CONSUMER_FAIL
;
3152 sock
= lttcomm_connect_unix_sock(sock_path
);
3154 ret
= LTTCOMM_CONNECT_FAIL
;
3158 session
->kernel_session
->consumer_fd
= sock
;
3161 /* TODO: Userspace tracing */
3173 * Command LTTNG_LIST_DOMAINS processed by the client thread.
3175 static ssize_t
cmd_list_domains(struct ltt_session
*session
,
3176 struct lttng_domain
**domains
)
3181 if (session
->kernel_session
!= NULL
) {
3182 DBG3("Listing domains found kernel domain");
3186 if (session
->ust_session
!= NULL
) {
3187 DBG3("Listing domains found UST global domain");
3191 *domains
= zmalloc(nb_dom
* sizeof(struct lttng_domain
));
3192 if (*domains
== NULL
) {
3193 ret
= -LTTCOMM_FATAL
;
3197 if (session
->kernel_session
!= NULL
) {
3198 (*domains
)[index
].type
= LTTNG_DOMAIN_KERNEL
;
3202 if (session
->ust_session
!= NULL
) {
3203 (*domains
)[index
].type
= LTTNG_DOMAIN_UST
;
3214 * Command LTTNG_LIST_CHANNELS processed by the client thread.
3216 static ssize_t
cmd_list_channels(int domain
, struct ltt_session
*session
,
3217 struct lttng_channel
**channels
)
3220 ssize_t nb_chan
= 0;
3223 case LTTNG_DOMAIN_KERNEL
:
3224 if (session
->kernel_session
!= NULL
) {
3225 nb_chan
= session
->kernel_session
->channel_count
;
3227 DBG3("Number of kernel channels %zd", nb_chan
);
3229 case LTTNG_DOMAIN_UST
:
3230 if (session
->ust_session
!= NULL
) {
3231 nb_chan
= lttng_ht_get_count(
3232 session
->ust_session
->domain_global
.channels
);
3234 DBG3("Number of UST global channels %zd", nb_chan
);
3243 *channels
= zmalloc(nb_chan
* sizeof(struct lttng_channel
));
3244 if (*channels
== NULL
) {
3245 ret
= -LTTCOMM_FATAL
;
3249 list_lttng_channels(domain
, session
, *channels
);
3261 * Command LTTNG_LIST_EVENTS processed by the client thread.
3263 static ssize_t
cmd_list_events(int domain
, struct ltt_session
*session
,
3264 char *channel_name
, struct lttng_event
**events
)
3267 ssize_t nb_event
= 0;
3270 case LTTNG_DOMAIN_KERNEL
:
3271 if (session
->kernel_session
!= NULL
) {
3272 nb_event
= list_lttng_kernel_events(channel_name
,
3273 session
->kernel_session
, events
);
3276 case LTTNG_DOMAIN_UST
:
3278 if (session
->ust_session
!= NULL
) {
3279 nb_event
= list_lttng_ust_global_events(channel_name
,
3280 &session
->ust_session
->domain_global
, events
);
3296 * Process the command requested by the lttng client within the command
3297 * context structure. This function make sure that the return structure (llm)
3298 * is set and ready for transmission before returning.
3300 * Return any error encountered or 0 for success.
3302 static int process_client_msg(struct command_ctx
*cmd_ctx
)
3304 int ret
= LTTCOMM_OK
;
3305 int need_tracing_session
= 1;
3308 DBG("Processing client command %d", cmd_ctx
->lsm
->cmd_type
);
3310 switch (cmd_ctx
->lsm
->cmd_type
) {
3311 case LTTNG_CREATE_SESSION
:
3312 case LTTNG_DESTROY_SESSION
:
3313 case LTTNG_LIST_SESSIONS
:
3314 case LTTNG_LIST_DOMAINS
:
3315 case LTTNG_START_TRACE
:
3316 case LTTNG_STOP_TRACE
:
3323 if (opt_no_kernel
&& need_domain
3324 && cmd_ctx
->lsm
->domain
.type
== LTTNG_DOMAIN_KERNEL
) {
3326 ret
= LTTCOMM_NEED_ROOT_SESSIOND
;
3328 ret
= LTTCOMM_KERN_NA
;
3334 * Check for command that don't needs to allocate a returned payload. We do
3335 * this here so we don't have to make the call for no payload at each
3338 switch(cmd_ctx
->lsm
->cmd_type
) {
3339 case LTTNG_LIST_SESSIONS
:
3340 case LTTNG_LIST_TRACEPOINTS
:
3341 case LTTNG_LIST_DOMAINS
:
3342 case LTTNG_LIST_CHANNELS
:
3343 case LTTNG_LIST_EVENTS
:
3346 /* Setup lttng message with no payload */
3347 ret
= setup_lttng_msg(cmd_ctx
, 0);
3349 /* This label does not try to unlock the session */
3350 goto init_setup_error
;
3354 /* Commands that DO NOT need a session. */
3355 switch (cmd_ctx
->lsm
->cmd_type
) {
3356 case LTTNG_CREATE_SESSION
:
3357 case LTTNG_CALIBRATE
:
3358 case LTTNG_LIST_SESSIONS
:
3359 case LTTNG_LIST_TRACEPOINTS
:
3360 need_tracing_session
= 0;
3363 DBG("Getting session %s by name", cmd_ctx
->lsm
->session
.name
);
3365 * We keep the session list lock across _all_ commands
3366 * for now, because the per-session lock does not
3367 * handle teardown properly.
3369 session_lock_list();
3370 cmd_ctx
->session
= session_find_by_name(cmd_ctx
->lsm
->session
.name
);
3371 if (cmd_ctx
->session
== NULL
) {
3372 if (cmd_ctx
->lsm
->session
.name
!= NULL
) {
3373 ret
= LTTCOMM_SESS_NOT_FOUND
;
3375 /* If no session name specified */
3376 ret
= LTTCOMM_SELECT_SESS
;
3380 /* Acquire lock for the session */
3381 session_lock(cmd_ctx
->session
);
3390 * Check domain type for specific "pre-action".
3392 switch (cmd_ctx
->lsm
->domain
.type
) {
3393 case LTTNG_DOMAIN_KERNEL
:
3395 ret
= LTTCOMM_NEED_ROOT_SESSIOND
;
3399 /* Kernel tracer check */
3400 if (kernel_tracer_fd
== -1) {
3401 /* Basically, load kernel tracer modules */
3402 ret
= init_kernel_tracer();
3408 /* Consumer is in an ERROR state. Report back to client */
3409 if (uatomic_read(&kernel_consumerd_state
) == CONSUMER_ERROR
) {
3410 ret
= LTTCOMM_NO_KERNCONSUMERD
;
3414 /* Need a session for kernel command */
3415 if (need_tracing_session
) {
3416 if (cmd_ctx
->session
->kernel_session
== NULL
) {
3417 ret
= create_kernel_session(cmd_ctx
->session
);
3419 ret
= LTTCOMM_KERN_SESS_FAIL
;
3424 /* Start the kernel consumer daemon */
3425 pthread_mutex_lock(&kconsumer_data
.pid_mutex
);
3426 if (kconsumer_data
.pid
== 0 &&
3427 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
3428 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
3429 ret
= start_consumerd(&kconsumer_data
);
3431 ret
= LTTCOMM_KERN_CONSUMER_FAIL
;
3434 uatomic_set(&kernel_consumerd_state
, CONSUMER_STARTED
);
3436 pthread_mutex_unlock(&kconsumer_data
.pid_mutex
);
3441 case LTTNG_DOMAIN_UST
:
3443 /* Consumer is in an ERROR state. Report back to client */
3444 if (uatomic_read(&ust_consumerd_state
) == CONSUMER_ERROR
) {
3445 ret
= LTTCOMM_NO_USTCONSUMERD
;
3449 if (need_tracing_session
) {
3450 if (cmd_ctx
->session
->ust_session
== NULL
) {
3451 ret
= create_ust_session(cmd_ctx
->session
,
3452 &cmd_ctx
->lsm
->domain
);
3453 if (ret
!= LTTCOMM_OK
) {
3457 /* Start the UST consumer daemons */
3459 pthread_mutex_lock(&ustconsumer64_data
.pid_mutex
);
3460 if (consumerd64_bin
[0] != '\0' &&
3461 ustconsumer64_data
.pid
== 0 &&
3462 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
3463 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
3464 ret
= start_consumerd(&ustconsumer64_data
);
3466 ret
= LTTCOMM_UST_CONSUMER64_FAIL
;
3467 ust_consumerd64_fd
= -EINVAL
;
3471 ust_consumerd64_fd
= ustconsumer64_data
.cmd_sock
;
3472 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
3474 pthread_mutex_unlock(&ustconsumer64_data
.pid_mutex
);
3477 if (consumerd32_bin
[0] != '\0' &&
3478 ustconsumer32_data
.pid
== 0 &&
3479 cmd_ctx
->lsm
->cmd_type
!= LTTNG_REGISTER_CONSUMER
) {
3480 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
3481 ret
= start_consumerd(&ustconsumer32_data
);
3483 ret
= LTTCOMM_UST_CONSUMER32_FAIL
;
3484 ust_consumerd32_fd
= -EINVAL
;
3488 ust_consumerd32_fd
= ustconsumer32_data
.cmd_sock
;
3489 uatomic_set(&ust_consumerd_state
, CONSUMER_STARTED
);
3491 pthread_mutex_unlock(&ustconsumer32_data
.pid_mutex
);
3501 /* Validate consumer daemon state when start/stop trace command */
3502 if (cmd_ctx
->lsm
->cmd_type
== LTTNG_START_TRACE
||
3503 cmd_ctx
->lsm
->cmd_type
== LTTNG_STOP_TRACE
) {
3504 switch (cmd_ctx
->lsm
->domain
.type
) {
3505 case LTTNG_DOMAIN_UST
:
3506 if (uatomic_read(&ust_consumerd_state
) != CONSUMER_STARTED
) {
3507 ret
= LTTCOMM_NO_USTCONSUMERD
;
3511 case LTTNG_DOMAIN_KERNEL
:
3512 if (uatomic_read(&kernel_consumerd_state
) != CONSUMER_STARTED
) {
3513 ret
= LTTCOMM_NO_KERNCONSUMERD
;
3521 * Check that the UID or GID match that of the tracing session.
3522 * The root user can interact with all sessions.
3524 if (need_tracing_session
) {
3525 if (!session_access_ok(cmd_ctx
->session
,
3526 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3527 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
))) {
3528 ret
= LTTCOMM_EPERM
;
3533 /* Process by command type */
3534 switch (cmd_ctx
->lsm
->cmd_type
) {
3535 case LTTNG_ADD_CONTEXT
:
3537 ret
= cmd_add_context(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3538 cmd_ctx
->lsm
->u
.context
.channel_name
,
3539 cmd_ctx
->lsm
->u
.context
.event_name
,
3540 &cmd_ctx
->lsm
->u
.context
.ctx
);
3543 case LTTNG_DISABLE_CHANNEL
:
3545 ret
= cmd_disable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3546 cmd_ctx
->lsm
->u
.disable
.channel_name
);
3549 case LTTNG_DISABLE_EVENT
:
3551 ret
= cmd_disable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3552 cmd_ctx
->lsm
->u
.disable
.channel_name
,
3553 cmd_ctx
->lsm
->u
.disable
.name
);
3556 case LTTNG_DISABLE_ALL_EVENT
:
3558 DBG("Disabling all events");
3560 ret
= cmd_disable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3561 cmd_ctx
->lsm
->u
.disable
.channel_name
);
3564 case LTTNG_ENABLE_CHANNEL
:
3566 ret
= cmd_enable_channel(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3567 &cmd_ctx
->lsm
->u
.channel
.chan
);
3570 case LTTNG_ENABLE_EVENT
:
3572 ret
= cmd_enable_event(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3573 cmd_ctx
->lsm
->u
.enable
.channel_name
,
3574 &cmd_ctx
->lsm
->u
.enable
.event
);
3577 case LTTNG_ENABLE_ALL_EVENT
:
3579 DBG("Enabling all events");
3581 ret
= cmd_enable_event_all(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3582 cmd_ctx
->lsm
->u
.enable
.channel_name
,
3583 cmd_ctx
->lsm
->u
.enable
.event
.type
);
3586 case LTTNG_LIST_TRACEPOINTS
:
3588 struct lttng_event
*events
;
3591 nb_events
= cmd_list_tracepoints(cmd_ctx
->lsm
->domain
.type
, &events
);
3592 if (nb_events
< 0) {
3598 * Setup lttng message with payload size set to the event list size in
3599 * bytes and then copy list into the llm payload.
3601 ret
= setup_lttng_msg(cmd_ctx
, sizeof(struct lttng_event
) * nb_events
);
3607 /* Copy event list into message payload */
3608 memcpy(cmd_ctx
->llm
->payload
, events
,
3609 sizeof(struct lttng_event
) * nb_events
);
3616 case LTTNG_START_TRACE
:
3618 ret
= cmd_start_trace(cmd_ctx
->session
);
3621 case LTTNG_STOP_TRACE
:
3623 ret
= cmd_stop_trace(cmd_ctx
->session
);
3626 case LTTNG_CREATE_SESSION
:
3628 ret
= cmd_create_session(cmd_ctx
->lsm
->session
.name
,
3629 cmd_ctx
->lsm
->session
.path
, &cmd_ctx
->creds
);
3632 case LTTNG_DESTROY_SESSION
:
3634 ret
= cmd_destroy_session(cmd_ctx
->session
,
3635 cmd_ctx
->lsm
->session
.name
);
3637 * Set session to NULL so we do not unlock it after
3640 cmd_ctx
->session
= NULL
;
3643 case LTTNG_LIST_DOMAINS
:
3646 struct lttng_domain
*domains
;
3648 nb_dom
= cmd_list_domains(cmd_ctx
->session
, &domains
);
3654 ret
= setup_lttng_msg(cmd_ctx
, nb_dom
* sizeof(struct lttng_domain
));
3659 /* Copy event list into message payload */
3660 memcpy(cmd_ctx
->llm
->payload
, domains
,
3661 nb_dom
* sizeof(struct lttng_domain
));
3668 case LTTNG_LIST_CHANNELS
:
3671 struct lttng_channel
*channels
;
3673 nb_chan
= cmd_list_channels(cmd_ctx
->lsm
->domain
.type
,
3674 cmd_ctx
->session
, &channels
);
3680 ret
= setup_lttng_msg(cmd_ctx
, nb_chan
* sizeof(struct lttng_channel
));
3685 /* Copy event list into message payload */
3686 memcpy(cmd_ctx
->llm
->payload
, channels
,
3687 nb_chan
* sizeof(struct lttng_channel
));
3694 case LTTNG_LIST_EVENTS
:
3697 struct lttng_event
*events
= NULL
;
3699 nb_event
= cmd_list_events(cmd_ctx
->lsm
->domain
.type
, cmd_ctx
->session
,
3700 cmd_ctx
->lsm
->u
.list
.channel_name
, &events
);
3706 ret
= setup_lttng_msg(cmd_ctx
, nb_event
* sizeof(struct lttng_event
));
3711 /* Copy event list into message payload */
3712 memcpy(cmd_ctx
->llm
->payload
, events
,
3713 nb_event
* sizeof(struct lttng_event
));
3720 case LTTNG_LIST_SESSIONS
:
3722 unsigned int nr_sessions
;
3724 session_lock_list();
3725 nr_sessions
= lttng_sessions_count(
3726 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3727 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
3729 ret
= setup_lttng_msg(cmd_ctx
, sizeof(struct lttng_session
) * nr_sessions
);
3731 session_unlock_list();
3735 /* Filled the session array */
3736 list_lttng_sessions((struct lttng_session
*)(cmd_ctx
->llm
->payload
),
3737 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx
->creds
),
3738 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx
->creds
));
3740 session_unlock_list();
3745 case LTTNG_CALIBRATE
:
3747 ret
= cmd_calibrate(cmd_ctx
->lsm
->domain
.type
,
3748 &cmd_ctx
->lsm
->u
.calibrate
);
3751 case LTTNG_REGISTER_CONSUMER
:
3753 ret
= cmd_register_consumer(cmd_ctx
->session
, cmd_ctx
->lsm
->domain
.type
,
3754 cmd_ctx
->lsm
->u
.reg
.path
);
3763 if (cmd_ctx
->llm
== NULL
) {
3764 DBG("Missing llm structure. Allocating one.");
3765 if (setup_lttng_msg(cmd_ctx
, 0) < 0) {
3769 /* Set return code */
3770 cmd_ctx
->llm
->ret_code
= ret
;
3772 if (cmd_ctx
->session
) {
3773 session_unlock(cmd_ctx
->session
);
3775 if (need_tracing_session
) {
3776 session_unlock_list();
3783 * This thread manage all clients request using the unix client socket for
3786 static void *thread_manage_clients(void *data
)
3788 int sock
= -1, ret
, i
, pollfd
;
3789 uint32_t revents
, nb_fd
;
3790 struct command_ctx
*cmd_ctx
= NULL
;
3791 struct lttng_poll_event events
;
3793 DBG("[thread] Manage client started");
3795 rcu_register_thread();
3797 ret
= lttcomm_listen_unix_sock(client_sock
);
3803 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3804 * more will be added to this poll set.
3806 ret
= create_thread_poll_set(&events
, 2);
3811 /* Add the application registration socket */
3812 ret
= lttng_poll_add(&events
, client_sock
, LPOLLIN
| LPOLLPRI
);
3818 * Notify parent pid that we are ready to accept command for client side.
3820 if (opt_sig_parent
) {
3821 kill(ppid
, SIGUSR1
);
3825 DBG("Accepting client command ...");
3827 nb_fd
= LTTNG_POLL_GETNB(&events
);
3829 /* Inifinite blocking call, waiting for transmission */
3831 ret
= lttng_poll_wait(&events
, -1);
3834 * Restart interrupted system call.
3836 if (errno
== EINTR
) {
3842 for (i
= 0; i
< nb_fd
; i
++) {
3843 /* Fetch once the poll data */
3844 revents
= LTTNG_POLL_GETEV(&events
, i
);
3845 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
3847 /* Thread quit pipe has been closed. Killing thread. */
3848 ret
= check_thread_quit_pipe(pollfd
, revents
);
3853 /* Event on the registration socket */
3854 if (pollfd
== client_sock
) {
3855 if (revents
& (LPOLLERR
| LPOLLHUP
| LPOLLRDHUP
)) {
3856 ERR("Client socket poll error");
3862 DBG("Wait for client response");
3864 sock
= lttcomm_accept_unix_sock(client_sock
);
3869 /* Set socket option for credentials retrieval */
3870 ret
= lttcomm_setsockopt_creds_unix_sock(sock
);
3875 /* Allocate context command to process the client request */
3876 cmd_ctx
= zmalloc(sizeof(struct command_ctx
));
3877 if (cmd_ctx
== NULL
) {
3878 PERROR("zmalloc cmd_ctx");
3882 /* Allocate data buffer for reception */
3883 cmd_ctx
->lsm
= zmalloc(sizeof(struct lttcomm_session_msg
));
3884 if (cmd_ctx
->lsm
== NULL
) {
3885 PERROR("zmalloc cmd_ctx->lsm");
3889 cmd_ctx
->llm
= NULL
;
3890 cmd_ctx
->session
= NULL
;
3893 * Data is received from the lttng client. The struct
3894 * lttcomm_session_msg (lsm) contains the command and data request of
3897 DBG("Receiving data from client ...");
3898 ret
= lttcomm_recv_creds_unix_sock(sock
, cmd_ctx
->lsm
,
3899 sizeof(struct lttcomm_session_msg
), &cmd_ctx
->creds
);
3901 DBG("Nothing recv() from client... continuing");
3907 clean_command_ctx(&cmd_ctx
);
3911 // TODO: Validate cmd_ctx including sanity check for
3912 // security purpose.
3914 rcu_thread_online();
3916 * This function dispatch the work to the kernel or userspace tracer
3917 * libs and fill the lttcomm_lttng_msg data structure of all the needed
3918 * informations for the client. The command context struct contains
3919 * everything this function may needs.
3921 ret
= process_client_msg(cmd_ctx
);
3922 rcu_thread_offline();
3925 * TODO: Inform client somehow of the fatal error. At
3926 * this point, ret < 0 means that a zmalloc failed
3927 * (ENOMEM). Error detected but still accept command.
3929 clean_command_ctx(&cmd_ctx
);
3933 DBG("Sending response (size: %d, retcode: %s)",
3934 cmd_ctx
->lttng_msg_size
,
3935 lttng_strerror(-cmd_ctx
->llm
->ret_code
));
3936 ret
= send_unix_sock(sock
, cmd_ctx
->llm
, cmd_ctx
->lttng_msg_size
);
3938 ERR("Failed to send data back to client");
3941 /* End of transmission */
3948 clean_command_ctx(&cmd_ctx
);
3952 DBG("Client thread dying");
3953 unlink(client_unix_sock_path
);
3954 if (client_sock
>= 0) {
3955 ret
= close(client_sock
);
3967 lttng_poll_clean(&events
);
3968 clean_command_ctx(&cmd_ctx
);
3970 rcu_unregister_thread();
3976 * usage function on stderr
3978 static void usage(void)
3980 fprintf(stderr
, "Usage: %s OPTIONS\n\nOptions:\n", progname
);
3981 fprintf(stderr
, " -h, --help Display this usage.\n");
3982 fprintf(stderr
, " -c, --client-sock PATH Specify path for the client unix socket\n");
3983 fprintf(stderr
, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3984 fprintf(stderr
, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3985 fprintf(stderr
, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3986 fprintf(stderr
, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
3987 fprintf(stderr
, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
3988 fprintf(stderr
, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
3989 fprintf(stderr
, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
3990 fprintf(stderr
, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
3991 fprintf(stderr
, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
3992 fprintf(stderr
, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
3993 fprintf(stderr
, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
3994 fprintf(stderr
, " -d, --daemonize Start as a daemon.\n");
3995 fprintf(stderr
, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3996 fprintf(stderr
, " -V, --version Show version number.\n");
3997 fprintf(stderr
, " -S, --sig-parent Send SIGCHLD to parent pid to notify readiness.\n");
3998 fprintf(stderr
, " -q, --quiet No output at all.\n");
3999 fprintf(stderr
, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
4000 fprintf(stderr
, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
4001 fprintf(stderr
, " --no-kernel Disable kernel tracer\n");
4005 * daemon argument parsing
4007 static int parse_args(int argc
, char **argv
)
4011 static struct option long_options
[] = {
4012 { "client-sock", 1, 0, 'c' },
4013 { "apps-sock", 1, 0, 'a' },
4014 { "kconsumerd-cmd-sock", 1, 0, 'C' },
4015 { "kconsumerd-err-sock", 1, 0, 'E' },
4016 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
4017 { "ustconsumerd32-err-sock", 1, 0, 'H' },
4018 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
4019 { "ustconsumerd64-err-sock", 1, 0, 'F' },
4020 { "consumerd32-path", 1, 0, 'u' },
4021 { "consumerd32-libdir", 1, 0, 'U' },
4022 { "consumerd64-path", 1, 0, 't' },
4023 { "consumerd64-libdir", 1, 0, 'T' },
4024 { "daemonize", 0, 0, 'd' },
4025 { "sig-parent", 0, 0, 'S' },
4026 { "help", 0, 0, 'h' },
4027 { "group", 1, 0, 'g' },
4028 { "version", 0, 0, 'V' },
4029 { "quiet", 0, 0, 'q' },
4030 { "verbose", 0, 0, 'v' },
4031 { "verbose-consumer", 0, 0, 'Z' },
4032 { "no-kernel", 0, 0, 'N' },
4037 int option_index
= 0;
4038 c
= getopt_long(argc
, argv
, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t",
4039 long_options
, &option_index
);
4046 fprintf(stderr
, "option %s", long_options
[option_index
].name
);
4048 fprintf(stderr
, " with arg %s\n", optarg
);
4052 snprintf(client_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4055 snprintf(apps_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4061 opt_tracing_group
= optarg
;
4067 fprintf(stdout
, "%s\n", VERSION
);
4073 snprintf(kconsumer_data
.err_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4076 snprintf(kconsumer_data
.cmd_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4079 snprintf(ustconsumer64_data
.err_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4082 snprintf(ustconsumer64_data
.cmd_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4085 snprintf(ustconsumer32_data
.err_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4088 snprintf(ustconsumer32_data
.cmd_unix_sock_path
, PATH_MAX
, "%s", optarg
);
4094 lttng_opt_quiet
= 1;
4097 /* Verbose level can increase using multiple -v */
4098 lttng_opt_verbose
+= 1;
4101 opt_verbose_consumer
+= 1;
4104 consumerd32_bin
= optarg
;
4107 consumerd32_libdir
= optarg
;
4110 consumerd64_bin
= optarg
;
4113 consumerd64_libdir
= optarg
;
4116 /* Unknown option or other error.
4117 * Error is printed by getopt, just return */
4126 * Creates the two needed socket by the daemon.
4127 * apps_sock - The communication socket for all UST apps.
4128 * client_sock - The communication of the cli tool (lttng).
4130 static int init_daemon_socket(void)
4135 old_umask
= umask(0);
4137 /* Create client tool unix socket */
4138 client_sock
= lttcomm_create_unix_sock(client_unix_sock_path
);
4139 if (client_sock
< 0) {
4140 ERR("Create unix sock failed: %s", client_unix_sock_path
);
4145 /* File permission MUST be 660 */
4146 ret
= chmod(client_unix_sock_path
, S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
4148 ERR("Set file permissions failed: %s", client_unix_sock_path
);
4153 /* Create the application unix socket */
4154 apps_sock
= lttcomm_create_unix_sock(apps_unix_sock_path
);
4155 if (apps_sock
< 0) {
4156 ERR("Create unix sock failed: %s", apps_unix_sock_path
);
4161 /* File permission MUST be 666 */
4162 ret
= chmod(apps_unix_sock_path
,
4163 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
| S_IROTH
| S_IWOTH
);
4165 ERR("Set file permissions failed: %s", apps_unix_sock_path
);
4176 * Check if the global socket is available, and if a daemon is answering at the
4177 * other side. If yes, error is returned.
4179 static int check_existing_daemon(void)
4181 /* Is there anybody out there ? */
4182 if (lttng_session_daemon_alive()) {
4190 * Set the tracing group gid onto the client socket.
4192 * Race window between mkdir and chown is OK because we are going from more
4193 * permissive (root.root) to less permissive (root.tracing).
4195 static int set_permissions(char *rundir
)
4200 ret
= allowed_group();
4202 WARN("No tracing group detected");
4209 /* Set lttng run dir */
4210 ret
= chown(rundir
, 0, gid
);
4212 ERR("Unable to set group on %s", rundir
);
4216 /* Ensure tracing group can search the run dir */
4217 ret
= chmod(rundir
, S_IRWXU
| S_IXGRP
| S_IXOTH
);
4219 ERR("Unable to set permissions on %s", rundir
);
4223 /* lttng client socket path */
4224 ret
= chown(client_unix_sock_path
, 0, gid
);
4226 ERR("Unable to set group on %s", client_unix_sock_path
);
4230 /* kconsumer error socket path */
4231 ret
= chown(kconsumer_data
.err_unix_sock_path
, 0, gid
);
4233 ERR("Unable to set group on %s", kconsumer_data
.err_unix_sock_path
);
4237 /* 64-bit ustconsumer error socket path */
4238 ret
= chown(ustconsumer64_data
.err_unix_sock_path
, 0, gid
);
4240 ERR("Unable to set group on %s", ustconsumer64_data
.err_unix_sock_path
);
4244 /* 32-bit ustconsumer compat32 error socket path */
4245 ret
= chown(ustconsumer32_data
.err_unix_sock_path
, 0, gid
);
4247 ERR("Unable to set group on %s", ustconsumer32_data
.err_unix_sock_path
);
4251 DBG("All permissions are set");
4258 * Create the pipe used to wake up the kernel thread.
4259 * Closed in cleanup().
4261 static int create_kernel_poll_pipe(void)
4265 ret
= pipe(kernel_poll_pipe
);
4267 PERROR("kernel poll pipe");
4271 for (i
= 0; i
< 2; i
++) {
4272 ret
= fcntl(kernel_poll_pipe
[i
], F_SETFD
, FD_CLOEXEC
);
4274 PERROR("fcntl kernel_poll_pipe");
4284 * Create the application command pipe to wake thread_manage_apps.
4285 * Closed in cleanup().
4287 static int create_apps_cmd_pipe(void)
4291 ret
= pipe(apps_cmd_pipe
);
4293 PERROR("apps cmd pipe");
4297 for (i
= 0; i
< 2; i
++) {
4298 ret
= fcntl(apps_cmd_pipe
[i
], F_SETFD
, FD_CLOEXEC
);
4300 PERROR("fcntl apps_cmd_pipe");
4310 * Create the lttng run directory needed for all global sockets and pipe.
4312 static int create_lttng_rundir(const char *rundir
)
4316 DBG3("Creating LTTng run directory: %s", rundir
);
4318 ret
= mkdir(rundir
, S_IRWXU
);
4320 if (errno
!= EEXIST
) {
4321 ERR("Unable to create %s", rundir
);
4333 * Setup sockets and directory needed by the kconsumerd communication with the
4336 static int set_consumer_sockets(struct consumer_data
*consumer_data
,
4340 char path
[PATH_MAX
];
4342 switch (consumer_data
->type
) {
4343 case LTTNG_CONSUMER_KERNEL
:
4344 snprintf(path
, PATH_MAX
, DEFAULT_KCONSUMERD_PATH
, rundir
);
4346 case LTTNG_CONSUMER64_UST
:
4347 snprintf(path
, PATH_MAX
, DEFAULT_USTCONSUMERD64_PATH
, rundir
);
4349 case LTTNG_CONSUMER32_UST
:
4350 snprintf(path
, PATH_MAX
, DEFAULT_USTCONSUMERD32_PATH
, rundir
);
4353 ERR("Consumer type unknown");
4358 DBG2("Creating consumer directory: %s", path
);
4360 ret
= mkdir(path
, S_IRWXU
);
4362 if (errno
!= EEXIST
) {
4364 ERR("Failed to create %s", path
);
4370 /* Create the kconsumerd error unix socket */
4371 consumer_data
->err_sock
=
4372 lttcomm_create_unix_sock(consumer_data
->err_unix_sock_path
);
4373 if (consumer_data
->err_sock
< 0) {
4374 ERR("Create unix sock failed: %s", consumer_data
->err_unix_sock_path
);
4379 /* File permission MUST be 660 */
4380 ret
= chmod(consumer_data
->err_unix_sock_path
,
4381 S_IRUSR
| S_IWUSR
| S_IRGRP
| S_IWGRP
);
4383 ERR("Set file permissions failed: %s", consumer_data
->err_unix_sock_path
);
4393 * Signal handler for the daemon
4395 * Simply stop all worker threads, leaving main() return gracefully after
4396 * joining all threads and calling cleanup().
4398 static void sighandler(int sig
)
4402 DBG("SIGPIPE caught");
4405 DBG("SIGINT caught");
4409 DBG("SIGTERM caught");
4418 * Setup signal handler for :
4419 * SIGINT, SIGTERM, SIGPIPE
4421 static int set_signal_handler(void)
4424 struct sigaction sa
;
4427 if ((ret
= sigemptyset(&sigset
)) < 0) {
4428 PERROR("sigemptyset");
4432 sa
.sa_handler
= sighandler
;
4433 sa
.sa_mask
= sigset
;
4435 if ((ret
= sigaction(SIGTERM
, &sa
, NULL
)) < 0) {
4436 PERROR("sigaction");
4440 if ((ret
= sigaction(SIGINT
, &sa
, NULL
)) < 0) {
4441 PERROR("sigaction");
4445 if ((ret
= sigaction(SIGPIPE
, &sa
, NULL
)) < 0) {
4446 PERROR("sigaction");
4450 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
4456 * Set open files limit to unlimited. This daemon can open a large number of
4457 * file descriptors in order to consumer multiple kernel traces.
4459 static void set_ulimit(void)
4464 /* The kernel does not allowed an infinite limit for open files */
4465 lim
.rlim_cur
= 65535;
4466 lim
.rlim_max
= 65535;
4468 ret
= setrlimit(RLIMIT_NOFILE
, &lim
);
4470 PERROR("failed to set open files limit");
4477 int main(int argc
, char **argv
)
4481 const char *home_path
;
4483 init_kernel_workarounds();
4485 rcu_register_thread();
4487 /* Create thread quit pipe */
4488 if ((ret
= init_thread_quit_pipe()) < 0) {
4492 setup_consumerd_path();
4494 /* Parse arguments */
4496 if ((ret
= parse_args(argc
, argv
) < 0)) {
4509 /* Check if daemon is UID = 0 */
4510 is_root
= !getuid();
4513 rundir
= strdup(DEFAULT_LTTNG_RUNDIR
);
4515 /* Create global run dir with root access */
4516 ret
= create_lttng_rundir(rundir
);
4521 if (strlen(apps_unix_sock_path
) == 0) {
4522 snprintf(apps_unix_sock_path
, PATH_MAX
,
4523 DEFAULT_GLOBAL_APPS_UNIX_SOCK
);
4526 if (strlen(client_unix_sock_path
) == 0) {
4527 snprintf(client_unix_sock_path
, PATH_MAX
,
4528 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK
);
4531 /* Set global SHM for ust */
4532 if (strlen(wait_shm_path
) == 0) {
4533 snprintf(wait_shm_path
, PATH_MAX
,
4534 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH
);
4537 /* Setup kernel consumerd path */
4538 snprintf(kconsumer_data
.err_unix_sock_path
, PATH_MAX
,
4539 DEFAULT_KCONSUMERD_ERR_SOCK_PATH
, rundir
);
4540 snprintf(kconsumer_data
.cmd_unix_sock_path
, PATH_MAX
,
4541 DEFAULT_KCONSUMERD_CMD_SOCK_PATH
, rundir
);
4543 DBG2("Kernel consumer err path: %s",
4544 kconsumer_data
.err_unix_sock_path
);
4545 DBG2("Kernel consumer cmd path: %s",
4546 kconsumer_data
.cmd_unix_sock_path
);
4548 home_path
= get_home_dir();
4549 if (home_path
== NULL
) {
4550 /* TODO: Add --socket PATH option */
4551 ERR("Can't get HOME directory for sockets creation.");
4557 * Create rundir from home path. This will create something like
4560 ret
= asprintf(&rundir
, DEFAULT_LTTNG_HOME_RUNDIR
, home_path
);
4566 ret
= create_lttng_rundir(rundir
);
4571 if (strlen(apps_unix_sock_path
) == 0) {
4572 snprintf(apps_unix_sock_path
, PATH_MAX
,
4573 DEFAULT_HOME_APPS_UNIX_SOCK
, home_path
);
4576 /* Set the cli tool unix socket path */
4577 if (strlen(client_unix_sock_path
) == 0) {
4578 snprintf(client_unix_sock_path
, PATH_MAX
,
4579 DEFAULT_HOME_CLIENT_UNIX_SOCK
, home_path
);
4582 /* Set global SHM for ust */
4583 if (strlen(wait_shm_path
) == 0) {
4584 snprintf(wait_shm_path
, PATH_MAX
,
4585 DEFAULT_HOME_APPS_WAIT_SHM_PATH
, geteuid());
4589 /* Set consumer initial state */
4590 kernel_consumerd_state
= CONSUMER_STOPPED
;
4591 ust_consumerd_state
= CONSUMER_STOPPED
;
4593 DBG("Client socket path %s", client_unix_sock_path
);
4594 DBG("Application socket path %s", apps_unix_sock_path
);
4595 DBG("LTTng run directory path: %s", rundir
);
4597 /* 32 bits consumerd path setup */
4598 snprintf(ustconsumer32_data
.err_unix_sock_path
, PATH_MAX
,
4599 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH
, rundir
);
4600 snprintf(ustconsumer32_data
.cmd_unix_sock_path
, PATH_MAX
,
4601 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH
, rundir
);
4603 DBG2("UST consumer 32 bits err path: %s",
4604 ustconsumer32_data
.err_unix_sock_path
);
4605 DBG2("UST consumer 32 bits cmd path: %s",
4606 ustconsumer32_data
.cmd_unix_sock_path
);
4608 /* 64 bits consumerd path setup */
4609 snprintf(ustconsumer64_data
.err_unix_sock_path
, PATH_MAX
,
4610 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH
, rundir
);
4611 snprintf(ustconsumer64_data
.cmd_unix_sock_path
, PATH_MAX
,
4612 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH
, rundir
);
4614 DBG2("UST consumer 64 bits err path: %s",
4615 ustconsumer64_data
.err_unix_sock_path
);
4616 DBG2("UST consumer 64 bits cmd path: %s",
4617 ustconsumer64_data
.cmd_unix_sock_path
);
4620 * See if daemon already exist.
4622 if ((ret
= check_existing_daemon()) < 0) {
4623 ERR("Already running daemon.\n");
4625 * We do not goto exit because we must not cleanup()
4626 * because a daemon is already running.
4632 * Init UST app hash table. Alloc hash table before this point since
4633 * cleanup() can get called after that point.
4637 /* After this point, we can safely call cleanup() with "goto exit" */
4640 * These actions must be executed as root. We do that *after* setting up
4641 * the sockets path because we MUST make the check for another daemon using
4642 * those paths *before* trying to set the kernel consumer sockets and init
4646 ret
= set_consumer_sockets(&kconsumer_data
, rundir
);
4651 /* Setup kernel tracer */
4652 if (!opt_no_kernel
) {
4653 init_kernel_tracer();
4656 /* Set ulimit for open files */
4659 /* init lttng_fd tracking must be done after set_ulimit. */
4662 ret
= set_consumer_sockets(&ustconsumer64_data
, rundir
);
4667 ret
= set_consumer_sockets(&ustconsumer32_data
, rundir
);
4672 if ((ret
= set_signal_handler()) < 0) {
4676 /* Setup the needed unix socket */
4677 if ((ret
= init_daemon_socket()) < 0) {
4681 /* Set credentials to socket */
4682 if (is_root
&& ((ret
= set_permissions(rundir
)) < 0)) {
4686 /* Get parent pid if -S, --sig-parent is specified. */
4687 if (opt_sig_parent
) {
4691 /* Setup the kernel pipe for waking up the kernel thread */
4692 if ((ret
= create_kernel_poll_pipe()) < 0) {
4696 /* Setup the thread apps communication pipe. */
4697 if ((ret
= create_apps_cmd_pipe()) < 0) {
4701 /* Init UST command queue. */
4702 cds_wfq_init(&ust_cmd_queue
.queue
);
4705 * Get session list pointer. This pointer MUST NOT be free(). This list is
4706 * statically declared in session.c
4708 session_list_ptr
= session_get_list();
4710 /* Set up max poll set size */
4711 lttng_poll_set_max_size();
4713 /* Create thread to manage the client socket */
4714 ret
= pthread_create(&client_thread
, NULL
,
4715 thread_manage_clients
, (void *) NULL
);
4717 PERROR("pthread_create clients");
4721 /* Create thread to dispatch registration */
4722 ret
= pthread_create(&dispatch_thread
, NULL
,
4723 thread_dispatch_ust_registration
, (void *) NULL
);
4725 PERROR("pthread_create dispatch");
4729 /* Create thread to manage application registration. */
4730 ret
= pthread_create(®_apps_thread
, NULL
,
4731 thread_registration_apps
, (void *) NULL
);
4733 PERROR("pthread_create registration");
4737 /* Create thread to manage application socket */
4738 ret
= pthread_create(&apps_thread
, NULL
,
4739 thread_manage_apps
, (void *) NULL
);
4741 PERROR("pthread_create apps");
4745 /* Create kernel thread to manage kernel event */
4746 ret
= pthread_create(&kernel_thread
, NULL
,
4747 thread_manage_kernel
, (void *) NULL
);
4749 PERROR("pthread_create kernel");
4753 ret
= pthread_join(kernel_thread
, &status
);
4755 PERROR("pthread_join");
4756 goto error
; /* join error, exit without cleanup */
4760 ret
= pthread_join(apps_thread
, &status
);
4762 PERROR("pthread_join");
4763 goto error
; /* join error, exit without cleanup */
4767 ret
= pthread_join(reg_apps_thread
, &status
);
4769 PERROR("pthread_join");
4770 goto error
; /* join error, exit without cleanup */
4774 ret
= pthread_join(dispatch_thread
, &status
);
4776 PERROR("pthread_join");
4777 goto error
; /* join error, exit without cleanup */
4781 ret
= pthread_join(client_thread
, &status
);
4783 PERROR("pthread_join");
4784 goto error
; /* join error, exit without cleanup */
4787 ret
= join_consumer_thread(&kconsumer_data
);
4789 PERROR("join_consumer");
4790 goto error
; /* join error, exit without cleanup */
4796 * cleanup() is called when no other thread is running.
4798 rcu_thread_online();
4800 rcu_thread_offline();
4801 rcu_unregister_thread();