Fix: remove break in epoll loop of apps. thread
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <inttypes.h>
29 #include <sys/mman.h>
30 #include <sys/mount.h>
31 #include <sys/resource.h>
32 #include <sys/socket.h>
33 #include <sys/stat.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <urcu/uatomic.h>
37 #include <unistd.h>
38 #include <config.h>
39
40 #include <common/common.h>
41 #include <common/compat/socket.h>
42 #include <common/defaults.h>
43 #include <common/kernel-consumer/kernel-consumer.h>
44 #include <common/futex.h>
45 #include <common/relayd/relayd.h>
46 #include <common/utils.h>
47
48 #include "lttng-sessiond.h"
49 #include "buffer-registry.h"
50 #include "channel.h"
51 #include "cmd.h"
52 #include "consumer.h"
53 #include "context.h"
54 #include "event.h"
55 #include "kernel.h"
56 #include "kernel-consumer.h"
57 #include "modprobe.h"
58 #include "shm.h"
59 #include "ust-ctl.h"
60 #include "ust-consumer.h"
61 #include "utils.h"
62 #include "fd-limit.h"
63 #include "health.h"
64 #include "testpoint.h"
65 #include "ust-thread.h"
66
67 #define CONSUMERD_FILE "lttng-consumerd"
68
69 /* Const values */
70 const char default_tracing_group[] = DEFAULT_TRACING_GROUP;
71
72 const char *progname;
73 const char *opt_tracing_group;
74 static const char *opt_pidfile;
75 static int opt_sig_parent;
76 static int opt_verbose_consumer;
77 static int opt_daemon;
78 static int opt_no_kernel;
79 static int is_root; /* Set to 1 if the daemon is running as root */
80 static pid_t ppid; /* Parent PID for --sig-parent option */
81 static char *rundir;
82
83 /*
84 * Consumer daemon specific control data. Every value not initialized here is
85 * set to 0 by the static definition.
86 */
87 static struct consumer_data kconsumer_data = {
88 .type = LTTNG_CONSUMER_KERNEL,
89 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
90 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
91 .err_sock = -1,
92 .cmd_sock = -1,
93 .metadata_sock.fd = -1,
94 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
95 .lock = PTHREAD_MUTEX_INITIALIZER,
96 .cond = PTHREAD_COND_INITIALIZER,
97 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
98 };
99 static struct consumer_data ustconsumer64_data = {
100 .type = LTTNG_CONSUMER64_UST,
101 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
102 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
103 .err_sock = -1,
104 .cmd_sock = -1,
105 .metadata_sock.fd = -1,
106 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
107 .lock = PTHREAD_MUTEX_INITIALIZER,
108 .cond = PTHREAD_COND_INITIALIZER,
109 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
110 };
111 static struct consumer_data ustconsumer32_data = {
112 .type = LTTNG_CONSUMER32_UST,
113 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
114 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
115 .err_sock = -1,
116 .cmd_sock = -1,
117 .metadata_sock.fd = -1,
118 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
119 .lock = PTHREAD_MUTEX_INITIALIZER,
120 .cond = PTHREAD_COND_INITIALIZER,
121 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
122 };
123
124 /* Shared between threads */
125 static int dispatch_thread_exit;
126
127 /* Global application Unix socket path */
128 static char apps_unix_sock_path[PATH_MAX];
129 /* Global client Unix socket path */
130 static char client_unix_sock_path[PATH_MAX];
131 /* Global wait shm path for UST */
132 static char wait_shm_path[PATH_MAX];
133 /* Global health check unix path */
134 static char health_unix_sock_path[PATH_MAX];
135
136 /* Sockets and FDs */
137 static int client_sock = -1;
138 static int apps_sock = -1;
139 int kernel_tracer_fd = -1;
140 static int kernel_poll_pipe[2] = { -1, -1 };
141
142 /*
143 * Quit pipe for all threads. This permits a single cancellation point
144 * for all threads when receiving an event on the pipe.
145 */
146 static int thread_quit_pipe[2] = { -1, -1 };
147
148 /*
149 * This pipe is used to inform the thread managing application communication
150 * that a command is queued and ready to be processed.
151 */
152 static int apps_cmd_pipe[2] = { -1, -1 };
153
154 int apps_cmd_notify_pipe[2] = { -1, -1 };
155
156 /* Pthread, Mutexes and Semaphores */
157 static pthread_t apps_thread;
158 static pthread_t apps_notify_thread;
159 static pthread_t reg_apps_thread;
160 static pthread_t client_thread;
161 static pthread_t kernel_thread;
162 static pthread_t dispatch_thread;
163 static pthread_t health_thread;
164 static pthread_t ht_cleanup_thread;
165
166 /*
167 * UST registration command queue. This queue is tied with a futex and uses an
168 * N wakers / 1 waiter scheme implemented and detailed in futex.c/.h
169 *
170 * The thread_manage_apps and thread_dispatch_ust_registration interact with
171 * this queue and the wait/wake scheme.
172 */
173 static struct ust_cmd_queue ust_cmd_queue;
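
/*
 * Usage sketch (illustrative only, kept out of the build with #if 0) of
 * the N wakers / 1 waiter pattern around this queue, condensed from
 * thread_registration_apps (producer) and
 * thread_dispatch_ust_registration (waiter) below.
 */
#if 0
/* Producer side: enqueue a command, then wake the waiter. */
cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
futex_nto1_wake(&ust_cmd_queue.futex);

/* Waiter side: prepare, drain the queue, then block until woken again. */
futex_nto1_prepare(&ust_cmd_queue.futex);
while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue)) != NULL) {
	/* ... process the dequeued struct ust_command ... */
}
futex_nto1_wait(&ust_cmd_queue.futex);
#endif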
174
175 /*
176 * Pointer initialized before thread creation.
177 *
178 * This points to the tracing session list containing the session count and a
179 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
180 * MUST NOT be taken if you call a public function in session.c.
181 *
182 * The lock is nested inside the structure: session_list_ptr->lock. Please use
183 * session_lock_list and session_unlock_list for lock acquisition.
184 */
185 static struct ltt_session_list *session_list_ptr;
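
/*
 * Locking sketch (illustrative only, not compiled): the discipline stated
 * above, as practiced by update_kernel_poll() and update_kernel_stream()
 * below.
 */
#if 0
session_lock_list();
cds_list_for_each_entry(session, &session_list_ptr->head, list) {
	session_lock(session);
	/* ... inspect or modify this session safely ... */
	session_unlock(session);
}
session_unlock_list();
#endif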
186
187 int ust_consumerd64_fd = -1;
188 int ust_consumerd32_fd = -1;
189
190 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
191 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
192 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
193 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
194
195 static const char *module_proc_lttng = "/proc/lttng";
196
197 /*
198 * Consumer daemon state which is changed when spawning it, killing it or in
199 * case of a fatal error.
200 */
201 enum consumerd_state {
202 CONSUMER_STARTED = 1,
203 CONSUMER_STOPPED = 2,
204 CONSUMER_ERROR = 3,
205 };
206
207 /*
208 * This consumer daemon state is used to validate if a client command will be
209 * able to reach the consumer. If not, the client is informed. For instance,
210 * doing a "lttng start" when the consumer state is set to ERROR will return an
211 * error to the client.
212 *
213 * The following example shows a possible race condition of this scheme:
214 *
215 * consumer thread error happens
216 * client cmd arrives
217 * client cmd checks state -> still OK
218 * consumer thread exit, sets error
219 * client cmd try to talk to consumer
220 * ...
221 *
222 * However, since the consumer is a different daemon, we have no way of making
223 * sure the command will reach it safely even with this state flag. This is why
224 * we consider that up to the state validation during command processing, the
225 * command is safe. After that, we can not guarantee the correctness of the
226 * client request vis-a-vis the consumer.
227 */
228 static enum consumerd_state ust_consumerd_state;
229 static enum consumerd_state kernel_consumerd_state;
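
/*
 * Validation sketch (illustrative only, not compiled): how a client
 * command is expected to check this state before talking to the consumer.
 * Only the uatomic_set() side appears in this file; the matching read and
 * the error code shown here are assumptions of this sketch.
 */
#if 0
if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
	ret = LTTNG_ERR_NO_KERNCONSUMERD;	/* Hypothetical error code. */
	goto error;
}
#endif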
230
231 /*
232 * Socket timeout for receiving and sending in seconds.
233 */
234 static int app_socket_timeout;
235
236 /* Set in main() with the current page size. */
237 long page_size;
238
239 static
240 void setup_consumerd_path(void)
241 {
242 const char *bin, *libdir;
243
244 /*
245 * Allow INSTALL_BIN_PATH to be used as a target path for the
246 * native architecture size consumer if CONFIG_CONSUMER*_PATH
247 * has not been defined.
248 */
249 #if (CAA_BITS_PER_LONG == 32)
250 if (!consumerd32_bin[0]) {
251 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
252 }
253 if (!consumerd32_libdir[0]) {
254 consumerd32_libdir = INSTALL_LIB_PATH;
255 }
256 #elif (CAA_BITS_PER_LONG == 64)
257 if (!consumerd64_bin[0]) {
258 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
259 }
260 if (!consumerd64_libdir[0]) {
261 consumerd64_libdir = INSTALL_LIB_PATH;
262 }
263 #else
264 #error "Unknown bitness"
265 #endif
266
267 /*
268 * runtime env. var. overrides the build default.
269 */
270 bin = getenv("LTTNG_CONSUMERD32_BIN");
271 if (bin) {
272 consumerd32_bin = bin;
273 }
274 bin = getenv("LTTNG_CONSUMERD64_BIN");
275 if (bin) {
276 consumerd64_bin = bin;
277 }
278 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
279 if (libdir) {
280 consumerd32_libdir = libdir;
281 }
282 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
283 if (libdir) {
284 consumerd64_libdir = libdir;
285 }
286 }
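
/*
 * Usage sketch (illustrative only, not compiled) of the runtime override
 * honored above; the path shown is hypothetical.
 */
#if 0
setenv("LTTNG_CONSUMERD64_BIN",
		"/opt/lttng/libexec/lttng-consumerd", 1);
setup_consumerd_path();
#endif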
287
288 /*
289 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
290 */
291 int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
292 {
293 int ret;
294
295 assert(events);
296
297 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
298 if (ret < 0) {
299 goto error;
300 }
301
302 /* Add quit pipe */
303 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
304 if (ret < 0) {
305 goto error;
306 }
307
308 return 0;
309
310 error:
311 return ret;
312 }
313
314 /*
315 * Check if the thread quit pipe was triggered.
316 *
317 * Return 1 if it was triggered else 0.
318 */
319 int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
320 {
321 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
322 return 1;
323 }
324
325 return 0;
326 }
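
/*
 * Poll loop sketch (illustrative only, not compiled; declarations
 * omitted): how the worker threads below combine the two helpers above.
 */
#if 0
ret = sessiond_set_thread_pollset(&events, 2);
/* ... add this thread's own FDs to the set ... */
ret = lttng_poll_wait(&events, -1);
for (i = 0; i < ret; i++) {
	revents = LTTNG_POLL_GETEV(&events, i);
	pollfd = LTTNG_POLL_GETFD(&events, i);
	if (sessiond_check_thread_quit_pipe(pollfd, revents)) {
		goto exit;	/* Quit pipe triggered: tear the thread down. */
	}
	/* ... handle this thread's own FDs ... */
}
#endif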
327
328 /*
329 * Return group ID of the tracing group or -1 if not found.
330 */
331 static gid_t allowed_group(void)
332 {
333 struct group *grp;
334
335 if (opt_tracing_group) {
336 grp = getgrnam(opt_tracing_group);
337 } else {
338 grp = getgrnam(default_tracing_group);
339 }
340 if (!grp) {
341 return -1;
342 } else {
343 return grp->gr_gid;
344 }
345 }
346
347 /*
348 * Init thread quit pipe.
349 *
350 * Return -1 on error or 0 if all pipes are created.
351 */
352 static int init_thread_quit_pipe(void)
353 {
354 int ret, i;
355
356 ret = pipe(thread_quit_pipe);
357 if (ret < 0) {
358 PERROR("thread quit pipe");
359 goto error;
360 }
361
362 for (i = 0; i < 2; i++) {
363 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
364 if (ret < 0) {
365 PERROR("fcntl");
366 goto error;
367 }
368 }
369
370 error:
371 return ret;
372 }
373
374 /*
375 * Stop all threads by closing the thread quit pipe.
376 */
377 static void stop_threads(void)
378 {
379 int ret;
380
381 /* Stopping all threads */
382 DBG("Terminating all threads");
383 ret = notify_thread_pipe(thread_quit_pipe[1]);
384 if (ret < 0) {
385 ERR("write error on thread quit pipe");
386 }
387
388 /* Dispatch thread */
389 CMM_STORE_SHARED(dispatch_thread_exit, 1);
390 futex_nto1_wake(&ust_cmd_queue.futex);
391 }
392
393 /*
394 * Close every consumer sockets.
395 */
396 static void close_consumer_sockets(void)
397 {
398 int ret;
399
400 if (kconsumer_data.err_sock >= 0) {
401 ret = close(kconsumer_data.err_sock);
402 if (ret < 0) {
403 PERROR("kernel consumer err_sock close");
404 }
405 }
406 if (ustconsumer32_data.err_sock >= 0) {
407 ret = close(ustconsumer32_data.err_sock);
408 if (ret < 0) {
409 PERROR("UST consumerd32 err_sock close");
410 }
411 }
412 if (ustconsumer64_data.err_sock >= 0) {
413 ret = close(ustconsumer64_data.err_sock);
414 if (ret < 0) {
415 PERROR("UST consumerd64 err_sock close");
416 }
417 }
418 if (kconsumer_data.cmd_sock >= 0) {
419 ret = close(kconsumer_data.cmd_sock);
420 if (ret < 0) {
421 PERROR("kernel consumer cmd_sock close");
422 }
423 }
424 if (ustconsumer32_data.cmd_sock >= 0) {
425 ret = close(ustconsumer32_data.cmd_sock);
426 if (ret < 0) {
427 PERROR("UST consumerd32 cmd_sock close");
428 }
429 }
430 if (ustconsumer64_data.cmd_sock >= 0) {
431 ret = close(ustconsumer64_data.cmd_sock);
432 if (ret < 0) {
433 PERROR("UST consumerd64 cmd_sock close");
434 }
435 }
436 }
437
438 /*
439 * Cleanup the daemon
440 */
441 static void cleanup(void)
442 {
443 int ret;
444 char *cmd = NULL;
445 struct ltt_session *sess, *stmp;
446
447 DBG("Cleaning up");
448
449 /* First thing first, stop all threads */
450 utils_close_pipe(thread_quit_pipe);
451
452 /*
453 * If opt_pidfile is undefined, the default file will be wiped when
454 * removing the rundir.
455 */
456 if (opt_pidfile) {
457 ret = remove(opt_pidfile);
458 if (ret < 0) {
459 PERROR("remove pidfile %s", opt_pidfile);
460 }
461 }
462
463 DBG("Removing %s directory", rundir);
464 ret = asprintf(&cmd, "rm -rf %s", rundir);
465 if (ret < 0) {
466 ERR("asprintf failed. Something is really wrong!");
467 }
468
469 /* Remove lttng run directory */
470 ret = system(cmd);
471 if (ret < 0) {
472 ERR("Unable to clean %s", rundir);
473 }
474 free(cmd);
475 free(rundir);
476
477 DBG("Cleaning up all sessions");
478
479 /* Destroy session list mutex */
480 if (session_list_ptr != NULL) {
481 pthread_mutex_destroy(&session_list_ptr->lock);
482
483 /* Cleanup ALL session */
484 cds_list_for_each_entry_safe(sess, stmp,
485 &session_list_ptr->head, list) {
486 cmd_destroy_session(sess, kernel_poll_pipe[1]);
487 }
488 }
489
490 DBG("Closing all UST sockets");
491 ust_app_clean_list();
492 buffer_reg_destroy_registries();
493
494 if (is_root && !opt_no_kernel) {
495 DBG2("Closing kernel fd");
496 if (kernel_tracer_fd >= 0) {
497 ret = close(kernel_tracer_fd);
498 if (ret) {
499 PERROR("close");
500 }
501 }
502 DBG("Unloading kernel modules");
503 modprobe_remove_lttng_all();
504 }
505
506 close_consumer_sockets();
507
508 /* <fun> */
509 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
510 "Matthew, BEET driven development works!%c[%dm",
511 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
512 /* </fun> */
513 }
514
515 /*
516 * Send data on a unix socket using the liblttsessiondcomm API.
517 *
518 * Return lttcomm error code.
519 */
520 static int send_unix_sock(int sock, void *buf, size_t len)
521 {
522 /* Check valid length */
523 if (len == 0) {
524 return -1;
525 }
526
527 return lttcomm_send_unix_sock(sock, buf, len);
528 }
529
530 /*
531 * Free memory of a command context structure.
532 */
533 static void clean_command_ctx(struct command_ctx **cmd_ctx)
534 {
535 DBG("Clean command context structure");
536 if (*cmd_ctx) {
537 if ((*cmd_ctx)->llm) {
538 free((*cmd_ctx)->llm);
539 }
540 if ((*cmd_ctx)->lsm) {
541 free((*cmd_ctx)->lsm);
542 }
543 free(*cmd_ctx);
544 *cmd_ctx = NULL;
545 }
546 }
547
548 /*
549 * Notify UST applications using the shm mmap futex.
550 */
551 static int notify_ust_apps(int active)
552 {
553 char *wait_shm_mmap;
554
555 DBG("Notifying applications of session daemon state: %d", active);
556
557 /* See shm.c for this call implying mmap, shm and futex calls */
558 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
559 if (wait_shm_mmap == NULL) {
560 goto error;
561 }
562
563 /* Wake waiting process */
564 futex_wait_update((int32_t *) wait_shm_mmap, active);
565
566 /* Apps notified successfully */
567 return 0;
568
569 error:
570 return -1;
571 }
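
/*
 * Usage sketch (illustrative only, not compiled): the registration thread
 * below calls notify_ust_apps(1) once its socket is listening and
 * notify_ust_apps(0) on teardown, so applications blocked on the wait shm
 * futex word only attempt to register while a daemon is listening.
 */
#if 0
ret = notify_ust_apps(1);	/* Daemon ready: wake waiting applications. */
/* ... accept and process registrations ... */
notify_ust_apps(0);		/* Daemon going away. */
#endif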
572
573 /*
574 * Setup the outgoing data buffer for the response (llm) by allocating the
575 * right amount of memory and copying the original information from the lsm
576 * structure.
577 *
578 * Return total size of the buffer pointed by buf.
579 */
580 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
581 {
582 int ret, buf_size;
583
584 buf_size = size;
585
586 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
587 if (cmd_ctx->llm == NULL) {
588 PERROR("zmalloc");
589 ret = -ENOMEM;
590 goto error;
591 }
592
593 /* Copy common data */
594 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
595 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
596
597 cmd_ctx->llm->data_size = size;
598 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
599
600 return buf_size;
601
602 error:
603 return ret;
604 }
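
/*
 * Reply sketch (illustrative only, not compiled): a command handler
 * filling the response after the fixed-size header allocated above. The
 * payload layout (raw bytes right after struct lttcomm_lttng_msg) is an
 * assumption of this sketch; payload, payload_size and setup_error are
 * placeholders.
 */
#if 0
ret = setup_lttng_msg(cmd_ctx, payload_size);
if (ret < 0) {
	goto setup_error;
}
memcpy(((char *) cmd_ctx->llm) + sizeof(struct lttcomm_lttng_msg),
		payload, payload_size);
#endif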
605
606 /*
607 * Update the kernel poll set of all channel fd available over all tracing
608 * sessions. Add the wakeup pipe at the end of the set.
609 */
610 static int update_kernel_poll(struct lttng_poll_event *events)
611 {
612 int ret;
613 struct ltt_session *session;
614 struct ltt_kernel_channel *channel;
615
616 DBG("Updating kernel poll set");
617
618 session_lock_list();
619 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
620 session_lock(session);
621 if (session->kernel_session == NULL) {
622 session_unlock(session);
623 continue;
624 }
625
626 cds_list_for_each_entry(channel,
627 &session->kernel_session->channel_list.head, list) {
628 /* Add channel fd to the kernel poll set */
629 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
630 if (ret < 0) {
631 session_unlock(session);
632 goto error;
633 }
634 DBG("Channel fd %d added to kernel set", channel->fd);
635 }
636 session_unlock(session);
637 }
638 session_unlock_list();
639
640 return 0;
641
642 error:
643 session_unlock_list();
644 return -1;
645 }
646
647 /*
648 * Find the channel fd from 'fd' over all tracing sessions. When found, check
649 * for new channel stream and send those stream fds to the kernel consumer.
650 *
651 * Useful for CPU hotplug feature.
652 */
653 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
654 {
655 int ret = 0;
656 struct ltt_session *session;
657 struct ltt_kernel_session *ksess;
658 struct ltt_kernel_channel *channel;
659
660 DBG("Updating kernel streams for channel fd %d", fd);
661
662 session_lock_list();
663 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
664 session_lock(session);
665 if (session->kernel_session == NULL) {
666 session_unlock(session);
667 continue;
668 }
669 ksess = session->kernel_session;
670
671 cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
672 if (channel->fd == fd) {
673 DBG("Channel found, updating kernel streams");
674 ret = kernel_open_channel_stream(channel);
675 if (ret < 0) {
676 goto error;
677 }
678
679 /*
680 * Have we already sent fds to the consumer? If yes, it means
681 * that tracing is started so it is safe to send our updated
682 * stream fds.
683 */
684 if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
685 struct lttng_ht_iter iter;
686 struct consumer_socket *socket;
687
688 rcu_read_lock();
689 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
690 &iter.iter, socket, node.node) {
691 /* Code flow error */
692 assert(socket->fd >= 0);
693
694 pthread_mutex_lock(socket->lock);
695 ret = kernel_consumer_send_channel_stream(socket,
696 channel, ksess);
697 pthread_mutex_unlock(socket->lock);
698 if (ret < 0) {
699 rcu_read_unlock();
700 goto error;
701 }
702 }
703 rcu_read_unlock();
704 }
705 goto error;
706 }
707 }
708 session_unlock(session);
709 }
710 session_unlock_list();
711 return ret;
712
713 error:
714 session_unlock(session);
715 session_unlock_list();
716 return ret;
717 }
718
719 /*
720 * For each tracing session, update newly registered apps. The session list
721 * lock MUST be acquired before calling this.
722 */
723 static void update_ust_app(int app_sock)
724 {
725 struct ltt_session *sess, *stmp;
726
727 /* For all tracing session(s) */
728 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
729 session_lock(sess);
730 if (sess->ust_session) {
731 ust_app_global_update(sess->ust_session, app_sock);
732 }
733 session_unlock(sess);
734 }
735 }
736
737 /*
738 * This thread manages events coming from the kernel.
739 *
740 * Features supported in this thread:
741 * -) CPU Hotplug
742 */
743 static void *thread_manage_kernel(void *data)
744 {
745 int ret, i, pollfd, update_poll_flag = 1, err = -1;
746 uint32_t revents, nb_fd;
747 char tmp;
748 struct lttng_poll_event events;
749
750 DBG("[thread] Thread manage kernel started");
751
752 health_register(HEALTH_TYPE_KERNEL);
753
754 /*
755 * The first step of the while loop below cleans this structure, which could
756 * free non-NULL pointers, so initialize it before the loop.
757 */
758 lttng_poll_init(&events);
759
760 if (testpoint(thread_manage_kernel)) {
761 goto error_testpoint;
762 }
763
764 health_code_update();
765
766 if (testpoint(thread_manage_kernel_before_loop)) {
767 goto error_testpoint;
768 }
769
770 while (1) {
771 health_code_update();
772
773 if (update_poll_flag == 1) {
774 /* Clean events object. We are about to populate it again. */
775 lttng_poll_clean(&events);
776
777 ret = sessiond_set_thread_pollset(&events, 2);
778 if (ret < 0) {
779 goto error_poll_create;
780 }
781
782 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
783 if (ret < 0) {
784 goto error;
785 }
786
787 /* This will add the available kernel channel if any. */
788 ret = update_kernel_poll(&events);
789 if (ret < 0) {
790 goto error;
791 }
792 update_poll_flag = 0;
793 }
794
795 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
796
797 /* Poll infinite value of time */
798 restart:
799 health_poll_entry();
800 ret = lttng_poll_wait(&events, -1);
801 health_poll_exit();
802 if (ret < 0) {
803 /*
804 * Restart interrupted system call.
805 */
806 if (errno == EINTR) {
807 goto restart;
808 }
809 goto error;
810 } else if (ret == 0) {
811 /* Should not happen since timeout is infinite */
812 ERR("Return value of poll is 0 with an infinite timeout.\n"
813 "This should not have happened! Continuing...");
814 continue;
815 }
816
817 nb_fd = ret;
818
819 for (i = 0; i < nb_fd; i++) {
820 /* Fetch once the poll data */
821 revents = LTTNG_POLL_GETEV(&events, i);
822 pollfd = LTTNG_POLL_GETFD(&events, i);
823
824 health_code_update();
825
826 /* Thread quit pipe has been closed. Killing thread. */
827 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
828 if (ret) {
829 err = 0;
830 goto exit;
831 }
832
833 /* Check for data on kernel pipe */
834 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
835 do {
836 ret = read(kernel_poll_pipe[0], &tmp, 1);
837 } while (ret < 0 && errno == EINTR);
838 /*
839 * The return value is useless here: if this pipe sees any action, an
840 * update is required anyway.
841 */
842 update_poll_flag = 1;
843 continue;
844 } else {
845 /*
846 * New CPU detected by the kernel. Adding kernel stream to
847 * kernel session and updating the kernel consumer
848 */
849 if (revents & LPOLLIN) {
850 ret = update_kernel_stream(&kconsumer_data, pollfd);
851 if (ret < 0) {
852 continue;
853 }
854 break;
855 /*
856 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
857 * and unregister kernel stream at this point.
858 */
859 }
860 }
861 }
862 }
863
864 exit:
865 error:
866 lttng_poll_clean(&events);
867 error_poll_create:
868 error_testpoint:
869 utils_close_pipe(kernel_poll_pipe);
870 kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
871 if (err) {
872 health_error();
873 ERR("Health error occurred in %s", __func__);
874 WARN("Kernel thread died unexpectedly. "
875 "Kernel tracing can continue but CPU hotplug is disabled.");
876 }
877 health_unregister();
878 DBG("Kernel thread dying");
879 return NULL;
880 }
881
882 /*
883 * Signal the pthread condition of the consumer data that the thread state changed.
884 */
885 static void signal_consumer_condition(struct consumer_data *data, int state)
886 {
887 pthread_mutex_lock(&data->cond_mutex);
888
889 /*
890 * The state is set before signaling. It can be any value, it's the waiter
891 * job to correctly interpret this condition variable associated to the
892 * consumer pthread_cond.
893 *
894 * A value of 0 means that the corresponding thread of the consumer data
895 * was not started. 1 indicates that the thread has started and is ready
896 * for action. A negative value means that there was an error during the
897 * thread bootstrap.
898 */
899 data->consumer_thread_is_ready = state;
900 (void) pthread_cond_signal(&data->cond);
901
902 pthread_mutex_unlock(&data->cond_mutex);
903 }
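
/*
 * Waiter-side sketch (illustrative only, not compiled), condensed from
 * spawn_consumer_thread() below.
 */
#if 0
pthread_mutex_lock(&consumer_data->cond_mutex);
while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
	ret = pthread_cond_timedwait(&consumer_data->cond,
			&consumer_data->cond_mutex, &timeout);
}
pthread_mutex_unlock(&consumer_data->cond_mutex);
#endif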
904
905 /*
906 * This thread manages the consumer errors sent back to the session daemon.
907 */
908 static void *thread_manage_consumer(void *data)
909 {
910 int sock = -1, i, ret, pollfd, err = -1;
911 uint32_t revents, nb_fd;
912 enum lttcomm_return_code code;
913 struct lttng_poll_event events;
914 struct consumer_data *consumer_data = data;
915
916 DBG("[thread] Manage consumer started");
917
918 health_register(HEALTH_TYPE_CONSUMER);
919
920 health_code_update();
921
922 /*
923 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
924 * metadata_sock. Nothing more will be added to this poll set.
925 */
926 ret = sessiond_set_thread_pollset(&events, 3);
927 if (ret < 0) {
928 goto error_poll;
929 }
930
931 /*
932 * The error socket here is already in a listening state which was done
933 * just before spawning this thread to avoid a race between the consumer
934 * daemon exec trying to connect and the listen() call.
935 */
936 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
937 if (ret < 0) {
938 goto error;
939 }
940
941 health_code_update();
942
943 /* Infinite blocking call, waiting for transmission */
944 restart:
945 health_poll_entry();
946
947 if (testpoint(thread_manage_consumer)) {
948 goto error;
949 }
950
951 ret = lttng_poll_wait(&events, -1);
952 health_poll_exit();
953 if (ret < 0) {
954 /*
955 * Restart interrupted system call.
956 */
957 if (errno == EINTR) {
958 goto restart;
959 }
960 goto error;
961 }
962
963 nb_fd = ret;
964
965 for (i = 0; i < nb_fd; i++) {
966 /* Fetch once the poll data */
967 revents = LTTNG_POLL_GETEV(&events, i);
968 pollfd = LTTNG_POLL_GETFD(&events, i);
969
970 health_code_update();
971
972 /* Thread quit pipe has been closed. Killing thread. */
973 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
974 if (ret) {
975 err = 0;
976 goto exit;
977 }
978
979 /* Event on the registration socket */
980 if (pollfd == consumer_data->err_sock) {
981 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
982 ERR("consumer err socket poll error");
983 goto error;
984 }
985 }
986 }
987
988 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
989 if (sock < 0) {
990 goto error;
991 }
992
993 /*
994 * Set the CLOEXEC flag. Return code is useless because either way, the
995 * show must go on.
996 */
997 (void) utils_set_fd_cloexec(sock);
998
999 health_code_update();
1000
1001 DBG2("Receiving code from consumer err_sock");
1002
1003 /* Getting status code from kconsumerd */
1004 ret = lttcomm_recv_unix_sock(sock, &code,
1005 sizeof(enum lttcomm_return_code));
1006 if (ret <= 0) {
1007 goto error;
1008 }
1009
1010 health_code_update();
1011
1012 if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
1013 /* Connect both socket, command and metadata. */
1014 consumer_data->cmd_sock =
1015 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1016 consumer_data->metadata_sock.fd =
1017 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1018 if (consumer_data->cmd_sock < 0 ||
1019 consumer_data->metadata_sock.fd < 0) {
1020 PERROR("consumer connect cmd socket");
1021 /* On error, signal condition and quit. */
1022 signal_consumer_condition(consumer_data, -1);
1023 goto error;
1024 }
1025 /* Create metadata socket lock. */
1026 consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
1027 if (consumer_data->metadata_sock.lock == NULL) {
1028 PERROR("zmalloc pthread mutex");
1029 ret = -1;
1030 goto error;
1031 }
1032 pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
1033
1034 signal_consumer_condition(consumer_data, 1);
1035 DBG("Consumer command socket ready (fd: %d)", consumer_data->cmd_sock);
1036 DBG("Consumer metadata socket ready (fd: %d)",
1037 consumer_data->metadata_sock.fd);
1038 } else {
1039 ERR("consumer error when waiting for SOCK_READY: %s",
1040 lttcomm_get_readable_code(-code));
1041 goto error;
1042 }
1043
1044 /* Remove the consumerd error sock since we've established a connection */
1045 ret = lttng_poll_del(&events, consumer_data->err_sock);
1046 if (ret < 0) {
1047 goto error;
1048 }
1049
1050 /* Add new accepted error socket. */
1051 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
1052 if (ret < 0) {
1053 goto error;
1054 }
1055
1056 /* Add metadata socket that is successfully connected. */
1057 ret = lttng_poll_add(&events, consumer_data->metadata_sock.fd,
1058 LPOLLIN | LPOLLRDHUP);
1059 if (ret < 0) {
1060 goto error;
1061 }
1062
1063 health_code_update();
1064
1065 /* Infinite blocking call, waiting for transmission */
1066 restart_poll:
1067 while (1) {
1068 health_poll_entry();
1069 ret = lttng_poll_wait(&events, -1);
1070 health_poll_exit();
1071 if (ret < 0) {
1072 /*
1073 * Restart interrupted system call.
1074 */
1075 if (errno == EINTR) {
1076 goto restart_poll;
1077 }
1078 goto error;
1079 }
1080
1081 nb_fd = ret;
1082
1083 for (i = 0; i < nb_fd; i++) {
1084 /* Fetch once the poll data */
1085 revents = LTTNG_POLL_GETEV(&events, i);
1086 pollfd = LTTNG_POLL_GETFD(&events, i);
1087
1088 health_code_update();
1089
1090 /* Thread quit pipe has been closed. Killing thread. */
1091 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1092 if (ret) {
1093 err = 0;
1094 goto exit;
1095 }
1096
1097 if (pollfd == sock) {
1098 /* Event on the consumerd socket */
1099 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1100 ERR("consumer err socket second poll error");
1101 goto error;
1102 }
1103 health_code_update();
1104 /* Wait for any kconsumerd error */
1105 ret = lttcomm_recv_unix_sock(sock, &code,
1106 sizeof(enum lttcomm_return_code));
1107 if (ret <= 0) {
1108 ERR("consumer closed the command socket");
1109 goto error;
1110 }
1111
1112 ERR("consumer return code: %s",
1113 lttcomm_get_readable_code(-code));
1114
1115 goto exit;
1116 } else if (pollfd == consumer_data->metadata_sock.fd) {
1117 /* UST metadata requests */
1118 ret = ust_consumer_metadata_request(
1119 &consumer_data->metadata_sock);
1120 if (ret < 0) {
1121 ERR("Handling metadata request");
1122 goto error;
1123 }
1124 break;
1125 } else {
1126 ERR("Unknown pollfd");
1127 goto error;
1128 }
1129 }
1130 health_code_update();
1131 }
1132
1133 exit:
1134 error:
1135 /* Immediately set the consumerd state to stopped */
1136 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1137 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1138 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1139 consumer_data->type == LTTNG_CONSUMER32_UST) {
1140 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1141 } else {
1142 /* Code flow error... */
1143 assert(0);
1144 }
1145
1146 if (consumer_data->err_sock >= 0) {
1147 ret = close(consumer_data->err_sock);
1148 if (ret) {
1149 PERROR("close");
1150 }
1151 consumer_data->err_sock = -1;
1152 }
1153 if (consumer_data->cmd_sock >= 0) {
1154 ret = close(consumer_data->cmd_sock);
1155 if (ret) {
1156 PERROR("close");
1157 }
1158 consumer_data->cmd_sock = -1;
1159 }
1160 if (consumer_data->metadata_sock.fd >= 0) {
1161 ret = close(consumer_data->metadata_sock.fd);
1162 if (ret) {
1163 PERROR("close");
1164 }
1165 }
1166 /* Cleanup metadata socket mutex. */
1167 pthread_mutex_destroy(consumer_data->metadata_sock.lock);
1168 free(consumer_data->metadata_sock.lock);
1169
1170 if (sock >= 0) {
1171 ret = close(sock);
1172 if (ret) {
1173 PERROR("close");
1174 }
1175 }
1176
1177 unlink(consumer_data->err_unix_sock_path);
1178 unlink(consumer_data->cmd_unix_sock_path);
1179 consumer_data->pid = 0;
1180
1181 lttng_poll_clean(&events);
1182 error_poll:
1183 if (err) {
1184 health_error();
1185 ERR("Health error occurred in %s", __func__);
1186 }
1187 health_unregister();
1188 DBG("consumer thread cleanup completed");
1189
1190 return NULL;
1191 }
1192
1193 /*
1194 * This thread manages application communication.
1195 */
1196 static void *thread_manage_apps(void *data)
1197 {
1198 int i, ret, pollfd, err = -1;
1199 uint32_t revents, nb_fd;
1200 struct lttng_poll_event events;
1201
1202 DBG("[thread] Manage application started");
1203
1204 rcu_register_thread();
1205 rcu_thread_online();
1206
1207 health_register(HEALTH_TYPE_APP_MANAGE);
1208
1209 if (testpoint(thread_manage_apps)) {
1210 goto error_testpoint;
1211 }
1212
1213 health_code_update();
1214
1215 ret = sessiond_set_thread_pollset(&events, 2);
1216 if (ret < 0) {
1217 goto error_poll_create;
1218 }
1219
1220 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1221 if (ret < 0) {
1222 goto error;
1223 }
1224
1225 if (testpoint(thread_manage_apps_before_loop)) {
1226 goto error;
1227 }
1228
1229 health_code_update();
1230
1231 while (1) {
1232 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
1233
1234 /* Infinite blocking call, waiting for transmission */
1235 restart:
1236 health_poll_entry();
1237 ret = lttng_poll_wait(&events, -1);
1238 health_poll_exit();
1239 if (ret < 0) {
1240 /*
1241 * Restart interrupted system call.
1242 */
1243 if (errno == EINTR) {
1244 goto restart;
1245 }
1246 goto error;
1247 }
1248
1249 nb_fd = ret;
1250
1251 for (i = 0; i < nb_fd; i++) {
1252 /* Fetch once the poll data */
1253 revents = LTTNG_POLL_GETEV(&events, i);
1254 pollfd = LTTNG_POLL_GETFD(&events, i);
1255
1256 health_code_update();
1257
1258 /* Thread quit pipe has been closed. Killing thread. */
1259 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1260 if (ret) {
1261 err = 0;
1262 goto exit;
1263 }
1264
1265 /* Inspect the apps cmd pipe */
1266 if (pollfd == apps_cmd_pipe[0]) {
1267 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1268 ERR("Apps command pipe error");
1269 goto error;
1270 } else if (revents & LPOLLIN) {
1271 int sock;
1272
1273 /* Empty pipe */
1274 do {
1275 ret = read(apps_cmd_pipe[0], &sock, sizeof(sock));
1276 } while (ret < 0 && errno == EINTR);
1277 if (ret < 0 || ret < sizeof(sock)) {
1278 PERROR("read apps cmd pipe");
1279 goto error;
1280 }
1281
1282 health_code_update();
1283
1284 /*
1285 * We only monitor the error events of the socket. This
1286 * thread does not handle any incoming data from UST
1287 * (POLLIN).
1288 */
1289 ret = lttng_poll_add(&events, sock,
1290 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
1291 if (ret < 0) {
1292 goto error;
1293 }
1294
1295 DBG("Apps with sock %d added to poll set", sock);
1296 }
1297 } else {
1298 /*
1299 * At this point, we know that a registered application made
1300 * the event at poll_wait.
1301 */
1302 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1303 /* Removing from the poll set */
1304 ret = lttng_poll_del(&events, pollfd);
1305 if (ret < 0) {
1306 goto error;
1307 }
1308
1309 /* Socket closed on remote end. */
1310 ust_app_unregister(pollfd);
1311 }
1312 }
1313
1314 health_code_update();
1315 }
1316 }
1317
1318 exit:
1319 error:
1320 lttng_poll_clean(&events);
1321 error_poll_create:
1322 error_testpoint:
1323 utils_close_pipe(apps_cmd_pipe);
1324 apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
1325
1326 /*
1327 * We don't clean the UST app hash table here since already registered
1328 * applications can still be controlled so let them be until the session
1329 * daemon dies or the applications stop.
1330 */
1331
1332 if (err) {
1333 health_error();
1334 ERR("Health error occurred in %s", __func__);
1335 }
1336 health_unregister();
1337 DBG("Application communication apps thread cleanup complete");
1338 rcu_thread_offline();
1339 rcu_unregister_thread();
1340 return NULL;
1341 }
1342
1343 /*
1344 * Send a socket to a thread. This is called from the dispatch UST registration
1345 * thread once all sockets are set for the application.
1346 *
1347 * The sock value can be invalid; we don't really care since the thread
1348 * will handle it and do the necessary cleanup if needed.
1349 *
1350 * On success, return 0 else a negative value being the errno message of the
1351 * write().
1352 */
1353 static int send_socket_to_thread(int fd, int sock)
1354 {
1355 int ret;
1356
1357 /*
1358 * It's possible that the FD is set as invalid with -1 concurrently just
1359 * before calling this function being a shutdown state of the thread.
1360 */
1361 if (fd < 0) {
1362 ret = -EBADF;
1363 goto error;
1364 }
1365
1366 do {
1367 ret = write(fd, &sock, sizeof(sock));
1368 } while (ret < 0 && errno == EINTR);
1369 if (ret < 0 || ret != sizeof(sock)) {
1370 PERROR("write apps pipe %d", fd);
1371 if (ret < 0) {
1372 ret = -errno;
1373 }
1374 goto error;
1375 }
1376
1377 /* All good. Don't send back the write positive ret value. */
1378 ret = 0;
1379 error:
1380 return ret;
1381 }
1382
1383 /*
1384 * Sanitize the wait queue of the dispatch registration thread by removing
1385 * invalid nodes from it. This avoids memory leaks in case the UST
1386 * notify socket is never received.
1387 */
1388 static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
1389 {
1390 int ret, nb_fd = 0, i;
1391 unsigned int fd_added = 0;
1392 struct lttng_poll_event events;
1393 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1394
1395 assert(wait_queue);
1396
1397 lttng_poll_init(&events);
1398
1399 /* Just skip everything for an empty queue. */
1400 if (!wait_queue->count) {
1401 goto end;
1402 }
1403
1404 ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
1405 if (ret < 0) {
1406 goto error_create;
1407 }
1408
1409 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1410 &wait_queue->head, head) {
1411 assert(wait_node->app);
1412 ret = lttng_poll_add(&events, wait_node->app->sock,
1413 LPOLLHUP | LPOLLERR);
1414 if (ret < 0) {
1415 goto error;
1416 }
1417
1418 fd_added = 1;
1419 }
1420
1421 if (!fd_added) {
1422 goto end;
1423 }
1424
1425 /*
1426 * Poll but don't block so we can quickly identify the faulty events and
1427 * clean them afterwards from the wait queue.
1428 */
1429 ret = lttng_poll_wait(&events, 0);
1430 if (ret < 0) {
1431 goto error;
1432 }
1433 nb_fd = ret;
1434
1435 for (i = 0; i < nb_fd; i++) {
1436 /* Get faulty FD. */
1437 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
1438 int pollfd = LTTNG_POLL_GETFD(&events, i);
1439
1440 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1441 &wait_queue->head, head) {
1442 if (pollfd == wait_node->app->sock &&
1443 (revents & (LPOLLHUP | LPOLLERR))) {
1444 cds_list_del(&wait_node->head);
1445 wait_queue->count--;
1446 ust_app_destroy(wait_node->app);
1447 free(wait_node);
1448 break;
1449 }
1450 }
1451 }
1452
1453 if (nb_fd > 0) {
1454 DBG("Wait queue sanitized, %d nodes were cleaned up", nb_fd);
1455 }
1456
1457 end:
1458 lttng_poll_clean(&events);
1459 return;
1460
1461 error:
1462 lttng_poll_clean(&events);
1463 error_create:
1464 ERR("Unable to sanitize wait queue");
1465 return;
1466 }
1467
1468 /*
1469 * Dispatch request from the registration threads to the application
1470 * communication thread.
1471 */
1472 static void *thread_dispatch_ust_registration(void *data)
1473 {
1474 int ret, err = -1;
1475 struct cds_wfq_node *node;
1476 struct ust_command *ust_cmd = NULL;
1477 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1478 struct ust_reg_wait_queue wait_queue = {
1479 .count = 0,
1480 };
1481
1482 health_register(HEALTH_TYPE_APP_REG_DISPATCH);
1483
1484 health_code_update();
1485
1486 CDS_INIT_LIST_HEAD(&wait_queue.head);
1487
1488 DBG("[thread] Dispatch UST command started");
1489
1490 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
1491 health_code_update();
1492
1493 /* Atomically prepare the queue futex */
1494 futex_nto1_prepare(&ust_cmd_queue.futex);
1495
1496 do {
1497 struct ust_app *app = NULL;
1498 ust_cmd = NULL;
1499
1500 /*
1501 * Make sure we don't have node(s) that have hung up before receiving
1502 * the notify socket. This is to clean the list in order to avoid
1503 * memory leaks from notify sockets that are never seen.
1504 */
1505 sanitize_wait_queue(&wait_queue);
1506
1507 health_code_update();
1508 /* Dequeue command for registration */
1509 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1510 if (node == NULL) {
1511 DBG("Woken up but nothing in the UST command queue");
1512 /* Continue thread execution */
1513 break;
1514 }
1515
1516 ust_cmd = caa_container_of(node, struct ust_command, node);
1517
1518 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1519 " gid:%d sock:%d name:%s (version %d.%d)",
1520 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1521 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1522 ust_cmd->sock, ust_cmd->reg_msg.name,
1523 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1524
1525 if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
1526 wait_node = zmalloc(sizeof(*wait_node));
1527 if (!wait_node) {
1528 PERROR("zmalloc wait_node dispatch");
1529 ret = close(ust_cmd->sock);
1530 if (ret < 0) {
1531 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1532 }
1533 lttng_fd_put(LTTNG_FD_APPS, 1);
1534 free(ust_cmd);
1535 goto error;
1536 }
1537 CDS_INIT_LIST_HEAD(&wait_node->head);
1538
1539 /* Create application object if socket is CMD. */
1540 wait_node->app = ust_app_create(&ust_cmd->reg_msg,
1541 ust_cmd->sock);
1542 if (!wait_node->app) {
1543 ret = close(ust_cmd->sock);
1544 if (ret < 0) {
1545 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1546 }
1547 lttng_fd_put(LTTNG_FD_APPS, 1);
1548 free(wait_node);
1549 free(ust_cmd);
1550 continue;
1551 }
1552 /*
1553 * Add application to the wait queue so we can set the notify
1554 * socket before putting this object in the global ht.
1555 */
1556 cds_list_add(&wait_node->head, &wait_queue.head);
1557 wait_queue.count++;
1558
1559 free(ust_cmd);
1560 /*
1561 * We have to continue here since we don't have the notify
1562 * socket and the application MUST be added to the hash table
1563 * only at that moment.
1564 */
1565 continue;
1566 } else {
1567 /*
1568 * Look for the application in the local wait queue and set the
1569 * notify socket if found.
1570 */
1571 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1572 &wait_queue.head, head) {
1573 health_code_update();
1574 if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
1575 wait_node->app->notify_sock = ust_cmd->sock;
1576 cds_list_del(&wait_node->head);
1577 wait_queue.count--;
1578 app = wait_node->app;
1579 free(wait_node);
1580 DBG3("UST app notify socket %d is set", ust_cmd->sock);
1581 break;
1582 }
1583 }
1584
1585 /*
1586 * With no application at this stage the received socket is
1587 * basically useless so close it before we free the cmd data
1588 * structure for good.
1589 */
1590 if (!app) {
1591 ret = close(ust_cmd->sock);
1592 if (ret < 0) {
1593 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1594 }
1595 lttng_fd_put(LTTNG_FD_APPS, 1);
1596 }
1597 free(ust_cmd);
1598 }
1599
1600 if (app) {
1601 /*
1602 * @session_lock_list
1603 *
1604 * Lock the global session list so from the register up to the
1605 * registration done message, no thread can see the application
1606 * and change its state.
1607 */
1608 session_lock_list();
1609 rcu_read_lock();
1610
1611 /*
1612 * Add application to the global hash table. This needs to be
1613 * done before the update to the UST registry can locate the
1614 * application.
1615 */
1616 ust_app_add(app);
1617
1618 /* Set app version. This call will print an error if needed. */
1619 (void) ust_app_version(app);
1620
1621 /* Send notify socket through the notify pipe. */
1622 ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
1623 app->notify_sock);
1624 if (ret < 0) {
1625 rcu_read_unlock();
1626 session_unlock_list();
1627 /*
1628 * No notify thread, stop the UST tracing. However, this is
1629 * not an internal error of this thread, thus setting
1630 * the health error code to a normal exit.
1631 */
1632 err = 0;
1633 goto error;
1634 }
1635
1636 /*
1637 * Update newly registered application with the tracing
1638 * registry info already enabled information.
1639 */
1640 update_ust_app(app->sock);
1641
1642 /*
1643 * Don't care about return value. Let the manage apps threads
1644 * handle app unregistration upon socket close.
1645 */
1646 (void) ust_app_register_done(app->sock);
1647
1648 /*
1649 * Even if the application socket has been closed, send the app
1650 * to the thread and unregistration will take place at that
1651 * place.
1652 */
1653 ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
1654 if (ret < 0) {
1655 rcu_read_unlock();
1656 session_unlock_list();
1657 /*
1658 * No apps. thread, stop the UST tracing. However, this is
1659 * not an internal error of this thread, thus setting
1660 * the health error code to a normal exit.
1661 */
1662 err = 0;
1663 goto error;
1664 }
1665
1666 rcu_read_unlock();
1667 session_unlock_list();
1668 }
1669 } while (node != NULL);
1670
1671 health_poll_entry();
1672 /* Futex wait on queue. Blocking call on futex() */
1673 futex_nto1_wait(&ust_cmd_queue.futex);
1674 health_poll_exit();
1675 }
1676 /* Normal exit, no error */
1677 err = 0;
1678
1679 error:
1680 /* Clean up wait queue. */
1681 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1682 &wait_queue.head, head) {
1683 cds_list_del(&wait_node->head);
1684 wait_queue.count--;
1685 free(wait_node);
1686 }
1687
1688 DBG("Dispatch thread dying");
1689 if (err) {
1690 health_error();
1691 ERR("Health error occurred in %s", __func__);
1692 }
1693 health_unregister();
1694 return NULL;
1695 }
1696
1697 /*
1698 * This thread manages application registration.
1699 */
1700 static void *thread_registration_apps(void *data)
1701 {
1702 int sock = -1, i, ret, pollfd, err = -1;
1703 uint32_t revents, nb_fd;
1704 struct lttng_poll_event events;
1705 /*
1706 * Gets allocated in this thread, enqueued to a global queue, dequeued and
1707 * freed in the manage apps thread.
1708 */
1709 struct ust_command *ust_cmd = NULL;
1710
1711 DBG("[thread] Manage application registration started");
1712
1713 health_register(HEALTH_TYPE_APP_REG);
1714
1715 if (testpoint(thread_registration_apps)) {
1716 goto error_testpoint;
1717 }
1718
1719 ret = lttcomm_listen_unix_sock(apps_sock);
1720 if (ret < 0) {
1721 goto error_listen;
1722 }
1723
1724 /*
1725 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1726 * more will be added to this poll set.
1727 */
1728 ret = sessiond_set_thread_pollset(&events, 2);
1729 if (ret < 0) {
1730 goto error_create_poll;
1731 }
1732
1733 /* Add the application registration socket */
1734 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1735 if (ret < 0) {
1736 goto error_poll_add;
1737 }
1738
1739 /* Notify all applications to register */
1740 ret = notify_ust_apps(1);
1741 if (ret < 0) {
1742 ERR("Failed to notify applications or create the wait shared memory.\n"
1743 "Execution continues but there might be problems for already\n"
1744 "running applications that wish to register.");
1745 }
1746
1747 while (1) {
1748 DBG("Accepting application registration");
1749
1750 /* Infinite blocking call, waiting for transmission */
1751 restart:
1752 health_poll_entry();
1753 ret = lttng_poll_wait(&events, -1);
1754 health_poll_exit();
1755 if (ret < 0) {
1756 /*
1757 * Restart interrupted system call.
1758 */
1759 if (errno == EINTR) {
1760 goto restart;
1761 }
1762 goto error;
1763 }
1764
1765 nb_fd = ret;
1766
1767 for (i = 0; i < nb_fd; i++) {
1768 health_code_update();
1769
1770 /* Fetch once the poll data */
1771 revents = LTTNG_POLL_GETEV(&events, i);
1772 pollfd = LTTNG_POLL_GETFD(&events, i);
1773
1774 /* Thread quit pipe has been closed. Killing thread. */
1775 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1776 if (ret) {
1777 err = 0;
1778 goto exit;
1779 }
1780
1781 /* Event on the registration socket */
1782 if (pollfd == apps_sock) {
1783 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1784 ERR("Register apps socket poll error");
1785 goto error;
1786 } else if (revents & LPOLLIN) {
1787 sock = lttcomm_accept_unix_sock(apps_sock);
1788 if (sock < 0) {
1789 goto error;
1790 }
1791
1792 /*
1793 * Set socket timeout for both receiving and sending.
1794 * app_socket_timeout is in seconds, whereas
1795 * lttcomm_setsockopt_rcv_timeout and
1796 * lttcomm_setsockopt_snd_timeout expect msec as
1797 * parameter.
1798 */
1799 (void) lttcomm_setsockopt_rcv_timeout(sock,
1800 app_socket_timeout);
1801 (void) lttcomm_setsockopt_snd_timeout(sock,
1802 app_socket_timeout);
1803
1804 /*
1805 * Set the CLOEXEC flag. Return code is useless because
1806 * either way, the show must go on.
1807 */
1808 (void) utils_set_fd_cloexec(sock);
1809
1810 /* Create UST registration command for enqueuing */
1811 ust_cmd = zmalloc(sizeof(struct ust_command));
1812 if (ust_cmd == NULL) {
1813 PERROR("ust command zmalloc");
1814 goto error;
1815 }
1816
1817 /*
1818 * Using message-based transmissions to ensure we don't
1819 * have to deal with partially received messages.
1820 */
1821 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1822 if (ret < 0) {
1823 ERR("Exhausted file descriptors allowed for applications.");
1824 free(ust_cmd);
1825 ret = close(sock);
1826 if (ret) {
1827 PERROR("close");
1828 }
1829 sock = -1;
1830 continue;
1831 }
1832
1833 health_code_update();
1834 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
1835 if (ret < 0) {
1836 free(ust_cmd);
1837 /* Close socket of the application. */
1838 ret = close(sock);
1839 if (ret) {
1840 PERROR("close");
1841 }
1842 lttng_fd_put(LTTNG_FD_APPS, 1);
1843 sock = -1;
1844 continue;
1845 }
1846 health_code_update();
1847
1848 ust_cmd->sock = sock;
1849 sock = -1;
1850
1851 DBG("UST registration received with pid:%d ppid:%d uid:%d"
1852 " gid:%d sock:%d name:%s (version %d.%d)",
1853 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1854 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1855 ust_cmd->sock, ust_cmd->reg_msg.name,
1856 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1857
1858 /*
1859 * Lock-free enqueue of the registration request. The red pill
1860 * has been taken! This app will be part of the *system*.
1861 */
1862 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
1863
1864 /*
1865 * Wake the registration queue futex. Implicit memory
1866 * barrier with the exchange in cds_wfq_enqueue.
1867 */
1868 futex_nto1_wake(&ust_cmd_queue.futex);
1869 }
1870 }
1871 }
1872 }
1873
1874 exit:
1875 error:
1876 if (err) {
1877 health_error();
1878 ERR("Health error occurred in %s", __func__);
1879 }
1880
1881 /* Notify that the registration thread is gone */
1882 notify_ust_apps(0);
1883
1884 if (apps_sock >= 0) {
1885 ret = close(apps_sock);
1886 if (ret) {
1887 PERROR("close");
1888 }
1889 }
1890 if (sock >= 0) {
1891 ret = close(sock);
1892 if (ret) {
1893 PERROR("close");
1894 }
1895 lttng_fd_put(LTTNG_FD_APPS, 1);
1896 }
1897 unlink(apps_unix_sock_path);
1898
1899 error_poll_add:
1900 lttng_poll_clean(&events);
1901 error_listen:
1902 error_create_poll:
1903 error_testpoint:
1904 DBG("UST Registration thread cleanup complete");
1905 health_unregister();
1906
1907 return NULL;
1908 }
1909
1910 /*
1911 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
1912 * exec or it will fail.
1913 */
1914 static int spawn_consumer_thread(struct consumer_data *consumer_data)
1915 {
1916 int ret, clock_ret;
1917 struct timespec timeout;
1918
1919 /* Make sure we set the readiness flag to 0 because we are NOT ready */
1920 consumer_data->consumer_thread_is_ready = 0;
1921
1922 /* Setup pthread condition */
1923 ret = pthread_condattr_init(&consumer_data->condattr);
1924 if (ret != 0) {
1925 errno = ret;
1926 PERROR("pthread_condattr_init consumer data");
1927 goto error;
1928 }
1929
1930 /*
1931 * Set the monotonic clock in order to make sure we DO NOT jump in time
1932 * between the clock_gettime() call and the timedwait call. See bug #324
1933 * for more details and how we noticed it.
1934 */
1935 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
1936 if (ret != 0) {
1937 errno = ret;
1938 PERROR("pthread_condattr_setclock consumer data");
1939 goto error;
1940 }
1941
1942 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
1943 if (ret != 0) {
1944 errno = ret;
1945 PERROR("pthread_cond_init consumer data");
1946 goto error;
1947 }
1948
1949 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
1950 consumer_data);
1951 if (ret != 0) {
1952 PERROR("pthread_create consumer");
1953 ret = -1;
1954 goto error;
1955 }
1956
1957 /* We are about to wait on a pthread condition */
1958 pthread_mutex_lock(&consumer_data->cond_mutex);
1959
1960 /* Get time for sem_timedwait absolute timeout */
1961 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
1962 /*
1963 * Set the timeout for the condition timed wait even if the clock gettime
1964 * call fails since we might loop on that call and we want to avoid to
1965 * increment the timeout too many times.
1966 */
1967 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
1968
1969 /*
1970 * The following loop COULD be skipped in some conditions, so we set ret
1971 * to 0 here in order to make sure at least one round of the loop is
1972 * done.
1973 */
1974 ret = 0;
1975
1976 /*
1977 * Loop until the condition is reached or when a timeout is reached. Note
1978 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
1979 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
1980 * possible. This loop does not take any chances and works with both of
1981 * them.
1982 */
1983 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
1984 if (clock_ret < 0) {
1985 PERROR("clock_gettime spawn consumer");
1986 /* Infinite wait for the consumerd thread to be ready */
1987 ret = pthread_cond_wait(&consumer_data->cond,
1988 &consumer_data->cond_mutex);
1989 } else {
1990 ret = pthread_cond_timedwait(&consumer_data->cond,
1991 &consumer_data->cond_mutex, &timeout);
1992 }
1993 }
1994
1995 /* Release the pthread condition */
1996 pthread_mutex_unlock(&consumer_data->cond_mutex);
1997
1998 if (ret != 0) {
1999 errno = ret;
2000 if (ret == ETIMEDOUT) {
2001 /*
2002 * Call has timed out so we kill the kconsumerd_thread and return
2003 * an error.
2004 */
2005 ERR("Condition timed out. The consumer thread was never ready."
2006 " Killing it");
2007 ret = pthread_cancel(consumer_data->thread);
2008 if (ret < 0) {
2009 PERROR("pthread_cancel consumer thread");
2010 }
2011 } else {
2012 PERROR("pthread_cond_wait failed consumer thread");
2013 }
2014 goto error;
2015 }
2016
2017 pthread_mutex_lock(&consumer_data->pid_mutex);
2018 if (consumer_data->pid == 0) {
2019 ERR("Consumerd did not start");
2020 pthread_mutex_unlock(&consumer_data->pid_mutex);
2021 goto error;
2022 }
2023 pthread_mutex_unlock(&consumer_data->pid_mutex);
2024
2025 return 0;
2026
2027 error:
2028 return ret;
2029 }
2030
2031 /*
2032 * Join consumer thread
2033 */
2034 static int join_consumer_thread(struct consumer_data *consumer_data)
2035 {
2036 void *status;
2037
2038 /* Consumer pid must be a real one. */
2039 if (consumer_data->pid > 0) {
2040 int ret;
2041 ret = kill(consumer_data->pid, SIGTERM);
2042 if (ret) {
2043 ERR("Error killing consumer daemon");
2044 return ret;
2045 }
2046 return pthread_join(consumer_data->thread, &status);
2047 } else {
2048 return 0;
2049 }
2050 }
2051
2052 /*
2053 * Fork and exec a consumer daemon (consumerd).
2054 *
2055 * Return pid if successful else -1.
2056 */
2057 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2058 {
2059 int ret;
2060 pid_t pid;
2061 const char *consumer_to_use;
2062 const char *verbosity;
2063 struct stat st;
2064
2065 DBG("Spawning consumerd");
2066
2067 pid = fork();
2068 if (pid == 0) {
2069 /*
2070 * Exec consumerd.
2071 */
2072 if (opt_verbose_consumer) {
2073 verbosity = "--verbose";
2074 } else {
2075 verbosity = "--quiet";
2076 }
2077 switch (consumer_data->type) {
2078 case LTTNG_CONSUMER_KERNEL:
2079 /*
2080 * Find out which consumerd to execute. We will first try the
2081 * 64-bit path, then the sessiond's installation directory, and
2082 * fall back on the 32-bit one.
2083 */
2084 DBG3("Looking for a kernel consumer at these locations:");
2085 DBG3(" 1) %s", consumerd64_bin);
2086 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2087 DBG3(" 3) %s", consumerd32_bin);
2088 if (stat(consumerd64_bin, &st) == 0) {
2089 DBG3("Found location #1");
2090 consumer_to_use = consumerd64_bin;
2091 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2092 DBG3("Found location #2");
2093 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2094 } else if (stat(consumerd32_bin, &st) == 0) {
2095 DBG3("Found location #3");
2096 consumer_to_use = consumerd32_bin;
2097 } else {
2098 DBG("Could not find any valid consumerd executable");
2099 break;
2100 }
2101 DBG("Using kernel consumer at: %s", consumer_to_use);
2102 execl(consumer_to_use,
2103 "lttng-consumerd", verbosity, "-k",
2104 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2105 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2106 NULL);
2107 break;
2108 case LTTNG_CONSUMER64_UST:
2109 {
2110 char *tmpnew = NULL;
2111
2112 if (consumerd64_libdir[0] != '\0') {
2113 char *tmp;
2114 size_t tmplen;
2115
2116 tmp = getenv("LD_LIBRARY_PATH");
2117 if (!tmp) {
2118 tmp = "";
2119 }
2120 tmplen = strlen("LD_LIBRARY_PATH=")
2121 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2122 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2123 if (!tmpnew) {
2124 ret = -ENOMEM;
2125 goto error;
2126 }
2127 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2128 strcat(tmpnew, consumerd64_libdir);
2129 if (tmp[0] != '\0') {
2130 strcat(tmpnew, ":");
2131 strcat(tmpnew, tmp);
2132 }
2133 ret = putenv(tmpnew);
2134 if (ret) {
2135 ret = -errno;
2136 free(tmpnew);
2137 goto error;
2138 }
2139 }
2140 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2141 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2142 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2143 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2144 NULL);
2145 if (consumerd64_libdir[0] != '\0') {
2146 free(tmpnew);
2147 }
2148 if (ret) {
2149 goto error;
2150 }
2151 break;
2152 }
2153 case LTTNG_CONSUMER32_UST:
2154 {
2155 char *tmpnew = NULL;
2156
2157 if (consumerd32_libdir[0] != '\0') {
2158 char *tmp;
2159 size_t tmplen;
2160
2161 tmp = getenv("LD_LIBRARY_PATH");
2162 if (!tmp) {
2163 tmp = "";
2164 }
2165 tmplen = strlen("LD_LIBRARY_PATH=")
2166 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2167 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2168 if (!tmpnew) {
2169 ret = -ENOMEM;
2170 goto error;
2171 }
2172 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2173 strcat(tmpnew, consumerd32_libdir);
2174 if (tmp[0] != '\0') {
2175 strcat(tmpnew, ":");
2176 strcat(tmpnew, tmp);
2177 }
2178 ret = putenv(tmpnew);
2179 if (ret) {
2180 ret = -errno;
2181 free(tmpnew);
2182 goto error;
2183 }
2184 }
2185 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2186 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2187 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2188 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2189 NULL);
2190 if (consumerd32_libdir[0] != '\0') {
2191 free(tmpnew);
2192 }
2193 if (ret) {
2194 goto error;
2195 }
2196 break;
2197 }
2198 default:
2199 PERROR("unknown consumer type");
2200 exit(EXIT_FAILURE);
2201 }
2202 if (errno != 0) {
2203 PERROR("consumerd exec");
2204 }
2205 exit(EXIT_FAILURE);
2206 } else if (pid > 0) {
2207 ret = pid;
2208 } else {
2209 PERROR("start consumer fork");
2210 ret = -errno;
2211 }
2212 error:
2213 return ret;
2214 }
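
/*
 * Illustrative sketch (not part of the daemon): the LD_LIBRARY_PATH
 * handling above, rewritten with snprintf()/setenv() instead of the
 * manual strcpy()/strcat()/putenv() sequence. setenv() copies its
 * argument, which avoids keeping the buffer alive across exec(). The
 * helper name is hypothetical; assumes <stdlib.h>, <stdio.h>,
 * <string.h> and <limits.h> from the top of this file.
 */
static int example_prepend_ld_library_path(const char *libdir)
{
	int ret;
	char buf[PATH_MAX * 2];
	const char *current = getenv("LD_LIBRARY_PATH");

	if (!current || current[0] == '\0') {
		ret = snprintf(buf, sizeof(buf), "%s", libdir);
	} else {
		/* Prepend libdir to the existing search path. */
		ret = snprintf(buf, sizeof(buf), "%s:%s", libdir, current);
	}
	if (ret < 0 || ret >= (int) sizeof(buf)) {
		return -1;	/* Formatting error or truncation. */
	}

	/* Overwrite any previous value. */
	return setenv("LD_LIBRARY_PATH", buf, 1);
}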
2215
2216 /*
2217 * Spawn the consumer daemon and the session daemon thread managing it.
2218 */
2219 static int start_consumerd(struct consumer_data *consumer_data)
2220 {
2221 int ret;
2222
2223 /*
2224 * Set the listen() state on the socket since there is a possible race
2225 * between the exec() of the consumer daemon and this call, if placed in
2226 * the consumer thread. See bug #366 for more details.
2227 */
2228 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2229 if (ret < 0) {
2230 goto error;
2231 }
2232
2233 pthread_mutex_lock(&consumer_data->pid_mutex);
2234 if (consumer_data->pid != 0) {
2235 pthread_mutex_unlock(&consumer_data->pid_mutex);
2236 goto end;
2237 }
2238
2239 ret = spawn_consumerd(consumer_data);
2240 if (ret < 0) {
2241 ERR("Spawning consumerd failed");
2242 pthread_mutex_unlock(&consumer_data->pid_mutex);
2243 goto error;
2244 }
2245
2246 /* Setting up the consumer_data pid */
2247 consumer_data->pid = ret;
2248 DBG2("Consumer pid %d", consumer_data->pid);
2249 pthread_mutex_unlock(&consumer_data->pid_mutex);
2250
2251 DBG2("Spawning consumer control thread");
2252 ret = spawn_consumer_thread(consumer_data);
2253 if (ret < 0) {
2254 ERR("Fatal error spawning consumer control thread");
2255 goto error;
2256 }
2257
2258 end:
2259 return 0;
2260
2261 error:
2262 /* Cleanup already created sockets on error. */
2263 if (consumer_data->err_sock >= 0) {
2264 int err;
2265
2266 err = close(consumer_data->err_sock);
2267 if (err < 0) {
2268 PERROR("close consumer data error socket");
2269 }
2270 }
2271 return ret;
2272 }
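
/*
 * Illustrative sketch (not part of the daemon): why listen() is called
 * before spawning. The error socket must already be in the listening
 * state when the consumerd child connect()s to it; otherwise the
 * connection can be refused if the child wins the race. The function
 * and parameter names are hypothetical; assumes <sys/socket.h>,
 * <unistd.h> and <stdlib.h>.
 */
static pid_t example_listen_then_spawn(int err_sock, const char *bin)
{
	pid_t pid;

	/* 1) Enter the listening state *before* the child can connect. */
	if (listen(err_sock, 10) < 0) {
		return -1;
	}

	/* 2) Only then fork and exec the consumer daemon. */
	pid = fork();
	if (pid == 0) {
		execl(bin, bin, (char *) NULL);
		_exit(EXIT_FAILURE);	/* Only reached if execl() fails. */
	}
	return pid;	/* Child pid, or -1 if fork() failed. */
}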
2273
2274 /*
2275 * Compute the health status of the consumers. If any of them is in a
2276 * bad state, 0 is returned.
2277 */
2278 static int check_consumer_health(void)
2279 {
2280 int ret;
2281
2282 ret = health_check_state(HEALTH_TYPE_CONSUMER);
2283
2284 DBG3("Health consumer check %d", ret);
2285
2286 return ret;
2287 }
2288
2289 /*
2290 * Setup necessary data for kernel tracer action.
2291 */
2292 static int init_kernel_tracer(void)
2293 {
2294 int ret;
2295
2296 /* Modprobe lttng kernel modules */
2297 ret = modprobe_lttng_control();
2298 if (ret < 0) {
2299 goto error;
2300 }
2301
2302 /* Open debugfs lttng */
2303 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2304 if (kernel_tracer_fd < 0) {
2305 DBG("Failed to open %s", module_proc_lttng);
2306 ret = -1;
2307 goto error_open;
2308 }
2309
2310 /* Validate kernel version */
2311 ret = kernel_validate_version(kernel_tracer_fd);
2312 if (ret < 0) {
2313 goto error_version;
2314 }
2315
2316 ret = modprobe_lttng_data();
2317 if (ret < 0) {
2318 goto error_modules;
2319 }
2320
2321 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2322 return 0;
2323
2324 error_version:
2325 modprobe_remove_lttng_control();
2326 ret = close(kernel_tracer_fd);
2327 if (ret) {
2328 PERROR("close");
2329 }
2330 kernel_tracer_fd = -1;
2331 return LTTNG_ERR_KERN_VERSION;
2332
2333 error_modules:
2334 ret = close(kernel_tracer_fd);
2335 if (ret) {
2336 PERROR("close");
2337 }
2338
2339 error_open:
2340 modprobe_remove_lttng_control();
2341
2342 error:
2343 WARN("No kernel tracer available");
2344 kernel_tracer_fd = -1;
2345 if (!is_root) {
2346 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2347 } else {
2348 return LTTNG_ERR_KERN_NA;
2349 }
2350 }
2351
2352
2353 /*
2354 * Copy consumer output from the tracing session to the domain session. The
2355 * function also applies the correct per-domain modification to the trace
2356 * files' destination directory.
2357 *
2358 * Should *NOT* be called with RCU read-side lock held.
2359 */
2360 static int copy_session_consumer(int domain, struct ltt_session *session)
2361 {
2362 int ret;
2363 const char *dir_name;
2364 struct consumer_output *consumer;
2365
2366 assert(session);
2367 assert(session->consumer);
2368
2369 switch (domain) {
2370 case LTTNG_DOMAIN_KERNEL:
2371 DBG3("Copying tracing session consumer output in kernel session");
2372 /*
2373 * XXX: We should audit session creation and what this function does
2374 * "extra" in order to avoid the destroy call, since this function is
2375 * only used during domain session creation (kernel and UST). The same
2376 * applies to the UST domain below.
2377 */
2378 if (session->kernel_session->consumer) {
2379 consumer_destroy_output(session->kernel_session->consumer);
2380 }
2381 session->kernel_session->consumer =
2382 consumer_copy_output(session->consumer);
2383 /* Ease our life a bit for the next part */
2384 consumer = session->kernel_session->consumer;
2385 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2386 break;
2387 case LTTNG_DOMAIN_UST:
2388 DBG3("Copying tracing session consumer output in UST session");
2389 if (session->ust_session->consumer) {
2390 consumer_destroy_output(session->ust_session->consumer);
2391 }
2392 session->ust_session->consumer =
2393 consumer_copy_output(session->consumer);
2394 /* Ease our life a bit for the next part */
2395 consumer = session->ust_session->consumer;
2396 dir_name = DEFAULT_UST_TRACE_DIR;
2397 break;
2398 default:
2399 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2400 goto error;
2401 }
2402
2403 /* Append correct directory to subdir */
2404 strncat(consumer->subdir, dir_name,
2405 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2406 DBG3("Copy session consumer subdir %s", consumer->subdir);
2407
2408 ret = LTTNG_OK;
2409
2410 error:
2411 return ret;
2412 }
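
/*
 * Illustrative sketch (not part of the daemon): the bounded append
 * idiom used above. strncat() must be given the *remaining* space in
 * the destination, minus one byte for the terminating NUL, or it can
 * overflow. The helper name is hypothetical; assumes <string.h>.
 */
static int example_append_subdir(char *subdir, size_t subdir_size,
		const char *dir_name)
{
	size_t used = strlen(subdir);

	/* Refuse to append when the result would be truncated. */
	if (used + strlen(dir_name) + 1 > subdir_size) {
		return -1;
	}
	strncat(subdir, dir_name, subdir_size - used - 1);
	return 0;
}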
2413
2414 /*
2415 * Create a UST session and add it to the session UST list.
2416 *
2417 * Should *NOT* be called with RCU read-side lock held.
2418 */
2419 static int create_ust_session(struct ltt_session *session,
2420 struct lttng_domain *domain)
2421 {
2422 int ret;
2423 struct ltt_ust_session *lus = NULL;
2424
2425 assert(session);
2426 assert(domain);
2427 assert(session->consumer);
2428
2429 switch (domain->type) {
2430 case LTTNG_DOMAIN_UST:
2431 break;
2432 default:
2433 ERR("Unknown UST domain on create session %d", domain->type);
2434 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2435 goto error;
2436 }
2437
2438 DBG("Creating UST session");
2439
2440 lus = trace_ust_create_session(session->id);
2441 if (lus == NULL) {
2442 ret = LTTNG_ERR_UST_SESS_FAIL;
2443 goto error;
2444 }
2445
2446 lus->uid = session->uid;
2447 lus->gid = session->gid;
2448 session->ust_session = lus;
2449
2450 /* Copy session output to the newly created UST session */
2451 ret = copy_session_consumer(domain->type, session);
2452 if (ret != LTTNG_OK) {
2453 goto error;
2454 }
2455
2456 return LTTNG_OK;
2457
2458 error:
2459 free(lus);
2460 session->ust_session = NULL;
2461 return ret;
2462 }
2463
2464 /*
2465 * Create a kernel tracer session then create the default channel.
2466 */
2467 static int create_kernel_session(struct ltt_session *session)
2468 {
2469 int ret;
2470
2471 DBG("Creating kernel session");
2472
2473 ret = kernel_create_session(session, kernel_tracer_fd);
2474 if (ret < 0) {
2475 ret = LTTNG_ERR_KERN_SESS_FAIL;
2476 goto error;
2477 }
2478
2479 /* Code flow safety */
2480 assert(session->kernel_session);
2481
2482 /* Copy session output to the newly created Kernel session */
2483 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2484 if (ret != LTTNG_OK) {
2485 goto error;
2486 }
2487
2488 /* Create directory(ies) on local filesystem. */
2489 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2490 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2491 ret = run_as_mkdir_recursive(
2492 session->kernel_session->consumer->dst.trace_path,
2493 S_IRWXU | S_IRWXG, session->uid, session->gid);
2494 if (ret < 0) {
2495 if (ret != -EEXIST) {
2496 ERR("Trace directory creation error");
2497 goto error;
2498 }
2499 }
2500 }
2501
2502 session->kernel_session->uid = session->uid;
2503 session->kernel_session->gid = session->gid;
2504
2505 return LTTNG_OK;
2506
2507 error:
2508 trace_kernel_destroy_session(session->kernel_session);
2509 session->kernel_session = NULL;
2510 return ret;
2511 }
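
/*
 * Illustrative sketch (not part of the daemon): the "create if absent"
 * treatment of the trace directory above. An already existing
 * directory is not an error, so EEXIST is filtered out; everything
 * else is reported. The helper name is hypothetical; assumes
 * <sys/stat.h> and <errno.h> via the headers at the top of this file.
 */
static int example_mkdir_allow_exist(const char *path, mode_t mode)
{
	int ret;

	ret = mkdir(path, mode);
	if (ret < 0 && errno != EEXIST) {
		/* Real failure: permissions, missing parent, ... */
		return -errno;
	}
	return 0;	/* Created, or it was already there. */
}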
2512
2513 /*
2514 * Count the number of sessions accessible by the given uid/gid.
2515 */
2516 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2517 {
2518 unsigned int i = 0;
2519 struct ltt_session *session;
2520
2521 DBG("Counting number of available sessions for UID %d GID %d",
2522 uid, gid);
2523 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2524 /*
2525 * Only list the sessions the user can control.
2526 */
2527 if (!session_access_ok(session, uid, gid)) {
2528 continue;
2529 }
2530 i++;
2531 }
2532 return i;
2533 }
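
/*
 * Illustrative sketch (not part of the daemon): the cds_list iteration
 * pattern used above. cds_list_for_each_entry() from userspace-rcu
 * walks a cds_list_head and hands back the enclosing structure through
 * its list member. The element type and helper are hypothetical;
 * assumes <urcu/list.h>.
 */
struct example_item {
	int value;
	struct cds_list_head list;	/* Linkage into the list below. */
};

static unsigned int example_count_matching(struct cds_list_head *head,
		int wanted)
{
	unsigned int count = 0;
	struct example_item *item;

	cds_list_for_each_entry(item, head, list) {
		if (item->value != wanted) {
			continue;	/* Skip non-matching entries. */
		}
		count++;
	}
	return count;
}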
2534
2535 /*
2536 * Process the command requested by the lttng client within the command
2537 * context structure. This function makes sure that the return structure (llm)
2538 * is set and ready for transmission before returning.
2539 *
2540 * Return any error encountered or 0 for success.
2541 *
2542 * "sock" is only used for special-case var. len data.
2543 *
2544 * Should *NOT* be called with RCU read-side lock held.
2545 */
2546 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2547 int *sock_error)
2548 {
2549 int ret = LTTNG_OK;
2550 int need_tracing_session = 1;
2551 int need_domain;
2552
2553 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2554
2555 *sock_error = 0;
2556
2557 switch (cmd_ctx->lsm->cmd_type) {
2558 case LTTNG_CREATE_SESSION:
2559 case LTTNG_DESTROY_SESSION:
2560 case LTTNG_LIST_SESSIONS:
2561 case LTTNG_LIST_DOMAINS:
2562 case LTTNG_START_TRACE:
2563 case LTTNG_STOP_TRACE:
2564 case LTTNG_DATA_PENDING:
2565 need_domain = 0;
2566 break;
2567 default:
2568 need_domain = 1;
2569 }
2570
2571 if (opt_no_kernel && need_domain
2572 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2573 if (!is_root) {
2574 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2575 } else {
2576 ret = LTTNG_ERR_KERN_NA;
2577 }
2578 goto error;
2579 }
2580
2581 /* Deny register consumer if we already have a spawned consumer. */
2582 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2583 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2584 if (kconsumer_data.pid > 0) {
2585 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2586 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2587 goto error;
2588 }
2589 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2590 }
2591
2592 /*
2593 * Check for commands that don't need to allocate a return payload. We do
2594 * this here so we don't have to make the call for each command that has
2595 * no payload.
2596 */
2597 switch (cmd_ctx->lsm->cmd_type) {
2598 case LTTNG_LIST_SESSIONS:
2599 case LTTNG_LIST_TRACEPOINTS:
2600 case LTTNG_LIST_TRACEPOINT_FIELDS:
2601 case LTTNG_LIST_DOMAINS:
2602 case LTTNG_LIST_CHANNELS:
2603 case LTTNG_LIST_EVENTS:
2604 break;
2605 default:
2606 /* Setup lttng message with no payload */
2607 ret = setup_lttng_msg(cmd_ctx, 0);
2608 if (ret < 0) {
2609 /* This label does not try to unlock the session */
2610 goto init_setup_error;
2611 }
2612 }
2613
2614 /* Commands that DO NOT need a session. */
2615 switch (cmd_ctx->lsm->cmd_type) {
2616 case LTTNG_CREATE_SESSION:
2617 case LTTNG_CALIBRATE:
2618 case LTTNG_LIST_SESSIONS:
2619 case LTTNG_LIST_TRACEPOINTS:
2620 case LTTNG_LIST_TRACEPOINT_FIELDS:
2621 need_tracing_session = 0;
2622 break;
2623 default:
2624 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2625 /*
2626 * We keep the session list lock across _all_ commands
2627 * for now, because the per-session lock does not
2628 * handle teardown properly.
2629 */
2630 session_lock_list();
2631 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2632 if (cmd_ctx->session == NULL) {
2633 ret = LTTNG_ERR_SESS_NOT_FOUND;
2634 goto error;
2635 } else {
2636 /* Acquire lock for the session */
2637 session_lock(cmd_ctx->session);
2638 }
2639 break;
2640 }
2641
2642 if (!need_domain) {
2643 goto skip_domain;
2644 }
2645
2646 /*
2647 * Check domain type for specific "pre-action".
2648 */
2649 switch (cmd_ctx->lsm->domain.type) {
2650 case LTTNG_DOMAIN_KERNEL:
2651 if (!is_root) {
2652 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2653 goto error;
2654 }
2655
2656 /* Kernel tracer check */
2657 if (kernel_tracer_fd == -1) {
2658 /* Basically, load kernel tracer modules */
2659 ret = init_kernel_tracer();
2660 if (ret != 0) {
2661 goto error;
2662 }
2663 }
2664
2665 /* Consumer is in an ERROR state. Report back to client */
2666 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2667 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2668 goto error;
2669 }
2670
2671 /* Need a session for kernel command */
2672 if (need_tracing_session) {
2673 if (cmd_ctx->session->kernel_session == NULL) {
2674 ret = create_kernel_session(cmd_ctx->session);
2675 if (ret < 0) {
2676 ret = LTTNG_ERR_KERN_SESS_FAIL;
2677 goto error;
2678 }
2679 }
2680
2681 /* Start the kernel consumer daemon */
2682 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2683 if (kconsumer_data.pid == 0 &&
2684 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2685 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2686 ret = start_consumerd(&kconsumer_data);
2687 if (ret < 0) {
2688 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2689 goto error;
2690 }
2691 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2692 } else {
2693 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2694 }
2695
2696 /*
2697 * The consumer was just spawned, so we need to add the socket to the
2698 * consumer output of the session, if it exists.
2699 */
2700 ret = consumer_create_socket(&kconsumer_data,
2701 cmd_ctx->session->kernel_session->consumer);
2702 if (ret < 0) {
2703 goto error;
2704 }
2705 }
2706
2707 break;
2708 case LTTNG_DOMAIN_UST:
2709 {
2710 if (!ust_app_supported()) {
2711 ret = LTTNG_ERR_NO_UST;
2712 goto error;
2713 }
2714 /* Consumer is in an ERROR state. Report back to client */
2715 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2716 ret = LTTNG_ERR_NO_USTCONSUMERD;
2717 goto error;
2718 }
2719
2720 if (need_tracing_session) {
2721 /* Create UST session if none exist. */
2722 if (cmd_ctx->session->ust_session == NULL) {
2723 ret = create_ust_session(cmd_ctx->session,
2724 &cmd_ctx->lsm->domain);
2725 if (ret != LTTNG_OK) {
2726 goto error;
2727 }
2728 }
2729
2730 /* Start the UST consumer daemons */
2731 /* 64-bit */
2732 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2733 if (consumerd64_bin[0] != '\0' &&
2734 ustconsumer64_data.pid == 0 &&
2735 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2736 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2737 ret = start_consumerd(&ustconsumer64_data);
2738 if (ret < 0) {
2739 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2740 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2741 goto error;
2742 }
2743
2744 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2745 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2746 } else {
2747 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2748 }
2749
2750 /*
2751 * Setup socket for consumer 64 bit. No need for atomic access
2752 * since it was set above and can ONLY be set in this thread.
2753 */
2754 ret = consumer_create_socket(&ustconsumer64_data,
2755 cmd_ctx->session->ust_session->consumer);
2756 if (ret < 0) {
2757 goto error;
2758 }
2759
2760 /* 32-bit */
pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
2761 if (consumerd32_bin[0] != '\0' &&
2762 ustconsumer32_data.pid == 0 &&
2763 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2764 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2765 ret = start_consumerd(&ustconsumer32_data);
2766 if (ret < 0) {
2767 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2768 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2769 goto error;
2770 }
2771
2772 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2773 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2774 } else {
2775 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2776 }
2777
2778 /*
2779 * Setup socket for consumer 32 bit. No need for atomic access
2780 * since it was set above and can ONLY be set in this thread.
2781 */
2782 ret = consumer_create_socket(&ustconsumer32_data,
2783 cmd_ctx->session->ust_session->consumer);
2784 if (ret < 0) {
2785 goto error;
2786 }
2787 }
2788 break;
2789 }
2790 default:
2791 break;
2792 }
2793 skip_domain:
2794
2795 /* Validate consumer daemon state when start/stop trace command */
2796 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2797 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2798 switch (cmd_ctx->lsm->domain.type) {
2799 case LTTNG_DOMAIN_UST:
2800 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2801 ret = LTTNG_ERR_NO_USTCONSUMERD;
2802 goto error;
2803 }
2804 break;
2805 case LTTNG_DOMAIN_KERNEL:
2806 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2807 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2808 goto error;
2809 }
2810 break;
2811 }
2812 }
2813
2814 /*
2815 * Check that the UID or GID matches that of the tracing session.
2816 * The root user can interact with all sessions.
2817 */
2818 if (need_tracing_session) {
2819 if (!session_access_ok(cmd_ctx->session,
2820 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2821 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2822 ret = LTTNG_ERR_EPERM;
2823 goto error;
2824 }
2825 }
2826
2827 /*
2828 * Send relayd information to consumer as soon as we have a domain and a
2829 * session defined.
2830 */
2831 if (cmd_ctx->session && need_domain) {
2832 /*
2833 * Setup relayd if not done yet. If the relayd information was already
2834 * sent to the consumer, this call will gracefully return.
2835 */
2836 ret = cmd_setup_relayd(cmd_ctx->session);
2837 if (ret != LTTNG_OK) {
2838 goto error;
2839 }
2840 }
2841
2842 /* Process by command type */
2843 switch (cmd_ctx->lsm->cmd_type) {
2844 case LTTNG_ADD_CONTEXT:
2845 {
2846 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2847 cmd_ctx->lsm->u.context.channel_name,
2848 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
2849 break;
2850 }
2851 case LTTNG_DISABLE_CHANNEL:
2852 {
2853 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2854 cmd_ctx->lsm->u.disable.channel_name);
2855 break;
2856 }
2857 case LTTNG_DISABLE_EVENT:
2858 {
2859 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2860 cmd_ctx->lsm->u.disable.channel_name,
2861 cmd_ctx->lsm->u.disable.name);
2862 break;
2863 }
2864 case LTTNG_DISABLE_ALL_EVENT:
2865 {
2866 DBG("Disabling all events");
2867
2868 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
2869 cmd_ctx->lsm->u.disable.channel_name);
2870 break;
2871 }
2872 case LTTNG_ENABLE_CHANNEL:
2873 {
2874 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
2875 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
2876 break;
2877 }
2878 case LTTNG_ENABLE_EVENT:
2879 {
2880 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
2881 cmd_ctx->lsm->u.enable.channel_name,
2882 &cmd_ctx->lsm->u.enable.event, NULL, kernel_poll_pipe[1]);
2883 break;
2884 }
2885 case LTTNG_ENABLE_ALL_EVENT:
2886 {
2887 DBG("Enabling all events");
2888
2889 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
2890 cmd_ctx->lsm->u.enable.channel_name,
2891 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
2892 break;
2893 }
2894 case LTTNG_LIST_TRACEPOINTS:
2895 {
2896 struct lttng_event *events;
2897 ssize_t nb_events;
2898
2899 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
2900 if (nb_events < 0) {
2901 /* Return value is a negative lttng_error_code. */
2902 ret = -nb_events;
2903 goto error;
2904 }
2905
2906 /*
2907 * Setup lttng message with payload size set to the event list size in
2908 * bytes and then copy list into the llm payload.
2909 */
2910 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
2911 if (ret < 0) {
2912 free(events);
2913 goto setup_error;
2914 }
2915
2916 /* Copy event list into message payload */
2917 memcpy(cmd_ctx->llm->payload, events,
2918 sizeof(struct lttng_event) * nb_events);
2919
2920 free(events);
2921
2922 ret = LTTNG_OK;
2923 break;
2924 }
2925 case LTTNG_LIST_TRACEPOINT_FIELDS:
2926 {
2927 struct lttng_event_field *fields;
2928 ssize_t nb_fields;
2929
2930 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
2931 &fields);
2932 if (nb_fields < 0) {
2933 /* Return value is a negative lttng_error_code. */
2934 ret = -nb_fields;
2935 goto error;
2936 }
2937
2938 /*
2939 * Setup lttng message with payload size set to the field list size in
2940 * bytes and then copy list into the llm payload.
2941 */
2942 ret = setup_lttng_msg(cmd_ctx,
2943 sizeof(struct lttng_event_field) * nb_fields);
2944 if (ret < 0) {
2945 free(fields);
2946 goto setup_error;
2947 }
2948
2949 /* Copy field list into message payload */
2950 memcpy(cmd_ctx->llm->payload, fields,
2951 sizeof(struct lttng_event_field) * nb_fields);
2952
2953 free(fields);
2954
2955 ret = LTTNG_OK;
2956 break;
2957 }
2958 case LTTNG_SET_CONSUMER_URI:
2959 {
2960 size_t nb_uri, len;
2961 struct lttng_uri *uris;
2962
2963 nb_uri = cmd_ctx->lsm->u.uri.size;
2964 len = nb_uri * sizeof(struct lttng_uri);
2965
2966 if (nb_uri == 0) {
2967 ret = LTTNG_ERR_INVALID;
2968 goto error;
2969 }
2970
2971 uris = zmalloc(len);
2972 if (uris == NULL) {
2973 ret = LTTNG_ERR_FATAL;
2974 goto error;
2975 }
2976
2977 /* Receive variable len data */
2978 DBG("Receiving %zu URI(s) from client ...", nb_uri);
2979 ret = lttcomm_recv_unix_sock(sock, uris, len);
2980 if (ret <= 0) {
2981 DBG("No URIs received from client... continuing");
2982 *sock_error = 1;
2983 ret = LTTNG_ERR_SESSION_FAIL;
2984 free(uris);
2985 goto error;
2986 }
2987
2988 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
2989 nb_uri, uris);
2990 if (ret != LTTNG_OK) {
2991 free(uris);
2992 goto error;
2993 }
2994
2995 /*
2996 * XXX: 0 means that this URI should be applied to the session. Should
2997 * be a DOMAIN enum value.
2998 */
2999 if (cmd_ctx->lsm->domain.type == 0) {
3000 /* Add the URI for the UST session if a consumer is present. */
3001 if (cmd_ctx->session->ust_session &&
3002 cmd_ctx->session->ust_session->consumer) {
3003 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3004 nb_uri, uris);
3005 } else if (cmd_ctx->session->kernel_session &&
3006 cmd_ctx->session->kernel_session->consumer) {
3007 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3008 cmd_ctx->session, nb_uri, uris);
3009 }
3010 }
3011
3012 free(uris);
3013
3014 break;
3015 }
3016 case LTTNG_START_TRACE:
3017 {
3018 ret = cmd_start_trace(cmd_ctx->session);
3019 break;
3020 }
3021 case LTTNG_STOP_TRACE:
3022 {
3023 ret = cmd_stop_trace(cmd_ctx->session);
3024 break;
3025 }
3026 case LTTNG_CREATE_SESSION:
3027 {
3028 size_t nb_uri, len;
3029 struct lttng_uri *uris = NULL;
3030
3031 nb_uri = cmd_ctx->lsm->u.uri.size;
3032 len = nb_uri * sizeof(struct lttng_uri);
3033
3034 if (nb_uri > 0) {
3035 uris = zmalloc(len);
3036 if (uris == NULL) {
3037 ret = LTTNG_ERR_FATAL;
3038 goto error;
3039 }
3040
3041 /* Receive variable len data */
3042 DBG("Waiting for %zu URIs from client ...", nb_uri);
3043 ret = lttcomm_recv_unix_sock(sock, uris, len);
3044 if (ret <= 0) {
3045 DBG("No URIs received from client... continuing");
3046 *sock_error = 1;
3047 ret = LTTNG_ERR_SESSION_FAIL;
3048 free(uris);
3049 goto error;
3050 }
3051
3052 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3053 DBG("Creating session with ONE network URI is a bad call");
3054 ret = LTTNG_ERR_SESSION_FAIL;
3055 free(uris);
3056 goto error;
3057 }
3058 }
3059
3060 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3061 &cmd_ctx->creds);
3062
3063 free(uris);
3064
3065 break;
3066 }
3067 case LTTNG_DESTROY_SESSION:
3068 {
3069 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3070
3071 /* Set session to NULL so we do not unlock it after free. */
3072 cmd_ctx->session = NULL;
3073 break;
3074 }
3075 case LTTNG_LIST_DOMAINS:
3076 {
3077 ssize_t nb_dom;
3078 struct lttng_domain *domains;
3079
3080 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3081 if (nb_dom < 0) {
3082 /* Return value is a negative lttng_error_code. */
3083 ret = -nb_dom;
3084 goto error;
3085 }
3086
3087 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3088 if (ret < 0) {
3089 free(domains);
3090 goto setup_error;
3091 }
3092
3093 /* Copy domain list into message payload */
3094 memcpy(cmd_ctx->llm->payload, domains,
3095 nb_dom * sizeof(struct lttng_domain));
3096
3097 free(domains);
3098
3099 ret = LTTNG_OK;
3100 break;
3101 }
3102 case LTTNG_LIST_CHANNELS:
3103 {
3104 int nb_chan;
3105 struct lttng_channel *channels;
3106
3107 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3108 cmd_ctx->session, &channels);
3109 if (nb_chan < 0) {
3110 /* Return value is a negative lttng_error_code. */
3111 ret = -nb_chan;
3112 goto error;
3113 }
3114
3115 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3116 if (ret < 0) {
3117 free(channels);
3118 goto setup_error;
3119 }
3120
3121 /* Copy channel list into message payload */
3122 memcpy(cmd_ctx->llm->payload, channels,
3123 nb_chan * sizeof(struct lttng_channel));
3124
3125 free(channels);
3126
3127 ret = LTTNG_OK;
3128 break;
3129 }
3130 case LTTNG_LIST_EVENTS:
3131 {
3132 ssize_t nb_event;
3133 struct lttng_event *events = NULL;
3134
3135 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3136 cmd_ctx->lsm->u.list.channel_name, &events);
3137 if (nb_event < 0) {
3138 /* Return value is a negative lttng_error_code. */
3139 ret = -nb_event;
3140 goto error;
3141 }
3142
3143 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3144 if (ret < 0) {
3145 free(events);
3146 goto setup_error;
3147 }
3148
3149 /* Copy event list into message payload */
3150 memcpy(cmd_ctx->llm->payload, events,
3151 nb_event * sizeof(struct lttng_event));
3152
3153 free(events);
3154
3155 ret = LTTNG_OK;
3156 break;
3157 }
3158 case LTTNG_LIST_SESSIONS:
3159 {
3160 unsigned int nr_sessions;
3161
3162 session_lock_list();
3163 nr_sessions = lttng_sessions_count(
3164 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3165 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3166
3167 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3168 if (ret < 0) {
3169 session_unlock_list();
3170 goto setup_error;
3171 }
3172
3173 /* Fill the session array */
3174 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3175 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3176 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3177
3178 session_unlock_list();
3179
3180 ret = LTTNG_OK;
3181 break;
3182 }
3183 case LTTNG_CALIBRATE:
3184 {
3185 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3186 &cmd_ctx->lsm->u.calibrate);
3187 break;
3188 }
3189 case LTTNG_REGISTER_CONSUMER:
3190 {
3191 struct consumer_data *cdata;
3192
3193 switch (cmd_ctx->lsm->domain.type) {
3194 case LTTNG_DOMAIN_KERNEL:
3195 cdata = &kconsumer_data;
3196 break;
3197 default:
3198 ret = LTTNG_ERR_UND;
3199 goto error;
3200 }
3201
3202 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3203 cmd_ctx->lsm->u.reg.path, cdata);
3204 break;
3205 }
3206 case LTTNG_ENABLE_EVENT_WITH_FILTER:
3207 {
3208 struct lttng_filter_bytecode *bytecode;
3209
3210 if (cmd_ctx->lsm->u.enable.bytecode_len > LTTNG_FILTER_MAX_LEN) {
3211 ret = LTTNG_ERR_FILTER_INVAL;
3212 goto error;
3213 }
3214 if (cmd_ctx->lsm->u.enable.bytecode_len == 0) {
3215 ret = LTTNG_ERR_FILTER_INVAL;
3216 goto error;
3217 }
3218 bytecode = zmalloc(cmd_ctx->lsm->u.enable.bytecode_len);
3219 if (!bytecode) {
3220 ret = LTTNG_ERR_FILTER_NOMEM;
3221 goto error;
3222 }
3223 /* Receive var. len. data */
3224 DBG("Receiving var len data from client ...");
3225 ret = lttcomm_recv_unix_sock(sock, bytecode,
3226 cmd_ctx->lsm->u.enable.bytecode_len);
3227 if (ret <= 0) {
3228 DBG("Nothing recv() from client var len data... continuing");
3229 *sock_error = 1;
3230 ret = LTTNG_ERR_FILTER_INVAL;
3231 goto error;
3232 }
3233
3234 if (bytecode->len + sizeof(*bytecode)
3235 != cmd_ctx->lsm->u.enable.bytecode_len) {
3236 free(bytecode);
3237 ret = LTTNG_ERR_FILTER_INVAL;
3238 goto error;
3239 }
3240
3241 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3242 cmd_ctx->lsm->u.enable.channel_name,
3243 &cmd_ctx->lsm->u.enable.event, bytecode, kernel_poll_pipe[1]);
3244 break;
3245 }
3246 case LTTNG_DATA_PENDING:
3247 {
3248 ret = cmd_data_pending(cmd_ctx->session);
3249 break;
3250 }
3251 default:
3252 ret = LTTNG_ERR_UND;
3253 break;
3254 }
3255
3256 error:
3257 if (cmd_ctx->llm == NULL) {
3258 DBG("Missing llm structure. Allocating one.");
3259 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3260 goto setup_error;
3261 }
3262 }
3263 /* Set return code */
3264 cmd_ctx->llm->ret_code = ret;
3265 setup_error:
3266 if (cmd_ctx->session) {
3267 session_unlock(cmd_ctx->session);
3268 }
3269 if (need_tracing_session) {
3270 session_unlock_list();
3271 }
3272 init_setup_error:
3273 return ret;
3274 }
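
/*
 * Illustrative sketch (not part of the daemon): the variable-length
 * payload pattern used by several commands above. The fixed header
 * announces a size; the receiver bounds-checks it, allocates, reads
 * the payload, then cross-checks the embedded length against what was
 * announced before trusting the data. The struct, helper, and the
 * recv_all() read-exactly-n callback are all hypothetical; assumes
 * <stdlib.h> and <stdint.h>.
 */
struct example_payload {
	uint32_t len;		/* Length of data[], filled by the sender. */
	char data[];
};

static struct example_payload *example_recv_payload(int sock,
		size_t announced_len, size_t max_len,
		ssize_t (*recv_all)(int, void *, size_t))
{
	struct example_payload *p;

	/* Reject zero and oversized announcements before allocating. */
	if (announced_len <= sizeof(*p) || announced_len > max_len) {
		return NULL;
	}
	p = calloc(1, announced_len);
	if (!p) {
		return NULL;
	}
	if (recv_all(sock, p, announced_len) <= 0) {
		goto error;
	}
	/* The embedded length must agree with the announced size. */
	if (p->len + sizeof(*p) != announced_len) {
		goto error;
	}
	return p;
error:
	free(p);
	return NULL;
}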
3275
3276 /*
3277 * Thread managing health check socket.
3278 */
3279 static void *thread_manage_health(void *data)
3280 {
3281 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3282 uint32_t revents, nb_fd;
3283 struct lttng_poll_event events;
3284 struct lttcomm_health_msg msg;
3285 struct lttcomm_health_data reply;
3286
3287 DBG("[thread] Manage health check started");
3288
3289 rcu_register_thread();
3290
3291 /* We might hit an error path before this is created. */
3292 lttng_poll_init(&events);
3293
3294 /* Create unix socket */
3295 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3296 if (sock < 0) {
3297 ERR("Unable to create health check Unix socket");
3298 ret = -1;
3299 goto error;
3300 }
3301
3302 /*
3303 * Set the CLOEXEC flag. Return code is useless because either way, the
3304 * show must go on.
3305 */
3306 (void) utils_set_fd_cloexec(sock);
3307
3308 ret = lttcomm_listen_unix_sock(sock);
3309 if (ret < 0) {
3310 goto error;
3311 }
3312
3313 /*
3314 * Pass 2 as size here for the thread quit pipe and the health socket.
3315 * Nothing more will be added to this poll set.
3316 */
3317 ret = sessiond_set_thread_pollset(&events, 2);
3318 if (ret < 0) {
3319 goto error;
3320 }
3321
3322 /* Add the health check Unix socket */
3323 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3324 if (ret < 0) {
3325 goto error;
3326 }
3327
3328 while (1) {
3329 DBG("Health check ready");
3330
3331 /* Infinite blocking call, waiting for transmission */
3332 restart:
3333 ret = lttng_poll_wait(&events, -1);
3334 if (ret < 0) {
3335 /*
3336 * Restart interrupted system call.
3337 */
3338 if (errno == EINTR) {
3339 goto restart;
3340 }
3341 goto error;
3342 }
3343
3344 nb_fd = ret;
3345
3346 for (i = 0; i < nb_fd; i++) {
3347 /* Fetch once the poll data */
3348 revents = LTTNG_POLL_GETEV(&events, i);
3349 pollfd = LTTNG_POLL_GETFD(&events, i);
3350
3351 /* Thread quit pipe has been closed. Killing thread. */
3352 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3353 if (ret) {
3354 err = 0;
3355 goto exit;
3356 }
3357
3358 /* Event on the health check socket */
3359 if (pollfd == sock) {
3360 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3361 ERR("Health socket poll error");
3362 goto error;
3363 }
3364 }
3365 }
3366
3367 new_sock = lttcomm_accept_unix_sock(sock);
3368 if (new_sock < 0) {
3369 goto error;
3370 }
3371
3372 /*
3373 * Set the CLOEXEC flag. Return code is useless because either way, the
3374 * show must go on.
3375 */
3376 (void) utils_set_fd_cloexec(new_sock);
3377
3378 DBG("Receiving data from client for health...");
3379 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3380 if (ret <= 0) {
3381 DBG("Nothing recv() from client... continuing");
3382 ret = close(new_sock);
3383 if (ret) {
3384 PERROR("close");
3385 }
3386 new_sock = -1;
3387 continue;
3388 }
3389
3390 rcu_thread_online();
3391
3392 switch (msg.component) {
3393 case LTTNG_HEALTH_CMD:
3394 reply.ret_code = health_check_state(HEALTH_TYPE_CMD);
3395 break;
3396 case LTTNG_HEALTH_APP_MANAGE:
3397 reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE);
3398 break;
3399 case LTTNG_HEALTH_APP_REG:
3400 reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG);
3401 break;
3402 case LTTNG_HEALTH_KERNEL:
3403 reply.ret_code = health_check_state(HEALTH_TYPE_KERNEL);
3404 break;
3405 case LTTNG_HEALTH_CONSUMER:
3406 reply.ret_code = check_consumer_health();
3407 break;
3408 case LTTNG_HEALTH_HT_CLEANUP:
3409 reply.ret_code = health_check_state(HEALTH_TYPE_HT_CLEANUP);
3410 break;
3411 case LTTNG_HEALTH_APP_MANAGE_NOTIFY:
3412 reply.ret_code = health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY);
3413 break;
3414 case LTTNG_HEALTH_APP_REG_DISPATCH:
3415 reply.ret_code = health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
3416 break;
3417 case LTTNG_HEALTH_ALL:
3418 reply.ret_code =
3419 health_check_state(HEALTH_TYPE_APP_MANAGE) &&
3420 health_check_state(HEALTH_TYPE_APP_REG) &&
3421 health_check_state(HEALTH_TYPE_CMD) &&
3422 health_check_state(HEALTH_TYPE_KERNEL) &&
3423 check_consumer_health() &&
3424 health_check_state(HEALTH_TYPE_HT_CLEANUP) &&
3425 health_check_state(HEALTH_TYPE_APP_MANAGE_NOTIFY) &&
3426 health_check_state(HEALTH_TYPE_APP_REG_DISPATCH);
3427 break;
3428 default:
3429 reply.ret_code = LTTNG_ERR_UND;
3430 break;
3431 }
3432
3433 /*
3434 * Flip the return value: for the client, 0 means success and 1
3435 * indicates bad health, while inside the sessiond the convention is
3436 * the opposite. This is simply to keep things easier on the sessiond
3437 * side.
3438 */
3439 if (reply.ret_code == 0 || reply.ret_code == 1) {
3440 reply.ret_code = !reply.ret_code;
3441 }
3442
3443 DBG2("Health check return value %d", reply.ret_code);
3444
3445 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3446 if (ret < 0) {
3447 ERR("Failed to send health data back to client");
3448 }
3449
3450 /* End of transmission */
3451 ret = close(new_sock);
3452 if (ret) {
3453 PERROR("close");
3454 }
3455 new_sock = -1;
3456 }
3457
3458 exit:
3459 error:
3460 if (err) {
3461 ERR("Health error occurred in %s", __func__);
3462 }
3463 DBG("Health check thread dying");
3464 unlink(health_unix_sock_path);
3465 if (sock >= 0) {
3466 ret = close(sock);
3467 if (ret) {
3468 PERROR("close");
3469 }
3470 }
3471
3472 lttng_poll_clean(&events);
3473
3474 rcu_unregister_thread();
3475 return NULL;
3476 }
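
/*
 * Illustrative sketch (not part of the daemon): the restart-on-EINTR
 * poll pattern used by the threads above, written against plain
 * poll(2) rather than the internal lttng_poll_*() wrappers. The helper
 * name is hypothetical; assumes <poll.h> and <errno.h>.
 */
static int example_poll_restart(struct pollfd *fds, nfds_t nfds)
{
	int ret;

	for (;;) {
		/* Block indefinitely, waiting for an event. */
		ret = poll(fds, nfds, -1);
		if (ret < 0 && errno == EINTR) {
			/* Interrupted by a signal: simply retry. */
			continue;
		}
		return ret;	/* Event count, or -1 on real error. */
	}
}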
3477
3478 /*
3479 * This thread manages all client requests using the client unix socket
3480 * for communication.
3481 */
3482 static void *thread_manage_clients(void *data)
3483 {
3484 int sock = -1, ret, i, pollfd, err = -1;
3485 int sock_error;
3486 uint32_t revents, nb_fd;
3487 struct command_ctx *cmd_ctx = NULL;
3488 struct lttng_poll_event events;
3489
3490 DBG("[thread] Manage client started");
3491
3492 rcu_register_thread();
3493
3494 health_register(HEALTH_TYPE_CMD);
3495
3496 if (testpoint(thread_manage_clients)) {
3497 goto error_testpoint;
3498 }
3499
3500 health_code_update();
3501
3502 ret = lttcomm_listen_unix_sock(client_sock);
3503 if (ret < 0) {
3504 goto error_listen;
3505 }
3506
3507 /*
3508 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3509 * more will be added to this poll set.
3510 */
3511 ret = sessiond_set_thread_pollset(&events, 2);
3512 if (ret < 0) {
3513 goto error_create_poll;
3514 }
3515
3516 /* Add the client socket */
3517 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3518 if (ret < 0) {
3519 goto error;
3520 }
3521
3522 /*
3523 * Notify the parent pid that we are ready to accept client commands.
3524 */
3525 if (opt_sig_parent) {
3526 kill(ppid, SIGUSR1);
3527 }
3528
3529 if (testpoint(thread_manage_clients_before_loop)) {
3530 goto error;
3531 }
3532
3533 health_code_update();
3534
3535 while (1) {
3536 DBG("Accepting client command ...");
3537
3538 /* Infinite blocking call, waiting for transmission */
3539 restart:
3540 health_poll_entry();
3541 ret = lttng_poll_wait(&events, -1);
3542 health_poll_exit();
3543 if (ret < 0) {
3544 /*
3545 * Restart interrupted system call.
3546 */
3547 if (errno == EINTR) {
3548 goto restart;
3549 }
3550 goto error;
3551 }
3552
3553 nb_fd = ret;
3554
3555 for (i = 0; i < nb_fd; i++) {
3556 /* Fetch once the poll data */
3557 revents = LTTNG_POLL_GETEV(&events, i);
3558 pollfd = LTTNG_POLL_GETFD(&events, i);
3559
3560 health_code_update();
3561
3562 /* Thread quit pipe has been closed. Killing thread. */
3563 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3564 if (ret) {
3565 err = 0;
3566 goto exit;
3567 }
3568
3569 /* Event on the client socket */
3570 if (pollfd == client_sock) {
3571 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3572 ERR("Client socket poll error");
3573 goto error;
3574 }
3575 }
3576 }
3577
3578 DBG("Wait for client response");
3579
3580 health_code_update();
3581
3582 sock = lttcomm_accept_unix_sock(client_sock);
3583 if (sock < 0) {
3584 goto error;
3585 }
3586
3587 /*
3588 * Set the CLOEXEC flag. Return code is useless because either way, the
3589 * show must go on.
3590 */
3591 (void) utils_set_fd_cloexec(sock);
3592
3593 /* Set socket option for credentials retrieval */
3594 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3595 if (ret < 0) {
3596 goto error;
3597 }
3598
3599 /* Allocate a command context to process the client request */
3600 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3601 if (cmd_ctx == NULL) {
3602 PERROR("zmalloc cmd_ctx");
3603 goto error;
3604 }
3605
3606 /* Allocate data buffer for reception */
3607 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3608 if (cmd_ctx->lsm == NULL) {
3609 PERROR("zmalloc cmd_ctx->lsm");
3610 goto error;
3611 }
3612
3613 cmd_ctx->llm = NULL;
3614 cmd_ctx->session = NULL;
3615
3616 health_code_update();
3617
3618 /*
3619 * Data is received from the lttng client. The struct
3620 * lttcomm_session_msg (lsm) contains the command and data request of
3621 * the client.
3622 */
3623 DBG("Receiving data from client ...");
3624 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3625 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3626 if (ret <= 0) {
3627 DBG("Nothing recv() from client... continuing");
3628 ret = close(sock);
3629 if (ret) {
3630 PERROR("close");
3631 }
3632 sock = -1;
3633 clean_command_ctx(&cmd_ctx);
3634 continue;
3635 }
3636
3637 health_code_update();
3638
3639 // TODO: Validate cmd_ctx including sanity check for
3640 // security purposes.
3641
3642 rcu_thread_online();
3643 /*
3644 * This function dispatches the work to the kernel or userspace tracer
3645 * libs and fills the lttcomm_lttng_msg data structure with all the
3646 * information needed by the client. The command context struct
3647 * contains everything this function may need.
3648 */
3649 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3650 rcu_thread_offline();
3651 if (ret < 0) {
3652 ret = close(sock);
3653 if (ret) {
3654 PERROR("close");
3655 }
3656 sock = -1;
3657 /*
3658 * TODO: Inform client somehow of the fatal error. At
3659 * this point, ret < 0 means that a zmalloc failed
3660 * (ENOMEM). The error was detected but we keep
3661 * accepting commands, unless a socket error has
3662 * been detected.
3663 */
3664 clean_command_ctx(&cmd_ctx);
3665 continue;
3666 }
3667
3668 health_code_update();
3669
3670 DBG("Sending response (size: %d, retcode: %s)",
3671 cmd_ctx->lttng_msg_size,
3672 lttng_strerror(-cmd_ctx->llm->ret_code));
3673 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
3674 if (ret < 0) {
3675 ERR("Failed to send data back to client");
3676 }
3677
3678 /* End of transmission */
3679 ret = close(sock);
3680 if (ret) {
3681 PERROR("close");
3682 }
3683 sock = -1;
3684
3685 clean_command_ctx(&cmd_ctx);
3686
3687 health_code_update();
3688 }
3689
3690 exit:
3691 error:
3692 if (sock >= 0) {
3693 ret = close(sock);
3694 if (ret) {
3695 PERROR("close");
3696 }
3697 }
3698
3699 lttng_poll_clean(&events);
3700 clean_command_ctx(&cmd_ctx);
3701
3702 error_listen:
3703 error_create_poll:
3704 error_testpoint:
3705 unlink(client_unix_sock_path);
3706 if (client_sock >= 0) {
3707 ret = close(client_sock);
3708 if (ret) {
3709 PERROR("close");
3710 }
3711 }
3712
3713 if (err) {
3714 health_error();
3715 ERR("Health error occurred in %s", __func__);
3716 }
3717
3718 health_unregister();
3719
3720 DBG("Client thread dying");
3721
3722 rcu_unregister_thread();
3723 return NULL;
3724 }
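
/*
 * Illustrative sketch (not part of the daemon): one way to retrieve
 * peer credentials on a connected unix socket. The
 * lttcomm_*_creds_unix_sock() helpers abstract this kind of mechanism;
 * their actual implementation may differ (e.g. SCM_CREDENTIALS
 * ancillary data). On Linux, SO_PEERCRED fills a struct ucred with the
 * peer's pid/uid/gid. The helper name is hypothetical; assumes
 * _GNU_SOURCE (defined at the top of this file) and <sys/socket.h>.
 */
static int example_get_peer_creds(int sock, uid_t *uid, gid_t *gid)
{
	struct ucred creds;
	socklen_t len = sizeof(creds);

	if (getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &creds, &len) < 0) {
		return -1;
	}
	*uid = creds.uid;
	*gid = creds.gid;
	return 0;
}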
3725
3726
3727 /*
3728 * usage function on stderr
3729 */
3730 static void usage(void)
3731 {
3732 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
3733 fprintf(stderr, " -h, --help Display this usage.\n");
3734 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
3735 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
3736 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
3737 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
3738 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
3739 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
3740 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
3741 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
3742 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
3743 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
3744 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
3745 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
3746 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
3747 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
3748 fprintf(stderr, " -V, --version Show version number.\n");
3749 fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
3750 fprintf(stderr, " -q, --quiet No output at all.\n");
3751 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
3752 fprintf(stderr, " -p, --pidfile FILE Write a pid to FILE name overriding the default value.\n");
3753 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
3754 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
3755 }
3756
3757 /*
3758 * daemon argument parsing
3759 */
3760 static int parse_args(int argc, char **argv)
3761 {
3762 int c;
3763
3764 static struct option long_options[] = {
3765 { "client-sock", 1, 0, 'c' },
3766 { "apps-sock", 1, 0, 'a' },
3767 { "kconsumerd-cmd-sock", 1, 0, 'C' },
3768 { "kconsumerd-err-sock", 1, 0, 'E' },
3769 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
3770 { "ustconsumerd32-err-sock", 1, 0, 'H' },
3771 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
3772 { "ustconsumerd64-err-sock", 1, 0, 'F' },
3773 { "consumerd32-path", 1, 0, 'u' },
3774 { "consumerd32-libdir", 1, 0, 'U' },
3775 { "consumerd64-path", 1, 0, 't' },
3776 { "consumerd64-libdir", 1, 0, 'T' },
3777 { "daemonize", 0, 0, 'd' },
3778 { "sig-parent", 0, 0, 'S' },
3779 { "help", 0, 0, 'h' },
3780 { "group", 1, 0, 'g' },
3781 { "version", 0, 0, 'V' },
3782 { "quiet", 0, 0, 'q' },
3783 { "verbose", 0, 0, 'v' },
3784 { "verbose-consumer", 0, 0, 'Z' },
3785 { "no-kernel", 0, 0, 'N' },
3786 { "pidfile", 1, 0, 'p' },
3787 { NULL, 0, 0, 0 }
3788 };
3789
3790 while (1) {
3791 int option_index = 0;
3792 c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:",
3793 long_options, &option_index);
3794 if (c == -1) {
3795 break;
3796 }
3797
3798 switch (c) {
3799 case 0:
3800 fprintf(stderr, "option %s", long_options[option_index].name);
3801 if (optarg) {
3802 fprintf(stderr, " with arg %s\n", optarg);
3803 }
3804 break;
3805 case 'c':
3806 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
3807 break;
3808 case 'a':
3809 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
3810 break;
3811 case 'd':
3812 opt_daemon = 1;
3813 break;
3814 case 'g':
3815 opt_tracing_group = optarg;
3816 break;
3817 case 'h':
3818 usage();
3819 exit(EXIT_FAILURE);
3820 case 'V':
3821 fprintf(stdout, "%s\n", VERSION);
3822 exit(EXIT_SUCCESS);
3823 case 'S':
3824 opt_sig_parent = 1;
3825 break;
3826 case 'E':
3827 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3828 break;
3829 case 'C':
3830 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3831 break;
3832 case 'F':
3833 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3834 break;
3835 case 'D':
3836 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3837 break;
3838 case 'H':
3839 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
3840 break;
3841 case 'G':
3842 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
3843 break;
3844 case 'N':
3845 opt_no_kernel = 1;
3846 break;
3847 case 'q':
3848 lttng_opt_quiet = 1;
3849 break;
3850 case 'v':
3851 /* Verbose level can increase using multiple -v */
3852 lttng_opt_verbose += 1;
3853 break;
3854 case 'Z':
3855 opt_verbose_consumer += 1;
3856 break;
3857 case 'u':
3858 consumerd32_bin = optarg;
3859 break;
3860 case 'U':
3861 consumerd32_libdir = optarg;
3862 break;
3863 case 't':
3864 consumerd64_bin = optarg;
3865 break;
3866 case 'T':
3867 consumerd64_libdir = optarg;
3868 break;
3869 case 'p':
3870 opt_pidfile = optarg;
3871 break;
3872 default:
3873 /* Unknown option or other error.
3874 * Error is printed by getopt, just return */
3875 return -1;
3876 }
3877 }
3878
3879 return 0;
3880 }
3881
3882 /*
3883 * Creates the two sockets needed by the daemon.
3884 * apps_sock - The communication socket for all UST apps.
3885 * client_sock - The communication socket for the CLI tool (lttng).
3886 */
3887 static int init_daemon_socket(void)
3888 {
3889 int ret = 0;
3890 mode_t old_umask;
3891
3892 old_umask = umask(0);
3893
3894 /* Create client tool unix socket */
3895 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
3896 if (client_sock < 0) {
3897 ERR("Create unix sock failed: %s", client_unix_sock_path);
3898 ret = -1;
3899 goto end;
3900 }
3901
3902 /* Set the cloexec flag */
3903 ret = utils_set_fd_cloexec(client_sock);
3904 if (ret < 0) {
3905 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
3906 "Continuing but note that the consumer daemon will have a "
3907 "reference to this socket on exec()", client_sock);
3908 }
3909
3910 /* File permission MUST be 660 */
3911 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3912 if (ret < 0) {
3913 ERR("Set file permissions failed: %s", client_unix_sock_path);
3914 PERROR("chmod");
3915 goto end;
3916 }
3917
3918 /* Create the application unix socket */
3919 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
3920 if (apps_sock < 0) {
3921 ERR("Create unix sock failed: %s", apps_unix_sock_path);
3922 ret = -1;
3923 goto end;
3924 }
3925
3926 /* Set the cloexec flag */
3927 ret = utils_set_fd_cloexec(apps_sock);
3928 if (ret < 0) {
3929 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
3930 "Continuing but note that the consumer daemon will have a "
3931 "reference to this socket on exec()", apps_sock);
3932 }
3933
3934 /* File permission MUST be 666 */
3935 ret = chmod(apps_unix_sock_path,
3936 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
3937 if (ret < 0) {
3938 ERR("Set file permissions failed: %s", apps_unix_sock_path);
3939 PERROR("chmod");
3940 goto end;
3941 }
3942
3943 DBG3("Session daemon client socket %d and application socket %d created",
3944 client_sock, apps_sock);
3945
3946 end:
3947 umask(old_umask);
3948 return ret;
3949 }
3950
3951 /*
3952 * Check if the global socket is available and if a daemon is answering
3953 * on the other side. If so, an error is returned.
3954 */
3955 static int check_existing_daemon(void)
3956 {
3957 /* Is there anybody out there ? */
3958 if (lttng_session_daemon_alive()) {
3959 return -EEXIST;
3960 }
3961
3962 return 0;
3963 }
3964
3965 /*
3966 * Set the tracing group gid onto the client socket.
3967 *
3968 * Race window between mkdir and chown is OK because we are going from more
3969 * permissive (root.root) to less permissive (root.tracing).
3970 */
3971 static int set_permissions(char *rundir)
3972 {
3973 int ret;
3974 gid_t gid;
3975
3976 ret = allowed_group();
3977 if (ret < 0) {
3978 WARN("No tracing group detected");
3979 /* Setting gid to 0 if no tracing group is found */
3980 gid = 0;
3981 } else {
3982 gid = ret;
3983 }
3984
3985 /* Set lttng run dir */
3986 ret = chown(rundir, 0, gid);
3987 if (ret < 0) {
3988 ERR("Unable to set group on %s", rundir);
3989 PERROR("chown");
3990 }
3991
3992 /* Ensure all applications and tracing group can search the run dir */
3993 ret = chmod(rundir, S_IRWXU | S_IXGRP | S_IXOTH);
3994 if (ret < 0) {
3995 ERR("Unable to set permissions on %s", rundir);
3996 PERROR("chmod");
3997 }
3998
3999 /* lttng client socket path */
4000 ret = chown(client_unix_sock_path, 0, gid);
4001 if (ret < 0) {
4002 ERR("Unable to set group on %s", client_unix_sock_path);
4003 PERROR("chown");
4004 }
4005
4006 /* kconsumer error socket path */
4007 ret = chown(kconsumer_data.err_unix_sock_path, 0, gid);
4008 if (ret < 0) {
4009 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
4010 PERROR("chown");
4011 }
4012
4013 /* 64-bit ustconsumer error socket path */
4014 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, gid);
4015 if (ret < 0) {
4016 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
4017 PERROR("chown");
4018 }
4019
4020 /* 32-bit ustconsumer error socket path */
4021 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, gid);
4022 if (ret < 0) {
4023 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
4024 PERROR("chown");
4025 }
4026
4027 DBG("All permissions are set");
4028
4029 return ret;
4030 }
4031
4032 /*
4033 * Create the lttng run directory needed for all global sockets and pipes.
4034 */
4035 static int create_lttng_rundir(const char *rundir)
4036 {
4037 int ret;
4038
4039 DBG3("Creating LTTng run directory: %s", rundir);
4040
4041 ret = mkdir(rundir, S_IRWXU);
4042 if (ret < 0) {
4043 if (errno != EEXIST) {
4044 ERR("Unable to create %s", rundir);
4045 goto error;
4046 } else {
4047 ret = 0;
4048 }
4049 }
4050
4051 error:
4052 return ret;
4053 }
4054
4055 /*
4056 * Setup sockets and directory needed by the kconsumerd communication with the
4057 * session daemon.
4058 */
4059 static int set_consumer_sockets(struct consumer_data *consumer_data,
4060 const char *rundir)
4061 {
4062 int ret;
4063 char path[PATH_MAX];
4064
4065 switch (consumer_data->type) {
4066 case LTTNG_CONSUMER_KERNEL:
4067 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
4068 break;
4069 case LTTNG_CONSUMER64_UST:
4070 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
4071 break;
4072 case LTTNG_CONSUMER32_UST:
4073 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
4074 break;
4075 default:
4076 ERR("Consumer type unknown");
4077 ret = -EINVAL;
4078 goto error;
4079 }
4080
4081 DBG2("Creating consumer directory: %s", path);
4082
4083 ret = mkdir(path, S_IRWXU);
4084 if (ret < 0) {
4085 if (errno != EEXIST) {
4086 PERROR("mkdir");
4087 ERR("Failed to create %s", path);
4088 goto error;
4089 }
4090 ret = -1;
4091 }
4092
4093 /* Create the kconsumerd error unix socket */
4094 consumer_data->err_sock =
4095 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
4096 if (consumer_data->err_sock < 0) {
4097 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
4098 ret = -1;
4099 goto error;
4100 }
4101
4102 /*
4103 * Set the CLOEXEC flag. Return code is useless because either way, the
4104 * show must go on.
4105 */
4106 ret = utils_set_fd_cloexec(consumer_data->err_sock);
4107 if (ret < 0) {
4108 PERROR("utils_set_fd_cloexec");
4109 /* continue anyway */
4110 }
4111
4112 /* File permission MUST be 660 */
4113 ret = chmod(consumer_data->err_unix_sock_path,
4114 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4115 if (ret < 0) {
4116 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
4117 PERROR("chmod");
4118 goto error;
4119 }
4120
4121 error:
4122 return ret;
4123 }
4124
4125 /*
4126 * Signal handler for the daemon
4127 *
4128 * Simply stop all worker threads, letting main() return gracefully
4129 * after joining all threads and calling cleanup().
4130 */
4131 static void sighandler(int sig)
4132 {
4133 switch (sig) {
4134 case SIGPIPE:
4135 DBG("SIGPIPE caught");
4136 return;
4137 case SIGINT:
4138 DBG("SIGINT caught");
4139 stop_threads();
4140 break;
4141 case SIGTERM:
4142 DBG("SIGTERM caught");
4143 stop_threads();
4144 break;
4145 default:
4146 break;
4147 }
4148 }
4149
4150 /*
4151 * Setup the signal handler for:
4152 * SIGINT, SIGTERM and SIGPIPE
4153 */
4154 static int set_signal_handler(void)
4155 {
4156 int ret = 0;
4157 struct sigaction sa;
4158 sigset_t sigset;
4159
4160 if ((ret = sigemptyset(&sigset)) < 0) {
4161 PERROR("sigemptyset");
4162 return ret;
4163 }
4164
4165 sa.sa_handler = sighandler;
4166 sa.sa_mask = sigset;
4167 sa.sa_flags = 0;
4168 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
4169 PERROR("sigaction");
4170 return ret;
4171 }
4172
4173 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
4174 PERROR("sigaction");
4175 return ret;
4176 }
4177
4178 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
4179 PERROR("sigaction");
4180 return ret;
4181 }
4182
4183 DBG("Signal handler set for SIGTERM, SIGPIPE and SIGINT");
4184
4185 return ret;
4186 }
4187
4188 /*
4189 * Raise the open files limit. This daemon can open a large number of
4190 * file descriptors in order to consume multiple kernel traces.
4191 */
4192 static void set_ulimit(void)
4193 {
4194 int ret;
4195 struct rlimit lim;
4196
4197 /* The kernel does not allow an infinite limit for open files */
4198 lim.rlim_cur = 65535;
4199 lim.rlim_max = 65535;
4200
4201 ret = setrlimit(RLIMIT_NOFILE, &lim);
4202 if (ret < 0) {
4203 PERROR("failed to set open files limit");
4204 }
4205 }
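/*
 * The new limit can be verified with getrlimit(); an illustrative
 * sketch, not part of the original flow:
 *
 *   struct rlimit check;
 *   if (getrlimit(RLIMIT_NOFILE, &check) == 0) {
 *           DBG("Open files limit: soft %lu, hard %lu",
 *                   (unsigned long) check.rlim_cur,
 *                   (unsigned long) check.rlim_max);
 *   }
 */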
4206
4207 /*
4208 * Write pidfile using the rundir and opt_pidfile.
4209 */
4210 static void write_pidfile(void)
4211 {
4212 int ret;
4213 char pidfile_path[PATH_MAX] = { 0 };
4214
4215 assert(rundir);
4216
4217 if (opt_pidfile) {
4218 strncpy(pidfile_path, opt_pidfile, sizeof(pidfile_path) - 1);
4219 } else {
4220 /* Build pidfile path from rundir and the default pidfile name. */
4221 ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
4222 DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
4223 if (ret < 0) {
4224 PERROR("snprintf pidfile path");
4225 goto error;
4226 }
4227 }
4228
4229 /*
4230 * Create the pid file in rundir. The return value is of no importance:
4231 * execution continues even if the file cannot be written.
4232 */
4233 (void) utils_create_pid_file(getpid(), pidfile_path);
4234
4235 error:
4236 return;
4237 }
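/*
 * utils_create_pid_file() is assumed to simply write the pid to the
 * given path, roughly (sketch; see common/utils.c for the real code):
 *
 *   FILE *fp = fopen(filepath, "w");
 *   if (fp) {
 *           fprintf(fp, "%d\n", (int) pid);
 *           fclose(fp);
 *   }
 */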
4238
4239 /*
4240 * main
4241 */
4242 int main(int argc, char **argv)
4243 {
4244 int ret = 0;
4245 void *status;
4246 const char *home_path, *env_app_timeout;
4247
4248 init_kernel_workarounds();
4249
4250 rcu_register_thread();
4251
4252 setup_consumerd_path();
4253
4254 page_size = sysconf(_SC_PAGESIZE);
4255 if (page_size < 0) {
4256 PERROR("sysconf _SC_PAGESIZE");
4257 page_size = LONG_MAX;
4258 WARN("Fallback page size to %ld", page_size);
4259 }
4260
4261 /* Parse arguments */
4262 progname = argv[0];
4263 if ((ret = parse_args(argc, argv)) < 0) {
4264 goto error;
4265 }
4266
4267 /* Daemonize */
4268 if (opt_daemon) {
4269 int i;
4270
4271 /*
4272 * fork
4273 * child: setsid, close FD 0, 1, 2, chdir /
4274 * parent: exit (if fork is successful)
4275 */
4276 ret = daemon(0, 0);
4277 if (ret < 0) {
4278 PERROR("daemon");
4279 goto error;
4280 }
4281 /*
4282 * We are in the child. Make sure all other file
4283 * descriptors are closed, in case we were started with
4284 * more open file descriptors than the standard ones.
4285 */
4286 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4287 (void) close(i);
4288 }
4289 }
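/*
 * For reference, daemon(0, 0) behaves roughly like this hand-rolled
 * sequence (sketch):
 *
 *   pid_t pid = fork();
 *   if (pid < 0) return -1;
 *   if (pid > 0) _exit(0);      parent exits, child continues
 *   setsid();                   detach from the controlling terminal
 *   chdir("/");                 because the first argument is 0
 *   ...                         and, because the second argument is 0,
 *                               fds 0, 1, 2 are redirected to /dev/null
 */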
4290
4291 /* Create thread quit pipe */
4292 if ((ret = init_thread_quit_pipe()) < 0) {
4293 goto error;
4294 }
4295
4296 /* Check if daemon is UID = 0 */
4297 is_root = !getuid();
4298
4299 if (is_root) {
4300 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
4301
4302 /* Create global run dir with root access */
4303 ret = create_lttng_rundir(rundir);
4304 if (ret < 0) {
4305 goto error;
4306 }
4307
4308 if (strlen(apps_unix_sock_path) == 0) {
4309 snprintf(apps_unix_sock_path, PATH_MAX,
4310 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
4311 }
4312
4313 if (strlen(client_unix_sock_path) == 0) {
4314 snprintf(client_unix_sock_path, PATH_MAX,
4315 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
4316 }
4317
4318 /* Set global SHM for ust */
4319 if (strlen(wait_shm_path) == 0) {
4320 snprintf(wait_shm_path, PATH_MAX,
4321 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
4322 }
4323
4324 if (strlen(health_unix_sock_path) == 0) {
4325 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4326 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
4327 }
4328
4329 /* Setup kernel consumerd path */
4330 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
4331 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
4332 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
4333 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
4334
4335 DBG2("Kernel consumer err path: %s",
4336 kconsumer_data.err_unix_sock_path);
4337 DBG2("Kernel consumer cmd path: %s",
4338 kconsumer_data.cmd_unix_sock_path);
4339 } else {
4340 home_path = utils_get_home_dir();
4341 if (home_path == NULL) {
4342 /* TODO: Add --socket PATH option */
4343 ERR("Can't get HOME directory for sockets creation.");
4344 ret = -EPERM;
4345 goto error;
4346 }
4347
4348 /*
4349 * Create rundir from home path. This will create something like
4350 * $HOME/.lttng
4351 */
4352 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
4353 if (ret < 0) {
4354 ret = -ENOMEM;
4355 goto error;
4356 }
4357
4358 ret = create_lttng_rundir(rundir);
4359 if (ret < 0) {
4360 goto error;
4361 }
4362
4363 if (strlen(apps_unix_sock_path) == 0) {
4364 snprintf(apps_unix_sock_path, PATH_MAX,
4365 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
4366 }
4367
4368 /* Set the cli tool unix socket path */
4369 if (strlen(client_unix_sock_path) == 0) {
4370 snprintf(client_unix_sock_path, PATH_MAX,
4371 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
4372 }
4373
4374 /* Set global SHM for ust */
4375 if (strlen(wait_shm_path) == 0) {
4376 snprintf(wait_shm_path, PATH_MAX,
4377 DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
4378 }
4379
4380 /* Set health check Unix path */
4381 if (strlen(health_unix_sock_path) == 0) {
4382 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4383 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
4384 }
4385 }
4386
4387 /* Set consumer initial state */
4388 kernel_consumerd_state = CONSUMER_STOPPED;
4389 ust_consumerd_state = CONSUMER_STOPPED;
4390
4391 DBG("Client socket path %s", client_unix_sock_path);
4392 DBG("Application socket path %s", apps_unix_sock_path);
4393 DBG("Application wait path %s", wait_shm_path);
4394 DBG("LTTng run directory path: %s", rundir);
4395
4396 /* 32 bits consumerd path setup */
4397 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
4398 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4399 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4400 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4401
4402 DBG2("UST consumer 32 bits err path: %s",
4403 ustconsumer32_data.err_unix_sock_path);
4404 DBG2("UST consumer 32 bits cmd path: %s",
4405 ustconsumer32_data.cmd_unix_sock_path);
4406
4407 /* 64 bits consumerd path setup */
4408 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4409 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4410 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4411 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4412
4413 DBG2("UST consumer 64 bits err path: %s",
4414 ustconsumer64_data.err_unix_sock_path);
4415 DBG2("UST consumer 64 bits cmd path: %s",
4416 ustconsumer64_data.cmd_unix_sock_path);
4417
4418 /*
4419 * See if a daemon is already running.
4420 */
4421 if ((ret = check_existing_daemon()) < 0) {
4422 ERR("Already running daemon.\n");
4423 /*
4424 * We do not goto exit because we must not cleanup()
4425 * because a daemon is already running.
4426 */
4427 goto error;
4428 }
4429
4430 /*
4431 * Init UST app hash table. It must be allocated before this point,
4432 * since cleanup() can be called at any time afterwards.
4433 */
4434 ust_app_ht_alloc();
4435
4436 /* After this point, we can safely call cleanup() with "goto exit" */
4437
4438 /*
4439 * These actions must be executed as root. We do them *after* setting up
4440 * the socket paths because we MUST check for another daemon using those
4441 * paths *before* setting up the kernel consumer sockets and initializing
4442 * the kernel tracer.
4443 */
4444 if (is_root) {
4445 ret = set_consumer_sockets(&kconsumer_data, rundir);
4446 if (ret < 0) {
4447 goto exit;
4448 }
4449
4450 /* Setup kernel tracer */
4451 if (!opt_no_kernel) {
4452 init_kernel_tracer();
4453 }
4454
4455 /* Set ulimit for open files */
4456 set_ulimit();
4457 }
4458 /* Init of lttng_fd tracking must be done after set_ulimit. */
4459 lttng_fd_init();
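/*
 * The ordering matters because lttng_fd_init() presumably snapshots the
 * current RLIMIT_NOFILE value (see fd-limit.c); calling it before
 * set_ulimit() would track file descriptors against the old, lower
 * limit.
 */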
4460
4461 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4462 if (ret < 0) {
4463 goto exit;
4464 }
4465
4466 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4467 if (ret < 0) {
4468 goto exit;
4469 }
4470
4471 if ((ret = set_signal_handler()) < 0) {
4472 goto exit;
4473 }
4474
4475 /* Setup the needed unix socket */
4476 if ((ret = init_daemon_socket()) < 0) {
4477 goto exit;
4478 }
4479
4480 /* Set credentials to socket */
4481 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4482 goto exit;
4483 }
4484
4485 /* Get parent pid if -S, --sig-parent is specified. */
4486 if (opt_sig_parent) {
4487 ppid = getppid();
4488 }
4489
4490 /* Setup the kernel pipe for waking up the kernel thread */
4491 if (is_root && !opt_no_kernel) {
4492 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
4493 goto exit;
4494 }
4495 }
4496
4497 /* Setup the thread ht_cleanup communication pipe. */
4498 if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
4499 goto exit;
4500 }
4501
4502 /* Setup the thread apps communication pipe. */
4503 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
4504 goto exit;
4505 }
4506
4507 /* Setup the thread apps notify communication pipe. */
4508 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
4509 goto exit;
4510 }
4511
4512 /* Initialize global buffer per UID and PID registry. */
4513 buffer_reg_init_uid_registry();
4514 buffer_reg_init_pid_registry();
4515
4516 /* Init UST command queue. */
4517 cds_wfq_init(&ust_cmd_queue.queue);
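/*
 * This wait-free queue carries UST registration requests: the
 * registration thread enqueues and the dispatch thread dequeues (as the
 * pthread_create() calls below suggest), with a futex used to wake the
 * dispatcher.
 */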
4518
4519 /*
4520 * Get session list pointer. This pointer MUST NOT be free(). This list is
4521 * statically declared in session.c
4522 */
4523 session_list_ptr = session_get_list();
4524
4525 /* Set up max poll set size */
4526 lttng_poll_set_max_size();
4527
4528 cmd_init();
4529
4530 /* Check for the application socket timeout env variable. */
4531 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
4532 if (env_app_timeout) {
4533 app_socket_timeout = atoi(env_app_timeout);
4534 } else {
4535 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
4536 }
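/*
 * Caveat: atoi() does no error checking and returns 0 on malformed
 * input. A stricter parse would use strtol(); a sketch:
 *
 *   char *endptr;
 *   long val = strtol(env_app_timeout, &endptr, 10);
 *   if (*endptr == '\0' && val > 0 && val <= INT_MAX) {
 *           app_socket_timeout = (int) val;
 *   }
 */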
4537
4538 write_pidfile();
4539
4540 /* This is to get the TCP timeout value. */
4541 lttcomm_inet_init();
4542
4543 /*
4544 * Initialize the health check subsystem. This call sets the
4545 * appropriate timing values.
4546 */
4547 health_init();
4548
4549 /* Create thread to manage deferred hash table cleanup */
4550 ret = pthread_create(&ht_cleanup_thread, NULL,
4551 thread_ht_cleanup, (void *) NULL);
4552 if (ret != 0) {
4553 PERROR("pthread_create ht_cleanup");
4554 goto exit_ht_cleanup;
4555 }
4556
4557 /* Create thread to manage the health check socket */
4558 ret = pthread_create(&health_thread, NULL,
4559 thread_manage_health, (void *) NULL);
4560 if (ret != 0) {
4561 PERROR("pthread_create health");
4562 goto exit_health;
4563 }
4564
4565 /* Create thread to manage the client socket */
4566 ret = pthread_create(&client_thread, NULL,
4567 thread_manage_clients, (void *) NULL);
4568 if (ret != 0) {
4569 PERROR("pthread_create clients");
4570 goto exit_client;
4571 }
4572
4573 /* Create thread to dispatch registration */
4574 ret = pthread_create(&dispatch_thread, NULL,
4575 thread_dispatch_ust_registration, (void *) NULL);
4576 if (ret != 0) {
4577 PERROR("pthread_create dispatch");
4578 goto exit_dispatch;
4579 }
4580
4581 /* Create thread to manage application registration. */
4582 ret = pthread_create(&reg_apps_thread, NULL,
4583 thread_registration_apps, (void *) NULL);
4584 if (ret != 0) {
4585 PERROR("pthread_create registration");
4586 goto exit_reg_apps;
4587 }
4588
4589 /* Create thread to manage application socket */
4590 ret = pthread_create(&apps_thread, NULL,
4591 thread_manage_apps, (void *) NULL);
4592 if (ret != 0) {
4593 PERROR("pthread_create apps");
4594 goto exit_apps;
4595 }
4596
4597 /* Create thread to manage application notify socket */
4598 ret = pthread_create(&apps_notify_thread, NULL,
4599 ust_thread_manage_notify, (void *) NULL);
4600 if (ret != 0) {
4601 PERROR("pthread_create apps");
4602 goto exit_apps;
4603 }
4604
4605 /* Only start this thread if running as root and kernel tracing is requested. */
4606 if (is_root && !opt_no_kernel) {
4607 /* Create kernel thread to manage kernel event */
4608 ret = pthread_create(&kernel_thread, NULL,
4609 thread_manage_kernel, (void *) NULL);
4610 if (ret != 0) {
4611 PERROR("pthread_create kernel");
4612 goto exit_kernel;
4613 }
4614
4615 ret = pthread_join(kernel_thread, &status);
4616 if (ret != 0) {
4617 PERROR("pthread_join");
4618 goto error; /* join error, exit without cleanup */
4619 }
4620 }
4621
4622 exit_kernel:
4623 ret = pthread_join(apps_thread, &status);
4624 if (ret != 0) {
4625 PERROR("pthread_join");
4626 goto error; /* join error, exit without cleanup */
4627 }
4628
4629 exit_apps:
4630 ret = pthread_join(reg_apps_thread, &status);
4631 if (ret != 0) {
4632 PERROR("pthread_join");
4633 goto error; /* join error, exit without cleanup */
4634 }
4635
4636 exit_reg_apps:
4637 ret = pthread_join(dispatch_thread, &status);
4638 if (ret != 0) {
4639 PERROR("pthread_join");
4640 goto error; /* join error, exit without cleanup */
4641 }
4642
4643 exit_dispatch:
4644 ret = pthread_join(client_thread, &status);
4645 if (ret != 0) {
4646 PERROR("pthread_join");
4647 goto error; /* join error, exit without cleanup */
4648 }
4649
4650 ret = join_consumer_thread(&kconsumer_data);
4651 if (ret != 0) {
4652 PERROR("join_consumer");
4653 goto error; /* join error, exit without cleanup */
4654 }
4655
4656 ret = join_consumer_thread(&ustconsumer32_data);
4657 if (ret != 0) {
4658 PERROR("join_consumer ust32");
4659 goto error; /* join error, exit without cleanup */
4660 }
4661
4662 ret = join_consumer_thread(&ustconsumer64_data);
4663 if (ret != 0) {
4664 PERROR("join_consumer ust64");
4665 goto error; /* join error, exit without cleanup */
4666 }
4667
4668 exit_client:
4669 ret = pthread_join(health_thread, &status);
4670 if (ret != 0) {
4671 PERROR("pthread_join health thread");
4672 goto error; /* join error, exit without cleanup */
4673 }
4674
4675 exit_health:
4676 ret = pthread_join(ht_cleanup_thread, &status);
4677 if (ret != 0) {
4678 PERROR("pthread_join ht cleanup thread");
4679 goto error; /* join error, exit without cleanup */
4680 }
4681 exit_ht_cleanup:
4682 exit:
4683 /*
4684 * cleanup() is called when no other thread is running.
4685 */
4686 rcu_thread_online();
4687 cleanup();
4688 rcu_thread_offline();
4689 rcu_unregister_thread();
4690 if (!ret) {
4691 exit(EXIT_SUCCESS);
4692 }
4693 error:
4694 exit(EXIT_FAILURE);
4695 }