Fix: compat poll: add missing empty revents checks
[lttng-tools.git] / src / bin / lttng-sessiond / main.c
1 /*
2 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
3 * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <paths.h>
24 #include <pthread.h>
25 #include <signal.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <string.h>
29 #include <inttypes.h>
30 #include <sys/mman.h>
31 #include <sys/mount.h>
32 #include <sys/resource.h>
33 #include <sys/socket.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/wait.h>
37 #include <urcu/uatomic.h>
38 #include <unistd.h>
39 #include <config.h>
40
41 #include <common/common.h>
42 #include <common/compat/socket.h>
43 #include <common/defaults.h>
44 #include <common/kernel-consumer/kernel-consumer.h>
45 #include <common/futex.h>
46 #include <common/relayd/relayd.h>
47 #include <common/utils.h>
48 #include <common/daemonize.h>
49
50 #include "lttng-sessiond.h"
51 #include "buffer-registry.h"
52 #include "channel.h"
53 #include "cmd.h"
54 #include "consumer.h"
55 #include "context.h"
56 #include "event.h"
57 #include "kernel.h"
58 #include "kernel-consumer.h"
59 #include "modprobe.h"
60 #include "shm.h"
61 #include "ust-ctl.h"
62 #include "ust-consumer.h"
63 #include "utils.h"
64 #include "fd-limit.h"
65 #include "health-sessiond.h"
66 #include "testpoint.h"
67 #include "ust-thread.h"
68 #include "jul-thread.h"
69
70 #define CONSUMERD_FILE "lttng-consumerd"
71
72 const char *progname;
73 static const char *tracing_group_name = DEFAULT_TRACING_GROUP;
74 static const char *opt_pidfile;
75 static int opt_sig_parent;
76 static int opt_verbose_consumer;
77 static int opt_daemon, opt_background;
78 static int opt_no_kernel;
79 static pid_t ppid; /* Parent PID for --sig-parent option */
80 static pid_t child_ppid; /* Internal parent PID use with daemonize. */
81 static char *rundir;
82 static int lockfile_fd = -1;
83
84 /* Set to 1 when a SIGUSR1 signal is received. */
85 static int recv_child_signal;
86
87 /*
88 * Consumer daemon specific control data. Every value not initialized here is
89 * set to 0 by the static definition.
90 */
91 static struct consumer_data kconsumer_data = {
92 .type = LTTNG_CONSUMER_KERNEL,
93 .err_unix_sock_path = DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
94 .cmd_unix_sock_path = DEFAULT_KCONSUMERD_CMD_SOCK_PATH,
95 .err_sock = -1,
96 .cmd_sock = -1,
97 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
98 .lock = PTHREAD_MUTEX_INITIALIZER,
99 .cond = PTHREAD_COND_INITIALIZER,
100 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
101 };
102 static struct consumer_data ustconsumer64_data = {
103 .type = LTTNG_CONSUMER64_UST,
104 .err_unix_sock_path = DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
105 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH,
106 .err_sock = -1,
107 .cmd_sock = -1,
108 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
109 .lock = PTHREAD_MUTEX_INITIALIZER,
110 .cond = PTHREAD_COND_INITIALIZER,
111 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
112 };
113 static struct consumer_data ustconsumer32_data = {
114 .type = LTTNG_CONSUMER32_UST,
115 .err_unix_sock_path = DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
116 .cmd_unix_sock_path = DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH,
117 .err_sock = -1,
118 .cmd_sock = -1,
119 .pid_mutex = PTHREAD_MUTEX_INITIALIZER,
120 .lock = PTHREAD_MUTEX_INITIALIZER,
121 .cond = PTHREAD_COND_INITIALIZER,
122 .cond_mutex = PTHREAD_MUTEX_INITIALIZER,
123 };
124
125 /* Shared between threads */
126 static int dispatch_thread_exit;
127
128 /* Global application Unix socket path */
129 static char apps_unix_sock_path[PATH_MAX];
130 /* Global client Unix socket path */
131 static char client_unix_sock_path[PATH_MAX];
132 /* global wait shm path for UST */
133 static char wait_shm_path[PATH_MAX];
134 /* Global health check unix path */
135 static char health_unix_sock_path[PATH_MAX];
136
137 /* Sockets and FDs */
138 static int client_sock = -1;
139 static int apps_sock = -1;
140 int kernel_tracer_fd = -1;
141 static int kernel_poll_pipe[2] = { -1, -1 };
142
143 /*
144 * Quit pipe for all threads. This permits a single cancellation point
145 * for all threads when receiving an event on the pipe.
146 */
147 static int thread_quit_pipe[2] = { -1, -1 };
148
149 /*
150 * This pipe is used to inform the thread managing application communication
151 * that a command is queued and ready to be processed.
152 */
153 static int apps_cmd_pipe[2] = { -1, -1 };
154
155 int apps_cmd_notify_pipe[2] = { -1, -1 };
156
157 /* Pthread, Mutexes and Semaphores */
158 static pthread_t apps_thread;
159 static pthread_t apps_notify_thread;
160 static pthread_t reg_apps_thread;
161 static pthread_t client_thread;
162 static pthread_t kernel_thread;
163 static pthread_t dispatch_thread;
164 static pthread_t health_thread;
165 static pthread_t ht_cleanup_thread;
166 static pthread_t jul_reg_thread;
167
168 /*
169 * UST registration command queue. This queue is tied with a futex and uses an
170 * N wakers / 1 waiter scheme implemented and detailed in futex.c/.h
171 *
172 * The thread_manage_apps and thread_dispatch_ust_registration interact with
173 * this queue and the wait/wake scheme.
174 */
175 static struct ust_cmd_queue ust_cmd_queue;
176
177 /*
178 * Pointer initialized before thread creation.
179 *
180 * This points to the tracing session list containing the session count and a
181 * mutex lock. The lock MUST be taken if you iterate over the list. The lock
182 * MUST NOT be taken if you call a public function in session.c.
183 *
184 * The lock is nested inside the structure: session_list_ptr->lock. Please use
185 * session_lock_list and session_unlock_list for lock acquisition.
186 */
187 static struct ltt_session_list *session_list_ptr;
188
189 int ust_consumerd64_fd = -1;
190 int ust_consumerd32_fd = -1;
191
192 static const char *consumerd32_bin = CONFIG_CONSUMERD32_BIN;
193 static const char *consumerd64_bin = CONFIG_CONSUMERD64_BIN;
194 static const char *consumerd32_libdir = CONFIG_CONSUMERD32_LIBDIR;
195 static const char *consumerd64_libdir = CONFIG_CONSUMERD64_LIBDIR;
196
197 static const char *module_proc_lttng = "/proc/lttng";
198
199 /*
200 * Consumer daemon state which is changed when spawning it, killing it or in
201 * case of a fatal error.
202 */
203 enum consumerd_state {
204 CONSUMER_STARTED = 1,
205 CONSUMER_STOPPED = 2,
206 CONSUMER_ERROR = 3,
207 };
208
209 /*
210 * This consumer daemon state is used to validate if a client command will be
211 * able to reach the consumer. If not, the client is informed. For instance,
212 * doing a "lttng start" when the consumer state is set to ERROR will return an
213 * error to the client.
214 *
215 * The following example shows a possible race condition of this scheme:
216 *
217 * consumer thread error happens
218 * client cmd arrives
219 * client cmd checks state -> still OK
220 * consumer thread exit, sets error
221 * client cmd try to talk to consumer
222 * ...
223 *
224 * However, since the consumer is a different daemon, we have no way of making
225 * sure the command will reach it safely even with this state flag. This is why
226 * we consider that up to the state validation during command processing, the
227 * command is safe. After that, we can not guarantee the correctness of the
228 * client request vis-a-vis the consumer.
229 */
230 static enum consumerd_state ust_consumerd_state;
231 static enum consumerd_state kernel_consumerd_state;
232
233 /*
234 * Socket timeout for receiving and sending in seconds.
235 */
236 static int app_socket_timeout;
237
238 /* Set in main() with the current page size. */
239 long page_size;
240
241 /* Application health monitoring */
242 struct health_app *health_sessiond;
243
244 /* JUL TCP port for registration. Used by the JUL thread. */
245 unsigned int jul_tcp_port = DEFAULT_JUL_TCP_PORT;
246
247 /* Am I root or not. */
248 int is_root; /* Set to 1 if the daemon is running as root */
249
250 /*
251 * Whether sessiond is ready for commands/health check requests.
252 * NR_LTTNG_SESSIOND_READY must match the number of calls to
253 * lttng_sessiond_notify_ready().
254 */
255 #define NR_LTTNG_SESSIOND_READY 2
256 int lttng_sessiond_ready = NR_LTTNG_SESSIOND_READY;
257
258 /* Notify parents that we are ready for cmd and health check */
259 static
260 void lttng_sessiond_notify_ready(void)
261 {
262 if (uatomic_sub_return(&lttng_sessiond_ready, 1) == 0) {
263 /*
264 * Notify parent pid that we are ready to accept command
265 * for client side. This ppid is the one from the
266 * external process that spawned us.
267 */
268 if (opt_sig_parent) {
269 kill(ppid, SIGUSR1);
270 }
271
272 /*
273 * Notify the parent of the fork() process that we are
274 * ready.
275 */
276 if (opt_daemon || opt_background) {
277 kill(child_ppid, SIGUSR1);
278 }
279 }
280 }
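
/*
 * A hedged sketch of how the countdown above is meant to be used: each of
 * the NR_LTTNG_SESSIOND_READY subsystems (plausibly the client command
 * socket and the health socket, per the readiness comment above) calls
 * lttng_sessiond_notify_ready() exactly once when it becomes operational.
 * A hypothetical helper (illustrative; not in the original file) to test
 * readiness from another thread could be:
 */
static int lttng_sessiond_is_ready(void)
{
	/* Zero remaining notifications means every subsystem is up. */
	return uatomic_read(&lttng_sessiond_ready) == 0;
}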
281
282 static
283 void setup_consumerd_path(void)
284 {
285 const char *bin, *libdir;
286
287 /*
288 * Allow INSTALL_BIN_PATH to be used as a target path for the
289 * native architecture size consumer if CONFIG_CONSUMER*_PATH
290 * has not been defined.
291 */
292 #if (CAA_BITS_PER_LONG == 32)
293 if (!consumerd32_bin[0]) {
294 consumerd32_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
295 }
296 if (!consumerd32_libdir[0]) {
297 consumerd32_libdir = INSTALL_LIB_PATH;
298 }
299 #elif (CAA_BITS_PER_LONG == 64)
300 if (!consumerd64_bin[0]) {
301 consumerd64_bin = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
302 }
303 if (!consumerd64_libdir[0]) {
304 consumerd64_libdir = INSTALL_LIB_PATH;
305 }
306 #else
307 #error "Unknown bitness"
308 #endif
309
310 /*
311 * runtime env. var. overrides the build default.
312 */
313 bin = getenv("LTTNG_CONSUMERD32_BIN");
314 if (bin) {
315 consumerd32_bin = bin;
316 }
317 bin = getenv("LTTNG_CONSUMERD64_BIN");
318 if (bin) {
319 consumerd64_bin = bin;
320 }
321 libdir = getenv("LTTNG_CONSUMERD32_LIBDIR");
322 if (libdir) {
323 consumerd32_libdir = libdir;
324 }
325 libdir = getenv("LTTNG_CONSUMERD64_LIBDIR");
326 if (libdir) {
327 consumerd64_libdir = libdir;
328 }
329 }
330
331 /*
332 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
333 */
334 int sessiond_set_thread_pollset(struct lttng_poll_event *events, size_t size)
335 {
336 int ret;
337
338 assert(events);
339
340 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
341 if (ret < 0) {
342 goto error;
343 }
344
345 /* Add quit pipe */
346 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
347 if (ret < 0) {
348 goto error;
349 }
350
351 return 0;
352
353 error:
354 return ret;
355 }
356
357 /*
358 * Check if the thread quit pipe was triggered.
359 *
360 * Return 1 if it was triggered, else 0.
361 */
362 int sessiond_check_thread_quit_pipe(int fd, uint32_t events)
363 {
364 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
365 return 1;
366 }
367
368 return 0;
369 }
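
/*
 * Condensed, hedged sketch of the worker-thread pattern built on the two
 * helpers above; every thread below follows it, adding health accounting
 * and its own FDs. This skeleton is not wired into the daemon.
 */
static void *thread_skeleton_sketch(void *data)
{
	int i, ret;
	struct lttng_poll_event events;

	if (sessiond_set_thread_pollset(&events, 2) < 0) {
		goto end;
	}

	for (;;) {
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			if (errno == EINTR) {
				/* Restart interrupted system call. */
				continue;
			}
			goto error;
		}

		for (i = 0; i < ret; i++) {
			uint32_t revents = LTTNG_POLL_GETEV(&events, i);
			int pollfd = LTTNG_POLL_GETFD(&events, i);

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}
			if (sessiond_check_thread_quit_pipe(pollfd, revents)) {
				/* Quit pipe triggered; tear down. */
				goto error;
			}
			/* Handle the thread-specific FDs here. */
		}
	}
error:
	lttng_poll_clean(&events);
end:
	return NULL;
}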
370
371 /*
372 * Init thread quit pipe.
373 *
374 * Return -1 on error or 0 if all pipes are created.
375 */
376 static int init_thread_quit_pipe(void)
377 {
378 int ret, i;
379
380 ret = pipe(thread_quit_pipe);
381 if (ret < 0) {
382 PERROR("thread quit pipe");
383 goto error;
384 }
385
386 for (i = 0; i < 2; i++) {
387 ret = fcntl(thread_quit_pipe[i], F_SETFD, FD_CLOEXEC);
388 if (ret < 0) {
389 PERROR("fcntl");
390 goto error;
391 }
392 }
393
394 error:
395 return ret;
396 }
397
398 /*
399 * Stop all threads by closing the thread quit pipe.
400 */
401 static void stop_threads(void)
402 {
403 int ret;
404
405 /* Stopping all threads */
406 DBG("Terminating all threads");
407 ret = notify_thread_pipe(thread_quit_pipe[1]);
408 if (ret < 0) {
409 ERR("write error on thread quit pipe");
410 }
411
412 /* Dispatch thread */
413 CMM_STORE_SHARED(dispatch_thread_exit, 1);
414 futex_nto1_wake(&ust_cmd_queue.futex);
415 }
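
/*
 * The futex wake above is the waker half of the N wakers / 1 waiter scheme
 * described with ust_cmd_queue. Condensed, both halves look like this
 * (sketch; see thread_registration_apps and thread_dispatch_ust_registration
 * below for the real code):
 *
 *   Waker (any thread), after publishing work:
 *       cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
 *       futex_nto1_wake(&ust_cmd_queue.futex);
 *
 *   Waiter (single dispatch thread):
 *       for (;;) {
 *               futex_nto1_prepare(&ust_cmd_queue.futex);
 *               while ((node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue))) {
 *                       ... handle node ...
 *               }
 *               futex_nto1_wait(&ust_cmd_queue.futex);
 *       }
 */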
416
417 /*
418 * Close every consumer sockets.
419 */
420 static void close_consumer_sockets(void)
421 {
422 int ret;
423
424 if (kconsumer_data.err_sock >= 0) {
425 ret = close(kconsumer_data.err_sock);
426 if (ret < 0) {
427 PERROR("kernel consumer err_sock close");
428 }
429 }
430 if (ustconsumer32_data.err_sock >= 0) {
431 ret = close(ustconsumer32_data.err_sock);
432 if (ret < 0) {
433 PERROR("UST consumerd32 err_sock close");
434 }
435 }
436 if (ustconsumer64_data.err_sock >= 0) {
437 ret = close(ustconsumer64_data.err_sock);
438 if (ret < 0) {
439 PERROR("UST consumerd64 err_sock close");
440 }
441 }
442 if (kconsumer_data.cmd_sock >= 0) {
443 ret = close(kconsumer_data.cmd_sock);
444 if (ret < 0) {
445 PERROR("kernel consumer cmd_sock close");
446 }
447 }
448 if (ustconsumer32_data.cmd_sock >= 0) {
449 ret = close(ustconsumer32_data.cmd_sock);
450 if (ret < 0) {
451 PERROR("UST consumerd32 cmd_sock close");
452 }
453 }
454 if (ustconsumer64_data.cmd_sock >= 0) {
455 ret = close(ustconsumer64_data.cmd_sock);
456 if (ret < 0) {
457 PERROR("UST consumerd64 cmd_sock close");
458 }
459 }
460 }
461
462 /*
463 * Generate the full lock file path using the rundir.
464 *
465 * Return the snprintf() return value thus a negative value is an error.
466 */
467 static int generate_lock_file_path(char *path, size_t len)
468 {
469 int ret;
470
471 assert(path);
472 assert(rundir);
473
474 /* Build lockfile path from rundir. */
475 ret = snprintf(path, len, "%s/" DEFAULT_LTTNG_SESSIOND_LOCKFILE, rundir);
476 if (ret < 0) {
477 PERROR("snprintf lockfile path");
478 }
479
480 return ret;
481 }
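
/*
 * Note that snprintf() also returns the would-be length when the output is
 * truncated, so a stricter caller could additionally treat ret >= len as an
 * error (hedged sketch):
 *
 *   ret = generate_lock_file_path(path, sizeof(path));
 *   if (ret < 0 || ret >= sizeof(path)) {
 *           ... reject the truncated path ...
 *   }
 */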
482
483 /*
484 * Cleanup the daemon
485 */
486 static void cleanup(void)
487 {
488 int ret;
489 struct ltt_session *sess, *stmp;
490 char path[PATH_MAX];
491
492 DBG("Cleaning up");
493
494 /*
495 * Close the thread quit pipe. It has already done its job,
496 * since we are now called.
497 */
498 utils_close_pipe(thread_quit_pipe);
499
500 /*
501 * If opt_pidfile is undefined, the default file will be wiped when
502 * removing the rundir.
503 */
504 if (opt_pidfile) {
505 ret = remove(opt_pidfile);
506 if (ret < 0) {
507 PERROR("remove pidfile %s", opt_pidfile);
508 }
509 }
510
511 DBG("Removing sessiond and consumerd content of directory %s", rundir);
512
513 /* sessiond */
514 snprintf(path, PATH_MAX,
515 "%s/%s",
516 rundir, DEFAULT_LTTNG_SESSIOND_PIDFILE);
517 DBG("Removing %s", path);
518 (void) unlink(path);
519
520 snprintf(path, PATH_MAX, "%s/%s", rundir,
521 DEFAULT_LTTNG_SESSIOND_JULPORT_FILE);
522 DBG("Removing %s", path);
523 (void) unlink(path);
524
525 /* kconsumerd */
526 snprintf(path, PATH_MAX,
527 DEFAULT_KCONSUMERD_ERR_SOCK_PATH,
528 rundir);
529 DBG("Removing %s", path);
530 (void) unlink(path);
531
532 snprintf(path, PATH_MAX,
533 DEFAULT_KCONSUMERD_PATH,
534 rundir);
535 DBG("Removing directory %s", path);
536 (void) rmdir(path);
537
538 /* ust consumerd 32 */
539 snprintf(path, PATH_MAX,
540 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH,
541 rundir);
542 DBG("Removing %s", path);
543 (void) unlink(path);
544
545 snprintf(path, PATH_MAX,
546 DEFAULT_USTCONSUMERD32_PATH,
547 rundir);
548 DBG("Removing directory %s", path);
549 (void) rmdir(path);
550
551 /* ust consumerd 64 */
552 snprintf(path, PATH_MAX,
553 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH,
554 rundir);
555 DBG("Removing %s", path);
556 (void) unlink(path);
557
558 snprintf(path, PATH_MAX,
559 DEFAULT_USTCONSUMERD64_PATH,
560 rundir);
561 DBG("Removing directory %s", path);
562 (void) rmdir(path);
563
564 DBG("Cleaning up all sessions");
565
566 /* Destroy session list mutex */
567 if (session_list_ptr != NULL) {
568 pthread_mutex_destroy(&session_list_ptr->lock);
569
570 /* Cleanup ALL session */
571 cds_list_for_each_entry_safe(sess, stmp,
572 &session_list_ptr->head, list) {
573 cmd_destroy_session(sess, kernel_poll_pipe[1]);
574 }
575 }
576
577 DBG("Closing all UST sockets");
578 ust_app_clean_list();
579 buffer_reg_destroy_registries();
580
581 if (is_root && !opt_no_kernel) {
582 DBG2("Closing kernel fd");
583 if (kernel_tracer_fd >= 0) {
584 ret = close(kernel_tracer_fd);
585 if (ret) {
586 PERROR("close");
587 }
588 }
589 DBG("Unloading kernel modules");
590 modprobe_remove_lttng_all();
591 }
592
593 close_consumer_sockets();
594
595
596 /*
597 * Clean up the lock file by deleting it and finally closing it, which will
598 * release the file system lock.
599 */
600 if (lockfile_fd >= 0) {
601 char lockfile_path[PATH_MAX];
602
603 ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
604 if (ret > 0) {
605 ret = remove(lockfile_path);
606 if (ret < 0) {
607 PERROR("remove lock file");
608 }
609 ret = close(lockfile_fd);
610 if (ret < 0) {
611 PERROR("close lock file");
612 }
613 }
614 }
615
616 /*
617 * We do NOT rmdir rundir because there are other processes
618 * using it, for instance lttng-relayd, which can start in
619 * parallel with this teardown.
620 */
621
622 free(rundir);
623
624 /* <fun> */
625 DBG("%c[%d;%dm*** assert failed :-) *** ==> %c[%dm%c[%d;%dm"
626 "Matthew, BEET driven development works!%c[%dm",
627 27, 1, 31, 27, 0, 27, 1, 33, 27, 0);
628 /* </fun> */
629 }
630
631 /*
632 * Send data on a unix socket using the liblttsessiondcomm API.
633 *
634 * Return lttcomm error code.
635 */
636 static int send_unix_sock(int sock, void *buf, size_t len)
637 {
638 /* Check valid length */
639 if (len == 0) {
640 return -1;
641 }
642
643 return lttcomm_send_unix_sock(sock, buf, len);
644 }
645
646 /*
647 * Free memory of a command context structure.
648 */
649 static void clean_command_ctx(struct command_ctx **cmd_ctx)
650 {
651 DBG("Clean command context structure");
652 if (*cmd_ctx) {
653 if ((*cmd_ctx)->llm) {
654 free((*cmd_ctx)->llm);
655 }
656 if ((*cmd_ctx)->lsm) {
657 free((*cmd_ctx)->lsm);
658 }
659 free(*cmd_ctx);
660 *cmd_ctx = NULL;
661 }
662 }
663
664 /*
665 * Notify UST applications using the shm mmap futex.
666 */
667 static int notify_ust_apps(int active)
668 {
669 char *wait_shm_mmap;
670
671 DBG("Notifying applications of session daemon state: %d", active);
672
673 /* See shm.c for this call implying mmap, shm and futex calls */
674 wait_shm_mmap = shm_ust_get_mmap(wait_shm_path, is_root);
675 if (wait_shm_mmap == NULL) {
676 goto error;
677 }
678
679 /* Wake waiting process */
680 futex_wait_update((int32_t *) wait_shm_mmap, active);
681
682 /* Apps notified successfully */
683 return 0;
684
685 error:
686 return -1;
687 }
688
689 /*
690 * Setup the outgoing data buffer for the response (llm) by allocating the
691 * right amount of memory and copying the original information from the lsm
692 * structure.
693 *
694 * Return total size of the buffer pointed by buf.
695 */
696 static int setup_lttng_msg(struct command_ctx *cmd_ctx, size_t size)
697 {
698 int ret, buf_size;
699
700 buf_size = size;
701
702 cmd_ctx->llm = zmalloc(sizeof(struct lttcomm_lttng_msg) + buf_size);
703 if (cmd_ctx->llm == NULL) {
704 PERROR("zmalloc");
705 ret = -ENOMEM;
706 goto error;
707 }
708
709 /* Copy common data */
710 cmd_ctx->llm->cmd_type = cmd_ctx->lsm->cmd_type;
711 cmd_ctx->llm->pid = cmd_ctx->lsm->domain.attr.pid;
712
713 cmd_ctx->llm->data_size = size;
714 cmd_ctx->lttng_msg_size = sizeof(struct lttcomm_lttng_msg) + buf_size;
715
716 return buf_size;
717
718 error:
719 return ret;
720 }
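
/*
 * Layout of the reply buffer allocated above: the header comes first,
 * immediately followed by the command-specific payload of llm->data_size
 * bytes:
 *
 *   [ struct lttcomm_lttng_msg ][ payload (size bytes) ]
 *
 * A command handler can thus write its payload right past the header; a
 * hedged sketch, with my_data standing in for command-specific data:
 *
 *   memcpy(cmd_ctx->llm + 1, my_data, size);
 */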
721
722 /*
723 * Update the kernel poll set with all channel fds available over all tracing
724 * sessions. Add the wakeup pipe at the end of the set.
725 */
726 static int update_kernel_poll(struct lttng_poll_event *events)
727 {
728 int ret;
729 struct ltt_session *session;
730 struct ltt_kernel_channel *channel;
731
732 DBG("Updating kernel poll set");
733
734 session_lock_list();
735 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
736 session_lock(session);
737 if (session->kernel_session == NULL) {
738 session_unlock(session);
739 continue;
740 }
741
742 cds_list_for_each_entry(channel,
743 &session->kernel_session->channel_list.head, list) {
744 /* Add channel fd to the kernel poll set */
745 ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
746 if (ret < 0) {
747 session_unlock(session);
748 goto error;
749 }
750 DBG("Channel fd %d added to kernel set", channel->fd);
751 }
752 session_unlock(session);
753 }
754 session_unlock_list();
755
756 return 0;
757
758 error:
759 session_unlock_list();
760 return -1;
761 }
762
763 /*
764 * Find the channel fd from 'fd' over all tracing sessions. When found, check
765 * for new channel stream and send those stream fds to the kernel consumer.
766 *
767 * Useful for CPU hotplug feature.
768 */
769 static int update_kernel_stream(struct consumer_data *consumer_data, int fd)
770 {
771 int ret = 0;
772 struct ltt_session *session;
773 struct ltt_kernel_session *ksess;
774 struct ltt_kernel_channel *channel;
775
776 DBG("Updating kernel streams for channel fd %d", fd);
777
778 session_lock_list();
779 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
780 session_lock(session);
781 if (session->kernel_session == NULL) {
782 session_unlock(session);
783 continue;
784 }
785 ksess = session->kernel_session;
786
787 cds_list_for_each_entry(channel, &ksess->channel_list.head, list) {
788 if (channel->fd == fd) {
789 DBG("Channel found, updating kernel streams");
790 ret = kernel_open_channel_stream(channel);
791 if (ret < 0) {
792 goto error;
793 }
794 /* Update the stream global counter */
795 ksess->stream_count_global += ret;
796
797 /*
798 * Have we already sent fds to the consumer? If yes, it means
799 * that tracing is started so it is safe to send our updated
800 * stream fds.
801 */
802 if (ksess->consumer_fds_sent == 1 && ksess->consumer != NULL) {
803 struct lttng_ht_iter iter;
804 struct consumer_socket *socket;
805
806 rcu_read_lock();
807 cds_lfht_for_each_entry(ksess->consumer->socks->ht,
808 &iter.iter, socket, node.node) {
809 pthread_mutex_lock(socket->lock);
810 ret = kernel_consumer_send_channel_stream(socket,
811 channel, ksess,
812 session->output_traces ? 1 : 0);
813 pthread_mutex_unlock(socket->lock);
814 if (ret < 0) {
815 rcu_read_unlock();
816 goto error;
817 }
818 }
819 rcu_read_unlock();
820 }
821 goto error;
822 }
823 }
824 session_unlock(session);
825 }
826 session_unlock_list();
827 return ret;
828
829 error:
830 session_unlock(session);
831 session_unlock_list();
832 return ret;
833 }
834
835 /*
836 * For each tracing session, update newly registered apps. The session list
837 * lock MUST be acquired before calling this.
838 */
839 static void update_ust_app(int app_sock)
840 {
841 struct ltt_session *sess, *stmp;
842
843 /* Consumer is in an ERROR state. Stop any application update. */
844 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
845 /* Stop the update process since the consumer is dead. */
846 return;
847 }
848
849 /* For all tracing session(s) */
850 cds_list_for_each_entry_safe(sess, stmp, &session_list_ptr->head, list) {
851 session_lock(sess);
852 if (sess->ust_session) {
853 ust_app_global_update(sess->ust_session, app_sock);
854 }
855 session_unlock(sess);
856 }
857 }
858
859 /*
860 * This thread manages events coming from the kernel.
861 *
862 * Features supported in this thread:
863 * -) CPU Hotplug
864 */
865 static void *thread_manage_kernel(void *data)
866 {
867 int ret, i, pollfd, update_poll_flag = 1, err = -1;
868 uint32_t revents, nb_fd;
869 char tmp;
870 struct lttng_poll_event events;
871
872 DBG("[thread] Thread manage kernel started");
873
874 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);
875
876 /*
877 * The first step of the while loop cleans this structure, which could free
878 * non-NULL pointers, so initialize it before the loop.
879 */
880 lttng_poll_init(&events);
881
882 if (testpoint(sessiond_thread_manage_kernel)) {
883 goto error_testpoint;
884 }
885
886 health_code_update();
887
888 if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
889 goto error_testpoint;
890 }
891
892 while (1) {
893 health_code_update();
894
895 if (update_poll_flag == 1) {
896 /* Clean events object. We are about to populate it again. */
897 lttng_poll_clean(&events);
898
899 ret = sessiond_set_thread_pollset(&events, 2);
900 if (ret < 0) {
901 goto error_poll_create;
902 }
903
904 ret = lttng_poll_add(&events, kernel_poll_pipe[0], LPOLLIN);
905 if (ret < 0) {
906 goto error;
907 }
908
909 /* This will add the available kernel channel if any. */
910 ret = update_kernel_poll(&events);
911 if (ret < 0) {
912 goto error;
913 }
914 update_poll_flag = 0;
915 }
916
917 DBG("Thread kernel polling on %d fds", LTTNG_POLL_GETNB(&events));
918
919 /* Poll with an infinite timeout */
920 restart:
921 health_poll_entry();
922 ret = lttng_poll_wait(&events, -1);
923 health_poll_exit();
924 if (ret < 0) {
925 /*
926 * Restart interrupted system call.
927 */
928 if (errno == EINTR) {
929 goto restart;
930 }
931 goto error;
932 } else if (ret == 0) {
933 /* Should not happen since timeout is infinite */
934 ERR("Return value of poll is 0 with an infinite timeout.\n"
935 "This should not have happened! Continuing...");
936 continue;
937 }
938
939 nb_fd = ret;
940
941 for (i = 0; i < nb_fd; i++) {
942 /* Fetch once the poll data */
943 revents = LTTNG_POLL_GETEV(&events, i);
944 pollfd = LTTNG_POLL_GETFD(&events, i);
945
946 health_code_update();
947
948 if (!revents) {
949 /* No activity for this FD (poll implementation). */
950 continue;
951 }
952
953 /* Thread quit pipe has been closed. Killing thread. */
954 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
955 if (ret) {
956 err = 0;
957 goto exit;
958 }
959
960 /* Check for data on kernel pipe */
961 if (pollfd == kernel_poll_pipe[0] && (revents & LPOLLIN)) {
962 (void) lttng_read(kernel_poll_pipe[0],
963 &tmp, 1);
964 /*
965 * The return value is not checked here; if this pipe gets any
966 * action, an update is required anyway.
967 */
968 update_poll_flag = 1;
969 continue;
970 } else {
971 /*
972 * New CPU detected by the kernel. Adding kernel stream to
973 * kernel session and updating the kernel consumer
974 */
975 if (revents & LPOLLIN) {
976 ret = update_kernel_stream(&kconsumer_data, pollfd);
977 if (ret < 0) {
978 continue;
979 }
980 break;
981 /*
982 * TODO: We might want to handle the LPOLLERR | LPOLLHUP
983 * and unregister kernel stream at this point.
984 */
985 }
986 }
987 }
988 }
989
990 exit:
991 error:
992 lttng_poll_clean(&events);
993 error_poll_create:
994 error_testpoint:
995 utils_close_pipe(kernel_poll_pipe);
996 kernel_poll_pipe[0] = kernel_poll_pipe[1] = -1;
997 if (err) {
998 health_error();
999 ERR("Health error occurred in %s", __func__);
1000 WARN("Kernel thread died unexpectedly. "
1001 "Kernel tracing can continue but CPU hotplug is disabled.");
1002 }
1003 health_unregister(health_sessiond);
1004 DBG("Kernel thread dying");
1005 return NULL;
1006 }
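
/*
 * About the "if (!revents)" checks used in the loop above and in every poll
 * loop of this file: with the poll() flavour of the compat layer,
 * lttng_poll_wait() returns the number of active FDs, but
 * LTTNG_POLL_GETFD()/LTTNG_POLL_GETEV() index the full pollfd array, in
 * which inactive entries simply keep revents == 0. Iterating the first
 * nb_fd slots can therefore land on an FD that saw no activity, hence:
 *
 *   if (!revents) {
 *           continue;    No activity for this FD (poll implementation).
 *   }
 *
 * The epoll flavour only ever hands back active events, so the check is a
 * harmless no-op there.
 */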
1007
1008 /*
1009 * Signal the consumer data's pthread condition to report the thread state.
1010 */
1011 static void signal_consumer_condition(struct consumer_data *data, int state)
1012 {
1013 pthread_mutex_lock(&data->cond_mutex);
1014
1015 /*
1016 * The state is set before signaling. It can be any value; it is the waiter's
1017 * job to correctly interpret the condition variable associated with the
1018 * consumer pthread_cond.
1019 *
1020 * A value of 0 means that the corresponding thread of the consumer data
1021 * was not started. 1 indicates that the thread has started and is ready
1022 * for action. A negative value means that there was an error during the
1023 * thread bootstrap.
1024 */
1025 data->consumer_thread_is_ready = state;
1026 (void) pthread_cond_signal(&data->cond);
1027
1028 pthread_mutex_unlock(&data->cond_mutex);
1029 }
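
/*
 * The waiter half of this handshake lives in spawn_consumer_thread() below:
 * it takes cond_mutex and loops on the state with pthread_cond_timedwait()
 * so a consumer that never comes up cannot block the daemon forever.
 * Setting the state before signaling, both under the mutex, is what makes
 * the classic re-check loop safe (condensed sketch of the code below):
 *
 *   pthread_mutex_lock(&data->cond_mutex);
 *   while (!data->consumer_thread_is_ready && ret != ETIMEDOUT) {
 *           ret = pthread_cond_timedwait(&data->cond, &data->cond_mutex,
 *                           &timeout);
 *   }
 *   pthread_mutex_unlock(&data->cond_mutex);
 */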
1030
1031 /*
1032 * This thread manages consumer errors sent back to the session daemon.
1033 */
1034 static void *thread_manage_consumer(void *data)
1035 {
1036 int sock = -1, i, ret, pollfd, err = -1, should_quit = 0;
1037 uint32_t revents, nb_fd;
1038 enum lttcomm_return_code code;
1039 struct lttng_poll_event events;
1040 struct consumer_data *consumer_data = data;
1041
1042 DBG("[thread] Manage consumer started");
1043
1044 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CONSUMER);
1045
1046 health_code_update();
1047
1048 /*
1049 * Pass 3 as size here for the thread quit pipe, consumerd_err_sock and the
1050 * metadata_sock. Nothing more will be added to this poll set.
1051 */
1052 ret = sessiond_set_thread_pollset(&events, 3);
1053 if (ret < 0) {
1054 goto error_poll;
1055 }
1056
1057 /*
1058 * The error socket here is already in a listening state which was done
1059 * just before spawning this thread to avoid a race between the consumer
1060 * daemon exec trying to connect and the listen() call.
1061 */
1062 ret = lttng_poll_add(&events, consumer_data->err_sock, LPOLLIN | LPOLLRDHUP);
1063 if (ret < 0) {
1064 goto error;
1065 }
1066
1067 health_code_update();
1068
1069 /* Infinite blocking call, waiting for transmission */
1070 restart:
1071 health_poll_entry();
1072
1073 if (testpoint(sessiond_thread_manage_consumer)) {
1074 goto error;
1075 }
1076
1077 ret = lttng_poll_wait(&events, -1);
1078 health_poll_exit();
1079 if (ret < 0) {
1080 /*
1081 * Restart interrupted system call.
1082 */
1083 if (errno == EINTR) {
1084 goto restart;
1085 }
1086 goto error;
1087 }
1088
1089 nb_fd = ret;
1090
1091 for (i = 0; i < nb_fd; i++) {
1092 /* Fetch once the poll data */
1093 revents = LTTNG_POLL_GETEV(&events, i);
1094 pollfd = LTTNG_POLL_GETFD(&events, i);
1095
1096 health_code_update();
1097
1098 if (!revents) {
1099 /* No activity for this FD (poll implementation). */
1100 continue;
1101 }
1102
1103 /* Thread quit pipe has been closed. Killing thread. */
1104 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1105 if (ret) {
1106 err = 0;
1107 goto exit;
1108 }
1109
1110 /* Event on the registration socket */
1111 if (pollfd == consumer_data->err_sock) {
1112 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1113 ERR("consumer err socket poll error");
1114 goto error;
1115 }
1116 }
1117 }
1118
1119 sock = lttcomm_accept_unix_sock(consumer_data->err_sock);
1120 if (sock < 0) {
1121 goto error;
1122 }
1123
1124 /*
1125 * Set the CLOEXEC flag. Return code is useless because either way, the
1126 * show must go on.
1127 */
1128 (void) utils_set_fd_cloexec(sock);
1129
1130 health_code_update();
1131
1132 DBG2("Receiving code from consumer err_sock");
1133
1134 /* Getting status code from kconsumerd */
1135 ret = lttcomm_recv_unix_sock(sock, &code,
1136 sizeof(enum lttcomm_return_code));
1137 if (ret <= 0) {
1138 goto error;
1139 }
1140
1141 health_code_update();
1142 if (code == LTTCOMM_CONSUMERD_COMMAND_SOCK_READY) {
1143 /* Connect both sockets, command and metadata. */
1144 consumer_data->cmd_sock =
1145 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1146 consumer_data->metadata_fd =
1147 lttcomm_connect_unix_sock(consumer_data->cmd_unix_sock_path);
1148 if (consumer_data->cmd_sock < 0
1149 || consumer_data->metadata_fd < 0) {
1150 PERROR("consumer connect cmd socket");
1151 /* On error, signal condition and quit. */
1152 signal_consumer_condition(consumer_data, -1);
1153 goto error;
1154 }
1155 consumer_data->metadata_sock.fd_ptr = &consumer_data->metadata_fd;
1156 /* Create metadata socket lock. */
1157 consumer_data->metadata_sock.lock = zmalloc(sizeof(pthread_mutex_t));
1158 if (consumer_data->metadata_sock.lock == NULL) {
1159 PERROR("zmalloc pthread mutex");
1160 ret = -1;
1161 goto error;
1162 }
1163 pthread_mutex_init(consumer_data->metadata_sock.lock, NULL);
1164
1165 signal_consumer_condition(consumer_data, 1);
1166 DBG("Consumer command socket ready (fd: %d", consumer_data->cmd_sock);
1167 DBG("Consumer metadata socket ready (fd: %d)",
1168 consumer_data->metadata_fd);
1169 } else {
1170 ERR("consumer error when waiting for SOCK_READY : %s",
1171 lttcomm_get_readable_code(-code));
1172 goto error;
1173 }
1174
1175 /* Remove the consumerd error sock since we've established a connection */
1176 ret = lttng_poll_del(&events, consumer_data->err_sock);
1177 if (ret < 0) {
1178 goto error;
1179 }
1180
1181 /* Add new accepted error socket. */
1182 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLRDHUP);
1183 if (ret < 0) {
1184 goto error;
1185 }
1186
1187 /* Add metadata socket that is successfully connected. */
1188 ret = lttng_poll_add(&events, consumer_data->metadata_fd,
1189 LPOLLIN | LPOLLRDHUP);
1190 if (ret < 0) {
1191 goto error;
1192 }
1193
1194 health_code_update();
1195
1196 /* Infinite blocking call, waiting for transmission */
1197 restart_poll:
1198 while (1) {
1199 health_code_update();
1200
1201 /* Exit the thread because the thread quit pipe has been triggered. */
1202 if (should_quit) {
1203 /* Not a health error. */
1204 err = 0;
1205 goto exit;
1206 }
1207
1208 health_poll_entry();
1209 ret = lttng_poll_wait(&events, -1);
1210 health_poll_exit();
1211 if (ret < 0) {
1212 /*
1213 * Restart interrupted system call.
1214 */
1215 if (errno == EINTR) {
1216 goto restart_poll;
1217 }
1218 goto error;
1219 }
1220
1221 nb_fd = ret;
1222
1223 for (i = 0; i < nb_fd; i++) {
1224 /* Fetch once the poll data */
1225 revents = LTTNG_POLL_GETEV(&events, i);
1226 pollfd = LTTNG_POLL_GETFD(&events, i);
1227
1228 health_code_update();
1229
1230 if (!revents) {
1231 /* No activity for this FD (poll implementation). */
1232 continue;
1233 }
1234
1235 /*
1236 * Thread quit pipe has been triggered, flag that we should stop
1237 * but continue the current loop to handle potential data from
1238 * consumer.
1239 */
1240 should_quit = sessiond_check_thread_quit_pipe(pollfd, revents);
1241
1242 if (pollfd == sock) {
1243 /* Event on the consumerd socket */
1244 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1245 ERR("consumer err socket second poll error");
1246 goto error;
1247 }
1248 health_code_update();
1249 /* Wait for any kconsumerd error */
1250 ret = lttcomm_recv_unix_sock(sock, &code,
1251 sizeof(enum lttcomm_return_code));
1252 if (ret <= 0) {
1253 ERR("consumer closed the command socket");
1254 goto error;
1255 }
1256
1257 ERR("consumer return code : %s",
1258 lttcomm_get_readable_code(-code));
1259
1260 goto exit;
1261 } else if (pollfd == consumer_data->metadata_fd) {
1262 /* UST metadata requests */
1263 ret = ust_consumer_metadata_request(
1264 &consumer_data->metadata_sock);
1265 if (ret < 0) {
1266 ERR("Handling metadata request");
1267 goto error;
1268 }
1269 }
1270 /* No need for an else branch; all FDs are tested prior. */
1271 }
1272 health_code_update();
1273 }
1274
1275 exit:
1276 error:
1277 /*
1278 * We lock here because we are about to close the sockets and some other
1279 * thread might be using them, so get exclusive access, which will abort all
1280 * other consumer commands issued by other threads.
1281 */
1282 pthread_mutex_lock(&consumer_data->lock);
1283
1284 /* Immediately set the consumerd state to stopped */
1285 if (consumer_data->type == LTTNG_CONSUMER_KERNEL) {
1286 uatomic_set(&kernel_consumerd_state, CONSUMER_ERROR);
1287 } else if (consumer_data->type == LTTNG_CONSUMER64_UST ||
1288 consumer_data->type == LTTNG_CONSUMER32_UST) {
1289 uatomic_set(&ust_consumerd_state, CONSUMER_ERROR);
1290 } else {
1291 /* Code flow error... */
1292 assert(0);
1293 }
1294
1295 if (consumer_data->err_sock >= 0) {
1296 ret = close(consumer_data->err_sock);
1297 if (ret) {
1298 PERROR("close");
1299 }
1300 consumer_data->err_sock = -1;
1301 }
1302 if (consumer_data->cmd_sock >= 0) {
1303 ret = close(consumer_data->cmd_sock);
1304 if (ret) {
1305 PERROR("close");
1306 }
1307 consumer_data->cmd_sock = -1;
1308 }
1309 if (consumer_data->metadata_sock.fd_ptr &&
1310 *consumer_data->metadata_sock.fd_ptr >= 0) {
1311 ret = close(*consumer_data->metadata_sock.fd_ptr);
1312 if (ret) {
1313 PERROR("close");
1314 }
1315 }
1316 if (sock >= 0) {
1317 ret = close(sock);
1318 if (ret) {
1319 PERROR("close");
1320 }
1321 }
1322
1323 unlink(consumer_data->err_unix_sock_path);
1324 unlink(consumer_data->cmd_unix_sock_path);
1325 consumer_data->pid = 0;
1326 pthread_mutex_unlock(&consumer_data->lock);
1327
1328 /* Cleanup metadata socket mutex. */
1329 if (consumer_data->metadata_sock.lock) {
1330 pthread_mutex_destroy(consumer_data->metadata_sock.lock);
1331 free(consumer_data->metadata_sock.lock);
1332 }
1333 lttng_poll_clean(&events);
1334 error_poll:
1335 if (err) {
1336 health_error();
1337 ERR("Health error occurred in %s", __func__);
1338 }
1339 health_unregister(health_sessiond);
1340 DBG("consumer thread cleanup completed");
1341
1342 return NULL;
1343 }
1344
1345 /*
1346 * This thread manages application communication.
1347 */
1348 static void *thread_manage_apps(void *data)
1349 {
1350 int i, ret, pollfd, err = -1;
1351 ssize_t size_ret;
1352 uint32_t revents, nb_fd;
1353 struct lttng_poll_event events;
1354
1355 DBG("[thread] Manage application started");
1356
1357 rcu_register_thread();
1358 rcu_thread_online();
1359
1360 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_MANAGE);
1361
1362 if (testpoint(sessiond_thread_manage_apps)) {
1363 goto error_testpoint;
1364 }
1365
1366 health_code_update();
1367
1368 ret = sessiond_set_thread_pollset(&events, 2);
1369 if (ret < 0) {
1370 goto error_poll_create;
1371 }
1372
1373 ret = lttng_poll_add(&events, apps_cmd_pipe[0], LPOLLIN | LPOLLRDHUP);
1374 if (ret < 0) {
1375 goto error;
1376 }
1377
1378 if (testpoint(sessiond_thread_manage_apps_before_loop)) {
1379 goto error;
1380 }
1381
1382 health_code_update();
1383
1384 while (1) {
1385 DBG("Apps thread polling on %d fds", LTTNG_POLL_GETNB(&events));
1386
1387 /* Infinite blocking call, waiting for transmission */
1388 restart:
1389 health_poll_entry();
1390 ret = lttng_poll_wait(&events, -1);
1391 health_poll_exit();
1392 if (ret < 0) {
1393 /*
1394 * Restart interrupted system call.
1395 */
1396 if (errno == EINTR) {
1397 goto restart;
1398 }
1399 goto error;
1400 }
1401
1402 nb_fd = ret;
1403
1404 for (i = 0; i < nb_fd; i++) {
1405 /* Fetch once the poll data */
1406 revents = LTTNG_POLL_GETEV(&events, i);
1407 pollfd = LTTNG_POLL_GETFD(&events, i);
1408
1409 health_code_update();
1410
1411 if (!revents) {
1412 /* No activity for this FD (poll implementation). */
1413 continue;
1414 }
1415
1416 /* Thread quit pipe has been closed. Killing thread. */
1417 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1418 if (ret) {
1419 err = 0;
1420 goto exit;
1421 }
1422
1423 /* Inspect the apps cmd pipe */
1424 if (pollfd == apps_cmd_pipe[0]) {
1425 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1426 ERR("Apps command pipe error");
1427 goto error;
1428 } else if (revents & LPOLLIN) {
1429 int sock;
1430
1431 /* Empty pipe */
1432 size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
1433 if (size_ret < sizeof(sock)) {
1434 PERROR("read apps cmd pipe");
1435 goto error;
1436 }
1437
1438 health_code_update();
1439
1440 /*
1441 * We only monitor the error events of the socket. This
1442 * thread does not handle any incoming data from UST
1443 * (POLLIN).
1444 */
1445 ret = lttng_poll_add(&events, sock,
1446 LPOLLERR | LPOLLHUP | LPOLLRDHUP);
1447 if (ret < 0) {
1448 goto error;
1449 }
1450
1451 DBG("Apps with sock %d added to poll set", sock);
1452 }
1453 } else {
1454 /*
1455 * At this point, we know that a registered application made
1456 * the event at poll_wait.
1457 */
1458 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1459 /* Removing from the poll set */
1460 ret = lttng_poll_del(&events, pollfd);
1461 if (ret < 0) {
1462 goto error;
1463 }
1464
1465 /* Socket closed on remote end. */
1466 ust_app_unregister(pollfd);
1467 }
1468 }
1469
1470 health_code_update();
1471 }
1472 }
1473
1474 exit:
1475 error:
1476 lttng_poll_clean(&events);
1477 error_poll_create:
1478 error_testpoint:
1479 utils_close_pipe(apps_cmd_pipe);
1480 apps_cmd_pipe[0] = apps_cmd_pipe[1] = -1;
1481
1482 /*
1483 * We don't clean the UST app hash table here since already registered
1484 * applications can still be controlled so let them be until the session
1485 * daemon dies or the applications stop.
1486 */
1487
1488 if (err) {
1489 health_error();
1490 ERR("Health error occurred in %s", __func__);
1491 }
1492 health_unregister(health_sessiond);
1493 DBG("Application communication apps thread cleanup complete");
1494 rcu_thread_offline();
1495 rcu_unregister_thread();
1496 return NULL;
1497 }
1498
1499 /*
1500 * Send a socket to a thread. This is called from the dispatch UST registration
1501 * thread once all sockets are set for the application.
1502 *
1503 * The sock value can be invalid, we don't really care, the thread will handle
1504 * it and make the necessary cleanup if so.
1505 *
1506 * On success return 0, else a negative value corresponding to the errno set
1507 * by write().
1508 */
1509 static int send_socket_to_thread(int fd, int sock)
1510 {
1511 ssize_t ret;
1512
1513 /*
1514 * It's possible that the FD is concurrently set to -1 just before this
1515 * call, which indicates a shutdown state of the thread.
1516 */
1517 if (fd < 0) {
1518 ret = -EBADF;
1519 goto error;
1520 }
1521
1522 ret = lttng_write(fd, &sock, sizeof(sock));
1523 if (ret < sizeof(sock)) {
1524 PERROR("write apps pipe %d", fd);
1525 if (ret < 0) {
1526 ret = -errno;
1527 }
1528 goto error;
1529 }
1530
1531 /* All good. Don't send back the write positive ret value. */
1532 ret = 0;
1533 error:
1534 return (int) ret;
1535 }
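
/*
 * Note that this passes the file descriptor *number* through a pipe, which
 * is only valid because sender and receiver are threads of the same process
 * sharing one FD table; no SCM_RIGHTS ancillary message is needed. The
 * receiving end mirrors the write (see thread_manage_apps above):
 *
 *   int sock;
 *   ssize_t size_ret = lttng_read(apps_cmd_pipe[0], &sock, sizeof(sock));
 *   if (size_ret < sizeof(sock)) {
 *           ... treat as a pipe error ...
 *   }
 */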
1536
1537 /*
1538 * Sanitize the wait queue of the dispatch registration thread by removing
1539 * invalid nodes from it. This avoids memory leaks when the UST notify
1540 * socket is never received.
1541 */
1542 static void sanitize_wait_queue(struct ust_reg_wait_queue *wait_queue)
1543 {
1544 int ret, nb_fd = 0, i;
1545 unsigned int fd_added = 0;
1546 struct lttng_poll_event events;
1547 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1548
1549 assert(wait_queue);
1550
1551 lttng_poll_init(&events);
1552
1553 /* Just skip everything for an empty queue. */
1554 if (!wait_queue->count) {
1555 goto end;
1556 }
1557
1558 ret = lttng_poll_create(&events, wait_queue->count, LTTNG_CLOEXEC);
1559 if (ret < 0) {
1560 goto error_create;
1561 }
1562
1563 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1564 &wait_queue->head, head) {
1565 assert(wait_node->app);
1566 ret = lttng_poll_add(&events, wait_node->app->sock,
1567 LPOLLHUP | LPOLLERR);
1568 if (ret < 0) {
1569 goto error;
1570 }
1571
1572 fd_added = 1;
1573 }
1574
1575 if (!fd_added) {
1576 goto end;
1577 }
1578
1579 /*
1580 * Poll but don't block so we can quickly identify the faulty events and
1581 * clean them afterwards from the wait queue.
1582 */
1583 ret = lttng_poll_wait(&events, 0);
1584 if (ret < 0) {
1585 goto error;
1586 }
1587 nb_fd = ret;
1588
1589 for (i = 0; i < nb_fd; i++) {
1590 /* Get faulty FD. */
1591 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
1592 int pollfd = LTTNG_POLL_GETFD(&events, i);
1593
1594 if (!revents) {
1595 /* No activity for this FD (poll implementation). */
1596 continue;
1597 }
1598
1599 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1600 &wait_queue->head, head) {
1601 if (pollfd == wait_node->app->sock &&
1602 (revents & (LPOLLHUP | LPOLLERR))) {
1603 cds_list_del(&wait_node->head);
1604 wait_queue->count--;
1605 ust_app_destroy(wait_node->app);
1606 free(wait_node);
1607 break;
1608 }
1609 }
1610 }
1611
1612 if (nb_fd > 0) {
1613 DBG("Wait queue sanitized, %d node were cleaned up", nb_fd);
1614 }
1615
1616 end:
1617 lttng_poll_clean(&events);
1618 return;
1619
1620 error:
1621 lttng_poll_clean(&events);
1622 error_create:
1623 ERR("Unable to sanitize wait queue");
1624 return;
1625 }
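
/*
 * The zero timeout passed to lttng_poll_wait() above turns it into a pure
 * readiness probe: it returns immediately with whatever HUP/ERR conditions
 * are already pending, which is enough to spot applications that died
 * before ever sending their notify socket. The same idiom with raw poll(2)
 * would be (sketch; app_sock is hypothetical):
 *
 *   struct pollfd pfd = { .fd = app_sock, .events = 0 };
 *   if (poll(&pfd, 1, 0) > 0 && (pfd.revents & (POLLHUP | POLLERR))) {
 *           ... drop app_sock; HUP/ERR are reported even with events == 0 ...
 *   }
 */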
1626
1627 /*
1628 * Dispatch request from the registration threads to the application
1629 * communication thread.
1630 */
1631 static void *thread_dispatch_ust_registration(void *data)
1632 {
1633 int ret, err = -1;
1634 struct cds_wfq_node *node;
1635 struct ust_command *ust_cmd = NULL;
1636 struct ust_reg_wait_node *wait_node = NULL, *tmp_wait_node;
1637 struct ust_reg_wait_queue wait_queue = {
1638 .count = 0,
1639 };
1640
1641 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG_DISPATCH);
1642
1643 if (testpoint(sessiond_thread_app_reg_dispatch)) {
1644 goto error_testpoint;
1645 }
1646
1647 health_code_update();
1648
1649 CDS_INIT_LIST_HEAD(&wait_queue.head);
1650
1651 DBG("[thread] Dispatch UST command started");
1652
1653 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
1654 health_code_update();
1655
1656 /* Atomically prepare the queue futex */
1657 futex_nto1_prepare(&ust_cmd_queue.futex);
1658
1659 do {
1660 struct ust_app *app = NULL;
1661 ust_cmd = NULL;
1662
1663 /*
1664 * Make sure we don't have node(s) that have hung up before receiving
1665 * the notify socket. This is to clean the list in order to avoid
1666 * memory leaks from notify socket that are never seen.
1667 */
1668 sanitize_wait_queue(&wait_queue);
1669
1670 health_code_update();
1671 /* Dequeue command for registration */
1672 node = cds_wfq_dequeue_blocking(&ust_cmd_queue.queue);
1673 if (node == NULL) {
1674 DBG("Woken up but nothing in the UST command queue");
1675 /* Continue thread execution */
1676 break;
1677 }
1678
1679 ust_cmd = caa_container_of(node, struct ust_command, node);
1680
1681 DBG("Dispatching UST registration pid:%d ppid:%d uid:%d"
1682 " gid:%d sock:%d name:%s (version %d.%d)",
1683 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
1684 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
1685 ust_cmd->sock, ust_cmd->reg_msg.name,
1686 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
1687
1688 if (ust_cmd->reg_msg.type == USTCTL_SOCKET_CMD) {
1689 wait_node = zmalloc(sizeof(*wait_node));
1690 if (!wait_node) {
1691 PERROR("zmalloc wait_node dispatch");
1692 ret = close(ust_cmd->sock);
1693 if (ret < 0) {
1694 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1695 }
1696 lttng_fd_put(LTTNG_FD_APPS, 1);
1697 free(ust_cmd);
1698 goto error;
1699 }
1700 CDS_INIT_LIST_HEAD(&wait_node->head);
1701
1702 /* Create application object if socket is CMD. */
1703 wait_node->app = ust_app_create(&ust_cmd->reg_msg,
1704 ust_cmd->sock);
1705 if (!wait_node->app) {
1706 ret = close(ust_cmd->sock);
1707 if (ret < 0) {
1708 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1709 }
1710 lttng_fd_put(LTTNG_FD_APPS, 1);
1711 free(wait_node);
1712 free(ust_cmd);
1713 continue;
1714 }
1715 /*
1716 * Add application to the wait queue so we can set the notify
1717 * socket before putting this object in the global ht.
1718 */
1719 cds_list_add(&wait_node->head, &wait_queue.head);
1720 wait_queue.count++;
1721
1722 free(ust_cmd);
1723 /*
1724 * We have to continue here since we don't have the notify
1725 * socket and the application MUST be added to the hash table
1726 * only at that moment.
1727 */
1728 continue;
1729 } else {
1730 /*
1731 * Look for the application in the local wait queue and set the
1732 * notify socket if found.
1733 */
1734 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1735 &wait_queue.head, head) {
1736 health_code_update();
1737 if (wait_node->app->pid == ust_cmd->reg_msg.pid) {
1738 wait_node->app->notify_sock = ust_cmd->sock;
1739 cds_list_del(&wait_node->head);
1740 wait_queue.count--;
1741 app = wait_node->app;
1742 free(wait_node);
1743 DBG3("UST app notify socket %d is set", ust_cmd->sock);
1744 break;
1745 }
1746 }
1747
1748 /*
1749 * With no application at this stage the received socket is
1750 * basically useless so close it before we free the cmd data
1751 * structure for good.
1752 */
1753 if (!app) {
1754 ret = close(ust_cmd->sock);
1755 if (ret < 0) {
1756 PERROR("close ust sock dispatch %d", ust_cmd->sock);
1757 }
1758 lttng_fd_put(LTTNG_FD_APPS, 1);
1759 }
1760 free(ust_cmd);
1761 }
1762
1763 if (app) {
1764 /*
1765 * @session_lock_list
1766 *
1767 * Lock the global session list so from the register up to the
1768 * registration done message, no thread can see the application
1769 * and change its state.
1770 */
1771 session_lock_list();
1772 rcu_read_lock();
1773
1774 /*
1775 * Add application to the global hash table. This needs to be
1776 * done before the update to the UST registry can locate the
1777 * application.
1778 */
1779 ust_app_add(app);
1780
1781 /* Set app version. This call will print an error if needed. */
1782 (void) ust_app_version(app);
1783
1784 /* Send notify socket through the notify pipe. */
1785 ret = send_socket_to_thread(apps_cmd_notify_pipe[1],
1786 app->notify_sock);
1787 if (ret < 0) {
1788 rcu_read_unlock();
1789 session_unlock_list();
1790 /*
1791 * No notify thread, stop the UST tracing. However, this is
1792 * not an internal error of this thread, thus set
1793 * the health error code to a normal exit.
1794 */
1795 err = 0;
1796 goto error;
1797 }
1798
1799 /*
1800 * Update newly registered application with the tracing
1801 * registry info already enabled information.
1802 */
1803 update_ust_app(app->sock);
1804
1805 /*
1806 * Don't care about return value. Let the manage apps threads
1807 * handle app unregistration upon socket close.
1808 */
1809 (void) ust_app_register_done(app->sock);
1810
1811 /*
1812 * Even if the application socket has been closed, send the app
1813 * to the thread and unregistration will take place at that
1814 * place.
1815 */
1816 ret = send_socket_to_thread(apps_cmd_pipe[1], app->sock);
1817 if (ret < 0) {
1818 rcu_read_unlock();
1819 session_unlock_list();
1820 /*
1821 * No apps. thread, stop the UST tracing. However, this is
1822 * not an internal error of this thread, thus set
1823 * the health error code to a normal exit.
1824 */
1825 err = 0;
1826 goto error;
1827 }
1828
1829 rcu_read_unlock();
1830 session_unlock_list();
1831 }
1832 } while (node != NULL);
1833
1834 health_poll_entry();
1835 /* Futex wait on queue. Blocking call on futex() */
1836 futex_nto1_wait(&ust_cmd_queue.futex);
1837 health_poll_exit();
1838 }
1839 /* Normal exit, no error */
1840 err = 0;
1841
1842 error:
1843 /* Clean up wait queue. */
1844 cds_list_for_each_entry_safe(wait_node, tmp_wait_node,
1845 &wait_queue.head, head) {
1846 cds_list_del(&wait_node->head);
1847 wait_queue.count--;
1848 free(wait_node);
1849 }
1850
1851 error_testpoint:
1852 DBG("Dispatch thread dying");
1853 if (err) {
1854 health_error();
1855 ERR("Health error occurred in %s", __func__);
1856 }
1857 health_unregister(health_sessiond);
1858 return NULL;
1859 }
1860
1861 /*
1862 * This thread manages application registration.
1863 */
1864 static void *thread_registration_apps(void *data)
1865 {
1866 int sock = -1, i, ret, pollfd, err = -1;
1867 uint32_t revents, nb_fd;
1868 struct lttng_poll_event events;
1869 /*
1870 * Allocated in this thread, enqueued to a global queue, dequeued and
1871 * freed in the manage apps thread.
1872 */
1873 struct ust_command *ust_cmd = NULL;
1874
1875 DBG("[thread] Manage application registration started");
1876
1877 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_APP_REG);
1878
1879 if (testpoint(sessiond_thread_registration_apps)) {
1880 goto error_testpoint;
1881 }
1882
1883 ret = lttcomm_listen_unix_sock(apps_sock);
1884 if (ret < 0) {
1885 goto error_listen;
1886 }
1887
1888 /*
1889 * Pass 2 as size here for the thread quit pipe and apps socket. Nothing
1890 * more will be added to this poll set.
1891 */
1892 ret = sessiond_set_thread_pollset(&events, 2);
1893 if (ret < 0) {
1894 goto error_create_poll;
1895 }
1896
1897 /* Add the application registration socket */
1898 ret = lttng_poll_add(&events, apps_sock, LPOLLIN | LPOLLRDHUP);
1899 if (ret < 0) {
1900 goto error_poll_add;
1901 }
1902
1903 /* Notify all applications to register */
1904 ret = notify_ust_apps(1);
1905 if (ret < 0) {
1906 ERR("Failed to notify applications or create the wait shared memory.\n"
1907 "Execution continues but there might be problem for already\n"
1908 "running applications that wishes to register.");
1909 }
1910
1911 while (1) {
1912 DBG("Accepting application registration");
1913
1914 /* Infinite blocking call, waiting for transmission */
1915 restart:
1916 health_poll_entry();
1917 ret = lttng_poll_wait(&events, -1);
1918 health_poll_exit();
1919 if (ret < 0) {
1920 /*
1921 * Restart interrupted system call.
1922 */
1923 if (errno == EINTR) {
1924 goto restart;
1925 }
1926 goto error;
1927 }
1928
1929 nb_fd = ret;
1930
1931 for (i = 0; i < nb_fd; i++) {
1932 health_code_update();
1933
1934 /* Fetch once the poll data */
1935 revents = LTTNG_POLL_GETEV(&events, i);
1936 pollfd = LTTNG_POLL_GETFD(&events, i);
1937
1938 if (!revents) {
1939 /* No activity for this FD (poll implementation). */
1940 continue;
1941 }
1942
1943 /* Thread quit pipe has been closed. Killing thread. */
1944 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
1945 if (ret) {
1946 err = 0;
1947 goto exit;
1948 }
1949
1950 /* Event on the registration socket */
1951 if (pollfd == apps_sock) {
1952 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
1953 ERR("Register apps socket poll error");
1954 goto error;
1955 } else if (revents & LPOLLIN) {
1956 sock = lttcomm_accept_unix_sock(apps_sock);
1957 if (sock < 0) {
1958 goto error;
1959 }
1960
1961 /*
1962 * Set socket timeout for both receiving and sending.
1963 * app_socket_timeout is in seconds, whereas
1964 * lttcomm_setsockopt_rcv_timeout and
1965 * lttcomm_setsockopt_snd_timeout expect msec as
1966 * parameter.
1967 */
1968 (void) lttcomm_setsockopt_rcv_timeout(sock,
1969 app_socket_timeout * 1000);
1970 (void) lttcomm_setsockopt_snd_timeout(sock,
1971 app_socket_timeout * 1000);
1972
1973 /*
1974 * Set the CLOEXEC flag. Return code is useless because
1975 * either way, the show must go on.
1976 */
1977 (void) utils_set_fd_cloexec(sock);
1978
1979 /* Create UST registration command for enqueuing */
1980 ust_cmd = zmalloc(sizeof(struct ust_command));
1981 if (ust_cmd == NULL) {
1982 PERROR("ust command zmalloc");
1983 goto error;
1984 }
1985
1986 /*
1987 * Using message-based transmissions to ensure we don't
1988 * have to deal with partially received messages.
1989 */
1990 ret = lttng_fd_get(LTTNG_FD_APPS, 1);
1991 if (ret < 0) {
1992 ERR("Exhausted file descriptors allowed for applications.");
1993 free(ust_cmd);
1994 ret = close(sock);
1995 if (ret) {
1996 PERROR("close");
1997 }
1998 sock = -1;
1999 continue;
2000 }
2001
2002 health_code_update();
2003 ret = ust_app_recv_registration(sock, &ust_cmd->reg_msg);
2004 if (ret < 0) {
2005 free(ust_cmd);
2006 /* Close socket of the application. */
2007 ret = close(sock);
2008 if (ret) {
2009 PERROR("close");
2010 }
2011 lttng_fd_put(LTTNG_FD_APPS, 1);
2012 sock = -1;
2013 continue;
2014 }
2015 health_code_update();
2016
2017 ust_cmd->sock = sock;
2018 sock = -1;
2019
2020 DBG("UST registration received with pid:%d ppid:%d uid:%d"
2021 " gid:%d sock:%d name:%s (version %d.%d)",
2022 ust_cmd->reg_msg.pid, ust_cmd->reg_msg.ppid,
2023 ust_cmd->reg_msg.uid, ust_cmd->reg_msg.gid,
2024 ust_cmd->sock, ust_cmd->reg_msg.name,
2025 ust_cmd->reg_msg.major, ust_cmd->reg_msg.minor);
2026
2027 /*
2028 * Lock free enqueue the registration request. The red pill
2029 * has been taken! This app will be part of the *system*.
2030 */
2031 cds_wfq_enqueue(&ust_cmd_queue.queue, &ust_cmd->node);
2032
2033 /*
2034 * Wake the registration queue futex. Implicit memory
2035 * barrier with the exchange in cds_wfq_enqueue.
2036 */
2037 futex_nto1_wake(&ust_cmd_queue.futex);
2038 }
2039 }
2040 }
2041 }
2042
2043 exit:
2044 error:
2045 /* Notify that the registration thread is gone */
2046 notify_ust_apps(0);
2047
2048 if (apps_sock >= 0) {
2049 ret = close(apps_sock);
2050 if (ret) {
2051 PERROR("close");
2052 }
2053 }
2054 if (sock >= 0) {
2055 ret = close(sock);
2056 if (ret) {
2057 PERROR("close");
2058 }
2059 lttng_fd_put(LTTNG_FD_APPS, 1);
2060 }
2061 unlink(apps_unix_sock_path);
2062
2063 error_poll_add:
2064 lttng_poll_clean(&events);
2065 error_listen:
2066 error_create_poll:
2067 error_testpoint:
2068 DBG("UST Registration thread cleanup complete");
2069 if (err) {
2070 health_error();
2071 ERR("Health error occurred in %s", __func__);
2072 }
2073 health_unregister(health_sessiond);
2074
2075 return NULL;
2076 }
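
/*
 * The msec-based lttcomm_setsockopt_rcv_timeout()/_snd_timeout() helpers
 * used in the accept path above presumably wrap SO_RCVTIMEO/SO_SNDTIMEO
 * (an assumption; see the liblttsessiondcomm code for the real
 * implementation). The raw equivalent for the receive side, with msec
 * standing in for the converted timeout, would be:
 *
 *   struct timeval tv = {
 *           .tv_sec = msec / 1000,
 *           .tv_usec = (msec % 1000) * 1000,
 *   };
 *   setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
 */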
2077
2078 /*
2079 * Start the thread_manage_consumer. This must be done after a lttng-consumerd
2080 * exec or it will fail.
2081 */
2082 static int spawn_consumer_thread(struct consumer_data *consumer_data)
2083 {
2084 int ret, clock_ret;
2085 struct timespec timeout;
2086
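	/*
	 * Synchronization sketch: thread_manage_consumer is expected to flip
	 * consumer_thread_is_ready and signal consumer_data->cond once the
	 * consumerd is up; we wait on that condition below, with
	 * DEFAULT_SEM_WAIT_TIMEOUT as a safety net so a consumerd that never
	 * comes up cannot block us forever.
	 */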
2087 /* Make sure we set the readiness flag to 0 because we are NOT ready */
2088 consumer_data->consumer_thread_is_ready = 0;
2089
2090 /* Setup pthread condition */
2091 ret = pthread_condattr_init(&consumer_data->condattr);
2092 if (ret != 0) {
2093 errno = ret;
2094 PERROR("pthread_condattr_init consumer data");
2095 goto error;
2096 }
2097
2098 /*
2099 * Set the monotonic clock in order to make sure we DO NOT jump in time
2100 * between the clock_gettime() call and the timedwait call. See bug #324
2101 	 * for more details and how we noticed it.
2102 */
2103 ret = pthread_condattr_setclock(&consumer_data->condattr, CLOCK_MONOTONIC);
2104 if (ret != 0) {
2105 errno = ret;
2106 PERROR("pthread_condattr_setclock consumer data");
2107 goto error;
2108 }
2109
2110 ret = pthread_cond_init(&consumer_data->cond, &consumer_data->condattr);
2111 if (ret != 0) {
2112 errno = ret;
2113 PERROR("pthread_cond_init consumer data");
2114 goto error;
2115 }
2116
2117 ret = pthread_create(&consumer_data->thread, NULL, thread_manage_consumer,
2118 consumer_data);
2119 if (ret != 0) {
2120 PERROR("pthread_create consumer");
2121 ret = -1;
2122 goto error;
2123 }
2124
2125 /* We are about to wait on a pthread condition */
2126 pthread_mutex_lock(&consumer_data->cond_mutex);
2127
2128 	/* Get time for the pthread_cond_timedwait absolute timeout */
2129 clock_ret = clock_gettime(CLOCK_MONOTONIC, &timeout);
2130 /*
2131 * Set the timeout for the condition timed wait even if the clock gettime
2132 	 * call fails, since we might loop on that call and we want to avoid
2133 	 * incrementing the timeout too many times.
2134 */
2135 timeout.tv_sec += DEFAULT_SEM_WAIT_TIMEOUT;
2136
2137 /*
2138 	 * The following loop COULD be skipped in some conditions, so ret is set
2139 	 * to 0 beforehand to make sure it holds a sane value even when not a
2140 	 * single round of the loop is done.
2141 */
2142 ret = 0;
2143
2144 /*
2145 * Loop until the condition is reached or when a timeout is reached. Note
2146 * that the pthread_cond_timedwait(P) man page specifies that EINTR can NOT
2147 * be returned but the pthread_cond(3), from the glibc-doc, says that it is
2148 * possible. This loop does not take any chances and works with both of
2149 * them.
2150 */
2151 while (!consumer_data->consumer_thread_is_ready && ret != ETIMEDOUT) {
2152 if (clock_ret < 0) {
2153 PERROR("clock_gettime spawn consumer");
2154 /* Infinite wait for the consumerd thread to be ready */
2155 ret = pthread_cond_wait(&consumer_data->cond,
2156 &consumer_data->cond_mutex);
2157 } else {
2158 ret = pthread_cond_timedwait(&consumer_data->cond,
2159 &consumer_data->cond_mutex, &timeout);
2160 }
2161 }
2162
2163 	/* Release the pthread condition mutex */
2164 pthread_mutex_unlock(&consumer_data->cond_mutex);
2165
2166 if (ret != 0) {
2167 errno = ret;
2168 if (ret == ETIMEDOUT) {
2169 int pth_ret;
2170
2171 /*
2172 * Call has timed out so we kill the kconsumerd_thread and return
2173 * an error.
2174 */
2175 ERR("Condition timed out. The consumer thread was never ready."
2176 " Killing it");
2177 pth_ret = pthread_cancel(consumer_data->thread);
2178 if (pth_ret < 0) {
2179 PERROR("pthread_cancel consumer thread");
2180 }
2181 } else {
2182 PERROR("pthread_cond_wait failed consumer thread");
2183 }
2184 /* Caller is expecting a negative value on failure. */
2185 ret = -1;
2186 goto error;
2187 }
2188
2189 pthread_mutex_lock(&consumer_data->pid_mutex);
2190 if (consumer_data->pid == 0) {
2191 ERR("Consumerd did not start");
2192 pthread_mutex_unlock(&consumer_data->pid_mutex);
2193 goto error;
2194 }
2195 pthread_mutex_unlock(&consumer_data->pid_mutex);
2196
2197 return 0;
2198
2199 error:
2200 return ret;
2201 }
2202
2203 /*
2204 * Join consumer thread
2205 */
2206 static int join_consumer_thread(struct consumer_data *consumer_data)
2207 {
2208 void *status;
2209
2210 /* Consumer pid must be a real one. */
2211 if (consumer_data->pid > 0) {
2212 int ret;
2213 ret = kill(consumer_data->pid, SIGTERM);
2214 if (ret) {
2215 ERR("Error killing consumer daemon");
2216 return ret;
2217 }
2218 return pthread_join(consumer_data->thread, &status);
2219 } else {
2220 return 0;
2221 }
2222 }
2223
2224 /*
2225 * Fork and exec a consumer daemon (consumerd).
2226 *
2227  * Return the child pid on success, a negative value on error.
2228 */
2229 static pid_t spawn_consumerd(struct consumer_data *consumer_data)
2230 {
2231 int ret;
2232 pid_t pid;
2233 const char *consumer_to_use;
2234 const char *verbosity;
2235 struct stat st;
2236
2237 DBG("Spawning consumerd");
2238
2239 pid = fork();
2240 if (pid == 0) {
2241 /*
2242 * Exec consumerd.
2243 */
2244 if (opt_verbose_consumer) {
2245 verbosity = "--verbose";
2246 } else {
2247 verbosity = "--quiet";
2248 }
2249 switch (consumer_data->type) {
2250 case LTTNG_CONSUMER_KERNEL:
2251 /*
2252 * Find out which consumerd to execute. We will first try the
2253 * 64-bit path, then the sessiond's installation directory, and
2254 		 * fall back on the 32-bit one.
2255 */
2256 DBG3("Looking for a kernel consumer at these locations:");
2257 DBG3(" 1) %s", consumerd64_bin);
2258 DBG3(" 2) %s/%s", INSTALL_BIN_PATH, CONSUMERD_FILE);
2259 DBG3(" 3) %s", consumerd32_bin);
2260 if (stat(consumerd64_bin, &st) == 0) {
2261 DBG3("Found location #1");
2262 consumer_to_use = consumerd64_bin;
2263 } else if (stat(INSTALL_BIN_PATH "/" CONSUMERD_FILE, &st) == 0) {
2264 DBG3("Found location #2");
2265 consumer_to_use = INSTALL_BIN_PATH "/" CONSUMERD_FILE;
2266 } else if (stat(consumerd32_bin, &st) == 0) {
2267 DBG3("Found location #3");
2268 consumer_to_use = consumerd32_bin;
2269 } else {
2270 DBG("Could not find any valid consumerd executable");
2271 ret = -EINVAL;
2272 break;
2273 }
2274 DBG("Using kernel consumer at: %s", consumer_to_use);
2275 ret = execl(consumer_to_use,
2276 "lttng-consumerd", verbosity, "-k",
2277 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2278 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2279 "--group", tracing_group_name,
2280 NULL);
2281 break;
2282 case LTTNG_CONSUMER64_UST:
2283 {
2284 char *tmpnew = NULL;
2285
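		/*
		 * Prepend the configured 64-bit libdir to the inherited
		 * LD_LIBRARY_PATH so the exec'd consumerd resolves its
		 * libraries from the matching bitness first.
		 */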
2286 if (consumerd64_libdir[0] != '\0') {
2287 char *tmp;
2288 size_t tmplen;
2289
2290 tmp = getenv("LD_LIBRARY_PATH");
2291 if (!tmp) {
2292 tmp = "";
2293 }
2294 tmplen = strlen("LD_LIBRARY_PATH=")
2295 + strlen(consumerd64_libdir) + 1 /* : */ + strlen(tmp);
2296 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2297 if (!tmpnew) {
2298 ret = -ENOMEM;
2299 goto error;
2300 }
2301 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2302 strcat(tmpnew, consumerd64_libdir);
2303 if (tmp[0] != '\0') {
2304 strcat(tmpnew, ":");
2305 strcat(tmpnew, tmp);
2306 }
2307 ret = putenv(tmpnew);
2308 if (ret) {
2309 ret = -errno;
2310 free(tmpnew);
2311 goto error;
2312 }
2313 }
2314 DBG("Using 64-bit UST consumer at: %s", consumerd64_bin);
2315 ret = execl(consumerd64_bin, "lttng-consumerd", verbosity, "-u",
2316 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2317 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2318 "--group", tracing_group_name,
2319 NULL);
2320 if (consumerd64_libdir[0] != '\0') {
2321 free(tmpnew);
2322 }
2323 break;
2324 }
2325 case LTTNG_CONSUMER32_UST:
2326 {
2327 char *tmpnew = NULL;
2328
2329 if (consumerd32_libdir[0] != '\0') {
2330 char *tmp;
2331 size_t tmplen;
2332
2333 tmp = getenv("LD_LIBRARY_PATH");
2334 if (!tmp) {
2335 tmp = "";
2336 }
2337 tmplen = strlen("LD_LIBRARY_PATH=")
2338 + strlen(consumerd32_libdir) + 1 /* : */ + strlen(tmp);
2339 tmpnew = zmalloc(tmplen + 1 /* \0 */);
2340 if (!tmpnew) {
2341 ret = -ENOMEM;
2342 goto error;
2343 }
2344 strcpy(tmpnew, "LD_LIBRARY_PATH=");
2345 strcat(tmpnew, consumerd32_libdir);
2346 if (tmp[0] != '\0') {
2347 strcat(tmpnew, ":");
2348 strcat(tmpnew, tmp);
2349 }
2350 ret = putenv(tmpnew);
2351 if (ret) {
2352 ret = -errno;
2353 free(tmpnew);
2354 goto error;
2355 }
2356 }
2357 DBG("Using 32-bit UST consumer at: %s", consumerd32_bin);
2358 ret = execl(consumerd32_bin, "lttng-consumerd", verbosity, "-u",
2359 "--consumerd-cmd-sock", consumer_data->cmd_unix_sock_path,
2360 "--consumerd-err-sock", consumer_data->err_unix_sock_path,
2361 "--group", tracing_group_name,
2362 NULL);
2363 if (consumerd32_libdir[0] != '\0') {
2364 free(tmpnew);
2365 }
2366 break;
2367 }
2368 default:
2369 PERROR("unknown consumer type");
2370 exit(EXIT_FAILURE);
2371 }
2372 if (errno != 0) {
2373 PERROR("Consumer execl()");
2374 }
2375 /* Reaching this point, we got a failure on our execl(). */
2376 exit(EXIT_FAILURE);
2377 } else if (pid > 0) {
2378 ret = pid;
2379 } else {
2380 PERROR("start consumer fork");
2381 ret = -errno;
2382 }
2383 error:
2384 return ret;
2385 }
2386
2387 /*
2388 * Spawn the consumerd daemon and session daemon thread.
2389 */
2390 static int start_consumerd(struct consumer_data *consumer_data)
2391 {
2392 int ret;
2393
2394 /*
2395 * Set the listen() state on the socket since there is a possible race
2396 	 * between the exec() of the consumer daemon and this call if placed in the
2397 * consumer thread. See bug #366 for more details.
2398 */
2399 ret = lttcomm_listen_unix_sock(consumer_data->err_sock);
2400 if (ret < 0) {
2401 goto error;
2402 }
2403
2404 pthread_mutex_lock(&consumer_data->pid_mutex);
2405 if (consumer_data->pid != 0) {
2406 pthread_mutex_unlock(&consumer_data->pid_mutex);
2407 goto end;
2408 }
2409
2410 ret = spawn_consumerd(consumer_data);
2411 if (ret < 0) {
2412 ERR("Spawning consumerd failed");
2413 pthread_mutex_unlock(&consumer_data->pid_mutex);
2414 goto error;
2415 }
2416
2417 /* Setting up the consumer_data pid */
2418 consumer_data->pid = ret;
2419 DBG2("Consumer pid %d", consumer_data->pid);
2420 pthread_mutex_unlock(&consumer_data->pid_mutex);
2421
2422 DBG2("Spawning consumer control thread");
2423 ret = spawn_consumer_thread(consumer_data);
2424 if (ret < 0) {
2425 ERR("Fatal error spawning consumer control thread");
2426 goto error;
2427 }
2428
2429 end:
2430 return 0;
2431
2432 error:
2433 /* Cleanup already created sockets on error. */
2434 if (consumer_data->err_sock >= 0) {
2435 int err;
2436
2437 err = close(consumer_data->err_sock);
2438 if (err < 0) {
2439 PERROR("close consumer data error socket");
2440 }
2441 }
2442 return ret;
2443 }
2444
2445 /*
2446 * Setup necessary data for kernel tracer action.
2447 */
2448 static int init_kernel_tracer(void)
2449 {
2450 int ret;
2451
2452 /* Modprobe lttng kernel modules */
2453 ret = modprobe_lttng_control();
2454 if (ret < 0) {
2455 goto error;
2456 }
2457
2458 	/* Open the lttng debugfs/proc control file */
2459 kernel_tracer_fd = open(module_proc_lttng, O_RDWR);
2460 if (kernel_tracer_fd < 0) {
2461 DBG("Failed to open %s", module_proc_lttng);
2462 ret = -1;
2463 goto error_open;
2464 }
2465
2466 /* Validate kernel version */
2467 ret = kernel_validate_version(kernel_tracer_fd);
2468 if (ret < 0) {
2469 goto error_version;
2470 }
2471
2472 ret = modprobe_lttng_data();
2473 if (ret < 0) {
2474 goto error_modules;
2475 }
2476
2477 DBG("Kernel tracer fd %d", kernel_tracer_fd);
2478 return 0;
2479
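	/*
	 * Error paths: unload the lttng control modules and close the tracer
	 * fd acquired above before reporting the failure.
	 */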
2480 error_version:
2481 modprobe_remove_lttng_control();
2482 ret = close(kernel_tracer_fd);
2483 if (ret) {
2484 PERROR("close");
2485 }
2486 kernel_tracer_fd = -1;
2487 return LTTNG_ERR_KERN_VERSION;
2488
2489 error_modules:
2490 ret = close(kernel_tracer_fd);
2491 if (ret) {
2492 PERROR("close");
2493 }
2494
2495 error_open:
2496 modprobe_remove_lttng_control();
2497
2498 error:
2499 WARN("No kernel tracer available");
2500 kernel_tracer_fd = -1;
2501 if (!is_root) {
2502 return LTTNG_ERR_NEED_ROOT_SESSIOND;
2503 } else {
2504 return LTTNG_ERR_KERN_NA;
2505 }
2506 }
2507
2508
2509 /*
2510 * Copy consumer output from the tracing session to the domain session. The
2511  * function also applies the right modification on a per-domain basis for the
2512 * trace files destination directory.
2513 *
2514 * Should *NOT* be called with RCU read-side lock held.
2515 */
2516 static int copy_session_consumer(int domain, struct ltt_session *session)
2517 {
2518 int ret;
2519 const char *dir_name;
2520 struct consumer_output *consumer;
2521
2522 assert(session);
2523 assert(session->consumer);
2524
2525 switch (domain) {
2526 case LTTNG_DOMAIN_KERNEL:
2527 DBG3("Copying tracing session consumer output in kernel session");
2528 /*
2529 * XXX: We should audit the session creation and what this function
2530 * does "extra" in order to avoid a destroy since this function is used
2531 * in the domain session creation (kernel and ust) only. Same for UST
2532 * domain.
2533 */
2534 if (session->kernel_session->consumer) {
2535 consumer_destroy_output(session->kernel_session->consumer);
2536 }
2537 session->kernel_session->consumer =
2538 consumer_copy_output(session->consumer);
2539 /* Ease our life a bit for the next part */
2540 consumer = session->kernel_session->consumer;
2541 dir_name = DEFAULT_KERNEL_TRACE_DIR;
2542 break;
2543 case LTTNG_DOMAIN_JUL:
2544 case LTTNG_DOMAIN_UST:
2545 DBG3("Copying tracing session consumer output in UST session");
2546 if (session->ust_session->consumer) {
2547 consumer_destroy_output(session->ust_session->consumer);
2548 }
2549 session->ust_session->consumer =
2550 consumer_copy_output(session->consumer);
2551 /* Ease our life a bit for the next part */
2552 consumer = session->ust_session->consumer;
2553 dir_name = DEFAULT_UST_TRACE_DIR;
2554 break;
2555 default:
2556 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2557 goto error;
2558 }
2559
2560 /* Append correct directory to subdir */
2561 strncat(consumer->subdir, dir_name,
2562 sizeof(consumer->subdir) - strlen(consumer->subdir) - 1);
2563 DBG3("Copy session consumer subdir %s", consumer->subdir);
2564
2565 ret = LTTNG_OK;
2566
2567 error:
2568 return ret;
2569 }
2570
2571 /*
2572  * Create a UST session and add it to the session ust list.
2573 *
2574 * Should *NOT* be called with RCU read-side lock held.
2575 */
2576 static int create_ust_session(struct ltt_session *session,
2577 struct lttng_domain *domain)
2578 {
2579 int ret;
2580 struct ltt_ust_session *lus = NULL;
2581
2582 assert(session);
2583 assert(domain);
2584 assert(session->consumer);
2585
2586 switch (domain->type) {
2587 case LTTNG_DOMAIN_JUL:
2588 case LTTNG_DOMAIN_UST:
2589 break;
2590 default:
2591 ERR("Unknown UST domain on create session %d", domain->type);
2592 ret = LTTNG_ERR_UNKNOWN_DOMAIN;
2593 goto error;
2594 }
2595
2596 DBG("Creating UST session");
2597
2598 lus = trace_ust_create_session(session->id);
2599 if (lus == NULL) {
2600 ret = LTTNG_ERR_UST_SESS_FAIL;
2601 goto error;
2602 }
2603
2604 lus->uid = session->uid;
2605 lus->gid = session->gid;
2606 lus->output_traces = session->output_traces;
2607 lus->snapshot_mode = session->snapshot_mode;
2608 lus->live_timer_interval = session->live_timer;
2609 session->ust_session = lus;
2610
2611 /* Copy session output to the newly created UST session */
2612 ret = copy_session_consumer(domain->type, session);
2613 if (ret != LTTNG_OK) {
2614 goto error;
2615 }
2616
2617 return LTTNG_OK;
2618
2619 error:
2620 free(lus);
2621 session->ust_session = NULL;
2622 return ret;
2623 }
2624
2625 /*
2626 * Create a kernel tracer session then create the default channel.
2627 */
2628 static int create_kernel_session(struct ltt_session *session)
2629 {
2630 int ret;
2631
2632 DBG("Creating kernel session");
2633
2634 ret = kernel_create_session(session, kernel_tracer_fd);
2635 if (ret < 0) {
2636 ret = LTTNG_ERR_KERN_SESS_FAIL;
2637 goto error;
2638 }
2639
2640 /* Code flow safety */
2641 assert(session->kernel_session);
2642
2643 /* Copy session output to the newly created Kernel session */
2644 ret = copy_session_consumer(LTTNG_DOMAIN_KERNEL, session);
2645 if (ret != LTTNG_OK) {
2646 goto error;
2647 }
2648
2649 /* Create directory(ies) on local filesystem. */
2650 if (session->kernel_session->consumer->type == CONSUMER_DST_LOCAL &&
2651 strlen(session->kernel_session->consumer->dst.trace_path) > 0) {
2652 ret = run_as_mkdir_recursive(
2653 session->kernel_session->consumer->dst.trace_path,
2654 S_IRWXU | S_IRWXG, session->uid, session->gid);
2655 if (ret < 0) {
2656 if (ret != -EEXIST) {
2657 ERR("Trace directory creation error");
2658 goto error;
2659 }
2660 }
2661 }
2662
2663 session->kernel_session->uid = session->uid;
2664 session->kernel_session->gid = session->gid;
2665 session->kernel_session->output_traces = session->output_traces;
2666 session->kernel_session->snapshot_mode = session->snapshot_mode;
2667
2668 return LTTNG_OK;
2669
2670 error:
2671 trace_kernel_destroy_session(session->kernel_session);
2672 session->kernel_session = NULL;
2673 return ret;
2674 }
2675
2676 /*
2677  * Count the number of sessions permitted for this uid/gid.
2678 */
2679 static unsigned int lttng_sessions_count(uid_t uid, gid_t gid)
2680 {
2681 unsigned int i = 0;
2682 struct ltt_session *session;
2683
2684 	DBG("Counting number of available sessions for UID %d GID %d",
2685 uid, gid);
2686 cds_list_for_each_entry(session, &session_list_ptr->head, list) {
2687 /*
2688 * Only list the sessions the user can control.
2689 */
2690 if (!session_access_ok(session, uid, gid)) {
2691 continue;
2692 }
2693 i++;
2694 }
2695 return i;
2696 }
2697
2698 /*
2699 * Process the command requested by the lttng client within the command
2700  * context structure. This function makes sure that the return structure (llm)
2701 * is set and ready for transmission before returning.
2702 *
2703 * Return any error encountered or 0 for success.
2704 *
2705 * "sock" is only used for special-case var. len data.
2706 *
2707 * Should *NOT* be called with RCU read-side lock held.
2708 */
2709 static int process_client_msg(struct command_ctx *cmd_ctx, int sock,
2710 int *sock_error)
2711 {
2712 int ret = LTTNG_OK;
2713 int need_tracing_session = 1;
2714 int need_domain;
2715
2716 DBG("Processing client command %d", cmd_ctx->lsm->cmd_type);
2717
2718 *sock_error = 0;
2719
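	/*
	 * The commands listed below are session-wide and carry no domain in
	 * their payload; every other command has its domain validated and set
	 * up before being processed.
	 */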
2720 switch (cmd_ctx->lsm->cmd_type) {
2721 case LTTNG_CREATE_SESSION:
2722 case LTTNG_CREATE_SESSION_SNAPSHOT:
2723 case LTTNG_CREATE_SESSION_LIVE:
2724 case LTTNG_DESTROY_SESSION:
2725 case LTTNG_LIST_SESSIONS:
2726 case LTTNG_LIST_DOMAINS:
2727 case LTTNG_START_TRACE:
2728 case LTTNG_STOP_TRACE:
2729 case LTTNG_DATA_PENDING:
2730 case LTTNG_SNAPSHOT_ADD_OUTPUT:
2731 case LTTNG_SNAPSHOT_DEL_OUTPUT:
2732 case LTTNG_SNAPSHOT_LIST_OUTPUT:
2733 case LTTNG_SNAPSHOT_RECORD:
2734 need_domain = 0;
2735 break;
2736 default:
2737 need_domain = 1;
2738 }
2739
2740 if (opt_no_kernel && need_domain
2741 && cmd_ctx->lsm->domain.type == LTTNG_DOMAIN_KERNEL) {
2742 if (!is_root) {
2743 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2744 } else {
2745 ret = LTTNG_ERR_KERN_NA;
2746 }
2747 goto error;
2748 }
2749
2750 /* Deny register consumer if we already have a spawned consumer. */
2751 if (cmd_ctx->lsm->cmd_type == LTTNG_REGISTER_CONSUMER) {
2752 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2753 if (kconsumer_data.pid > 0) {
2754 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2755 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2756 goto error;
2757 }
2758 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2759 }
2760
2761 /*
2762 	 * Check for commands that don't need to allocate a returned payload. We do
2763 * this here so we don't have to make the call for no payload at each
2764 * command.
2765 */
2766 	switch (cmd_ctx->lsm->cmd_type) {
2767 case LTTNG_LIST_SESSIONS:
2768 case LTTNG_LIST_TRACEPOINTS:
2769 case LTTNG_LIST_TRACEPOINT_FIELDS:
2770 case LTTNG_LIST_DOMAINS:
2771 case LTTNG_LIST_CHANNELS:
2772 case LTTNG_LIST_EVENTS:
2773 break;
2774 default:
2775 /* Setup lttng message with no payload */
2776 ret = setup_lttng_msg(cmd_ctx, 0);
2777 if (ret < 0) {
2778 /* This label does not try to unlock the session */
2779 goto init_setup_error;
2780 }
2781 }
2782
2783 /* Commands that DO NOT need a session. */
2784 switch (cmd_ctx->lsm->cmd_type) {
2785 case LTTNG_CREATE_SESSION:
2786 case LTTNG_CREATE_SESSION_SNAPSHOT:
2787 case LTTNG_CREATE_SESSION_LIVE:
2788 case LTTNG_CALIBRATE:
2789 case LTTNG_LIST_SESSIONS:
2790 case LTTNG_LIST_TRACEPOINTS:
2791 case LTTNG_LIST_TRACEPOINT_FIELDS:
2792 need_tracing_session = 0;
2793 break;
2794 default:
2795 DBG("Getting session %s by name", cmd_ctx->lsm->session.name);
2796 /*
2797 * We keep the session list lock across _all_ commands
2798 * for now, because the per-session lock does not
2799 * handle teardown properly.
2800 */
2801 session_lock_list();
2802 cmd_ctx->session = session_find_by_name(cmd_ctx->lsm->session.name);
2803 if (cmd_ctx->session == NULL) {
2804 ret = LTTNG_ERR_SESS_NOT_FOUND;
2805 goto error;
2806 } else {
2807 /* Acquire lock for the session */
2808 session_lock(cmd_ctx->session);
2809 }
2810 break;
2811 }
2812
2813 if (!need_domain) {
2814 goto skip_domain;
2815 }
2816
2817 /*
2818 * Check domain type for specific "pre-action".
2819 */
2820 switch (cmd_ctx->lsm->domain.type) {
2821 case LTTNG_DOMAIN_KERNEL:
2822 if (!is_root) {
2823 ret = LTTNG_ERR_NEED_ROOT_SESSIOND;
2824 goto error;
2825 }
2826
2827 /* Kernel tracer check */
2828 if (kernel_tracer_fd == -1) {
2829 /* Basically, load kernel tracer modules */
2830 ret = init_kernel_tracer();
2831 if (ret != 0) {
2832 goto error;
2833 }
2834 }
2835
2836 /* Consumer is in an ERROR state. Report back to client */
2837 if (uatomic_read(&kernel_consumerd_state) == CONSUMER_ERROR) {
2838 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2839 goto error;
2840 }
2841
2842 /* Need a session for kernel command */
2843 if (need_tracing_session) {
2844 if (cmd_ctx->session->kernel_session == NULL) {
2845 ret = create_kernel_session(cmd_ctx->session);
2846 if (ret < 0) {
2847 ret = LTTNG_ERR_KERN_SESS_FAIL;
2848 goto error;
2849 }
2850 }
2851
2852 /* Start the kernel consumer daemon */
2853 pthread_mutex_lock(&kconsumer_data.pid_mutex);
2854 if (kconsumer_data.pid == 0 &&
2855 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2856 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2857 ret = start_consumerd(&kconsumer_data);
2858 if (ret < 0) {
2859 ret = LTTNG_ERR_KERN_CONSUMER_FAIL;
2860 goto error;
2861 }
2862 uatomic_set(&kernel_consumerd_state, CONSUMER_STARTED);
2863 } else {
2864 pthread_mutex_unlock(&kconsumer_data.pid_mutex);
2865 }
2866
2867 /*
2868 * The consumer was just spawned so we need to add the socket to
2869 			 * the consumer output of the session if it exists.
2870 */
2871 ret = consumer_create_socket(&kconsumer_data,
2872 cmd_ctx->session->kernel_session->consumer);
2873 if (ret < 0) {
2874 goto error;
2875 }
2876 }
2877
2878 break;
2879 case LTTNG_DOMAIN_JUL:
2880 case LTTNG_DOMAIN_UST:
2881 {
2882 if (!ust_app_supported()) {
2883 ret = LTTNG_ERR_NO_UST;
2884 goto error;
2885 }
2886 /* Consumer is in an ERROR state. Report back to client */
2887 if (uatomic_read(&ust_consumerd_state) == CONSUMER_ERROR) {
2888 ret = LTTNG_ERR_NO_USTCONSUMERD;
2889 goto error;
2890 }
2891
2892 if (need_tracing_session) {
2893 /* Create UST session if none exist. */
2894 if (cmd_ctx->session->ust_session == NULL) {
2895 ret = create_ust_session(cmd_ctx->session,
2896 &cmd_ctx->lsm->domain);
2897 if (ret != LTTNG_OK) {
2898 goto error;
2899 }
2900 }
2901
2902 /* Start the UST consumer daemons */
2903 /* 64-bit */
2904 pthread_mutex_lock(&ustconsumer64_data.pid_mutex);
2905 if (consumerd64_bin[0] != '\0' &&
2906 ustconsumer64_data.pid == 0 &&
2907 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2908 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2909 ret = start_consumerd(&ustconsumer64_data);
2910 if (ret < 0) {
2911 ret = LTTNG_ERR_UST_CONSUMER64_FAIL;
2912 uatomic_set(&ust_consumerd64_fd, -EINVAL);
2913 goto error;
2914 }
2915
2916 uatomic_set(&ust_consumerd64_fd, ustconsumer64_data.cmd_sock);
2917 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2918 } else {
2919 pthread_mutex_unlock(&ustconsumer64_data.pid_mutex);
2920 }
2921
2922 /*
2923 * Setup socket for consumer 64 bit. No need for atomic access
2924 * since it was set above and can ONLY be set in this thread.
2925 */
2926 ret = consumer_create_socket(&ustconsumer64_data,
2927 cmd_ctx->session->ust_session->consumer);
2928 if (ret < 0) {
2929 goto error;
2930 }
2931
2932 /* 32-bit */
2933 pthread_mutex_lock(&ustconsumer32_data.pid_mutex);
2934 if (consumerd32_bin[0] != '\0' &&
2935 ustconsumer32_data.pid == 0 &&
2936 cmd_ctx->lsm->cmd_type != LTTNG_REGISTER_CONSUMER) {
2937 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2938 ret = start_consumerd(&ustconsumer32_data);
2939 if (ret < 0) {
2940 ret = LTTNG_ERR_UST_CONSUMER32_FAIL;
2941 uatomic_set(&ust_consumerd32_fd, -EINVAL);
2942 goto error;
2943 }
2944
2945 uatomic_set(&ust_consumerd32_fd, ustconsumer32_data.cmd_sock);
2946 uatomic_set(&ust_consumerd_state, CONSUMER_STARTED);
2947 } else {
2948 pthread_mutex_unlock(&ustconsumer32_data.pid_mutex);
2949 }
2950
2951 /*
2952 			 * Setup socket for consumer 32 bit. No need for atomic access
2953 * since it was set above and can ONLY be set in this thread.
2954 */
2955 ret = consumer_create_socket(&ustconsumer32_data,
2956 cmd_ctx->session->ust_session->consumer);
2957 if (ret < 0) {
2958 goto error;
2959 }
2960 }
2961 break;
2962 }
2963 default:
2964 break;
2965 }
2966 skip_domain:
2967
2968 /* Validate consumer daemon state when start/stop trace command */
2969 if (cmd_ctx->lsm->cmd_type == LTTNG_START_TRACE ||
2970 cmd_ctx->lsm->cmd_type == LTTNG_STOP_TRACE) {
2971 switch (cmd_ctx->lsm->domain.type) {
2972 case LTTNG_DOMAIN_JUL:
2973 case LTTNG_DOMAIN_UST:
2974 if (uatomic_read(&ust_consumerd_state) != CONSUMER_STARTED) {
2975 ret = LTTNG_ERR_NO_USTCONSUMERD;
2976 goto error;
2977 }
2978 break;
2979 case LTTNG_DOMAIN_KERNEL:
2980 if (uatomic_read(&kernel_consumerd_state) != CONSUMER_STARTED) {
2981 ret = LTTNG_ERR_NO_KERNCONSUMERD;
2982 goto error;
2983 }
2984 break;
2985 }
2986 }
2987
2988 /*
2989 * Check that the UID or GID match that of the tracing session.
2990 * The root user can interact with all sessions.
2991 */
2992 if (need_tracing_session) {
2993 if (!session_access_ok(cmd_ctx->session,
2994 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
2995 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds))) {
2996 ret = LTTNG_ERR_EPERM;
2997 goto error;
2998 }
2999 }
3000
3001 /*
3002 * Send relayd information to consumer as soon as we have a domain and a
3003 * session defined.
3004 */
3005 if (cmd_ctx->session && need_domain) {
3006 /*
3007 * Setup relayd if not done yet. If the relayd information was already
3008 * sent to the consumer, this call will gracefully return.
3009 */
3010 ret = cmd_setup_relayd(cmd_ctx->session);
3011 if (ret != LTTNG_OK) {
3012 goto error;
3013 }
3014 }
3015
3016 /* Process by command type */
3017 switch (cmd_ctx->lsm->cmd_type) {
3018 case LTTNG_ADD_CONTEXT:
3019 {
3020 ret = cmd_add_context(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3021 cmd_ctx->lsm->u.context.channel_name,
3022 &cmd_ctx->lsm->u.context.ctx, kernel_poll_pipe[1]);
3023 break;
3024 }
3025 case LTTNG_DISABLE_CHANNEL:
3026 {
3027 ret = cmd_disable_channel(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3028 cmd_ctx->lsm->u.disable.channel_name);
3029 break;
3030 }
3031 case LTTNG_DISABLE_EVENT:
3032 {
3033 ret = cmd_disable_event(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3034 cmd_ctx->lsm->u.disable.channel_name,
3035 cmd_ctx->lsm->u.disable.name);
3036 break;
3037 }
3038 case LTTNG_DISABLE_ALL_EVENT:
3039 {
3040 DBG("Disabling all events");
3041
3042 ret = cmd_disable_event_all(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3043 cmd_ctx->lsm->u.disable.channel_name);
3044 break;
3045 }
3046 case LTTNG_ENABLE_CHANNEL:
3047 {
3048 ret = cmd_enable_channel(cmd_ctx->session, &cmd_ctx->lsm->domain,
3049 &cmd_ctx->lsm->u.channel.chan, kernel_poll_pipe[1]);
3050 break;
3051 }
3052 case LTTNG_ENABLE_EVENT:
3053 {
3054 struct lttng_event_exclusion *exclusion = NULL;
3055 struct lttng_filter_bytecode *bytecode = NULL;
3056
3057 		/* Handle event exclusions and receive them from the client. */
3058 if (cmd_ctx->lsm->u.enable.exclusion_count > 0) {
3059 size_t count = cmd_ctx->lsm->u.enable.exclusion_count;
3060
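			/*
			 * The exclusion names are stored inline after the
			 * header as 'count' fixed-size entries of
			 * LTTNG_SYMBOL_NAME_LEN bytes, hence the
			 * over-allocation below.
			 */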
3061 exclusion = zmalloc(sizeof(struct lttng_event_exclusion) +
3062 (count * LTTNG_SYMBOL_NAME_LEN));
3063 if (!exclusion) {
3064 ret = LTTNG_ERR_EXCLUSION_NOMEM;
3065 goto error;
3066 }
3067
3068 DBG("Receiving var len exclusion event list from client ...");
3069 exclusion->count = count;
3070 ret = lttcomm_recv_unix_sock(sock, exclusion->names,
3071 count * LTTNG_SYMBOL_NAME_LEN);
3072 if (ret <= 0) {
3073 DBG("Nothing recv() from client var len data... continuing");
3074 *sock_error = 1;
3075 free(exclusion);
3076 ret = LTTNG_ERR_EXCLUSION_INVAL;
3077 goto error;
3078 }
3079 }
3080
3081 /* Handle filter and get bytecode from client. */
3082 if (cmd_ctx->lsm->u.enable.bytecode_len > 0) {
3083 size_t bytecode_len = cmd_ctx->lsm->u.enable.bytecode_len;
3084
3085 if (bytecode_len > LTTNG_FILTER_MAX_LEN) {
3086 ret = LTTNG_ERR_FILTER_INVAL;
3087 free(exclusion);
3088 goto error;
3089 }
3090
3091 bytecode = zmalloc(bytecode_len);
3092 if (!bytecode) {
3093 free(exclusion);
3094 ret = LTTNG_ERR_FILTER_NOMEM;
3095 goto error;
3096 }
3097
3098 /* Receive var. len. data */
3099 DBG("Receiving var len filter's bytecode from client ...");
3100 ret = lttcomm_recv_unix_sock(sock, bytecode, bytecode_len);
3101 if (ret <= 0) {
3102 				DBG("Nothing recv() from client var len data... continuing");
3103 *sock_error = 1;
3104 free(bytecode);
3105 free(exclusion);
3106 ret = LTTNG_ERR_FILTER_INVAL;
3107 goto error;
3108 }
3109
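			/*
			 * Sanity check: the length advertised in the bytecode
			 * header must match, header included, the total size
			 * announced by the client.
			 */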
3110 if ((bytecode->len + sizeof(*bytecode)) != bytecode_len) {
3111 free(bytecode);
3112 free(exclusion);
3113 ret = LTTNG_ERR_FILTER_INVAL;
3114 goto error;
3115 }
3116 }
3117
3118 ret = cmd_enable_event(cmd_ctx->session, &cmd_ctx->lsm->domain,
3119 cmd_ctx->lsm->u.enable.channel_name,
3120 &cmd_ctx->lsm->u.enable.event, bytecode, exclusion,
3121 kernel_poll_pipe[1]);
3122 break;
3123 }
3124 case LTTNG_ENABLE_ALL_EVENT:
3125 {
3126 DBG("Enabling all events");
3127
3128 ret = cmd_enable_event_all(cmd_ctx->session, &cmd_ctx->lsm->domain,
3129 cmd_ctx->lsm->u.enable.channel_name,
3130 cmd_ctx->lsm->u.enable.event.type, NULL, kernel_poll_pipe[1]);
3131 break;
3132 }
3133 case LTTNG_LIST_TRACEPOINTS:
3134 {
3135 struct lttng_event *events;
3136 ssize_t nb_events;
3137
3138 session_lock_list();
3139 nb_events = cmd_list_tracepoints(cmd_ctx->lsm->domain.type, &events);
3140 session_unlock_list();
3141 if (nb_events < 0) {
3142 /* Return value is a negative lttng_error_code. */
3143 ret = -nb_events;
3144 goto error;
3145 }
3146
3147 /*
3148 * Setup lttng message with payload size set to the event list size in
3149 * bytes and then copy list into the llm payload.
3150 */
3151 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_event) * nb_events);
3152 if (ret < 0) {
3153 free(events);
3154 goto setup_error;
3155 }
3156
3157 /* Copy event list into message payload */
3158 memcpy(cmd_ctx->llm->payload, events,
3159 sizeof(struct lttng_event) * nb_events);
3160
3161 free(events);
3162
3163 ret = LTTNG_OK;
3164 break;
3165 }
3166 case LTTNG_LIST_TRACEPOINT_FIELDS:
3167 {
3168 struct lttng_event_field *fields;
3169 ssize_t nb_fields;
3170
3171 session_lock_list();
3172 nb_fields = cmd_list_tracepoint_fields(cmd_ctx->lsm->domain.type,
3173 &fields);
3174 session_unlock_list();
3175 if (nb_fields < 0) {
3176 /* Return value is a negative lttng_error_code. */
3177 ret = -nb_fields;
3178 goto error;
3179 }
3180
3181 /*
3182 * Setup lttng message with payload size set to the event list size in
3183 * bytes and then copy list into the llm payload.
3184 */
3185 ret = setup_lttng_msg(cmd_ctx,
3186 sizeof(struct lttng_event_field) * nb_fields);
3187 if (ret < 0) {
3188 free(fields);
3189 goto setup_error;
3190 }
3191
3192 		/* Copy field list into message payload */
3193 memcpy(cmd_ctx->llm->payload, fields,
3194 sizeof(struct lttng_event_field) * nb_fields);
3195
3196 free(fields);
3197
3198 ret = LTTNG_OK;
3199 break;
3200 }
3201 case LTTNG_SET_CONSUMER_URI:
3202 {
3203 size_t nb_uri, len;
3204 struct lttng_uri *uris;
3205
3206 nb_uri = cmd_ctx->lsm->u.uri.size;
3207 len = nb_uri * sizeof(struct lttng_uri);
3208
3209 if (nb_uri == 0) {
3210 ret = LTTNG_ERR_INVALID;
3211 goto error;
3212 }
3213
3214 uris = zmalloc(len);
3215 if (uris == NULL) {
3216 ret = LTTNG_ERR_FATAL;
3217 goto error;
3218 }
3219
3220 /* Receive variable len data */
3221 DBG("Receiving %zu URI(s) from client ...", nb_uri);
3222 ret = lttcomm_recv_unix_sock(sock, uris, len);
3223 if (ret <= 0) {
3224 DBG("No URIs received from client... continuing");
3225 *sock_error = 1;
3226 ret = LTTNG_ERR_SESSION_FAIL;
3227 free(uris);
3228 goto error;
3229 }
3230
3231 ret = cmd_set_consumer_uri(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3232 nb_uri, uris);
3233 if (ret != LTTNG_OK) {
3234 free(uris);
3235 goto error;
3236 }
3237
3238 /*
3239 * XXX: 0 means that this URI should be applied on the session. Should
3240 		 * be a DOMAIN enum.
3241 */
3242 if (cmd_ctx->lsm->domain.type == 0) {
3243 /* Add the URI for the UST session if a consumer is present. */
3244 if (cmd_ctx->session->ust_session &&
3245 cmd_ctx->session->ust_session->consumer) {
3246 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_UST, cmd_ctx->session,
3247 nb_uri, uris);
3248 } else if (cmd_ctx->session->kernel_session &&
3249 cmd_ctx->session->kernel_session->consumer) {
3250 ret = cmd_set_consumer_uri(LTTNG_DOMAIN_KERNEL,
3251 cmd_ctx->session, nb_uri, uris);
3252 }
3253 }
3254
3255 free(uris);
3256
3257 break;
3258 }
3259 case LTTNG_START_TRACE:
3260 {
3261 ret = cmd_start_trace(cmd_ctx->session);
3262 break;
3263 }
3264 case LTTNG_STOP_TRACE:
3265 {
3266 ret = cmd_stop_trace(cmd_ctx->session);
3267 break;
3268 }
3269 case LTTNG_CREATE_SESSION:
3270 {
3271 size_t nb_uri, len;
3272 struct lttng_uri *uris = NULL;
3273
3274 nb_uri = cmd_ctx->lsm->u.uri.size;
3275 len = nb_uri * sizeof(struct lttng_uri);
3276
3277 if (nb_uri > 0) {
3278 uris = zmalloc(len);
3279 if (uris == NULL) {
3280 ret = LTTNG_ERR_FATAL;
3281 goto error;
3282 }
3283
3284 /* Receive variable len data */
3285 DBG("Waiting for %zu URIs from client ...", nb_uri);
3286 ret = lttcomm_recv_unix_sock(sock, uris, len);
3287 if (ret <= 0) {
3288 DBG("No URIs received from client... continuing");
3289 *sock_error = 1;
3290 ret = LTTNG_ERR_SESSION_FAIL;
3291 free(uris);
3292 goto error;
3293 }
3294
3295 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3296 DBG("Creating session with ONE network URI is a bad call");
3297 ret = LTTNG_ERR_SESSION_FAIL;
3298 free(uris);
3299 goto error;
3300 }
3301 }
3302
3303 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris, nb_uri,
3304 &cmd_ctx->creds, 0);
3305
3306 free(uris);
3307
3308 break;
3309 }
3310 case LTTNG_DESTROY_SESSION:
3311 {
3312 ret = cmd_destroy_session(cmd_ctx->session, kernel_poll_pipe[1]);
3313
3314 /* Set session to NULL so we do not unlock it after free. */
3315 cmd_ctx->session = NULL;
3316 break;
3317 }
3318 case LTTNG_LIST_DOMAINS:
3319 {
3320 ssize_t nb_dom;
3321 struct lttng_domain *domains;
3322
3323 nb_dom = cmd_list_domains(cmd_ctx->session, &domains);
3324 if (nb_dom < 0) {
3325 /* Return value is a negative lttng_error_code. */
3326 ret = -nb_dom;
3327 goto error;
3328 }
3329
3330 ret = setup_lttng_msg(cmd_ctx, nb_dom * sizeof(struct lttng_domain));
3331 if (ret < 0) {
3332 free(domains);
3333 goto setup_error;
3334 }
3335
3336 		/* Copy domain list into message payload */
3337 memcpy(cmd_ctx->llm->payload, domains,
3338 nb_dom * sizeof(struct lttng_domain));
3339
3340 free(domains);
3341
3342 ret = LTTNG_OK;
3343 break;
3344 }
3345 case LTTNG_LIST_CHANNELS:
3346 {
3347 int nb_chan;
3348 struct lttng_channel *channels = NULL;
3349
3350 nb_chan = cmd_list_channels(cmd_ctx->lsm->domain.type,
3351 cmd_ctx->session, &channels);
3352 if (nb_chan < 0) {
3353 /* Return value is a negative lttng_error_code. */
3354 ret = -nb_chan;
3355 goto error;
3356 }
3357
3358 ret = setup_lttng_msg(cmd_ctx, nb_chan * sizeof(struct lttng_channel));
3359 if (ret < 0) {
3360 free(channels);
3361 goto setup_error;
3362 }
3363
3364 		/* Copy channel list into message payload */
3365 memcpy(cmd_ctx->llm->payload, channels,
3366 nb_chan * sizeof(struct lttng_channel));
3367
3368 free(channels);
3369
3370 ret = LTTNG_OK;
3371 break;
3372 }
3373 case LTTNG_LIST_EVENTS:
3374 {
3375 ssize_t nb_event;
3376 struct lttng_event *events = NULL;
3377
3378 nb_event = cmd_list_events(cmd_ctx->lsm->domain.type, cmd_ctx->session,
3379 cmd_ctx->lsm->u.list.channel_name, &events);
3380 if (nb_event < 0) {
3381 /* Return value is a negative lttng_error_code. */
3382 ret = -nb_event;
3383 goto error;
3384 }
3385
3386 ret = setup_lttng_msg(cmd_ctx, nb_event * sizeof(struct lttng_event));
3387 if (ret < 0) {
3388 free(events);
3389 goto setup_error;
3390 }
3391
3392 /* Copy event list into message payload */
3393 memcpy(cmd_ctx->llm->payload, events,
3394 nb_event * sizeof(struct lttng_event));
3395
3396 free(events);
3397
3398 ret = LTTNG_OK;
3399 break;
3400 }
3401 case LTTNG_LIST_SESSIONS:
3402 {
3403 unsigned int nr_sessions;
3404
3405 session_lock_list();
3406 nr_sessions = lttng_sessions_count(
3407 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3408 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3409
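		/*
		 * The session list lock is held across both the count above and
		 * the copy below, so the payload size computed here cannot go
		 * stale before the array is filled.
		 */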
3410 ret = setup_lttng_msg(cmd_ctx, sizeof(struct lttng_session) * nr_sessions);
3411 if (ret < 0) {
3412 session_unlock_list();
3413 goto setup_error;
3414 }
3415
3416 		/* Fill the session array */
3417 cmd_list_lttng_sessions((struct lttng_session *)(cmd_ctx->llm->payload),
3418 LTTNG_SOCK_GET_UID_CRED(&cmd_ctx->creds),
3419 LTTNG_SOCK_GET_GID_CRED(&cmd_ctx->creds));
3420
3421 session_unlock_list();
3422
3423 ret = LTTNG_OK;
3424 break;
3425 }
3426 case LTTNG_CALIBRATE:
3427 {
3428 ret = cmd_calibrate(cmd_ctx->lsm->domain.type,
3429 &cmd_ctx->lsm->u.calibrate);
3430 break;
3431 }
3432 case LTTNG_REGISTER_CONSUMER:
3433 {
3434 struct consumer_data *cdata;
3435
3436 switch (cmd_ctx->lsm->domain.type) {
3437 case LTTNG_DOMAIN_KERNEL:
3438 cdata = &kconsumer_data;
3439 break;
3440 default:
3441 ret = LTTNG_ERR_UND;
3442 goto error;
3443 }
3444
3445 ret = cmd_register_consumer(cmd_ctx->session, cmd_ctx->lsm->domain.type,
3446 cmd_ctx->lsm->u.reg.path, cdata);
3447 break;
3448 }
3449 case LTTNG_DATA_PENDING:
3450 {
3451 ret = cmd_data_pending(cmd_ctx->session);
3452 break;
3453 }
3454 case LTTNG_SNAPSHOT_ADD_OUTPUT:
3455 {
3456 struct lttcomm_lttng_output_id reply;
3457
3458 ret = cmd_snapshot_add_output(cmd_ctx->session,
3459 &cmd_ctx->lsm->u.snapshot_output.output, &reply.id);
3460 if (ret != LTTNG_OK) {
3461 goto error;
3462 }
3463
3464 ret = setup_lttng_msg(cmd_ctx, sizeof(reply));
3465 if (ret < 0) {
3466 goto setup_error;
3467 }
3468
3469 		/* Copy the new output id reply into message payload */
3470 memcpy(cmd_ctx->llm->payload, &reply, sizeof(reply));
3471 ret = LTTNG_OK;
3472 break;
3473 }
3474 case LTTNG_SNAPSHOT_DEL_OUTPUT:
3475 {
3476 ret = cmd_snapshot_del_output(cmd_ctx->session,
3477 &cmd_ctx->lsm->u.snapshot_output.output);
3478 break;
3479 }
3480 case LTTNG_SNAPSHOT_LIST_OUTPUT:
3481 {
3482 ssize_t nb_output;
3483 struct lttng_snapshot_output *outputs = NULL;
3484
3485 nb_output = cmd_snapshot_list_outputs(cmd_ctx->session, &outputs);
3486 if (nb_output < 0) {
3487 ret = -nb_output;
3488 goto error;
3489 }
3490
3491 ret = setup_lttng_msg(cmd_ctx,
3492 nb_output * sizeof(struct lttng_snapshot_output));
3493 if (ret < 0) {
3494 free(outputs);
3495 goto setup_error;
3496 }
3497
3498 if (outputs) {
3499 /* Copy output list into message payload */
3500 memcpy(cmd_ctx->llm->payload, outputs,
3501 nb_output * sizeof(struct lttng_snapshot_output));
3502 free(outputs);
3503 }
3504
3505 ret = LTTNG_OK;
3506 break;
3507 }
3508 case LTTNG_SNAPSHOT_RECORD:
3509 {
3510 ret = cmd_snapshot_record(cmd_ctx->session,
3511 &cmd_ctx->lsm->u.snapshot_record.output,
3512 cmd_ctx->lsm->u.snapshot_record.wait);
3513 break;
3514 }
3515 case LTTNG_CREATE_SESSION_SNAPSHOT:
3516 {
3517 size_t nb_uri, len;
3518 struct lttng_uri *uris = NULL;
3519
3520 nb_uri = cmd_ctx->lsm->u.uri.size;
3521 len = nb_uri * sizeof(struct lttng_uri);
3522
3523 if (nb_uri > 0) {
3524 uris = zmalloc(len);
3525 if (uris == NULL) {
3526 ret = LTTNG_ERR_FATAL;
3527 goto error;
3528 }
3529
3530 /* Receive variable len data */
3531 DBG("Waiting for %zu URIs from client ...", nb_uri);
3532 ret = lttcomm_recv_unix_sock(sock, uris, len);
3533 if (ret <= 0) {
3534 DBG("No URIs received from client... continuing");
3535 *sock_error = 1;
3536 ret = LTTNG_ERR_SESSION_FAIL;
3537 free(uris);
3538 goto error;
3539 }
3540
3541 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3542 DBG("Creating session with ONE network URI is a bad call");
3543 ret = LTTNG_ERR_SESSION_FAIL;
3544 free(uris);
3545 goto error;
3546 }
3547 }
3548
3549 ret = cmd_create_session_snapshot(cmd_ctx->lsm->session.name, uris,
3550 nb_uri, &cmd_ctx->creds);
3551 free(uris);
3552 break;
3553 }
3554 case LTTNG_CREATE_SESSION_LIVE:
3555 {
3556 size_t nb_uri, len;
3557 struct lttng_uri *uris = NULL;
3558
3559 nb_uri = cmd_ctx->lsm->u.uri.size;
3560 len = nb_uri * sizeof(struct lttng_uri);
3561
3562 if (nb_uri > 0) {
3563 uris = zmalloc(len);
3564 if (uris == NULL) {
3565 ret = LTTNG_ERR_FATAL;
3566 goto error;
3567 }
3568
3569 /* Receive variable len data */
3570 DBG("Waiting for %zu URIs from client ...", nb_uri);
3571 ret = lttcomm_recv_unix_sock(sock, uris, len);
3572 if (ret <= 0) {
3573 DBG("No URIs received from client... continuing");
3574 *sock_error = 1;
3575 ret = LTTNG_ERR_SESSION_FAIL;
3576 free(uris);
3577 goto error;
3578 }
3579
3580 if (nb_uri == 1 && uris[0].dtype != LTTNG_DST_PATH) {
3581 DBG("Creating session with ONE network URI is a bad call");
3582 ret = LTTNG_ERR_SESSION_FAIL;
3583 free(uris);
3584 goto error;
3585 }
3586 }
3587
3588 ret = cmd_create_session_uri(cmd_ctx->lsm->session.name, uris,
3589 nb_uri, &cmd_ctx->creds, cmd_ctx->lsm->u.session_live.timer_interval);
3590 free(uris);
3591 break;
3592 }
3593 default:
3594 ret = LTTNG_ERR_UND;
3595 break;
3596 }
3597
3598 error:
3599 if (cmd_ctx->llm == NULL) {
3600 DBG("Missing llm structure. Allocating one.");
3601 if (setup_lttng_msg(cmd_ctx, 0) < 0) {
3602 goto setup_error;
3603 }
3604 }
3605 /* Set return code */
3606 cmd_ctx->llm->ret_code = ret;
3607 setup_error:
3608 if (cmd_ctx->session) {
3609 session_unlock(cmd_ctx->session);
3610 }
3611 if (need_tracing_session) {
3612 session_unlock_list();
3613 }
3614 init_setup_error:
3615 return ret;
3616 }
3617
3618 /*
3619 * Thread managing health check socket.
3620 */
3621 static void *thread_manage_health(void *data)
3622 {
3623 int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
3624 uint32_t revents, nb_fd;
3625 struct lttng_poll_event events;
3626 struct health_comm_msg msg;
3627 struct health_comm_reply reply;
3628
3629 DBG("[thread] Manage health check started");
3630
3631 rcu_register_thread();
3632
3633 /* We might hit an error path before this is created. */
3634 lttng_poll_init(&events);
3635
3636 /* Create unix socket */
3637 sock = lttcomm_create_unix_sock(health_unix_sock_path);
3638 if (sock < 0) {
3639 ERR("Unable to create health check Unix socket");
3640 ret = -1;
3641 goto error;
3642 }
3643
3644 if (is_root) {
3645 /* lttng health client socket path permissions */
3646 ret = chown(health_unix_sock_path, 0,
3647 utils_get_group_id(tracing_group_name));
3648 if (ret < 0) {
3649 ERR("Unable to set group on %s", health_unix_sock_path);
3650 PERROR("chown");
3651 ret = -1;
3652 goto error;
3653 }
3654
3655 ret = chmod(health_unix_sock_path,
3656 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
3657 if (ret < 0) {
3658 ERR("Unable to set permissions on %s", health_unix_sock_path);
3659 PERROR("chmod");
3660 ret = -1;
3661 goto error;
3662 }
3663 }
3664
3665 /*
3666 * Set the CLOEXEC flag. Return code is useless because either way, the
3667 * show must go on.
3668 */
3669 (void) utils_set_fd_cloexec(sock);
3670
3671 ret = lttcomm_listen_unix_sock(sock);
3672 if (ret < 0) {
3673 goto error;
3674 }
3675
3676 /*
3677 	 * Pass 2 as size here for the thread quit pipe and the health socket. Nothing
3678 * more will be added to this poll set.
3679 */
3680 ret = sessiond_set_thread_pollset(&events, 2);
3681 if (ret < 0) {
3682 goto error;
3683 }
3684
3685 	/* Add the health check socket */
3686 ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
3687 if (ret < 0) {
3688 goto error;
3689 }
3690
3691 lttng_sessiond_notify_ready();
3692
3693 while (1) {
3694 DBG("Health check ready");
3695
3696 		/* Infinite blocking call, waiting for transmission */
3697 restart:
3698 ret = lttng_poll_wait(&events, -1);
3699 if (ret < 0) {
3700 /*
3701 * Restart interrupted system call.
3702 */
3703 if (errno == EINTR) {
3704 goto restart;
3705 }
3706 goto error;
3707 }
3708
3709 nb_fd = ret;
3710
3711 for (i = 0; i < nb_fd; i++) {
3712 /* Fetch once the poll data */
3713 revents = LTTNG_POLL_GETEV(&events, i);
3714 pollfd = LTTNG_POLL_GETFD(&events, i);
3715
3716 if (!revents) {
3717 /* No activity for this FD (poll implementation). */
3718 continue;
3719 }
3720
3721 /* Thread quit pipe has been closed. Killing thread. */
3722 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3723 if (ret) {
3724 err = 0;
3725 goto exit;
3726 }
3727
3728 			/* Event on the health socket */
3729 if (pollfd == sock) {
3730 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3731 ERR("Health socket poll error");
3732 goto error;
3733 }
3734 }
3735 }
3736
3737 new_sock = lttcomm_accept_unix_sock(sock);
3738 if (new_sock < 0) {
3739 goto error;
3740 }
3741
3742 /*
3743 * Set the CLOEXEC flag. Return code is useless because either way, the
3744 * show must go on.
3745 */
3746 (void) utils_set_fd_cloexec(new_sock);
3747
3748 DBG("Receiving data from client for health...");
3749 ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
3750 if (ret <= 0) {
3751 DBG("Nothing recv() from client... continuing");
3752 ret = close(new_sock);
3753 if (ret) {
3754 PERROR("close");
3755 }
3756 new_sock = -1;
3757 continue;
3758 }
3759
3760 rcu_thread_online();
3761
3762 memset(&reply, 0, sizeof(reply));
3763 for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
3764 /*
3765 * health_check_state returns 0 if health is
3766 * bad.
3767 */
3768 if (!health_check_state(health_sessiond, i)) {
3769 reply.ret_code |= 1ULL << i;
3770 }
3771 }
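		/*
		 * Each bit set in ret_code flags the corresponding health type
		 * as failing; a reply of 0 therefore means every monitored
		 * component passed the check.
		 */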
3772
3773 DBG2("Health check return value %" PRIx64, reply.ret_code);
3774
3775 ret = send_unix_sock(new_sock, (void *) &reply, sizeof(reply));
3776 if (ret < 0) {
3777 ERR("Failed to send health data back to client");
3778 }
3779
3780 /* End of transmission */
3781 ret = close(new_sock);
3782 if (ret) {
3783 PERROR("close");
3784 }
3785 new_sock = -1;
3786 }
3787
3788 exit:
3789 error:
3790 if (err) {
3791 ERR("Health error occurred in %s", __func__);
3792 }
3793 DBG("Health check thread dying");
3794 unlink(health_unix_sock_path);
3795 if (sock >= 0) {
3796 ret = close(sock);
3797 if (ret) {
3798 PERROR("close");
3799 }
3800 }
3801
3802 lttng_poll_clean(&events);
3803
3804 rcu_unregister_thread();
3805 return NULL;
3806 }
3807
3808 /*
3809  * This thread manages all client requests using the unix client socket for
3810 * communication.
3811 */
3812 static void *thread_manage_clients(void *data)
3813 {
3814 int sock = -1, ret, i, pollfd, err = -1;
3815 int sock_error;
3816 uint32_t revents, nb_fd;
3817 struct command_ctx *cmd_ctx = NULL;
3818 struct lttng_poll_event events;
3819
3820 DBG("[thread] Manage client started");
3821
3822 rcu_register_thread();
3823
3824 health_register(health_sessiond, HEALTH_SESSIOND_TYPE_CMD);
3825
3826 health_code_update();
3827
3828 ret = lttcomm_listen_unix_sock(client_sock);
3829 if (ret < 0) {
3830 goto error_listen;
3831 }
3832
3833 /*
3834 * Pass 2 as size here for the thread quit pipe and client_sock. Nothing
3835 * more will be added to this poll set.
3836 */
3837 ret = sessiond_set_thread_pollset(&events, 2);
3838 if (ret < 0) {
3839 goto error_create_poll;
3840 }
3841
3842 	/* Add the client socket */
3843 ret = lttng_poll_add(&events, client_sock, LPOLLIN | LPOLLPRI);
3844 if (ret < 0) {
3845 goto error;
3846 }
3847
3848 lttng_sessiond_notify_ready();
3849
3850 /* This testpoint is after we signal readiness to the parent. */
3851 if (testpoint(sessiond_thread_manage_clients)) {
3852 goto error;
3853 }
3854
3855 if (testpoint(sessiond_thread_manage_clients_before_loop)) {
3856 goto error;
3857 }
3858
3859 health_code_update();
3860
3861 while (1) {
3862 DBG("Accepting client command ...");
3863
3864 		/* Infinite blocking call, waiting for transmission */
3865 restart:
3866 health_poll_entry();
3867 ret = lttng_poll_wait(&events, -1);
3868 health_poll_exit();
3869 if (ret < 0) {
3870 /*
3871 * Restart interrupted system call.
3872 */
3873 if (errno == EINTR) {
3874 goto restart;
3875 }
3876 goto error;
3877 }
3878
3879 nb_fd = ret;
3880
3881 for (i = 0; i < nb_fd; i++) {
3882 /* Fetch once the poll data */
3883 revents = LTTNG_POLL_GETEV(&events, i);
3884 pollfd = LTTNG_POLL_GETFD(&events, i);
3885
3886 health_code_update();
3887
3888 if (!revents) {
3889 /* No activity for this FD (poll implementation). */
3890 continue;
3891 }
3892
3893 /* Thread quit pipe has been closed. Killing thread. */
3894 ret = sessiond_check_thread_quit_pipe(pollfd, revents);
3895 if (ret) {
3896 err = 0;
3897 goto exit;
3898 }
3899
3900 			/* Event on the client socket */
3901 if (pollfd == client_sock) {
3902 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
3903 ERR("Client socket poll error");
3904 goto error;
3905 }
3906 }
3907 }
3908
3909 DBG("Wait for client response");
3910
3911 health_code_update();
3912
3913 sock = lttcomm_accept_unix_sock(client_sock);
3914 if (sock < 0) {
3915 goto error;
3916 }
3917
3918 /*
3919 * Set the CLOEXEC flag. Return code is useless because either way, the
3920 * show must go on.
3921 */
3922 (void) utils_set_fd_cloexec(sock);
3923
3924 /* Set socket option for credentials retrieval */
3925 ret = lttcomm_setsockopt_creds_unix_sock(sock);
3926 if (ret < 0) {
3927 goto error;
3928 }
3929
3930 /* Allocate context command to process the client request */
3931 cmd_ctx = zmalloc(sizeof(struct command_ctx));
3932 if (cmd_ctx == NULL) {
3933 PERROR("zmalloc cmd_ctx");
3934 goto error;
3935 }
3936
3937 /* Allocate data buffer for reception */
3938 cmd_ctx->lsm = zmalloc(sizeof(struct lttcomm_session_msg));
3939 if (cmd_ctx->lsm == NULL) {
3940 PERROR("zmalloc cmd_ctx->lsm");
3941 goto error;
3942 }
3943
3944 cmd_ctx->llm = NULL;
3945 cmd_ctx->session = NULL;
3946
3947 health_code_update();
3948
3949 /*
3950 * Data is received from the lttng client. The struct
3951 * lttcomm_session_msg (lsm) contains the command and data request of
3952 * the client.
3953 */
3954 DBG("Receiving data from client ...");
3955 ret = lttcomm_recv_creds_unix_sock(sock, cmd_ctx->lsm,
3956 sizeof(struct lttcomm_session_msg), &cmd_ctx->creds);
3957 if (ret <= 0) {
3958 DBG("Nothing recv() from client... continuing");
3959 ret = close(sock);
3960 if (ret) {
3961 PERROR("close");
3962 }
3963 sock = -1;
3964 clean_command_ctx(&cmd_ctx);
3965 continue;
3966 }
3967
3968 health_code_update();
3969
3970 // TODO: Validate cmd_ctx including sanity check for
3971 // security purpose.
3972
3973 rcu_thread_online();
3974 /*
3975 		 * This function dispatches the work to the kernel or userspace tracer
3976 		 * libs and fills the lttcomm_lttng_msg data structure with all the
3977 		 * information needed by the client. The command context struct contains
3978 		 * everything this function may need.
3979 */
3980 ret = process_client_msg(cmd_ctx, sock, &sock_error);
3981 rcu_thread_offline();
3982 if (ret < 0) {
3983 ret = close(sock);
3984 if (ret) {
3985 PERROR("close");
3986 }
3987 sock = -1;
3988 /*
3989 * TODO: Inform client somehow of the fatal error. At
3990 * this point, ret < 0 means that a zmalloc failed
3991 * (ENOMEM). Error detected but still accept
3992 * command, unless a socket error has been
3993 * detected.
3994 */
3995 clean_command_ctx(&cmd_ctx);
3996 continue;
3997 }
3998
3999 health_code_update();
4000
4001 DBG("Sending response (size: %d, retcode: %s)",
4002 cmd_ctx->lttng_msg_size,
4003 lttng_strerror(-cmd_ctx->llm->ret_code));
4004 ret = send_unix_sock(sock, cmd_ctx->llm, cmd_ctx->lttng_msg_size);
4005 if (ret < 0) {
4006 ERR("Failed to send data back to client");
4007 }
4008
4009 /* End of transmission */
4010 ret = close(sock);
4011 if (ret) {
4012 PERROR("close");
4013 }
4014 sock = -1;
4015
4016 clean_command_ctx(&cmd_ctx);
4017
4018 health_code_update();
4019 }
4020
4021 exit:
4022 error:
4023 if (sock >= 0) {
4024 ret = close(sock);
4025 if (ret) {
4026 PERROR("close");
4027 }
4028 }
4029
4030 lttng_poll_clean(&events);
4031 clean_command_ctx(&cmd_ctx);
4032
4033 error_listen:
4034 error_create_poll:
4035 unlink(client_unix_sock_path);
4036 if (client_sock >= 0) {
4037 ret = close(client_sock);
4038 if (ret) {
4039 PERROR("close");
4040 }
4041 }
4042
4043 if (err) {
4044 health_error();
4045 ERR("Health error occurred in %s", __func__);
4046 }
4047
4048 health_unregister(health_sessiond);
4049
4050 DBG("Client thread dying");
4051
4052 rcu_unregister_thread();
4053 return NULL;
4054 }
4055
4056
4057 /*
4058  * Print usage on stderr.
4059 */
4060 static void usage(void)
4061 {
4062 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
4063 fprintf(stderr, " -h, --help Display this usage.\n");
4064 fprintf(stderr, " -c, --client-sock PATH Specify path for the client unix socket\n");
4065 fprintf(stderr, " -a, --apps-sock PATH Specify path for apps unix socket\n");
4066 fprintf(stderr, " --kconsumerd-err-sock PATH Specify path for the kernel consumer error socket\n");
4067 fprintf(stderr, " --kconsumerd-cmd-sock PATH Specify path for the kernel consumer command socket\n");
4068 fprintf(stderr, " --ustconsumerd32-err-sock PATH Specify path for the 32-bit UST consumer error socket\n");
4069 fprintf(stderr, " --ustconsumerd64-err-sock PATH Specify path for the 64-bit UST consumer error socket\n");
4070 fprintf(stderr, " --ustconsumerd32-cmd-sock PATH Specify path for the 32-bit UST consumer command socket\n");
4071 fprintf(stderr, " --ustconsumerd64-cmd-sock PATH Specify path for the 64-bit UST consumer command socket\n");
4072 fprintf(stderr, " --consumerd32-path PATH Specify path for the 32-bit UST consumer daemon binary\n");
4073 fprintf(stderr, " --consumerd32-libdir PATH Specify path for the 32-bit UST consumer daemon libraries\n");
4074 fprintf(stderr, " --consumerd64-path PATH Specify path for the 64-bit UST consumer daemon binary\n");
4075 fprintf(stderr, " --consumerd64-libdir PATH Specify path for the 64-bit UST consumer daemon libraries\n");
4076 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
4077 fprintf(stderr, " -b, --background Start as a daemon, keeping console open.\n");
4078 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
4079 fprintf(stderr, " -V, --version Show version number.\n");
4080 fprintf(stderr, " -S, --sig-parent Send SIGUSR1 to parent pid to notify readiness.\n");
4081 fprintf(stderr, " -q, --quiet No output at all.\n");
4082 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
4083 	fprintf(stderr, " -p, --pidfile FILE Write the PID to FILE, overriding the default path.\n");
4084 fprintf(stderr, " --verbose-consumer Verbose mode for consumer. Activate DBG() macro.\n");
4085 fprintf(stderr, " --no-kernel Disable kernel tracer\n");
4086 fprintf(stderr, " --jul-tcp-port JUL application registration TCP port\n");
4087 }
4088
4089 /*
4090 * daemon argument parsing
4091 */
4092 static int parse_args(int argc, char **argv)
4093 {
4094 int c;
4095
4096 static struct option long_options[] = {
4097 { "client-sock", 1, 0, 'c' },
4098 { "apps-sock", 1, 0, 'a' },
4099 { "kconsumerd-cmd-sock", 1, 0, 'C' },
4100 { "kconsumerd-err-sock", 1, 0, 'E' },
4101 { "ustconsumerd32-cmd-sock", 1, 0, 'G' },
4102 { "ustconsumerd32-err-sock", 1, 0, 'H' },
4103 { "ustconsumerd64-cmd-sock", 1, 0, 'D' },
4104 { "ustconsumerd64-err-sock", 1, 0, 'F' },
4105 { "consumerd32-path", 1, 0, 'u' },
4106 { "consumerd32-libdir", 1, 0, 'U' },
4107 { "consumerd64-path", 1, 0, 't' },
4108 { "consumerd64-libdir", 1, 0, 'T' },
4109 { "daemonize", 0, 0, 'd' },
4110 { "sig-parent", 0, 0, 'S' },
4111 { "help", 0, 0, 'h' },
4112 { "group", 1, 0, 'g' },
4113 { "version", 0, 0, 'V' },
4114 { "quiet", 0, 0, 'q' },
4115 { "verbose", 0, 0, 'v' },
4116 { "verbose-consumer", 0, 0, 'Z' },
4117 { "no-kernel", 0, 0, 'N' },
4118 { "pidfile", 1, 0, 'p' },
4119 { "jul-tcp-port", 1, 0, 'J' },
4120 { "background", 0, 0, 'b' },
4121 { NULL, 0, 0, 0 }
4122 };
4123
4124 while (1) {
4125 int option_index = 0;
4126 c = getopt_long(argc, argv, "dhqvVSN" "a:c:g:s:C:E:D:F:Z:u:t:p:J:b",
4127 long_options, &option_index);
4128 if (c == -1) {
4129 break;
4130 }
4131
4132 switch (c) {
4133 case 0:
4134 fprintf(stderr, "option %s", long_options[option_index].name);
4135 if (optarg) {
4136 fprintf(stderr, " with arg %s\n", optarg);
4137 }
4138 break;
4139 case 'c':
4140 snprintf(client_unix_sock_path, PATH_MAX, "%s", optarg);
4141 break;
4142 case 'a':
4143 snprintf(apps_unix_sock_path, PATH_MAX, "%s", optarg);
4144 break;
4145 case 'd':
4146 opt_daemon = 1;
4147 break;
4148 case 'b':
4149 opt_background = 1;
4150 break;
4151 case 'g':
4152 tracing_group_name = optarg;
4153 break;
4154 case 'h':
4155 usage();
4156 exit(EXIT_FAILURE);
4157 case 'V':
4158 fprintf(stdout, "%s\n", VERSION);
4159 exit(EXIT_SUCCESS);
4160 case 'S':
4161 opt_sig_parent = 1;
4162 break;
4163 case 'E':
4164 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4165 break;
4166 case 'C':
4167 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4168 break;
4169 case 'F':
4170 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4171 break;
4172 case 'D':
4173 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4174 break;
4175 case 'H':
4176 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX, "%s", optarg);
4177 break;
4178 case 'G':
4179 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX, "%s", optarg);
4180 break;
4181 case 'N':
4182 opt_no_kernel = 1;
4183 break;
4184 case 'q':
4185 lttng_opt_quiet = 1;
4186 break;
4187 case 'v':
4188 			/* The verbose level can be increased by passing -v multiple times. */
4189 lttng_opt_verbose += 1;
4190 break;
4191 case 'Z':
4192 opt_verbose_consumer += 1;
4193 break;
4194 case 'u':
4195 			consumerd32_bin = optarg;
4196 break;
4197 case 'U':
4198 consumerd32_libdir = optarg;
4199 break;
4200 case 't':
4201 consumerd64_bin = optarg;
4202 break;
4203 case 'T':
4204 consumerd64_libdir = optarg;
4205 break;
4206 case 'p':
4207 opt_pidfile = optarg;
4208 break;
4209 case 'J': /* JUL TCP port. */
4210 {
4211 unsigned long v;
4212
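			/*
			 * strtoul() accepts leading whitespace and an optional
			 * sign, so a negative input would silently wrap to a
			 * huge value; checking that the first character is a
			 * digit rejects those cases up front.
			 */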
4213 errno = 0;
4214 v = strtoul(optarg, NULL, 0);
4215 			if (errno != 0 || !isdigit((unsigned char) optarg[0])) {
4216 ERR("Wrong value in --jul-tcp-port parameter: %s", optarg);
4217 return -1;
4218 }
4219 			if (v == 0 || v > 65535) {
4220 				ERR("Port out of range in --jul-tcp-port parameter: %s (must be between 1 and 65535)", optarg);
4221 return -1;
4222 }
4223 jul_tcp_port = (uint32_t) v;
4224 DBG3("JUL TCP port set to non default: %u", jul_tcp_port);
4225 break;
4226 }
4227 default:
4228 			/* Unknown option or other error; the error
4229 			 * message is printed by getopt, just return. */
4230 return -1;
4231 }
4232 }
4233
4234 return 0;
4235 }
4236
4237 /*
4238  * Creates the two sockets needed by the daemon:
4239  * apps_sock - The communication socket for all UST apps.
4240  * client_sock - The communication socket for the cli tool (lttng).
4241 */
4242 static int init_daemon_socket(void)
4243 {
4244 int ret = 0;
4245 mode_t old_umask;
4246
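	/*
	 * Clear the umask so that the chmod() calls below have full control
	 * over the socket permissions: the client socket ends up 660
	 * (root and the tracing group only) while the application socket
	 * ends up 666 so that any instrumented application can register.
	 */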
4247 old_umask = umask(0);
4248
4249 /* Create client tool unix socket */
4250 client_sock = lttcomm_create_unix_sock(client_unix_sock_path);
4251 if (client_sock < 0) {
4252 ERR("Create unix sock failed: %s", client_unix_sock_path);
4253 ret = -1;
4254 goto end;
4255 }
4256
4257 /* Set the cloexec flag */
4258 ret = utils_set_fd_cloexec(client_sock);
4259 if (ret < 0) {
4260 ERR("Unable to set CLOEXEC flag to the client Unix socket (fd: %d). "
4261 "Continuing but note that the consumer daemon will have a "
4262 "reference to this socket on exec()", client_sock);
4263 }
4264
4265 /* File permission MUST be 660 */
4266 ret = chmod(client_unix_sock_path, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4267 if (ret < 0) {
4268 ERR("Set file permissions failed: %s", client_unix_sock_path);
4269 PERROR("chmod");
4270 goto end;
4271 }
4272
4273 /* Create the application unix socket */
4274 apps_sock = lttcomm_create_unix_sock(apps_unix_sock_path);
4275 if (apps_sock < 0) {
4276 ERR("Create unix sock failed: %s", apps_unix_sock_path);
4277 ret = -1;
4278 goto end;
4279 }
4280
4281 /* Set the cloexec flag */
4282 ret = utils_set_fd_cloexec(apps_sock);
4283 if (ret < 0) {
4284 ERR("Unable to set CLOEXEC flag to the app Unix socket (fd: %d). "
4285 "Continuing but note that the consumer daemon will have a "
4286 "reference to this socket on exec()", apps_sock);
4287 }
4288
4289 /* File permission MUST be 666 */
4290 ret = chmod(apps_unix_sock_path,
4291 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH);
4292 if (ret < 0) {
4293 ERR("Set file permissions failed: %s", apps_unix_sock_path);
4294 PERROR("chmod");
4295 goto end;
4296 }
4297
4298 DBG3("Session daemon client socket %d and application socket %d created",
4299 client_sock, apps_sock);
4300
4301 end:
4302 umask(old_umask);
4303 return ret;
4304 }
4305
4306 /*
4307  * Check if the global socket is available and if a daemon is answering on the
4308  * other side. If so, an error is returned.
4309 */
4310 static int check_existing_daemon(void)
4311 {
4312 	/* Is there anybody out there? */
4313 if (lttng_session_daemon_alive()) {
4314 return -EEXIST;
4315 }
4316
4317 return 0;
4318 }
4319
4320 /*
4321 * Set the tracing group gid onto the client socket.
4322 *
4323 * Race window between mkdir and chown is OK because we are going from more
4324 * permissive (root.root) to less permissive (root.tracing).
4325 */
4326 static int set_permissions(char *rundir)
4327 {
4328 int ret;
4329 gid_t gid;
4330
4331 gid = utils_get_group_id(tracing_group_name);
4332
4333 /* Set lttng run dir */
4334 ret = chown(rundir, 0, gid);
4335 if (ret < 0) {
4336 ERR("Unable to set group on %s", rundir);
4337 PERROR("chown");
4338 }
4339
4340 /*
4341 * Ensure all applications and tracing group can search the run
4342 * dir. Allow everyone to read the directory, since it does not
4343 * buy us anything to hide its content.
4344 */
4345 ret = chmod(rundir, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);
4346 if (ret < 0) {
4347 ERR("Unable to set permissions on %s", rundir);
4348 PERROR("chmod");
4349 }
4350
4351 /* lttng client socket path */
4352 ret = chown(client_unix_sock_path, 0, gid);
4353 if (ret < 0) {
4354 ERR("Unable to set group on %s", client_unix_sock_path);
4355 PERROR("chown");
4356 }
4357
4358 /* kconsumer error socket path */
4359 ret = chown(kconsumer_data.err_unix_sock_path, 0, 0);
4360 if (ret < 0) {
4361 ERR("Unable to set group on %s", kconsumer_data.err_unix_sock_path);
4362 PERROR("chown");
4363 }
4364
4365 /* 64-bit ustconsumer error socket path */
4366 ret = chown(ustconsumer64_data.err_unix_sock_path, 0, 0);
4367 if (ret < 0) {
4368 ERR("Unable to set group on %s", ustconsumer64_data.err_unix_sock_path);
4369 PERROR("chown");
4370 }
4371
4372 	/* 32-bit ustconsumer error socket path */
4373 ret = chown(ustconsumer32_data.err_unix_sock_path, 0, 0);
4374 if (ret < 0) {
4375 ERR("Unable to set group on %s", ustconsumer32_data.err_unix_sock_path);
4376 PERROR("chown");
4377 }
4378
4379 DBG("All permissions are set");
4380
4381 return ret;
4382 }
4383
4384 /*
4385 * Create the lttng run directory needed for all global sockets and pipe.
4386 */
4387 static int create_lttng_rundir(const char *rundir)
4388 {
4389 int ret;
4390
4391 DBG3("Creating LTTng run directory: %s", rundir);
4392
4393 ret = mkdir(rundir, S_IRWXU);
4394 if (ret < 0) {
4395 if (errno != EEXIST) {
4396 ERR("Unable to create %s", rundir);
4397 goto error;
4398 } else {
4399 ret = 0;
4400 }
4401 }
4402
4403 error:
4404 return ret;
4405 }
4406
4407 /*
4408  * Setup the socket and directory needed for the consumer daemon (of any
4409  * type) to communicate with the session daemon.
4410 */
4411 static int set_consumer_sockets(struct consumer_data *consumer_data,
4412 const char *rundir)
4413 {
4414 int ret;
4415 char path[PATH_MAX];
4416
4417 switch (consumer_data->type) {
4418 case LTTNG_CONSUMER_KERNEL:
4419 snprintf(path, PATH_MAX, DEFAULT_KCONSUMERD_PATH, rundir);
4420 break;
4421 case LTTNG_CONSUMER64_UST:
4422 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD64_PATH, rundir);
4423 break;
4424 case LTTNG_CONSUMER32_UST:
4425 snprintf(path, PATH_MAX, DEFAULT_USTCONSUMERD32_PATH, rundir);
4426 break;
4427 default:
4428 ERR("Consumer type unknown");
4429 ret = -EINVAL;
4430 goto error;
4431 }
4432
4433 DBG2("Creating consumer directory: %s", path);
4434
4435 ret = mkdir(path, S_IRWXU | S_IRGRP | S_IXGRP);
4436 if (ret < 0) {
4437 if (errno != EEXIST) {
4438 PERROR("mkdir");
4439 ERR("Failed to create %s", path);
4440 goto error;
4441 }
4442 		ret = 0;	/* Directory already exists; not an error. */
4443 }
4444 if (is_root) {
4445 ret = chown(path, 0, utils_get_group_id(tracing_group_name));
4446 if (ret < 0) {
4447 ERR("Unable to set group on %s", path);
4448 PERROR("chown");
4449 goto error;
4450 }
4451 }
4452
4453 	/* Create the consumer daemon error unix socket */
4454 consumer_data->err_sock =
4455 lttcomm_create_unix_sock(consumer_data->err_unix_sock_path);
4456 if (consumer_data->err_sock < 0) {
4457 ERR("Create unix sock failed: %s", consumer_data->err_unix_sock_path);
4458 ret = -1;
4459 goto error;
4460 }
4461
4462 /*
4463 * Set the CLOEXEC flag. Return code is useless because either way, the
4464 * show must go on.
4465 */
4466 ret = utils_set_fd_cloexec(consumer_data->err_sock);
4467 if (ret < 0) {
4468 PERROR("utils_set_fd_cloexec");
4469 /* continue anyway */
4470 }
4471
4472 /* File permission MUST be 660 */
4473 ret = chmod(consumer_data->err_unix_sock_path,
4474 S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
4475 if (ret < 0) {
4476 ERR("Set file permissions failed: %s", consumer_data->err_unix_sock_path);
4477 PERROR("chmod");
4478 goto error;
4479 }
4480
4481 error:
4482 return ret;
4483 }
4484
4485 /*
4486 * Signal handler for the daemon
4487 *
4488  * Simply stop all worker threads, letting main() return gracefully after
4489 * joining all threads and calling cleanup().
4490 */
4491 static void sighandler(int sig)
4492 {
4493 switch (sig) {
4494 case SIGPIPE:
4495 DBG("SIGPIPE caught");
4496 return;
4497 case SIGINT:
4498 DBG("SIGINT caught");
4499 stop_threads();
4500 break;
4501 case SIGTERM:
4502 DBG("SIGTERM caught");
4503 stop_threads();
4504 break;
4505 case SIGUSR1:
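		/*
		 * Async-signal-safe store; CMM_STORE_SHARED makes the write
		 * visible to the parent process waiting on recv_child_signal
		 * in lttng_daemonize().
		 */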
4506 CMM_STORE_SHARED(recv_child_signal, 1);
4507 break;
4508 default:
4509 break;
4510 }
4511 }
4512
4513 /*
4514  * Setup the signal handler for:
4515  * SIGINT, SIGTERM, SIGPIPE and SIGUSR1
4516 */
4517 static int set_signal_handler(void)
4518 {
4519 int ret = 0;
4520 struct sigaction sa;
4521 sigset_t sigset;
4522
4523 if ((ret = sigemptyset(&sigset)) < 0) {
4524 PERROR("sigemptyset");
4525 return ret;
4526 }
4527
4528 sa.sa_handler = sighandler;
4529 sa.sa_mask = sigset;
4530 sa.sa_flags = 0;
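	/*
	 * With an empty sa_mask, no extra signals are blocked while the
	 * handler runs, and sa_flags = 0 means no SA_RESTART: interrupted
	 * syscalls fail with EINTR, which lets blocking threads notice a
	 * shutdown request.
	 */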
4531 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
4532 PERROR("sigaction");
4533 return ret;
4534 }
4535
4536 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
4537 PERROR("sigaction");
4538 return ret;
4539 }
4540
4541 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
4542 PERROR("sigaction");
4543 return ret;
4544 }
4545
4546 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
4547 PERROR("sigaction");
4548 return ret;
4549 }
4550
4551 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
4552
4553 return ret;
4554 }
4555
4556 /*
4557  * Raise the open files limit. This daemon can open a large number of
4558  * file descriptors in order to consume multiple kernel traces.
4559 */
4560 static void set_ulimit(void)
4561 {
4562 int ret;
4563 struct rlimit lim;
4564
4565 	/* The kernel does not allow an infinite limit for open files */
4566 lim.rlim_cur = 65535;
4567 lim.rlim_max = 65535;
4568
4569 ret = setrlimit(RLIMIT_NOFILE, &lim);
4570 if (ret < 0) {
4571 PERROR("failed to set open files limit");
4572 }
4573 }
4574
4575 /*
4576 * Write pidfile using the rundir and opt_pidfile.
4577 */
4578 static void write_pidfile(void)
4579 {
4580 int ret;
4581 char pidfile_path[PATH_MAX];
4582
4583 assert(rundir);
4584
4585 if (opt_pidfile) {
4586 		(void) snprintf(pidfile_path, sizeof(pidfile_path), "%s", opt_pidfile);
4587 } else {
4588 		/* Build pidfile path from rundir and the default pidfile name. */
4589 ret = snprintf(pidfile_path, sizeof(pidfile_path), "%s/"
4590 DEFAULT_LTTNG_SESSIOND_PIDFILE, rundir);
4591 if (ret < 0) {
4592 PERROR("snprintf pidfile path");
4593 goto error;
4594 }
4595 }
4596
4597 /*
4598 	 * Create pid file in rundir. The return value is of no importance:
4599 	 * execution continues even if we are unable to write the file.
4600 */
4601 (void) utils_create_pid_file(getpid(), pidfile_path);
4602
4603 error:
4604 return;
4605 }
4606
4607 /*
4608 * Create lockfile using the rundir and return its fd.
4609 */
4610 static int create_lockfile(void)
4611 {
4612 int ret;
4613 char lockfile_path[PATH_MAX];
4614
4615 ret = generate_lock_file_path(lockfile_path, sizeof(lockfile_path));
4616 if (ret < 0) {
4617 goto error;
4618 }
4619
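	/*
	 * The returned fd is stored in lockfile_fd and kept open for the
	 * whole lifetime of the daemon: the lock it holds is what prevents
	 * a second sessiond from starting concurrently, and it is only
	 * released on exit.
	 */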
4620 ret = utils_create_lock_file(lockfile_path);
4621 error:
4622 return ret;
4623 }
4624
4625 /*
4626 * Write JUL TCP port using the rundir.
4627 */
4628 static void write_julport(void)
4629 {
4630 int ret;
4631 char path[PATH_MAX];
4632
4633 assert(rundir);
4634
4635 ret = snprintf(path, sizeof(path), "%s/"
4636 DEFAULT_LTTNG_SESSIOND_JULPORT_FILE, rundir);
4637 if (ret < 0) {
4638 PERROR("snprintf julport path");
4639 goto error;
4640 }
4641
4642 /*
4643 	 * Create the JUL TCP port file in rundir. The return value is of no
4644 	 * importance: execution continues even if we are unable to write the
4645 	 * file.
4646 */
4647 (void) utils_create_pid_file(jul_tcp_port, path);
4648
4649 error:
4650 return;
4651 }
4652
4653 /*
4654 * main
4655 */
4656 int main(int argc, char **argv)
4657 {
4658 int ret = 0;
4659 void *status;
4660 const char *home_path, *env_app_timeout;
4661
4662 init_kernel_workarounds();
4663
4664 rcu_register_thread();
4665
4666 if ((ret = set_signal_handler()) < 0) {
4667 goto error;
4668 }
4669
4670 setup_consumerd_path();
4671
4672 page_size = sysconf(_SC_PAGESIZE);
4673 if (page_size < 0) {
4674 PERROR("sysconf _SC_PAGESIZE");
4675 page_size = LONG_MAX;
4676 WARN("Fallback page size to %ld", page_size);
4677 }
4678
4679 /* Parse arguments */
4680 progname = argv[0];
4681 if ((ret = parse_args(argc, argv)) < 0) {
4682 goto error;
4683 }
4684
4685 /* Daemonize */
4686 if (opt_daemon || opt_background) {
4687 int i;
4688
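		/*
		 * lttng_daemonize() forks: the parent waits until the child
		 * signals readiness (SIGUSR1, see sighandler() setting
		 * recv_child_signal) and then exits, so only the child
		 * returns here.
		 */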
4689 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
4690 !opt_background);
4691 if (ret < 0) {
4692 goto error;
4693 }
4694
4695 /*
4696 * We are in the child. Make sure all other file descriptors are
4697 		 * closed, in case we were started with more open file descriptors than
4698 * the standard ones.
4699 */
4700 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
4701 (void) close(i);
4702 }
4703 }
4704
4705 /* Create thread quit pipe */
4706 if ((ret = init_thread_quit_pipe()) < 0) {
4707 goto error;
4708 }
4709
4710 /* Check if daemon is UID = 0 */
4711 is_root = !getuid();
4712
4713 if (is_root) {
4714 rundir = strdup(DEFAULT_LTTNG_RUNDIR);
4715 if (!rundir) {
4716 ret = -ENOMEM;
4717 goto error;
4718 }
4719
4720 /* Create global run dir with root access */
4721 ret = create_lttng_rundir(rundir);
4722 if (ret < 0) {
4723 goto error;
4724 }
4725
4726 if (strlen(apps_unix_sock_path) == 0) {
4727 snprintf(apps_unix_sock_path, PATH_MAX,
4728 DEFAULT_GLOBAL_APPS_UNIX_SOCK);
4729 }
4730
4731 if (strlen(client_unix_sock_path) == 0) {
4732 snprintf(client_unix_sock_path, PATH_MAX,
4733 DEFAULT_GLOBAL_CLIENT_UNIX_SOCK);
4734 }
4735
4736 /* Set global SHM for ust */
4737 if (strlen(wait_shm_path) == 0) {
4738 snprintf(wait_shm_path, PATH_MAX,
4739 DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH);
4740 }
4741
4742 if (strlen(health_unix_sock_path) == 0) {
4743 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4744 DEFAULT_GLOBAL_HEALTH_UNIX_SOCK);
4745 }
4746
4747 /* Setup kernel consumerd path */
4748 snprintf(kconsumer_data.err_unix_sock_path, PATH_MAX,
4749 DEFAULT_KCONSUMERD_ERR_SOCK_PATH, rundir);
4750 snprintf(kconsumer_data.cmd_unix_sock_path, PATH_MAX,
4751 DEFAULT_KCONSUMERD_CMD_SOCK_PATH, rundir);
4752
4753 DBG2("Kernel consumer err path: %s",
4754 kconsumer_data.err_unix_sock_path);
4755 DBG2("Kernel consumer cmd path: %s",
4756 kconsumer_data.cmd_unix_sock_path);
4757 } else {
4758 home_path = utils_get_home_dir();
4759 if (home_path == NULL) {
4760 /* TODO: Add --socket PATH option */
4761 ERR("Can't get HOME directory for sockets creation.");
4762 ret = -EPERM;
4763 goto error;
4764 }
4765
4766 /*
4767 * Create rundir from home path. This will create something like
4768 * $HOME/.lttng
4769 */
4770 ret = asprintf(&rundir, DEFAULT_LTTNG_HOME_RUNDIR, home_path);
4771 if (ret < 0) {
4772 ret = -ENOMEM;
4773 goto error;
4774 }
4775
4776 ret = create_lttng_rundir(rundir);
4777 if (ret < 0) {
4778 goto error;
4779 }
4780
4781 if (strlen(apps_unix_sock_path) == 0) {
4782 snprintf(apps_unix_sock_path, PATH_MAX,
4783 DEFAULT_HOME_APPS_UNIX_SOCK, home_path);
4784 }
4785
4786 /* Set the cli tool unix socket path */
4787 if (strlen(client_unix_sock_path) == 0) {
4788 snprintf(client_unix_sock_path, PATH_MAX,
4789 DEFAULT_HOME_CLIENT_UNIX_SOCK, home_path);
4790 }
4791
4792 /* Set global SHM for ust */
4793 if (strlen(wait_shm_path) == 0) {
4794 snprintf(wait_shm_path, PATH_MAX,
4795 DEFAULT_HOME_APPS_WAIT_SHM_PATH, getuid());
4796 }
4797
4798 /* Set health check Unix path */
4799 if (strlen(health_unix_sock_path) == 0) {
4800 snprintf(health_unix_sock_path, sizeof(health_unix_sock_path),
4801 DEFAULT_HOME_HEALTH_UNIX_SOCK, home_path);
4802 }
4803 }
4804
4805 lockfile_fd = create_lockfile();
4806 if (lockfile_fd < 0) {
4807 goto error;
4808 }
4809
4810 /* Set consumer initial state */
4811 kernel_consumerd_state = CONSUMER_STOPPED;
4812 ust_consumerd_state = CONSUMER_STOPPED;
4813
4814 DBG("Client socket path %s", client_unix_sock_path);
4815 DBG("Application socket path %s", apps_unix_sock_path);
4816 DBG("Application wait path %s", wait_shm_path);
4817 DBG("LTTng run directory path: %s", rundir);
4818
4819 /* 32 bits consumerd path setup */
4820 snprintf(ustconsumer32_data.err_unix_sock_path, PATH_MAX,
4821 DEFAULT_USTCONSUMERD32_ERR_SOCK_PATH, rundir);
4822 snprintf(ustconsumer32_data.cmd_unix_sock_path, PATH_MAX,
4823 DEFAULT_USTCONSUMERD32_CMD_SOCK_PATH, rundir);
4824
4825 DBG2("UST consumer 32 bits err path: %s",
4826 ustconsumer32_data.err_unix_sock_path);
4827 DBG2("UST consumer 32 bits cmd path: %s",
4828 ustconsumer32_data.cmd_unix_sock_path);
4829
4830 /* 64 bits consumerd path setup */
4831 snprintf(ustconsumer64_data.err_unix_sock_path, PATH_MAX,
4832 DEFAULT_USTCONSUMERD64_ERR_SOCK_PATH, rundir);
4833 snprintf(ustconsumer64_data.cmd_unix_sock_path, PATH_MAX,
4834 DEFAULT_USTCONSUMERD64_CMD_SOCK_PATH, rundir);
4835
4836 DBG2("UST consumer 64 bits err path: %s",
4837 ustconsumer64_data.err_unix_sock_path);
4838 DBG2("UST consumer 64 bits cmd path: %s",
4839 ustconsumer64_data.cmd_unix_sock_path);
4840
4841 /*
4842 	 * See if a daemon already exists.
4843 */
4844 if ((ret = check_existing_daemon()) < 0) {
4845 ERR("Already running daemon.\n");
4846 /*
4847 		 * We do not goto exit because we must not call cleanup()
4848 		 * while another daemon is already running.
4849 */
4850 goto error;
4851 }
4852
4853 /*
4854 	 * Init UST app hash table. The hash table must be allocated before this
4855 	 * point since cleanup() can get called at any time after it.
4856 */
4857 ust_app_ht_alloc();
4858
4859 /* Initialize JUL domain subsystem. */
4860 if ((ret = jul_init()) < 0) {
4861 /* ENOMEM at this point. */
4862 goto error;
4863 }
4864
4865 /* After this point, we can safely call cleanup() with "goto exit" */
4866
4867 /*
4868 * These actions must be executed as root. We do that *after* setting up
4869 	 * the socket paths because we MUST make the check for another daemon using
4870 * those paths *before* trying to set the kernel consumer sockets and init
4871 * kernel tracer.
4872 */
4873 if (is_root) {
4874 ret = set_consumer_sockets(&kconsumer_data, rundir);
4875 if (ret < 0) {
4876 goto exit;
4877 }
4878
4879 /* Setup kernel tracer */
4880 if (!opt_no_kernel) {
4881 init_kernel_tracer();
4882 }
4883
4884 /* Set ulimit for open files */
4885 set_ulimit();
4886 }
4887 /* init lttng_fd tracking must be done after set_ulimit. */
4888 lttng_fd_init();
4889
4890 ret = set_consumer_sockets(&ustconsumer64_data, rundir);
4891 if (ret < 0) {
4892 goto exit;
4893 }
4894
4895 ret = set_consumer_sockets(&ustconsumer32_data, rundir);
4896 if (ret < 0) {
4897 goto exit;
4898 }
4899
4900 /* Setup the needed unix socket */
4901 if ((ret = init_daemon_socket()) < 0) {
4902 goto exit;
4903 }
4904
4905 /* Set credentials to socket */
4906 if (is_root && ((ret = set_permissions(rundir)) < 0)) {
4907 goto exit;
4908 }
4909
4910 /* Get parent pid if -S, --sig-parent is specified. */
4911 if (opt_sig_parent) {
4912 ppid = getppid();
4913 }
4914
4915 /* Setup the kernel pipe for waking up the kernel thread */
4916 if (is_root && !opt_no_kernel) {
4917 if ((ret = utils_create_pipe_cloexec(kernel_poll_pipe)) < 0) {
4918 goto exit;
4919 }
4920 }
4921
4922 /* Setup the thread ht_cleanup communication pipe. */
4923 if (utils_create_pipe_cloexec(ht_cleanup_pipe) < 0) {
4924 goto exit;
4925 }
4926
4927 /* Setup the thread apps communication pipe. */
4928 if ((ret = utils_create_pipe_cloexec(apps_cmd_pipe)) < 0) {
4929 goto exit;
4930 }
4931
4932 /* Setup the thread apps notify communication pipe. */
4933 if (utils_create_pipe_cloexec(apps_cmd_notify_pipe) < 0) {
4934 goto exit;
4935 }
4936
4937 /* Initialize global buffer per UID and PID registry. */
4938 buffer_reg_init_uid_registry();
4939 buffer_reg_init_pid_registry();
4940
4941 /* Init UST command queue. */
4942 cds_wfq_init(&ust_cmd_queue.queue);
4943
4944 /*
4945 * Get session list pointer. This pointer MUST NOT be free(). This list is
4946 * statically declared in session.c
4947 */
4948 session_list_ptr = session_get_list();
4949
4950 /* Set up max poll set size */
4951 lttng_poll_set_max_size();
4952
4953 cmd_init();
4954
4955 /* Check for the application socket timeout env variable. */
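	/*
	 * Example (the variable name comes from DEFAULT_APP_SOCKET_TIMEOUT_ENV,
	 * typically "LTTNG_APP_SOCKET_TIMEOUT"):
	 *
	 *   LTTNG_APP_SOCKET_TIMEOUT=10 lttng-sessiond
	 */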
4956 env_app_timeout = getenv(DEFAULT_APP_SOCKET_TIMEOUT_ENV);
4957 if (env_app_timeout) {
4958 app_socket_timeout = atoi(env_app_timeout);
4959 } else {
4960 app_socket_timeout = DEFAULT_APP_SOCKET_RW_TIMEOUT;
4961 }
4962
4963 write_pidfile();
4964 write_julport();
4965
4966 /* Initialize communication library */
4967 lttcomm_init();
4968 /* This is to get the TCP timeout value. */
4969 lttcomm_inet_init();
4970
4971 /*
4972 * Initialize the health check subsystem. This call should set the
4973 * appropriate time values.
4974 */
4975 health_sessiond = health_app_create(NR_HEALTH_SESSIOND_TYPES);
4976 if (!health_sessiond) {
4977 PERROR("health_app_create error");
4978 goto exit_health_sessiond_cleanup;
4979 }
4980
4981 /* Create thread to clean up RCU hash tables */
4982 ret = pthread_create(&ht_cleanup_thread, NULL,
4983 thread_ht_cleanup, (void *) NULL);
4984 if (ret != 0) {
4985 PERROR("pthread_create ht_cleanup");
4986 goto exit_ht_cleanup;
4987 }
4988
4989 /* Create health-check thread */
4990 ret = pthread_create(&health_thread, NULL,
4991 thread_manage_health, (void *) NULL);
4992 if (ret != 0) {
4993 PERROR("pthread_create health");
4994 goto exit_health;
4995 }
4996
4997 /* Create thread to manage the client socket */
4998 ret = pthread_create(&client_thread, NULL,
4999 thread_manage_clients, (void *) NULL);
5000 if (ret != 0) {
5001 PERROR("pthread_create clients");
5002 goto exit_client;
5003 }
5004
5005 /* Create thread to dispatch registration */
5006 ret = pthread_create(&dispatch_thread, NULL,
5007 thread_dispatch_ust_registration, (void *) NULL);
5008 if (ret != 0) {
5009 PERROR("pthread_create dispatch");
5010 goto exit_dispatch;
5011 }
5012
5013 /* Create thread to manage application registration. */
5014 ret = pthread_create(&reg_apps_thread, NULL,
5015 thread_registration_apps, (void *) NULL);
5016 if (ret != 0) {
5017 PERROR("pthread_create registration");
5018 goto exit_reg_apps;
5019 }
5020
5021 /* Create thread to manage application socket */
5022 ret = pthread_create(&apps_thread, NULL,
5023 thread_manage_apps, (void *) NULL);
5024 if (ret != 0) {
5025 PERROR("pthread_create apps");
5026 goto exit_apps;
5027 }
5028
5029 /* Create thread to manage application notify socket */
5030 ret = pthread_create(&apps_notify_thread, NULL,
5031 ust_thread_manage_notify, (void *) NULL);
5032 if (ret != 0) {
5033 PERROR("pthread_create notify");
5034 goto exit_apps_notify;
5035 }
5036
5037 /* Create JUL registration thread. */
5038 ret = pthread_create(&jul_reg_thread, NULL,
5039 jul_thread_manage_registration, (void *) NULL);
5040 if (ret != 0) {
5041 PERROR("pthread_create JUL");
5042 goto exit_jul_reg;
5043 }
5044
5045 	/* Only start this thread if we are root and kernel tracing is enabled. */
5046 if (is_root && !opt_no_kernel) {
5047 /* Create kernel thread to manage kernel event */
5048 ret = pthread_create(&kernel_thread, NULL,
5049 thread_manage_kernel, (void *) NULL);
5050 if (ret != 0) {
5051 PERROR("pthread_create kernel");
5052 goto exit_kernel;
5053 }
5054
5055 ret = pthread_join(kernel_thread, &status);
5056 if (ret != 0) {
5057 PERROR("pthread_join");
5058 goto error; /* join error, exit without cleanup */
5059 }
5060 }
5061
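/*
 * Teardown ladder: each exit_* label below joins the threads that were
 * successfully created before the corresponding pthread_create() failed,
 * so a failure at step N unwinds steps N-1 .. 1 in reverse order. On the
 * normal path, execution simply falls through all of them.
 */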
5062 exit_kernel:
5063 ret = pthread_join(jul_reg_thread, &status);
5064 if (ret != 0) {
5065 PERROR("pthread_join JUL");
5066 goto error; /* join error, exit without cleanup */
5067 }
5068
5069 exit_jul_reg:
5070 ret = pthread_join(apps_notify_thread, &status);
5071 if (ret != 0) {
5072 PERROR("pthread_join apps notify");
5073 goto error; /* join error, exit without cleanup */
5074 }
5075
5076 exit_apps_notify:
5077 ret = pthread_join(apps_thread, &status);
5078 if (ret != 0) {
5079 PERROR("pthread_join apps");
5080 goto error; /* join error, exit without cleanup */
5081 }
5082
5083
5084 exit_apps:
5085 ret = pthread_join(reg_apps_thread, &status);
5086 if (ret != 0) {
5087 PERROR("pthread_join");
5088 goto error; /* join error, exit without cleanup */
5089 }
5090
5091 exit_reg_apps:
5092 ret = pthread_join(dispatch_thread, &status);
5093 if (ret != 0) {
5094 PERROR("pthread_join");
5095 goto error; /* join error, exit without cleanup */
5096 }
5097
5098 exit_dispatch:
5099 ret = pthread_join(client_thread, &status);
5100 if (ret != 0) {
5101 PERROR("pthread_join");
5102 goto error; /* join error, exit without cleanup */
5103 }
5104
5105 ret = join_consumer_thread(&kconsumer_data);
5106 if (ret != 0) {
5107 PERROR("join_consumer");
5108 goto error; /* join error, exit without cleanup */
5109 }
5110
5111 ret = join_consumer_thread(&ustconsumer32_data);
5112 if (ret != 0) {
5113 PERROR("join_consumer ust32");
5114 goto error; /* join error, exit without cleanup */
5115 }
5116
5117 ret = join_consumer_thread(&ustconsumer64_data);
5118 if (ret != 0) {
5119 PERROR("join_consumer ust64");
5120 goto error; /* join error, exit without cleanup */
5121 }
5122
5123 exit_client:
5124 ret = pthread_join(health_thread, &status);
5125 if (ret != 0) {
5126 PERROR("pthread_join health thread");
5127 goto error; /* join error, exit without cleanup */
5128 }
5129
5130 exit_health:
5131 ret = pthread_join(ht_cleanup_thread, &status);
5132 if (ret != 0) {
5133 PERROR("pthread_join ht cleanup thread");
5134 goto error; /* join error, exit without cleanup */
5135 }
5136 exit_ht_cleanup:
5137 health_app_destroy(health_sessiond);
5138 exit_health_sessiond_cleanup:
5139 exit:
5140 /*
5141 * cleanup() is called when no other thread is running.
5142 */
5143 rcu_thread_online();
5144 cleanup();
5145 rcu_thread_offline();
5146 rcu_unregister_thread();
5147 if (!ret) {
5148 exit(EXIT_SUCCESS);
5149 }
5150 error:
5151 exit(EXIT_FAILURE);
5152 }