Fix: relayd: unbalanced RCU read lock/unlock
[lttng-tools.git] / src / bin / lttng-relayd / main.c
1 /*
2 * Copyright (C) 2012 - Julien Desfossez <jdesfossez@efficios.com>
3 * David Goulet <dgoulet@efficios.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2 only,
7 * as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19 #define _GNU_SOURCE
20 #include <getopt.h>
21 #include <grp.h>
22 #include <limits.h>
23 #include <pthread.h>
24 #include <signal.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <string.h>
28 #include <sys/mman.h>
29 #include <sys/mount.h>
30 #include <sys/resource.h>
31 #include <sys/socket.h>
32 #include <sys/stat.h>
33 #include <sys/types.h>
34 #include <sys/wait.h>
35 #include <inttypes.h>
36 #include <urcu/futex.h>
37 #include <urcu/uatomic.h>
38 #include <unistd.h>
39 #include <fcntl.h>
40 #include <config.h>
41
42 #include <lttng/lttng.h>
43 #include <common/common.h>
44 #include <common/compat/poll.h>
45 #include <common/compat/socket.h>
46 #include <common/defaults.h>
47 #include <common/daemonize.h>
48 #include <common/futex.h>
49 #include <common/sessiond-comm/sessiond-comm.h>
50 #include <common/sessiond-comm/inet.h>
51 #include <common/sessiond-comm/relayd.h>
52 #include <common/uri.h>
53 #include <common/utils.h>
54
55 #include "cmd.h"
56 #include "ctf-trace.h"
57 #include "index.h"
58 #include "utils.h"
59 #include "lttng-relayd.h"
60 #include "live.h"
61 #include "health-relayd.h"
62 #include "testpoint.h"
63 #include "viewer-stream.h"
64 #include "session.h"
65 #include "stream.h"
66 #include "connection.h"
67
68 /* command line options */
69 char *opt_output_path;
70 static int opt_daemon, opt_background;
71
72 /*
73 * We need to wait for the listener and live listener threads, as well as
74 * the health check thread, before signaling readiness.
75 */
76 #define NR_LTTNG_RELAY_READY 3
77 static int lttng_relay_ready = NR_LTTNG_RELAY_READY;
78 static int recv_child_signal; /* Set to 1 when a SIGUSR1 signal is received. */
79 static pid_t child_ppid; /* Internal parent PID used with daemonize. */
80
81 static struct lttng_uri *control_uri;
82 static struct lttng_uri *data_uri;
83 static struct lttng_uri *live_uri;
84
85 const char *progname;
86
87 const char *tracing_group_name = DEFAULT_TRACING_GROUP;
88
89 /*
90 * Quit pipe for all threads. This permits a single cancellation point
91 * for all threads when receiving an event on the pipe.
92 */
93 int thread_quit_pipe[2] = { -1, -1 };
94
95 /*
96 * This pipe is used to inform the worker thread that a command is queued and
97 * ready to be processed.
98 */
99 static int relay_conn_pipe[2] = { -1, -1 };
100
101 /* Shared between threads */
102 static int dispatch_thread_exit;
103
104 static pthread_t listener_thread;
105 static pthread_t dispatcher_thread;
106 static pthread_t worker_thread;
107 static pthread_t health_thread;
108
109 static uint64_t last_relay_stream_id;
110
111 /*
112 * Relay command queue.
113 *
114 * The relay_thread_listener and relay_thread_dispatcher communicate with this
115 * queue.
116 */
117 static struct relay_conn_queue relay_conn_queue;
118
119 /* buffer allocated at startup, used to store the trace data */
120 static char *data_buffer;
121 static unsigned int data_buffer_size;
122
123 /* We need those values for the file/dir creation. */
124 static uid_t relayd_uid;
125 static gid_t relayd_gid;
126
127 /* Global relay stream hash table. */
128 struct lttng_ht *relay_streams_ht;
129
130 /* Global relay viewer stream hash table. */
131 struct lttng_ht *viewer_streams_ht;
132
133 /* Global hash table that stores relay index object. */
134 struct lttng_ht *indexes_ht;
135
136 /* Relayd health monitoring */
137 struct health_app *health_relayd;
138
139 /*
140 * usage function on stderr
141 */
142 static
143 void usage(void)
144 {
145 fprintf(stderr, "Usage: %s OPTIONS\n\nOptions:\n", progname);
146 fprintf(stderr, " -h, --help Display this usage.\n");
147 fprintf(stderr, " -d, --daemonize Start as a daemon.\n");
148 fprintf(stderr, " -b, --background Start as a daemon, keeping console open.\n");
149 fprintf(stderr, " -C, --control-port URL Control port URL to listen on.\n");
150 fprintf(stderr, " -D, --data-port URL Data port URL to listen on.\n");
151 fprintf(stderr, " -L, --live-port URL Live view port URL to listen on.\n");
152 fprintf(stderr, " -o, --output PATH Output path for traces. Must use an absolute path.\n");
153 fprintf(stderr, " -v, --verbose Verbose mode. Activate DBG() macro.\n");
154 fprintf(stderr, " -g, --group NAME Specify the tracing group name. (default: tracing)\n");
155 }
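/*
 * Illustrative invocation (the output path and port numbers below are only
 * examples, not values taken from this file):
 *
 *   lttng-relayd --daemonize -o /var/lib/lttng/traces \
 *           -C tcp://0.0.0.0:5342 -D tcp://0.0.0.0:5343
 *
 * Any URI left unspecified falls back to the corresponding
 * DEFAULT_NETWORK_*_PORT / bind address defaults set up in parse_args() below.
 */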
156
157 static
158 int parse_args(int argc, char **argv)
159 {
160 int c;
161 int ret = 0;
162 char *default_address;
163
164 static struct option long_options[] = {
165 { "control-port", 1, 0, 'C', },
166 { "data-port", 1, 0, 'D', },
167 { "daemonize", 0, 0, 'd', },
168 { "group", 1, 0, 'g', },
169 { "help", 0, 0, 'h', },
170 { "output", 1, 0, 'o', },
171 { "verbose", 0, 0, 'v', },
172 { "background", 0, 0, 'b' },
173 { NULL, 0, 0, 0, },
174 };
175
176 while (1) {
177 int option_index = 0;
178 c = getopt_long(argc, argv, "dhv" "C:D:L:o:g:b",
179 long_options, &option_index);
180 if (c == -1) {
181 break;
182 }
183
184 switch (c) {
185 case 0:
186 fprintf(stderr, "option %s", long_options[option_index].name);
187 if (optarg) {
188 fprintf(stderr, " with arg %s\n", optarg);
189 }
190 break;
191 case 'C':
192 ret = uri_parse(optarg, &control_uri);
193 if (ret < 0) {
194 ERR("Invalid control URI specified");
195 goto exit;
196 }
197 if (control_uri->port == 0) {
198 control_uri->port = DEFAULT_NETWORK_CONTROL_PORT;
199 }
200 break;
201 case 'D':
202 ret = uri_parse(optarg, &data_uri);
203 if (ret < 0) {
204 ERR("Invalid data URI specified");
205 goto exit;
206 }
207 if (data_uri->port == 0) {
208 data_uri->port = DEFAULT_NETWORK_DATA_PORT;
209 }
210 break;
211 case 'L':
212 ret = uri_parse(optarg, &live_uri);
213 if (ret < 0) {
214 ERR("Invalid live URI specified");
215 goto exit;
216 }
217 if (live_uri->port == 0) {
218 live_uri->port = DEFAULT_NETWORK_VIEWER_PORT;
219 }
220 break;
221 case 'd':
222 opt_daemon = 1;
223 break;
224 case 'b':
225 opt_background = 1;
226 break;
227 case 'g':
228 tracing_group_name = optarg;
229 break;
230 case 'h':
231 usage();
232 exit(EXIT_FAILURE);
233 case 'o':
234 ret = asprintf(&opt_output_path, "%s", optarg);
235 if (ret < 0) {
236 PERROR("asprintf opt_output_path");
237 goto exit;
238 }
239 break;
240 case 'v':
241 /* Verbose level can increase using multiple -v */
242 lttng_opt_verbose += 1;
243 break;
244 default:
245 /* Unknown option or other error.
246 * Error is printed by getopt, just return */
247 ret = -1;
248 goto exit;
249 }
250 }
251
252 /* assign default values */
253 if (control_uri == NULL) {
254 ret = asprintf(&default_address,
255 "tcp://" DEFAULT_NETWORK_CONTROL_BIND_ADDRESS ":%d",
256 DEFAULT_NETWORK_CONTROL_PORT);
257 if (ret < 0) {
258 PERROR("asprintf default control address");
259 goto exit;
260 }
261
262 ret = uri_parse(default_address, &control_uri);
263 free(default_address);
264 if (ret < 0) {
265 ERR("Invalid control URI specified");
266 goto exit;
267 }
268 }
269 if (data_uri == NULL) {
270 ret = asprintf(&default_address,
271 "tcp://" DEFAULT_NETWORK_DATA_BIND_ADDRESS ":%d",
272 DEFAULT_NETWORK_DATA_PORT);
273 if (ret < 0) {
274 PERROR("asprintf default data address");
275 goto exit;
276 }
277
278 ret = uri_parse(default_address, &data_uri);
279 free(default_address);
280 if (ret < 0) {
281 ERR("Invalid data URI specified");
282 goto exit;
283 }
284 }
285 if (live_uri == NULL) {
286 ret = asprintf(&default_address,
287 "tcp://" DEFAULT_NETWORK_VIEWER_BIND_ADDRESS ":%d",
288 DEFAULT_NETWORK_VIEWER_PORT);
289 if (ret < 0) {
290 PERROR("asprintf default viewer control address");
291 goto exit;
292 }
293
294 ret = uri_parse(default_address, &live_uri);
295 free(default_address);
296 if (ret < 0) {
297 ERR("Invalid viewer control URI specified");
298 goto exit;
299 }
300 }
301
302 exit:
303 return ret;
304 }
305
306 /*
307 * Cleanup the daemon
308 */
309 static
310 void cleanup(void)
311 {
312 DBG("Cleaning up");
313
314 /* free the dynamically allocated opt_output_path */
315 free(opt_output_path);
316
317 /* Close thread quit pipes */
318 utils_close_pipe(thread_quit_pipe);
319
320 uri_free(control_uri);
321 uri_free(data_uri);
322 /* Live URI is freed in the live thread. */
323 }
324
325 /*
326 * Write to writable pipe used to notify a thread.
327 */
328 static
329 int notify_thread_pipe(int wpipe)
330 {
331 ssize_t ret;
332
333 ret = lttng_write(wpipe, "!", 1);
334 if (ret < 1) {
335 PERROR("write poll pipe");
336 }
337
338 return ret;
339 }
340
341 static void notify_health_quit_pipe(int *pipe)
342 {
343 ssize_t ret;
344
345 ret = lttng_write(pipe[1], "4", 1);
346 if (ret < 1) {
347 PERROR("write relay health quit");
348 }
349 }
350
351 /*
352 * Stop all threads by closing the thread quit pipe.
353 */
354 static
355 void stop_threads(void)
356 {
357 int ret;
358
359 /* Stopping all threads */
360 DBG("Terminating all threads");
361 ret = notify_thread_pipe(thread_quit_pipe[1]);
362 if (ret < 0) {
363 ERR("write error on thread quit pipe");
364 }
365
366 notify_health_quit_pipe(health_quit_pipe);
367
368 /* Dispatch thread */
369 CMM_STORE_SHARED(dispatch_thread_exit, 1);
370 futex_nto1_wake(&relay_conn_queue.futex);
371 }
372
373 /*
374 * Signal handler for the daemon
375 *
376 * Simply stop all worker threads, leaving main() return gracefully after
377 * joining all threads and calling cleanup().
378 */
379 static
380 void sighandler(int sig)
381 {
382 switch (sig) {
383 case SIGPIPE:
384 DBG("SIGPIPE caught");
385 return;
386 case SIGINT:
387 DBG("SIGINT caught");
388 stop_threads();
389 break;
390 case SIGTERM:
391 DBG("SIGTERM caught");
392 stop_threads();
393 break;
394 case SIGUSR1:
395 CMM_STORE_SHARED(recv_child_signal, 1);
396 break;
397 default:
398 break;
399 }
400 }
401
402 /*
403 * Set up the signal handler for:
404 * SIGINT, SIGTERM, SIGPIPE and SIGUSR1
405 */
406 static
407 int set_signal_handler(void)
408 {
409 int ret = 0;
410 struct sigaction sa;
411 sigset_t sigset;
412
413 if ((ret = sigemptyset(&sigset)) < 0) {
414 PERROR("sigemptyset");
415 return ret;
416 }
417
418 sa.sa_handler = sighandler;
419 sa.sa_mask = sigset;
420 sa.sa_flags = 0;
421 if ((ret = sigaction(SIGTERM, &sa, NULL)) < 0) {
422 PERROR("sigaction");
423 return ret;
424 }
425
426 if ((ret = sigaction(SIGINT, &sa, NULL)) < 0) {
427 PERROR("sigaction");
428 return ret;
429 }
430
431 if ((ret = sigaction(SIGPIPE, &sa, NULL)) < 0) {
432 PERROR("sigaction");
433 return ret;
434 }
435
436 if ((ret = sigaction(SIGUSR1, &sa, NULL)) < 0) {
437 PERROR("sigaction");
438 return ret;
439 }
440
441 DBG("Signal handler set for SIGTERM, SIGUSR1, SIGPIPE and SIGINT");
442
443 return ret;
444 }
445
446 void lttng_relay_notify_ready(void)
447 {
448 /* Notify the parent of the fork() process that we are ready. */
449 if (opt_daemon || opt_background) {
450 if (uatomic_sub_return(&lttng_relay_ready, 1) == 0) {
451 kill(child_ppid, SIGUSR1);
452 }
453 }
454 }
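/*
 * Note: each of the NR_LTTNG_RELAY_READY threads (listener, live listener and
 * health check) calls lttng_relay_notify_ready() once; the call that brings
 * the counter down to zero signals the parent of the daemonize fork() with
 * SIGUSR1 (see child_ppid and recv_child_signal above).
 */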
455
456 /*
457 * Init thread quit pipe.
458 *
459 * Return -1 on error or 0 if all pipes are created.
460 */
461 static
462 int init_thread_quit_pipe(void)
463 {
464 int ret;
465
466 ret = utils_create_pipe_cloexec(thread_quit_pipe);
467
468 return ret;
469 }
470
471 /*
472 * Create a poll set with O_CLOEXEC and add the thread quit pipe to the set.
473 */
474 static
475 int create_thread_poll_set(struct lttng_poll_event *events, int size)
476 {
477 int ret;
478
479 if (events == NULL || size == 0) {
480 ret = -1;
481 goto error;
482 }
483
484 ret = lttng_poll_create(events, size, LTTNG_CLOEXEC);
485 if (ret < 0) {
486 goto error;
487 }
488
489 /* Add quit pipe */
490 ret = lttng_poll_add(events, thread_quit_pipe[0], LPOLLIN | LPOLLERR);
491 if (ret < 0) {
492 goto error;
493 }
494
495 return 0;
496
497 error:
498 return ret;
499 }
500
501 /*
502 * Check if the thread quit pipe was triggered.
503 *
504 * Return 1 if it was triggered, else 0.
505 */
506 static
507 int check_thread_quit_pipe(int fd, uint32_t events)
508 {
509 if (fd == thread_quit_pipe[0] && (events & LPOLLIN)) {
510 return 1;
511 }
512
513 return 0;
514 }
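/*
 * Typical poll-loop usage of the two helpers above, as done in
 * relay_thread_listener() below: the quit pipe is part of the poll set, so a
 * thread only needs to test the returned fd before handling its own sockets.
 *
 *   revents = LTTNG_POLL_GETEV(&events, i);
 *   pollfd = LTTNG_POLL_GETFD(&events, i);
 *   if (check_thread_quit_pipe(pollfd, revents)) {
 *           goto exit;
 *   }
 */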
515
516 /*
517 * Create and init socket from uri.
518 */
519 static
520 struct lttcomm_sock *relay_init_sock(struct lttng_uri *uri)
521 {
522 int ret;
523 struct lttcomm_sock *sock = NULL;
524
525 sock = lttcomm_alloc_sock_from_uri(uri);
526 if (sock == NULL) {
527 ERR("Allocating socket");
528 goto error;
529 }
530
531 ret = lttcomm_create_sock(sock);
532 if (ret < 0) {
533 goto error;
534 }
535 DBG("Listening on sock %d", sock->fd);
536
537 ret = sock->ops->bind(sock);
538 if (ret < 0) {
539 goto error;
540 }
541
542 ret = sock->ops->listen(sock, -1);
543 if (ret < 0) {
544 goto error;
545
546 }
547
548 return sock;
549
550 error:
551 if (sock) {
552 lttcomm_destroy_sock(sock);
553 }
554 return NULL;
555 }
556
557 /*
558 * Return nonzero if stream needs to be closed.
559 */
560 static
561 int close_stream_check(struct relay_stream *stream)
562 {
563 if (stream->close_flag && stream->prev_seq == stream->last_net_seq_num) {
564 /*
565 * We are about to close the stream so set the data pending flag to 1
566 * which will make the end data pending command skip the stream which
567 * is now closed and ready. Note that after proceeding to a file close,
568 * the written file is ready for reading.
569 */
570 stream->data_pending_check_done = 1;
571 return 1;
572 }
573 return 0;
574 }
575
576 static void try_close_stream(struct relay_session *session,
577 struct relay_stream *stream)
578 {
579 int ret;
580 struct ctf_trace *ctf_trace;
581
582 assert(session);
583 assert(stream);
584
585 if (!close_stream_check(stream)) {
586 /* Can't close it, not ready for that. */
587 goto end;
588 }
589
590 ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht,
591 stream->path_name);
592 assert(ctf_trace);
593
594 pthread_mutex_lock(&session->viewer_ready_lock);
595 ctf_trace->invalid_flag = 1;
596 pthread_mutex_unlock(&session->viewer_ready_lock);
597
598 ret = stream_close(session, stream);
599 if (ret || session->snapshot) {
600 /* Already closed, thus the ctf trace is being or has been destroyed. */
601 goto end;
602 }
603
604 ctf_trace_try_destroy(session, ctf_trace);
605
606 end:
607 return;
608 }
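/*
 * A stream is only really closed once the data socket has caught up:
 * close_stream_check() requires prev_seq (last data received) to reach the
 * last_net_seq_num announced by the RELAYD_CLOSE_STREAM command, so whichever
 * of the control or data path gets there last performs the close.
 */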
609
610 /*
611 * This thread listens for new connections on the network.
612 */
613 static
614 void *relay_thread_listener(void *data)
615 {
616 int i, ret, pollfd, err = -1;
617 uint32_t revents, nb_fd;
618 struct lttng_poll_event events;
619 struct lttcomm_sock *control_sock, *data_sock;
620
621 DBG("[thread] Relay listener started");
622
623 health_register(health_relayd, HEALTH_RELAYD_TYPE_LISTENER);
624
625 health_code_update();
626
627 control_sock = relay_init_sock(control_uri);
628 if (!control_sock) {
629 goto error_sock_control;
630 }
631
632 data_sock = relay_init_sock(data_uri);
633 if (!data_sock) {
634 goto error_sock_relay;
635 }
636
637 /*
638 * Pass 3 as size here for the thread quit pipe, control and data socket.
639 */
640 ret = create_thread_poll_set(&events, 3);
641 if (ret < 0) {
642 goto error_create_poll;
643 }
644
645 /* Add the control socket */
646 ret = lttng_poll_add(&events, control_sock->fd, LPOLLIN | LPOLLRDHUP);
647 if (ret < 0) {
648 goto error_poll_add;
649 }
650
651 /* Add the data socket */
652 ret = lttng_poll_add(&events, data_sock->fd, LPOLLIN | LPOLLRDHUP);
653 if (ret < 0) {
654 goto error_poll_add;
655 }
656
657 lttng_relay_notify_ready();
658
659 if (testpoint(relayd_thread_listener)) {
660 goto error_testpoint;
661 }
662
663 while (1) {
664 health_code_update();
665
666 DBG("Listener accepting connections");
667
668 restart:
669 health_poll_entry();
670 ret = lttng_poll_wait(&events, -1);
671 health_poll_exit();
672 if (ret < 0) {
673 /*
674 * Restart interrupted system call.
675 */
676 if (errno == EINTR) {
677 goto restart;
678 }
679 goto error;
680 }
681
682 nb_fd = ret;
683
684 DBG("Relay new connection received");
685 for (i = 0; i < nb_fd; i++) {
686 health_code_update();
687
688 /* Fetch once the poll data */
689 revents = LTTNG_POLL_GETEV(&events, i);
690 pollfd = LTTNG_POLL_GETFD(&events, i);
691
692 /* Thread quit pipe has been closed. Killing thread. */
693 ret = check_thread_quit_pipe(pollfd, revents);
694 if (ret) {
695 err = 0;
696 goto exit;
697 }
698
699 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
700 ERR("socket poll error");
701 goto error;
702 } else if (revents & LPOLLIN) {
703 /*
704 * Allocated in this thread, enqueued to a global queue,
705 * dequeued and freed in the worker thread.
706 */
707 int val = 1;
708 struct relay_connection *new_conn;
709 struct lttcomm_sock *newsock;
710
711 new_conn = connection_create();
712 if (!new_conn) {
713 goto error;
714 }
715
716 if (pollfd == data_sock->fd) {
717 new_conn->type = RELAY_DATA;
718 newsock = data_sock->ops->accept(data_sock);
719 DBG("Relay data connection accepted, socket %d",
720 newsock->fd);
721 } else {
722 assert(pollfd == control_sock->fd);
723 new_conn->type = RELAY_CONTROL;
724 newsock = control_sock->ops->accept(control_sock);
725 DBG("Relay control connection accepted, socket %d",
726 newsock->fd);
727 }
728 if (!newsock) {
729 PERROR("accepting sock");
730 connection_free(new_conn);
731 goto error;
732 }
733
734 ret = setsockopt(newsock->fd, SOL_SOCKET, SO_REUSEADDR, &val,
735 sizeof(val));
736 if (ret < 0) {
737 PERROR("setsockopt inet");
738 lttcomm_destroy_sock(newsock);
739 connection_free(new_conn);
740 goto error;
741 }
742 new_conn->sock = newsock;
743
744 /* Enqueue request for the dispatcher thread. */
745 cds_wfq_enqueue(&relay_conn_queue.queue, &new_conn->qnode);
746
747 /*
748 * Wake the dispatch queue futex. Implicit memory barrier with
749 * the exchange in cds_wfq_enqueue.
750 */
751 futex_nto1_wake(&relay_conn_queue.futex);
752 }
753 }
754 }
755
756 exit:
757 error:
758 error_poll_add:
759 error_testpoint:
760 lttng_poll_clean(&events);
761 error_create_poll:
762 if (data_sock->fd >= 0) {
763 ret = data_sock->ops->close(data_sock);
764 if (ret) {
765 PERROR("close");
766 }
767 }
768 lttcomm_destroy_sock(data_sock);
769 error_sock_relay:
770 if (control_sock->fd >= 0) {
771 ret = control_sock->ops->close(control_sock);
772 if (ret) {
773 PERROR("close");
774 }
775 }
776 lttcomm_destroy_sock(control_sock);
777 error_sock_control:
778 if (err) {
779 health_error();
780 ERR("Health error occurred in %s", __func__);
781 }
782 health_unregister(health_relayd);
783 DBG("Relay listener thread cleanup complete");
784 stop_threads();
785 return NULL;
786 }
787
788 /*
789 * This thread manages the dispatching of the requests to worker threads
790 */
791 static
792 void *relay_thread_dispatcher(void *data)
793 {
794 int err = -1;
795 ssize_t ret;
796 struct cds_wfq_node *node;
797 struct relay_connection *new_conn = NULL;
798
799 DBG("[thread] Relay dispatcher started");
800
801 health_register(health_relayd, HEALTH_RELAYD_TYPE_DISPATCHER);
802
803 if (testpoint(relayd_thread_dispatcher)) {
804 goto error_testpoint;
805 }
806
807 health_code_update();
808
809 while (!CMM_LOAD_SHARED(dispatch_thread_exit)) {
810 health_code_update();
811
812 /* Atomically prepare the queue futex */
813 futex_nto1_prepare(&relay_conn_queue.futex);
814
815 do {
816 health_code_update();
817
818 /* Dequeue commands */
819 node = cds_wfq_dequeue_blocking(&relay_conn_queue.queue);
820 if (node == NULL) {
821 DBG("Woken up but nothing in the relay command queue");
822 /* Continue thread execution */
823 break;
824 }
825 new_conn = caa_container_of(node, struct relay_connection, qnode);
826
827 DBG("Dispatching request waiting on sock %d", new_conn->sock->fd);
828
829 /*
830 * Inform worker thread of the new request. This call is blocking
831 * so we can be assured that the data will be read at some point in
832 * time or wait to the end of the world :)
833 */
834 ret = lttng_write(relay_conn_pipe[1], &new_conn, sizeof(new_conn));
835 if (ret < 0) {
836 PERROR("write connection pipe");
837 connection_destroy(new_conn);
838 goto error;
839 }
840 } while (node != NULL);
841
842 /* Futex wait on queue. Blocking call on futex() */
843 health_poll_entry();
844 futex_nto1_wait(&relay_conn_queue.futex);
845 health_poll_exit();
846 }
847
848 /* Normal exit, no error */
849 err = 0;
850
851 error:
852 error_testpoint:
853 if (err) {
854 health_error();
855 ERR("Health error occurred in %s", __func__);
856 }
857 health_unregister(health_relayd);
858 DBG("Dispatch thread dying");
859 stop_threads();
860 return NULL;
861 }
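/*
 * Hand-off between the listener and this dispatcher is a wait-free queue plus
 * futex pattern: the listener enqueues the new connection with
 * cds_wfq_enqueue() and calls futex_nto1_wake(), while this thread brackets
 * its dequeue loop with futex_nto1_prepare()/futex_nto1_wait() so it only
 * sleeps when the queue is really empty. Each dequeued connection pointer is
 * then written to relay_conn_pipe for the worker thread to pick up.
 */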
862
863 static void try_close_streams(struct relay_session *session)
864 {
865 struct ctf_trace *ctf_trace;
866 struct lttng_ht_iter iter;
867
868 assert(session);
869
870 pthread_mutex_lock(&session->viewer_ready_lock);
871 rcu_read_lock();
872 cds_lfht_for_each_entry(session->ctf_traces_ht->ht, &iter.iter, ctf_trace,
873 node.node) {
874 struct relay_stream *stream;
875
876 /* Close streams. */
877 cds_list_for_each_entry(stream, &ctf_trace->stream_list, trace_list) {
878 stream_close(session, stream);
879 }
880
881 ctf_trace->invalid_flag = 1;
882 ctf_trace_try_destroy(session, ctf_trace);
883 }
884 rcu_read_unlock();
885 pthread_mutex_unlock(&session->viewer_ready_lock);
886 }
887
888 /*
889 * Try to destroy a session within a connection.
890 */
891 static void destroy_session(struct relay_session *session,
892 struct lttng_ht *sessions_ht)
893 {
894 assert(session);
895 assert(sessions_ht);
896
897 /* Indicate that this session can be destroyed from now on. */
898 session->close_flag = 1;
899
900 try_close_streams(session);
901
902 /*
903 * This will try to delete and destroy the session if no viewer is attached
904 * to it meaning the refcount is down to zero.
905 */
906 session_try_destroy(sessions_ht, session);
907 }
908
909 /*
910 * Copy index data from the control port to a given index object.
911 */
912 static void copy_index_control_data(struct relay_index *index,
913 struct lttcomm_relayd_index *data)
914 {
915 assert(index);
916 assert(data);
917
918 /*
919 * The index on disk is encoded in big endian, so we don't need to convert
920 * the data received on the network. The data_offset value is NEVER
921 * modified here and is updated by the data thread.
922 */
923 index->index_data.packet_size = data->packet_size;
924 index->index_data.content_size = data->content_size;
925 index->index_data.timestamp_begin = data->timestamp_begin;
926 index->index_data.timestamp_end = data->timestamp_end;
927 index->index_data.events_discarded = data->events_discarded;
928 index->index_data.stream_id = data->stream_id;
929 }
930
931 /*
932 * Handle the RELAYD_CREATE_SESSION command.
933 *
934 * On success, send back the session id or else return a negative value.
935 */
936 static
937 int relay_create_session(struct lttcomm_relayd_hdr *recv_hdr,
938 struct relay_connection *conn)
939 {
940 int ret = 0, send_ret;
941 struct relay_session *session;
942 struct lttcomm_relayd_status_session reply;
943
944 assert(recv_hdr);
945 assert(conn);
946
947 memset(&reply, 0, sizeof(reply));
948
949 session = session_create();
950 if (!session) {
951 ret = -1;
952 goto error;
953 }
954 session->minor = conn->minor;
955 session->major = conn->major;
956 conn->session_id = session->id;
957 conn->session = session;
958
959 reply.session_id = htobe64(session->id);
960
961 switch (conn->minor) {
962 case 1:
963 case 2:
964 case 3:
965 break;
966 case 4: /* LTTng sessiond 2.4 */
967 default:
968 ret = cmd_create_session_2_4(conn, session);
969 }
970
971 lttng_ht_add_unique_u64(conn->sessions_ht, &session->session_n);
972 DBG("Created session %" PRIu64, session->id);
973
974 error:
975 if (ret < 0) {
976 reply.ret_code = htobe32(LTTNG_ERR_FATAL);
977 } else {
978 reply.ret_code = htobe32(LTTNG_OK);
979 }
980
981 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
982 if (send_ret < 0) {
983 ERR("Relayd sending session id");
984 ret = send_ret;
985 }
986
987 return ret;
988 }
989
990 /*
991 * When we have received all the streams and the metadata for a channel,
992 * we make them visible to the viewer threads.
993 */
994 static
995 void set_viewer_ready_flag(struct relay_connection *conn)
996 {
997 struct relay_stream *stream, *tmp_stream;
998
999 pthread_mutex_lock(&conn->session->viewer_ready_lock);
1000 cds_list_for_each_entry_safe(stream, tmp_stream, &conn->recv_head,
1001 recv_list) {
1002 stream->viewer_ready = 1;
1003 cds_list_del(&stream->recv_list);
1004 }
1005 pthread_mutex_unlock(&conn->session->viewer_ready_lock);
1006 return;
1007 }
1008
1009 /*
1010 * Add the given stream to the connection recv list. The stream is removed
1011 * from this list (in set_viewer_ready_flag) once the streams_sent command
1012 * marks it as ready for the viewer.
1013 */
1014 static void queue_stream(struct relay_stream *stream,
1015 struct relay_connection *conn)
1016 {
1017 assert(conn);
1018 assert(stream);
1019
1020 cds_list_add(&stream->recv_list, &conn->recv_head);
1021 }
1022
1023 /*
1024 * relay_add_stream: allocate a new stream for a session
1025 */
1026 static
1027 int relay_add_stream(struct lttcomm_relayd_hdr *recv_hdr,
1028 struct relay_connection *conn)
1029 {
1030 int ret, send_ret;
1031 struct relay_session *session = conn->session;
1032 struct relay_stream *stream = NULL;
1033 struct lttcomm_relayd_status_stream reply;
1034 struct ctf_trace *trace;
1035
1036 if (!session || conn->version_check_done == 0) {
1037 ERR("Trying to add a stream before version check");
1038 ret = -1;
1039 goto end_no_session;
1040 }
1041
1042 stream = zmalloc(sizeof(struct relay_stream));
1043 if (stream == NULL) {
1044 PERROR("relay stream zmalloc");
1045 ret = -1;
1046 goto end_no_session;
1047 }
1048
1049 switch (conn->minor) {
1050 case 1: /* LTTng sessiond 2.1 */
1051 ret = cmd_recv_stream_2_1(conn, stream);
1052 break;
1053 case 2: /* LTTng sessiond 2.2 */
1054 default:
1055 ret = cmd_recv_stream_2_2(conn, stream);
1056 break;
1057 }
1058 if (ret < 0) {
1059 goto err_free_stream;
1060 }
1061
1062 rcu_read_lock();
1063 stream->stream_handle = ++last_relay_stream_id;
1064 stream->prev_seq = -1ULL;
1065 stream->session_id = session->id;
1066 stream->index_fd = -1;
1067 stream->read_index_fd = -1;
1068 stream->ctf_stream_id = -1ULL;
1069 lttng_ht_node_init_u64(&stream->node, stream->stream_handle);
1070 pthread_mutex_init(&stream->lock, NULL);
1071
1072 ret = utils_mkdir_recursive(stream->path_name, S_IRWXU | S_IRWXG);
1073 if (ret < 0) {
1074 ERR("relay creating output directory");
1075 goto end;
1076 }
1077
1078 /*
1079 * No need to use the run_as API here because whatever we receive, the relayd
1080 * uses its own credentials for the stream files.
1081 */
1082 ret = utils_create_stream_file(stream->path_name, stream->channel_name,
1083 stream->tracefile_size, 0, relayd_uid, relayd_gid, NULL);
1084 if (ret < 0) {
1085 ERR("Create output file");
1086 goto end;
1087 }
1088 stream->fd = ret;
1089 if (stream->tracefile_size) {
1090 DBG("Tracefile %s/%s_0 created", stream->path_name, stream->channel_name);
1091 } else {
1092 DBG("Tracefile %s/%s created", stream->path_name, stream->channel_name);
1093 }
1094
1095 trace = ctf_trace_find_by_path(session->ctf_traces_ht, stream->path_name);
1096 if (!trace) {
1097 trace = ctf_trace_create(stream->path_name);
1098 if (!trace) {
1099 ret = -1;
1100 goto end;
1101 }
1102 ctf_trace_add(session->ctf_traces_ht, trace);
1103 }
1104 ctf_trace_get_ref(trace);
1105
1106 if (!strncmp(stream->channel_name, DEFAULT_METADATA_NAME, NAME_MAX)) {
1107 stream->metadata_flag = 1;
1108 /* Assign quick reference to the metadata stream in the trace. */
1109 trace->metadata_stream = stream;
1110 }
1111
1112 /*
1113 * Add the stream to the recv list of the connection. Once the streams_sent
1114 * message is received, this list is emptied and the streams are marked as
1115 * ready for the viewer.
1116 */
1117 queue_stream(stream, conn);
1118
1119 /*
1120 * Add the stream both to the ctf_trace object and to the global stream ht,
1121 * since the data side of the relayd does not have the concept of session.
1122 */
1123 lttng_ht_add_unique_u64(relay_streams_ht, &stream->node);
1124 cds_list_add_tail(&stream->trace_list, &trace->stream_list);
1125
1126 session->stream_count++;
1127
1128 DBG("Relay new stream added %s with ID %" PRIu64, stream->channel_name,
1129 stream->stream_handle);
1130
1131 end:
1132 memset(&reply, 0, sizeof(reply));
1133 reply.handle = htobe64(stream->stream_handle);
1134 /* Send the stream handle to the client or a negative return code on error. */
1135 if (ret < 0) {
1136 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1137 /* stream was not properly added to the ht, so free it */
1138 free(stream);
1139 } else {
1140 reply.ret_code = htobe32(LTTNG_OK);
1141 }
1142
1143 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1144 sizeof(struct lttcomm_relayd_status_stream), 0);
1145 if (send_ret < 0) {
1146 ERR("Relay sending stream id");
1147 ret = send_ret;
1148 }
1149 rcu_read_unlock();
1150
1151 end_no_session:
1152 return ret;
1153
1154 err_free_stream:
1155 free(stream->path_name);
1156 free(stream->channel_name);
1157 free(stream);
1158 return ret;
1159 }
1160
1161 /*
1162 * relay_close_stream: close a specific stream
1163 */
1164 static
1165 int relay_close_stream(struct lttcomm_relayd_hdr *recv_hdr,
1166 struct relay_connection *conn)
1167 {
1168 int ret, send_ret;
1169 struct relay_session *session = conn->session;
1170 struct lttcomm_relayd_close_stream stream_info;
1171 struct lttcomm_relayd_generic_reply reply;
1172 struct relay_stream *stream;
1173
1174 DBG("Close stream received");
1175
1176 if (!session || conn->version_check_done == 0) {
1177 ERR("Trying to close a stream before version check");
1178 ret = -1;
1179 goto end_no_session;
1180 }
1181
1182 ret = conn->sock->ops->recvmsg(conn->sock, &stream_info,
1183 sizeof(struct lttcomm_relayd_close_stream), 0);
1184 if (ret < sizeof(struct lttcomm_relayd_close_stream)) {
1185 if (ret == 0) {
1186 /* Orderly shutdown. Not necessary to print an error. */
1187 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1188 } else {
1189 ERR("Relay didn't receive valid add_stream struct size : %d", ret);
1190 }
1191 ret = -1;
1192 goto end_no_session;
1193 }
1194
1195 rcu_read_lock();
1196 stream = stream_find_by_id(relay_streams_ht,
1197 be64toh(stream_info.stream_id));
1198 if (!stream) {
1199 ret = -1;
1200 goto end_unlock;
1201 }
1202
1203 stream->last_net_seq_num = be64toh(stream_info.last_net_seq_num);
1204 stream->close_flag = 1;
1205 session->stream_count--;
1206 assert(session->stream_count >= 0);
1207
1208 /* Check if we can close it now or else the data path will do it. */
1209 try_close_stream(session, stream);
1210
1211 end_unlock:
1212 rcu_read_unlock();
1213
1214 memset(&reply, 0, sizeof(reply));
1215 if (ret < 0) {
1216 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1217 } else {
1218 reply.ret_code = htobe32(LTTNG_OK);
1219 }
1220 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1221 sizeof(struct lttcomm_relayd_generic_reply), 0);
1222 if (send_ret < 0) {
1223 ERR("Relay sending stream id");
1224 ret = send_ret;
1225 }
1226
1227 end_no_session:
1228 return ret;
1229 }
1230
1231 /*
1232 * relay_unknown_command: send an error reply if an unknown command is received
1233 */
1234 static
1235 void relay_unknown_command(struct relay_connection *conn)
1236 {
1237 struct lttcomm_relayd_generic_reply reply;
1238 int ret;
1239
1240 memset(&reply, 0, sizeof(reply));
1241 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1242 ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1243 sizeof(struct lttcomm_relayd_generic_reply), 0);
1244 if (ret < 0) {
1245 ERR("Relay sending unknown command");
1246 }
1247 }
1248
1249 /*
1250 * relay_start: send an acknowledgment to the client to tell if we are
1251 * ready to receive data. We are ready if a session is established.
1252 */
1253 static
1254 int relay_start(struct lttcomm_relayd_hdr *recv_hdr,
1255 struct relay_connection *conn)
1256 {
1257 int ret = htobe32(LTTNG_OK);
1258 struct lttcomm_relayd_generic_reply reply;
1259 struct relay_session *session = conn->session;
1260
1261 if (!session) {
1262 DBG("Trying to start the streaming without a session established");
1263 ret = htobe32(LTTNG_ERR_UNK);
1264 }
1265
1266 memset(&reply, 0, sizeof(reply));
1267 reply.ret_code = ret;
1268 ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1269 sizeof(struct lttcomm_relayd_generic_reply), 0);
1270 if (ret < 0) {
1271 ERR("Relay sending start ack");
1272 }
1273
1274 return ret;
1275 }
1276
1277 /*
1278 * Append padding to the file pointed to by the file descriptor fd.
1279 */
1280 static int write_padding_to_file(int fd, uint32_t size)
1281 {
1282 ssize_t ret = 0;
1283 char *zeros;
1284
1285 if (size == 0) {
1286 goto end;
1287 }
1288
1289 zeros = zmalloc(size);
1290 if (zeros == NULL) {
1291 PERROR("zmalloc zeros for padding");
1292 ret = -1;
1293 goto end;
1294 }
1295
1296 ret = lttng_write(fd, zeros, size);
1297 if (ret < size) {
1298 PERROR("write padding to file");
1299 }
1300
1301 free(zeros);
1302
1303 end:
1304 return ret;
1305 }
1306
1307 /*
1308 * relay_recv_metadata: receive the metadata for the session.
1309 */
1310 static
1311 int relay_recv_metadata(struct lttcomm_relayd_hdr *recv_hdr,
1312 struct relay_connection *conn)
1313 {
1314 int ret = htobe32(LTTNG_OK);
1315 ssize_t size_ret;
1316 struct relay_session *session = conn->session;
1317 struct lttcomm_relayd_metadata_payload *metadata_struct;
1318 struct relay_stream *metadata_stream;
1319 uint64_t data_size, payload_size;
1320 struct ctf_trace *ctf_trace;
1321
1322 if (!session) {
1323 ERR("Metadata sent before version check");
1324 ret = -1;
1325 goto end;
1326 }
1327
1328 data_size = payload_size = be64toh(recv_hdr->data_size);
1329 if (data_size < sizeof(struct lttcomm_relayd_metadata_payload)) {
1330 ERR("Incorrect data size");
1331 ret = -1;
1332 goto end;
1333 }
1334 payload_size -= sizeof(struct lttcomm_relayd_metadata_payload);
1335
1336 if (data_buffer_size < data_size) {
1337 /* Use a temporary pointer so the buffer can be freed if realloc fails. */
1338 char *tmp_data_ptr;
1339
1340 tmp_data_ptr = realloc(data_buffer, data_size);
1341 if (!tmp_data_ptr) {
1342 ERR("Allocating data buffer");
1343 free(data_buffer);
1344 ret = -1;
1345 goto end;
1346 }
1347 data_buffer = tmp_data_ptr;
1348 data_buffer_size = data_size;
1349 }
1350 memset(data_buffer, 0, data_size);
1351 DBG2("Relay receiving metadata, waiting for %" PRIu64 " bytes", data_size);
1352 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer, data_size, 0);
1353 if (ret < 0 || ret != data_size) {
1354 if (ret == 0) {
1355 /* Orderly shutdown. Not necessary to print an error. */
1356 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1357 } else {
1358 ERR("Relay didn't receive the whole metadata");
1359 }
1360 ret = -1;
1361 goto end;
1362 }
1363 metadata_struct = (struct lttcomm_relayd_metadata_payload *) data_buffer;
1364
1365 rcu_read_lock();
1366 metadata_stream = stream_find_by_id(relay_streams_ht,
1367 be64toh(metadata_struct->stream_id));
1368 if (!metadata_stream) {
1369 ret = -1;
1370 goto end_unlock;
1371 }
1372
1373 size_ret = lttng_write(metadata_stream->fd, metadata_struct->payload,
1374 payload_size);
1375 if (size_ret < payload_size) {
1376 ERR("Relay error writing metadata on file");
1377 ret = -1;
1378 goto end_unlock;
1379 }
1380
1381 ret = write_padding_to_file(metadata_stream->fd,
1382 be32toh(metadata_struct->padding_size));
1383 if (ret < 0) {
1384 goto end_unlock;
1385 }
1386
1387 ctf_trace = ctf_trace_find_by_path(session->ctf_traces_ht,
1388 metadata_stream->path_name);
1389 assert(ctf_trace);
1390 ctf_trace->metadata_received +=
1391 payload_size + be32toh(metadata_struct->padding_size);
1392
1393 DBG2("Relay metadata written");
1394
1395 end_unlock:
1396 rcu_read_unlock();
1397 end:
1398 return ret;
1399 }
1400
1401 /*
1402 * relay_send_version: send relayd version number
1403 */
1404 static
1405 int relay_send_version(struct lttcomm_relayd_hdr *recv_hdr,
1406 struct relay_connection *conn)
1407 {
1408 int ret;
1409 struct lttcomm_relayd_version reply, msg;
1410
1411 assert(conn);
1412
1413 conn->version_check_done = 1;
1414
1415 /* Get version from the other side. */
1416 ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0);
1417 if (ret < 0 || ret != sizeof(msg)) {
1418 if (ret == 0) {
1419 /* Orderly shutdown. Not necessary to print an error. */
1420 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1421 } else {
1422 ERR("Relay failed to receive the version values.");
1423 }
1424 ret = -1;
1425 goto end;
1426 }
1427
1428 memset(&reply, 0, sizeof(reply));
1429 reply.major = RELAYD_VERSION_COMM_MAJOR;
1430 reply.minor = RELAYD_VERSION_COMM_MINOR;
1431
1432 /* Major versions must be the same */
1433 if (reply.major != be32toh(msg.major)) {
1434 DBG("Incompatible major versions (%u vs %u), deleting session",
1435 reply.major, be32toh(msg.major));
1436 destroy_session(conn->session, conn->sessions_ht);
1437 ret = 0;
1438 goto end;
1439 }
1440
1441 conn->major = reply.major;
1442 /* We adapt to the lowest compatible version */
1443 if (reply.minor <= be32toh(msg.minor)) {
1444 conn->minor = reply.minor;
1445 } else {
1446 conn->minor = be32toh(msg.minor);
1447 }
1448
1449 reply.major = htobe32(reply.major);
1450 reply.minor = htobe32(reply.minor);
1451 ret = conn->sock->ops->sendmsg(conn->sock, &reply,
1452 sizeof(struct lttcomm_relayd_version), 0);
1453 if (ret < 0) {
1454 ERR("Relay sending version");
1455 }
1456
1457 DBG("Version check done using protocol %u.%u", conn->major,
1458 conn->minor);
1459
1460 end:
1461 return ret;
1462 }
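/*
 * Version negotiation example: the major numbers must match exactly, and the
 * negotiated minor is the smaller of the two sides. For instance, if the peer
 * announces minor 2 while this relayd supports minor 4, conn->minor ends up
 * as 2 and the 2.2 command variants are used (see the switch on conn->minor
 * in relay_add_stream() above).
 */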
1463
1464 /*
1465 * Check for data pending for a given stream id from the session daemon.
1466 */
1467 static
1468 int relay_data_pending(struct lttcomm_relayd_hdr *recv_hdr,
1469 struct relay_connection *conn)
1470 {
1471 struct relay_session *session = conn->session;
1472 struct lttcomm_relayd_data_pending msg;
1473 struct lttcomm_relayd_generic_reply reply;
1474 struct relay_stream *stream;
1475 int ret;
1476 uint64_t last_net_seq_num, stream_id;
1477
1478 DBG("Data pending command received");
1479
1480 if (!session || conn->version_check_done == 0) {
1481 ERR("Trying to check for data before version check");
1482 ret = -1;
1483 goto end_no_session;
1484 }
1485
1486 ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0);
1487 if (ret < sizeof(msg)) {
1488 if (ret == 0) {
1489 /* Orderly shutdown. Not necessary to print an error. */
1490 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1491 } else {
1492 ERR("Relay didn't receive valid data_pending struct size : %d",
1493 ret);
1494 }
1495 ret = -1;
1496 goto end_no_session;
1497 }
1498
1499 stream_id = be64toh(msg.stream_id);
1500 last_net_seq_num = be64toh(msg.last_net_seq_num);
1501
1502 rcu_read_lock();
1503 stream = stream_find_by_id(relay_streams_ht, stream_id);
1504 if (stream == NULL) {
1505 ret = -1;
1506 goto end_unlock;
1507 }
1508
1509 DBG("Data pending for stream id %" PRIu64 " prev_seq %" PRIu64
1510 " and last_seq %" PRIu64, stream_id, stream->prev_seq,
1511 last_net_seq_num);
1512
1513 /* Avoid wrapping issue */
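/*
 * The unsigned subtraction followed by a signed cast keeps the comparison
 * correct across a sequence number wrap-around. For example, with
 * stream->prev_seq == 1 and last_net_seq_num == UINT64_MAX, the expression
 * (int64_t) (1 - UINT64_MAX) evaluates to 2, which is >= 0, so the data is
 * correctly reported as already written.
 */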
1514 if (((int64_t) (stream->prev_seq - last_net_seq_num)) >= 0) {
1515 /* Data has in fact been written and is NOT pending */
1516 ret = 0;
1517 } else {
1518 /* Data still being streamed thus pending */
1519 ret = 1;
1520 }
1521
1522 /* Pending check is now done. */
1523 stream->data_pending_check_done = 1;
1524
1525 end_unlock:
1526 rcu_read_unlock();
1527
1528 memset(&reply, 0, sizeof(reply));
1529 reply.ret_code = htobe32(ret);
1530 ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1531 if (ret < 0) {
1532 ERR("Relay data pending ret code failed");
1533 }
1534
1535 end_no_session:
1536 return ret;
1537 }
1538
1539 /*
1540 * Wait for the control socket to reach a quiescent state.
1541 *
1542 * Note that for now, when this command is received from the session daemon,
1543 * every command or piece of data previously received on the control socket
1544 * has already been handled, which is why we simply return OK here.
1545 */
1546 static
1547 int relay_quiescent_control(struct lttcomm_relayd_hdr *recv_hdr,
1548 struct relay_connection *conn)
1549 {
1550 int ret;
1551 uint64_t stream_id;
1552 struct relay_stream *stream;
1553 struct lttng_ht_iter iter;
1554 struct lttcomm_relayd_quiescent_control msg;
1555 struct lttcomm_relayd_generic_reply reply;
1556
1557 DBG("Checking quiescent state on control socket");
1558
1559 if (!conn->session || conn->version_check_done == 0) {
1560 ERR("Trying to check for data before version check");
1561 ret = -1;
1562 goto end_no_session;
1563 }
1564
1565 ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0);
1566 if (ret < sizeof(msg)) {
1567 if (ret == 0) {
1568 /* Orderly shutdown. Not necessary to print an error. */
1569 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1570 } else {
1571 ERR("Relay didn't receive valid begin data_pending struct size: %d",
1572 ret);
1573 }
1574 ret = -1;
1575 goto end_no_session;
1576 }
1577
1578 stream_id = be64toh(msg.stream_id);
1579
1580 rcu_read_lock();
1581 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1582 node.node) {
1583 if (stream->stream_handle == stream_id) {
1584 stream->data_pending_check_done = 1;
1585 DBG("Relay quiescent control pending flag set to %" PRIu64,
1586 stream_id);
1587 break;
1588 }
1589 }
1590 rcu_read_unlock();
1591
1592 memset(&reply, 0, sizeof(reply));
1593 reply.ret_code = htobe32(LTTNG_OK);
1594 ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1595 if (ret < 0) {
1596 ERR("Relay data quiescent control ret code failed");
1597 }
1598
1599 end_no_session:
1600 return ret;
1601 }
1602
1603 /*
1604 * Initialize a data pending command. This means that a client is about to ask
1605 * for data pending for each stream it holds. Simply iterate over all
1606 * streams of the session and clear the data_pending_check_done flag.
1607 *
1608 * This command returns to the client a LTTNG_OK code.
1609 */
1610 static
1611 int relay_begin_data_pending(struct lttcomm_relayd_hdr *recv_hdr,
1612 struct relay_connection *conn)
1613 {
1614 int ret;
1615 struct lttng_ht_iter iter;
1616 struct lttcomm_relayd_begin_data_pending msg;
1617 struct lttcomm_relayd_generic_reply reply;
1618 struct relay_stream *stream;
1619 uint64_t session_id;
1620
1621 assert(recv_hdr);
1622 assert(conn);
1623
1624 DBG("Init streams for data pending");
1625
1626 if (!conn->session || conn->version_check_done == 0) {
1627 ERR("Trying to check for data before version check");
1628 ret = -1;
1629 goto end_no_session;
1630 }
1631
1632 ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0);
1633 if (ret < sizeof(msg)) {
1634 if (ret == 0) {
1635 /* Orderly shutdown. Not necessary to print an error. */
1636 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1637 } else {
1638 ERR("Relay didn't receive valid begin data_pending struct size: %d",
1639 ret);
1640 }
1641 ret = -1;
1642 goto end_no_session;
1643 }
1644
1645 session_id = be64toh(msg.session_id);
1646
1647 /*
1648 * Iterate over all streams to set the begin data pending flag. For now, the
1649 * streams are indexed by stream handle so we have to iterate over all
1650 * streams to find the one associated with the right session_id.
1651 */
1652 rcu_read_lock();
1653 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1654 node.node) {
1655 if (stream->session_id == session_id) {
1656 stream->data_pending_check_done = 0;
1657 DBG("Set begin data pending flag to stream %" PRIu64,
1658 stream->stream_handle);
1659 }
1660 }
1661 rcu_read_unlock();
1662
1663 memset(&reply, 0, sizeof(reply));
1664 /* All good, send back reply. */
1665 reply.ret_code = htobe32(LTTNG_OK);
1666
1667 ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1668 if (ret < 0) {
1669 ERR("Relay begin data pending send reply failed");
1670 }
1671
1672 end_no_session:
1673 return ret;
1674 }
1675
1676 /*
1677 * End data pending command. This will check, for a given session id, if each
1678 * stream associated with it has its data_pending_check_done flag set. If not,
1679 * this means that the client lost track of the stream but the data is still
1680 * being streamed on our side. In this case, we inform the client that data is
1681 * inflight.
1682 *
1683 * Return to the client if there is data in flight or not with a ret_code.
1684 */
1685 static
1686 int relay_end_data_pending(struct lttcomm_relayd_hdr *recv_hdr,
1687 struct relay_connection *conn)
1688 {
1689 int ret;
1690 struct lttng_ht_iter iter;
1691 struct lttcomm_relayd_end_data_pending msg;
1692 struct lttcomm_relayd_generic_reply reply;
1693 struct relay_stream *stream;
1694 uint64_t session_id;
1695 uint32_t is_data_inflight = 0;
1696
1697 assert(recv_hdr);
1698 assert(conn);
1699
1700 DBG("End data pending command");
1701
1702 if (!conn->session || conn->version_check_done == 0) {
1703 ERR("Trying to check for data before version check");
1704 ret = -1;
1705 goto end_no_session;
1706 }
1707
1708 ret = conn->sock->ops->recvmsg(conn->sock, &msg, sizeof(msg), 0);
1709 if (ret < sizeof(msg)) {
1710 if (ret == 0) {
1711 /* Orderly shutdown. Not necessary to print an error. */
1712 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1713 } else {
1714 ERR("Relay didn't receive valid end data_pending struct size: %d",
1715 ret);
1716 }
1717 ret = -1;
1718 goto end_no_session;
1719 }
1720
1721 session_id = be64toh(msg.session_id);
1722
1723 /* Iterate over all streams to see if the begin data pending flag is set. */
1724 rcu_read_lock();
1725 cds_lfht_for_each_entry(relay_streams_ht->ht, &iter.iter, stream,
1726 node.node) {
1727 if (stream->session_id == session_id &&
1728 !stream->data_pending_check_done && !stream->terminated_flag) {
1729 is_data_inflight = 1;
1730 DBG("Data is still in flight for stream %" PRIu64,
1731 stream->stream_handle);
1732 break;
1733 }
1734 }
1735 rcu_read_unlock();
1736
1737 memset(&reply, 0, sizeof(reply));
1738 /* All good, send back reply. */
1739 reply.ret_code = htobe32(is_data_inflight);
1740
1741 ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1742 if (ret < 0) {
1743 ERR("Relay end data pending send reply failed");
1744 }
1745
1746 end_no_session:
1747 return ret;
1748 }
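/*
 * relay_begin_data_pending(), relay_data_pending() and relay_end_data_pending()
 * together implement the data pending handshake driven by the session daemon:
 * BEGIN clears data_pending_check_done for every stream of the session,
 * DATA_PENDING compares prev_seq against the last sequence number sent for one
 * stream at a time, and END reports whether any stream of the session still
 * has data in flight.
 */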
1749
1750 /*
1751 * Receive an index for a specific stream.
1752 *
1753 * Return 0 on success else a negative value.
1754 */
1755 static
1756 int relay_recv_index(struct lttcomm_relayd_hdr *recv_hdr,
1757 struct relay_connection *conn)
1758 {
1759 int ret, send_ret, index_created = 0;
1760 struct relay_session *session = conn->session;
1761 struct lttcomm_relayd_index index_info;
1762 struct relay_index *index, *wr_index = NULL;
1763 struct lttcomm_relayd_generic_reply reply;
1764 struct relay_stream *stream;
1765 uint64_t net_seq_num;
1766
1767 assert(conn);
1768
1769 DBG("Relay receiving index");
1770
1771 if (!session || conn->version_check_done == 0) {
1772 ERR("Trying to close a stream before version check");
1773 ret = -1;
1774 goto end_no_session;
1775 }
1776
1777 ret = conn->sock->ops->recvmsg(conn->sock, &index_info,
1778 sizeof(index_info), 0);
1779 if (ret < sizeof(index_info)) {
1780 if (ret == 0) {
1781 /* Orderly shutdown. Not necessary to print an error. */
1782 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
1783 } else {
1784 ERR("Relay didn't receive valid index struct size : %d", ret);
1785 }
1786 ret = -1;
1787 goto end_no_session;
1788 }
1789
1790 net_seq_num = be64toh(index_info.net_seq_num);
1791
1792 rcu_read_lock();
1793 stream = stream_find_by_id(relay_streams_ht,
1794 be64toh(index_info.relay_stream_id));
1795 if (!stream) {
1796 ret = -1;
1797 goto end_rcu_unlock;
1798 }
1799
1800 /* Live beacon handling */
1801 if (index_info.packet_size == 0) {
1802 DBG("Received live beacon for stream %" PRIu64, stream->stream_handle);
1803
1804 /*
1805 * Only flag a stream inactive when it has already received data
1806 * and no indexes are in flight.
1807 */
1808 if (stream->total_index_received > 0 && stream->indexes_in_flight == 0) {
1809 stream->beacon_ts_end = be64toh(index_info.timestamp_end);
1810 }
1811 ret = 0;
1812 goto end_rcu_unlock;
1813 } else {
1814 stream->beacon_ts_end = -1ULL;
1815 }
1816
1817 index = relay_index_find(stream->stream_handle, net_seq_num);
1818 if (!index) {
1819 /* A successful creation will add the object to the HT. */
1820 index = relay_index_create(stream->stream_handle, net_seq_num);
1821 if (!index) {
1822 goto end_rcu_unlock;
1823 }
1824 index_created = 1;
1825 stream->indexes_in_flight++;
1826 }
1827
1828 copy_index_control_data(index, &index_info);
1829 if (stream->ctf_stream_id == -1ULL) {
1830 stream->ctf_stream_id = be64toh(index_info.stream_id);
1831 }
1832
1833 if (index_created) {
1834 /*
1835 * Try to add the relay index object to the hash table. If an object
1836 * already exists, free the newly created index, set the data in the
1837 * existing object and write it to disk.
1838 */
1839 relay_index_add(index, &wr_index);
1840 if (wr_index) {
1841 copy_index_control_data(wr_index, &index_info);
1842 free(index);
1843 }
1844 } else {
1845 /* The index already exists so write it on disk. */
1846 wr_index = index;
1847 }
1848
1849 /* Do we have a writable ready index to write on disk. */
1850 if (wr_index) {
1851 ret = relay_index_write(wr_index->fd, wr_index);
1852 if (ret < 0) {
1853 goto end_rcu_unlock;
1854 }
1855 stream->total_index_received++;
1856 stream->indexes_in_flight--;
1857 assert(stream->indexes_in_flight >= 0);
1858 }
1859
1860 end_rcu_unlock:
1861 rcu_read_unlock();
1862
1863 memset(&reply, 0, sizeof(reply));
1864 if (ret < 0) {
1865 reply.ret_code = htobe32(LTTNG_ERR_UNK);
1866 } else {
1867 reply.ret_code = htobe32(LTTNG_OK);
1868 }
1869 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1870 if (send_ret < 0) {
1871 ERR("Relay sending close index id reply");
1872 ret = send_ret;
1873 }
1874
1875 end_no_session:
1876 return ret;
1877 }
1878
1879 /*
1880 * Receive the streams_sent message.
1881 *
1882 * Return 0 on success else a negative value.
1883 */
1884 static
1885 int relay_streams_sent(struct lttcomm_relayd_hdr *recv_hdr,
1886 struct relay_connection *conn)
1887 {
1888 int ret, send_ret;
1889 struct lttcomm_relayd_generic_reply reply;
1890
1891 assert(conn);
1892
1893 DBG("Relay receiving streams_sent");
1894
1895 if (!conn->session || conn->version_check_done == 0) {
1896 ERR("Trying to close a stream before version check");
1897 ret = -1;
1898 goto end_no_session;
1899 }
1900
1901 /*
1902 * Flag every pending stream in the connection recv list that they are
1903 * ready to be used by the viewer.
1904 */
1905 set_viewer_ready_flag(conn);
1906
1907 /*
1908 * Inform the viewer that there are new streams in the session.
1909 */
1910 if (conn->session->viewer_refcount) {
1911 uatomic_set(&conn->session->new_streams, 1);
1912 }
1913
1914 memset(&reply, 0, sizeof(reply));
1915 reply.ret_code = htobe32(LTTNG_OK);
1916 send_ret = conn->sock->ops->sendmsg(conn->sock, &reply, sizeof(reply), 0);
1917 if (send_ret < 0) {
1918 ERR("Relay sending sent_stream reply");
1919 ret = send_ret;
1920 } else {
1921 /* Success. */
1922 ret = 0;
1923 }
1924
1925 end_no_session:
1926 return ret;
1927 }
1928
1929 /*
1930 * Process the commands received on the control socket
1931 */
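/*
 * Note that every handler dispatched below that looks up relay_streams_ht
 * does so between rcu_read_lock() and rcu_read_unlock(); the lock and unlock
 * must stay balanced on every return path (see the end_unlock /
 * end_rcu_unlock labels in the handlers above).
 */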
1932 static
1933 int relay_process_control(struct lttcomm_relayd_hdr *recv_hdr,
1934 struct relay_connection *conn)
1935 {
1936 int ret = 0;
1937
1938 switch (be32toh(recv_hdr->cmd)) {
1939 case RELAYD_CREATE_SESSION:
1940 ret = relay_create_session(recv_hdr, conn);
1941 break;
1942 case RELAYD_ADD_STREAM:
1943 ret = relay_add_stream(recv_hdr, conn);
1944 break;
1945 case RELAYD_START_DATA:
1946 ret = relay_start(recv_hdr, conn);
1947 break;
1948 case RELAYD_SEND_METADATA:
1949 ret = relay_recv_metadata(recv_hdr, conn);
1950 break;
1951 case RELAYD_VERSION:
1952 ret = relay_send_version(recv_hdr, conn);
1953 break;
1954 case RELAYD_CLOSE_STREAM:
1955 ret = relay_close_stream(recv_hdr, conn);
1956 break;
1957 case RELAYD_DATA_PENDING:
1958 ret = relay_data_pending(recv_hdr, conn);
1959 break;
1960 case RELAYD_QUIESCENT_CONTROL:
1961 ret = relay_quiescent_control(recv_hdr, conn);
1962 break;
1963 case RELAYD_BEGIN_DATA_PENDING:
1964 ret = relay_begin_data_pending(recv_hdr, conn);
1965 break;
1966 case RELAYD_END_DATA_PENDING:
1967 ret = relay_end_data_pending(recv_hdr, conn);
1968 break;
1969 case RELAYD_SEND_INDEX:
1970 ret = relay_recv_index(recv_hdr, conn);
1971 break;
1972 case RELAYD_STREAMS_SENT:
1973 ret = relay_streams_sent(recv_hdr, conn);
1974 break;
1975 case RELAYD_UPDATE_SYNC_INFO:
1976 default:
1977 ERR("Received unknown command (%u)", be32toh(recv_hdr->cmd));
1978 relay_unknown_command(conn);
1979 ret = -1;
1980 goto end;
1981 }
1982
1983 end:
1984 return ret;
1985 }
1986
1987 /*
1988 * Handle index for a data stream.
1989 *
1990 * RCU read side lock MUST be acquired.
1991 *
1992 * Return 0 on success else a negative value.
1993 */
1994 static int handle_index_data(struct relay_stream *stream, uint64_t net_seq_num,
1995 int rotate_index)
1996 {
1997 int ret = 0, index_created = 0;
1998 uint64_t stream_id, data_offset;
1999 struct relay_index *index, *wr_index = NULL;
2000
2001 assert(stream);
2002
2003 stream_id = stream->stream_handle;
2004 /* Get data offset because we are about to update the index. */
2005 data_offset = htobe64(stream->tracefile_size_current);
2006
2007 /*
2008 * Look up an existing index for that stream id/sequence number. If one
2009 * exists, the control thread already received the data for it, thus we need
2010 * to write it to disk.
2011 */
2012 index = relay_index_find(stream_id, net_seq_num);
2013 if (!index) {
2014 /* A successful creation will add the object to the HT. */
2015 index = relay_index_create(stream_id, net_seq_num);
2016 if (!index) {
2017 ret = -1;
2018 goto error;
2019 }
2020 index_created = 1;
2021 stream->indexes_in_flight++;
2022 }
2023
2024 if (rotate_index || stream->index_fd < 0) {
2025 index->to_close_fd = stream->index_fd;
2026 ret = index_create_file(stream->path_name, stream->channel_name,
2027 relayd_uid, relayd_gid, stream->tracefile_size,
2028 stream->tracefile_count_current);
2029 if (ret < 0) {
2030 /* This will close the stream's index fd if there is one. */
2031 relay_index_free_safe(index);
2032 goto error;
2033 }
2034 stream->index_fd = ret;
2035 }
2036 index->fd = stream->index_fd;
2037 index->index_data.offset = data_offset;
2038
2039 if (index_created) {
2040 /*
2041 * Try to add the relay index object to the hash table. If an object
2042 * already exists, free the newly created index and set the data in the existing one.
2043 */
2044 relay_index_add(index, &wr_index);
2045 if (wr_index) {
2046 /* Copy back data from the created index. */
2047 wr_index->fd = index->fd;
2048 wr_index->to_close_fd = index->to_close_fd;
2049 wr_index->index_data.offset = data_offset;
2050 free(index);
2051 }
2052 } else {
2053 /* The index already exists so write it on disk. */
2054 wr_index = index;
2055 }
2056
2057 /* Do we have a writable ready index to write on disk. */
2058 if (wr_index) {
2059 ret = relay_index_write(wr_index->fd, wr_index);
2060 if (ret < 0) {
2061 goto error;
2062 }
2063 stream->total_index_received++;
2064 stream->indexes_in_flight--;
2065 assert(stream->indexes_in_flight >= 0);
2066 }
2067
2068 error:
2069 return ret;
2070 }
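/*
 * Index objects are filled from two directions: copy_index_control_data()
 * stores the header fields received on the control socket, while
 * handle_index_data() above records the fd and data offset seen on the data
 * socket. Whichever side completes the index second finds the existing object
 * through relay_index_find()/relay_index_add() and is the one that writes the
 * completed index to disk.
 */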
2071
2072 /*
2073 * relay_process_data: Process the data received on the data socket
2074 */
2075 static
2076 int relay_process_data(struct relay_connection *conn)
2077 {
2078 int ret = 0, rotate_index = 0;
2079 ssize_t size_ret;
2080 struct relay_stream *stream;
2081 struct lttcomm_relayd_data_hdr data_hdr;
2082 uint64_t stream_id;
2083 uint64_t net_seq_num;
2084 uint32_t data_size;
2085 struct relay_session *session;
2086
2087 assert(conn);
2088
2089 ret = conn->sock->ops->recvmsg(conn->sock, &data_hdr,
2090 sizeof(struct lttcomm_relayd_data_hdr), 0);
2091 if (ret <= 0) {
2092 if (ret == 0) {
2093 /* Orderly shutdown. Not necessary to print an error. */
2094 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
2095 } else {
2096 ERR("Unable to receive data header on sock %d", conn->sock->fd);
2097 }
2098 ret = -1;
2099 goto end;
2100 }
2101
2102 stream_id = be64toh(data_hdr.stream_id);
2103
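/*
 * The stream and session looked up below are RCU-protected: hold the
 * read-side lock for as long as they are used and release it on every
 * exit path through the end_rcu_unlock label.
 */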
2104 rcu_read_lock();
2105 stream = stream_find_by_id(relay_streams_ht, stream_id);
2106 if (!stream) {
2107 ret = -1;
2108 goto end_rcu_unlock;
2109 }
2110
2111 session = session_find_by_id(conn->sessions_ht, stream->session_id);
2112 assert(session);
2113
2114 data_size = be32toh(data_hdr.data_size);
2115 if (data_buffer_size < data_size) {
2116 char *tmp_data_ptr;
2117
2118 tmp_data_ptr = realloc(data_buffer, data_size);
2119 if (!tmp_data_ptr) {
2120 ERR("Allocating data buffer");
2121 free(data_buffer);
/* Reset the buffer state so the next use does not touch a dangling pointer. */
data_buffer = NULL;
data_buffer_size = 0;
2122 ret = -1;
2123 goto end_rcu_unlock;
2124 }
2125 data_buffer = tmp_data_ptr;
2126 data_buffer_size = data_size;
2127 }
2128 memset(data_buffer, 0, data_size);
2129
2130 net_seq_num = be64toh(data_hdr.net_seq_num);
2131
2132 DBG3("Receiving data of size %u for stream id %" PRIu64 " seqnum %" PRIu64,
2133 data_size, stream_id, net_seq_num);
2134 ret = conn->sock->ops->recvmsg(conn->sock, data_buffer, data_size, 0);
2135 if (ret <= 0) {
2136 if (ret == 0) {
2137 /* Orderly shutdown. Not necessary to print an error. */
2138 DBG("Socket %d did an orderly shutdown", conn->sock->fd);
2139 }
2140 ret = -1;
2141 goto end_rcu_unlock;
2142 }
2143
2144 /* Check if a rotation is needed. */
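/*
 * Tracefiles form a ring of tracefile_count files: once writing this
 * payload would push the current file past tracefile_size, we move on
 * to the next file in the ring. When the ring wraps back to index 0,
 * older tracefiles start being overwritten and any viewer stream
 * reading them must be notified.
 */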
2145 if (stream->tracefile_size > 0 &&
2146 (stream->tracefile_size_current + data_size) >
2147 stream->tracefile_size) {
2148 struct relay_viewer_stream *vstream;
2149 uint64_t new_id;
2150
2151 new_id = (stream->tracefile_count_current + 1) %
2152 stream->tracefile_count;
2153 /*
2154 * When we wrap-around back to 0, we start overwriting old
2155 * trace data.
2156 */
2157 if (!stream->tracefile_overwrite && new_id == 0) {
2158 stream->tracefile_overwrite = 1;
2159 }
2160 pthread_mutex_lock(&stream->viewer_stream_rotation_lock);
2161 if (stream->tracefile_overwrite) {
2162 stream->oldest_tracefile_id =
2163 (stream->oldest_tracefile_id + 1) %
2164 stream->tracefile_count;
2165 }
2166 vstream = viewer_stream_find_by_id(stream->stream_handle);
2167 if (vstream) {
2168 /*
2169 * The viewer is reading the file about to be
2170 * overwritten. Set the abort flag so it closes the
2171 * FDs it is currently using and handles the fault.
2172 */
2173 if (vstream->tracefile_count_current == new_id) {
2174 pthread_mutex_lock(&vstream->overwrite_lock);
2175 vstream->abort_flag = 1;
2176 pthread_mutex_unlock(&vstream->overwrite_lock);
2177 DBG("Streaming side setting abort_flag on stream %s_%lu\n",
2178 stream->channel_name, new_id);
2179 } else if (vstream->tracefile_count_current ==
2180 stream->tracefile_count_current) {
2181 /*
2182 * The reader and writer are in the
2183 * same trace file; inform the viewer
2184 * that no new index will ever be added
2185 * to this file.
2186 */
2187 vstream->close_write_flag = 1;
2188 }
2189 }
2190 ret = utils_rotate_stream_file(stream->path_name, stream->channel_name,
2191 stream->tracefile_size, stream->tracefile_count,
2192 relayd_uid, relayd_gid, stream->fd,
2193 &(stream->tracefile_count_current), &stream->fd);
2194 stream->total_index_received = 0;
2195 pthread_mutex_unlock(&stream->viewer_stream_rotation_lock);
2196 if (ret < 0) {
2197 ERR("Rotating stream output file");
2198 goto end_rcu_unlock;
2199 }
2200 /* Reset current size because we just performed a stream rotation. */
2201 stream->tracefile_size_current = 0;
2202 rotate_index = 1;
2203 }
2204
2205 /*
2206 * Indexes are handled in protocol version 2.4 and above. Also, indexes
2207 * are NOT supported for snapshot sessions.
2208 */
2209 if (session->minor >= 4 && !session->snapshot) {
2210 ret = handle_index_data(stream, net_seq_num, rotate_index);
2211 if (ret < 0) {
2212 goto end_rcu_unlock;
2213 }
2214 }
2215
2216 /* Write data to stream output fd. */
2217 size_ret = lttng_write(stream->fd, data_buffer, data_size);
2218 if (size_ret < data_size) {
2219 ERR("Relay error writing data to file");
2220 ret = -1;
2221 goto end_rcu_unlock;
2222 }
2223
2224 DBG2("Relay wrote %d bytes to tracefile for stream id %" PRIu64,
2225 ret, stream->stream_handle);
2226
2227 ret = write_padding_to_file(stream->fd, be32toh(data_hdr.padding_size));
2228 if (ret < 0) {
2229 goto end_rcu_unlock;
2230 }
2231 stream->tracefile_size_current += data_size + be32toh(data_hdr.padding_size);
2232
2233 stream->prev_seq = net_seq_num;
2234
2235 try_close_stream(session, stream);
2236
2237 end_rcu_unlock:
2238 rcu_read_unlock();
2239 end:
2240 return ret;
2241 }
2242
2243 static
2244 void cleanup_connection_pollfd(struct lttng_poll_event *events, int pollfd)
2245 {
2246 int ret;
2247
2248 assert(events);
2249
2250 (void) lttng_poll_del(events, pollfd);
2251
2252 ret = close(pollfd);
2253 if (ret < 0) {
2254 ERR("Closing pollfd %d", pollfd);
2255 }
2256 }
2257
2258 static void destroy_connection(struct lttng_ht *relay_connections_ht,
2259 struct relay_connection *conn)
2260 {
2261 assert(relay_connections_ht);
2262 assert(conn);
2263
2264 connection_delete(relay_connections_ht, conn);
2265
2266 /* For the control socket, we try to destroy the session. */
2267 if (conn->type == RELAY_CONTROL && conn->session) {
2268 destroy_session(conn->session, conn->sessions_ht);
2269 }
2270
2271 connection_destroy(conn);
2272 }
2273
2274 /*
2275 * This thread does the actual work
2276 */
2277 static
2278 void *relay_thread_worker(void *data)
2279 {
2280 int ret, err = -1, last_seen_data_fd = -1;
2281 uint32_t nb_fd;
2282 struct relay_connection *conn;
2283 struct lttng_poll_event events;
2284 struct lttng_ht *relay_connections_ht;
2285 struct lttng_ht_iter iter;
2286 struct lttcomm_relayd_hdr recv_hdr;
2287 struct relay_local_data *relay_ctx = (struct relay_local_data *) data;
2288 struct lttng_ht *sessions_ht = relay_ctx->sessions_ht;
2289 struct relay_index *index;
2290
2291 DBG("[thread] Relay worker started");
2292
2293 rcu_register_thread();
2294
2295 health_register(health_relayd, HEALTH_RELAYD_TYPE_WORKER);
2296
2297 if (testpoint(relayd_thread_worker)) {
2298 goto error_testpoint;
2299 }
2300
2301 health_code_update();
2302
2303 /* table of connections indexed on socket */
2304 relay_connections_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG);
2305 if (!relay_connections_ht) {
2306 goto relay_connections_ht_error;
2307 }
2308
2309 /* Table of received indexes, indexed by stream handle and net_seq_num. */
2310 indexes_ht = lttng_ht_new(0, LTTNG_HT_TYPE_TWO_U64);
2311 if (!indexes_ht) {
2312 goto indexes_ht_error;
2313 }
2314
2315 ret = create_thread_poll_set(&events, 2);
2316 if (ret < 0) {
2317 goto error_poll_create;
2318 }
2319
2320 ret = lttng_poll_add(&events, relay_conn_pipe[0], LPOLLIN | LPOLLRDHUP);
2321 if (ret < 0) {
2322 goto error;
2323 }
2324
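/*
 * The poll loop is restarted from this label when the wait is
 * interrupted (EINTR) or after a data payload has been processed, so
 * that the control socket gets re-checked before more data is read.
 */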
2325 restart:
2326 while (1) {
2327 int idx = -1, i, seen_control = 0, last_notdel_data_fd = -1;
2328
2329 health_code_update();
2330
2331 /* Infinite blocking call, waiting for transmission */
2332 DBG3("Relayd worker thread polling...");
2333 health_poll_entry();
2334 ret = lttng_poll_wait(&events, -1);
2335 health_poll_exit();
2336 if (ret < 0) {
2337 /*
2338 * Restart interrupted system call.
2339 */
2340 if (errno == EINTR) {
2341 goto restart;
2342 }
2343 goto error;
2344 }
2345
2346 nb_fd = ret;
2347
2348 /*
2349 * Process control. The control connection is prioritised so we don't
2350 * starve it with high-throughput tracing data on the data
2351 * connection.
2352 */
2353 for (i = 0; i < nb_fd; i++) {
2354 /* Fetch once the poll data */
2355 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
2356 int pollfd = LTTNG_POLL_GETFD(&events, i);
2357
2358 health_code_update();
2359
2360 /* Thread quit pipe has been closed. Killing thread. */
2361 ret = check_thread_quit_pipe(pollfd, revents);
2362 if (ret) {
2363 err = 0;
2364 goto exit;
2365 }
2366
2367 /* Inspect the relay conn pipe for new connection */
2368 if (pollfd == relay_conn_pipe[0]) {
2369 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2370 ERR("Relay connection pipe error");
2371 goto error;
2372 } else if (revents & LPOLLIN) {
2373 ret = lttng_read(relay_conn_pipe[0], &conn, sizeof(conn));
2374 if (ret < 0) {
2375 goto error;
2376 }
2377 conn->sessions_ht = sessions_ht;
2378 connection_init(conn);
2379 lttng_poll_add(&events, conn->sock->fd,
2380 LPOLLIN | LPOLLRDHUP);
2381 rcu_read_lock();
2382 lttng_ht_add_unique_ulong(relay_connections_ht,
2383 &conn->sock_n);
2384 rcu_read_unlock();
2385 DBG("Connection socket %d added", conn->sock->fd);
2386 }
2387 } else {
2388 rcu_read_lock();
2389 conn = connection_find_by_sock(relay_connections_ht, pollfd);
2390 /* If not found, there is a synchronization issue. */
2391 assert(conn);
2392
2393 if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
2394 cleanup_connection_pollfd(&events, pollfd);
2395 destroy_connection(relay_connections_ht, conn);
2396 if (last_seen_data_fd == pollfd) {
2397 last_seen_data_fd = last_notdel_data_fd;
2398 }
2399 } else if (revents & LPOLLIN) {
2400 if (conn->type == RELAY_CONTROL) {
2401 ret = conn->sock->ops->recvmsg(conn->sock, &recv_hdr,
2402 sizeof(recv_hdr), 0);
2403 if (ret <= 0) {
2404 /* Connection closed */
2405 cleanup_connection_pollfd(&events, pollfd);
2406 destroy_connection(relay_connections_ht, conn);
2407 DBG("Control connection closed with %d", pollfd);
2408 } else {
2409 ret = relay_process_control(&recv_hdr, conn);
2410 if (ret < 0) {
2411 /* Clear the session on error. */
2412 cleanup_connection_pollfd(&events, pollfd);
2413 destroy_connection(relay_connections_ht, conn);
2414 DBG("Connection closed with %d", pollfd);
2415 }
2416 seen_control = 1;
2417 }
2418 } else {
2419 /*
2420 * Record the last data fd seen that was not deleted. It will be
2421 * used as the last seen fd if any fd gets deleted in
2422 * this first loop.
2423 */
2424 last_notdel_data_fd = pollfd;
2425 }
2426 } else {
2427 ERR("Unknown poll events %u for sock %d", revents, pollfd);
2428 }
2429 rcu_read_unlock();
2430 }
2431 }
2432
2433 /*
2434 * The last loop handled a control request; go back to poll to make
2435 * sure we prioritise the control socket.
2436 */
2437 if (seen_control) {
2438 continue;
2439 }
2440
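/*
 * Resume data processing just after the last data fd that was serviced
 * so a single busy data connection cannot starve the others: locate
 * its position in the current poll set and start the data loop at the
 * following entry.
 */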
2441 if (last_seen_data_fd >= 0) {
2442 for (i = 0; i < nb_fd; i++) {
2443 int pollfd = LTTNG_POLL_GETFD(&events, i);
2444
2445 health_code_update();
2446
2447 if (last_seen_data_fd == pollfd) {
2448 idx = i;
2449 break;
2450 }
2451 }
2452 }
2453
2454 /* Process data connection. */
2455 for (i = idx + 1; i < nb_fd; i++) {
2456 /* Fetch the poll data. */
2457 uint32_t revents = LTTNG_POLL_GETEV(&events, i);
2458 int pollfd = LTTNG_POLL_GETFD(&events, i);
2459
2460 health_code_update();
2461
2462 /* Skip the command pipe. It's handled in the first loop. */
2463 if (pollfd == relay_conn_pipe[0]) {
2464 continue;
2465 }
2466
2467 if (revents) {
2468 rcu_read_lock();
2469 conn = connection_find_by_sock(relay_connections_ht, pollfd);
2470 if (!conn) {
2471 /* Skip it. It might have been removed already. */
2472 rcu_read_unlock();
2473 continue;
2474 }
2475
2476 if (revents & LPOLLIN) {
2477 if (conn->type != RELAY_DATA) {
2478 rcu_read_unlock();
2479 continue;
2480 }
2481
2482 ret = relay_process_data(conn);
2483 /* Connection closed */
2484 if (ret < 0) {
2485 cleanup_connection_pollfd(&events, pollfd);
2486 destroy_connection(relay_connections_ht, conn);
2487 DBG("Data connection closed with %d", pollfd);
2488 /*
2489 * The successful path below records the last seen fd
2490 * before restarting the poll loop; here we do not care
2491 * since we simply continue after the connection is deleted.
2492 */
2493 } else {
2494 /* Keep last seen port. */
2495 last_seen_data_fd = pollfd;
2496 rcu_read_unlock();
2497 goto restart;
2498 }
2499 }
2500 rcu_read_unlock();
2501 }
2502 }
2503 last_seen_data_fd = -1;
2504 }
2505
2506 /* Normal exit, no error */
2507 ret = 0;
2508
2509 exit:
2510 error:
2511 lttng_poll_clean(&events);
2512
2513 /* Clean up remaining connection objects. */
2514 rcu_read_lock();
2515 cds_lfht_for_each_entry(relay_connections_ht->ht, &iter.iter, conn,
2516 sock_n.node) {
2517 health_code_update();
2518 destroy_connection(relay_connections_ht, conn);
2519 }
2520 rcu_read_unlock();
2521 error_poll_create:
2522 rcu_read_lock();
2523 cds_lfht_for_each_entry(indexes_ht->ht, &iter.iter, index,
2524 index_n.node) {
2525 health_code_update();
2526 relay_index_delete(index);
2527 relay_index_free_safe(index);
2528 }
2529 rcu_read_unlock();
2530 lttng_ht_destroy(indexes_ht);
2531 indexes_ht_error:
2532 lttng_ht_destroy(relay_connections_ht);
2533 relay_connections_ht_error:
2534 /* Close relay conn pipes */
2535 utils_close_pipe(relay_conn_pipe);
2536 if (err) {
2537 DBG("Thread exited with error");
2538 }
2539 DBG("Worker thread cleanup complete");
2540 free(data_buffer);
2541 error_testpoint:
2542 if (err) {
2543 health_error();
2544 ERR("Health error occurred in %s", __func__);
2545 }
2546 health_unregister(health_relayd);
2547 rcu_unregister_thread();
2548 stop_threads();
2549 return NULL;
2550 }
2551
2552 /*
2553 * Create the relay connection pipe used to wake up the worker thread.
2554 * Closed in cleanup().
2555 */
2556 static int create_relay_conn_pipe(void)
2557 {
2558 int ret;
2559
2560 ret = utils_create_pipe_cloexec(relay_conn_pipe);
2561
2562 return ret;
2563 }
2564
2565 /*
2566 * main
2567 */
2568 int main(int argc, char **argv)
2569 {
2570 int ret = 0;
2571 void *status;
2572 struct relay_local_data *relay_ctx;
2573
2574 /* Parse arguments */
2575 progname = argv[0];
2576 if ((ret = parse_args(argc, argv)) < 0) {
2577 goto exit;
2578 }
2579
2580 if ((ret = set_signal_handler()) < 0) {
2581 goto exit;
2582 }
2583
2584 /* Try to create directory if -o, --output is specified. */
2585 if (opt_output_path) {
2586 if (*opt_output_path != '/') {
2587 ERR("Please specify an absolute path for -o, --output PATH");
2588 goto exit;
2589 }
2590
2591 ret = utils_mkdir_recursive(opt_output_path, S_IRWXU | S_IRWXG);
2592 if (ret < 0) {
2593 ERR("Unable to create %s", opt_output_path);
2594 goto exit;
2595 }
2596 }
2597
2598 /* Daemonize */
2599 if (opt_daemon || opt_background) {
2600 int i;
2601
2602 ret = lttng_daemonize(&child_ppid, &recv_child_signal,
2603 !opt_background);
2604 if (ret < 0) {
2605 goto exit;
2606 }
2607
2608 /*
2609 * We are in the child. Make sure all other file
2610 * descriptors are closed, in case we are called with
2611 * more opened file descriptors than the standard ones.
2612 */
2613 for (i = 3; i < sysconf(_SC_OPEN_MAX); i++) {
2614 (void) close(i);
2615 }
2616 }
2617
2618 /* Create thread quit pipe */
2619 if ((ret = init_thread_quit_pipe()) < 0) {
2620 goto error;
2621 }
2622
2623 /* We need those values for the file/dir creation. */
2624 relayd_uid = getuid();
2625 relayd_gid = getgid();
2626
2627 /* A non-root daemon cannot bind ports below 1024. */
2628 if (relayd_uid != 0) {
2629 if (control_uri->port < 1024 || data_uri->port < 1024 ||
2630 live_uri->port < 1024) {
2631 ERR("Need to be root to use ports < 1024");
2632 ret = -1;
2633 goto exit;
2634 }
2635 }
2636
2637 /* Set up the relay connection pipe. */
2638 if ((ret = create_relay_conn_pipe()) < 0) {
2639 goto exit;
2640 }
2641
2642 /* Init relay command queue. */
2643 cds_wfq_init(&relay_conn_queue.queue);
2644
2645 /* Set up max poll set size */
2646 lttng_poll_set_max_size();
2647
2648 /* Initialize communication library */
2649 lttcomm_init();
2650 lttcomm_inet_init();
2651
2652 relay_ctx = zmalloc(sizeof(struct relay_local_data));
2653 if (!relay_ctx) {
2654 PERROR("relay_ctx");
2655 goto exit;
2656 }
2657
2658 /* table of sessions indexed by session ID */
2659 relay_ctx->sessions_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
2660 if (!relay_ctx->sessions_ht) {
2661 goto exit_relay_ctx_sessions;
2662 }
2663
2664 /* table of relay streams indexed by stream ID */
2665 relay_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
2666 if (!relay_streams_ht) {
2667 goto exit_relay_ctx_streams;
2668 }
2669
2670 /* table of viewer streams indexed by stream ID */
2671 viewer_streams_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
2672 if (!viewer_streams_ht) {
2673 goto exit_relay_ctx_viewer_streams;
2674 }
2675
2676 /* Initialize thread health monitoring */
2677 health_relayd = health_app_create(NR_HEALTH_RELAYD_TYPES);
2678 if (!health_relayd) {
2679 PERROR("health_app_create error");
2680 goto exit_health_app_create;
2681 }
2682
2683 ret = utils_create_pipe(health_quit_pipe);
2684 if (ret < 0) {
2685 goto error_health_pipe;
2686 }
2687
2688 /* Create thread to manage the health check socket. */
2689 ret = pthread_create(&health_thread, NULL,
2690 thread_manage_health, (void *) NULL);
2691 if (ret != 0) {
2692 PERROR("pthread_create health");
2693 goto health_error;
2694 }
2695
2696 /* Setup the dispatcher thread */
2697 ret = pthread_create(&dispatcher_thread, NULL,
2698 relay_thread_dispatcher, (void *) NULL);
2699 if (ret != 0) {
2700 PERROR("pthread_create dispatcher");
2701 goto exit_dispatcher;
2702 }
2703
2704 /* Setup the worker thread */
2705 ret = pthread_create(&worker_thread, NULL,
2706 relay_thread_worker, (void *) relay_ctx);
2707 if (ret != 0) {
2708 PERROR("pthread_create worker");
2709 goto exit_worker;
2710 }
2711
2712 /* Setup the listener thread */
2713 ret = pthread_create(&listener_thread, NULL,
2714 relay_thread_listener, (void *) NULL);
2715 if (ret != 0) {
2716 PERROR("pthread_create listener");
2717 goto exit_listener;
2718 }
2719
2720 ret = live_start_threads(live_uri, relay_ctx);
2721 if (ret != 0) {
2722 ERR("Starting live viewer threads");
2723 goto exit_live;
2724 }
2725
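/*
 * The exit_* labels below are laid out in reverse creation order so that a
 * failure at any step above only joins and tears down what was successfully
 * created before it.
 */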
2726 exit_live:
2727 ret = pthread_join(listener_thread, &status);
2728 if (ret != 0) {
2729 PERROR("pthread_join");
2730 goto error; /* join error, exit without cleanup */
2731 }
2732
2733 exit_listener:
2734 ret = pthread_join(worker_thread, &status);
2735 if (ret != 0) {
2736 PERROR("pthread_join");
2737 goto error; /* join error, exit without cleanup */
2738 }
2739
2740 exit_worker:
2741 ret = pthread_join(dispatcher_thread, &status);
2742 if (ret != 0) {
2743 PERROR("pthread_join");
2744 goto error; /* join error, exit without cleanup */
2745 }
2746
2747 exit_dispatcher:
2748 ret = pthread_join(health_thread, &status);
2749 if (ret != 0) {
2750 PERROR("pthread_join health thread");
2751 goto error; /* join error, exit without cleanup */
2752 }
2753
2754 /*
2755 * Stop live threads only after joining other threads.
2756 */
2757 live_stop_threads();
2758
2759 health_error:
2760 utils_close_pipe(health_quit_pipe);
2761
2762 error_health_pipe:
2763 health_app_destroy(health_relayd);
2764
2765 exit_health_app_create:
2766 lttng_ht_destroy(viewer_streams_ht);
2767
2768 exit_relay_ctx_viewer_streams:
2769 lttng_ht_destroy(relay_streams_ht);
2770
2771 exit_relay_ctx_streams:
2772 lttng_ht_destroy(relay_ctx->sessions_ht);
2773
2774 exit_relay_ctx_sessions:
2775 free(relay_ctx);
2776
2777 exit:
2778 cleanup();
2779 if (!ret) {
2780 exit(EXIT_SUCCESS);
2781 }
2782
2783 error:
2784 exit(EXIT_FAILURE);
2785 }