Distinguish UST return codes from transport return codes
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <lttng/ust-error.h>
45 #include <ust-comm.h>
46 #include <usterr-signal-safe.h>
47 #include <helper.h>
48 #include "tracepoint-internal.h"
49 #include "ltt-tracer-core.h"
50 #include "compat.h"
51 #include "../libringbuffer/tlsfixup.h"
52
53 /*
54 * Has the lttng-ust comm constructor been called?
55 */
56 static int initialized;
57
58 /*
59 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
60 * Held when handling a command, also held by fork() to deal with
61 * removal of threads, and by exit path.
62 */
63
64 /* Should the ust comm thread quit ? */
65 static int lttng_ust_comm_should_quit;
66
67 /*
68 * Wait for any of these before continuing to the main
69 * program:
70 * - the register_done message from sessiond daemon
71 * (will let the sessiond daemon enable sessions before main
72 * starts.)
73 * - sessiond daemon is not reachable.
74 * - timeout (ensuring applications are resilient to session
75 * daemon problems).
76 */
77 static sem_t constructor_wait;
78 /*
79 * Doing this for both the global and local sessiond.
80 */
81 static int sem_count = { 2 };
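/*
 * How the count is used (informal sketch, based on handle_register_done()
 * below): each sock_info decrements sem_count exactly once, whether
 * registration succeeded, the sessiond was unreachable, or the connection
 * was refused.  The decrement that brings the count to zero posts
 * constructor_wait, e.g.:
 *
 *	if (uatomic_add_return(&sem_count, -1) == 0)
 *		sem_post(&constructor_wait);
 */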
82
83 /*
84 * Counting nesting within lttng-ust. Used to ensure that calling fork()
85 * from liblttng-ust does not execute the pre/post fork handlers.
86 */
87 static int __thread lttng_ust_nest_count;
88
89 /*
90 * Info about socket and associated listener thread.
91 */
92 struct sock_info {
93 const char *name;
94 pthread_t ust_listener; /* listener thread */
95 int root_handle;
96 int constructor_sem_posted;
97 int allowed;
98 int global;
99
100 char sock_path[PATH_MAX];
101 int socket;
102
103 char wait_shm_path[PATH_MAX];
104 char *wait_shm_mmap;
105 };
106
107 /* Socket from app (connect) to session daemon (listen) for communication */
108 struct sock_info global_apps = {
109 .name = "global",
110 .global = 1,
111
112 .root_handle = -1,
113 .allowed = 1,
114
115 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
116 .socket = -1,
117
118 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
119 };
120
121 /* TODO: allow global_apps_sock_path override */
122
123 struct sock_info local_apps = {
124 .name = "local",
125 .global = 0,
126 .root_handle = -1,
127 .allowed = 0, /* Check setuid bit first */
128
129 .socket = -1,
130 };
131
132 static int wait_poll_fallback;
133
134 extern void ltt_ring_buffer_client_overwrite_init(void);
135 extern void ltt_ring_buffer_client_discard_init(void);
136 extern void ltt_ring_buffer_metadata_client_init(void);
137 extern void ltt_ring_buffer_client_overwrite_exit(void);
138 extern void ltt_ring_buffer_client_discard_exit(void);
139 extern void ltt_ring_buffer_metadata_client_exit(void);
140
141 /*
142 * Force a read (implying a TLS fixup on dlopen) of TLS variables.
143 */
144 static
145 void lttng_fixup_nest_count_tls(void)
146 {
147 asm volatile ("" : : "m" (lttng_ust_nest_count));
148 }
149
150 static
151 int setup_local_apps(void)
152 {
153 const char *home_dir;
154 uid_t uid;
155
156 uid = getuid();
157 /*
158 * Disallow per-user tracing for setuid binaries.
159 */
160 if (uid != geteuid()) {
161 assert(local_apps.allowed == 0);
162 return 0;
163 }
164 home_dir = (const char *) getenv("HOME");
165 if (!home_dir) {
166 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
167 assert(local_apps.allowed == 0);
168 return -ENOENT;
169 }
170 local_apps.allowed = 1;
171 snprintf(local_apps.sock_path, PATH_MAX,
172 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
173 snprintf(local_apps.wait_shm_path, PATH_MAX,
174 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
175 return 0;
176 }
177
178 static
179 int register_app_to_sessiond(int socket)
180 {
181 ssize_t ret;
182 struct {
183 uint32_t major;
184 uint32_t minor;
185 pid_t pid;
186 pid_t ppid;
187 uid_t uid;
188 gid_t gid;
189 uint32_t bits_per_long;
190 char name[16]; /* process name */
191 } reg_msg;
192
193 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
194 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
195 reg_msg.pid = getpid();
196 reg_msg.ppid = getppid();
197 reg_msg.uid = getuid();
198 reg_msg.gid = getgid();
199 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
200 lttng_ust_getprocname(reg_msg.name);
201
202 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
203 if (ret >= 0 && ret != sizeof(reg_msg))
204 return -EIO;
205 return ret;
206 }
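/*
 * Protocol note (informal): once this registration message is accepted,
 * the sessiond drives the same socket with ustcomm_ust_msg commands,
 * handled by handle_message() below.  The initial command sequence ends
 * with LTTNG_UST_REGISTER_DONE, which ultimately releases the
 * constructor_wait semaphore via handle_register_done().
 */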
207
208 static
209 int send_reply(int sock, struct ustcomm_ust_reply *lur)
210 {
211 ssize_t len;
212
213 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
214 switch (len) {
215 case sizeof(*lur):
216 DBG("message successfully sent");
217 return 0;
218 default:
219 if (len == -ECONNRESET) {
220 DBG("remote end closed connection");
221 return 0;
222 }
223 if (len < 0)
224 return len;
225 DBG("incorrect message size: %zd", len);
226 return -EINVAL;
227 }
228 }
229
230 static
231 int handle_register_done(struct sock_info *sock_info)
232 {
233 int ret;
234
235 if (sock_info->constructor_sem_posted)
236 return 0;
237 sock_info->constructor_sem_posted = 1;
238 if (uatomic_read(&sem_count) <= 0) {
239 return 0;
240 }
241 ret = uatomic_add_return(&sem_count, -1);
242 if (ret == 0) {
243 ret = sem_post(&constructor_wait);
244 assert(!ret);
245 }
246 return 0;
247 }
248
249 static
250 int handle_message(struct sock_info *sock_info,
251 int sock, struct ustcomm_ust_msg *lum)
252 {
253 int ret = 0;
254 const struct lttng_ust_objd_ops *ops;
255 struct ustcomm_ust_reply lur;
256 int shm_fd, wait_fd;
257 union ust_args args;
258 ssize_t len;
259
260 ust_lock();
261
262 memset(&lur, 0, sizeof(lur));
263
264 if (lttng_ust_comm_should_quit) {
265 ret = -EPERM;
266 goto end;
267 }
268
269 ops = objd_ops(lum->handle);
270 if (!ops) {
271 ret = -ENOENT;
272 goto end;
273 }
274
275 switch (lum->cmd) {
276 case LTTNG_UST_REGISTER_DONE:
277 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
278 ret = handle_register_done(sock_info);
279 else
280 ret = -EINVAL;
281 break;
282 case LTTNG_UST_RELEASE:
283 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
284 ret = -EPERM;
285 else
286 ret = lttng_ust_objd_unref(lum->handle);
287 break;
288 case LTTNG_UST_FILTER:
289 {
290 /* Receive filter data */
291 struct lttng_ust_filter_bytecode *bytecode;
292
293 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
294 ERR("Filter data size is too large: %u bytes",
295 lum->u.filter.data_size);
296 ret = -EINVAL;
297 goto error;
298 }
299
300 if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
301 ERR("Filter reloc offset %u is not within data",
302 lum->u.filter.reloc_offset);
303 ret = -EINVAL;
304 goto error;
305 }
306
307 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
308 if (!bytecode) {
309 ret = -ENOMEM;
310 goto error;
311 }
312 len = ustcomm_recv_unix_sock(sock, bytecode->data,
313 lum->u.filter.data_size);
314 switch (len) {
315 case 0: /* orderly shutdown */
316 ret = 0;
317 free(bytecode);
318 goto error;
319 default:
320 if (len == lum->u.filter.data_size) {
321 DBG("filter data received");
322 break;
323 } else if (len < 0) {
324 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
325 if (len == -ECONNRESET) {
326 ERR("%s remote end closed connection", sock_info->name);
327 ret = len;
328 free(bytecode);
329 goto error;
330 }
331 ret = len;
332 goto end;
333 } else {
334 DBG("incorrect filter data message size: %zd", len);
335 ret = -EINVAL;
336 free(bytecode);
337 goto end;
338 }
339 }
340 bytecode->len = lum->u.filter.data_size;
341 bytecode->reloc_offset = lum->u.filter.reloc_offset;
342 if (ops->cmd) {
343 ret = ops->cmd(lum->handle, lum->cmd,
344 (unsigned long) bytecode,
345 &args);
346 if (ret) {
347 free(bytecode);
348 }
349 /* don't free bytecode if everything went fine. */
350 } else {
351 ret = -ENOSYS;
352 free(bytecode);
353 }
354 break;
355 }
356 default:
357 if (ops->cmd)
358 ret = ops->cmd(lum->handle, lum->cmd,
359 (unsigned long) &lum->u,
360 &args);
361 else
362 ret = -ENOSYS;
363 break;
364 }
365
366 end:
367 lur.handle = lum->handle;
368 lur.cmd = lum->cmd;
369 lur.ret_val = ret;
370 if (ret >= 0) {
371 lur.ret_code = LTTNG_UST_OK;
372 } else {
373 /*
374 * Use -LTTNG_UST_ERR as a wildcard for UST internal
375 * errors that are not caused by the transport, except if
376 * we already have a more precise error code to
377 * report.
378 */
379 if (ret > -LTTNG_UST_ERR)
380 lur.ret_code = -LTTNG_UST_ERR;
381 else
382 lur.ret_code = ret;
383 }
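	/*
	 * Example of the return code mapping (informal; assumes the
	 * lttng/ust-error.h codes are numerically larger than errno
	 * values): a command handler failing with -EINVAL satisfies
	 * ret > -LTTNG_UST_ERR, so the sessiond sees the generic
	 * -LTTNG_UST_ERR, while a handler that already returned a
	 * precise UST code such as -LTTNG_UST_ERR_INVAL is passed
	 * through unchanged in lur.ret_code.  This keeps errno-style
	 * values, which are meaningless to the remote end, out of the
	 * UST error space it interprets.
	 */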
384 if (ret >= 0) {
385 switch (lum->cmd) {
386 case LTTNG_UST_STREAM:
387 /*
388 * Special-case reply to send stream info.
389 * Use lur.u output.
390 */
391 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
392 shm_fd = *args.stream.shm_fd;
393 wait_fd = *args.stream.wait_fd;
394 break;
395 case LTTNG_UST_METADATA:
396 case LTTNG_UST_CHANNEL:
397 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
398 shm_fd = *args.channel.shm_fd;
399 wait_fd = *args.channel.wait_fd;
400 break;
401 case LTTNG_UST_TRACER_VERSION:
402 lur.u.version = lum->u.version;
403 break;
404 case LTTNG_UST_TRACEPOINT_LIST_GET:
405 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
406 break;
407 }
408 }
409 ret = send_reply(sock, &lur);
410 if (ret < 0) {
411 DBG("error sending reply");
412 goto error;
413 }
414
415 if ((lum->cmd == LTTNG_UST_STREAM
416 || lum->cmd == LTTNG_UST_CHANNEL
417 || lum->cmd == LTTNG_UST_METADATA)
418 && lur.ret_code == LTTNG_UST_OK) {
419 int sendret = 0;
420
421 /* we also need to send the file descriptors. */
422 ret = ustcomm_send_fds_unix_sock(sock,
423 &shm_fd, &shm_fd,
424 1, sizeof(int));
425 if (ret < 0) {
426 ERR("send shm_fd");
427 sendret = ret;
428 }
429 /*
430 * The sessiond expects 2 file descriptors, even upon
431 * error.
432 */
433 ret = ustcomm_send_fds_unix_sock(sock,
434 &wait_fd, &wait_fd,
435 1, sizeof(int));
436 if (ret < 0) {
437 perror("send wait_fd");
438 goto error;
439 }
440 if (sendret) {
441 ret = sendret;
442 goto error;
443 }
444 }
445 /*
446 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field list
447 * entry after the reply.
448 */
449 if (lur.ret_code == LTTNG_UST_OK) {
450 switch (lum->cmd) {
451 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
452 len = ustcomm_send_unix_sock(sock,
453 &args.field_list.entry,
454 sizeof(args.field_list.entry));
455 if (len < 0) {
456 ret = len;
457 goto error;
458 }
459 if (len != sizeof(args.field_list.entry)) {
460 ret = -EINVAL;
461 goto error;
462 }
463 }
464 }
465 /*
466 * We still have the memory map reference, and the fds have been
467 * sent to the sessiond. We can therefore close those fds. Note
468 * that we keep the write side of the wait_fd open, but close
469 * the read side.
470 */
471 if (lur.ret_code == LTTNG_UST_OK) {
472 switch (lum->cmd) {
473 case LTTNG_UST_STREAM:
474 if (shm_fd >= 0) {
475 ret = close(shm_fd);
476 if (ret) {
477 PERROR("Error closing stream shm_fd");
478 }
479 *args.stream.shm_fd = -1;
480 }
481 if (wait_fd >= 0) {
482 ret = close(wait_fd);
483 if (ret) {
484 PERROR("Error closing stream wait_fd");
485 }
486 *args.stream.wait_fd = -1;
487 }
488 break;
489 case LTTNG_UST_METADATA:
490 case LTTNG_UST_CHANNEL:
491 if (shm_fd >= 0) {
492 ret = close(shm_fd);
493 if (ret) {
494 PERROR("Error closing channel shm_fd");
495 }
496 *args.channel.shm_fd = -1;
497 }
498 if (wait_fd >= 0) {
499 ret = close(wait_fd);
500 if (ret) {
501 PERROR("Error closing channel wait_fd");
502 }
503 *args.channel.wait_fd = -1;
504 }
505 break;
506 }
507 }
508
509 error:
510 ust_unlock();
511 return ret;
512 }
513
514 static
515 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
516 {
517 int ret;
518
519 if (sock_info->socket != -1) {
520 ret = ustcomm_close_unix_sock(sock_info->socket);
521 if (ret) {
522 ERR("Error closing apps socket");
523 }
524 sock_info->socket = -1;
525 }
526 if (sock_info->root_handle != -1) {
527 ret = lttng_ust_objd_unref(sock_info->root_handle);
528 if (ret) {
529 ERR("Error unref root handle");
530 }
531 sock_info->root_handle = -1;
532 }
533 sock_info->constructor_sem_posted = 0;
534 /*
535 * wait_shm_mmap is used by listener threads outside of the
536 * ust lock, so we cannot tear it down ourselves, because we
537 * cannot join on these threads. Leave this task to the OS
538 * process exit.
539 */
540 if (!exiting && sock_info->wait_shm_mmap) {
541 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
542 if (ret) {
543 ERR("Error unmapping wait shm");
544 }
545 sock_info->wait_shm_mmap = NULL;
546 }
547 }
548
549 /*
550 * Using fork to set umask in the child process (not multi-thread safe).
551 * We deal with the shm_open vs ftruncate race (happening when the
552 * sessiond owns the shm and does not let everybody modify it, to ensure
553 * safety against shm_unlink) by simply letting the mmap fail and
554 * retrying after a few seconds.
555 * For global shm, everybody has rw access to it until the sessiond
556 * starts.
557 */
558 static
559 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
560 {
561 int wait_shm_fd, ret;
562 pid_t pid;
563
564 /*
565 * Try to open read-only.
566 */
567 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
568 if (wait_shm_fd >= 0) {
569 goto end;
570 } else if (wait_shm_fd < 0 && errno != ENOENT) {
571 /*
572 * Read-only open did not work, and it's not because the
573 * entry was not present. It's a failure that prohibits
574 * using shm.
575 */
576 ERR("Error opening shm %s", sock_info->wait_shm_path);
577 goto end;
578 }
579 /*
580 * If the open failed because the file did not exist, try
581 * creating it ourselves.
582 */
583 lttng_ust_nest_count++;
584 pid = fork();
585 lttng_ust_nest_count--;
586 if (pid > 0) {
587 int status;
588
589 /*
590 * Parent: wait for child to return, in which case the
591 * shared memory map will have been created.
592 */
593 pid = wait(&status);
594 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
595 wait_shm_fd = -1;
596 goto end;
597 }
598 /*
599 * Try to open read-only again after creation.
600 */
601 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
602 if (wait_shm_fd < 0) {
603 /*
604 * Read-only open did not work. It's a failure
605 * that prohibits using shm.
606 */
607 ERR("Error opening shm %s", sock_info->wait_shm_path);
608 goto end;
609 }
610 goto end;
611 } else if (pid == 0) {
612 int create_mode;
613
614 /* Child */
615 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
616 if (sock_info->global)
617 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
618 /*
619 * We're alone in a child process, so we can modify the process-wide
620 * umask: umask(~create_mode) makes shm_open() create the file with exactly create_mode.
621 */
622 umask(~create_mode);
623 /*
624 * Try creating shm (or get rw access).
625 * We don't do an exclusive open, because we allow other
626 * processes to create+ftruncate it concurrently.
627 */
628 wait_shm_fd = shm_open(sock_info->wait_shm_path,
629 O_RDWR | O_CREAT, create_mode);
630 if (wait_shm_fd >= 0) {
631 ret = ftruncate(wait_shm_fd, mmap_size);
632 if (ret) {
633 PERROR("ftruncate");
634 _exit(EXIT_FAILURE);
635 }
636 _exit(EXIT_SUCCESS);
637 }
638 /*
639 * For local shm, we need rw access for the open to
640 * succeed: this means the local sessiond will be
641 * able to wake us up. For global shm, we open it even
642 * if rw access is not granted, because the root.root
643 * sessiond will be able to override all rights and wake
644 * us up.
645 */
646 if (!sock_info->global && errno != EACCES) {
647 ERR("Error opening shm %s", sock_info->wait_shm_path);
648 _exit(EXIT_FAILURE);
649 }
650 /*
651 * The shm exists, but we cannot open it RW. Report
652 * success.
653 */
654 _exit(EXIT_SUCCESS);
655 } else {
656 return -1;
657 }
658 end:
659 if (wait_shm_fd >= 0 && !sock_info->global) {
660 struct stat statbuf;
661
662 /*
663 * Ensure that our user is the owner of the shm file for
664 * local shm. If we do not own the file, it means our
665 * sessiond will not have access to wake us up (there is
666 * probably a rogue process trying to fake our
667 * sessiond). Fallback to polling method in this case.
668 */
669 ret = fstat(wait_shm_fd, &statbuf);
670 if (ret) {
671 PERROR("fstat");
672 goto error_close;
673 }
674 if (statbuf.st_uid != getuid())
675 goto error_close;
676 }
677 return wait_shm_fd;
678
679 error_close:
680 ret = close(wait_shm_fd);
681 if (ret) {
682 PERROR("Error closing fd");
683 }
684 return -1;
685 }
686
687 static
688 char *get_map_shm(struct sock_info *sock_info)
689 {
690 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
691 int wait_shm_fd, ret;
692 char *wait_shm_mmap;
693
694 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
695 if (wait_shm_fd < 0) {
696 goto error;
697 }
698 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
699 MAP_SHARED, wait_shm_fd, 0);
700 /* close shm fd immediately after taking the mmap reference */
701 ret = close(wait_shm_fd);
702 if (ret) {
703 PERROR("Error closing fd");
704 }
705 if (wait_shm_mmap == MAP_FAILED) {
706 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
707 goto error;
708 }
709 return wait_shm_mmap;
710
711 error:
712 return NULL;
713 }
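/*
 * Note: the wait page is mapped PROT_READ only; the application never
 * writes to it.  This is why wait_for_sessiond() below must cope with
 * kernels that reject FUTEX_WAIT on read-only mappings (EFAULT) by
 * switching to the wait_poll_fallback mode.
 */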
714
715 static
716 void wait_for_sessiond(struct sock_info *sock_info)
717 {
718 int ret;
719
720 ust_lock();
721 if (lttng_ust_comm_should_quit) {
722 goto quit;
723 }
724 if (wait_poll_fallback) {
725 goto error;
726 }
727 if (!sock_info->wait_shm_mmap) {
728 sock_info->wait_shm_mmap = get_map_shm(sock_info);
729 if (!sock_info->wait_shm_mmap)
730 goto error;
731 }
732 ust_unlock();
733
734 DBG("Waiting for %s apps sessiond", sock_info->name);
735 /* Wait for futex wakeup */
736 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
737 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
738 FUTEX_WAIT, 0, NULL, NULL, 0);
739 if (ret < 0) {
740 if (errno == EFAULT) {
741 wait_poll_fallback = 1;
742 DBG(
743 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
744 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
745 "Please upgrade your kernel "
746 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
747 "mainline). LTTng-UST will use polling mode fallback.");
748 if (ust_debug())
749 PERROR("futex");
750 }
751 }
752 }
753 return;
754
755 quit:
756 ust_unlock();
757 return;
758
759 error:
760 ust_unlock();
761 return;
762 }
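/*
 * Wake-up protocol (informal sketch of the sessiond side, which is not
 * part of this file): the 32-bit word at the start of the wait shm page
 * is used as a futex.  It stays 0 while no sessiond is accepting
 * registrations; a sessiond that starts listening conceptually does
 *
 *	uatomic_set((int32_t *) wait_shm_mmap, 1);
 *	futex_async((int32_t *) wait_shm_mmap, FUTEX_WAKE, INT_MAX,
 *			NULL, NULL, 0);
 *
 * which releases every application blocked in the FUTEX_WAIT above.
 */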
763
764 /*
765 * This thread does not allocate any resource, except within
766 * handle_message, under mutex protection. This mutex protects against
767 * fork and exit.
768 * The only other place it allocates resources is at socket connection
769 * time, which is also protected by the mutex.
770 */
771 static
772 void *ust_listener_thread(void *arg)
773 {
774 struct sock_info *sock_info = arg;
775 int sock, ret, prev_connect_failed = 0, has_waited = 0;
776
777 /* Restart trying to connect to the session daemon */
778 restart:
779 if (prev_connect_failed) {
780 /* Wait for sessiond availability with pipe */
781 wait_for_sessiond(sock_info);
782 if (has_waited) {
783 has_waited = 0;
784 /*
785 * Sleep for 5 seconds before retrying after a
786 * sequence of failure / wait / failure. This
787 * deals with a killed or broken session daemon.
788 */
789 sleep(5);
790 }
791 has_waited = 1;
792 prev_connect_failed = 0;
793 }
794 ust_lock();
795
796 if (lttng_ust_comm_should_quit) {
797 ust_unlock();
798 goto quit;
799 }
800
801 if (sock_info->socket != -1) {
802 ret = ustcomm_close_unix_sock(sock_info->socket);
803 if (ret) {
804 ERR("Error closing %s apps socket", sock_info->name);
805 }
806 sock_info->socket = -1;
807 }
808
809 /* Register */
810 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
811 if (ret < 0) {
812 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
813 prev_connect_failed = 1;
814 /*
815 * If we cannot find the sessiond daemon, don't delay
816 * constructor execution.
817 */
818 ret = handle_register_done(sock_info);
819 assert(!ret);
820 ust_unlock();
821 goto restart;
822 }
823
824 sock_info->socket = sock = ret;
825
826 /*
827 * Create only one root handle per listener thread for the whole
828 * process lifetime.
829 */
830 if (sock_info->root_handle == -1) {
831 ret = lttng_abi_create_root_handle();
832 if (ret < 0) {
833 ERR("Error creating root handle");
834 ust_unlock();
835 goto quit;
836 }
837 sock_info->root_handle = ret;
838 }
839
840 ret = register_app_to_sessiond(sock);
841 if (ret < 0) {
842 ERR("Error registering to %s apps socket", sock_info->name);
843 prev_connect_failed = 1;
844 /*
845 * If we cannot register to the sessiond daemon, don't
846 * delay constructor execution.
847 */
848 ret = handle_register_done(sock_info);
849 assert(!ret);
850 ust_unlock();
851 goto restart;
852 }
853 ust_unlock();
854
855 for (;;) {
856 ssize_t len;
857 struct ustcomm_ust_msg lum;
858
859 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
860 switch (len) {
861 case 0: /* orderly shutdown */
862 DBG("%s ltt-sessiond has performed an orderly shutdown", sock_info->name);
863 ust_lock();
864 /*
865 * Either sessiond has shut down or refused us by closing the socket.
866 * In either case, we don't want to delay constructor execution,
867 * and we need to wait before retrying.
868 */
869 prev_connect_failed = 1;
870 /*
871 * If we cannot register to the sessiond daemon, don't
872 * delay constructor execution.
873 */
874 ret = handle_register_done(sock_info);
875 assert(!ret);
876 ust_unlock();
877 goto end;
878 case sizeof(lum):
879 DBG("message received");
880 ret = handle_message(sock_info, sock, &lum);
881 if (ret) {
882 ERR("Error handling message for %s socket", sock_info->name);
883 }
884 continue;
885 default:
886 if (len < 0) {
887 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
888 } else {
889 DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
890 }
891 if (len == -ECONNRESET) {
892 DBG("%s remote end closed connection", sock_info->name);
893 goto end;
894 }
895 goto end;
896 }
897
898 }
899 end:
900 goto restart; /* try to reconnect */
901 quit:
902 return NULL;
903 }
904
905 /*
906 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
907 */
908 static
909 int get_timeout(struct timespec *constructor_timeout)
910 {
911 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
912 char *str_delay;
913 int ret;
914
915 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
916 if (str_delay) {
917 constructor_delay_ms = strtol(str_delay, NULL, 10);
918 }
919
920 switch (constructor_delay_ms) {
921 case -1:/* fall-through */
922 case 0:
923 return constructor_delay_ms;
924 default:
925 break;
926 }
927
928 /*
929 * If we are unable to find the current time, don't wait.
930 */
931 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
932 if (ret) {
933 return 0;
934 }
935 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
936 constructor_timeout->tv_nsec +=
937 (constructor_delay_ms % 1000UL) * 1000000UL;
938 if (constructor_timeout->tv_nsec >= 1000000000UL) {
939 constructor_timeout->tv_sec++;
940 constructor_timeout->tv_nsec -= 1000000000UL;
941 }
942 return 1;
943 }
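/*
 * Example (sketch): with LTTNG_UST_REGISTER_TIMEOUT=3500 and a current
 * time of tv_sec = S, tv_nsec = 600000000, the code above yields
 * tv_sec = S + 3, tv_nsec = 1100000000, which the overflow check then
 * normalizes to tv_sec = S + 4, tv_nsec = 100000000.
 */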
944
945 /*
946 * Constructor: spawn the listener threads which monitor the global and
947 * per-user sessiond, then wait for registration to complete (or time out).
948 */
949 void __attribute__((constructor)) lttng_ust_init(void)
950 {
951 struct timespec constructor_timeout;
952 sigset_t sig_all_blocked, orig_parent_mask;
953 pthread_attr_t thread_attr;
954 int timeout_mode;
955 int ret;
956
957 if (uatomic_xchg(&initialized, 1) == 1)
958 return;
959
960 /*
961 * Fixup interdependency between TLS fixup mutex (which happens
962 * to be the dynamic linker mutex) and ust_lock, taken within
963 * the ust lock.
964 */
965 lttng_fixup_event_tls();
966 lttng_fixup_ringbuffer_tls();
967 lttng_fixup_vtid_tls();
968 lttng_fixup_nest_count_tls();
969 lttng_fixup_procname_tls();
970
971 /*
972 * We want precise control over the order in which we construct
973 * our sub-libraries vs starting to receive commands from
974 * sessiond (otherwise leading to errors when trying to create
975 * sessiond before the init functions are completed).
976 */
977 init_usterr();
978 init_tracepoint();
979 ltt_ring_buffer_metadata_client_init();
980 ltt_ring_buffer_client_overwrite_init();
981 ltt_ring_buffer_client_discard_init();
982
983 timeout_mode = get_timeout(&constructor_timeout);
984
985 ret = sem_init(&constructor_wait, 0, 0);
986 assert(!ret);
987
988 ret = setup_local_apps();
989 if (ret) {
990 DBG("local apps setup returned %d", ret);
991 }
992
993 /* A new thread created by pthread_create inherits the signal mask
994 * from the parent. To avoid any signal being received by the
995 * listener thread, we block all signals temporarily in the parent,
996 * while we create the listener thread.
997 */
998 sigfillset(&sig_all_blocked);
999 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1000 if (ret) {
1001 ERR("pthread_sigmask: %s", strerror(ret));
1002 }
1003
1004 ret = pthread_attr_init(&thread_attr);
1005 if (ret) {
1006 ERR("pthread_attr_init: %s", strerror(ret));
1007 }
1008 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
1009 if (ret) {
1010 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
1011 }
1012
1013 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
1014 ust_listener_thread, &global_apps);
1015 if (ret) {
1016 ERR("pthread_create global: %s", strerror(ret));
1017 }
1018 if (local_apps.allowed) {
1019 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
1020 ust_listener_thread, &local_apps);
1021 if (ret) {
1022 ERR("pthread_create local: %s", strerror(ret));
1023 }
1024 } else {
1025 handle_register_done(&local_apps);
1026 }
1027 ret = pthread_attr_destroy(&thread_attr);
1028 if (ret) {
1029 ERR("pthread_attr_destroy: %s", strerror(ret));
1030 }
1031
1032 /* Restore original signal mask in parent */
1033 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1034 if (ret) {
1035 ERR("pthread_sigmask: %s", strerror(ret));
1036 }
1037
1038 switch (timeout_mode) {
1039 case 1: /* timeout wait */
1040 do {
1041 ret = sem_timedwait(&constructor_wait,
1042 &constructor_timeout);
1043 } while (ret < 0 && errno == EINTR);
1044 if (ret < 0 && errno == ETIMEDOUT) {
1045 ERR("Timed out waiting for ltt-sessiond");
1046 } else {
1047 assert(!ret);
1048 }
1049 break;
1050 case -1:/* wait forever */
1051 do {
1052 ret = sem_wait(&constructor_wait);
1053 } while (ret < 0 && errno == EINTR);
1054 assert(!ret);
1055 break;
1056 case 0: /* no timeout */
1057 break;
1058 }
1059 }
1060
1061 static
1062 void lttng_ust_cleanup(int exiting)
1063 {
1064 cleanup_sock_info(&global_apps, exiting);
1065 if (local_apps.allowed) {
1066 cleanup_sock_info(&local_apps, exiting);
1067 }
1068 /*
1069 * The teardown operations in this function all affect data structures
1070 * accessed under the UST lock by the listener thread. This
1071 * lock, along with the lttng_ust_comm_should_quit flag, ensures
1072 * that none of these threads are accessing this data at this
1073 * point.
1074 */
1075 lttng_ust_abi_exit();
1076 lttng_ust_events_exit();
1077 ltt_ring_buffer_client_discard_exit();
1078 ltt_ring_buffer_client_overwrite_exit();
1079 ltt_ring_buffer_metadata_client_exit();
1080 exit_tracepoint();
1081 if (!exiting) {
1082 /* Reinitialize values for fork */
1083 sem_count = 2;
1084 lttng_ust_comm_should_quit = 0;
1085 initialized = 0;
1086 }
1087 }
1088
1089 void __attribute__((destructor)) lttng_ust_exit(void)
1090 {
1091 int ret;
1092
1093 /*
1094 * Using pthread_cancel here because:
1095 * A) we don't want to hang application teardown.
1096 * B) the thread is not allocating any resource.
1097 */
1098
1099 /*
1100 * Require the communication thread to quit. Synchronize with
1101 * mutexes to ensure it is not in a mutex critical section when
1102 * pthread_cancel is later called.
1103 */
1104 ust_lock();
1105 lttng_ust_comm_should_quit = 1;
1106 ust_unlock();
1107
1108 /* cancel threads */
1109 ret = pthread_cancel(global_apps.ust_listener);
1110 if (ret) {
1111 ERR("Error cancelling global ust listener thread: %s",
1112 strerror(ret));
1113 }
1114 if (local_apps.allowed) {
1115 ret = pthread_cancel(local_apps.ust_listener);
1116 if (ret) {
1117 ERR("Error cancelling local ust listener thread: %s",
1118 strerror(ret));
1119 }
1120 }
1121 /*
1122 * Do NOT join threads: use of sys_futex makes it impossible to
1123 * join the threads without using async-cancel, but async-cancel
1124 * is delivered by a signal, which could hit the target thread
1125 * anywhere in its code path, including while the ust_lock() is
1126 * held, causing a deadlock for the other thread. Let the OS
1127 * clean up the threads if they are stalled in a syscall.
1128 */
1129 lttng_ust_cleanup(1);
1130 }
1131
1132 /*
1133 * We exclude the worker threads across fork and clone (except
1134 * CLONE_VM), because these system calls only keep the forking thread
1135 * running in the child. Therefore, we don't want to call fork or clone
1136 * in the middle of a tracepoint or ust tracing state modification.
1137 * Holding this mutex protects these structures across fork and clone; a usage sketch of these hooks appears at the end of this file.
1138 */
1139 void ust_before_fork(sigset_t *save_sigset)
1140 {
1141 /*
1142 * Disable signals. This is to prevent the child from intervening
1143 * before it is properly set up for tracing. It is safer to
1144 * disable all signals, because then we know we are not breaking
1145 * anything by restoring the original mask.
1146 */
1147 sigset_t all_sigs;
1148 int ret;
1149
1150 if (lttng_ust_nest_count)
1151 return;
1152 /* Disable signals */
1153 sigfillset(&all_sigs);
1154 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1155 if (ret == -1) {
1156 PERROR("sigprocmask");
1157 }
1158 ust_lock();
1159 rcu_bp_before_fork();
1160 }
1161
1162 static void ust_after_fork_common(sigset_t *restore_sigset)
1163 {
1164 int ret;
1165
1166 DBG("process %d", getpid());
1167 ust_unlock();
1168 /* Restore signals */
1169 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1170 if (ret == -1) {
1171 PERROR("sigprocmask");
1172 }
1173 }
1174
1175 void ust_after_fork_parent(sigset_t *restore_sigset)
1176 {
1177 if (lttng_ust_nest_count)
1178 return;
1179 DBG("process %d", getpid());
1180 rcu_bp_after_fork_parent();
1181 /* Release mutexes and reenable signals */
1182 ust_after_fork_common(restore_sigset);
1183 }
1184
1185 /*
1186 * After fork, in the child, we need to clean up all the leftover state,
1187 * except the worker thread which already magically disappeared thanks
1188 * to the weird Linux fork semantics. After tidying up, we call
1189 * lttng_ust_init() again to start over as a new PID.
1190 *
1191 * This is meant for fork() calls that have tracing in the child between
1192 * the fork and the following exec call (if there is any).
1193 */
1194 void ust_after_fork_child(sigset_t *restore_sigset)
1195 {
1196 if (lttng_ust_nest_count)
1197 return;
1198 DBG("process %d", getpid());
1199 /* Release urcu mutexes */
1200 rcu_bp_after_fork_child();
1201 lttng_ust_cleanup(0);
1202 lttng_context_vtid_reset();
1203 /* Release mutexes and reenable signals */
1204 ust_after_fork_common(restore_sigset);
1205 lttng_ust_init();
1206 }
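/*
 * Usage sketch for the fork hooks above (illustrative only; the wrapper
 * name traced_fork() is hypothetical, but the pattern matches what an
 * LD_PRELOAD-style fork wrapper is expected to do):
 *
 *	pid_t traced_fork(void)
 *	{
 *		sigset_t sigset;
 *		pid_t pid;
 *
 *		ust_before_fork(&sigset);
 *		pid = fork();
 *		if (pid == 0)
 *			ust_after_fork_child(&sigset);
 *		else
 *			ust_after_fork_parent(&sigset);
 *		return pid;
 *	}
 */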