Cleanup: don't spawn per-user thread if HOME is not set
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <ust-comm.h>
45 #include <usterr-signal-safe.h>
46 #include <helper.h>
47 #include "tracepoint-internal.h"
48 #include "ltt-tracer-core.h"
49 #include "compat.h"
50 #include "../libringbuffer/tlsfixup.h"
51
52 /*
53 * Has the lttng-ust comm constructor been called?
54 */
55 static int initialized;
56
57 /*
58 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
59 * It is held when handling a command, by the fork() handlers (to deal
60 * with removal of threads), and by the exit path.
61 */
62
63 /* Should the ust comm thread quit? */
64 static int lttng_ust_comm_should_quit;
65
66 /*
67 * Wait for any one of these before continuing to the main
68 * program:
69 * - the register_done message from sessiond daemon
70 * (will let the sessiond daemon enable sessions before main
71 * starts.)
72 * - sessiond daemon is not reachable.
73 * - timeout (ensuring applications are resilient to session
74 * daemon problems).
75 */
76 static sem_t constructor_wait;
77 /*
78 * The count is 2: we wait for both the global and the local (per-user) sessiond.
79 */
80 static int sem_count = { 2 };
81
82 /*
83 * Counting nesting within lttng-ust. Used to ensure that calling fork()
84 * from liblttng-ust does not execute the pre/post fork handlers.
85 */
86 static int __thread lttng_ust_nest_count;
87
88 /*
89 * Info about socket and associated listener thread.
90 */
91 struct sock_info {
92 const char *name;
93 pthread_t ust_listener; /* listener thread */
94 int root_handle;
95 int constructor_sem_posted;
96 int allowed;
97 int global;
98
99 char sock_path[PATH_MAX];
100 int socket;
101
102 char wait_shm_path[PATH_MAX];
103 char *wait_shm_mmap;
104 };
105
106 /* Socket from app (connect) to session daemon (listen) for communication */
107 struct sock_info global_apps = {
108 .name = "global",
109 .global = 1,
110
111 .root_handle = -1,
112 .allowed = 1,
113
114 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
115 .socket = -1,
116
117 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
118 };
119
120 /* TODO: allow global_apps_sock_path override */
121
122 struct sock_info local_apps = {
123 .name = "local",
124 .global = 0,
125 .root_handle = -1,
126 .allowed = 0, /* Check setuid bit first */
127
128 .socket = -1,
129 };
130
131 static int wait_poll_fallback;
132
133 extern void ltt_ring_buffer_client_overwrite_init(void);
134 extern void ltt_ring_buffer_client_discard_init(void);
135 extern void ltt_ring_buffer_metadata_client_init(void);
136 extern void ltt_ring_buffer_client_overwrite_exit(void);
137 extern void ltt_ring_buffer_client_discard_exit(void);
138 extern void ltt_ring_buffer_metadata_client_exit(void);
139
140 /*
141 * Force a read (implies TLS fixup for dlopen) of TLS variables.
142 */
143 static
144 void lttng_fixup_nest_count_tls(void)
145 {
146 asm volatile ("" : : "m" (lttng_ust_nest_count));
147 }
148
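/*
 * Editorial note, illustrative only (not part of the upstream file):
 * the empty asm with an "m" constraint above forces the compiler to
 * emit an access to the TLS variable, so its TLS block is allocated
 * here, at constructor time, rather than lazily from within a
 * dlopen()'d or lock-sensitive context. The same pattern, for a
 * hypothetical TLS variable, would look like:
 *
 *	static __thread int my_tls_var;
 *
 *	static void lttng_fixup_my_tls(void)
 *	{
 *		asm volatile ("" : : "m" (my_tls_var));
 *	}
 */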
149 static
150 int setup_local_apps(void)
151 {
152 const char *home_dir;
153 uid_t uid;
154
155 uid = getuid();
156 /*
157 * Disallow per-user tracing for setuid binaries.
158 */
159 if (uid != geteuid()) {
160 assert(local_apps.allowed == 0);
161 return 0;
162 }
163 home_dir = (const char *) getenv("HOME");
164 if (!home_dir) {
165 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
166 assert(local_apps.allowed == 0);
167 return -ENOENT;
168 }
169 local_apps.allowed = 1;
170 snprintf(local_apps.sock_path, PATH_MAX,
171 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
172 snprintf(local_apps.wait_shm_path, PATH_MAX,
173 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
174 return 0;
175 }
176
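/*
 * Illustrative note (not part of the upstream file): callers are
 * expected to key the per-user listener on local_apps.allowed, which
 * stays 0 when the process is setuid or when HOME is unset. This is
 * what lttng_ust_init() does further down:
 *
 *	ret = setup_local_apps();
 *	if (ret)
 *		DBG("local apps setup returned %d", ret);
 *	...
 *	if (local_apps.allowed)
 *		pthread_create(&local_apps.ust_listener, &thread_attr,
 *				ust_listener_thread, &local_apps);
 *	else
 *		handle_register_done(&local_apps);
 */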
177 static
178 int register_app_to_sessiond(int socket)
179 {
180 ssize_t ret;
181 struct {
182 uint32_t major;
183 uint32_t minor;
184 pid_t pid;
185 pid_t ppid;
186 uid_t uid;
187 gid_t gid;
188 uint32_t bits_per_long;
189 char name[16]; /* process name */
190 } reg_msg;
191
192 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
193 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
194 reg_msg.pid = getpid();
195 reg_msg.ppid = getppid();
196 reg_msg.uid = getuid();
197 reg_msg.gid = getgid();
198 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
199 lttng_ust_getprocname(reg_msg.name);
200
201 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
202 if (ret >= 0 && ret != sizeof(reg_msg))
203 return -EIO;
204 return ret;
205 }
206
207 static
208 int send_reply(int sock, struct ustcomm_ust_reply *lur)
209 {
210 ssize_t len;
211
212 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
213 switch (len) {
214 case sizeof(*lur):
215 DBG("message successfully sent");
216 return 0;
217 case -1:
218 if (errno == ECONNRESET) {
219 printf("remote end closed connection\n");
220 return 0;
221 }
222 return -1;
223 default:
224 printf("incorrect message size: %zd\n", len);
225 return -1;
226 }
227 }
228
229 static
230 int handle_register_done(struct sock_info *sock_info)
231 {
232 int ret;
233
234 if (sock_info->constructor_sem_posted)
235 return 0;
236 sock_info->constructor_sem_posted = 1;
237 if (uatomic_read(&sem_count) <= 0) {
238 return 0;
239 }
240 ret = uatomic_add_return(&sem_count, -1);
241 if (ret == 0) {
242 ret = sem_post(&constructor_wait);
243 assert(!ret);
244 }
245 return 0;
246 }
247
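/*
 * Illustrative note (not part of the upstream file): sem_count starts
 * at 2, one slot for the global sessiond and one for the per-user
 * sessiond. Each listener (or the constructor itself, when no per-user
 * listener is spawned) calls handle_register_done() at most once, and
 * the constructor's sem_wait() only returns once the count reaches 0:
 *
 *	handle_register_done(&global_apps);	// sem_count: 2 -> 1
 *	handle_register_done(&local_apps);	// sem_count: 1 -> 0, sem_post()
 */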
248 static
249 int handle_message(struct sock_info *sock_info,
250 int sock, struct ustcomm_ust_msg *lum)
251 {
252 int ret = 0;
253 const struct lttng_ust_objd_ops *ops;
254 struct ustcomm_ust_reply lur;
255 int shm_fd, wait_fd;
256 union ust_args args;
257 ssize_t len;
258
259 ust_lock();
260
261 memset(&lur, 0, sizeof(lur));
262
263 if (lttng_ust_comm_should_quit) {
264 ret = -EPERM;
265 goto end;
266 }
267
268 ops = objd_ops(lum->handle);
269 if (!ops) {
270 ret = -ENOENT;
271 goto end;
272 }
273
274 switch (lum->cmd) {
275 case LTTNG_UST_REGISTER_DONE:
276 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
277 ret = handle_register_done(sock_info);
278 else
279 ret = -EINVAL;
280 break;
281 case LTTNG_UST_RELEASE:
282 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
283 ret = -EPERM;
284 else
285 ret = lttng_ust_objd_unref(lum->handle);
286 break;
287 case LTTNG_UST_FILTER:
288 {
289 /* Receive filter data */
290 struct lttng_ust_filter_bytecode *bytecode;
291
292 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
293 ERR("Filter data size is too large: %u bytes\n",
294 lum->u.filter.data_size);
295 ret = -EINVAL;
296 goto error;
297 }
298
299 if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
300 ERR("Filter reloc offset %u is not within data\n",
301 lum->u.filter.reloc_offset);
302 ret = -EINVAL;
303 goto error;
304 }
305
306 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
307 if (!bytecode) {
308 ret = -ENOMEM;
309 goto error;
310 }
311 len = ustcomm_recv_unix_sock(sock, bytecode->data,
312 lum->u.filter.data_size);
313 switch (len) {
314 case 0: /* orderly shutdown */
315 ret = 0;
316 free(bytecode);
317 goto error;
318 case -1:
319 DBG("Receive failed from lttng-sessiond with errno %d", errno);
320 if (errno == ECONNRESET) {
321 ERR("%s remote end closed connection\n", sock_info->name);
322 ret = -EINVAL;
323 free(bytecode);
324 goto error;
325 }
326 ret = -EINVAL;
327 goto end;
328 default:
329 if (len == lum->u.filter.data_size) {
330 DBG("filter data received\n");
331 break;
332 } else {
333 ERR("incorrect filter data message size: %zd\n", len);
334 ret = -EINVAL;
335 free(bytecode);
336 goto end;
337 }
338 }
339 bytecode->len = lum->u.filter.data_size;
340 bytecode->reloc_offset = lum->u.filter.reloc_offset;
341 if (ops->cmd) {
342 ret = ops->cmd(lum->handle, lum->cmd,
343 (unsigned long) bytecode,
344 &args);
345 if (ret) {
346 free(bytecode);
347 }
348 /* don't free bytecode if everything went fine. */
349 } else {
350 ret = -ENOSYS;
351 free(bytecode);
352 }
353 break;
354 }
355 default:
356 if (ops->cmd)
357 ret = ops->cmd(lum->handle, lum->cmd,
358 (unsigned long) &lum->u,
359 &args);
360 else
361 ret = -ENOSYS;
362 break;
363 }
364
365 end:
366 lur.handle = lum->handle;
367 lur.cmd = lum->cmd;
368 lur.ret_val = ret;
369 if (ret >= 0) {
370 lur.ret_code = USTCOMM_OK;
371 } else {
372 //lur.ret_code = USTCOMM_SESSION_FAIL;
373 lur.ret_code = ret;
374 }
375 if (ret >= 0) {
376 switch (lum->cmd) {
377 case LTTNG_UST_STREAM:
378 /*
379 * Special-case reply to send stream info.
380 * Use lur.u output.
381 */
382 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
383 shm_fd = *args.stream.shm_fd;
384 wait_fd = *args.stream.wait_fd;
385 break;
386 case LTTNG_UST_METADATA:
387 case LTTNG_UST_CHANNEL:
388 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
389 shm_fd = *args.channel.shm_fd;
390 wait_fd = *args.channel.wait_fd;
391 break;
392 case LTTNG_UST_TRACER_VERSION:
393 lur.u.version = lum->u.version;
394 break;
395 case LTTNG_UST_TRACEPOINT_LIST_GET:
396 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
397 break;
398 }
399 }
400 ret = send_reply(sock, &lur);
401 if (ret < 0) {
402 perror("error sending reply");
403 goto error;
404 }
405
406 if ((lum->cmd == LTTNG_UST_STREAM
407 || lum->cmd == LTTNG_UST_CHANNEL
408 || lum->cmd == LTTNG_UST_METADATA)
409 && lur.ret_code == USTCOMM_OK) {
410 int sendret = 0;
411
412 /* we also need to send the file descriptors. */
413 ret = ustcomm_send_fds_unix_sock(sock,
414 &shm_fd, &shm_fd,
415 1, sizeof(int));
416 if (ret < 0) {
417 perror("send shm_fd");
418 sendret = ret;
419 }
420 /*
421 * The sessiond expects 2 file descriptors, even upon
422 * error.
423 */
424 ret = ustcomm_send_fds_unix_sock(sock,
425 &wait_fd, &wait_fd,
426 1, sizeof(int));
427 if (ret < 0) {
428 perror("send wait_fd");
429 goto error;
430 }
431 if (sendret) {
432 ret = sendret;
433 goto error;
434 }
435 }
436 /*
437 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field list
438 * entry after the reply.
439 */
440 if (lur.ret_code == USTCOMM_OK) {
441 switch (lum->cmd) {
442 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
443 len = ustcomm_send_unix_sock(sock,
444 &args.field_list.entry,
445 sizeof(args.field_list.entry));
446 if (len != sizeof(args.field_list.entry)) {
447 ret = -1;
448 goto error;
449 }
450 }
451 }
452 /*
453 * We still have the memory map reference, and the fds have been
454 * sent to the sessiond. We can therefore close those fds. Note
455 * that we keep the write side of the wait_fd open, but close
456 * the read side.
457 */
458 if (lur.ret_code == USTCOMM_OK) {
459 switch (lum->cmd) {
460 case LTTNG_UST_STREAM:
461 if (shm_fd >= 0) {
462 ret = close(shm_fd);
463 if (ret) {
464 PERROR("Error closing stream shm_fd");
465 }
466 *args.stream.shm_fd = -1;
467 }
468 if (wait_fd >= 0) {
469 ret = close(wait_fd);
470 if (ret) {
471 PERROR("Error closing stream wait_fd");
472 }
473 *args.stream.wait_fd = -1;
474 }
475 break;
476 case LTTNG_UST_METADATA:
477 case LTTNG_UST_CHANNEL:
478 if (shm_fd >= 0) {
479 ret = close(shm_fd);
480 if (ret) {
481 PERROR("Error closing channel shm_fd");
482 }
483 *args.channel.shm_fd = -1;
484 }
485 if (wait_fd >= 0) {
486 ret = close(wait_fd);
487 if (ret) {
488 PERROR("Error closing channel wait_fd");
489 }
490 *args.channel.wait_fd = -1;
491 }
492 break;
493 }
494 }
495
496 error:
497 ust_unlock();
498 return ret;
499 }
500
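/*
 * Illustrative summary (not part of the upstream file) of the reply
 * sequence implemented by handle_message() above, as seen from the
 * application side:
 *
 *	send struct ustcomm_ust_reply			(always)
 *	if cmd is STREAM/CHANNEL/METADATA and ret_code == USTCOMM_OK:
 *		send shm_fd	(SCM_RIGHTS ancillary data)
 *		send wait_fd	(SCM_RIGHTS ancillary data)
 *	if cmd is TRACEPOINT_FIELD_LIST_GET and ret_code == USTCOMM_OK:
 *		send args.field_list.entry
 */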
501 static
502 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
503 {
504 int ret;
505
506 if (sock_info->socket != -1) {
507 ret = ustcomm_close_unix_sock(sock_info->socket);
508 if (ret) {
509 ERR("Error closing apps socket");
510 }
511 sock_info->socket = -1;
512 }
513 if (sock_info->root_handle != -1) {
514 ret = lttng_ust_objd_unref(sock_info->root_handle);
515 if (ret) {
516 ERR("Error unref root handle");
517 }
518 sock_info->root_handle = -1;
519 }
520 sock_info->constructor_sem_posted = 0;
521 /*
522 * wait_shm_mmap is used by listener threads outside of the ust lock,
523 * so we cannot tear it down ourselves at process exit: we cannot join
524 * on those threads, so leave that task to OS process teardown. In the
525 * fork-child case (!exiting), the listeners are gone and unmapping is safe.
526 */
527 if (!exiting && sock_info->wait_shm_mmap) {
528 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
529 if (ret) {
530 ERR("Error unmapping wait shm");
531 }
532 sock_info->wait_shm_mmap = NULL;
533 }
534 }
535
536 /*
537 * Use fork so we can set the umask in a child process (the umask is process-wide, hence not multi-thread safe).
538 * We deal with the shm_open vs ftruncate race (happening when the
539 * sessiond owns the shm and does not let everybody modify it, to ensure
540 * safety against shm_unlink) by simply letting the mmap fail and
541 * retrying after a few seconds.
542 * For global shm, everybody has rw access to it until the sessiond
543 * starts.
544 */
545 static
546 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
547 {
548 int wait_shm_fd, ret;
549 pid_t pid;
550
551 /*
552 * Try to open read-only.
553 */
554 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
555 if (wait_shm_fd >= 0) {
556 goto end;
557 } else if (wait_shm_fd < 0 && errno != ENOENT) {
558 /*
559 * Read-only open did not work, and it's not because the
560 * entry was not present. It's a failure that prohibits
561 * using shm.
562 */
563 ERR("Error opening shm %s", sock_info->wait_shm_path);
564 goto end;
565 }
566 /*
567 * If the open failed because the file did not exist, try
568 * creating it ourselves.
569 */
570 lttng_ust_nest_count++;
571 pid = fork();
572 lttng_ust_nest_count--;
573 if (pid > 0) {
574 int status;
575
576 /*
577 * Parent: wait for child to return, in which case the
578 * shared memory map will have been created.
579 */
580 pid = wait(&status);
581 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
582 wait_shm_fd = -1;
583 goto end;
584 }
585 /*
586 * Try to open read-only again after creation.
587 */
588 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
589 if (wait_shm_fd < 0) {
590 /*
591 * Read-only open did not work. It's a failure
592 * that prohibits using shm.
593 */
594 ERR("Error opening shm %s", sock_info->wait_shm_path);
595 goto end;
596 }
597 goto end;
598 } else if (pid == 0) {
599 int create_mode;
600
601 /* Child */
602 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
603 if (sock_info->global)
604 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
605 /*
606 * We're alone in a child process, so we can modify the
607 * process-wide umask.
608 */
609 umask(~create_mode);
610 /*
611 * Try creating shm (or get rw access).
612 * We don't do an exclusive open, because we allow other
613 * processes to create+ftruncate it concurrently.
614 */
615 wait_shm_fd = shm_open(sock_info->wait_shm_path,
616 O_RDWR | O_CREAT, create_mode);
617 if (wait_shm_fd >= 0) {
618 ret = ftruncate(wait_shm_fd, mmap_size);
619 if (ret) {
620 PERROR("ftruncate");
621 _exit(EXIT_FAILURE);
622 }
623 _exit(EXIT_SUCCESS);
624 }
625 /*
626 * For local shm, we need to have rw access to accept
627 * opening it: this means the local sessiond will be
628 * able to wake us up. For global shm, we open it even
629 * if rw access is not granted, because the root.root
630 * sessiond will be able to override all rights and wake
631 * us up.
632 */
633 if (!sock_info->global && errno != EACCES) {
634 ERR("Error opening shm %s", sock_info->wait_shm_path);
635 _exit(EXIT_FAILURE);
636 }
637 /*
638 * The shm exists, but we cannot open it RW. Report
639 * success.
640 */
641 _exit(EXIT_SUCCESS);
642 } else {
643 return -1;
644 }
645 end:
646 if (wait_shm_fd >= 0 && !sock_info->global) {
647 struct stat statbuf;
648
649 /*
650 * Ensure that our user is the owner of the shm file for
651 * local shm. If we do not own the file, it means our
652 * sessiond will not have access to wake us up (there is
653 * probably a rogue process trying to fake our
654 * sessiond). Fallback to polling method in this case.
655 */
656 ret = fstat(wait_shm_fd, &statbuf);
657 if (ret) {
658 PERROR("fstat");
659 goto error_close;
660 }
661 if (statbuf.st_uid != getuid())
662 goto error_close;
663 }
664 return wait_shm_fd;
665
666 error_close:
667 ret = close(wait_shm_fd);
668 if (ret) {
669 PERROR("Error closing fd");
670 }
671 return -1;
672 }
673
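/*
 * Illustrative note (not part of the upstream file): the shm object
 * obtained here is one page long and its first 32-bit word is used as
 * a futex by wait_for_sessiond() below. Conceptually, the waker (the
 * session daemon, outside of this file) is expected to do something
 * along the lines of:
 *
 *	uatomic_set((int32_t *) wait_shm_mmap, 1);
 *	futex((int32_t *) wait_shm_mmap, FUTEX_WAKE, INT_MAX,
 *		NULL, NULL, 0);
 */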
674 static
675 char *get_map_shm(struct sock_info *sock_info)
676 {
677 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
678 int wait_shm_fd, ret;
679 char *wait_shm_mmap;
680
681 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
682 if (wait_shm_fd < 0) {
683 goto error;
684 }
685 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
686 MAP_SHARED, wait_shm_fd, 0);
687 /* close shm fd immediately after taking the mmap reference */
688 ret = close(wait_shm_fd);
689 if (ret) {
690 PERROR("Error closing fd");
691 }
692 if (wait_shm_mmap == MAP_FAILED) {
693 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
694 goto error;
695 }
696 return wait_shm_mmap;
697
698 error:
699 return NULL;
700 }
701
702 static
703 void wait_for_sessiond(struct sock_info *sock_info)
704 {
705 int ret;
706
707 ust_lock();
708 if (lttng_ust_comm_should_quit) {
709 goto quit;
710 }
711 if (wait_poll_fallback) {
712 goto error;
713 }
714 if (!sock_info->wait_shm_mmap) {
715 sock_info->wait_shm_mmap = get_map_shm(sock_info);
716 if (!sock_info->wait_shm_mmap)
717 goto error;
718 }
719 ust_unlock();
720
721 DBG("Waiting for %s apps sessiond", sock_info->name);
722 /* Wait for futex wakeup */
723 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
724 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
725 FUTEX_WAIT, 0, NULL, NULL, 0);
726 if (ret < 0) {
727 if (errno == EFAULT) {
728 wait_poll_fallback = 1;
729 DBG(
730 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
731 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
732 "Please upgrade your kernel "
733 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
734 "mainline). LTTng-UST will use polling mode fallback.");
735 if (ust_debug())
736 PERROR("futex");
737 }
738 }
739 }
740 return;
741
742 quit:
743 ust_unlock();
744 return;
745
746 error:
747 ust_unlock();
748 return;
749 }
750
751 /*
752 * This thread does not allocate any resource, except within
753 * handle_message(), under mutex protection. This mutex protects
754 * against fork and exit.
755 * It also allocates resources when connecting its socket, which is
756 * likewise done under the mutex.
757 */
758 static
759 void *ust_listener_thread(void *arg)
760 {
761 struct sock_info *sock_info = arg;
762 int sock, ret, prev_connect_failed = 0, has_waited = 0;
763
764 /* Restart trying to connect to the session daemon */
765 restart:
766 if (prev_connect_failed) {
767 /* Wait for sessiond availability using the wait shm futex */
768 wait_for_sessiond(sock_info);
769 if (has_waited) {
770 has_waited = 0;
771 /*
772 * Sleep for 5 seconds before retrying after a
773 * sequence of failure / wait / failure. This
774 * deals with a killed or broken session daemon.
775 */
776 sleep(5);
777 }
778 has_waited = 1;
779 prev_connect_failed = 0;
780 }
781 ust_lock();
782
783 if (lttng_ust_comm_should_quit) {
784 ust_unlock();
785 goto quit;
786 }
787
788 if (sock_info->socket != -1) {
789 ret = ustcomm_close_unix_sock(sock_info->socket);
790 if (ret) {
791 ERR("Error closing %s apps socket", sock_info->name);
792 }
793 sock_info->socket = -1;
794 }
795
796 /* Register */
797 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
798 if (ret < 0) {
799 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
800 prev_connect_failed = 1;
801 /*
802 * If we cannot find the sessiond daemon, don't delay
803 * constructor execution.
804 */
805 ret = handle_register_done(sock_info);
806 assert(!ret);
807 ust_unlock();
808 goto restart;
809 }
810
811 sock_info->socket = sock = ret;
812
813 /*
814 * Create only one root handle per listener thread for the whole
815 * process lifetime.
816 */
817 if (sock_info->root_handle == -1) {
818 ret = lttng_abi_create_root_handle();
819 if (ret < 0) {
820 ERR("Error creating root handle");
821 ust_unlock();
822 goto quit;
823 }
824 sock_info->root_handle = ret;
825 }
826
827 ret = register_app_to_sessiond(sock);
828 if (ret < 0) {
829 ERR("Error registering to %s apps socket", sock_info->name);
830 prev_connect_failed = 1;
831 /*
832 * If we cannot register to the sessiond daemon, don't
833 * delay constructor execution.
834 */
835 ret = handle_register_done(sock_info);
836 assert(!ret);
837 ust_unlock();
838 goto restart;
839 }
840 ust_unlock();
841
842 for (;;) {
843 ssize_t len;
844 struct ustcomm_ust_msg lum;
845
846 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
847 switch (len) {
848 case 0: /* orderly shutdown */
849 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
850 ust_lock();
851 /*
852 * Either sessiond has shut down or has refused us by closing the socket.
853 * In either case, we don't want to delay constructor execution,
854 * and we need to wait before retrying.
855 */
856 prev_connect_failed = 1;
857 /*
858 * If we cannot register to the sessiond daemon, don't
859 * delay constructor execution.
860 */
861 ret = handle_register_done(sock_info);
862 assert(!ret);
863 ust_unlock();
864 goto end;
865 case sizeof(lum):
866 DBG("message received\n");
867 ret = handle_message(sock_info, sock, &lum);
868 if (ret < 0) {
869 ERR("Error handling message for %s socket", sock_info->name);
870 }
871 continue;
872 case -1:
873 DBG("Receive failed from lttng-sessiond with errno %d", errno);
874 if (errno == ECONNRESET) {
875 ERR("%s remote end closed connection\n", sock_info->name);
876 goto end;
877 }
878 goto end;
879 default:
880 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
881 continue;
882 }
883
884 }
885 end:
886 goto restart; /* try to reconnect */
887 quit:
888 return NULL;
889 }
890
891 /*
892 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
893 */
894 static
895 int get_timeout(struct timespec *constructor_timeout)
896 {
897 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
898 char *str_delay;
899 int ret;
900
901 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
902 if (str_delay) {
903 constructor_delay_ms = strtol(str_delay, NULL, 10);
904 }
905
906 switch (constructor_delay_ms) {
907 case -1:/* fall-through */
908 case 0:
909 return constructor_delay_ms;
910 default:
911 break;
912 }
913
914 /*
915 * If we are unable to find the current time, don't wait.
916 */
917 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
918 if (ret) {
919 return -1;
920 }
921 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
922 constructor_timeout->tv_nsec +=
923 (constructor_delay_ms % 1000UL) * 1000000UL;
924 if (constructor_timeout->tv_nsec >= 1000000000UL) {
925 constructor_timeout->tv_sec++;
926 constructor_timeout->tv_nsec -= 1000000000UL;
927 }
928 return 1;
929 }
930
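/*
 * Illustrative usage note (not part of the upstream file), matching
 * the parsing above; LTTNG_UST_REGISTER_TIMEOUT is in milliseconds:
 *
 *	LTTNG_UST_REGISTER_TIMEOUT=3000 ./app	wait up to 3 s for sessiond
 *	LTTNG_UST_REGISTER_TIMEOUT=0 ./app	do not wait at registration
 *	LTTNG_UST_REGISTER_TIMEOUT=-1 ./app	wait until registration is done
 */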
931 /*
932 * sessiond monitoring thread: monitor presence of global and per-user
933 * sessiond by polling the application common named pipe.
934 */
935 /* TODO */
936
937 void __attribute__((constructor)) lttng_ust_init(void)
938 {
939 struct timespec constructor_timeout;
940 sigset_t sig_all_blocked, orig_parent_mask;
941 pthread_attr_t thread_attr;
942 int timeout_mode;
943 int ret;
944
945 if (uatomic_xchg(&initialized, 1) == 1)
946 return;
947
948 /*
949 * Fixup the interdependency between the TLS fixup mutex (which happens
950 * to be the dynamic linker mutex) and ust_lock: perform the TLS
951 * fixups before the ust lock is ever taken.
952 */
953 lttng_fixup_event_tls();
954 lttng_fixup_ringbuffer_tls();
955 lttng_fixup_vtid_tls();
956 lttng_fixup_nest_count_tls();
957 lttng_fixup_procname_tls();
958
959 /*
960 * We want precise control over the order in which we construct
961 * our sub-libraries vs starting to receive commands from
962 * sessiond (otherwise, commands handled before the init
963 * functions have completed would lead to errors).
964 */
965 init_usterr();
966 init_tracepoint();
967 ltt_ring_buffer_metadata_client_init();
968 ltt_ring_buffer_client_overwrite_init();
969 ltt_ring_buffer_client_discard_init();
970
971 timeout_mode = get_timeout(&constructor_timeout);
972
973 ret = sem_init(&constructor_wait, 0, 0);
974 assert(!ret);
975
976 ret = setup_local_apps();
977 if (ret) {
978 DBG("local apps setup returned %d", ret);
979 }
980
981 /* A new thread created by pthread_create inherits the signal mask
982 * from the parent. To avoid any signal being received by the
983 * listener thread, we block all signals temporarily in the parent,
984 * while we create the listener thread.
985 */
986 sigfillset(&sig_all_blocked);
987 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
988 if (ret) {
989 ERR("pthread_sigmask: %s", strerror(ret));
990 }
991
992 ret = pthread_attr_init(&thread_attr);
993 if (ret) {
994 ERR("pthread_attr_init: %s", strerror(ret));
995 }
996 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
997 if (ret) {
998 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
999 }
1000
1001 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
1002 ust_listener_thread, &global_apps);
1003 if (ret) {
1004 ERR("pthread_create global: %s", strerror(ret));
1005 }
1006 if (local_apps.allowed) {
1007 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
1008 ust_listener_thread, &local_apps);
1009 if (ret) {
1010 ERR("pthread_create local: %s", strerror(ret));
1011 }
1012 } else {
1013 handle_register_done(&local_apps);
1014 }
1015 ret = pthread_attr_destroy(&thread_attr);
1016 if (ret) {
1017 ERR("pthread_attr_destroy: %s", strerror(ret));
1018 }
1019
1020 /* Restore original signal mask in parent */
1021 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1022 if (ret) {
1023 ERR("pthread_sigmask: %s", strerror(ret));
1024 }
1025
1026 switch (timeout_mode) {
1027 case 1: /* timeout wait */
1028 do {
1029 ret = sem_timedwait(&constructor_wait,
1030 &constructor_timeout);
1031 } while (ret < 0 && errno == EINTR);
1032 if (ret < 0 && errno == ETIMEDOUT) {
1033 ERR("Timed out waiting for ltt-sessiond");
1034 } else {
1035 assert(!ret);
1036 }
1037 break;
1038 case -1:/* wait forever */
1039 do {
1040 ret = sem_wait(&constructor_wait);
1041 } while (ret < 0 && errno == EINTR);
1042 assert(!ret);
1043 break;
1044 case 0: /* no timeout */
1045 break;
1046 }
1047 }
1048
1049 static
1050 void lttng_ust_cleanup(int exiting)
1051 {
1052 cleanup_sock_info(&global_apps, exiting);
1053 if (local_apps.allowed) {
1054 cleanup_sock_info(&local_apps, exiting);
1055 }
1056 /*
1057 * The teardowns in this function all affect data structures
1058 * accessed under the UST lock by the listener threads. This
1059 * lock, along with the lttng_ust_comm_should_quit flag, ensures
1060 * that none of these threads are accessing this data at this
1061 * point.
1062 */
1063 lttng_ust_abi_exit();
1064 lttng_ust_events_exit();
1065 ltt_ring_buffer_client_discard_exit();
1066 ltt_ring_buffer_client_overwrite_exit();
1067 ltt_ring_buffer_metadata_client_exit();
1068 exit_tracepoint();
1069 if (!exiting) {
1070 /* Reinitialize values for fork */
1071 sem_count = 2;
1072 lttng_ust_comm_should_quit = 0;
1073 initialized = 0;
1074 }
1075 }
1076
1077 void __attribute__((destructor)) lttng_ust_exit(void)
1078 {
1079 int ret;
1080
1081 /*
1082 * Using pthread_cancel here because:
1083 * A) we don't want to hang application teardown.
1084 * B) the thread is not allocating any resource.
1085 */
1086
1087 /*
1088 * Require the communication thread to quit. Synchronize with
1089 * mutexes to ensure it is not in a mutex critical section when
1090 * pthread_cancel is later called.
1091 */
1092 ust_lock();
1093 lttng_ust_comm_should_quit = 1;
1094 ust_unlock();
1095
1096 /* cancel threads */
1097 ret = pthread_cancel(global_apps.ust_listener);
1098 if (ret) {
1099 ERR("Error cancelling global ust listener thread: %s",
1100 strerror(ret));
1101 }
1102 if (local_apps.allowed) {
1103 ret = pthread_cancel(local_apps.ust_listener);
1104 if (ret) {
1105 ERR("Error cancelling local ust listener thread: %s",
1106 strerror(ret));
1107 }
1108 }
1109 /*
1110 * Do NOT join threads: use of sys_futex makes it impossible to
1111 * join the threads without using async-cancel, but async-cancel
1112 * is delivered by a signal, which could hit the target thread
1113 * anywhere in its code path, including while the ust_lock() is
1114 * held, causing a deadlock for the other thread. Let the OS
1115 * clean up the threads if they are stalled in a syscall.
1116 */
1117 lttng_ust_cleanup(1);
1118 }
1119
1120 /*
1121 * We exclude the worker threads across fork and clone (except
1122 * CLONE_VM), because these system calls only keep the forking thread
1123 * running in the child. Therefore, we don't want to call fork or clone
1124 * in the middle of a tracepoint or ust tracing state modification.
1125 * Holding this mutex protects these structures across fork and clone.
1126 */
1127 void ust_before_fork(sigset_t *save_sigset)
1128 {
1129 /*
1130 * Disable signals. This avoids having the child intervene
1131 * before it is properly set up for tracing. It is safer to
1132 * disable all signals, because then we know we are not breaking
1133 * anything by restoring the original mask.
1134 */
1135 sigset_t all_sigs;
1136 int ret;
1137
1138 if (lttng_ust_nest_count)
1139 return;
1140 /* Disable signals */
1141 sigfillset(&all_sigs);
1142 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1143 if (ret == -1) {
1144 PERROR("sigprocmask");
1145 }
1146 ust_lock();
1147 rcu_bp_before_fork();
1148 }
1149
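/*
 * Illustrative sketch (not part of the upstream file): a fork()
 * wrapper, such as the one provided by the separate liblttng-ust-fork
 * preload library, is expected to use the hooks in this file roughly
 * as follows:
 *
 *	pid_t traced_fork(void)
 *	{
 *		sigset_t sigset;
 *		pid_t pid;
 *
 *		ust_before_fork(&sigset);
 *		pid = fork();
 *		if (pid == 0)
 *			ust_after_fork_child(&sigset);
 *		else
 *			ust_after_fork_parent(&sigset);
 *		return pid;
 *	}
 */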
1150 static void ust_after_fork_common(sigset_t *restore_sigset)
1151 {
1152 int ret;
1153
1154 DBG("process %d", getpid());
1155 ust_unlock();
1156 /* Restore signals */
1157 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1158 if (ret == -1) {
1159 PERROR("sigprocmask");
1160 }
1161 }
1162
1163 void ust_after_fork_parent(sigset_t *restore_sigset)
1164 {
1165 if (lttng_ust_nest_count)
1166 return;
1167 DBG("process %d", getpid());
1168 rcu_bp_after_fork_parent();
1169 /* Release mutexes and reenable signals */
1170 ust_after_fork_common(restore_sigset);
1171 }
1172
1173 /*
1174 * After fork, in the child, we need to clean up all the leftover state,
1175 * except the worker threads, which have already disappeared because of
1176 * Linux fork semantics (only the forking thread survives in the child).
1177 * After tidying up, we call lttng_ust_init() again to start over as a new PID.
1178 *
1179 * This is meant for forks that trace in the child between the fork and
1180 * the following exec call (if there is one).
1181 */
1182 void ust_after_fork_child(sigset_t *restore_sigset)
1183 {
1184 if (lttng_ust_nest_count)
1185 return;
1186 DBG("process %d", getpid());
1187 /* Release urcu mutexes */
1188 rcu_bp_after_fork_child();
1189 lttng_ust_cleanup(0);
1190 lttng_context_vtid_reset();
1191 /* Release mutexes and reenable signals */
1192 ust_after_fork_common(restore_sigset);
1193 lttng_ust_init();
1194 }