Fix: perform TLS fixup of nest count outside of UST mutex
[lttng-ust.git] liblttng-ust/lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <ust-comm.h>
45 #include <usterr-signal-safe.h>
46 #include "tracepoint-internal.h"
47 #include "ltt-tracer-core.h"
48 #include "compat.h"
49 #include "../libringbuffer/tlsfixup.h"
50
51 /*
52 * Has the lttng-ust comm constructor been called?
53 */
54 static int initialized;
55
56 /*
57 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
58 * Held when handling a command, also held by fork() to deal with
59 * removal of threads, and by exit path.
60 */
61
62 /* Should the ust comm thread quit? */
63 static int lttng_ust_comm_should_quit;
64
65 /*
66 * Wait for any of these before continuing to the main
67 * program:
68 * - the register_done message from sessiond daemon
69 * (will let the sessiond daemon enable sessions before main
70 * starts.)
71 * - sessiond daemon is not reachable.
72 * - timeout (ensuring applications are resilient to session
73 * daemon problems).
74 */
75 static sem_t constructor_wait;
76 /*
77 * Doing this for both the global and local sessiond.
78 */
79 static int sem_count = { 2 };
80
81 /*
82 * Counting nesting within lttng-ust. Used to ensure that calling fork()
83 * from liblttng-ust does not execute the pre/post fork handlers.
84 */
85 static int __thread lttng_ust_nest_count;
86
87 /*
88 * Info about socket and associated listener thread.
89 */
90 struct sock_info {
91 const char *name;
92 pthread_t ust_listener; /* listener thread */
93 int root_handle;
94 int constructor_sem_posted;
95 int allowed;
96 int global;
97
98 char sock_path[PATH_MAX];
99 int socket;
100
101 char wait_shm_path[PATH_MAX];
102 char *wait_shm_mmap;
103 };
104
105 /* Socket from app (connect) to session daemon (listen) for communication */
106 struct sock_info global_apps = {
107 .name = "global",
108 .global = 1,
109
110 .root_handle = -1,
111 .allowed = 1,
112
113 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
114 .socket = -1,
115
116 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
117 };
118
119 /* TODO: allow global_apps_sock_path override */
120
121 struct sock_info local_apps = {
122 .name = "local",
123 .global = 0,
124 .root_handle = -1,
125 .allowed = 0, /* Check setuid bit first */
126
127 .socket = -1,
128 };
129
130 static int wait_poll_fallback;
131
132 extern void ltt_ring_buffer_client_overwrite_init(void);
133 extern void ltt_ring_buffer_client_discard_init(void);
134 extern void ltt_ring_buffer_metadata_client_init(void);
135 extern void ltt_ring_buffer_client_overwrite_exit(void);
136 extern void ltt_ring_buffer_client_discard_exit(void);
137 extern void ltt_ring_buffer_metadata_client_exit(void);
138
139 /*
140 * Force a read (implying a TLS fixup for dlopen) of TLS variables.
141 */
142 static
143 void lttng_fixup_nest_count_tls(void)
144 {
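/*
 * The empty asm with an "m" input forces the compiler to take the
 * address of the TLS variable, so the lazy TLS allocation for this
 * thread happens right here rather than later under the UST mutex
 * (see the lock-ordering note in lttng_ust_init()).
 */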
145 asm volatile ("" : : "m" (lttng_ust_nest_count));
146 }
147
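/*
 * Resolve the per-user ("local") sessiond socket and wait-shm paths
 * from $HOME and the real uid. Per-user tracing is disallowed for
 * setuid binaries (real uid != effective uid).
 */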
148 static
149 int setup_local_apps(void)
150 {
151 const char *home_dir;
152 uid_t uid;
153
154 uid = getuid();
155 /*
156 * Disallow per-user tracing for setuid binaries.
157 */
158 if (uid != geteuid()) {
159 local_apps.allowed = 0;
160 return 0;
161 } else {
162 local_apps.allowed = 1;
163 }
164 home_dir = (const char *) getenv("HOME");
165 if (!home_dir)
166 return -ENOENT;
167 snprintf(local_apps.sock_path, PATH_MAX,
168 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
169 snprintf(local_apps.wait_shm_path, PATH_MAX,
170 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
171 return 0;
172 }
173
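/*
 * Send the one-shot registration record to the session daemon over the
 * connected UNIX socket. The sessiond reads this as a raw struct, so
 * field order and sizes must match on both sides.
 */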
174 static
175 int register_app_to_sessiond(int socket)
176 {
177 ssize_t ret;
178 struct {
179 uint32_t major;
180 uint32_t minor;
181 pid_t pid;
182 pid_t ppid;
183 uid_t uid;
184 gid_t gid;
185 uint32_t bits_per_long;
186 char name[16]; /* process name */
187 } reg_msg;
188
189 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
190 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
191 reg_msg.pid = getpid();
192 reg_msg.ppid = getppid();
193 reg_msg.uid = getuid();
194 reg_msg.gid = getgid();
195 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
196 lttng_ust_getprocname(reg_msg.name);
197
198 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
199 if (ret >= 0 && ret != sizeof(reg_msg))
200 return -EIO;
201 return ret;
202 }
203
204 static
205 int send_reply(int sock, struct ustcomm_ust_reply *lur)
206 {
207 ssize_t len;
208
209 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
210 switch (len) {
211 case sizeof(*lur):
212 DBG("message successfully sent");
213 return 0;
214 case -1:
215 if (errno == ECONNRESET) {
216 printf("remote end closed connection\n");
217 return 0;
218 }
219 return -1;
220 default:
221 printf("incorrect message size: %zd\n", len);
222 return -1;
223 }
224 }
225
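/*
 * Called when the sessiond sends LTTNG_UST_REGISTER_DONE, or when we
 * give up on reaching it. Decrement sem_count at most once per
 * sock_info; once both the global and local registrations are
 * accounted for, post constructor_wait so the constructor can return.
 */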
226 static
227 int handle_register_done(struct sock_info *sock_info)
228 {
229 int ret;
230
231 if (sock_info->constructor_sem_posted)
232 return 0;
233 sock_info->constructor_sem_posted = 1;
234 if (uatomic_read(&sem_count) <= 0) {
235 return 0;
236 }
237 ret = uatomic_add_return(&sem_count, -1);
238 if (ret == 0) {
239 ret = sem_post(&constructor_wait);
240 assert(!ret);
241 }
242 return 0;
243 }
244
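/*
 * Process one command received from the sessiond: look up the object
 * ops for lum->handle, dispatch the command under ust_lock(), then send
 * the reply. Stream, channel and metadata commands additionally pass
 * the shm and wait file descriptors back over the socket.
 */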
245 static
246 int handle_message(struct sock_info *sock_info,
247 int sock, struct ustcomm_ust_msg *lum)
248 {
249 int ret = 0;
250 const struct lttng_ust_objd_ops *ops;
251 struct ustcomm_ust_reply lur;
252 int shm_fd, wait_fd;
253 union ust_args args;
254 ssize_t len;
255
256 ust_lock();
257
258 memset(&lur, 0, sizeof(lur));
259
260 if (lttng_ust_comm_should_quit) {
261 ret = -EPERM;
262 goto end;
263 }
264
265 ops = objd_ops(lum->handle);
266 if (!ops) {
267 ret = -ENOENT;
268 goto end;
269 }
270
271 switch (lum->cmd) {
272 case LTTNG_UST_REGISTER_DONE:
273 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
274 ret = handle_register_done(sock_info);
275 else
276 ret = -EINVAL;
277 break;
278 case LTTNG_UST_RELEASE:
279 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
280 ret = -EPERM;
281 else
282 ret = lttng_ust_objd_unref(lum->handle);
283 break;
284 default:
285 if (ops->cmd)
286 ret = ops->cmd(lum->handle, lum->cmd,
287 (unsigned long) &lum->u,
288 &args);
289 else
290 ret = -ENOSYS;
291 break;
292 }
293
294 end:
295 lur.handle = lum->handle;
296 lur.cmd = lum->cmd;
297 lur.ret_val = ret;
298 if (ret >= 0) {
299 lur.ret_code = USTCOMM_OK;
300 } else {
301 //lur.ret_code = USTCOMM_SESSION_FAIL;
302 lur.ret_code = ret;
303 }
304 if (ret >= 0) {
305 switch (lum->cmd) {
306 case LTTNG_UST_STREAM:
307 /*
308 * Special-case reply to send stream info.
309 * Use lum.u output.
310 */
311 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
312 shm_fd = *args.stream.shm_fd;
313 wait_fd = *args.stream.wait_fd;
314 break;
315 case LTTNG_UST_METADATA:
316 case LTTNG_UST_CHANNEL:
317 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
318 shm_fd = *args.channel.shm_fd;
319 wait_fd = *args.channel.wait_fd;
320 break;
321 case LTTNG_UST_TRACER_VERSION:
322 lur.u.version = lum->u.version;
323 break;
324 case LTTNG_UST_TRACEPOINT_LIST_GET:
325 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
326 break;
327 }
328 }
329 ret = send_reply(sock, &lur);
330 if (ret < 0) {
331 perror("error sending reply");
332 goto error;
333 }
334
335 if ((lum->cmd == LTTNG_UST_STREAM
336 || lum->cmd == LTTNG_UST_CHANNEL
337 || lum->cmd == LTTNG_UST_METADATA)
338 && lur.ret_code == USTCOMM_OK) {
339 int sendret = 0;
340
341 /* we also need to send the file descriptors. */
342 ret = ustcomm_send_fds_unix_sock(sock,
343 &shm_fd, &shm_fd,
344 1, sizeof(int));
345 if (ret < 0) {
346 perror("send shm_fd");
347 sendret = ret;
348 }
349 /*
350 * The sessiond expects 2 file descriptors, even upon
351 * error.
352 */
353 ret = ustcomm_send_fds_unix_sock(sock,
354 &wait_fd, &wait_fd,
355 1, sizeof(int));
356 if (ret < 0) {
357 perror("send wait_fd");
358 goto error;
359 }
360 if (sendret) {
361 ret = sendret;
362 goto error;
363 }
364 }
365 /*
366 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
367 * after the reply.
368 */
369 if (lur.ret_code == USTCOMM_OK) {
370 switch (lum->cmd) {
371 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
372 len = ustcomm_send_unix_sock(sock,
373 &args.field_list.entry,
374 sizeof(args.field_list.entry));
375 if (len != sizeof(args.field_list.entry)) {
376 ret = -1;
377 goto error;
378 }
379 }
380 }
381 /*
382 * We still have the memory map reference, and the fds have been
383 * sent to the sessiond. We can therefore close those fds. Note
384 * that we keep the write side of the wait_fd open, but close
385 * the read side.
386 */
387 if (lur.ret_code == USTCOMM_OK) {
388 switch (lum->cmd) {
389 case LTTNG_UST_STREAM:
390 if (shm_fd >= 0) {
391 ret = close(shm_fd);
392 if (ret) {
393 PERROR("Error closing stream shm_fd");
394 }
395 *args.stream.shm_fd = -1;
396 }
397 if (wait_fd >= 0) {
398 ret = close(wait_fd);
399 if (ret) {
400 PERROR("Error closing stream wait_fd");
401 }
402 *args.stream.wait_fd = -1;
403 }
404 break;
405 case LTTNG_UST_METADATA:
406 case LTTNG_UST_CHANNEL:
407 if (shm_fd >= 0) {
408 ret = close(shm_fd);
409 if (ret) {
410 PERROR("Error closing channel shm_fd");
411 }
412 *args.channel.shm_fd = -1;
413 }
414 if (wait_fd >= 0) {
415 ret = close(wait_fd);
416 if (ret) {
417 PERROR("Error closing channel wait_fd");
418 }
419 *args.channel.wait_fd = -1;
420 }
421 break;
422 }
423 }
424
425 error:
426 ust_unlock();
427 return ret;
428 }
429
430 static
431 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
432 {
433 int ret;
434
435 if (sock_info->socket != -1) {
436 ret = ustcomm_close_unix_sock(sock_info->socket);
437 if (ret) {
438 ERR("Error closing apps socket");
439 }
440 sock_info->socket = -1;
441 }
442 if (sock_info->root_handle != -1) {
443 ret = lttng_ust_objd_unref(sock_info->root_handle);
444 if (ret) {
445 ERR("Error unref root handle");
446 }
447 sock_info->root_handle = -1;
448 }
449 sock_info->constructor_sem_posted = 0;
450 /*
451 * wait_shm_mmap is used by listener threads outside of the
452 * ust lock, so we cannot tear it down ourselves, because we
453 * cannot join on these threads. Leave this task to the OS
454 * process exit.
455 */
456 if (!exiting && sock_info->wait_shm_mmap) {
457 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
458 if (ret) {
459 ERR("Error unmapping wait shm");
460 }
461 sock_info->wait_shm_mmap = NULL;
462 }
463 }
464
465 /*
466 * Using fork to set the umask in a child process, because the umask is process-wide (changing it is not multi-thread safe).
467 * We deal with the shm_open vs ftruncate race (happening when the
468 * sessiond owns the shm and does not let everybody modify it, to ensure
469 * safety against shm_unlink) by simply letting the mmap fail and
470 * retrying after a few seconds.
471 * For global shm, everybody has rw access to it until the sessiond
472 * starts.
473 */
474 static
475 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
476 {
477 int wait_shm_fd, ret;
478 pid_t pid;
479
480 /*
481 * Try to open read-only.
482 */
483 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
484 if (wait_shm_fd >= 0) {
485 goto end;
486 } else if (wait_shm_fd < 0 && errno != ENOENT) {
487 /*
488 * Read-only open did not work, and it's not because the
489 * entry was not present. It's a failure that prohibits
490 * using shm.
491 */
492 ERR("Error opening shm %s", sock_info->wait_shm_path);
493 goto end;
494 }
495 /*
496 * If the open failed because the file did not exist, try
497 * creating it ourselves.
498 */
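/*
 * Bump lttng_ust_nest_count around this internal fork() so that the
 * ust_before_fork()/ust_after_fork_*() handlers are skipped for it
 * (they return early when the nest count is non-zero).
 */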
499 lttng_ust_nest_count++;
500 pid = fork();
501 lttng_ust_nest_count--;
502 if (pid > 0) {
503 int status;
504
505 /*
506 * Parent: wait for child to return, in which case the
507 * shared memory map will have been created.
508 */
509 pid = wait(&status);
510 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
511 wait_shm_fd = -1;
512 goto end;
513 }
514 /*
515 * Try to open read-only again after creation.
516 */
517 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
518 if (wait_shm_fd < 0) {
519 /*
520 * Read-only open did not work. It's a failure
521 * that prohibits using shm.
522 */
523 ERR("Error opening shm %s", sock_info->wait_shm_path);
524 goto end;
525 }
526 goto end;
527 } else if (pid == 0) {
528 int create_mode;
529
530 /* Child */
531 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
532 if (sock_info->global)
533 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
534 /*
535 * We're alone in a child process, so we can modify the
536 * process-wide umask.
537 */
538 umask(~create_mode);
539 /*
540 * Try creating shm (or get rw access).
541 * We don't do an exclusive open, because we allow other
542 * processes to create+ftruncate it concurrently.
543 */
544 wait_shm_fd = shm_open(sock_info->wait_shm_path,
545 O_RDWR | O_CREAT, create_mode);
546 if (wait_shm_fd >= 0) {
547 ret = ftruncate(wait_shm_fd, mmap_size);
548 if (ret) {
549 PERROR("ftruncate");
550 exit(EXIT_FAILURE);
551 }
552 exit(EXIT_SUCCESS);
553 }
554 /*
555 * For local shm, we need to have rw access to accept
556 * opening it: this means the local sessiond will be
557 * able to wake us up. For global shm, we open it even
558 * if rw access is not granted, because the root.root
559 * sessiond will be able to override all rights and wake
560 * us up.
561 */
562 if (!sock_info->global && errno != EACCES) {
563 ERR("Error opening shm %s", sock_info->wait_shm_path);
564 exit(EXIT_FAILURE);
565 }
566 /*
567 * The shm exists, but we cannot open it RW. Report
568 * success.
569 */
570 exit(EXIT_SUCCESS);
571 } else {
572 return -1;
573 }
574 end:
575 if (wait_shm_fd >= 0 && !sock_info->global) {
576 struct stat statbuf;
577
578 /*
579 * Ensure that our user is the owner of the shm file for
580 * local shm. If we do not own the file, it means our
581 * sessiond will not have access to wake us up (there is
582 * probably a rogue process trying to fake our
583 * sessiond). Fallback to polling method in this case.
584 */
585 ret = fstat(wait_shm_fd, &statbuf);
586 if (ret) {
587 PERROR("fstat");
588 goto error_close;
589 }
590 if (statbuf.st_uid != getuid())
591 goto error_close;
592 }
593 return wait_shm_fd;
594
595 error_close:
596 ret = close(wait_shm_fd);
597 if (ret) {
598 PERROR("Error closing fd");
599 }
600 return -1;
601 }
602
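/*
 * Map one page of the wait shm read-only. The fd is closed right after
 * mmap(): the mapping keeps its own reference to the shm object.
 */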
603 static
604 char *get_map_shm(struct sock_info *sock_info)
605 {
606 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
607 int wait_shm_fd, ret;
608 char *wait_shm_mmap;
609
610 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
611 if (wait_shm_fd < 0) {
612 goto error;
613 }
614 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
615 MAP_SHARED, wait_shm_fd, 0);
616 /* close shm fd immediately after taking the mmap reference */
617 ret = close(wait_shm_fd);
618 if (ret) {
619 PERROR("Error closing fd");
620 }
621 if (wait_shm_mmap == MAP_FAILED) {
622 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
623 goto error;
624 }
625 return wait_shm_mmap;
626
627 error:
628 return NULL;
629 }
630
631 static
632 void wait_for_sessiond(struct sock_info *sock_info)
633 {
634 int ret;
635
636 ust_lock();
637 if (lttng_ust_comm_should_quit) {
638 goto quit;
639 }
640 if (wait_poll_fallback) {
641 goto error;
642 }
643 if (!sock_info->wait_shm_mmap) {
644 sock_info->wait_shm_mmap = get_map_shm(sock_info);
645 if (!sock_info->wait_shm_mmap)
646 goto error;
647 }
648 ust_unlock();
649
650 DBG("Waiting for %s apps sessiond", sock_info->name);
651 /* Wait for futex wakeup */
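/*
 * Block until the sessiond wakes us with FUTEX_WAKE on this
 * shared-memory word; skip the wait if the word is already non-zero.
 * If the kernel refuses FUTEX_WAIT on a read-only mapping (EFAULT),
 * switch to the polling fallback.
 */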
652 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
653 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
654 FUTEX_WAIT, 0, NULL, NULL, 0);
655 if (ret < 0) {
656 if (errno == EFAULT) {
657 wait_poll_fallback = 1;
658 DBG(
659 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
660 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
661 "Please upgrade your kernel "
662 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
663 "mainline). LTTng-UST will use polling mode fallback.");
664 if (ust_debug())
665 PERROR("futex");
666 }
667 }
668 }
669 return;
670
671 quit:
672 ust_unlock();
673 return;
674
675 error:
676 ust_unlock();
677 return;
678 }
679
680 /*
681 * This thread does not allocate any resource, except within
682 * handle_message, within mutex protection. This mutex protects against
683 * fork and exit.
684 * The other time it allocates resources is at socket connection, which
685 * is also protected by the mutex.
686 */
687 static
688 void *ust_listener_thread(void *arg)
689 {
690 struct sock_info *sock_info = arg;
691 int sock, ret, prev_connect_failed = 0, has_waited = 0;
692
693 /* Restart trying to connect to the session daemon */
694 restart:
695 if (prev_connect_failed) {
696 /* Wait for sessiond availability with pipe */
697 wait_for_sessiond(sock_info);
698 if (has_waited) {
699 has_waited = 0;
700 /*
701 * Sleep for 5 seconds before retrying after a
702 * sequence of failure / wait / failure. This
703 * deals with a killed or broken session daemon.
704 */
705 sleep(5);
706 }
707 has_waited = 1;
708 prev_connect_failed = 0;
709 }
710 ust_lock();
711
712 if (lttng_ust_comm_should_quit) {
713 ust_unlock();
714 goto quit;
715 }
716
717 if (sock_info->socket != -1) {
718 ret = ustcomm_close_unix_sock(sock_info->socket);
719 if (ret) {
720 ERR("Error closing %s apps socket", sock_info->name);
721 }
722 sock_info->socket = -1;
723 }
724
725 /* Register */
726 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
727 if (ret < 0) {
728 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
729 prev_connect_failed = 1;
730 /*
731 * If we cannot find the sessiond daemon, don't delay
732 * constructor execution.
733 */
734 ret = handle_register_done(sock_info);
735 assert(!ret);
736 ust_unlock();
737 goto restart;
738 }
739
740 sock_info->socket = sock = ret;
741
742 /*
743 * Create only one root handle per listener thread for the whole
744 * process lifetime.
745 */
746 if (sock_info->root_handle == -1) {
747 ret = lttng_abi_create_root_handle();
748 if (ret < 0) {
749 ERR("Error creating root handle");
750 ust_unlock();
751 goto quit;
752 }
753 sock_info->root_handle = ret;
754 }
755
756 ret = register_app_to_sessiond(sock);
757 if (ret < 0) {
758 ERR("Error registering to %s apps socket", sock_info->name);
759 prev_connect_failed = 1;
760 /*
761 * If we cannot register to the sessiond daemon, don't
762 * delay constructor execution.
763 */
764 ret = handle_register_done(sock_info);
765 assert(!ret);
766 ust_unlock();
767 goto restart;
768 }
769 ust_unlock();
770
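/*
 * Command loop: block on the socket for the next message from the
 * sessiond and dispatch it. Leave the loop (and reconnect) on orderly
 * shutdown or socket error.
 */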
771 for (;;) {
772 ssize_t len;
773 struct ustcomm_ust_msg lum;
774
775 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
776 switch (len) {
777 case 0: /* orderly shutdown */
778 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
779 ust_lock();
780 /*
781 * Either the sessiond has shut down or refused us by closing the socket.
782 * In either case, we don't want to delay constructor execution,
783 * and we need to wait before retrying.
784 */
785 prev_connect_failed = 1;
786 /*
787 * If we cannot register to the sessiond daemon, don't
788 * delay constructor execution.
789 */
790 ret = handle_register_done(sock_info);
791 assert(!ret);
792 ust_unlock();
793 goto end;
794 case sizeof(lum):
795 DBG("message received\n");
796 ret = handle_message(sock_info, sock, &lum);
797 if (ret < 0) {
798 ERR("Error handling message for %s socket", sock_info->name);
799 }
800 continue;
801 case -1:
802 DBG("Receive failed from lttng-sessiond with errno %d", errno);
803 if (errno == ECONNRESET) {
804 ERR("%s remote end closed connection\n", sock_info->name);
805 goto end;
806 }
807 goto end;
808 default:
809 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
810 continue;
811 }
812
813 }
814 end:
815 goto restart; /* try to reconnect */
816 quit:
817 return NULL;
818 }
819
820 /*
821 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
822 */
823 static
824 int get_timeout(struct timespec *constructor_timeout)
825 {
826 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
827 char *str_delay;
828 int ret;
829
830 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
831 if (str_delay) {
832 constructor_delay_ms = strtol(str_delay, NULL, 10);
833 }
834
835 switch (constructor_delay_ms) {
836 case -1:/* fall-through */
837 case 0:
838 return constructor_delay_ms;
839 default:
840 break;
841 }
842
843 /*
844 * If we are unable to find the current time, don't wait.
845 */
846 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
847 if (ret) {
848 return 0;
849 }
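/*
 * Add the millisecond delay to the current time, carrying any
 * nanosecond overflow into the seconds field.
 */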
850 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
851 constructor_timeout->tv_nsec +=
852 (constructor_delay_ms % 1000UL) * 1000000UL;
853 if (constructor_timeout->tv_nsec >= 1000000000UL) {
854 constructor_timeout->tv_sec++;
855 constructor_timeout->tv_nsec -= 1000000000UL;
856 }
857 return 1;
858 }
859
860 /*
861 * sessiond monitoring thread: monitor presence of global and per-user
862 * sessiond by polling the application common named pipe.
863 */
864 /* TODO */
865
866 void __attribute__((constructor)) lttng_ust_init(void)
867 {
868 struct timespec constructor_timeout;
869 sigset_t sig_all_blocked, orig_parent_mask;
870 int timeout_mode;
871 int ret;
872
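/*
 * Make initialization idempotent: lttng_ust_init() is also called
 * again from ust_after_fork_child(), after lttng_ust_cleanup() has
 * reset 'initialized' to 0.
 */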
873 if (uatomic_xchg(&initialized, 1) == 1)
874 return;
875
876 /*
877 * Fixup interdependency between TLS fixup mutex (which happens
878 * to be the dynamic linker mutex) and ust_lock, taken within
879 * the ust lock.
880 */
881 lttng_fixup_event_tls();
882 lttng_fixup_ringbuffer_tls();
883 lttng_fixup_vtid_tls();
884 lttng_fixup_nest_count_tls();
885
886 /*
887 * We want precise control over the order in which we construct
888 * our sub-libraries vs starting to receive commands from
889 * sessiond (otherwise leading to errors when trying to create
890 * sessiond before the init functions are completed).
891 */
892 init_usterr();
893 init_tracepoint();
894 ltt_ring_buffer_metadata_client_init();
895 ltt_ring_buffer_client_overwrite_init();
896 ltt_ring_buffer_client_discard_init();
897
898 timeout_mode = get_timeout(&constructor_timeout);
899
900 ret = sem_init(&constructor_wait, 0, 0);
901 assert(!ret);
902
903 ret = setup_local_apps();
904 if (ret) {
905 ERR("Error setting up to local apps");
906 }
907
908 /* A new thread created by pthread_create inherits the signal mask
909 * from the parent. To avoid any signal being received by the
910 * listener thread, we block all signals temporarily in the parent,
911 * while we create the listener thread.
912 */
913 sigfillset(&sig_all_blocked);
914 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
915 if (ret) {
916 ERR("pthread_sigmask: %s", strerror(ret));
917 }
918
919 ret = pthread_create(&global_apps.ust_listener, NULL,
920 ust_listener_thread, &global_apps);
921 if (ret) {
922 ERR("pthread_create global: %s", strerror(ret));
923 }
924 if (local_apps.allowed) {
925 ret = pthread_create(&local_apps.ust_listener, NULL,
926 ust_listener_thread, &local_apps);
927 if (ret) {
928 ERR("pthread_create local: %s", strerror(ret));
929 }
930 } else {
931 handle_register_done(&local_apps);
932 }
933
934 /* Restore original signal mask in parent */
935 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
936 if (ret) {
937 ERR("pthread_sigmask: %s", strerror(ret));
938 }
939
940 switch (timeout_mode) {
941 case 1: /* timeout wait */
942 do {
943 ret = sem_timedwait(&constructor_wait,
944 &constructor_timeout);
945 } while (ret < 0 && errno == EINTR);
946 if (ret < 0 && errno == ETIMEDOUT) {
947 ERR("Timed out waiting for ltt-sessiond");
948 } else {
949 assert(!ret);
950 }
951 break;
952 case -1:/* wait forever */
953 do {
954 ret = sem_wait(&constructor_wait);
955 } while (ret < 0 && errno == EINTR);
956 assert(!ret);
957 break;
958 case 0: /* no timeout */
959 break;
960 }
961 }
962
963 static
964 void lttng_ust_cleanup(int exiting)
965 {
966 cleanup_sock_info(&global_apps, exiting);
967 if (local_apps.allowed) {
968 cleanup_sock_info(&local_apps, exiting);
969 }
970 /*
971 * The teardown in this function affects data structures
972 * accessed under the UST lock by the listener threads. This
973 * lock, along with the lttng_ust_comm_should_quit flag, ensures
974 * that none of these threads are accessing the data at this
975 * point.
976 */
977 lttng_ust_abi_exit();
978 lttng_ust_events_exit();
979 ltt_ring_buffer_client_discard_exit();
980 ltt_ring_buffer_client_overwrite_exit();
981 ltt_ring_buffer_metadata_client_exit();
982 exit_tracepoint();
983 if (!exiting) {
984 /* Reinitialize values for fork */
985 sem_count = 2;
986 lttng_ust_comm_should_quit = 0;
987 initialized = 0;
988 }
989 }
990
991 void __attribute__((destructor)) lttng_ust_exit(void)
992 {
993 int ret;
994
995 /*
996 * Using pthread_cancel here because:
997 * A) we don't want to hang application teardown.
998 * B) the thread is not allocating any resource.
999 */
1000
1001 /*
1002 * Require the communication thread to quit. Synchronize with
1003 * mutexes to ensure it is not in a mutex critical section when
1004 * pthread_cancel is later called.
1005 */
1006 ust_lock();
1007 lttng_ust_comm_should_quit = 1;
1008 ust_unlock();
1009
1010 /* cancel threads */
1011 ret = pthread_cancel(global_apps.ust_listener);
1012 if (ret) {
1013 ERR("Error cancelling global ust listener thread: %s",
1014 strerror(ret));
1015 }
1016 if (local_apps.allowed) {
1017 ret = pthread_cancel(local_apps.ust_listener);
1018 if (ret) {
1019 ERR("Error cancelling local ust listener thread: %s",
1020 strerror(ret));
1021 }
1022 }
1023 /*
1024 * Do NOT join threads: use of sys_futex makes it impossible to
1025 * join the threads without using async-cancel, but async-cancel
1026 * is delivered by a signal, which could hit the target thread
1027 * anywhere in its code path, including while the ust_lock() is
1028 * held, causing a deadlock for the other thread. Let the OS
1029 * clean up the threads if they are stalled in a syscall.
1030 */
1031 lttng_ust_cleanup(1);
1032 }
1033
1034 /*
1035 * We exclude the worker threads across fork and clone (except
1036 * CLONE_VM), because these system calls only keep the forking thread
1037 * running in the child. Therefore, we don't want to call fork or clone
1038 * in the middle of a tracepoint or ust tracing state modification.
1039 * Holding this mutex protects these structures across fork and clone.
1040 */
1041 void ust_before_fork(sigset_t *save_sigset)
1042 {
1043 /*
1044 * Disable signals. This is to avoid that the child intervenes
1045 * before it is properly setup for tracing. It is safer to
1046 * disable all signals, because then we know we are not breaking
1047 * anything by restoring the original mask.
1048 */
1049 sigset_t all_sigs;
1050 int ret;
1051
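/*
 * Skip the fork handlers when fork() is called from within
 * liblttng-ust itself (lttng_ust_nest_count is then non-zero).
 */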
1052 if (lttng_ust_nest_count)
1053 return;
1054 /* Disable signals */
1055 sigfillset(&all_sigs);
1056 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1057 if (ret == -1) {
1058 PERROR("sigprocmask");
1059 }
1060 ust_lock();
1061 rcu_bp_before_fork();
1062 }
1063
1064 static void ust_after_fork_common(sigset_t *restore_sigset)
1065 {
1066 int ret;
1067
1068 DBG("process %d", getpid());
1069 ust_unlock();
1070 /* Restore signals */
1071 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1072 if (ret == -1) {
1073 PERROR("sigprocmask");
1074 }
1075 }
1076
1077 void ust_after_fork_parent(sigset_t *restore_sigset)
1078 {
1079 if (lttng_ust_nest_count)
1080 return;
1081 DBG("process %d", getpid());
1082 rcu_bp_after_fork_parent();
1083 /* Release mutexes and reenable signals */
1084 ust_after_fork_common(restore_sigset);
1085 }
1086
1087 /*
1088 * After fork, in the child, we need to cleanup all the leftover state,
1089 * except the worker thread which already magically disappeared thanks
1090 * to the weird Linux fork semantics. After tidying up, we call
1091 * lttng_ust_init() again to start over as a new PID.
1092 *
1093 * This is meant for fork() calls where tracing happens in the child
1094 * between the fork and the following exec call (if any).
1095 */
1096 void ust_after_fork_child(sigset_t *restore_sigset)
1097 {
1098 if (lttng_ust_nest_count)
1099 return;
1100 DBG("process %d", getpid());
1101 /* Release urcu mutexes */
1102 rcu_bp_after_fork_child();
1103 lttng_ust_cleanup(0);
1104 lttng_context_vtid_reset();
1105 /* Release mutexes and reenable signals */
1106 ust_after_fork_common(restore_sigset);
1107 lttng_ust_init();
1108 }