Fix: get_wait_shm() ust mutex deadlock (add 2 missing exit calls)
liblttng-ust/lttng-ust-comm.c
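The deadlock fixed by this commit involves the fork() performed in get_wait_shm(): wait_for_sessiond() calls it with the ust mutex held, so a forked child that returned into library code or ran exit handlers could end up in the lttng_ust_exit() destructor, which takes ust_lock() on a mutex inherited in its locked state, and hang. In the listing below, every child path of get_wait_shm() therefore terminates with _exit(), which skips atexit handlers and destructors. The stand-alone sketch that follows only illustrates this fork + shm_open/ftruncate + _exit pattern; it is not the library code, and the function name create_wait_shm(), the shm name "/example-wait-shm" and the simplified error handling are invented for the example (link with -lrt on older glibc).

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Create (or open) a wait shm object of 'size' bytes and return a
 * read-only fd, or -1 on error. The child performs the creation and
 * leaves through _exit() on every path, so no atexit()/destructor
 * code (which may take locks held across fork()) ever runs in it.
 */
static int create_wait_shm(const char *path, size_t size)
{
	pid_t pid;
	int status;

	pid = fork();
	if (pid == 0) {
		int fd;

		/* Child: create or open read-write, then size the object. */
		fd = shm_open(path, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (fd < 0)
			_exit(EXIT_FAILURE);	/* never plain exit() or return */
		if (ftruncate(fd, size) < 0)
			_exit(EXIT_FAILURE);
		_exit(EXIT_SUCCESS);
	}
	if (pid < 0)
		return -1;
	/* Parent: reap the child, then open the object read-only. */
	if (waitpid(pid, &status, 0) < 0 || !WIFEXITED(status)
			|| WEXITSTATUS(status) != EXIT_SUCCESS)
		return -1;
	return shm_open(path, O_RDONLY, 0);
}

int main(void)
{
	int fd = create_wait_shm("/example-wait-shm", sysconf(_SC_PAGE_SIZE));

	if (fd < 0) {
		perror("create_wait_shm");
		return EXIT_FAILURE;
	}
	puts("wait shm ready");
	close(fd);
	shm_unlink("/example-wait-shm");
	return EXIT_SUCCESS;
}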
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <ust-comm.h>
45 #include <usterr-signal-safe.h>
46 #include <helper.h>
47 #include "tracepoint-internal.h"
48 #include "ltt-tracer-core.h"
49 #include "compat.h"
50 #include "../libringbuffer/tlsfixup.h"
51
52 /*
53 * Has lttng ust comm constructor been called ?
54 */
55 static int initialized;
56
57 /*
58 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
59 * Held when handling a command; also held across fork() (to deal with
60 * removal of threads) and on the exit path.
61 */
62
63 /* Should the ust comm thread quit ? */
64 static int lttng_ust_comm_should_quit;
65
66 /*
67 * Wait for any of these before continuing to the main
68 * program:
69 * - the register_done message from sessiond daemon
70 * (will let the sessiond daemon enable sessions before main
71 * starts.)
72 * - sessiond daemon is not reachable.
73 * - timeout (ensuring applications are resilient to session
74 * daemon problems).
75 */
76 static sem_t constructor_wait;
77 /*
78 * Doing this for both the global and local sessiond.
79 */
80 static int sem_count = { 2 };
81
82 /*
83 * Counting nesting within lttng-ust. Used to ensure that calling fork()
84 * from liblttng-ust does not execute the pre/post fork handlers.
85 */
86 static int __thread lttng_ust_nest_count;
87
88 /*
89 * Info about socket and associated listener thread.
90 */
91 struct sock_info {
92 const char *name;
93 pthread_t ust_listener; /* listener thread */
94 int root_handle;
95 int constructor_sem_posted;
96 int allowed;
97 int global;
98
99 char sock_path[PATH_MAX];
100 int socket;
101
102 char wait_shm_path[PATH_MAX];
103 char *wait_shm_mmap;
104 };
105
106 /* Socket from app (connect) to session daemon (listen) for communication */
107 struct sock_info global_apps = {
108 .name = "global",
109 .global = 1,
110
111 .root_handle = -1,
112 .allowed = 1,
113
114 .sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
115 .socket = -1,
116
117 .wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
118 };
119
120 /* TODO: allow global_apps_sock_path override */
121
122 struct sock_info local_apps = {
123 .name = "local",
124 .global = 0,
125 .root_handle = -1,
126 .allowed = 0, /* Check setuid bit first */
127
128 .socket = -1,
129 };
130
131 static int wait_poll_fallback;
132
133 extern void ltt_ring_buffer_client_overwrite_init(void);
134 extern void ltt_ring_buffer_client_discard_init(void);
135 extern void ltt_ring_buffer_metadata_client_init(void);
136 extern void ltt_ring_buffer_client_overwrite_exit(void);
137 extern void ltt_ring_buffer_client_discard_exit(void);
138 extern void ltt_ring_buffer_metadata_client_exit(void);
139
140 /*
141 * Force a read (implying a TLS fixup for dlopen) of TLS variables.
142 */
143 static
144 void lttng_fixup_nest_count_tls(void)
145 {
146 asm volatile ("" : : "m" (lttng_ust_nest_count));
147 }
148
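/*
 * Set up per-user ("local") tracing: disallow it for setuid binaries,
 * then build the per-user socket and wait shm paths from $HOME and the
 * current uid.
 */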
149 static
150 int setup_local_apps(void)
151 {
152 const char *home_dir;
153 uid_t uid;
154
155 uid = getuid();
156 /*
157 * Disallow per-user tracing for setuid binaries.
158 */
159 if (uid != geteuid()) {
160 local_apps.allowed = 0;
161 return 0;
162 } else {
163 local_apps.allowed = 1;
164 }
165 home_dir = (const char *) getenv("HOME");
166 if (!home_dir)
167 return -ENOENT;
168 snprintf(local_apps.sock_path, PATH_MAX,
169 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
170 snprintf(local_apps.wait_shm_path, PATH_MAX,
171 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
172 return 0;
173 }
174
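/*
 * Send the application registration message (protocol version, pid,
 * ppid, uid, gid, bitness and process name) to the session daemon over
 * the given socket.
 */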
175 static
176 int register_app_to_sessiond(int socket)
177 {
178 ssize_t ret;
179 struct {
180 uint32_t major;
181 uint32_t minor;
182 pid_t pid;
183 pid_t ppid;
184 uid_t uid;
185 gid_t gid;
186 uint32_t bits_per_long;
187 char name[16]; /* process name */
188 } reg_msg;
189
190 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
191 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
192 reg_msg.pid = getpid();
193 reg_msg.ppid = getppid();
194 reg_msg.uid = getuid();
195 reg_msg.gid = getgid();
196 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
197 lttng_ust_getprocname(reg_msg.name);
198
199 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
200 if (ret >= 0 && ret != sizeof(reg_msg))
201 return -EIO;
202 return ret;
203 }
204
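/*
 * Send a command reply to the session daemon. A connection reset by
 * the remote end is treated as an orderly close rather than an error.
 */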
205 static
206 int send_reply(int sock, struct ustcomm_ust_reply *lur)
207 {
208 ssize_t len;
209
210 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
211 switch (len) {
212 case sizeof(*lur):
213 DBG("message successfully sent");
214 return 0;
215 case -1:
216 if (errno == ECONNRESET) {
217 printf("remote end closed connection\n");
218 return 0;
219 }
220 return -1;
221 default:
222 printf("incorrect message size: %zd\n", len);
223 return -1;
224 }
225 }
226
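/*
 * Called when a sessiond sends LTTNG_UST_REGISTER_DONE (or when a
 * sessiond is unreachable): decrement sem_count once per socket and,
 * when both the global and local registrations are accounted for, post
 * constructor_wait so the constructor can proceed.
 */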
227 static
228 int handle_register_done(struct sock_info *sock_info)
229 {
230 int ret;
231
232 if (sock_info->constructor_sem_posted)
233 return 0;
234 sock_info->constructor_sem_posted = 1;
235 if (uatomic_read(&sem_count) <= 0) {
236 return 0;
237 }
238 ret = uatomic_add_return(&sem_count, -1);
239 if (ret == 0) {
240 ret = sem_post(&constructor_wait);
241 assert(!ret);
242 }
243 return 0;
244 }
245
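/*
 * Handle one command received from the session daemon: dispatch it to
 * the object descriptor ops under ust_lock(), send the reply, pass the
 * shm and wait file descriptors for stream/channel/metadata commands,
 * then close the now-unneeded local descriptors.
 */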
246 static
247 int handle_message(struct sock_info *sock_info,
248 int sock, struct ustcomm_ust_msg *lum)
249 {
250 int ret = 0;
251 const struct lttng_ust_objd_ops *ops;
252 struct ustcomm_ust_reply lur;
253 int shm_fd, wait_fd;
254 union ust_args args;
255 ssize_t len;
256
257 ust_lock();
258
259 memset(&lur, 0, sizeof(lur));
260
261 if (lttng_ust_comm_should_quit) {
262 ret = -EPERM;
263 goto end;
264 }
265
266 ops = objd_ops(lum->handle);
267 if (!ops) {
268 ret = -ENOENT;
269 goto end;
270 }
271
272 switch (lum->cmd) {
273 case LTTNG_UST_REGISTER_DONE:
274 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
275 ret = handle_register_done(sock_info);
276 else
277 ret = -EINVAL;
278 break;
279 case LTTNG_UST_RELEASE:
280 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
281 ret = -EPERM;
282 else
283 ret = lttng_ust_objd_unref(lum->handle);
284 break;
285 case LTTNG_UST_FILTER:
286 {
287 /* Receive filter data */
288 struct lttng_ust_filter_bytecode *bytecode;
289
290 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
291 ERR("Filter data size is too large: %u bytes\n",
292 lum->u.filter.data_size);
293 ret = -EINVAL;
294 goto error;
295 }
296
297 if (lum->u.filter.reloc_offset > lum->u.filter.data_size - 1) {
298 ERR("Filter reloc offset %u is not within data\n",
299 lum->u.filter.reloc_offset);
300 ret = -EINVAL;
301 goto error;
302 }
303
304 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
305 if (!bytecode) {
306 ret = -ENOMEM;
307 goto error;
308 }
309 len = ustcomm_recv_unix_sock(sock, bytecode->data,
310 lum->u.filter.data_size);
311 switch (len) {
312 case 0: /* orderly shutdown */
313 ret = 0;
314 free(bytecode);
315 goto error;
316 case -1:
317 DBG("Receive failed from lttng-sessiond with errno %d", errno);
318 if (errno == ECONNRESET) {
319 ERR("%s remote end closed connection\n", sock_info->name);
320 ret = -EINVAL;
321 free(bytecode);
322 goto error;
323 }
324 ret = -EINVAL;
325 goto end;
326 default:
327 if (len == lum->u.filter.data_size) {
328 DBG("filter data received\n");
329 break;
330 } else {
331 ERR("incorrect filter data message size: %zd\n", len);
332 ret = -EINVAL;
333 free(bytecode);
334 goto end;
335 }
336 }
337 bytecode->len = lum->u.filter.data_size;
338 bytecode->reloc_offset = lum->u.filter.reloc_offset;
339 if (ops->cmd) {
340 ret = ops->cmd(lum->handle, lum->cmd,
341 (unsigned long) bytecode,
342 &args);
343 if (ret) {
344 free(bytecode);
345 }
346 /* don't free bytecode if everything went fine. */
347 } else {
348 ret = -ENOSYS;
349 free(bytecode);
350 }
351 break;
352 }
353 default:
354 if (ops->cmd)
355 ret = ops->cmd(lum->handle, lum->cmd,
356 (unsigned long) &lum->u,
357 &args);
358 else
359 ret = -ENOSYS;
360 break;
361 }
362
363 end:
364 lur.handle = lum->handle;
365 lur.cmd = lum->cmd;
366 lur.ret_val = ret;
367 if (ret >= 0) {
368 lur.ret_code = USTCOMM_OK;
369 } else {
370 //lur.ret_code = USTCOMM_SESSION_FAIL;
371 lur.ret_code = ret;
372 }
373 if (ret >= 0) {
374 switch (lum->cmd) {
375 case LTTNG_UST_STREAM:
376 /*
377 * Special-case reply to send stream info.
378 * Use lum.u output.
379 */
380 lur.u.stream.memory_map_size = *args.stream.memory_map_size;
381 shm_fd = *args.stream.shm_fd;
382 wait_fd = *args.stream.wait_fd;
383 break;
384 case LTTNG_UST_METADATA:
385 case LTTNG_UST_CHANNEL:
386 lur.u.channel.memory_map_size = *args.channel.memory_map_size;
387 shm_fd = *args.channel.shm_fd;
388 wait_fd = *args.channel.wait_fd;
389 break;
390 case LTTNG_UST_TRACER_VERSION:
391 lur.u.version = lum->u.version;
392 break;
393 case LTTNG_UST_TRACEPOINT_LIST_GET:
394 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
395 break;
396 }
397 }
398 ret = send_reply(sock, &lur);
399 if (ret < 0) {
400 perror("error sending reply");
401 goto error;
402 }
403
404 if ((lum->cmd == LTTNG_UST_STREAM
405 || lum->cmd == LTTNG_UST_CHANNEL
406 || lum->cmd == LTTNG_UST_METADATA)
407 && lur.ret_code == USTCOMM_OK) {
408 int sendret = 0;
409
410 /* we also need to send the file descriptors. */
411 ret = ustcomm_send_fds_unix_sock(sock,
412 &shm_fd, &shm_fd,
413 1, sizeof(int));
414 if (ret < 0) {
415 perror("send shm_fd");
416 sendret = ret;
417 }
418 /*
419 * The sessiond expects 2 file descriptors, even upon
420 * error.
421 */
422 ret = ustcomm_send_fds_unix_sock(sock,
423 &wait_fd, &wait_fd,
424 1, sizeof(int));
425 if (ret < 0) {
426 perror("send wait_fd");
427 goto error;
428 }
429 if (sendret) {
430 ret = sendret;
431 goto error;
432 }
433 }
434 /*
435 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field list
436 * entry after the reply.
437 */
438 if (lur.ret_code == USTCOMM_OK) {
439 switch (lum->cmd) {
440 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
441 len = ustcomm_send_unix_sock(sock,
442 &args.field_list.entry,
443 sizeof(args.field_list.entry));
444 if (len != sizeof(args.field_list.entry)) {
445 ret = -1;
446 goto error;
447 }
448 }
449 }
450 /*
451 * We still have the memory map reference, and the fds have been
452 * sent to the sessiond. We can therefore close those fds. Note
453 * that we keep the write side of the wait_fd open, but close
454 * the read side.
455 */
456 if (lur.ret_code == USTCOMM_OK) {
457 switch (lum->cmd) {
458 case LTTNG_UST_STREAM:
459 if (shm_fd >= 0) {
460 ret = close(shm_fd);
461 if (ret) {
462 PERROR("Error closing stream shm_fd");
463 }
464 *args.stream.shm_fd = -1;
465 }
466 if (wait_fd >= 0) {
467 ret = close(wait_fd);
468 if (ret) {
469 PERROR("Error closing stream wait_fd");
470 }
471 *args.stream.wait_fd = -1;
472 }
473 break;
474 case LTTNG_UST_METADATA:
475 case LTTNG_UST_CHANNEL:
476 if (shm_fd >= 0) {
477 ret = close(shm_fd);
478 if (ret) {
479 PERROR("Error closing channel shm_fd");
480 }
481 *args.channel.shm_fd = -1;
482 }
483 if (wait_fd >= 0) {
484 ret = close(wait_fd);
485 if (ret) {
486 PERROR("Error closing channel wait_fd");
487 }
488 *args.channel.wait_fd = -1;
489 }
490 break;
491 }
492 }
493
494 error:
495 ust_unlock();
496 return ret;
497 }
498
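/*
 * Release the resources attached to a sock_info: close the socket,
 * drop the root handle, and (when not exiting) unmap the wait shm.
 */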
499 static
500 void cleanup_sock_info(struct sock_info *sock_info, int exiting)
501 {
502 int ret;
503
504 if (sock_info->socket != -1) {
505 ret = ustcomm_close_unix_sock(sock_info->socket);
506 if (ret) {
507 ERR("Error closing apps socket");
508 }
509 sock_info->socket = -1;
510 }
511 if (sock_info->root_handle != -1) {
512 ret = lttng_ust_objd_unref(sock_info->root_handle);
513 if (ret) {
514 ERR("Error unref root handle");
515 }
516 sock_info->root_handle = -1;
517 }
518 sock_info->constructor_sem_posted = 0;
519 /*
520 * wait_shm_mmap is used by listener threads outside of the
521 * ust lock, so we cannot tear it down ourselves, because we
522 * cannot join on these threads. Leave this task to the OS
523 * process exit.
524 */
525 if (!exiting && sock_info->wait_shm_mmap) {
526 ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
527 if (ret) {
528 ERR("Error unmapping wait shm");
529 }
530 sock_info->wait_shm_mmap = NULL;
531 }
532 }
533
534 /*
535 * Using fork to set the umask in a child process (umask is process-wide and thus not multi-thread safe).
536 * We deal with the shm_open vs ftruncate race (happening when the
537 * sessiond owns the shm and does not let everybody modify it, to ensure
538 * safety against shm_unlink) by simply letting the mmap fail and
539 * retrying after a few seconds.
540 * For global shm, everybody has rw access to it until the sessiond
541 * starts.
542 */
543 static
544 int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
545 {
546 int wait_shm_fd, ret;
547 pid_t pid;
548
549 /*
550 * Try to open read-only.
551 */
552 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
553 if (wait_shm_fd >= 0) {
554 goto end;
555 } else if (wait_shm_fd < 0 && errno != ENOENT) {
556 /*
557 * Read-only open did not work, and it's not because the
558 * entry was not present. It's a failure that prohibits
559 * using shm.
560 */
561 ERR("Error opening shm %s", sock_info->wait_shm_path);
562 goto end;
563 }
564 /*
565 * If the open failed because the file did not exist, try
566 * creating it ourselves.
567 */
568 lttng_ust_nest_count++;
569 pid = fork();
570 lttng_ust_nest_count--;
571 if (pid > 0) {
572 int status;
573
574 /*
575 * Parent: wait for child to return, in which case the
576 * shared memory map will have been created.
577 */
578 pid = wait(&status);
579 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
580 wait_shm_fd = -1;
581 goto end;
582 }
583 /*
584 * Try to open read-only again after creation.
585 */
586 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
587 if (wait_shm_fd < 0) {
588 /*
589 * Read-only open did not work. It's a failure
590 * that prohibits using shm.
591 */
592 ERR("Error opening shm %s", sock_info->wait_shm_path);
593 goto end;
594 }
595 goto end;
596 } else if (pid == 0) {
597 int create_mode;
598
599 /* Child */
600 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
601 if (sock_info->global)
602 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
603 /*
604 * We're alone in a child process, so we can modify the
605 * process-wide umask.
606 */
607 umask(~create_mode);
608 /*
609 * Try creating shm (or get rw access).
610 * We don't do an exclusive open, because we allow other
611 * processes to create+ftruncate it concurrently.
612 */
613 wait_shm_fd = shm_open(sock_info->wait_shm_path,
614 O_RDWR | O_CREAT, create_mode);
615 if (wait_shm_fd >= 0) {
616 ret = ftruncate(wait_shm_fd, mmap_size);
617 if (ret) {
618 PERROR("ftruncate");
619 _exit(EXIT_FAILURE);
620 }
621 _exit(EXIT_SUCCESS);
622 }
623 /*
624 * For local shm, we need rw access for the open to succeed:
625 * this means the local sessiond will be
626 * able to wake us up. For global shm, we open it even
627 * if rw access is not granted, because the root.root
628 * sessiond will be able to override all rights and wake
629 * us up.
630 */
631 if (!sock_info->global && errno != EACCES) {
632 ERR("Error opening shm %s", sock_info->wait_shm_path);
633 _exit(EXIT_FAILURE);
634 }
635 /*
636 * The shm exists, but we cannot open it RW. Report
637 * success.
638 */
639 _exit(EXIT_SUCCESS);
640 } else {
641 return -1;
642 }
643 end:
644 if (wait_shm_fd >= 0 && !sock_info->global) {
645 struct stat statbuf;
646
647 /*
648 * Ensure that our user is the owner of the shm file for
649 * local shm. If we do not own the file, it means our
650 * sessiond will not have access to wake us up (there is
651 * probably a rogue process trying to fake our
652 * sessiond). Fallback to polling method in this case.
653 */
654 ret = fstat(wait_shm_fd, &statbuf);
655 if (ret) {
656 PERROR("fstat");
657 goto error_close;
658 }
659 if (statbuf.st_uid != getuid())
660 goto error_close;
661 }
662 return wait_shm_fd;
663
664 error_close:
665 ret = close(wait_shm_fd);
666 if (ret) {
667 PERROR("Error closing fd");
668 }
669 return -1;
670 }
671
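/*
 * Map one page of the wait shm read-only and return the mapping. The
 * shm file descriptor is closed as soon as the mmap reference is
 * taken. Returns NULL on failure.
 */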
672 static
673 char *get_map_shm(struct sock_info *sock_info)
674 {
675 size_t mmap_size = sysconf(_SC_PAGE_SIZE);
676 int wait_shm_fd, ret;
677 char *wait_shm_mmap;
678
679 wait_shm_fd = get_wait_shm(sock_info, mmap_size);
680 if (wait_shm_fd < 0) {
681 goto error;
682 }
683 wait_shm_mmap = mmap(NULL, mmap_size, PROT_READ,
684 MAP_SHARED, wait_shm_fd, 0);
685 /* close shm fd immediately after taking the mmap reference */
686 ret = close(wait_shm_fd);
687 if (ret) {
688 PERROR("Error closing fd");
689 }
690 if (wait_shm_mmap == MAP_FAILED) {
691 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
692 goto error;
693 }
694 return wait_shm_mmap;
695
696 error:
697 return NULL;
698 }
699
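/*
 * Block until a session daemon wakes us through the wait shm futex.
 * The mapping is created lazily under ust_lock(). If FUTEX_WAIT is not
 * supported on the read-only mapping (EFAULT), switch to the polling
 * fallback.
 */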
700 static
701 void wait_for_sessiond(struct sock_info *sock_info)
702 {
703 int ret;
704
705 ust_lock();
706 if (lttng_ust_comm_should_quit) {
707 goto quit;
708 }
709 if (wait_poll_fallback) {
710 goto error;
711 }
712 if (!sock_info->wait_shm_mmap) {
713 sock_info->wait_shm_mmap = get_map_shm(sock_info);
714 if (!sock_info->wait_shm_mmap)
715 goto error;
716 }
717 ust_unlock();
718
719 DBG("Waiting for %s apps sessiond", sock_info->name);
720 /* Wait for futex wakeup */
721 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
722 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
723 FUTEX_WAIT, 0, NULL, NULL, 0);
724 if (ret < 0) {
725 if (errno == EFAULT) {
726 wait_poll_fallback = 1;
727 DBG(
728 "Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
729 "do not support FUTEX_WAKE on read-only memory mappings correctly. "
730 "Please upgrade your kernel "
731 "(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
732 "mainline). LTTng-UST will use polling mode fallback.");
733 if (ust_debug())
734 PERROR("futex");
735 }
736 }
737 }
738 return;
739
740 quit:
741 ust_unlock();
742 return;
743
744 error:
745 ust_unlock();
746 return;
747 }
748
749 /*
750 * This thread does not allocate any resource, except within
751 * handle_message, within mutex protection. This mutex protects against
752 * fork and exit.
753 * It also allocates resources at socket connection time, which is
754 * likewise protected by the mutex.
755 */
756 static
757 void *ust_listener_thread(void *arg)
758 {
759 struct sock_info *sock_info = arg;
760 int sock, ret, prev_connect_failed = 0, has_waited = 0;
761
762 /* Restart trying to connect to the session daemon */
763 restart:
764 if (prev_connect_failed) {
765 /* Wait for sessiond availability (wait shm futex) */
766 wait_for_sessiond(sock_info);
767 if (has_waited) {
768 has_waited = 0;
769 /*
770 * Sleep for 5 seconds before retrying after a
771 * sequence of failure / wait / failure. This
772 * deals with a killed or broken session daemon.
773 */
774 sleep(5);
775 }
776 has_waited = 1;
777 prev_connect_failed = 0;
778 }
779 ust_lock();
780
781 if (lttng_ust_comm_should_quit) {
782 ust_unlock();
783 goto quit;
784 }
785
786 if (sock_info->socket != -1) {
787 ret = ustcomm_close_unix_sock(sock_info->socket);
788 if (ret) {
789 ERR("Error closing %s apps socket", sock_info->name);
790 }
791 sock_info->socket = -1;
792 }
793
794 /* Register */
795 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
796 if (ret < 0) {
797 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
798 prev_connect_failed = 1;
799 /*
800 * If we cannot find the sessiond daemon, don't delay
801 * constructor execution.
802 */
803 ret = handle_register_done(sock_info);
804 assert(!ret);
805 ust_unlock();
806 goto restart;
807 }
808
809 sock_info->socket = sock = ret;
810
811 /*
812 * Create only one root handle per listener thread for the whole
813 * process lifetime.
814 */
815 if (sock_info->root_handle == -1) {
816 ret = lttng_abi_create_root_handle();
817 if (ret < 0) {
818 ERR("Error creating root handle");
819 ust_unlock();
820 goto quit;
821 }
822 sock_info->root_handle = ret;
823 }
824
825 ret = register_app_to_sessiond(sock);
826 if (ret < 0) {
827 ERR("Error registering to %s apps socket", sock_info->name);
828 prev_connect_failed = 1;
829 /*
830 * If we cannot register to the sessiond daemon, don't
831 * delay constructor execution.
832 */
833 ret = handle_register_done(sock_info);
834 assert(!ret);
835 ust_unlock();
836 goto restart;
837 }
838 ust_unlock();
839
840 for (;;) {
841 ssize_t len;
842 struct ustcomm_ust_msg lum;
843
844 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
845 switch (len) {
846 case 0: /* orderly shutdown */
847 DBG("%s ltt-sessiond has performed an orderly shutdown\n", sock_info->name);
848 ust_lock();
849 /*
850 * Either sessiond has shut down or refused us by closing the socket.
851 * In either case, we don't want to delay constructor execution,
852 * and we need to wait before retrying.
853 */
854 prev_connect_failed = 1;
855 /*
856 * If we cannot register to the sessiond daemon, don't
857 * delay constructor execution.
858 */
859 ret = handle_register_done(sock_info);
860 assert(!ret);
861 ust_unlock();
862 goto end;
863 case sizeof(lum):
864 DBG("message received\n");
865 ret = handle_message(sock_info, sock, &lum);
866 if (ret < 0) {
867 ERR("Error handling message for %s socket", sock_info->name);
868 }
869 continue;
870 case -1:
871 DBG("Receive failed from lttng-sessiond with errno %d", errno);
872 if (errno == ECONNRESET) {
873 ERR("%s remote end closed connection\n", sock_info->name);
874 goto end;
875 }
876 goto end;
877 default:
878 ERR("incorrect message size (%s socket): %zd\n", sock_info->name, len);
879 continue;
880 }
881
882 }
883 end:
884 goto restart; /* try to reconnect */
885 quit:
886 return NULL;
887 }
888
889 /*
890 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
891 */
892 static
893 int get_timeout(struct timespec *constructor_timeout)
894 {
895 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
896 char *str_delay;
897 int ret;
898
899 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
900 if (str_delay) {
901 constructor_delay_ms = strtol(str_delay, NULL, 10);
902 }
903
904 switch (constructor_delay_ms) {
905 case -1:/* fall-through */
906 case 0:
907 return constructor_delay_ms;
908 default:
909 break;
910 }
911
912 /*
913 * If we are unable to find the current time, don't wait.
914 */
915 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
916 if (ret) {
917 return -1;
918 }
919 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
920 constructor_timeout->tv_nsec +=
921 (constructor_delay_ms % 1000UL) * 1000000UL;
922 if (constructor_timeout->tv_nsec >= 1000000000UL) {
923 constructor_timeout->tv_sec++;
924 constructor_timeout->tv_nsec -= 1000000000UL;
925 }
926 return 1;
927 }
928
929 /*
930 * sessiond monitoring thread: monitor presence of global and per-user
931 * sessiond by polling the application common named pipe.
932 */
933 /* TODO */
934
935 void __attribute__((constructor)) lttng_ust_init(void)
936 {
937 struct timespec constructor_timeout;
938 sigset_t sig_all_blocked, orig_parent_mask;
939 pthread_attr_t thread_attr;
940 int timeout_mode;
941 int ret;
942
943 if (uatomic_xchg(&initialized, 1) == 1)
944 return;
945
946 /*
947 * Fixup interdependency between TLS fixup mutex (which happens
948 * to be the dynamic linker mutex) and ust_lock, taken within
949 * the ust lock.
950 */
951 lttng_fixup_event_tls();
952 lttng_fixup_ringbuffer_tls();
953 lttng_fixup_vtid_tls();
954 lttng_fixup_nest_count_tls();
955
956 /*
957 * We want precise control over the order in which we construct
958 * our sub-libraries vs starting to receive commands from
959 * sessiond (otherwise leading to errors when trying to create
960 * sessiond before the init functions are completed).
961 */
962 init_usterr();
963 init_tracepoint();
964 ltt_ring_buffer_metadata_client_init();
965 ltt_ring_buffer_client_overwrite_init();
966 ltt_ring_buffer_client_discard_init();
967
968 timeout_mode = get_timeout(&constructor_timeout);
969
970 ret = sem_init(&constructor_wait, 0, 0);
971 assert(!ret);
972
973 ret = setup_local_apps();
974 if (ret) {
975 ERR("Error setting up to local apps");
976 }
977
978 /* A new thread created by pthread_create inherits the signal mask
979 * from the parent. To avoid any signal being received by the
980 * listener thread, we block all signals temporarily in the parent,
981 * while we create the listener thread.
982 */
983 sigfillset(&sig_all_blocked);
984 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
985 if (ret) {
986 ERR("pthread_sigmask: %s", strerror(ret));
987 }
988
989 ret = pthread_attr_init(&thread_attr);
990 if (ret) {
991 ERR("pthread_attr_init: %s", strerror(ret));
992 }
993 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
994 if (ret) {
995 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
996 }
997
998 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
999 ust_listener_thread, &global_apps);
1000 if (ret) {
1001 ERR("pthread_create global: %s", strerror(ret));
1002 }
1003 if (local_apps.allowed) {
1004 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
1005 ust_listener_thread, &local_apps);
1006 if (ret) {
1007 ERR("pthread_create local: %s", strerror(ret));
1008 }
1009 } else {
1010 handle_register_done(&local_apps);
1011 }
1012 ret = pthread_attr_destroy(&thread_attr);
1013 if (ret) {
1014 ERR("pthread_attr_destroy: %s", strerror(ret));
1015 }
1016
1017 /* Restore original signal mask in parent */
1018 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1019 if (ret) {
1020 ERR("pthread_sigmask: %s", strerror(ret));
1021 }
1022
1023 switch (timeout_mode) {
1024 case 1: /* timeout wait */
1025 do {
1026 ret = sem_timedwait(&constructor_wait,
1027 &constructor_timeout);
1028 } while (ret < 0 && errno == EINTR);
1029 if (ret < 0 && errno == ETIMEDOUT) {
1030 ERR("Timed out waiting for ltt-sessiond");
1031 } else {
1032 assert(!ret);
1033 }
1034 break;
1035 case -1:/* wait forever */
1036 do {
1037 ret = sem_wait(&constructor_wait);
1038 } while (ret < 0 && errno == EINTR);
1039 assert(!ret);
1040 break;
1041 case 0: /* no timeout */
1042 break;
1043 }
1044 }
1045
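/*
 * Tear down communication state and the tracer sub-libraries. When
 * called from the fork child (exiting == 0), also reset the globals so
 * that lttng_ust_init() can run again in the new process.
 */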
1046 static
1047 void lttng_ust_cleanup(int exiting)
1048 {
1049 cleanup_sock_info(&global_apps, exiting);
1050 if (local_apps.allowed) {
1051 cleanup_sock_info(&local_apps, exiting);
1052 }
1053 /*
1054 * The teardown calls in this function all affect data structures
1055 * accessed under the UST lock by the listener threads. This
1056 * lock, along with the lttng_ust_comm_should_quit flag, ensure
1057 * that none of these threads are accessing this data at this
1058 * point.
1059 */
1060 lttng_ust_abi_exit();
1061 lttng_ust_events_exit();
1062 ltt_ring_buffer_client_discard_exit();
1063 ltt_ring_buffer_client_overwrite_exit();
1064 ltt_ring_buffer_metadata_client_exit();
1065 exit_tracepoint();
1066 if (!exiting) {
1067 /* Reinitialize values for fork */
1068 sem_count = 2;
1069 lttng_ust_comm_should_quit = 0;
1070 initialized = 0;
1071 }
1072 }
1073
1074 void __attribute__((destructor)) lttng_ust_exit(void)
1075 {
1076 int ret;
1077
1078 /*
1079 * Using pthread_cancel here because:
1080 * A) we don't want to hang application teardown.
1081 * B) the thread is not allocating any resource.
1082 */
1083
1084 /*
1085 * Require the communication thread to quit. Synchronize with
1086 * mutexes to ensure it is not in a mutex critical section when
1087 * pthread_cancel is later called.
1088 */
1089 ust_lock();
1090 lttng_ust_comm_should_quit = 1;
1091 ust_unlock();
1092
1093 /* cancel threads */
1094 ret = pthread_cancel(global_apps.ust_listener);
1095 if (ret) {
1096 ERR("Error cancelling global ust listener thread: %s",
1097 strerror(ret));
1098 }
1099 if (local_apps.allowed) {
1100 ret = pthread_cancel(local_apps.ust_listener);
1101 if (ret) {
1102 ERR("Error cancelling local ust listener thread: %s",
1103 strerror(ret));
1104 }
1105 }
1106 /*
1107 * Do NOT join threads: use of sys_futex makes it impossible to
1108 * join the threads without using async-cancel, but async-cancel
1109 * is delivered by a signal, which could hit the target thread
1110 * anywhere in its code path, including while the ust_lock() is
1111 * held, causing a deadlock for the other thread. Let the OS
1112 * clean up the threads if they are stalled in a syscall.
1113 */
1114 lttng_ust_cleanup(1);
1115 }
1116
1117 /*
1118 * We exclude the worker threads across fork and clone (except
1119 * CLONE_VM), because these system calls only keep the forking thread
1120 * running in the child. Therefore, we don't want to call fork or clone
1121 * in the middle of a tracepoint or ust tracing state modification.
1122 * Holding this mutex protects these structures across fork and clone.
1123 */
1124 void ust_before_fork(sigset_t *save_sigset)
1125 {
1126 /*
1127 * Disable signals. This avoids having the child intervene
1128 * before it is properly set up for tracing. It is safer to
1129 * disable all signals, because then we know we are not breaking
1130 * anything by restoring the original mask.
1131 */
1132 sigset_t all_sigs;
1133 int ret;
1134
1135 if (lttng_ust_nest_count)
1136 return;
1137 /* Disable signals */
1138 sigfillset(&all_sigs);
1139 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
1140 if (ret == -1) {
1141 PERROR("sigprocmask");
1142 }
1143 ust_lock();
1144 rcu_bp_before_fork();
1145 }
1146
1147 static void ust_after_fork_common(sigset_t *restore_sigset)
1148 {
1149 int ret;
1150
1151 DBG("process %d", getpid());
1152 ust_unlock();
1153 /* Restore signals */
1154 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1155 if (ret == -1) {
1156 PERROR("sigprocmask");
1157 }
1158 }
1159
1160 void ust_after_fork_parent(sigset_t *restore_sigset)
1161 {
1162 if (lttng_ust_nest_count)
1163 return;
1164 DBG("process %d", getpid());
1165 rcu_bp_after_fork_parent();
1166 /* Release mutexes and reenable signals */
1167 ust_after_fork_common(restore_sigset);
1168 }
1169
1170 /*
1171 * After fork, in the child, we need to clean up all the leftover state,
1172 * except the worker thread, which already magically disappeared thanks
1173 * to the weird Linux fork semantics. After tidying up, we call
1174 * lttng_ust_init() again to start over as a new PID.
1175 *
1176 * This is meant for fork() calls that trace in the child between the
1177 * fork and the following exec call (if there is any).
1178 */
1179 void ust_after_fork_child(sigset_t *restore_sigset)
1180 {
1181 if (lttng_ust_nest_count)
1182 return;
1183 DBG("process %d", getpid());
1184 /* Release urcu mutexes */
1185 rcu_bp_after_fork_child();
1186 lttng_ust_cleanup(0);
1187 lttng_context_vtid_reset();
1188 /* Release mutexes and reenable signals */
1189 ust_after_fork_common(restore_sigset);
1190 lttng_ust_init();
1191 }