Fix: missing test for lttng_ust_comm_should_quit in lttng-ust-comm.c
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
1 /*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #define _LGPL_SOURCE
23 #include <sys/types.h>
24 #include <sys/socket.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <sys/types.h>
28 #include <sys/wait.h>
29 #include <fcntl.h>
30 #include <unistd.h>
31 #include <errno.h>
32 #include <pthread.h>
33 #include <semaphore.h>
34 #include <time.h>
35 #include <assert.h>
36 #include <signal.h>
37 #include <urcu/uatomic.h>
38 #include <urcu/futex.h>
39 #include <urcu/compiler.h>
40
41 #include <lttng/ust-events.h>
42 #include <lttng/ust-abi.h>
43 #include <lttng/ust.h>
44 #include <lttng/ust-error.h>
45 #include <ust-comm.h>
46 #include <usterr-signal-safe.h>
47 #include <helper.h>
48 #include "tracepoint-internal.h"
49 #include "lttng-tracer-core.h"
50 #include "compat.h"
51 #include "../libringbuffer/tlsfixup.h"
52
/*
 * Has lttng ust comm constructor been called ?
 * (set once via uatomic_xchg in lttng_ust_init, reset to 0 in the
 * fork-child path by lttng_ust_cleanup.)
 */
static int initialized;

/*
 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
 * Held when handling a command, also held by fork() to deal with
 * removal of threads, and by exit path.
 */

/* Should the ust comm thread quit ? Set under ust_lock by the exit path. */
static int lttng_ust_comm_should_quit;

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from sessiond daemon
 * (will let the sessiond daemon enable sessions before main
 * starts.)
 * - sessiond daemon is not reachable.
 * - timeout (ensuring applications are resilient to session
 * daemon problems).
 */
static sem_t constructor_wait;
/*
 * Doing this for both the global and local sessiond.
 * The semaphore above is only posted once sem_count drops to 0,
 * i.e. once both listeners have reported register-done (or failure).
 */
static int sem_count = { 2 };

/*
 * Counting nesting within lttng-ust. Used to ensure that calling fork()
 * from liblttng-ust does not execute the pre/post fork handlers.
 */
static int __thread lttng_ust_nest_count;

/*
 * Info about socket and associated listener thread.
 * One instance per session daemon flavor (global and per-user).
 */
struct sock_info {
	const char *name;		/* "global" or "local", for log messages */
	pthread_t ust_listener;		/* listener thread */
	int root_handle;		/* root objd handle, -1 when not created */
	int constructor_sem_posted;	/* register-done already accounted for */
	int allowed;			/* is this sessiond flavor enabled ? */
	int global;			/* global (1) vs per-user (0) sessiond */

	char sock_path[PATH_MAX];	/* unix socket path to the sessiond */
	int socket;			/* connected socket fd, -1 when closed */

	char wait_shm_path[PATH_MAX];	/* shm used to wait for sessiond startup */
	char *wait_shm_mmap;		/* read-only mapping of the wait shm */
};

/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.global = 1,

	.root_handle = -1,
	.allowed = 1,

	.sock_path = DEFAULT_GLOBAL_APPS_UNIX_SOCK,
	.socket = -1,

	.wait_shm_path = DEFAULT_GLOBAL_APPS_WAIT_SHM_PATH,
};

/* TODO: allow global_apps_sock_path override */

/*
 * Per-user sessiond. Paths are filled in by setup_local_apps(), which
 * also decides whether per-user tracing is allowed.
 */
struct sock_info local_apps = {
	.name = "local",
	.global = 0,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */

	.socket = -1,
};

/* Non-zero once FUTEX_WAIT has failed and we must poll instead. */
static int wait_poll_fallback;

/* Ring buffer client init/exit entry points (defined in sibling objects). */
extern void lttng_ring_buffer_client_overwrite_init(void);
extern void lttng_ring_buffer_client_discard_init(void);
extern void lttng_ring_buffer_metadata_client_init(void);
extern void lttng_ring_buffer_client_overwrite_exit(void);
extern void lttng_ring_buffer_client_discard_exit(void);
extern void lttng_ring_buffer_metadata_client_exit(void);
140
/*
 * Force a read (imply TLS fixup for dlopen) of TLS variables.
 * The empty asm with an "m" input forces the compiler to materialize
 * the TLS address of lttng_ust_nest_count without emitting any code.
 */
static
void lttng_fixup_nest_count_tls(void)
{
	asm volatile ("" : : "m" (lttng_ust_nest_count));
}
149
150 static
151 int setup_local_apps(void)
152 {
153 const char *home_dir;
154 uid_t uid;
155
156 uid = getuid();
157 /*
158 * Disallow per-user tracing for setuid binaries.
159 */
160 if (uid != geteuid()) {
161 assert(local_apps.allowed == 0);
162 return 0;
163 }
164 home_dir = (const char *) getenv("HOME");
165 if (!home_dir) {
166 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
167 assert(local_apps.allowed == 0);
168 return -ENOENT;
169 }
170 local_apps.allowed = 1;
171 snprintf(local_apps.sock_path, PATH_MAX,
172 DEFAULT_HOME_APPS_UNIX_SOCK, home_dir);
173 snprintf(local_apps.wait_shm_path, PATH_MAX,
174 DEFAULT_HOME_APPS_WAIT_SHM_PATH, uid);
175 return 0;
176 }
177
178 static
179 int register_app_to_sessiond(int socket)
180 {
181 ssize_t ret;
182 struct {
183 uint32_t major;
184 uint32_t minor;
185 pid_t pid;
186 pid_t ppid;
187 uid_t uid;
188 gid_t gid;
189 uint32_t bits_per_long;
190 char name[16]; /* process name */
191 } reg_msg;
192
193 reg_msg.major = LTTNG_UST_COMM_VERSION_MAJOR;
194 reg_msg.minor = LTTNG_UST_COMM_VERSION_MINOR;
195 reg_msg.pid = getpid();
196 reg_msg.ppid = getppid();
197 reg_msg.uid = getuid();
198 reg_msg.gid = getgid();
199 reg_msg.bits_per_long = CAA_BITS_PER_LONG;
200 lttng_ust_getprocname(reg_msg.name);
201
202 ret = ustcomm_send_unix_sock(socket, &reg_msg, sizeof(reg_msg));
203 if (ret >= 0 && ret != sizeof(reg_msg))
204 return -EIO;
205 return ret;
206 }
207
208 static
209 int send_reply(int sock, struct ustcomm_ust_reply *lur)
210 {
211 ssize_t len;
212
213 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
214 switch (len) {
215 case sizeof(*lur):
216 DBG("message successfully sent");
217 return 0;
218 default:
219 if (len == -ECONNRESET) {
220 DBG("remote end closed connection");
221 return 0;
222 }
223 if (len < 0)
224 return len;
225 DBG("incorrect message size: %zd", len);
226 return -EINVAL;
227 }
228 }
229
/*
 * Account for one sessiond flavor (global or per-user) having completed
 * registration (or having been given up on). sem_count starts at 2; the
 * constructor semaphore is posted only when the last of the two reports
 * in. Idempotent per sock_info via constructor_sem_posted.
 * Called with the ust lock held. Always returns 0.
 */
static
int handle_register_done(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->constructor_sem_posted)
		return 0;
	sock_info->constructor_sem_posted = 1;
	/* Never decrement below zero. */
	if (uatomic_read(&sem_count) <= 0) {
		return 0;
	}
	ret = uatomic_add_return(&sem_count, -1);
	if (ret == 0) {
		/* Both sessiond flavors accounted for: unblock the constructor. */
		ret = sem_post(&constructor_wait);
		assert(!ret);
	}
	return 0;
}
248
/*
 * Handle one command received from the session daemon on @sock.
 *
 * Holds the ust lock for the entire duration (protecting against fork
 * and exit). Dispatches the command to the object-descriptor ops for
 * lum->handle, then sends a reply; for stream/channel/metadata commands
 * the shm and wait file descriptors are also sent, after which our
 * local copies are closed.
 *
 * Label semantics: "end" still sends a reply carrying the error code;
 * "error" skips the reply (transport already broken or reply sent).
 * Returns 0 or a negative errno-style code.
 */
static
int handle_message(struct sock_info *sock_info,
		int sock, struct ustcomm_ust_msg *lum)
{
	int ret = 0;
	const struct lttng_ust_objd_ops *ops;
	struct ustcomm_ust_reply lur;
	/*
	 * shm_fd/wait_fd are only assigned (and later used) for the
	 * fd-carrying commands (STREAM/CHANNEL/METADATA) on success.
	 */
	int shm_fd, wait_fd;
	union ust_args args;
	ssize_t len;

	ust_lock();

	memset(&lur, 0, sizeof(lur));

	/* Refuse work while the exit path is tearing us down. */
	if (lttng_ust_comm_should_quit) {
		ret = -EPERM;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = lttng_ust_objd_unref(lum->handle);
		break;
	case LTTNG_UST_FILTER:
	{
		/* Receive filter data */
		struct lttng_ust_filter_bytecode_node *bytecode;

		/* Validate sessiond-provided sizes before allocating. */
		if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
			ERR("Filter data size is too large: %u bytes",
				lum->u.filter.data_size);
			ret = -EINVAL;
			goto error;
		}

		if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
			ERR("Filter reloc offset %u is not within data",
				lum->u.filter.reloc_offset);
			ret = -EINVAL;
			goto error;
		}

		bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
		if (!bytecode) {
			ret = -ENOMEM;
			goto error;
		}
		/* The bytecode payload follows the command message on the socket. */
		len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
				lum->u.filter.data_size);
		switch (len) {
		case 0:	/* orderly shutdown */
			ret = 0;
			free(bytecode);
			goto error;
		default:
			if (len == lum->u.filter.data_size) {
				DBG("filter data received");
				break;
			} else if (len < 0) {
				DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
				if (len == -ECONNRESET) {
					ERR("%s remote end closed connection", sock_info->name);
					ret = len;
					free(bytecode);
					goto error;
				}
				ret = len;
				goto end;
			} else {
				/* Short read: report -EINVAL back to the sessiond. */
				DBG("incorrect filter data message size: %zd", len);
				ret = -EINVAL;
				free(bytecode);
				goto end;
			}
		}
		bytecode->bc.len = lum->u.filter.data_size;
		bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
		bytecode->bc.seqnum = lum->u.filter.seqnum;
		if (ops->cmd) {
			/* On success, ownership of bytecode transfers to the callee. */
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) bytecode,
					&args, sock_info);
			if (ret) {
				free(bytecode);
			}
			/* don't free bytecode if everything went fine. */
		} else {
			ret = -ENOSYS;
			free(bytecode);
		}
		break;
	}
	default:
		/* Generic dispatch: hand the command payload to the objd ops. */
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args, sock_info);
		else
			ret = -ENOSYS;
		break;
	}

end:
	/* Build the reply, translating internal errno codes to UST codes. */
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = LTTNG_UST_OK;
	} else {
		/*
		 * Use -LTTNG_UST_ERR as wildcard for UST internal
		 * error that are not caused by the transport, except if
		 * we already have a more precise error message to
		 * report.
		 */
		if (ret > -LTTNG_UST_ERR) {
			/* Translate code to UST error. */
			switch (ret) {
			case -EEXIST:
				lur.ret_code = -LTTNG_UST_ERR_EXIST;
				break;
			case -EINVAL:
				lur.ret_code = -LTTNG_UST_ERR_INVAL;
				break;
			case -ENOENT:
				lur.ret_code = -LTTNG_UST_ERR_NOENT;
				break;
			case -EPERM:
				lur.ret_code = -LTTNG_UST_ERR_PERM;
				break;
			case -ENOSYS:
				lur.ret_code = -LTTNG_UST_ERR_NOSYS;
				break;
			default:
				lur.ret_code = -LTTNG_UST_ERR;
				break;
			}
		} else {
			lur.ret_code = ret;
		}
	}
	if (ret >= 0) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			/*
			 * Special-case reply to send stream info.
			 * Use lum.u output.
			 */
			lur.u.stream.memory_map_size = *args.stream.memory_map_size;
			shm_fd = *args.stream.shm_fd;
			wait_fd = *args.stream.wait_fd;
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			lur.u.channel.memory_map_size = *args.channel.memory_map_size;
			shm_fd = *args.channel.shm_fd;
			wait_fd = *args.channel.wait_fd;
			break;
		case LTTNG_UST_TRACER_VERSION:
			lur.u.version = lum->u.version;
			break;
		case LTTNG_UST_TRACEPOINT_LIST_GET:
			memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
			break;
		}
	}
	ret = send_reply(sock, &lur);
	if (ret < 0) {
		DBG("error sending reply");
		goto error;
	}

	if ((lum->cmd == LTTNG_UST_STREAM
			|| lum->cmd == LTTNG_UST_CHANNEL
			|| lum->cmd == LTTNG_UST_METADATA)
			&& lur.ret_code == LTTNG_UST_OK) {
		int sendret = 0;

		/* we also need to send the file descriptors. */
		ret = ustcomm_send_fds_unix_sock(sock,
			&shm_fd, &shm_fd,
			1, sizeof(int));
		if (ret < 0) {
			ERR("send shm_fd");
			sendret = ret;
		}
		/*
		 * The sessiond expects 2 file descriptors, even upon
		 * error.
		 */
		ret = ustcomm_send_fds_unix_sock(sock,
			&wait_fd, &wait_fd,
			1, sizeof(int));
		if (ret < 0) {
			perror("send wait_fd");
			goto error;
		}
		/* Report the earlier shm_fd send failure only now. */
		if (sendret) {
			ret = sendret;
			goto error;
		}
	}
	/*
	 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
	 * after the reply.
	 */
	if (lur.ret_code == LTTNG_UST_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
			len = ustcomm_send_unix_sock(sock,
				&args.field_list.entry,
				sizeof(args.field_list.entry));
			if (len < 0) {
				ret = len;
				goto error;
			}
			if (len != sizeof(args.field_list.entry)) {
				ret = -EINVAL;
				goto error;
			}
		}
	}
	/*
	 * We still have the memory map reference, and the fds have been
	 * sent to the sessiond. We can therefore close those fds. Note
	 * that we keep the write side of the wait_fd open, but close
	 * the read side.
	 */
	if (lur.ret_code == LTTNG_UST_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_STREAM:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing stream shm_fd");
				}
				*args.stream.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing stream wait_fd");
				}
				*args.stream.wait_fd = -1;
			}
			break;
		case LTTNG_UST_METADATA:
		case LTTNG_UST_CHANNEL:
			if (shm_fd >= 0) {
				ret = close(shm_fd);
				if (ret) {
					PERROR("Error closing channel shm_fd");
				}
				*args.channel.shm_fd = -1;
			}
			if (wait_fd >= 0) {
				ret = close(wait_fd);
				if (ret) {
					PERROR("Error closing channel wait_fd");
				}
				*args.channel.wait_fd = -1;
			}
			break;
		}
	}

error:
	ust_unlock();
	return ret;
}
535
/*
 * Tear down the per-sessiond state: close the socket, release the root
 * objd handle, and reset the register-done accounting so a fork child
 * can start over. Called with the ust lock held (fork/exit paths).
 * @exiting: non-zero when the whole process is going away.
 */
static
void cleanup_sock_info(struct sock_info *sock_info, int exiting)
{
	int ret;

	if (sock_info->socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->socket);
		if (ret) {
			ERR("Error closing apps socket");
		}
		sock_info->socket = -1;
	}
	if (sock_info->root_handle != -1) {
		ret = lttng_ust_objd_unref(sock_info->root_handle);
		if (ret) {
			ERR("Error unref root handle");
		}
		sock_info->root_handle = -1;
	}
	sock_info->constructor_sem_posted = 0;
	/*
	 * wait_shm_mmap is used by listener threads outside of the
	 * ust lock, so we cannot tear it down ourselves, because we
	 * cannot join on these threads. Leave this task to the OS
	 * process exit.
	 */
	if (!exiting && sock_info->wait_shm_mmap) {
		ret = munmap(sock_info->wait_shm_mmap, sysconf(_SC_PAGE_SIZE));
		if (ret) {
			ERR("Error unmapping wait shm");
		}
		sock_info->wait_shm_mmap = NULL;
	}
}
570
/*
 * Using fork to set umask in the child process (not multi-thread safe).
 * We deal with the shm_open vs ftruncate race (happening when the
 * sessiond owns the shm and does not let everybody modify it, to ensure
 * safety against shm_unlink) by simply letting the mmap fail and
 * retrying after a few seconds.
 * For global shm, everybody has rw access to it until the sessiond
 * starts.
 *
 * Returns a read-only fd on the wait shm, or -1 on failure.
 */
static
int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
{
	int wait_shm_fd, ret;
	pid_t pid;

	/*
	 * Try to open read-only.
	 */
	wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
	if (wait_shm_fd >= 0) {
		goto end;
	} else if (wait_shm_fd < 0 && errno != ENOENT) {
		/*
		 * Real-only open did not work, and it's not because the
		 * entry was not present. It's a failure that prohibits
		 * using shm.
		 */
		ERR("Error opening shm %s", sock_info->wait_shm_path);
		goto end;
	}
	/*
	 * If the open failed because the file did not exist, try
	 * creating it ourself.
	 */
	/* Bump the nest count so fork() below skips the ust fork handlers. */
	lttng_ust_nest_count++;
	pid = fork();
	lttng_ust_nest_count--;
	if (pid > 0) {
		int status;

		/*
		 * Parent: wait for child to return, in which case the
		 * shared memory map will have been created.
		 */
		pid = wait(&status);
		if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			wait_shm_fd = -1;
			goto end;
		}
		/*
		 * Try to open read-only again after creation.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
		if (wait_shm_fd < 0) {
			/*
			 * Real-only open did not work. It's a failure
			 * that prohibits using shm.
			 */
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			goto end;
		}
		goto end;
	} else if (pid == 0) {
		int create_mode;

		/* Child */
		create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
		if (sock_info->global)
			create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
		/*
		 * We're alone in a child process, so we can modify the
		 * process-wide umask.
		 */
		umask(~create_mode);
		/*
		 * Try creating shm (or get rw access).
		 * We don't do an exclusive open, because we allow other
		 * processes to create+ftruncate it concurrently.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path,
				O_RDWR | O_CREAT, create_mode);
		if (wait_shm_fd >= 0) {
			ret = ftruncate(wait_shm_fd, mmap_size);
			if (ret) {
				PERROR("ftruncate");
				_exit(EXIT_FAILURE);
			}
			_exit(EXIT_SUCCESS);
		}
		/*
		 * For local shm, we need to have rw access to accept
		 * opening it: this means the local sessiond will be
		 * able to wake us up. For global shm, we open it even
		 * if rw access is not granted, because the root.root
		 * sessiond will be able to override all rights and wake
		 * us up.
		 */
		if (!sock_info->global && errno != EACCES) {
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			_exit(EXIT_FAILURE);
		}
		/*
		 * The shm exists, but we cannot open it RW. Report
		 * success.
		 */
		_exit(EXIT_SUCCESS);
	} else {
		/* fork() itself failed. */
		return -1;
	}
end:
	if (wait_shm_fd >= 0 && !sock_info->global) {
		struct stat statbuf;

		/*
		 * Ensure that our user is the owner of the shm file for
		 * local shm. If we do not own the file, it means our
		 * sessiond will not have access to wake us up (there is
		 * probably a rogue process trying to fake our
		 * sessiond). Fallback to polling method in this case.
		 */
		ret = fstat(wait_shm_fd, &statbuf);
		if (ret) {
			PERROR("fstat");
			goto error_close;
		}
		if (statbuf.st_uid != getuid())
			goto error_close;
	}
	return wait_shm_fd;

error_close:
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	return -1;
}
708
/*
 * Map one read-only page of the sessiond wait shm.
 * The shm fd is closed as soon as the mapping is taken; callers only
 * ever use the mapping. Returns the mapping, or NULL on failure
 * (caller falls back to poll mode).
 */
static
char *get_map_shm(struct sock_info *sock_info)
{
	size_t page_size = sysconf(_SC_PAGE_SIZE);
	char *map;
	int shm_fd, err;

	shm_fd = get_wait_shm(sock_info, page_size);
	if (shm_fd < 0)
		return NULL;

	map = mmap(NULL, page_size, PROT_READ, MAP_SHARED, shm_fd, 0);
	/* close shm fd immediately after taking the mmap reference */
	err = close(shm_fd);
	if (err) {
		PERROR("Error closing fd");
	}
	if (map == MAP_FAILED) {
		DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
		return NULL;
	}
	return map;
}
736
/*
 * Block until the session daemon flips the wait-shm futex word, i.e.
 * until a sessiond is available. Takes the ust lock only to check the
 * quit flag and lazily map the wait shm; the futex wait itself runs
 * unlocked. Falls back to poll mode (wait_poll_fallback) on kernels
 * whose FUTEX_WAIT does not work on read-only mappings.
 */
static
void wait_for_sessiond(struct sock_info *sock_info)
{
	int ret;

	ust_lock();
	if (lttng_ust_comm_should_quit) {
		goto quit;
	}
	if (wait_poll_fallback) {
		goto error;
	}
	if (!sock_info->wait_shm_mmap) {
		sock_info->wait_shm_mmap = get_map_shm(sock_info);
		if (!sock_info->wait_shm_mmap)
			goto error;
	}
	ust_unlock();

	DBG("Waiting for %s apps sessiond", sock_info->name);
	/* Wait for futex wakeup */
	if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
		ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
			FUTEX_WAIT, 0, NULL, NULL, 0);
		if (ret < 0) {
			if (errno == EFAULT) {
				/* Known kernel limitation: switch to polling permanently. */
				wait_poll_fallback = 1;
				DBG(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
				if (ust_debug())
					PERROR("futex");
			}
		}
	}
	return;

quit:
	ust_unlock();
	return;

error:
	ust_unlock();
	return;
}
785
/*
 * This thread does not allocate any resource, except within
 * handle_message, within mutex protection. This mutex protects against
 * fork and exit.
 * The other moment it allocates resources is at socket connection, which
 * is also protected by the mutex.
 *
 * State machine: (re)connect to the sessiond unix socket, register,
 * then loop receiving commands. On any connection failure, report
 * register-done (so the app constructor is not delayed) and retry,
 * waiting on the sessiond wait-shm between attempts. Exits only when
 * lttng_ust_comm_should_quit is set.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret, prev_connect_failed = 0, has_waited = 0;

	/* Restart trying to connect to the session daemon */
restart:
	if (prev_connect_failed) {
		/* Wait for sessiond availability with pipe */
		wait_for_sessiond(sock_info);
		if (has_waited) {
			has_waited = 0;
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure. This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		}
		has_waited = 1;
		prev_connect_failed = 0;
	}
	ust_lock();

	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}

	/* Close any leftover socket from a previous iteration. */
	if (sock_info->socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->socket);
		if (ret) {
			ERR("Error closing %s apps socket", sock_info->name);
		}
		sock_info->socket = -1;
	}

	/* Register */
	ret = ustcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}

	sock_info->socket = sock = ret;

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime, so we ensure we get ID which is statically
	 * assigned to the root handle.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret < 0) {
			ERR("Error creating root handle");
			ust_unlock();
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_app_to_sessiond(sock);
	if (ret < 0) {
		ERR("Error registering to %s apps socket", sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	ust_unlock();

	/* Command-receive loop: one ustcomm_ust_msg per iteration. */
	for (;;) {
		ssize_t len;
		struct ustcomm_ust_msg lum;

		len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
			ust_lock();
			if (lttng_ust_comm_should_quit) {
				ust_unlock();
				goto quit;
			}
			/*
			 * Either sessiond has shutdown or refused us by closing the socket.
			 * In either case, we don't want to delay construction execution,
			 * and we need to wait before retry.
			 */
			prev_connect_failed = 1;
			/*
			 * If we cannot register to the sessiond daemon, don't
			 * delay constructor execution.
			 */
			ret = handle_register_done(sock_info);
			assert(!ret);
			ust_unlock();
			goto end;
		case sizeof(lum):
			DBG("message received");
			ret = handle_message(sock_info, sock, &lum);
			if (ret) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		default:
			if (len < 0) {
				DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
			} else {
				DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
			}
			if (len == -ECONNRESET) {
				DBG("%s remote end closed connection", sock_info->name);
				goto end;
			}
			goto end;
		}

	}
end:
	ust_lock();
	if (lttng_ust_comm_should_quit) {
		ust_unlock();
		goto quit;
	}
	/* Cleanup socket handles before trying to reconnect */
	lttng_ust_objd_table_owner_cleanup(sock_info);
	ust_unlock();
	goto restart;	/* try to reconnect */
quit:
	return NULL;
}
939
940 /*
941 * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
942 */
943 static
944 int get_timeout(struct timespec *constructor_timeout)
945 {
946 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
947 char *str_delay;
948 int ret;
949
950 str_delay = getenv("LTTNG_UST_REGISTER_TIMEOUT");
951 if (str_delay) {
952 constructor_delay_ms = strtol(str_delay, NULL, 10);
953 }
954
955 switch (constructor_delay_ms) {
956 case -1:/* fall-through */
957 case 0:
958 return constructor_delay_ms;
959 default:
960 break;
961 }
962
963 /*
964 * If we are unable to find the current time, don't wait.
965 */
966 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
967 if (ret) {
968 return -1;
969 }
970 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
971 constructor_timeout->tv_nsec +=
972 (constructor_delay_ms % 1000UL) * 1000000UL;
973 if (constructor_timeout->tv_nsec >= 1000000000UL) {
974 constructor_timeout->tv_sec++;
975 constructor_timeout->tv_nsec -= 1000000000UL;
976 }
977 return 1;
978 }
979
/*
 * sessiond monitoring thread: monitor presence of global and per-user
 * sessiond by polling the application common named pipe.
 *
 * Library constructor: initialize tracer sub-libraries, then start one
 * detached listener thread per sessiond flavor and (depending on
 * LTTNG_UST_REGISTER_TIMEOUT) wait for both to report register-done
 * before letting the application's main() run. Idempotent via the
 * "initialized" flag (also re-run after fork in the child).
 */
void __attribute__((constructor)) lttng_ust_init(void)
{
	struct timespec constructor_timeout;
	sigset_t sig_all_blocked, orig_parent_mask;
	pthread_attr_t thread_attr;
	int timeout_mode;
	int ret;

	/* Run at most once per process image. */
	if (uatomic_xchg(&initialized, 1) == 1)
		return;

	/*
	 * Fixup interdependency between TLS fixup mutex (which happens
	 * to be the dynamic linker mutex) and ust_lock, taken within
	 * the ust lock.
	 */
	lttng_fixup_event_tls();
	lttng_fixup_ringbuffer_tls();
	lttng_fixup_vtid_tls();
	lttng_fixup_nest_count_tls();
	lttng_fixup_procname_tls();

	/*
	 * We want precise control over the order in which we construct
	 * our sub-libraries vs starting to receive commands from
	 * sessiond (otherwise leading to errors when trying to create
	 * sessiond before the init functions are completed).
	 */
	init_usterr();
	init_tracepoint();
	lttng_ring_buffer_metadata_client_init();
	lttng_ring_buffer_client_overwrite_init();
	lttng_ring_buffer_client_discard_init();

	timeout_mode = get_timeout(&constructor_timeout);

	ret = sem_init(&constructor_wait, 0, 0);
	assert(!ret);

	ret = setup_local_apps();
	if (ret) {
		DBG("local apps setup returned %d", ret);
	}

	/* A new thread created by pthread_create inherits the signal mask
	 * from the parent. To avoid any signal being received by the
	 * listener thread, we block all signals temporarily in the parent,
	 * while we create the listener thread.
	 */
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}

	/* Detached: we never join listener threads (see lttng_ust_exit). */
	ret = pthread_attr_init(&thread_attr);
	if (ret) {
		ERR("pthread_attr_init: %s", strerror(ret));
	}
	ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
	if (ret) {
		ERR("pthread_attr_setdetachstate: %s", strerror(ret));
	}

	ret = pthread_create(&global_apps.ust_listener, &thread_attr,
			ust_listener_thread, &global_apps);
	if (ret) {
		ERR("pthread_create global: %s", strerror(ret));
	}
	if (local_apps.allowed) {
		ret = pthread_create(&local_apps.ust_listener, &thread_attr,
				ust_listener_thread, &local_apps);
		if (ret) {
			ERR("pthread_create local: %s", strerror(ret));
		}
	} else {
		/* No per-user sessiond: account for it so the wait can complete. */
		handle_register_done(&local_apps);
	}
	ret = pthread_attr_destroy(&thread_attr);
	if (ret) {
		ERR("pthread_attr_destroy: %s", strerror(ret));
	}

	/* Restore original signal mask in parent */
	ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}

	/* Wait for both listeners per the computed timeout mode. */
	switch (timeout_mode) {
	case 1:	/* timeout wait */
		do {
			ret = sem_timedwait(&constructor_wait,
					&constructor_timeout);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0 && errno == ETIMEDOUT) {
			ERR("Timed out waiting for lttng-sessiond");
		} else {
			assert(!ret);
		}
		break;
	case -1:/* wait forever */
		do {
			ret = sem_wait(&constructor_wait);
		} while (ret < 0 && errno == EINTR);
		assert(!ret);
		break;
	case 0:	/* no timeout */
		break;
	}
}
1095
/*
 * Tear down tracer state. Used both at process exit (exiting = 1) and
 * after fork in the child (exiting = 0), where the per-process state
 * must additionally be reset so lttng_ust_init() can run again.
 */
static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps, exiting);
	if (local_apps.allowed) {
		cleanup_sock_info(&local_apps, exiting);
	}
	/*
	 * The teardown in this function all affect data structures
	 * accessed under the UST lock by the listener thread. This
	 * lock, along with the lttng_ust_comm_should_quit flag, ensure
	 * that none of these threads are accessing this data at this
	 * point.
	 */
	lttng_ust_abi_exit();
	lttng_ust_events_exit();
	lttng_ring_buffer_client_discard_exit();
	lttng_ring_buffer_client_overwrite_exit();
	lttng_ring_buffer_metadata_client_exit();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}
1123
/*
 * Library destructor: ask the listener threads to quit, cancel them,
 * and tear down tracer state. See the comments below for why the
 * threads are cancelled but never joined.
 */
void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the thread is not allocating any resource.
	 */

	/*
	 * Require the communication thread to quit. Synchronize with
	 * mutexes to ensure it is not in a mutex critical section when
	 * pthread_cancel is later called.
	 */
	ust_lock();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	/* cancel threads */
	ret = pthread_cancel(global_apps.ust_listener);
	if (ret) {
		ERR("Error cancelling global ust listener thread: %s",
			strerror(ret));
	}
	if (local_apps.allowed) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread: %s",
				strerror(ret));
		}
	}
	/*
	 * Do NOT join threads: use of sys_futex makes it impossible to
	 * join the threads without using async-cancel, but async-cancel
	 * is delivered by a signal, which could hit the target thread
	 * anywhere in its code path, including while the ust_lock() is
	 * held, causing a deadlock for the other thread. Let the OS
	 * cleanup the threads if there are stalled in a syscall.
	 */
	lttng_ust_cleanup(1);
}
1166
/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of an tracepoint or ust tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(sigset_t *save_sigset)
{
	/*
	 * Disable signals. This is to avoid that the child intervenes
	 * before it is properly setup for tracing. It is safer to
	 * disable all signals, because then we know we are not breaking
	 * anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int ret;

	/* Skip the handlers for fork() calls made by lttng-ust itself. */
	if (lttng_ust_nest_count)
		return;
	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
	/* Hold the UST lock across the fork; released in ust_after_fork_common(). */
	ust_lock();
	rcu_bp_before_fork();
}
1196
1197 static void ust_after_fork_common(sigset_t *restore_sigset)
1198 {
1199 int ret;
1200
1201 DBG("process %d", getpid());
1202 ust_unlock();
1203 /* Restore signals */
1204 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
1205 if (ret == -1) {
1206 PERROR("sigprocmask");
1207 }
1208 }
1209
/*
 * Post-fork handler for the parent process: let urcu-bp resume, then
 * release the UST lock and restore signals via the common tail.
 */
void ust_after_fork_parent(sigset_t *restore_sigset)
{
	/* Skip the handlers for fork() calls made by lttng-ust itself. */
	if (lttng_ust_nest_count)
		return;
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
}
1219
/*
 * After fork, in the child, we need to cleanup all the leftover state,
 * except the worker thread which already magically disappeared thanks
 * to the weird Linux fork semantics. After tyding up, we call
 * lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for forks() that have tracing in the child between the
 * fork and following exec call (if there is any).
 */
void ust_after_fork_child(sigset_t *restore_sigset)
{
	/* Skip the handlers for fork() calls made by lttng-ust itself. */
	if (lttng_ust_nest_count)
		return;
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	/* Reset tracer state (exiting = 0: also rearm for re-init). */
	lttng_ust_cleanup(0);
	lttng_context_vtid_reset();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
	/* Start over with a fresh registration under the new PID. */
	lttng_ust_init();
}
This page took 0.054758 seconds and 5 git commands to generate.