Bugfix for #745 deadlock with baddr statedump+fork
[lttng-ust.git] / liblttng-ust / lttng-ust-comm.c
2691221a
MD
1/*
2 * lttng-ust-comm.c
3 *
4 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
5 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
80e2814b 22#define _LGPL_SOURCE
2691221a
MD
23#include <sys/types.h>
24#include <sys/socket.h>
7fc90dca
MD
25#include <sys/mman.h>
26#include <sys/stat.h>
58d4b2a2
MD
27#include <sys/types.h>
28#include <sys/wait.h>
7fc90dca 29#include <fcntl.h>
2691221a
MD
30#include <unistd.h>
31#include <errno.h>
d9e99d10 32#include <pthread.h>
11ff9c7d
MD
33#include <semaphore.h>
34#include <time.h>
1ea11eab 35#include <assert.h>
e822f505 36#include <signal.h>
95259bd0 37#include <urcu/uatomic.h>
80e2814b 38#include <urcu/futex.h>
c117fb1b 39#include <urcu/compiler.h>
1ea11eab 40
4318ae1b 41#include <lttng/ust-events.h>
4318ae1b 42#include <lttng/ust-abi.h>
4318ae1b 43#include <lttng/ust.h>
7bc53e94 44#include <lttng/ust-error.h>
74d81a6c 45#include <lttng/ust-ctl.h>
8c90a710 46#include <urcu/tls-compat.h>
44c72f10
MD
47#include <ust-comm.h>
48#include <usterr-signal-safe.h>
cd54f6d9 49#include <helper.h>
44c72f10 50#include "tracepoint-internal.h"
7dd08bec 51#include "lttng-tracer-core.h"
08114193 52#include "compat.h"
f645cfa7 53#include "../libringbuffer/tlsfixup.h"
394598c1 54#include "lttng-ust-baddr.h"
edaa1431
MD
55
56/*
57 * Has the lttng-ust comm constructor been called?
58 */
59static int initialized;
60
1ea11eab 61/*
17dfb34b
MD
62 * The ust_lock/ust_unlock pair acts as the communication thread mutex.
63 * Held when handling a command, also held by fork() to deal with
64 * removal of threads, and by the exit path.
3327ac33
MD
65 *
66 * The UST lock is the centralized mutex across UST tracing control and
67 * probe registration.
68 *
69 * ust_exit_mutex must never nest in ust_mutex.
d58d1454
MD
70 *
71 * ust_mutex_nest is a per-thread nesting counter. It allows the lazy
72 * initialization of perf counters by events fired within the statedump,
73 * which traces while ust_mutex is held.
3327ac33
MD
74 */
75static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
76
d58d1454
MD
77/* Allow nesting the ust_mutex within the same thread. */
78static DEFINE_URCU_TLS(int, ust_mutex_nest);
79
3327ac33
MD
80/*
81 * ust_exit_mutex protects thread_active variable wrt thread exit. It
82 * cannot be done by ust_mutex because pthread_cancel(), which takes an
83 * internal libc lock, cannot nest within ust_mutex.
84 *
85 * It never nests within a ust_mutex.
1ea11eab 86 */
3327ac33 87static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
1ea11eab 88
458d678c
PW
89/*
90 * ust_fork_mutex protects base address statedump tracing against forks. It
91 * prevents the dynamic loader lock from being taken (by base address statedump
92 * tracing) while a fork is happening, thus preventing deadlock issues with
93 * the dynamic loader lock.
94 */
95static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
96
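/*
 * Illustrative sketch (not part of this file): the deadlock avoided by
 * ust_fork_mutex arises because both the base address statedump (via
 * dl_iterate_phdr()) and fork() can end up contending on the dynamic
 * loader lock. Serializing the two paths behind a dedicated mutex, as
 * handle_pending_statedump() and ust_before_fork() do below, follows
 * the general pattern sketched here. All names in this sketch are
 * hypothetical; <link.h> provides dl_iterate_phdr().
 */
static pthread_mutex_t example_fork_guard = PTHREAD_MUTEX_INITIALIZER;

static int example_phdr_cb(struct dl_phdr_info *info, size_t size, void *data)
{
	return 0;	/* visit every shared object, nothing else */
}

static void example_statedump(void)
{
	pthread_mutex_lock(&example_fork_guard);
	/* dl_iterate_phdr() takes the dynamic loader lock internally. */
	dl_iterate_phdr(example_phdr_cb, NULL);
	pthread_mutex_unlock(&example_fork_guard);
}

static pid_t example_fork(void)
{
	pid_t pid;

	pthread_mutex_lock(&example_fork_guard);
	pid = fork();	/* never runs concurrently with the statedump walk */
	pthread_mutex_unlock(&example_fork_guard);
	return pid;
}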
1ea11eab
MD
97/* Should the ust comm thread quit? */
98static int lttng_ust_comm_should_quit;
99
3327ac33 100/*
d58d1454 101 * Return 0 on success, -1 if the caller should quit.
3327ac33 102 * The lock is taken in both cases.
d58d1454 103 * Signal-safe.
3327ac33
MD
104 */
105int ust_lock(void)
106{
d58d1454
MD
107 sigset_t sig_all_blocked, orig_mask;
108 int ret;
109
110 sigfillset(&sig_all_blocked);
111 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
112 if (ret) {
113 ERR("pthread_sigmask: %s", strerror(ret));
114 }
115 if (!URCU_TLS(ust_mutex_nest)++)
116 pthread_mutex_lock(&ust_mutex);
117 ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
118 if (ret) {
119 ERR("pthread_sigmask: %s", strerror(ret));
120 }
3327ac33
MD
121 if (lttng_ust_comm_should_quit) {
122 return -1;
123 } else {
124 return 0;
125 }
126}
127
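/*
 * Illustrative usage sketch (not part of this file): callers treat a
 * non-zero return from ust_lock() as "the library is exiting", but the
 * lock is taken in both cases, so the bail-out path must still go
 * through ust_unlock(). This mirrors the pattern used by
 * handle_message() and the listener thread below; the function name is
 * hypothetical.
 */
static int example_locked_operation(void)
{
	int ret = 0;

	if (ust_lock()) {
		ret = -1;	/* exiting: skip the work... */
		goto end;
	}
	/* ... do work that requires the UST lock here ... */
end:
	ust_unlock();		/* ...but always release the lock. */
	return ret;
}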
128/*
129 * ust_lock_nocheck() can be used in constructors/destructors, because
130 * they are already nested within the dynamic loader lock, and therefore
131 * have exclusive access against execution of liblttng-ust destructor.
d58d1454 132 * Signal-safe.
3327ac33
MD
133 */
134void ust_lock_nocheck(void)
135{
d58d1454
MD
136 sigset_t sig_all_blocked, orig_mask;
137 int ret;
138
139 sigfillset(&sig_all_blocked);
140 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
141 if (ret) {
142 ERR("pthread_sigmask: %s", strerror(ret));
143 }
144 if (!URCU_TLS(ust_mutex_nest)++)
145 pthread_mutex_lock(&ust_mutex);
146 ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
147 if (ret) {
148 ERR("pthread_sigmask: %s", strerror(ret));
149 }
3327ac33
MD
150}
151
d58d1454
MD
152/*
153 * Signal-safe.
154 */
3327ac33
MD
155void ust_unlock(void)
156{
d58d1454
MD
157 sigset_t sig_all_blocked, orig_mask;
158 int ret;
159
160 sigfillset(&sig_all_blocked);
161 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
162 if (ret) {
163 ERR("pthread_sigmask: %s", strerror(ret));
164 }
165 if (!--URCU_TLS(ust_mutex_nest))
166 pthread_mutex_unlock(&ust_mutex);
167 ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
168 if (ret) {
169 ERR("pthread_sigmask: %s", strerror(ret));
170 }
3327ac33
MD
171}
172
11ff9c7d
MD
173/*
174 * Wait for either of these before continuing to the main
175 * program:
176 * - the register_done message from sessiond daemon
177 * (will let the sessiond daemon enable sessions before main
178 * starts.)
179 * - sessiond daemon is not reachable.
180 * - timeout (ensuring applications are resilient to session
181 * daemon problems).
182 */
183static sem_t constructor_wait;
950aab0c
MD
184/*
185 * The count of 2 accounts for both the global and the local (per-user) sessiond.
186 */
95259bd0 187static int sem_count = { 2 };
11ff9c7d 188
e8508a49
MD
189/*
190 * Counting nesting within lttng-ust. Used to ensure that calling fork()
191 * from liblttng-ust does not execute the pre/post fork handlers.
192 */
8c90a710 193static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
e8508a49 194
1ea11eab
MD
195/*
196 * Info about socket and associated listener thread.
197 */
198struct sock_info {
11ff9c7d 199 const char *name;
1ea11eab 200 pthread_t ust_listener; /* listener thread */
46050b1a 201 int root_handle;
8d20bf54
MD
202 int constructor_sem_posted;
203 int allowed;
44e073f5 204 int global;
e33f3265 205 int thread_active;
7fc90dca
MD
206
207 char sock_path[PATH_MAX];
208 int socket;
32ce8569 209 int notify_socket;
7fc90dca
MD
210
211 char wait_shm_path[PATH_MAX];
212 char *wait_shm_mmap;
37dddb65
MD
213 /* Keep track of lazy state dump not performed yet. */
214 int statedump_pending;
1ea11eab 215};
2691221a
MD
216
217/* Socket from app (connect) to session daemon (listen) for communication */
1ea11eab 218struct sock_info global_apps = {
11ff9c7d 219 .name = "global",
44e073f5 220 .global = 1,
7fc90dca 221
46050b1a 222 .root_handle = -1,
8d20bf54 223 .allowed = 1,
e33f3265 224 .thread_active = 0,
7fc90dca 225
32ce8569 226 .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
7fc90dca 227 .socket = -1,
32ce8569 228 .notify_socket = -1,
7fc90dca 229
32ce8569 230 .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
95c25348 231
37dddb65 232 .statedump_pending = 0,
1ea11eab 233};
2691221a
MD
234
235/* TODO: allow global_apps_sock_path override */
236
1ea11eab 237struct sock_info local_apps = {
11ff9c7d 238 .name = "local",
44e073f5 239 .global = 0,
46050b1a 240 .root_handle = -1,
8d20bf54 241 .allowed = 0, /* Check setuid bit first */
e33f3265 242 .thread_active = 0,
7fc90dca
MD
243
244 .socket = -1,
32ce8569 245 .notify_socket = -1,
95c25348 246
37dddb65 247 .statedump_pending = 0,
1ea11eab 248};
2691221a 249
37ed587a
MD
250static int wait_poll_fallback;
251
74d81a6c
MD
252static const char *cmd_name_mapping[] = {
253 [ LTTNG_UST_RELEASE ] = "Release",
254 [ LTTNG_UST_SESSION ] = "Create Session",
255 [ LTTNG_UST_TRACER_VERSION ] = "Get Tracer Version",
256
257 [ LTTNG_UST_TRACEPOINT_LIST ] = "Create Tracepoint List",
258 [ LTTNG_UST_WAIT_QUIESCENT ] = "Wait for Quiescent State",
259 [ LTTNG_UST_REGISTER_DONE ] = "Registration Done",
260 [ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
261
262 /* Session FD commands */
263 [ LTTNG_UST_CHANNEL ] = "Create Channel",
264 [ LTTNG_UST_SESSION_START ] = "Start Session",
265 [ LTTNG_UST_SESSION_STOP ] = "Stop Session",
266
267 /* Channel FD commands */
268 [ LTTNG_UST_STREAM ] = "Create Stream",
269 [ LTTNG_UST_EVENT ] = "Create Event",
270
271 /* Event and Channel FD commands */
272 [ LTTNG_UST_CONTEXT ] = "Create Context",
273 [ LTTNG_UST_FLUSH_BUFFER ] = "Flush Buffer",
274
275 /* Event, Channel and Session commands */
276 [ LTTNG_UST_ENABLE ] = "Enable",
277 [ LTTNG_UST_DISABLE ] = "Disable",
278
279 /* Tracepoint list commands */
280 [ LTTNG_UST_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
281 [ LTTNG_UST_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
282
283 /* Event FD commands */
284 [ LTTNG_UST_FILTER ] = "Create Filter",
75582b3a 285 [ LTTNG_UST_EXCLUSION ] = "Add exclusions to event",
74d81a6c
MD
286};
287
ff517991
MD
288static const char *str_timeout;
289static int got_timeout_env;
290
7dd08bec 291extern void lttng_ring_buffer_client_overwrite_init(void);
34a91bdb 292extern void lttng_ring_buffer_client_overwrite_rt_init(void);
7dd08bec 293extern void lttng_ring_buffer_client_discard_init(void);
34a91bdb 294extern void lttng_ring_buffer_client_discard_rt_init(void);
7dd08bec
MD
295extern void lttng_ring_buffer_metadata_client_init(void);
296extern void lttng_ring_buffer_client_overwrite_exit(void);
34a91bdb 297extern void lttng_ring_buffer_client_overwrite_rt_exit(void);
7dd08bec 298extern void lttng_ring_buffer_client_discard_exit(void);
34a91bdb 299extern void lttng_ring_buffer_client_discard_rt_exit(void);
7dd08bec 300extern void lttng_ring_buffer_metadata_client_exit(void);
edaa1431 301
3c6f6263
AM
302/*
303 * Returns the HOME directory path. Caller MUST NOT free(3) the returned
304 * pointer.
305 */
306static
307const char *get_lttng_home_dir(void)
308{
309 const char *val;
310
311 val = (const char *) getenv("LTTNG_HOME");
312 if (val != NULL) {
313 return val;
314 }
315 return (const char *) getenv("HOME");
316}
317
a903623f
MD
318/*
319 * Force a read (which implies a TLS fixup for dlopen) of TLS variables.
320 */
321static
322void lttng_fixup_nest_count_tls(void)
323{
8c90a710 324 asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
a903623f
MD
325}
326
d58d1454
MD
327static
328void lttng_fixup_ust_mutex_nest_tls(void)
329{
330 asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
331}
332
32ce8569
MD
333int lttng_get_notify_socket(void *owner)
334{
335 struct sock_info *info = owner;
336
337 return info->notify_socket;
338}
339
74d81a6c
MD
340static
341void print_cmd(int cmd, int handle)
342{
343 const char *cmd_name = "Unknown";
344
fd67a004
MD
345 if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
346 && cmd_name_mapping[cmd]) {
74d81a6c
MD
347 cmd_name = cmd_name_mapping[cmd];
348 }
fd67a004
MD
349 DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
350 cmd_name, cmd,
74d81a6c
MD
351 lttng_ust_obj_get_name(handle), handle);
352}
353
2691221a 354static
8d20bf54 355int setup_local_apps(void)
2691221a
MD
356{
357 const char *home_dir;
7fc90dca 358 uid_t uid;
2691221a 359
7fc90dca 360 uid = getuid();
8d20bf54
MD
361 /*
362 * Disallow per-user tracing for setuid binaries.
363 */
7fc90dca 364 if (uid != geteuid()) {
9ec6895c 365 assert(local_apps.allowed == 0);
d0a1ae63 366 return 0;
8d20bf54 367 }
3c6f6263 368 home_dir = get_lttng_home_dir();
9ec6895c
MD
369 if (!home_dir) {
370 WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
371 assert(local_apps.allowed == 0);
2691221a 372 return -ENOENT;
9ec6895c
MD
373 }
374 local_apps.allowed = 1;
32ce8569
MD
375 snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
376 home_dir,
377 LTTNG_DEFAULT_HOME_RUNDIR,
378 LTTNG_UST_SOCK_FILENAME);
379 snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
380 LTTNG_UST_WAIT_FILENAME,
381 uid);
2691221a
MD
382 return 0;
383}
384
ff517991
MD
385/*
386 * Get notify_sock timeout, in ms.
387 * -1: don't wait. 0: wait forever. >0: timeout, in ms.
388 */
389static
390long get_timeout(void)
391{
392 long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
393
394 if (!got_timeout_env) {
395 str_timeout = getenv("LTTNG_UST_REGISTER_TIMEOUT");
396 got_timeout_env = 1;
397 }
398 if (str_timeout)
399 constructor_delay_ms = strtol(str_timeout, NULL, 10);
400 return constructor_delay_ms;
401}
402
403static
404long get_notify_sock_timeout(void)
405{
406 return get_timeout();
407}
408
409/*
410 * Return values: -1: don't wait. 0: wait forever. 1: timeout wait.
411 */
412static
413int get_constructor_timeout(struct timespec *constructor_timeout)
414{
415 long constructor_delay_ms;
416 int ret;
417
418 constructor_delay_ms = get_timeout();
419
420 switch (constructor_delay_ms) {
421 case -1:/* fall-through */
422 case 0:
423 return constructor_delay_ms;
424 default:
425 break;
426 }
427
428 /*
429 * If we are unable to find the current time, don't wait.
430 */
431 ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
432 if (ret) {
433 return -1;
434 }
435 constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
436 constructor_timeout->tv_nsec +=
437 (constructor_delay_ms % 1000UL) * 1000000UL;
438 if (constructor_timeout->tv_nsec >= 1000000000UL) {
439 constructor_timeout->tv_sec++;
440 constructor_timeout->tv_nsec -= 1000000000UL;
441 }
442 return 1;
443}
444
2691221a 445static
32ce8569 446int register_to_sessiond(int socket, enum ustctl_socket_type type)
2691221a 447{
32ce8569
MD
448 return ustcomm_send_reg_msg(socket,
449 type,
450 CAA_BITS_PER_LONG,
451 lttng_alignof(uint8_t) * CHAR_BIT,
452 lttng_alignof(uint16_t) * CHAR_BIT,
453 lttng_alignof(uint32_t) * CHAR_BIT,
454 lttng_alignof(uint64_t) * CHAR_BIT,
455 lttng_alignof(unsigned long) * CHAR_BIT);
2691221a
MD
456}
457
d9e99d10 458static
57773204 459int send_reply(int sock, struct ustcomm_ust_reply *lur)
d9e99d10 460{
9eb62b9c 461 ssize_t len;
d3a492d1 462
57773204 463 len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
d3a492d1 464 switch (len) {
a4be8962 465 case sizeof(*lur):
d3a492d1
MD
466 DBG("message successfully sent");
467 return 0;
7bc53e94
MD
468 default:
469 if (len == -ECONNRESET) {
470 DBG("remote end closed connection");
d3a492d1
MD
471 return 0;
472 }
7bc53e94
MD
473 if (len < 0)
474 return len;
475 DBG("incorrect message size: %zd", len);
476 return -EINVAL;
d3a492d1
MD
477 }
478}
479
480static
edaa1431 481int handle_register_done(struct sock_info *sock_info)
11ff9c7d
MD
482{
483 int ret;
484
edaa1431
MD
485 if (sock_info->constructor_sem_posted)
486 return 0;
487 sock_info->constructor_sem_posted = 1;
56cd7e2f
MD
488 if (uatomic_read(&sem_count) <= 0) {
489 return 0;
490 }
95259bd0
MD
491 ret = uatomic_add_return(&sem_count, -1);
492 if (ret == 0) {
493 ret = sem_post(&constructor_wait);
494 assert(!ret);
495 }
11ff9c7d
MD
496 return 0;
497}
498
37dddb65
MD
499/*
500 * Only execute pending statedump after the constructor semaphore has
501 * been posted by each listener thread. This means statedump will only
502 * be performed after the "registration done" command is received from
503 * each session daemon the application is connected to.
504 *
505 * This ensures we don't run into deadlock issues with the dynamic
506 * loader mutex, which is held while the constructor is called and
507 * waiting on the constructor semaphore. All operations requiring this
508 * dynamic loader lock need to be postponed using this mechanism.
509 */
510static
511void handle_pending_statedump(struct sock_info *sock_info)
512{
513 int ctor_passed = sock_info->constructor_sem_posted;
514
515 if (ctor_passed && sock_info->statedump_pending) {
458d678c 516 pthread_mutex_lock(&ust_fork_mutex);
37dddb65
MD
517 sock_info->statedump_pending = 0;
518 lttng_handle_pending_statedump(sock_info);
458d678c 519 pthread_mutex_unlock(&ust_fork_mutex);
37dddb65
MD
520 }
521}
522
11ff9c7d
MD
523static
524int handle_message(struct sock_info *sock_info,
57773204 525 int sock, struct ustcomm_ust_msg *lum)
d3a492d1 526{
1ea11eab 527 int ret = 0;
b61ce3b2 528 const struct lttng_ust_objd_ops *ops;
57773204 529 struct ustcomm_ust_reply lur;
ef9ff354 530 union ust_args args;
40003310 531 ssize_t len;
1ea11eab 532
46050b1a
MD
533 memset(&lur, 0, sizeof(lur));
534
3327ac33 535 if (ust_lock()) {
74d81a6c 536 ret = -LTTNG_UST_ERR_EXITING;
1ea11eab
MD
537 goto end;
538 }
9eb62b9c 539
46050b1a
MD
540 ops = objd_ops(lum->handle);
541 if (!ops) {
542 ret = -ENOENT;
543 goto end;
1ea11eab 544 }
46050b1a
MD
545
546 switch (lum->cmd) {
11ff9c7d
MD
547 case LTTNG_UST_REGISTER_DONE:
548 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
edaa1431 549 ret = handle_register_done(sock_info);
11ff9c7d
MD
550 else
551 ret = -EINVAL;
552 break;
46050b1a
MD
553 case LTTNG_UST_RELEASE:
554 if (lum->handle == LTTNG_UST_ROOT_HANDLE)
555 ret = -EPERM;
556 else
1849ef7c 557 ret = lttng_ust_objd_unref(lum->handle, 1);
d9e99d10 558 break;
2d78951a
MD
559 case LTTNG_UST_FILTER:
560 {
561 /* Receive filter data */
f488575f 562 struct lttng_ust_filter_bytecode_node *bytecode;
2d78951a 563
cd54f6d9 564 if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
7bc53e94 565 ERR("Filter data size is too large: %u bytes",
2d78951a
MD
566 lum->u.filter.data_size);
567 ret = -EINVAL;
568 goto error;
569 }
2734ca65 570
885b1dfd 571 if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
7bc53e94 572 ERR("Filter reloc offset %u is not within data",
2734ca65
CB
573 lum->u.filter.reloc_offset);
574 ret = -EINVAL;
575 goto error;
576 }
577
cd54f6d9
MD
578 bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
579 if (!bytecode) {
580 ret = -ENOMEM;
581 goto error;
582 }
f488575f 583 len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
2d78951a
MD
584 lum->u.filter.data_size);
585 switch (len) {
586 case 0: /* orderly shutdown */
587 ret = 0;
cd54f6d9 588 free(bytecode);
2d78951a 589 goto error;
2d78951a
MD
590 default:
591 if (len == lum->u.filter.data_size) {
7bc53e94 592 DBG("filter data received");
2d78951a 593 break;
7bc53e94
MD
594 } else if (len < 0) {
595 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
596 if (len == -ECONNRESET) {
597 ERR("%s remote end closed connection", sock_info->name);
598 ret = len;
599 free(bytecode);
600 goto error;
601 }
602 ret = len;
eb8bf361 603 free(bytecode);
7bc53e94 604 goto end;
2d78951a 605 } else {
7bc53e94 606 DBG("incorrect filter data message size: %zd", len);
2d78951a 607 ret = -EINVAL;
cd54f6d9 608 free(bytecode);
2d78951a
MD
609 goto end;
610 }
611 }
f488575f
MD
612 bytecode->bc.len = lum->u.filter.data_size;
613 bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
3f6fd224 614 bytecode->bc.seqnum = lum->u.filter.seqnum;
cd54f6d9 615 if (ops->cmd) {
2d78951a 616 ret = ops->cmd(lum->handle, lum->cmd,
cd54f6d9 617 (unsigned long) bytecode,
f59ed768 618 &args, sock_info);
cd54f6d9
MD
619 if (ret) {
620 free(bytecode);
621 }
622 /* don't free bytecode if everything went fine. */
623 } else {
2d78951a 624 ret = -ENOSYS;
cd54f6d9
MD
625 free(bytecode);
626 }
2d78951a
MD
627 break;
628 }
86e36163
JI
629 case LTTNG_UST_EXCLUSION:
630 {
631 /* Receive exclusion names */
632 struct lttng_ust_excluder_node *node;
633 unsigned int count;
634
635 count = lum->u.exclusion.count;
636 if (count == 0) {
637 /* There are no names to read */
638 ret = 0;
639 goto error;
640 }
641 node = zmalloc(sizeof(*node) +
642 count * LTTNG_UST_SYM_NAME_LEN);
643 if (!node) {
644 ret = -ENOMEM;
645 goto error;
646 }
647 node->excluder.count = count;
648 len = ustcomm_recv_unix_sock(sock, node->excluder.names,
649 count * LTTNG_UST_SYM_NAME_LEN);
650 switch (len) {
651 case 0: /* orderly shutdown */
652 ret = 0;
653 free(node);
654 goto error;
655 default:
656 if (len == count * LTTNG_UST_SYM_NAME_LEN) {
657 DBG("Exclusion data received");
658 break;
659 } else if (len < 0) {
660 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
661 if (len == -ECONNRESET) {
662 ERR("%s remote end closed connection", sock_info->name);
663 ret = len;
664 free(node);
665 goto error;
666 }
667 ret = len;
668 free(node);
669 goto end;
670 } else {
671 DBG("Incorrect exclusion data message size: %zd", len);
672 ret = -EINVAL;
673 free(node);
674 goto end;
675 }
676 }
677 if (ops->cmd) {
678 ret = ops->cmd(lum->handle, lum->cmd,
679 (unsigned long) node,
680 &args, sock_info);
681 if (ret) {
682 free(node);
683 }
684 /* Don't free exclusion data if everything went fine. */
685 } else {
686 ret = -ENOSYS;
687 free(node);
688 }
689 break;
690 }
74d81a6c
MD
691 case LTTNG_UST_CHANNEL:
692 {
693 void *chan_data;
ff0f5728 694 int wakeup_fd;
74d81a6c
MD
695
696 len = ustcomm_recv_channel_from_sessiond(sock,
ff0f5728
MD
697 &chan_data, lum->u.channel.len,
698 &wakeup_fd);
74d81a6c
MD
699 switch (len) {
700 case 0: /* orderly shutdown */
701 ret = 0;
702 goto error;
703 default:
704 if (len == lum->u.channel.len) {
705 DBG("channel data received");
706 break;
707 } else if (len < 0) {
708 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
709 if (len == -ECONNRESET) {
710 ERR("%s remote end closed connection", sock_info->name);
711 ret = len;
712 goto error;
713 }
714 ret = len;
715 goto end;
716 } else {
717 DBG("incorrect channel data message size: %zd", len);
718 ret = -EINVAL;
719 goto end;
720 }
721 }
722 args.channel.chan_data = chan_data;
ff0f5728 723 args.channel.wakeup_fd = wakeup_fd;
74d81a6c
MD
724 if (ops->cmd)
725 ret = ops->cmd(lum->handle, lum->cmd,
726 (unsigned long) &lum->u,
727 &args, sock_info);
728 else
729 ret = -ENOSYS;
730 break;
731 }
732 case LTTNG_UST_STREAM:
733 {
734 /* Receive shm_fd, wakeup_fd */
735 ret = ustcomm_recv_stream_from_sessiond(sock,
736 &lum->u.stream.len,
737 &args.stream.shm_fd,
738 &args.stream.wakeup_fd);
739 if (ret) {
740 goto end;
741 }
742 if (ops->cmd)
743 ret = ops->cmd(lum->handle, lum->cmd,
744 (unsigned long) &lum->u,
745 &args, sock_info);
746 else
747 ret = -ENOSYS;
748 break;
749 }
d9e99d10 750 default:
46050b1a
MD
751 if (ops->cmd)
752 ret = ops->cmd(lum->handle, lum->cmd,
ef9ff354 753 (unsigned long) &lum->u,
f59ed768 754 &args, sock_info);
46050b1a
MD
755 else
756 ret = -ENOSYS;
757 break;
d9e99d10 758 }
46050b1a 759
1ea11eab 760end:
46050b1a
MD
761 lur.handle = lum->handle;
762 lur.cmd = lum->cmd;
763 lur.ret_val = ret;
764 if (ret >= 0) {
7bc53e94 765 lur.ret_code = LTTNG_UST_OK;
46050b1a 766 } else {
7bc53e94
MD
767 /*
768 * Use -LTTNG_UST_ERR as wildcard for UST internal
769 * error that are not caused by the transport, except if
770 * we already have a more precise error message to
771 * report.
772 */
64b2564e
DG
773 if (ret > -LTTNG_UST_ERR) {
774 /* Translate code to UST error. */
775 switch (ret) {
776 case -EEXIST:
777 lur.ret_code = -LTTNG_UST_ERR_EXIST;
778 break;
779 case -EINVAL:
780 lur.ret_code = -LTTNG_UST_ERR_INVAL;
781 break;
782 case -ENOENT:
783 lur.ret_code = -LTTNG_UST_ERR_NOENT;
784 break;
785 case -EPERM:
786 lur.ret_code = -LTTNG_UST_ERR_PERM;
787 break;
788 case -ENOSYS:
789 lur.ret_code = -LTTNG_UST_ERR_NOSYS;
790 break;
791 default:
792 lur.ret_code = -LTTNG_UST_ERR;
793 break;
794 }
795 } else {
7bc53e94 796 lur.ret_code = ret;
64b2564e 797 }
46050b1a 798 }
e6ea14c5
MD
799 if (ret >= 0) {
800 switch (lum->cmd) {
e6ea14c5
MD
801 case LTTNG_UST_TRACER_VERSION:
802 lur.u.version = lum->u.version;
803 break;
804 case LTTNG_UST_TRACEPOINT_LIST_GET:
805 memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
806 break;
807 }
381c0f1e 808 }
74d81a6c 809 DBG("Return value: %d", lur.ret_val);
46050b1a 810 ret = send_reply(sock, &lur);
193183fb 811 if (ret < 0) {
7bc53e94 812 DBG("error sending reply");
193183fb
MD
813 goto error;
814 }
46050b1a 815
40003310
MD
816 /*
817 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
818 * after the reply.
819 */
7bc53e94 820 if (lur.ret_code == LTTNG_UST_OK) {
40003310
MD
821 switch (lum->cmd) {
822 case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
823 len = ustcomm_send_unix_sock(sock,
824 &args.field_list.entry,
825 sizeof(args.field_list.entry));
7bc53e94
MD
826 if (len < 0) {
827 ret = len;
828 goto error;
829 }
40003310 830 if (len != sizeof(args.field_list.entry)) {
7bc53e94 831 ret = -EINVAL;
40003310
MD
832 goto error;
833 }
834 }
835 }
ef9ff354 836
381c0f1e 837error:
17dfb34b 838 ust_unlock();
d9e99d10 839
37dddb65
MD
840 /*
841 * Perform delayed statedump operations outside of the UST
842 * lock. We need to take the dynamic loader lock before we take
843 * the UST lock internally within handle_pending_statedump().
844 */
845 handle_pending_statedump(sock_info);
246be17e 846
37dddb65 847 return ret;
246be17e
PW
848}
849
46050b1a 850static
efe0de09 851void cleanup_sock_info(struct sock_info *sock_info, int exiting)
46050b1a
MD
852{
853 int ret;
854
5b14aab3
MD
855 if (sock_info->root_handle != -1) {
856 ret = lttng_ust_objd_unref(sock_info->root_handle, 1);
857 if (ret) {
858 ERR("Error unref root handle");
859 }
860 sock_info->root_handle = -1;
861 }
862 sock_info->constructor_sem_posted = 0;
863
864 /*
865 * wait_shm_mmap, socket and notify socket are used by listener
866 * threads outside of the ust lock, so we cannot tear them down
867 * ourselves, because we cannot join on these threads. Leave
868 * responsibility of cleaning up these resources to the OS
869 * process exit.
870 */
871 if (exiting)
872 return;
873
46050b1a 874 if (sock_info->socket != -1) {
e6973a89 875 ret = ustcomm_close_unix_sock(sock_info->socket);
46050b1a 876 if (ret) {
32ce8569 877 ERR("Error closing ust cmd socket");
46050b1a
MD
878 }
879 sock_info->socket = -1;
880 }
32ce8569
MD
881 if (sock_info->notify_socket != -1) {
882 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
883 if (ret) {
884 ERR("Error closing ust notify socket");
885 }
886 sock_info->notify_socket = -1;
887 }
5b14aab3 888 if (sock_info->wait_shm_mmap) {
172d6b68
MD
889 long page_size;
890
891 page_size = sysconf(_SC_PAGE_SIZE);
892 if (page_size > 0) {
893 ret = munmap(sock_info->wait_shm_mmap, page_size);
894 if (ret) {
895 ERR("Error unmapping wait shm");
896 }
7fc90dca
MD
897 }
898 sock_info->wait_shm_mmap = NULL;
899 }
900}
901
58d4b2a2 902/*
33bbeb90
MD
903 * Using fork to set umask in the child process (not multi-thread safe).
904 * We deal with the shm_open vs ftruncate race (happening when the
905 * sessiond owns the shm and does not let everybody modify it, to ensure
906 * safety against shm_unlink) by simply letting the mmap fail and
907 * retrying after a few seconds.
908 * For global shm, everybody has rw access to it until the sessiond
909 * starts.
58d4b2a2 910 */
7fc90dca 911static
58d4b2a2 912int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
7fc90dca 913{
7fc90dca 914 int wait_shm_fd, ret;
58d4b2a2 915 pid_t pid;
44e073f5 916
58d4b2a2 917 /*
33bbeb90 918 * Try to open read-only.
58d4b2a2 919 */
33bbeb90 920 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
58d4b2a2 921 if (wait_shm_fd >= 0) {
7aa76730
MD
922 int32_t tmp_read;
923 ssize_t len;
924 size_t bytes_read = 0;
925
926 /*
927 * Try to read the fd. If unable to do so, try opening
928 * it in write mode.
929 */
930 do {
931 len = read(wait_shm_fd,
932 &((char *) &tmp_read)[bytes_read],
933 sizeof(tmp_read) - bytes_read);
934 if (len > 0) {
935 bytes_read += len;
936 }
937 } while ((len < 0 && errno == EINTR)
938 || (len > 0 && bytes_read < sizeof(tmp_read)));
939 if (bytes_read != sizeof(tmp_read)) {
940 ret = close(wait_shm_fd);
941 if (ret) {
942 ERR("close wait_shm_fd");
943 }
944 goto open_write;
945 }
58d4b2a2
MD
946 goto end;
947 } else if (wait_shm_fd < 0 && errno != ENOENT) {
948 /*
33bbeb90
MD
949 * Read-only open did not work, and it's not because the
950 * entry was not present. It's a failure that prohibits
951 * using shm.
58d4b2a2 952 */
7fc90dca 953 ERR("Error opening shm %s", sock_info->wait_shm_path);
58d4b2a2 954 goto end;
7fc90dca 955 }
7aa76730
MD
956
957open_write:
7fc90dca 958 /*
7aa76730
MD
959 * If the open failed because the file did not exist, or because
960 * the file was not truncated yet, try creating it ourselves.
7fc90dca 961 */
8c90a710 962 URCU_TLS(lttng_ust_nest_count)++;
58d4b2a2 963 pid = fork();
8c90a710 964 URCU_TLS(lttng_ust_nest_count)--;
58d4b2a2
MD
965 if (pid > 0) {
966 int status;
967
968 /*
969 * Parent: wait for child to return, in which case the
970 * shared memory map will have been created.
971 */
972 pid = wait(&status);
b7d3cb32 973 if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
58d4b2a2
MD
974 wait_shm_fd = -1;
975 goto end;
7fc90dca 976 }
58d4b2a2
MD
977 /*
978 * Try to open read-only again after creation.
979 */
33bbeb90 980 wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
58d4b2a2
MD
981 if (wait_shm_fd < 0) {
982 /*
983 * Read-only open did not work. It's a failure
984 * that prohibits using shm.
985 */
986 ERR("Error opening shm %s", sock_info->wait_shm_path);
987 goto end;
988 }
989 goto end;
990 } else if (pid == 0) {
991 int create_mode;
992
993 /* Child */
33bbeb90 994 create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
58d4b2a2 995 if (sock_info->global)
33bbeb90 996 create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
58d4b2a2
MD
997 /*
998 * We're alone in a child process, so we can modify the
999 * process-wide umask.
1000 */
33bbeb90 1001 umask(~create_mode);
58d4b2a2 1002 /*
33bbeb90
MD
1003 * Try creating shm (or get rw access).
1004 * We don't do an exclusive open, because we allow other
1005 * processes to create+ftruncate it concurrently.
58d4b2a2
MD
1006 */
1007 wait_shm_fd = shm_open(sock_info->wait_shm_path,
1008 O_RDWR | O_CREAT, create_mode);
1009 if (wait_shm_fd >= 0) {
1010 ret = ftruncate(wait_shm_fd, mmap_size);
1011 if (ret) {
1012 PERROR("ftruncate");
b0c1425d 1013 _exit(EXIT_FAILURE);
58d4b2a2 1014 }
b0c1425d 1015 _exit(EXIT_SUCCESS);
58d4b2a2 1016 }
33bbeb90
MD
1017 /*
1018 * For local shm, we need to have rw access to accept
1019 * opening it: this means the local sessiond will be
1020 * able to wake us up. For global shm, we open it even
1021 * if rw access is not granted, because the root.root
1022 * sessiond will be able to override all rights and wake
1023 * us up.
1024 */
1025 if (!sock_info->global && errno != EACCES) {
58d4b2a2 1026 ERR("Error opening shm %s", sock_info->wait_shm_path);
5d3bc5ed 1027 _exit(EXIT_FAILURE);
58d4b2a2
MD
1028 }
1029 /*
33bbeb90
MD
1030 * The shm exists, but we cannot open it RW. Report
1031 * success.
58d4b2a2 1032 */
5d3bc5ed 1033 _exit(EXIT_SUCCESS);
58d4b2a2
MD
1034 } else {
1035 return -1;
7fc90dca 1036 }
58d4b2a2 1037end:
33bbeb90
MD
1038 if (wait_shm_fd >= 0 && !sock_info->global) {
1039 struct stat statbuf;
1040
1041 /*
1042 * Ensure that our user is the owner of the shm file for
1043 * local shm. If we do not own the file, it means our
1044 * sessiond will not have access to wake us up (there is
1045 * probably a rogue process trying to fake our
1046 * sessiond). Fall back to the polling method in this case.
1047 */
1048 ret = fstat(wait_shm_fd, &statbuf);
1049 if (ret) {
1050 PERROR("fstat");
1051 goto error_close;
1052 }
1053 if (statbuf.st_uid != getuid())
1054 goto error_close;
1055 }
58d4b2a2 1056 return wait_shm_fd;
33bbeb90
MD
1057
1058error_close:
1059 ret = close(wait_shm_fd);
1060 if (ret) {
1061 PERROR("Error closing fd");
1062 }
1063 return -1;
58d4b2a2
MD
1064}
1065
1066static
1067char *get_map_shm(struct sock_info *sock_info)
1068{
172d6b68 1069 long page_size;
58d4b2a2
MD
1070 int wait_shm_fd, ret;
1071 char *wait_shm_mmap;
1072
172d6b68
MD
1073 page_size = sysconf(_SC_PAGE_SIZE);
1074 if (page_size < 0) {
1075 goto error;
1076 }
1077
1078 wait_shm_fd = get_wait_shm(sock_info, page_size);
58d4b2a2
MD
1079 if (wait_shm_fd < 0) {
1080 goto error;
44e073f5 1081 }
172d6b68 1082 wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
7fc90dca 1083 MAP_SHARED, wait_shm_fd, 0);
7fc90dca
MD
1084 /* close shm fd immediately after taking the mmap reference */
1085 ret = close(wait_shm_fd);
1086 if (ret) {
33bbeb90
MD
1087 PERROR("Error closing fd");
1088 }
1089 if (wait_shm_mmap == MAP_FAILED) {
1090 DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
1091 goto error;
7fc90dca
MD
1092 }
1093 return wait_shm_mmap;
1094
1095error:
1096 return NULL;
1097}
1098
1099static
1100void wait_for_sessiond(struct sock_info *sock_info)
1101{
efe0de09 1102 int ret;
80e2814b 1103
3327ac33 1104 if (ust_lock()) {
7fc90dca
MD
1105 goto quit;
1106 }
37ed587a
MD
1107 if (wait_poll_fallback) {
1108 goto error;
1109 }
7fc90dca
MD
1110 if (!sock_info->wait_shm_mmap) {
1111 sock_info->wait_shm_mmap = get_map_shm(sock_info);
1112 if (!sock_info->wait_shm_mmap)
1113 goto error;
1114 }
1115 ust_unlock();
1116
1117 DBG("Waiting for %s apps sessiond", sock_info->name);
80e2814b
MD
1118 /* Wait for futex wakeup */
1119 if (uatomic_read((int32_t *) sock_info->wait_shm_mmap) == 0) {
1120 ret = futex_async((int32_t *) sock_info->wait_shm_mmap,
1121 FUTEX_WAIT, 0, NULL, NULL, 0);
80e2814b 1122 if (ret < 0) {
37ed587a
MD
1123 if (errno == EFAULT) {
1124 wait_poll_fallback = 1;
a8b870ad 1125 DBG(
37ed587a
MD
1126"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
1127"do not support FUTEX_WAKE on read-only memory mappings correctly. "
1128"Please upgrade your kernel "
1129"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
1130"mainline). LTTng-UST will use polling mode fallback.");
cd27263b
MD
1131 if (ust_debug())
1132 PERROR("futex");
37ed587a 1133 }
80e2814b
MD
1134 }
1135 }
7fc90dca
MD
1136 return;
1137
1138quit:
1139 ust_unlock();
1140 return;
1141
1142error:
1143 ust_unlock();
7fc90dca 1144 return;
46050b1a
MD
1145}
1146
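/*
 * Sketch of the waker side, as an assumption about what a session
 * daemon would do (the actual lttng-sessiond code is not part of this
 * file): publish availability by setting the 32-bit word in the wait
 * shm to a non-zero value, then wake every process blocked in the
 * FUTEX_WAIT above. futex_async() comes from <urcu/futex.h>, already
 * included here; INT_MAX needs <limits.h>.
 */
static void example_wake_waiters(int32_t *wait_shm_mmap)
{
	int ret;

	/* Publish availability before waking. */
	uatomic_set(wait_shm_mmap, 1);
	/* Wake all waiters sleeping on this word. */
	ret = futex_async(wait_shm_mmap, FUTEX_WAKE,
			INT_MAX, NULL, NULL, 0);
	if (ret < 0) {
		PERROR("futex wake");
	}
}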
1ea11eab
MD
1147/*
1148 * This thread does not allocate any resource, except within
1149 * handle_message, within mutex protection. This mutex protects against
1150 * fork and exit.
98bf993f 1151 * The other point where it allocates resources is at socket connection, which
1ea11eab
MD
1152 * is also protected by the mutex.
1153 */
d9e99d10
MD
1154static
1155void *ust_listener_thread(void *arg)
1156{
1ea11eab 1157 struct sock_info *sock_info = arg;
c0eedf81 1158 int sock, ret, prev_connect_failed = 0, has_waited = 0;
ff517991 1159 long timeout;
d9e99d10 1160
9eb62b9c
MD
1161 /* Restart trying to connect to the session daemon */
1162restart:
c0eedf81
MD
1163 if (prev_connect_failed) {
1164 /* Wait for sessiond availability with pipe */
1165 wait_for_sessiond(sock_info);
1166 if (has_waited) {
1167 has_waited = 0;
1168 /*
1169 * Sleep for 5 seconds before retrying after a
1170 * sequence of failure / wait / failure. This
1171 * deals with a killed or broken session daemon.
1172 */
1173 sleep(5);
1174 }
1175 has_waited = 1;
1176 prev_connect_failed = 0;
1177 }
9eb62b9c 1178
1ea11eab 1179 if (sock_info->socket != -1) {
e6973a89 1180 ret = ustcomm_close_unix_sock(sock_info->socket);
1ea11eab 1181 if (ret) {
32ce8569
MD
1182 ERR("Error closing %s ust cmd socket",
1183 sock_info->name);
1ea11eab
MD
1184 }
1185 sock_info->socket = -1;
1186 }
32ce8569
MD
1187 if (sock_info->notify_socket != -1) {
1188 ret = ustcomm_close_unix_sock(sock_info->notify_socket);
1189 if (ret) {
1190 ERR("Error closing %s ust notify socket",
1191 sock_info->name);
1192 }
1193 sock_info->notify_socket = -1;
1194 }
46050b1a 1195
321f2351
MD
1196 /*
1197 * Register. We need to perform both connect and sending
1198 * registration message before doing the next connect otherwise
1199 * we may reach unix socket connect queue max limits and block
1200 * on the 2nd connect while the session daemon is awaiting the
1201 * first connect registration message.
1202 */
1203 /* Connect cmd socket */
1204 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1205 if (ret < 0) {
1206 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1207 prev_connect_failed = 1;
5b14aab3 1208
3327ac33 1209 if (ust_lock()) {
321f2351 1210 goto quit;
32ce8569 1211 }
46050b1a 1212
e3426ddc 1213 /*
321f2351
MD
1214 * If we cannot find the sessiond daemon, don't delay
1215 * constructor execution.
e3426ddc 1216 */
321f2351
MD
1217 ret = handle_register_done(sock_info);
1218 assert(!ret);
1219 ust_unlock();
1220 goto restart;
27fe9f21 1221 }
321f2351 1222 sock_info->socket = ret;
27fe9f21 1223
3327ac33 1224 if (ust_lock()) {
5b14aab3
MD
1225 goto quit;
1226 }
1227
46050b1a
MD
1228 /*
1229 * Create only one root handle per listener thread for the whole
f59ed768
MD
1230 * process lifetime, so we ensure we get ID which is statically
1231 * assigned to the root handle.
46050b1a
MD
1232 */
1233 if (sock_info->root_handle == -1) {
1234 ret = lttng_abi_create_root_handle();
a51070bb 1235 if (ret < 0) {
46050b1a 1236 ERR("Error creating root handle");
46050b1a
MD
1237 goto quit;
1238 }
1239 sock_info->root_handle = ret;
9eb62b9c 1240 }
1ea11eab 1241
32ce8569 1242 ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
9eb62b9c 1243 if (ret < 0) {
32ce8569
MD
1244 ERR("Error registering to %s ust cmd socket",
1245 sock_info->name);
c0eedf81 1246 prev_connect_failed = 1;
11ff9c7d
MD
1247 /*
1248 * If we cannot register to the sessiond daemon, don't
1249 * delay constructor execution.
1250 */
edaa1431 1251 ret = handle_register_done(sock_info);
11ff9c7d 1252 assert(!ret);
17dfb34b 1253 ust_unlock();
9eb62b9c
MD
1254 goto restart;
1255 }
321f2351
MD
1256
1257 ust_unlock();
1258
1259 /* Connect notify socket */
1260 ret = ustcomm_connect_unix_sock(sock_info->sock_path);
1261 if (ret < 0) {
1262 DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
1263 prev_connect_failed = 1;
1264
3327ac33 1265 if (ust_lock()) {
321f2351
MD
1266 goto quit;
1267 }
1268
1269 /*
1270 * If we cannot find the sessiond daemon, don't delay
1271 * constructor execution.
1272 */
1273 ret = handle_register_done(sock_info);
1274 assert(!ret);
1275 ust_unlock();
1276 goto restart;
1277 }
1278 sock_info->notify_socket = ret;
1279
1280 timeout = get_notify_sock_timeout();
1281 if (timeout >= 0) {
1282 /*
1283 * Give at least 10ms to sessiond to reply to
1284 * notifications.
1285 */
1286 if (timeout < 10)
1287 timeout = 10;
1288 ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
1289 timeout);
1290 if (ret < 0) {
1291 WARN("Error setting socket receive timeout");
1292 }
1293 ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
1294 timeout);
1295 if (ret < 0) {
1296 WARN("Error setting socket send timeout");
1297 }
1298 } else if (timeout < -1) {
1299 WARN("Unsupported timeout value %ld", timeout);
1300 }
1301
3327ac33 1302 if (ust_lock()) {
321f2351
MD
1303 goto quit;
1304 }
1305
32ce8569
MD
1306 ret = register_to_sessiond(sock_info->notify_socket,
1307 USTCTL_SOCKET_NOTIFY);
1308 if (ret < 0) {
1309 ERR("Error registering to %s ust notify socket",
1310 sock_info->name);
1311 prev_connect_failed = 1;
1312 /*
1313 * If we cannot register to the sessiond daemon, don't
1314 * delay constructor execution.
1315 */
1316 ret = handle_register_done(sock_info);
1317 assert(!ret);
1318 ust_unlock();
1319 goto restart;
1320 }
1321 sock = sock_info->socket;
1322
17dfb34b 1323 ust_unlock();
46050b1a 1324
d9e99d10
MD
1325 for (;;) {
1326 ssize_t len;
57773204 1327 struct ustcomm_ust_msg lum;
d9e99d10 1328
57773204 1329 len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
d9e99d10
MD
1330 switch (len) {
1331 case 0: /* orderly shutdown */
7dd08bec 1332 DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
3327ac33 1333 if (ust_lock()) {
d5e1fea6
MD
1334 goto quit;
1335 }
8236ba10
MD
1336 /*
1337 * Either sessiond has shut down or refused us by closing the socket.
1338 * In either case, we don't want to delay constructor execution,
1339 * and we need to wait before retrying.
1340 */
1341 prev_connect_failed = 1;
1342 /*
1343 * If we cannot register to the sessiond daemon, don't
1344 * delay constructor execution.
1345 */
1346 ret = handle_register_done(sock_info);
1347 assert(!ret);
1348 ust_unlock();
d9e99d10 1349 goto end;
e7723462 1350 case sizeof(lum):
74d81a6c 1351 print_cmd(lum.cmd, lum.handle);
11ff9c7d 1352 ret = handle_message(sock_info, sock, &lum);
7bc53e94 1353 if (ret) {
11ff9c7d 1354 ERR("Error handling message for %s socket", sock_info->name);
d9e99d10
MD
1355 }
1356 continue;
7bc53e94
MD
1357 default:
1358 if (len < 0) {
1359 DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
1360 } else {
1361 DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
1362 }
1363 if (len == -ECONNRESET) {
1364 DBG("%s remote end closed connection", sock_info->name);
d9e99d10
MD
1365 goto end;
1366 }
1367 goto end;
d9e99d10
MD
1368 }
1369
1370 }
1371end:
3327ac33 1372 if (ust_lock()) {
d5e1fea6
MD
1373 goto quit;
1374 }
f59ed768
MD
1375 /* Cleanup socket handles before trying to reconnect */
1376 lttng_ust_objd_table_owner_cleanup(sock_info);
1377 ust_unlock();
9eb62b9c 1378 goto restart; /* try to reconnect */
e33f3265 1379
1ea11eab 1380quit:
e33f3265 1381 ust_unlock();
3327ac33
MD
1382
1383 pthread_mutex_lock(&ust_exit_mutex);
1384 sock_info->thread_active = 0;
1385 pthread_mutex_unlock(&ust_exit_mutex);
d9e99d10
MD
1386 return NULL;
1387}
1388
2594a5b4
MD
1389/*
1390 * Weak symbol to call when the ust malloc wrapper is not loaded.
1391 */
1392__attribute__((weak))
1393void lttng_ust_malloc_wrapper_init(void)
1394{
1395}
1396
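/*
 * Illustrative sketch: because the symbol above is weak, a wrapper
 * library (for instance the lttng-ust malloc instrumentation) can ship
 * a strong definition that overrides it, and lttng_ust_init() will run
 * it before the listener threads are created. The body below is a
 * hypothetical example, not the actual wrapper code.
 */
void lttng_ust_malloc_wrapper_init(void)
{
	/*
	 * Strong definition: perform any one-time setup the wrapper
	 * needs (e.g. resolving the real allocator entry points)
	 * before tracing commands can arrive.
	 */
}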
2691221a
MD
1397/*
1398 * sessiond monitoring thread: monitor presence of global and per-user
1399 * sessiond by polling the application common named pipe.
1400 */
edaa1431 1401void __attribute__((constructor)) lttng_ust_init(void)
2691221a 1402{
11ff9c7d 1403 struct timespec constructor_timeout;
ae6a58bf 1404 sigset_t sig_all_blocked, orig_parent_mask;
1879f67f 1405 pthread_attr_t thread_attr;
cf12a773 1406 int timeout_mode;
2691221a
MD
1407 int ret;
1408
edaa1431
MD
1409 if (uatomic_xchg(&initialized, 1) == 1)
1410 return;
1411
eddd8d5d
MD
1412 /*
1413 * Fixup interdependency between TLS fixup mutex (which happens
1414 * to be the dynamic linker mutex) and ust_lock: the linker mutex
1415 * can be taken while the ust lock is held, so do the fixups first.
1416 */
f645cfa7 1417 lttng_fixup_ringbuffer_tls();
4158a15a 1418 lttng_fixup_vtid_tls();
a903623f 1419 lttng_fixup_nest_count_tls();
009745db 1420 lttng_fixup_procname_tls();
d58d1454 1421 lttng_fixup_ust_mutex_nest_tls();
eddd8d5d 1422
edaa1431
MD
1423 /*
1424 * We want precise control over the order in which we construct
1425 * our sub-libraries vs starting to receive commands from
1426 * sessiond (otherwise leading to errors when sessiond commands
1427 * arrive before the init functions are completed).
1428 */
2691221a 1429 init_usterr();
edaa1431 1430 init_tracepoint();
bd703713 1431 lttng_ust_baddr_statedump_init();
7dd08bec
MD
1432 lttng_ring_buffer_metadata_client_init();
1433 lttng_ring_buffer_client_overwrite_init();
34a91bdb 1434 lttng_ring_buffer_client_overwrite_rt_init();
7dd08bec 1435 lttng_ring_buffer_client_discard_init();
34a91bdb 1436 lttng_ring_buffer_client_discard_rt_init();
d58d1454 1437 lttng_perf_counter_init();
a0a3bef9 1438 lttng_context_init();
2594a5b4
MD
1439 /*
1440 * Invoke ust malloc wrapper init before starting other threads.
1441 */
1442 lttng_ust_malloc_wrapper_init();
2691221a 1443
ff517991 1444 timeout_mode = get_constructor_timeout(&constructor_timeout);
11ff9c7d 1445
95259bd0 1446 ret = sem_init(&constructor_wait, 0, 0);
11ff9c7d
MD
1447 assert(!ret);
1448
8d20bf54 1449 ret = setup_local_apps();
2691221a 1450 if (ret) {
9ec6895c 1451 DBG("local apps setup returned %d", ret);
2691221a 1452 }
ae6a58bf
WP
1453
1454 /* A new thread created by pthread_create inherits the signal mask
1455 * from the parent. To avoid any signal being received by the
1456 * listener thread, we block all signals temporarily in the parent,
1457 * while we create the listener thread.
1458 */
1459 sigfillset(&sig_all_blocked);
1460 ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1461 if (ret) {
d94d802c 1462 ERR("pthread_sigmask: %s", strerror(ret));
ae6a58bf
WP
1463 }
1464
1879f67f
MG
1465 ret = pthread_attr_init(&thread_attr);
1466 if (ret) {
1467 ERR("pthread_attr_init: %s", strerror(ret));
1468 }
1469 ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
1470 if (ret) {
1471 ERR("pthread_attr_setdetachstate: %s", strerror(ret));
1472 }
1473
c0bbbd5a 1474 pthread_mutex_lock(&ust_exit_mutex);
1879f67f 1475 ret = pthread_create(&global_apps.ust_listener, &thread_attr,
dde70ea0 1476 ust_listener_thread, &global_apps);
d94d802c
MD
1477 if (ret) {
1478 ERR("pthread_create global: %s", strerror(ret));
1479 }
e33f3265 1480 global_apps.thread_active = 1;
c0bbbd5a 1481 pthread_mutex_unlock(&ust_exit_mutex);
e33f3265 1482
8d20bf54 1483 if (local_apps.allowed) {
c0bbbd5a 1484 pthread_mutex_lock(&ust_exit_mutex);
1879f67f 1485 ret = pthread_create(&local_apps.ust_listener, &thread_attr,
dde70ea0 1486 ust_listener_thread, &local_apps);
d94d802c
MD
1487 if (ret) {
1488 ERR("pthread_create local: %s", strerror(ret));
1489 }
e33f3265 1490 local_apps.thread_active = 1;
c0bbbd5a 1491 pthread_mutex_unlock(&ust_exit_mutex);
8d20bf54
MD
1492 } else {
1493 handle_register_done(&local_apps);
1494 }
1879f67f
MG
1495 ret = pthread_attr_destroy(&thread_attr);
1496 if (ret) {
1497 ERR("pthread_attr_destroy: %s", strerror(ret));
1498 }
8d20bf54 1499
ae6a58bf
WP
1500 /* Restore original signal mask in parent */
1501 ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1502 if (ret) {
d94d802c 1503 ERR("pthread_sigmask: %s", strerror(ret));
ae6a58bf
WP
1504 }
1505
cf12a773
MD
1506 switch (timeout_mode) {
1507 case 1: /* timeout wait */
95259bd0
MD
1508 do {
1509 ret = sem_timedwait(&constructor_wait,
1510 &constructor_timeout);
1511 } while (ret < 0 && errno == EINTR);
cf12a773 1512 if (ret < 0 && errno == ETIMEDOUT) {
7dd08bec 1513 ERR("Timed out waiting for lttng-sessiond");
cf12a773
MD
1514 } else {
1515 assert(!ret);
1516 }
1517 break;
7b766b16 1518 case -1:/* wait forever */
95259bd0
MD
1519 do {
1520 ret = sem_wait(&constructor_wait);
1521 } while (ret < 0 && errno == EINTR);
11ff9c7d 1522 assert(!ret);
cf12a773 1523 break;
7b766b16 1524 case 0: /* no timeout */
cf12a773 1525 break;
11ff9c7d 1526 }
2691221a
MD
1527}
1528
17dfb34b
MD
1529static
1530void lttng_ust_cleanup(int exiting)
1531{
efe0de09 1532 cleanup_sock_info(&global_apps, exiting);
17dfb34b 1533 if (local_apps.allowed) {
efe0de09 1534 cleanup_sock_info(&local_apps, exiting);
17dfb34b 1535 }
efe0de09
MD
1536 /*
1537 * The teardown in this function all affect data structures
1538 * accessed under the UST lock by the listener thread. This
1539 * lock, along with the lttng_ust_comm_should_quit flag, ensure
1540 * that none of these threads are accessing this data at this
1541 * point.
1542 */
17dfb34b 1543 lttng_ust_abi_exit();
003fedf4 1544 lttng_ust_events_exit();
a0a3bef9 1545 lttng_context_exit();
d58d1454 1546 lttng_perf_counter_exit();
34a91bdb 1547 lttng_ring_buffer_client_discard_rt_exit();
7dd08bec 1548 lttng_ring_buffer_client_discard_exit();
34a91bdb 1549 lttng_ring_buffer_client_overwrite_rt_exit();
7dd08bec
MD
1550 lttng_ring_buffer_client_overwrite_exit();
1551 lttng_ring_buffer_metadata_client_exit();
bd703713 1552 lttng_ust_baddr_statedump_destroy();
17dfb34b
MD
1553 exit_tracepoint();
1554 if (!exiting) {
1555 /* Reinitialize values for fork */
1556 sem_count = 2;
1557 lttng_ust_comm_should_quit = 0;
1558 initialized = 0;
1559 }
1560}
1561
edaa1431 1562void __attribute__((destructor)) lttng_ust_exit(void)
2691221a
MD
1563{
1564 int ret;
1565
9eb62b9c
MD
1566 /*
1567 * Using pthread_cancel here because:
1568 * A) we don't want to hang application teardown.
1569 * B) the thread is not allocating any resource.
1570 */
1ea11eab
MD
1571
1572 /*
1573 * Require the communication thread to quit. Synchronize with
1574 * mutexes to ensure it is not in a mutex critical section when
1575 * pthread_cancel is later called.
1576 */
3327ac33 1577 ust_lock_nocheck();
1ea11eab 1578 lttng_ust_comm_should_quit = 1;
3327ac33 1579 ust_unlock();
1ea11eab 1580
3327ac33 1581 pthread_mutex_lock(&ust_exit_mutex);
f5f94532 1582 /* cancel threads */
e33f3265
MD
1583 if (global_apps.thread_active) {
1584 ret = pthread_cancel(global_apps.ust_listener);
1585 if (ret) {
1586 ERR("Error cancelling global ust listener thread: %s",
1587 strerror(ret));
1588 } else {
1589 global_apps.thread_active = 0;
1590 }
2691221a 1591 }
e33f3265 1592 if (local_apps.thread_active) {
8d20bf54
MD
1593 ret = pthread_cancel(local_apps.ust_listener);
1594 if (ret) {
d94d802c
MD
1595 ERR("Error cancelling local ust listener thread: %s",
1596 strerror(ret));
e33f3265
MD
1597 } else {
1598 local_apps.thread_active = 0;
8d20bf54 1599 }
8d20bf54 1600 }
3327ac33 1601 pthread_mutex_unlock(&ust_exit_mutex);
e33f3265 1602
efe0de09
MD
1603 /*
1604 * Do NOT join threads: use of sys_futex makes it impossible to
1605 * join the threads without using async-cancel, but async-cancel
1606 * is delivered by a signal, which could hit the target thread
1607 * anywhere in its code path, including while the ust_lock() is
1608 * held, causing a deadlock for the other thread. Let the OS
1610 * clean up the threads if they are stalled in a syscall.
1610 */
17dfb34b 1611 lttng_ust_cleanup(1);
2691221a 1612}
e822f505
MD
1613
1614/*
1615 * We exclude the worker threads across fork and clone (except
1616 * CLONE_VM), because these system calls only keep the forking thread
1617 * running in the child. Therefore, we don't want to call fork or clone
1618 * in the middle of an tracepoint or ust tracing state modification.
1619 * Holding this mutex protects these structures across fork and clone.
1620 */
b728d87e 1621void ust_before_fork(sigset_t *save_sigset)
e822f505
MD
1622{
1623 /*
1624 * Disable signals. This is to avoid that the child intervenes
1625 * before it is properly setup for tracing. It is safer to
1626 * disable all signals, because then we know we are not breaking
1627 * anything by restoring the original mask.
1628 */
1629 sigset_t all_sigs;
1630 int ret;
1631
8c90a710 1632 if (URCU_TLS(lttng_ust_nest_count))
e8508a49 1633 return;
e822f505
MD
1634 /* Disable signals */
1635 sigfillset(&all_sigs);
b728d87e 1636 ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
e822f505
MD
1637 if (ret == -1) {
1638 PERROR("sigprocmask");
1639 }
458d678c
PW
1640
1641 pthread_mutex_lock(&ust_fork_mutex);
1642
3327ac33 1643 ust_lock_nocheck();
e822f505
MD
1644 rcu_bp_before_fork();
1645}
1646
b728d87e 1647static void ust_after_fork_common(sigset_t *restore_sigset)
e822f505
MD
1648{
1649 int ret;
1650
17dfb34b
MD
1651 DBG("process %d", getpid());
1652 ust_unlock();
458d678c
PW
1653
1654 pthread_mutex_unlock(&ust_fork_mutex);
1655
e822f505 1656 /* Restore signals */
23c8854a 1657 ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
e822f505
MD
1658 if (ret == -1) {
1659 PERROR("sigprocmask");
1660 }
1661}
1662
b728d87e 1663void ust_after_fork_parent(sigset_t *restore_sigset)
e822f505 1664{
8c90a710 1665 if (URCU_TLS(lttng_ust_nest_count))
e8508a49 1666 return;
17dfb34b 1667 DBG("process %d", getpid());
e822f505
MD
1668 rcu_bp_after_fork_parent();
1669 /* Release mutexes and reenable signals */
b728d87e 1670 ust_after_fork_common(restore_sigset);
e822f505
MD
1671}
1672
17dfb34b
MD
1673/*
1674 * After fork, in the child, we need to cleanup all the leftover state,
1675 * except the worker thread which already magically disappeared thanks
1676 * to the weird Linux fork semantics. After tyding up, we call
1677 * lttng_ust_init() again to start over as a new PID.
1678 *
1679 * This is meant for fork() calls where the child does tracing between
1680 * the fork and the following exec call (if any).
1681 */
b728d87e 1682void ust_after_fork_child(sigset_t *restore_sigset)
e822f505 1683{
8c90a710 1684 if (URCU_TLS(lttng_ust_nest_count))
e8508a49 1685 return;
17dfb34b 1686 DBG("process %d", getpid());
e822f505
MD
1687 /* Release urcu mutexes */
1688 rcu_bp_after_fork_child();
17dfb34b 1689 lttng_ust_cleanup(0);
a93bfc45 1690 lttng_context_vtid_reset();
e822f505 1691 /* Release mutexes and reenable signals */
b728d87e 1692 ust_after_fork_common(restore_sigset);
318dfea9 1693 lttng_ust_init();
e822f505 1694}
95c25348 1695
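/*
 * Illustrative sketch of how the before/after fork hooks above are
 * meant to be driven, in the spirit of the LD_PRELOAD fork helper
 * library shipped with lttng-ust. The wrapper name is hypothetical;
 * only the ust_before_fork(), ust_after_fork_parent() and
 * ust_after_fork_child() signatures come from this file (they are part
 * of the public API pulled in via <lttng/ust.h>).
 */
pid_t example_fork_wrapper(void)
{
	sigset_t saved_sigset;
	pid_t pid;

	ust_before_fork(&saved_sigset);	/* block signals, take UST locks */
	pid = fork();
	if (pid == 0)
		ust_after_fork_child(&saved_sigset);	/* child: reset state, re-init */
	else
		ust_after_fork_parent(&saved_sigset);	/* parent (or fork error): unlock */
	return pid;
}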
246be17e 1696void lttng_ust_sockinfo_session_enabled(void *owner)
95c25348
PW
1697{
1698 struct sock_info *sock_info = owner;
37dddb65 1699 sock_info->statedump_pending = 1;
95c25348 1700}