Fix: application exit race with pthread cancel
liblttng-ust/lttng-ust-comm.c
/*
 * lttng-ust-comm.c
 *
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <time.h>
#include <assert.h>
#include <signal.h>
#include <urcu/uatomic.h>
#include <urcu/futex.h>
#include <urcu/compiler.h>

#include <lttng/ust-events.h>
#include <lttng/ust-abi.h>
#include <lttng/ust.h>
#include <lttng/ust-error.h>
#include <lttng/ust-ctl.h>
#include <urcu/tls-compat.h>
#include <ust-comm.h>
#include <usterr-signal-safe.h>
#include <helper.h>
#include "tracepoint-internal.h"
#include "lttng-tracer-core.h"
#include "compat.h"
#include "../libringbuffer/tlsfixup.h"
#include "lttng-ust-baddr.h"
#include "getenv.h"

/*
 * Has lttng ust comm constructor been called ?
 */
static int initialized;

/*
 * The ust_lock/ust_unlock lock is used as a communication thread mutex.
 * Held when handling a command, also held by fork() to deal with
 * removal of threads, and by exit path.
 *
 * The UST lock is the centralized mutex across UST tracing control and
 * probe registration.
 *
 * ust_exit_mutex must never nest in ust_mutex.
 *
 * ust_fork_mutex must never nest in ust_mutex.
 *
 * ust_mutex_nest is a per-thread nesting counter, allowing the perf
 * counter lazy initialization called by events within the statedump,
 * which traces while the ust_mutex is held.
 *
 * ust_lock nests within the dynamic loader lock (within glibc) because
 * it is taken within the library constructor.
 */
static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Allow nesting the ust_mutex within the same thread. */
static DEFINE_URCU_TLS(int, ust_mutex_nest);

/*
 * ust_exit_mutex protects the thread_active variable wrt thread exit. It
 * cannot be done by ust_mutex because pthread_cancel(), which takes an
 * internal libc lock, cannot nest within ust_mutex.
 *
 * It never nests within a ust_mutex.
 */
static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * ust_fork_mutex protects base address statedump tracing against forks. It
 * prevents the dynamic loader lock from being taken (by base address
 * statedump tracing) while a fork is happening, thus preventing deadlock
 * issues with the dynamic loader lock.
 */
static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Should the ust comm thread quit ? */
static int lttng_ust_comm_should_quit;

/*
 * Return 0 on success, -1 if should quit.
 * The lock is taken in both cases.
 * Signal-safe.
 */
int ust_lock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	if (oldstate != PTHREAD_CANCEL_ENABLE) {
		ERR("pthread_setcancelstate: unexpected oldstate");
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_mutex_nest)++)
		pthread_mutex_lock(&ust_mutex);
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (lttng_ust_comm_should_quit) {
		return -1;
	} else {
		return 0;
	}
}

/*
 * ust_lock_nocheck() can be used in constructors/destructors, because
 * they are already nested within the dynamic loader lock, and therefore
 * have exclusive access against execution of liblttng-ust destructor.
 * Signal-safe.
 */
void ust_lock_nocheck(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	if (oldstate != PTHREAD_CANCEL_ENABLE) {
		ERR("pthread_setcancelstate: unexpected oldstate");
	}
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!URCU_TLS(ust_mutex_nest)++)
		pthread_mutex_lock(&ust_mutex);
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
}

/*
 * Signal-safe.
 */
void ust_unlock(void)
{
	sigset_t sig_all_blocked, orig_mask;
	int ret, oldstate;

	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	if (!--URCU_TLS(ust_mutex_nest))
		pthread_mutex_unlock(&ust_mutex);
	ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}
	ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
	if (ret) {
		ERR("pthread_setcancelstate: %s", strerror(ret));
	}
	if (oldstate != PTHREAD_CANCEL_DISABLE) {
		ERR("pthread_setcancelstate: unexpected oldstate");
	}
}
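
/*
 * Illustrative usage sketch (not part of the original file): callers
 * that may run while the application is exiting are expected to use
 * ust_lock() as follows. Note that the lock is held even when -1 is
 * returned, so both paths must unlock:
 *
 *	if (ust_lock()) {
 *		ret = -LTTNG_UST_ERR_EXITING;
 *		goto quit;
 *	}
 *	...critical section...
 *	ust_unlock();
 *	return ret;
 * quit:
 *	ust_unlock();
 *	return ret;
 */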

/*
 * Wait for either of these before continuing to the main
 * program:
 * - the register_done message from sessiond daemon
 *   (will let the sessiond daemon enable sessions before main
 *   starts.)
 * - sessiond daemon is not reachable.
 * - timeout (ensuring applications are resilient to session
 *   daemon problems).
 */
static sem_t constructor_wait;
/*
 * Doing this for both the global and local sessiond.
 */
static int sem_count = { 2 };

/*
 * Counting nesting within lttng-ust. Used to ensure that calling fork()
 * from liblttng-ust does not execute the pre/post fork handlers.
 */
static DEFINE_URCU_TLS(int, lttng_ust_nest_count);

/*
 * Info about socket and associated listener thread.
 */
struct sock_info {
	const char *name;
	pthread_t ust_listener;	/* listener thread */
	int root_handle;
	int constructor_sem_posted;
	int allowed;
	int global;
	int thread_active;

	char sock_path[PATH_MAX];
	int socket;
	int notify_socket;

	char wait_shm_path[PATH_MAX];
	char *wait_shm_mmap;
	/* Keep track of lazy state dump not performed yet. */
	int statedump_pending;
};

/* Socket from app (connect) to session daemon (listen) for communication */
struct sock_info global_apps = {
	.name = "global",
	.global = 1,

	.root_handle = -1,
	.allowed = 1,
	.thread_active = 0,

	.sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
	.socket = -1,
	.notify_socket = -1,

	.wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,

	.statedump_pending = 0,
};

/* TODO: allow global_apps_sock_path override */

struct sock_info local_apps = {
	.name = "local",
	.global = 0,
	.root_handle = -1,
	.allowed = 0,	/* Check setuid bit first */
	.thread_active = 0,

	.socket = -1,
	.notify_socket = -1,

	.statedump_pending = 0,
};

static int wait_poll_fallback;

static const char *cmd_name_mapping[] = {
	[ LTTNG_UST_RELEASE ] = "Release",
	[ LTTNG_UST_SESSION ] = "Create Session",
	[ LTTNG_UST_TRACER_VERSION ] = "Get Tracer Version",

	[ LTTNG_UST_TRACEPOINT_LIST ] = "Create Tracepoint List",
	[ LTTNG_UST_WAIT_QUIESCENT ] = "Wait for Quiescent State",
	[ LTTNG_UST_REGISTER_DONE ] = "Registration Done",
	[ LTTNG_UST_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",

	/* Session FD commands */
	[ LTTNG_UST_CHANNEL ] = "Create Channel",
	[ LTTNG_UST_SESSION_START ] = "Start Session",
	[ LTTNG_UST_SESSION_STOP ] = "Stop Session",

	/* Channel FD commands */
	[ LTTNG_UST_STREAM ] = "Create Stream",
	[ LTTNG_UST_EVENT ] = "Create Event",

	/* Event and Channel FD commands */
	[ LTTNG_UST_CONTEXT ] = "Create Context",
	[ LTTNG_UST_FLUSH_BUFFER ] = "Flush Buffer",

	/* Event, Channel and Session commands */
	[ LTTNG_UST_ENABLE ] = "Enable",
	[ LTTNG_UST_DISABLE ] = "Disable",

	/* Tracepoint list commands */
	[ LTTNG_UST_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
	[ LTTNG_UST_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",

	/* Event FD commands */
	[ LTTNG_UST_FILTER ] = "Create Filter",
	[ LTTNG_UST_EXCLUSION ] = "Add exclusions to event",
};

static const char *str_timeout;
static int got_timeout_env;

extern void lttng_ring_buffer_client_overwrite_init(void);
extern void lttng_ring_buffer_client_overwrite_rt_init(void);
extern void lttng_ring_buffer_client_discard_init(void);
extern void lttng_ring_buffer_client_discard_rt_init(void);
extern void lttng_ring_buffer_metadata_client_init(void);
extern void lttng_ring_buffer_client_overwrite_exit(void);
extern void lttng_ring_buffer_client_overwrite_rt_exit(void);
extern void lttng_ring_buffer_client_discard_exit(void);
extern void lttng_ring_buffer_client_discard_rt_exit(void);
extern void lttng_ring_buffer_metadata_client_exit(void);

/*
 * Returns the HOME directory path. Caller MUST NOT free(3) the returned
 * pointer.
 */
static
const char *get_lttng_home_dir(void)
{
	const char *val;

	val = (const char *) lttng_secure_getenv("LTTNG_HOME");
	if (val != NULL) {
		return val;
	}
	return (const char *) lttng_secure_getenv("HOME");
}

/*
 * Force a read (imply TLS fixup for dlopen) of TLS variables.
 */
static
void lttng_fixup_nest_count_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
}

static
void lttng_fixup_ust_mutex_nest_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
}

/*
 * Fixup urcu bp TLS.
 */
static
void lttng_fixup_urcu_bp_tls(void)
{
	rcu_read_lock();
	rcu_read_unlock();
}

int lttng_get_notify_socket(void *owner)
{
	struct sock_info *info = owner;

	return info->notify_socket;
}

static
void print_cmd(int cmd, int handle)
{
	const char *cmd_name = "Unknown";

	if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
			&& cmd_name_mapping[cmd]) {
		cmd_name = cmd_name_mapping[cmd];
	}
	DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
		cmd_name, cmd,
		lttng_ust_obj_get_name(handle), handle);
}

static
int setup_local_apps(void)
{
	const char *home_dir;
	uid_t uid;

	uid = getuid();
	/*
	 * Disallow per-user tracing for setuid binaries.
	 */
	if (uid != geteuid()) {
		assert(local_apps.allowed == 0);
		return 0;
	}
	home_dir = get_lttng_home_dir();
	if (!home_dir) {
		WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
		assert(local_apps.allowed == 0);
		return -ENOENT;
	}
	local_apps.allowed = 1;
	snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
		home_dir,
		LTTNG_DEFAULT_HOME_RUNDIR,
		LTTNG_UST_SOCK_FILENAME);
	snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
		LTTNG_UST_WAIT_FILENAME,
		uid);
	return 0;
}
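
/*
 * Example (illustrative; the actual names come from the
 * LTTNG_DEFAULT_HOME_RUNDIR, LTTNG_UST_SOCK_FILENAME and
 * LTTNG_UST_WAIT_FILENAME definitions): for a non-setuid process
 * running with LTTNG_HOME=/home/user, the snprintf() calls above
 * produce paths of the form
 *
 *	sock_path:     /home/user/<LTTNG_DEFAULT_HOME_RUNDIR>/<LTTNG_UST_SOCK_FILENAME>
 *	wait_shm_path: /<LTTNG_UST_WAIT_FILENAME>-<uid>
 */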

/*
 * Get notify_sock timeout, in ms.
 * -1: wait forever. 0: don't wait. >0: timeout, in ms.
 */
static
long get_timeout(void)
{
	long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;

	if (!got_timeout_env) {
		str_timeout = getenv("LTTNG_UST_REGISTER_TIMEOUT");
		got_timeout_env = 1;
	}
	if (str_timeout)
		constructor_delay_ms = strtol(str_timeout, NULL, 10);
	return constructor_delay_ms;
}

static
long get_notify_sock_timeout(void)
{
	return get_timeout();
}

/*
 * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
 */
static
int get_constructor_timeout(struct timespec *constructor_timeout)
{
	long constructor_delay_ms;
	int ret;

	constructor_delay_ms = get_timeout();

	switch (constructor_delay_ms) {
	case -1:/* fall-through */
	case 0:
		return constructor_delay_ms;
	default:
		break;
	}

	/*
	 * If we are unable to find the current time, don't wait.
	 */
	ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
	if (ret) {
		/* Don't wait. */
		return 0;
	}
	constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
	constructor_timeout->tv_nsec +=
		(constructor_delay_ms % 1000UL) * 1000000UL;
	if (constructor_timeout->tv_nsec >= 1000000000UL) {
		constructor_timeout->tv_sec++;
		constructor_timeout->tv_nsec -= 1000000000UL;
	}
	/* Timeout wait (constructor_delay_ms). */
	return 1;
}
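
/*
 * Illustrative examples for the LTTNG_UST_REGISTER_TIMEOUT environment
 * variable read above (value in milliseconds; the numbers shown are
 * only examples):
 *
 *	LTTNG_UST_REGISTER_TIMEOUT=-1	wait for registration forever
 *	LTTNG_UST_REGISTER_TIMEOUT=0	do not wait at all
 *	LTTNG_UST_REGISTER_TIMEOUT=500	wait at most 500 ms
 *
 * get_constructor_timeout() maps these to -1, 0 or 1 and, in the
 * timeout case, fills *constructor_timeout with CLOCK_REALTIME "now"
 * plus the delay, normalizing tv_nsec into [0, 1e9).
 */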

static
int register_to_sessiond(int socket, enum ustctl_socket_type type)
{
	return ustcomm_send_reg_msg(socket,
		type,
		CAA_BITS_PER_LONG,
		lttng_alignof(uint8_t) * CHAR_BIT,
		lttng_alignof(uint16_t) * CHAR_BIT,
		lttng_alignof(uint32_t) * CHAR_BIT,
		lttng_alignof(uint64_t) * CHAR_BIT,
		lttng_alignof(unsigned long) * CHAR_BIT);
}
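
/*
 * Illustrative note: the registration message sent above describes the
 * application's ABI so the session daemon can match it. For instance,
 * on a typical LP64 build (an assumption, shown only as an example),
 * CAA_BITS_PER_LONG is 64 and the lttng_alignof() * CHAR_BIT values
 * for uint8_t..uint64_t are 8, 16, 32 and 64 respectively.
 */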

static
int send_reply(int sock, struct ustcomm_ust_reply *lur)
{
	ssize_t len;

	len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
	switch (len) {
	case sizeof(*lur):
		DBG("message successfully sent");
		return 0;
	default:
		if (len == -ECONNRESET) {
			DBG("remote end closed connection");
			return 0;
		}
		if (len < 0)
			return len;
		DBG("incorrect message size: %zd", len);
		return -EINVAL;
	}
}

static
int handle_register_done(struct sock_info *sock_info)
{
	int ret;

	if (sock_info->constructor_sem_posted)
		return 0;
	sock_info->constructor_sem_posted = 1;
	if (uatomic_read(&sem_count) <= 0) {
		return 0;
	}
	ret = uatomic_add_return(&sem_count, -1);
	if (ret == 0) {
		ret = sem_post(&constructor_wait);
		assert(!ret);
	}
	return 0;
}
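
/*
 * Gating sketch (illustrative): sem_count starts at 2, one slot for the
 * global sessiond and one for the per-user sessiond. Each listener
 * thread ends up in handle_register_done() exactly once, either upon a
 * real "registration done" command or when its sessiond is unreachable;
 * constructor_sem_posted prevents double-counting. The last decrement
 * posts constructor_wait, which is what lttng_ust_init() waits on:
 *
 *	ret = uatomic_add_return(&sem_count, -1);
 *	if (ret == 0)
 *		sem_post(&constructor_wait);
 */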

/*
 * Only execute pending statedump after the constructor semaphore has
 * been posted by each listener thread. This means statedump will only
 * be performed after the "registration done" command is received from
 * each session daemon the application is connected to.
 *
 * This ensures we don't run into deadlock issues with the dynamic
 * loader mutex, which is held while the constructor is called and
 * waiting on the constructor semaphore. All operations requiring this
 * dynamic loader lock need to be postponed using this mechanism.
 */
static
void handle_pending_statedump(struct sock_info *sock_info)
{
	int ctor_passed = sock_info->constructor_sem_posted;

	if (ctor_passed && sock_info->statedump_pending) {
		sock_info->statedump_pending = 0;
		pthread_mutex_lock(&ust_fork_mutex);
		lttng_handle_pending_statedump(sock_info);
		pthread_mutex_unlock(&ust_fork_mutex);
	}
}

static
int handle_message(struct sock_info *sock_info,
		int sock, struct ustcomm_ust_msg *lum)
{
	int ret = 0;
	const struct lttng_ust_objd_ops *ops;
	struct ustcomm_ust_reply lur;
	union ust_args args;
	ssize_t len;

	memset(&lur, 0, sizeof(lur));

	if (ust_lock()) {
		ret = -LTTNG_UST_ERR_EXITING;
		goto end;
	}

	ops = objd_ops(lum->handle);
	if (!ops) {
		ret = -ENOENT;
		goto end;
	}

	switch (lum->cmd) {
	case LTTNG_UST_REGISTER_DONE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = handle_register_done(sock_info);
		else
			ret = -EINVAL;
		break;
	case LTTNG_UST_RELEASE:
		if (lum->handle == LTTNG_UST_ROOT_HANDLE)
			ret = -EPERM;
		else
			ret = lttng_ust_objd_unref(lum->handle, 1);
		break;
	case LTTNG_UST_FILTER:
	{
		/* Receive filter data */
		struct lttng_ust_filter_bytecode_node *bytecode;

		if (lum->u.filter.data_size > FILTER_BYTECODE_MAX_LEN) {
			ERR("Filter data size is too large: %u bytes",
				lum->u.filter.data_size);
			ret = -EINVAL;
			goto error;
		}

		if (lum->u.filter.reloc_offset > lum->u.filter.data_size) {
			ERR("Filter reloc offset %u is not within data",
				lum->u.filter.reloc_offset);
			ret = -EINVAL;
			goto error;
		}

		bytecode = zmalloc(sizeof(*bytecode) + lum->u.filter.data_size);
		if (!bytecode) {
			ret = -ENOMEM;
			goto error;
		}
		len = ustcomm_recv_unix_sock(sock, bytecode->bc.data,
				lum->u.filter.data_size);
		switch (len) {
		case 0:	/* orderly shutdown */
			ret = 0;
			free(bytecode);
			goto error;
		default:
			if (len == lum->u.filter.data_size) {
				DBG("filter data received");
				break;
			} else if (len < 0) {
				DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
				if (len == -ECONNRESET) {
					ERR("%s remote end closed connection", sock_info->name);
					ret = len;
					free(bytecode);
					goto error;
				}
				ret = len;
				free(bytecode);
				goto end;
			} else {
				DBG("incorrect filter data message size: %zd", len);
				ret = -EINVAL;
				free(bytecode);
				goto end;
			}
		}
		bytecode->bc.len = lum->u.filter.data_size;
		bytecode->bc.reloc_offset = lum->u.filter.reloc_offset;
		bytecode->bc.seqnum = lum->u.filter.seqnum;
		if (ops->cmd) {
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) bytecode,
					&args, sock_info);
			if (ret) {
				free(bytecode);
			}
			/* don't free bytecode if everything went fine. */
		} else {
			ret = -ENOSYS;
			free(bytecode);
		}
		break;
	}
	case LTTNG_UST_EXCLUSION:
	{
		/* Receive exclusion names */
		struct lttng_ust_excluder_node *node;
		unsigned int count;

		count = lum->u.exclusion.count;
		if (count == 0) {
			/* There are no names to read */
			ret = 0;
			goto error;
		}
		node = zmalloc(sizeof(*node) +
				count * LTTNG_UST_SYM_NAME_LEN);
		if (!node) {
			ret = -ENOMEM;
			goto error;
		}
		node->excluder.count = count;
		len = ustcomm_recv_unix_sock(sock, node->excluder.names,
				count * LTTNG_UST_SYM_NAME_LEN);
		switch (len) {
		case 0:	/* orderly shutdown */
			ret = 0;
			free(node);
			goto error;
		default:
			if (len == count * LTTNG_UST_SYM_NAME_LEN) {
				DBG("Exclusion data received");
				break;
			} else if (len < 0) {
				DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
				if (len == -ECONNRESET) {
					ERR("%s remote end closed connection", sock_info->name);
					ret = len;
					free(node);
					goto error;
				}
				ret = len;
				free(node);
				goto end;
			} else {
				DBG("Incorrect exclusion data message size: %zd", len);
				ret = -EINVAL;
				free(node);
				goto end;
			}
		}
		if (ops->cmd) {
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) node,
					&args, sock_info);
			if (ret) {
				free(node);
			}
			/* Don't free exclusion data if everything went fine. */
		} else {
			ret = -ENOSYS;
			free(node);
		}
		break;
	}
	case LTTNG_UST_CHANNEL:
	{
		void *chan_data;
		int wakeup_fd;

		len = ustcomm_recv_channel_from_sessiond(sock,
				&chan_data, lum->u.channel.len,
				&wakeup_fd);
		switch (len) {
		case 0:	/* orderly shutdown */
			ret = 0;
			goto error;
		default:
			if (len == lum->u.channel.len) {
				DBG("channel data received");
				break;
			} else if (len < 0) {
				DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
				if (len == -ECONNRESET) {
					ERR("%s remote end closed connection", sock_info->name);
					ret = len;
					goto error;
				}
				ret = len;
				goto end;
			} else {
				DBG("incorrect channel data message size: %zd", len);
				ret = -EINVAL;
				goto end;
			}
		}
		args.channel.chan_data = chan_data;
		args.channel.wakeup_fd = wakeup_fd;
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args, sock_info);
		else
			ret = -ENOSYS;
		break;
	}
	case LTTNG_UST_STREAM:
	{
		/* Receive shm_fd, wakeup_fd */
		ret = ustcomm_recv_stream_from_sessiond(sock,
				&lum->u.stream.len,
				&args.stream.shm_fd,
				&args.stream.wakeup_fd);
		if (ret) {
			goto end;
		}
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args, sock_info);
		else
			ret = -ENOSYS;
		break;
	}
	default:
		if (ops->cmd)
			ret = ops->cmd(lum->handle, lum->cmd,
					(unsigned long) &lum->u,
					&args, sock_info);
		else
			ret = -ENOSYS;
		break;
	}

end:
	lur.handle = lum->handle;
	lur.cmd = lum->cmd;
	lur.ret_val = ret;
	if (ret >= 0) {
		lur.ret_code = LTTNG_UST_OK;
	} else {
		/*
		 * Use -LTTNG_UST_ERR as wildcard for UST internal
		 * errors that are not caused by the transport, except if
		 * we already have a more precise error message to
		 * report.
		 */
		if (ret > -LTTNG_UST_ERR) {
			/* Translate code to UST error. */
			switch (ret) {
			case -EEXIST:
				lur.ret_code = -LTTNG_UST_ERR_EXIST;
				break;
			case -EINVAL:
				lur.ret_code = -LTTNG_UST_ERR_INVAL;
				break;
			case -ENOENT:
				lur.ret_code = -LTTNG_UST_ERR_NOENT;
				break;
			case -EPERM:
				lur.ret_code = -LTTNG_UST_ERR_PERM;
				break;
			case -ENOSYS:
				lur.ret_code = -LTTNG_UST_ERR_NOSYS;
				break;
			default:
				lur.ret_code = -LTTNG_UST_ERR;
				break;
			}
		} else {
			lur.ret_code = ret;
		}
	}
	if (ret >= 0) {
		switch (lum->cmd) {
		case LTTNG_UST_TRACER_VERSION:
			lur.u.version = lum->u.version;
			break;
		case LTTNG_UST_TRACEPOINT_LIST_GET:
			memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
			break;
		}
	}
	DBG("Return value: %d", lur.ret_val);
	ret = send_reply(sock, &lur);
	if (ret < 0) {
		DBG("error sending reply");
		goto error;
	}

	/*
	 * LTTNG_UST_TRACEPOINT_FIELD_LIST_GET needs to send the field
	 * after the reply.
	 */
	if (lur.ret_code == LTTNG_UST_OK) {
		switch (lum->cmd) {
		case LTTNG_UST_TRACEPOINT_FIELD_LIST_GET:
			len = ustcomm_send_unix_sock(sock,
				&args.field_list.entry,
				sizeof(args.field_list.entry));
			if (len < 0) {
				ret = len;
				goto error;
			}
			if (len != sizeof(args.field_list.entry)) {
				ret = -EINVAL;
				goto error;
			}
		}
	}

error:
	ust_unlock();

	/*
	 * Perform delayed statedump operations outside of the UST
	 * lock. We need to take the dynamic loader lock before we take
	 * the UST lock internally within handle_pending_statedump().
	 */
	handle_pending_statedump(sock_info);

	return ret;
}

static
void cleanup_sock_info(struct sock_info *sock_info, int exiting)
{
	int ret;

	if (sock_info->root_handle != -1) {
		ret = lttng_ust_objd_unref(sock_info->root_handle, 1);
		if (ret) {
			ERR("Error unref root handle");
		}
		sock_info->root_handle = -1;
	}
	sock_info->constructor_sem_posted = 0;

	/*
	 * wait_shm_mmap, socket and notify socket are used by listener
	 * threads outside of the ust lock, so we cannot tear them down
	 * ourselves, because we cannot join on these threads. Leave
	 * responsibility of cleaning up these resources to the OS
	 * process exit.
	 */
	if (exiting)
		return;

	if (sock_info->socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->socket);
		if (ret) {
			ERR("Error closing ust cmd socket");
		}
		sock_info->socket = -1;
	}
	if (sock_info->notify_socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->notify_socket);
		if (ret) {
			ERR("Error closing ust notify socket");
		}
		sock_info->notify_socket = -1;
	}
	if (sock_info->wait_shm_mmap) {
		long page_size;

		page_size = sysconf(_SC_PAGE_SIZE);
		if (page_size > 0) {
			ret = munmap(sock_info->wait_shm_mmap, page_size);
			if (ret) {
				ERR("Error unmapping wait shm");
			}
		}
		sock_info->wait_shm_mmap = NULL;
	}
}

/*
 * Using fork to set umask in the child process (not multi-thread safe).
 * We deal with the shm_open vs ftruncate race (happening when the
 * sessiond owns the shm and does not let everybody modify it, to ensure
 * safety against shm_unlink) by simply letting the mmap fail and
 * retrying after a few seconds.
 * For global shm, everybody has rw access to it until the sessiond
 * starts.
 */
static
int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
{
	int wait_shm_fd, ret;
	pid_t pid;

	/*
	 * Try to open read-only.
	 */
	wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
	if (wait_shm_fd >= 0) {
		int32_t tmp_read;
		ssize_t len;
		size_t bytes_read = 0;

		/*
		 * Try to read the fd. If unable to do so, try opening
		 * it in write mode.
		 */
		do {
			len = read(wait_shm_fd,
				&((char *) &tmp_read)[bytes_read],
				sizeof(tmp_read) - bytes_read);
			if (len > 0) {
				bytes_read += len;
			}
		} while ((len < 0 && errno == EINTR)
			|| (len > 0 && bytes_read < sizeof(tmp_read)));
		if (bytes_read != sizeof(tmp_read)) {
			ret = close(wait_shm_fd);
			if (ret) {
				ERR("close wait_shm_fd");
			}
			goto open_write;
		}
		goto end;
	} else if (wait_shm_fd < 0 && errno != ENOENT) {
		/*
		 * Read-only open did not work, and it's not because the
		 * entry was not present. It's a failure that prohibits
		 * using shm.
		 */
		ERR("Error opening shm %s", sock_info->wait_shm_path);
		goto end;
	}

open_write:
	/*
	 * If the open failed because the file did not exist, or because
	 * the file was not truncated yet, try creating it ourselves.
	 */
	URCU_TLS(lttng_ust_nest_count)++;
	pid = fork();
	URCU_TLS(lttng_ust_nest_count)--;
	if (pid > 0) {
		int status;

		/*
		 * Parent: wait for child to return, in which case the
		 * shared memory map will have been created.
		 */
		pid = wait(&status);
		if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
			wait_shm_fd = -1;
			goto end;
		}
		/*
		 * Try to open read-only again after creation.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
		if (wait_shm_fd < 0) {
			/*
			 * Read-only open did not work. It's a failure
			 * that prohibits using shm.
			 */
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			goto end;
		}
		goto end;
	} else if (pid == 0) {
		int create_mode;

		/* Child */
		create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
		if (sock_info->global)
			create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
		/*
		 * We're alone in a child process, so we can modify the
		 * process-wide umask.
		 */
		umask(~create_mode);
		/*
		 * Try creating shm (or get rw access).
		 * We don't do an exclusive open, because we allow other
		 * processes to create+ftruncate it concurrently.
		 */
		wait_shm_fd = shm_open(sock_info->wait_shm_path,
				O_RDWR | O_CREAT, create_mode);
		if (wait_shm_fd >= 0) {
			ret = ftruncate(wait_shm_fd, mmap_size);
			if (ret) {
				PERROR("ftruncate");
				_exit(EXIT_FAILURE);
			}
			_exit(EXIT_SUCCESS);
		}
		/*
		 * For local shm, we need to have rw access to accept
		 * opening it: this means the local sessiond will be
		 * able to wake us up. For global shm, we open it even
		 * if rw access is not granted, because the root.root
		 * sessiond will be able to override all rights and wake
		 * us up.
		 */
		if (!sock_info->global && errno != EACCES) {
			ERR("Error opening shm %s", sock_info->wait_shm_path);
			_exit(EXIT_FAILURE);
		}
		/*
		 * The shm exists, but we cannot open it RW. Report
		 * success.
		 */
		_exit(EXIT_SUCCESS);
	} else {
		return -1;
	}
end:
	if (wait_shm_fd >= 0 && !sock_info->global) {
		struct stat statbuf;

		/*
		 * Ensure that our user is the owner of the shm file for
		 * local shm. If we do not own the file, it means our
		 * sessiond will not have access to wake us up (there is
		 * probably a rogue process trying to fake our
		 * sessiond). Fallback to polling method in this case.
		 */
		ret = fstat(wait_shm_fd, &statbuf);
		if (ret) {
			PERROR("fstat");
			goto error_close;
		}
		if (statbuf.st_uid != getuid())
			goto error_close;
	}
	return wait_shm_fd;

error_close:
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	return -1;
}
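
/*
 * Protocol summary for get_wait_shm() (illustrative): the application
 * itself only ever opens the wait shm read-only; when it has to create
 * the shm, it does so from a short-lived child so that the umask
 * change cannot race with other threads of the process:
 *
 *	parent                           child (forked above)
 *	------                           --------------------
 *	shm_open(path, O_RDONLY)         umask(~create_mode)
 *	  readable -> done               shm_open(path, O_RDWR | O_CREAT)
 *	  else fork() + wait()           ftruncate(fd, mmap_size)
 *	shm_open(path, O_RDONLY) again   _exit(EXIT_SUCCESS/FAILURE)
 */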

static
char *get_map_shm(struct sock_info *sock_info)
{
	long page_size;
	int wait_shm_fd, ret;
	char *wait_shm_mmap;

	page_size = sysconf(_SC_PAGE_SIZE);
	if (page_size < 0) {
		goto error;
	}

	wait_shm_fd = get_wait_shm(sock_info, page_size);
	if (wait_shm_fd < 0) {
		goto error;
	}
	wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
		MAP_SHARED, wait_shm_fd, 0);
	/* close shm fd immediately after taking the mmap reference */
	ret = close(wait_shm_fd);
	if (ret) {
		PERROR("Error closing fd");
	}
	if (wait_shm_mmap == MAP_FAILED) {
		DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
		goto error;
	}
	return wait_shm_mmap;

error:
	return NULL;
}

static
void wait_for_sessiond(struct sock_info *sock_info)
{
	if (ust_lock()) {
		goto quit;
	}
	if (wait_poll_fallback) {
		goto error;
	}
	if (!sock_info->wait_shm_mmap) {
		sock_info->wait_shm_mmap = get_map_shm(sock_info);
		if (!sock_info->wait_shm_mmap)
			goto error;
	}
	ust_unlock();

	DBG("Waiting for %s apps sessiond", sock_info->name);
	/* Wait for futex wakeup */
	if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
		goto end_wait;

	while (futex_async((int32_t *) sock_info->wait_shm_mmap,
			FUTEX_WAIT, 0, NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			goto end_wait;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		case EFAULT:
			wait_poll_fallback = 1;
			DBG(
"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
"do not support FUTEX_WAKE on read-only memory mappings correctly. "
"Please upgrade your kernel "
"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
"mainline). LTTng-UST will use polling mode fallback.");
			if (ust_debug())
				PERROR("futex");
			goto end_wait;
		}
	}
end_wait:
	return;

quit:
	ust_unlock();
	return;

error:
	ust_unlock();
	return;
}
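
/*
 * Wakeup sketch (illustrative; the waker side lives in lttng-sessiond,
 * not in this file): the session daemon is expected to wake waiting
 * applications by storing a non-zero value in the shared page and
 * issuing FUTEX_WAKE on it, conceptually:
 *
 *	uatomic_set((int32_t *) wait_shm_mmap, 1);
 *	futex((int32_t *) wait_shm_mmap, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
 *
 * which matches the pre-check and FUTEX_WAIT on value 0 performed in
 * wait_for_sessiond() above.
 */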

/*
 * This thread does not allocate any resource, except within
 * handle_message, within mutex protection. This mutex protects against
 * fork and exit.
 * The other moment it allocates resources is at socket connection, which
 * is also protected by the mutex.
 */
static
void *ust_listener_thread(void *arg)
{
	struct sock_info *sock_info = arg;
	int sock, ret, prev_connect_failed = 0, has_waited = 0;
	long timeout;

	/* Restart trying to connect to the session daemon */
restart:
	if (prev_connect_failed) {
		/* Wait for sessiond availability with pipe */
		wait_for_sessiond(sock_info);
		if (has_waited) {
			has_waited = 0;
			/*
			 * Sleep for 5 seconds before retrying after a
			 * sequence of failure / wait / failure. This
			 * deals with a killed or broken session daemon.
			 */
			sleep(5);
		}
		has_waited = 1;
		prev_connect_failed = 0;
	}

	if (sock_info->socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->socket);
		if (ret) {
			ERR("Error closing %s ust cmd socket",
				sock_info->name);
		}
		sock_info->socket = -1;
	}
	if (sock_info->notify_socket != -1) {
		ret = ustcomm_close_unix_sock(sock_info->notify_socket);
		if (ret) {
			ERR("Error closing %s ust notify socket",
				sock_info->name);
		}
		sock_info->notify_socket = -1;
	}

	/*
	 * Register. We need to perform both connect and sending
	 * registration message before doing the next connect otherwise
	 * we may reach unix socket connect queue max limits and block
	 * on the 2nd connect while the session daemon is awaiting the
	 * first connect registration message.
	 */
	/* Connect cmd socket */
	ret = ustcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
		prev_connect_failed = 1;

		if (ust_lock()) {
			goto quit;
		}

		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	sock_info->socket = ret;

	if (ust_lock()) {
		goto quit;
	}

	/*
	 * Create only one root handle per listener thread for the whole
	 * process lifetime, so we ensure we get the ID which is statically
	 * assigned to the root handle.
	 */
	if (sock_info->root_handle == -1) {
		ret = lttng_abi_create_root_handle();
		if (ret < 0) {
			ERR("Error creating root handle");
			goto quit;
		}
		sock_info->root_handle = ret;
	}

	ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
	if (ret < 0) {
		ERR("Error registering to %s ust cmd socket",
			sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}

	ust_unlock();

	/* Connect notify socket */
	ret = ustcomm_connect_unix_sock(sock_info->sock_path);
	if (ret < 0) {
		DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
		prev_connect_failed = 1;

		if (ust_lock()) {
			goto quit;
		}

		/*
		 * If we cannot find the sessiond daemon, don't delay
		 * constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	sock_info->notify_socket = ret;

	timeout = get_notify_sock_timeout();
	if (timeout >= 0) {
		/*
		 * Give at least 10ms to sessiond to reply to
		 * notifications.
		 */
		if (timeout < 10)
			timeout = 10;
		ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
				timeout);
		if (ret < 0) {
			WARN("Error setting socket receive timeout");
		}
		ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
				timeout);
		if (ret < 0) {
			WARN("Error setting socket send timeout");
		}
	} else if (timeout < -1) {
		WARN("Unsupported timeout value %ld", timeout);
	}

	if (ust_lock()) {
		goto quit;
	}

	ret = register_to_sessiond(sock_info->notify_socket,
			USTCTL_SOCKET_NOTIFY);
	if (ret < 0) {
		ERR("Error registering to %s ust notify socket",
			sock_info->name);
		prev_connect_failed = 1;
		/*
		 * If we cannot register to the sessiond daemon, don't
		 * delay constructor execution.
		 */
		ret = handle_register_done(sock_info);
		assert(!ret);
		ust_unlock();
		goto restart;
	}
	sock = sock_info->socket;

	ust_unlock();

	for (;;) {
		ssize_t len;
		struct ustcomm_ust_msg lum;

		len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
		switch (len) {
		case 0:	/* orderly shutdown */
			DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
			if (ust_lock()) {
				goto quit;
			}
			/*
			 * Either sessiond has shutdown or refused us by closing the socket.
			 * In either case, we don't want to delay constructor execution,
			 * and we need to wait before retrying.
			 */
			prev_connect_failed = 1;
			/*
			 * If we cannot register to the sessiond daemon, don't
			 * delay constructor execution.
			 */
			ret = handle_register_done(sock_info);
			assert(!ret);
			ust_unlock();
			goto end;
		case sizeof(lum):
			print_cmd(lum.cmd, lum.handle);
			ret = handle_message(sock_info, sock, &lum);
			if (ret) {
				ERR("Error handling message for %s socket", sock_info->name);
			}
			continue;
		default:
			if (len < 0) {
				DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
			} else {
				DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
			}
			if (len == -ECONNRESET) {
				DBG("%s remote end closed connection", sock_info->name);
				goto end;
			}
			goto end;
		}

	}
end:
	if (ust_lock()) {
		goto quit;
	}
	/* Cleanup socket handles before trying to reconnect */
	lttng_ust_objd_table_owner_cleanup(sock_info);
	ust_unlock();
	goto restart;	/* try to reconnect */

quit:
	ust_unlock();

	pthread_mutex_lock(&ust_exit_mutex);
	sock_info->thread_active = 0;
	pthread_mutex_unlock(&ust_exit_mutex);
	return NULL;
}
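
/*
 * Registration sequence sketch (illustrative summary of the thread
 * above): each listener maintains two unix socket connections to its
 * session daemon and completes each connect+register pair back to back
 * to avoid filling the connect backlog:
 *
 *	cmd socket:    connect() -> register_to_sessiond(USTCTL_SOCKET_CMD)
 *	notify socket: connect() -> set rcv/snd timeouts
 *	               -> register_to_sessiond(USTCTL_SOCKET_NOTIFY)
 *
 * then the thread loops on ustcomm_recv_unix_sock() / handle_message()
 * until the sessiond goes away, and reconnects via the restart label.
 */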

/*
 * Weak symbol to call when the ust malloc wrapper is not loaded.
 */
__attribute__((weak))
void lttng_ust_malloc_wrapper_init(void)
{
}

/*
 * sessiond monitoring thread: monitor presence of global and per-user
 * sessiond by polling the application common named pipe.
 */
void __attribute__((constructor)) lttng_ust_init(void)
{
	struct timespec constructor_timeout;
	sigset_t sig_all_blocked, orig_parent_mask;
	pthread_attr_t thread_attr;
	int timeout_mode;
	int ret;

	if (uatomic_xchg(&initialized, 1) == 1)
		return;

	/*
	 * Fixup interdependency between TLS fixup mutex (which happens
	 * to be the dynamic linker mutex) and ust_lock, taken within
	 * the ust lock.
	 */
	lttng_fixup_urcu_bp_tls();
	lttng_fixup_ringbuffer_tls();
	lttng_fixup_vtid_tls();
	lttng_fixup_nest_count_tls();
	lttng_fixup_procname_tls();
	lttng_fixup_ust_mutex_nest_tls();

	/*
	 * We want precise control over the order in which we construct
	 * our sub-libraries vs starting to receive commands from
	 * sessiond (otherwise leading to errors when trying to create
	 * sessiond before the init functions are completed).
	 */
	init_usterr();
	init_tracepoint();
	lttng_ust_baddr_statedump_init();
	lttng_ring_buffer_metadata_client_init();
	lttng_ring_buffer_client_overwrite_init();
	lttng_ring_buffer_client_overwrite_rt_init();
	lttng_ring_buffer_client_discard_init();
	lttng_ring_buffer_client_discard_rt_init();
	lttng_perf_counter_init();
	lttng_context_init();
	/*
	 * Invoke ust malloc wrapper init before starting other threads.
	 */
	lttng_ust_malloc_wrapper_init();

	timeout_mode = get_constructor_timeout(&constructor_timeout);

	ret = sem_init(&constructor_wait, 0, 0);
	assert(!ret);

	ret = setup_local_apps();
	if (ret) {
		DBG("local apps setup returned %d", ret);
	}

	/* A new thread created by pthread_create inherits the signal mask
	 * from the parent. To avoid any signal being received by the
	 * listener thread, we block all signals temporarily in the parent,
	 * while we create the listener thread.
	 */
	sigfillset(&sig_all_blocked);
	ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}

	ret = pthread_attr_init(&thread_attr);
	if (ret) {
		ERR("pthread_attr_init: %s", strerror(ret));
	}
	ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
	if (ret) {
		ERR("pthread_attr_setdetachstate: %s", strerror(ret));
	}

	pthread_mutex_lock(&ust_exit_mutex);
	ret = pthread_create(&global_apps.ust_listener, &thread_attr,
			ust_listener_thread, &global_apps);
	if (ret) {
		ERR("pthread_create global: %s", strerror(ret));
	}
	global_apps.thread_active = 1;
	pthread_mutex_unlock(&ust_exit_mutex);

	if (local_apps.allowed) {
		pthread_mutex_lock(&ust_exit_mutex);
		ret = pthread_create(&local_apps.ust_listener, &thread_attr,
				ust_listener_thread, &local_apps);
		if (ret) {
			ERR("pthread_create local: %s", strerror(ret));
		}
		local_apps.thread_active = 1;
		pthread_mutex_unlock(&ust_exit_mutex);
	} else {
		handle_register_done(&local_apps);
	}
	ret = pthread_attr_destroy(&thread_attr);
	if (ret) {
		ERR("pthread_attr_destroy: %s", strerror(ret));
	}

	/* Restore original signal mask in parent */
	ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
	if (ret) {
		ERR("pthread_sigmask: %s", strerror(ret));
	}

	switch (timeout_mode) {
	case 1:	/* timeout wait */
		do {
			ret = sem_timedwait(&constructor_wait,
					&constructor_timeout);
		} while (ret < 0 && errno == EINTR);
		if (ret < 0 && errno == ETIMEDOUT) {
			ERR("Timed out waiting for lttng-sessiond");
		} else {
			assert(!ret);
		}
		break;
	case -1:/* wait forever */
		do {
			ret = sem_wait(&constructor_wait);
		} while (ret < 0 && errno == EINTR);
		assert(!ret);
		break;
	case 0:	/* no timeout */
		break;
	}
}

static
void lttng_ust_cleanup(int exiting)
{
	cleanup_sock_info(&global_apps, exiting);
	cleanup_sock_info(&local_apps, exiting);
	/*
	 * The teardown steps in this function all affect data structures
	 * accessed under the UST lock by the listener thread. This
	 * lock, along with the lttng_ust_comm_should_quit flag, ensures
	 * that none of these threads are accessing this data at this
	 * point.
	 */
	lttng_ust_abi_exit();
	lttng_ust_events_exit();
	lttng_context_exit();
	lttng_perf_counter_exit();
	lttng_ring_buffer_client_discard_rt_exit();
	lttng_ring_buffer_client_discard_exit();
	lttng_ring_buffer_client_overwrite_rt_exit();
	lttng_ring_buffer_client_overwrite_exit();
	lttng_ring_buffer_metadata_client_exit();
	lttng_ust_baddr_statedump_destroy();
	exit_tracepoint();
	if (!exiting) {
		/* Reinitialize values for fork */
		sem_count = 2;
		lttng_ust_comm_should_quit = 0;
		initialized = 0;
	}
}

void __attribute__((destructor)) lttng_ust_exit(void)
{
	int ret;

	/*
	 * Using pthread_cancel here because:
	 * A) we don't want to hang application teardown.
	 * B) the thread is not allocating any resource.
	 */

	/*
	 * Require the communication thread to quit. Synchronize with
	 * mutexes to ensure it is not in a mutex critical section when
	 * pthread_cancel is later called.
	 */
	ust_lock_nocheck();
	lttng_ust_comm_should_quit = 1;
	ust_unlock();

	pthread_mutex_lock(&ust_exit_mutex);
	/* cancel threads */
	if (global_apps.thread_active) {
		ret = pthread_cancel(global_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling global ust listener thread: %s",
				strerror(ret));
		} else {
			global_apps.thread_active = 0;
		}
	}
	if (local_apps.thread_active) {
		ret = pthread_cancel(local_apps.ust_listener);
		if (ret) {
			ERR("Error cancelling local ust listener thread: %s",
				strerror(ret));
		} else {
			local_apps.thread_active = 0;
		}
	}
	pthread_mutex_unlock(&ust_exit_mutex);

	/*
	 * Do NOT join threads: use of sys_futex makes it impossible to
	 * join the threads without using async-cancel, but async-cancel
	 * is delivered by a signal, which could hit the target thread
	 * anywhere in its code path, including while the ust_lock() is
	 * held, causing a deadlock for the other thread. Let the OS
	 * clean up the threads if they are stalled in a syscall.
	 */
	lttng_ust_cleanup(1);
}
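
/*
 * Exit race note (illustrative, relating to the fix in this commit):
 * thread_active is only flipped under ust_exit_mutex, both at the end
 * of ust_listener_thread() and here, so pthread_cancel() is never
 * issued against a listener that has already announced its own exit:
 *
 *	listener thread                  lttng_ust_exit()
 *	---------------                  ----------------
 *	lock(ust_exit_mutex)             lock(ust_exit_mutex)
 *	thread_active = 0                if (thread_active)
 *	unlock(ust_exit_mutex)                   pthread_cancel(...)
 *	                                 unlock(ust_exit_mutex)
 */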

/*
 * We exclude the worker threads across fork and clone (except
 * CLONE_VM), because these system calls only keep the forking thread
 * running in the child. Therefore, we don't want to call fork or clone
 * in the middle of a tracepoint or ust tracing state modification.
 * Holding this mutex protects these structures across fork and clone.
 */
void ust_before_fork(sigset_t *save_sigset)
{
	/*
	 * Disable signals. This is to avoid that the child intervenes
	 * before it is properly set up for tracing. It is safer to
	 * disable all signals, because then we know we are not breaking
	 * anything by restoring the original mask.
	 */
	sigset_t all_sigs;
	int ret;

	if (URCU_TLS(lttng_ust_nest_count))
		return;
	/* Disable signals */
	sigfillset(&all_sigs);
	ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
	if (ret == -1) {
		PERROR("sigprocmask");
	}

	pthread_mutex_lock(&ust_fork_mutex);

	ust_lock_nocheck();
	rcu_bp_before_fork();
}

static void ust_after_fork_common(sigset_t *restore_sigset)
{
	int ret;

	DBG("process %d", getpid());
	ust_unlock();

	pthread_mutex_unlock(&ust_fork_mutex);

	/* Restore signals */
	ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
	if (ret == -1) {
		PERROR("sigprocmask");
	}
}

void ust_after_fork_parent(sigset_t *restore_sigset)
{
	if (URCU_TLS(lttng_ust_nest_count))
		return;
	DBG("process %d", getpid());
	rcu_bp_after_fork_parent();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
}

/*
 * After fork, in the child, we need to cleanup all the leftover state,
 * except the worker thread which already magically disappeared thanks
 * to the weird Linux fork semantics. After tidying up, we call
 * lttng_ust_init() again to start over as a new PID.
 *
 * This is meant for forks() that have tracing in the child between the
 * fork and following exec call (if there is any).
 */
void ust_after_fork_child(sigset_t *restore_sigset)
{
	if (URCU_TLS(lttng_ust_nest_count))
		return;
	DBG("process %d", getpid());
	/* Release urcu mutexes */
	rcu_bp_after_fork_child();
	lttng_ust_cleanup(0);
	lttng_context_vtid_reset();
	/* Release mutexes and reenable signals */
	ust_after_fork_common(restore_sigset);
	lttng_ust_init();
}
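
/*
 * Call sequence sketch (illustrative): the fork wrapper library
 * (liblttng-ust-fork) is expected to bracket fork() with these hooks
 * roughly as follows, so tracing state is quiescent across the fork
 * and the child restarts with a fresh lttng_ust_init():
 *
 *	sigset_t saved;
 *
 *	ust_before_fork(&saved);
 *	pid = fork();
 *	if (pid == 0)
 *		ust_after_fork_child(&saved);
 *	else
 *		ust_after_fork_parent(&saved);
 */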

void lttng_ust_sockinfo_session_enabled(void *owner)
{
	struct sock_info *sock_info = owner;
	sock_info->statedump_pending = 1;
}