/*
 * ust.git — libust/tracectl.c
 * (change: "Make app socket directories per-user v2")
 */
1 /* Copyright (C) 2009 Pierre-Marc Fournier
2 *
3 * This library is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU Lesser General Public
5 * License as published by the Free Software Foundation; either
6 * version 2.1 of the License, or (at your option) any later version.
7 *
8 * This library is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * Lesser General Public License for more details.
12 *
13 * You should have received a copy of the GNU Lesser General Public
14 * License along with this library; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
16 */
17
18 /* This file contains the implementation of the UST listener thread, which
19 * receives trace control commands. It also coordinates the initialization of
20 * libust.
21 */
22
23 #define _GNU_SOURCE
24 #include <stdio.h>
25 #include <stdlib.h>
26 #include <stdint.h>
27 #include <pthread.h>
28 #include <signal.h>
29 #include <sys/epoll.h>
30 #include <sys/time.h>
31 #include <sys/types.h>
32 #include <sys/socket.h>
33 #include <fcntl.h>
34 #include <poll.h>
35 #include <regex.h>
36 #include <urcu/uatomic_arch.h>
37 #include <urcu/list.h>
38
39 #include <ust/marker.h>
40 #include <ust/tracepoint.h>
41 #include <ust/tracectl.h>
42 #include <ust/clock.h>
43 #include "tracer.h"
44 #include "usterr.h"
45 #include "ustcomm.h"
46 #include "buffers.h"
47 #include "marker-control.h"
48
/* Unique identifier for this process instance; distinguishes processes
 * that share a pid (e.g. before and after an exec).
 * This should only be accessed by the constructor, before the creation
 * of the listener, and then only by the listener.
 */
s64 pidunique = -1LL;

/* The process pid is used to detect a non-traceable fork
 * and allow the non-traceable fork to be ignored
 * by destructor sequences in libust
 */
static pid_t processpid = 0;

/* Scratch request header and payload buffers, reused for every request
 * handled by the (single) listener thread.
 */
static struct ustcomm_header _receive_header;
static struct ustcomm_header *receive_header = &_receive_header;
static char receive_buffer[USTCOMM_BUFFER_SIZE];
static char send_buffer[USTCOMM_BUFFER_SIZE];

/* epoll instance the listener thread waits on. */
static int epoll_fd;

/*
 * Listener thread data vs fork() protection mechanism. Ensures that no listener
 * thread mutexes and data structures are being concurrently modified or held by
 * other threads when fork() is executed.
 */
static pthread_mutex_t listener_thread_data_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Mutex protecting listen_sock. Nests inside listener_thread_data_mutex. */
static pthread_mutex_t listen_sock_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct ustcomm_sock *listen_sock;

extern struct chan_info_struct chan_infos[];

/* All sockets served by the listener (listen socket plus accepted
 * connections). */
static struct cds_list_head ust_socks = CDS_LIST_HEAD_INIT(ust_socks);

/* volatile because shared between the listener and the main thread */
/* NOTE(review): the declaration is not actually volatile; accesses go
 * through CMM_LOAD_SHARED/CMM_STORE_SHARED instead — confirm the comment
 * matches the intended access discipline. */
int buffers_to_export = 0;

/* Clock id used for timestamps: CLOCK_TRACE when available, otherwise
 * CLOCK_MONOTONIC (selected in the constructor). */
int ust_clock_source;
86
87 static long long make_pidunique(void)
88 {
89 s64 retval;
90 struct timeval tv;
91
92 gettimeofday(&tv, NULL);
93
94 retval = tv.tv_sec;
95 retval <<= 32;
96 retval |= tv.tv_usec;
97
98 return retval;
99 }
100
101 static void print_markers(FILE *fp)
102 {
103 struct marker_iter iter;
104
105 lock_markers();
106 marker_iter_reset(&iter);
107 marker_iter_start(&iter);
108
109 while (iter.marker) {
110 fprintf(fp, "marker: %s/%s %d \"%s\" %p\n",
111 (*iter.marker)->channel,
112 (*iter.marker)->name,
113 (int)imv_read((*iter.marker)->state),
114 (*iter.marker)->format,
115 (*iter.marker)->location);
116 marker_iter_next(&iter);
117 }
118 unlock_markers();
119 }
120
121 static void print_trace_events(FILE *fp)
122 {
123 struct trace_event_iter iter;
124
125 lock_trace_events();
126 trace_event_iter_reset(&iter);
127 trace_event_iter_start(&iter);
128
129 while (iter.trace_event) {
130 fprintf(fp, "trace_event: %s\n", (*iter.trace_event)->name);
131 trace_event_iter_next(&iter);
132 }
133 unlock_trace_events();
134 }
135
136 static int connect_ustconsumer(void)
137 {
138 int result, fd;
139 char default_daemon_path[] = SOCK_DIR "/ustconsumer";
140 char *explicit_daemon_path, *daemon_path;
141
142 explicit_daemon_path = getenv("UST_DAEMON_SOCKET");
143 if (explicit_daemon_path) {
144 daemon_path = explicit_daemon_path;
145 } else {
146 daemon_path = default_daemon_path;
147 }
148
149 DBG("Connecting to daemon_path %s", daemon_path);
150
151 result = ustcomm_connect_path(daemon_path, &fd);
152 if (result < 0) {
153 WARN("connect_ustconsumer failed, daemon_path: %s",
154 daemon_path);
155 return result;
156 }
157
158 return fd;
159 }
160
161
162 static void request_buffer_consumer(int sock,
163 const char *trace,
164 const char *channel,
165 int cpu)
166 {
167 struct ustcomm_header send_header, recv_header;
168 struct ustcomm_buffer_info buf_inf;
169 int result = 0;
170
171 result = ustcomm_pack_buffer_info(&send_header,
172 &buf_inf,
173 trace,
174 channel,
175 cpu);
176
177 if (result < 0) {
178 ERR("failed to pack buffer info message %s_%d",
179 channel, cpu);
180 return;
181 }
182
183 buf_inf.pid = getpid();
184 send_header.command = CONSUME_BUFFER;
185
186 result = ustcomm_req(sock, &send_header, (char *) &buf_inf,
187 &recv_header, NULL);
188 if (result <= 0) {
189 PERROR("request for buffer consumer failed, is the daemon online?");
190 }
191
192 return;
193 }
194
/* Ask the daemon to collect a trace called trace_name and being
 * produced by this pid.
 *
 * The trace must be at least allocated. (It can also be started.)
 * This is because _ltt_trace_find is used.
 *
 * For each (channel, cpu) buffer whose collection is requested,
 * buffers_to_export is incremented; notify_buffer_mapped() decrements
 * it again once the daemon has mapped the buffer.
 */

static void inform_consumer_daemon(const char *trace_name)
{
	int sock, i,j;
	struct ust_trace *trace;
	const char *ch_name;

	sock = connect_ustconsumer();
	if (sock < 0) {
		/* No daemon reachable: nothing to inform. */
		return;
	}

	DBG("Connected to ustconsumer");

	/* Hold the traces lock across the whole walk so the trace and
	 * its channels cannot be destroyed under us. */
	ltt_lock_traces();

	trace = _ltt_trace_find(trace_name);
	if (trace == NULL) {
		WARN("inform_consumer_daemon: could not find trace \"%s\"; it is probably already destroyed", trace_name);
		goto unlock_traces;
	}

	for (i=0; i < trace->nr_channels; i++) {
		if (trace->channels[i].request_collection) {
			/* iterate on all cpus */
			for (j=0; j<trace->channels[i].n_cpus; j++) {
				ch_name = trace->channels[i].channel_name;
				request_buffer_consumer(sock, trace_name,
							ch_name, j);
				/* One more buffer the daemon must map
				 * before a clean shutdown. */
				CMM_STORE_SHARED(buffers_to_export,
						 CMM_LOAD_SHARED(buffers_to_export)+1);
			}
		}
	}

unlock_traces:
	ltt_unlock_traces();

	close(sock);
}
241
242 static struct ust_channel *find_channel(const char *ch_name,
243 struct ust_trace *trace)
244 {
245 int i;
246
247 for (i=0; i<trace->nr_channels; i++) {
248 if (!strcmp(trace->channels[i].channel_name, ch_name)) {
249 return &trace->channels[i];
250 }
251 }
252
253 return NULL;
254 }
255
/* Fetch the shared-memory ids and the data-ready pipe read fd of the
 * per-cpu buffer trace_name/ch_name_<ch_cpu>, for handing off to the
 * consumer daemon.
 *
 * Returns 0 on success and fills the three out parameters, or
 * -ENODATA when the trace or channel cannot be found.
 *
 * NOTE(review): trace and channel are dereferenced after
 * ltt_unlock_traces() — presumably safe because the trace cannot be
 * destroyed while buffers are being exported; confirm against the
 * destroy path.
 */
static int get_buffer_shmid_pipe_fd(const char *trace_name, const char *ch_name,
				    int ch_cpu,
				    int *buf_shmid,
				    int *buf_struct_shmid,
				    int *buf_pipe_fd)
{
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_shmid_pipe_fd");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);
	ltt_unlock_traces();

	if (trace == NULL) {
		ERR("cannot find trace!");
		return -ENODATA;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("cannot find channel %s!", ch_name);
		return -ENODATA;
	}

	buf = channel->buf[ch_cpu];

	*buf_shmid = buf->shmid;
	*buf_struct_shmid = channel->buf_struct_shmids[ch_cpu];
	*buf_pipe_fd = buf->data_ready_fd_read;

	return 0;
}
291
292 static int get_subbuf_num_size(const char *trace_name, const char *ch_name,
293 int *num, int *size)
294 {
295 struct ust_trace *trace;
296 struct ust_channel *channel;
297
298 DBG("get_subbuf_size");
299
300 ltt_lock_traces();
301 trace = _ltt_trace_find(trace_name);
302 ltt_unlock_traces();
303
304 if (!trace) {
305 ERR("cannot find trace!");
306 return -ENODATA;
307 }
308
309 channel = find_channel(ch_name, trace);
310 if (!channel) {
311 ERR("unable to find channel");
312 return -ENODATA;
313 }
314
315 *num = channel->subbuf_cnt;
316 *size = channel->subbuf_size;
317
318 return 0;
319 }
320
/* Return the smallest power of two greater than or equal to v.
 *
 * Rewritten portably: the previous version used the non-portable
 * fls() and invoked undefined behavior for v == 0 (a shift by -1),
 * as well as a signed shift overflow for v > 2^31. This version
 * defines pow2_higher_or_eq(0) == 1 and saturates at the top bit.
 */
static unsigned int pow2_higher_or_eq(unsigned int v)
{
	unsigned int pow = 1;
	/* Highest representable power of two: doubling past it would
	 * overflow (wrap to 0) and loop forever. */
	const unsigned int top_bit =
		1u << (sizeof(unsigned int) * 8 - 1);

	while (pow < v && pow < top_bit)
		pow <<= 1;

	return pow;
}
333
/* Set the sub-buffer size of trace_name/ch_name. Only traces still in
 * the setup state are eligible (_ltt_trace_find_setup). The size is
 * rounded up to the next power of two, with a minimum of 2.
 *
 * Returns 0 on success, -ENODATA when the trace or channel is missing.
 */
static int set_subbuf_size(const char *trace_name, const char *ch_name,
			   unsigned int size)
{
	unsigned int power;
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;

	DBG("set_subbuf_size");

	power = pow2_higher_or_eq(size);
	power = max_t(unsigned int, 2u, power);
	if (power != size) {
		WARN("using the next power of two for buffer size = %u\n", power);
	}

	ltt_lock_traces();
	trace = _ltt_trace_find_setup(trace_name);
	if (trace == NULL) {
		ERR("cannot find trace!");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("unable to find channel");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel->subbuf_size = power;
	/* NOTE(review): %zu assumes subbuf_size is size_t — confirm the
	 * field's type in struct ust_channel. */
	DBG("the set_subbuf_size for the requested channel is %zu", channel->subbuf_size);

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
373
374 static int set_subbuf_num(const char *trace_name, const char *ch_name,
375 unsigned int num)
376 {
377 struct ust_trace *trace;
378 struct ust_channel *channel;
379 int retval = 0;
380
381 DBG("set_subbuf_num");
382
383 if (num < 2) {
384 ERR("subbuffer count should be greater than 2");
385 return -EINVAL;
386 }
387
388 ltt_lock_traces();
389 trace = _ltt_trace_find_setup(trace_name);
390 if (trace == NULL) {
391 ERR("cannot find trace!");
392 retval = -ENODATA;
393 goto unlock_traces;
394 }
395
396 channel = find_channel(ch_name, trace);
397 if (!channel) {
398 ERR("unable to find channel");
399 retval = -ENODATA;
400 goto unlock_traces;
401 }
402
403 channel->subbuf_cnt = num;
404 DBG("the set_subbuf_cnt for the requested channel is %u", channel->subbuf_cnt);
405
406 unlock_traces:
407 ltt_unlock_traces();
408 return retval;
409 }
410
/* Reserve the next filled sub-buffer of trace_name/ch_name_<ch_cpu>
 * for consumption. On success, *consumed_old receives the consumed
 * counter the caller must later hand back to put_subbuffer().
 *
 * Returns 0 on success, -ENODATA when the trace or channel is
 * missing, or a negative error from ust_buffers_get_subbuf() when no
 * sub-buffer is available.
 */
static int get_subbuffer(const char *trace_name, const char *ch_name,
			 int ch_cpu, long *consumed_old)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_subbuf");

	*consumed_old = 0;

	/* The traces lock is held across the buffer access, unlike the
	 * shmid lookup path. */
	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		DBG("Cannot find trace. It was likely destroyed by the user.");
		retval = -ENODATA;
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		ERR("unable to find channel");
		retval = -ENODATA;
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	retval = ust_buffers_get_subbuf(buf, consumed_old);
	if (retval < 0) {
		WARN("missed buffer?");
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
451
452
/* Record that the consumer daemon has mapped the per-cpu buffer
 * trace_name/ch_name_<ch_cpu> into its own address space.
 *
 * Returns 0 on success, -ENODATA when the trace or channel is missing.
 */
static int notify_buffer_mapped(const char *trace_name,
				const char *ch_name,
				int ch_cpu)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("get_buffer_fd");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		retval = -ENODATA;
		ERR("unable to find channel");
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	/* Being here is the proof the daemon has mapped the buffer in its
	 * memory. We may now decrement buffers_to_export.
	 */
	/* NOTE(review): the consumed == 0 test presumably guards against
	 * decrementing more than once per buffer (i.e. only on the first
	 * notification, before any consumption) — confirm. */
	if (uatomic_read(&buf->consumed) == 0) {
		DBG("decrementing buffers_to_export");
		CMM_STORE_SHARED(buffers_to_export, CMM_LOAD_SHARED(buffers_to_export)-1);
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
495
/* Release a sub-buffer previously reserved with get_subbuffer(),
 * passing back the consumed_old counter obtained there.
 *
 * Returns 0 on success, -ENODATA when the trace or channel is
 * missing, or a negative error from ust_buffers_put_subbuf().
 */
static int put_subbuffer(const char *trace_name, const char *ch_name,
			 int ch_cpu, long consumed_old)
{
	int retval = 0;
	struct ust_trace *trace;
	struct ust_channel *channel;
	struct ust_buffer *buf;

	DBG("put_subbuf");

	ltt_lock_traces();
	trace = _ltt_trace_find(trace_name);

	if (!trace) {
		retval = -ENODATA;
		DBG("Cannot find trace. It was likely destroyed by the user.");
		goto unlock_traces;
	}

	channel = find_channel(ch_name, trace);
	if (!channel) {
		retval = -ENODATA;
		ERR("unable to find channel");
		goto unlock_traces;
	}

	buf = channel->buf[ch_cpu];

	retval = ust_buffers_put_subbuf(buf, consumed_old);
	if (retval < 0) {
		WARN("ust_buffers_put_subbuf: error (subbuf=%s_%d)",
		     ch_name, ch_cpu);
	} else {
		DBG("ust_buffers_put_subbuf: success (subbuf=%s_%d)",
		    ch_name, ch_cpu);
	}

unlock_traces:
	ltt_unlock_traces();

	return retval;
}
538
539 static void release_listener_mutex(void *ptr)
540 {
541 pthread_mutex_unlock(&listener_thread_data_mutex);
542 }
543
/* pthread cleanup handler for the listener thread: tear down the
 * application listen socket (if still present) under listen_sock_mutex
 * so a concurrent fork handler cannot race with the teardown. The ptr
 * argument is unused.
 */
static void listener_cleanup(void *ptr)
{
	pthread_mutex_lock(&listen_sock_mutex);
	if (listen_sock) {
		ustcomm_del_named_sock(listen_sock, 0);
		listen_sock = NULL;
	}
	pthread_mutex_unlock(&listen_sock_mutex);
}
553
554 static int force_subbuf_switch(const char *trace_name)
555 {
556 struct ust_trace *trace;
557 int i, j, retval = 0;
558
559 ltt_lock_traces();
560 trace = _ltt_trace_find(trace_name);
561 if (!trace) {
562 retval = -ENODATA;
563 DBG("Cannot find trace. It was likely destroyed by the user.");
564 goto unlock_traces;
565 }
566
567 for (i = 0; i < trace->nr_channels; i++) {
568 for (j = 0; j < trace->channels[i].n_cpus; j++) {
569 ltt_force_switch(trace->channels[i].buf[j],
570 FORCE_FLUSH);
571 }
572 }
573
574 unlock_traces:
575 ltt_unlock_traces();
576
577 return retval;
578 }
579
/* Execute a trace-level control command on the trace named trace_name.
 *
 * Returns 0 on success, or the negative error of the first failing
 * ltt_* step. Commands not handled by this switch fall through and
 * return 0 (the caller only routes trace commands here).
 */
static int process_trace_cmd(int command, char *trace_name)
{
	int result;
	char trace_type[] = "ustrelay";

	switch(command) {
	case START:
		/* start is an operation that setups the trace, allocates it and starts it */
		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}

		/* Buffers exist now; tell the consumer daemon to collect. */
		inform_consumer_daemon(trace_name);

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case SETUP_TRACE:
		DBG("trace setup");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case ALLOC_TRACE:
		DBG("trace alloc");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		inform_consumer_daemon(trace_name);

		return 0;

	case CREATE_TRACE:
		/* NOTE(review): identical to SETUP_TRACE (setup + set_type);
		 * presumably kept as a separate command for protocol
		 * compatibility — confirm. */
		DBG("trace create");

		result = ltt_trace_setup(trace_name);
		if (result < 0) {
			ERR("ltt_trace_setup failed");
			return result;
		}

		result = ltt_trace_set_type(trace_name, trace_type);
		if (result < 0) {
			ERR("ltt_trace_set_type failed");
			return result;
		}

		return 0;
	case START_TRACE:
		DBG("trace start");

		result = ltt_trace_alloc(trace_name);
		if (result < 0) {
			ERR("ltt_trace_alloc failed");
			return result;
		}
		/* At this point result >= 0; the check below informs the
		 * daemon only when alloc reported 0. */
		if (!result) {
			inform_consumer_daemon(trace_name);
		}

		result = ltt_trace_start(trace_name);
		if (result < 0) {
			ERR("ltt_trace_start failed");
			return result;
		}

		return 0;
	case STOP_TRACE:
		DBG("trace stop");

		result = ltt_trace_stop(trace_name);
		if (result < 0) {
			ERR("ltt_trace_stop failed");
			return result;
		}

		return 0;
	case DESTROY_TRACE:
		DBG("trace destroy");

		result = ltt_trace_destroy(trace_name, 0);
		if (result < 0) {
			ERR("ltt_trace_destroy failed");
			return result;
		}
		return 0;
	case FORCE_SUBBUF_SWITCH:
		DBG("force switch");

		result = force_subbuf_switch(trace_name);
		if (result < 0) {
			ERR("force_subbuf_switch failed");
			return result;
		}
		return 0;
	}

	return 0;
}
710
711
/* Handle a channel-level request (get/set sub-buffer count and size)
 * and send the reply on sock. The reply payload reuses the global
 * send_buffer (listener thread only).
 */
static void process_channel_cmd(int sock, int command,
				struct ustcomm_channel_info *ch_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_channel_info *reply_msg =
		(struct ustcomm_channel_info *)send_buffer;
	int result, offset = 0, num, size;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_SUBBUF_NUM_SIZE:
		result = get_subbuf_num_size(ch_inf->trace,
					     ch_inf->channel,
					     &num, &size);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		/* Poison the pointer field so the peer cannot mistake it
		 * for a valid address. */
		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->subbuf_num = num;
		reply_msg->subbuf_size = size;


		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case SET_SUBBUF_NUM:
		reply_header->result = set_subbuf_num(ch_inf->trace,
						      ch_inf->channel,
						      ch_inf->subbuf_num);

		break;
	case SET_SUBBUF_SIZE:
		reply_header->result = set_subbuf_size(ch_inf->trace,
						       ch_inf->channel,
						       ch_inf->subbuf_size);


		break;
	}
	/* An unmatched command replies with the zeroed header (result 0). */
	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}
}
759
/* Handle a buffer-level request (shmid/pipe-fd handoff, map
 * notification, get/put sub-buffer) and send the reply on sock.
 * The reply payload reuses the global send_buffer (listener thread
 * only).
 */
static void process_buffer_cmd(int sock, int command,
			       struct ustcomm_buffer_info *buf_inf)
{
	struct ustcomm_header _reply_header;
	struct ustcomm_header *reply_header = &_reply_header;
	struct ustcomm_buffer_info *reply_msg =
		(struct ustcomm_buffer_info *)send_buffer;
	int result, offset = 0, buf_shmid, buf_struct_shmid, buf_pipe_fd;
	long consumed_old;

	memset(reply_header, 0, sizeof(*reply_header));

	switch (command) {
	case GET_BUF_SHMID_PIPE_FD:
		result = get_buffer_shmid_pipe_fd(buf_inf->trace,
						  buf_inf->channel,
						  buf_inf->ch_cpu,
						  &buf_shmid,
						  &buf_struct_shmid,
						  &buf_pipe_fd);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->buf_shmid = buf_shmid;
		reply_msg->buf_struct_shmid = buf_struct_shmid;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);
		reply_header->fd_included = 1;

		/* This case sends its own reply (the pipe fd must travel
		 * via SCM_RIGHTS) and returns early, skipping the common
		 * ustcomm_send() below. */
		if (ustcomm_send_fd(sock, reply_header, (char *)reply_msg,
				    &buf_pipe_fd) < 0) {
			ERR("ustcomm_send failed");
		}
		return;

	case NOTIFY_BUF_MAPPED:
		reply_header->result =
			notify_buffer_mapped(buf_inf->trace,
					     buf_inf->channel,
					     buf_inf->ch_cpu);
		break;
	case GET_SUBBUFFER:
		result = get_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu, &consumed_old);
		if (result < 0) {
			reply_header->result = result;
			break;
		}

		reply_msg->channel = USTCOMM_POISON_PTR;
		reply_msg->consumed_old = consumed_old;

		reply_header->size = COMPUTE_MSG_SIZE(reply_msg, offset);

		break;
	case PUT_SUBBUFFER:
		result = put_subbuffer(buf_inf->trace, buf_inf->channel,
				       buf_inf->ch_cpu,
				       buf_inf->consumed_old);
		reply_header->result = result;

		break;
	}

	if (ustcomm_send(sock, reply_header, (char *)reply_msg) < 0) {
		ERR("ustcomm_send failed");
	}

}
832
833 static void process_marker_cmd(int sock, int command,
834 struct ustcomm_marker_info *marker_inf)
835 {
836 struct ustcomm_header _reply_header;
837 struct ustcomm_header *reply_header = &_reply_header;
838 int result = 0;
839
840 memset(reply_header, 0, sizeof(*reply_header));
841
842 switch(command) {
843 case ENABLE_MARKER:
844
845 result = ltt_marker_connect(marker_inf->channel,
846 marker_inf->marker,
847 "default");
848 if (result < 0) {
849 WARN("could not enable marker; channel=%s,"
850 " name=%s",
851 marker_inf->channel,
852 marker_inf->marker);
853
854 }
855 break;
856 case DISABLE_MARKER:
857 result = ltt_marker_disconnect(marker_inf->channel,
858 marker_inf->marker,
859 "default");
860 if (result < 0) {
861 WARN("could not disable marker; channel=%s,"
862 " name=%s",
863 marker_inf->channel,
864 marker_inf->marker);
865 }
866 break;
867 }
868
869 reply_header->result = result;
870
871 if (ustcomm_send(sock, reply_header, NULL) < 0) {
872 ERR("ustcomm_send failed");
873 }
874
875 }
876 static void process_client_cmd(struct ustcomm_header *recv_header,
877 char *recv_buf, int sock)
878 {
879 int result;
880 struct ustcomm_header _reply_header;
881 struct ustcomm_header *reply_header = &_reply_header;
882 char *send_buf = send_buffer;
883
884 memset(reply_header, 0, sizeof(*reply_header));
885 memset(send_buf, 0, sizeof(send_buffer));
886
887 switch(recv_header->command) {
888 case GET_SUBBUF_NUM_SIZE:
889 case SET_SUBBUF_NUM:
890 case SET_SUBBUF_SIZE:
891 {
892 struct ustcomm_channel_info *ch_inf;
893 ch_inf = (struct ustcomm_channel_info *)recv_buf;
894 result = ustcomm_unpack_channel_info(ch_inf);
895 if (result < 0) {
896 ERR("couldn't unpack channel info");
897 reply_header->result = -EINVAL;
898 goto send_response;
899 }
900 process_channel_cmd(sock, recv_header->command, ch_inf);
901 return;
902 }
903 case GET_BUF_SHMID_PIPE_FD:
904 case NOTIFY_BUF_MAPPED:
905 case GET_SUBBUFFER:
906 case PUT_SUBBUFFER:
907 {
908 struct ustcomm_buffer_info *buf_inf;
909 buf_inf = (struct ustcomm_buffer_info *)recv_buf;
910 result = ustcomm_unpack_buffer_info(buf_inf);
911 if (result < 0) {
912 ERR("couldn't unpack buffer info");
913 reply_header->result = -EINVAL;
914 goto send_response;
915 }
916 process_buffer_cmd(sock, recv_header->command, buf_inf);
917 return;
918 }
919 case ENABLE_MARKER:
920 case DISABLE_MARKER:
921 {
922 struct ustcomm_marker_info *marker_inf;
923 marker_inf = (struct ustcomm_marker_info *)recv_buf;
924 result = ustcomm_unpack_marker_info(marker_inf);
925 if (result < 0) {
926 ERR("couldn't unpack marker info");
927 reply_header->result = -EINVAL;
928 goto send_response;
929 }
930 process_marker_cmd(sock, recv_header->command, marker_inf);
931 return;
932 }
933 case LIST_MARKERS:
934 {
935 char *ptr;
936 size_t size;
937 FILE *fp;
938
939 fp = open_memstream(&ptr, &size);
940 if (fp == NULL) {
941 ERR("opening memstream failed");
942 return;
943 }
944 print_markers(fp);
945 fclose(fp);
946
947 reply_header->size = size + 1; /* Include final \0 */
948
949 result = ustcomm_send(sock, reply_header, ptr);
950
951 free(ptr);
952
953 if (result < 0) {
954 PERROR("failed to send markers list");
955 }
956
957 break;
958 }
959 case LIST_TRACE_EVENTS:
960 {
961 char *ptr;
962 size_t size;
963 FILE *fp;
964
965 fp = open_memstream(&ptr, &size);
966 if (fp == NULL) {
967 ERR("opening memstream failed");
968 return;
969 }
970 print_trace_events(fp);
971 fclose(fp);
972
973 reply_header->size = size + 1; /* Include final \0 */
974
975 result = ustcomm_send(sock, reply_header, ptr);
976
977 free(ptr);
978
979 if (result < 0) {
980 ERR("list_trace_events failed");
981 return;
982 }
983
984 break;
985 }
986 case LOAD_PROBE_LIB:
987 {
988 char *libfile;
989
990 /* FIXME: No functionality at all... */
991 libfile = recv_buf;
992
993 DBG("load_probe_lib loading %s", libfile);
994
995 break;
996 }
997 case GET_PIDUNIQUE:
998 {
999 struct ustcomm_pidunique *pid_msg;
1000 pid_msg = (struct ustcomm_pidunique *)send_buf;
1001
1002 pid_msg->pidunique = pidunique;
1003 reply_header->size = sizeof(pid_msg);
1004
1005 goto send_response;
1006
1007 }
1008 case GET_SOCK_PATH:
1009 {
1010 struct ustcomm_single_field *sock_msg;
1011 char *sock_path_env;
1012
1013 sock_msg = (struct ustcomm_single_field *)send_buf;
1014
1015 sock_path_env = getenv("UST_DAEMON_SOCKET");
1016
1017 if (!sock_path_env) {
1018 result = ustcomm_pack_single_field(reply_header,
1019 sock_msg,
1020 SOCK_DIR "/ustconsumer");
1021
1022 } else {
1023 result = ustcomm_pack_single_field(reply_header,
1024 sock_msg,
1025 sock_path_env);
1026 }
1027 reply_header->result = result;
1028
1029 goto send_response;
1030 }
1031 case SET_SOCK_PATH:
1032 {
1033 struct ustcomm_single_field *sock_msg;
1034 sock_msg = (struct ustcomm_single_field *)recv_buf;
1035 result = ustcomm_unpack_single_field(sock_msg);
1036 if (result < 0) {
1037 reply_header->result = -EINVAL;
1038 goto send_response;
1039 }
1040
1041 reply_header->result = setenv("UST_DAEMON_SOCKET",
1042 sock_msg->field, 1);
1043
1044 goto send_response;
1045 }
1046 case START:
1047 case SETUP_TRACE:
1048 case ALLOC_TRACE:
1049 case CREATE_TRACE:
1050 case START_TRACE:
1051 case STOP_TRACE:
1052 case DESTROY_TRACE:
1053 case FORCE_SUBBUF_SWITCH:
1054 {
1055 struct ustcomm_single_field *trace_inf =
1056 (struct ustcomm_single_field *)recv_buf;
1057
1058 result = ustcomm_unpack_single_field(trace_inf);
1059 if (result < 0) {
1060 ERR("couldn't unpack trace info");
1061 reply_header->result = -EINVAL;
1062 goto send_response;
1063 }
1064
1065 reply_header->result =
1066 process_trace_cmd(recv_header->command,
1067 trace_inf->field);
1068 goto send_response;
1069
1070 }
1071 default:
1072 reply_header->result = -EINVAL;
1073
1074 goto send_response;
1075 }
1076
1077 return;
1078
1079 send_response:
1080 ustcomm_send(sock, reply_header, send_buf);
1081 }
1082
1083 #define MAX_EVENTS 10
1084
1085 void *listener_main(void *p)
1086 {
1087 struct ustcomm_sock *epoll_sock;
1088 struct epoll_event events[MAX_EVENTS];
1089 struct sockaddr addr;
1090 int accept_fd, nfds, result, i, addr_size;
1091
1092 DBG("LISTENER");
1093
1094 pthread_cleanup_push(listener_cleanup, NULL);
1095
1096 for(;;) {
1097 nfds = epoll_wait(epoll_fd, events, MAX_EVENTS, -1);
1098 if (nfds == -1) {
1099 PERROR("listener_main: epoll_wait failed");
1100 continue;
1101 }
1102
1103 for (i = 0; i < nfds; i++) {
1104 pthread_mutex_lock(&listener_thread_data_mutex);
1105 pthread_cleanup_push(release_listener_mutex, NULL);
1106 epoll_sock = (struct ustcomm_sock *)events[i].data.ptr;
1107 if (epoll_sock == listen_sock) {
1108 addr_size = sizeof(struct sockaddr);
1109 accept_fd = accept(epoll_sock->fd,
1110 &addr,
1111 (socklen_t *)&addr_size);
1112 if (accept_fd == -1) {
1113 PERROR("listener_main: accept failed");
1114 continue;
1115 }
1116 ustcomm_init_sock(accept_fd, epoll_fd,
1117 &ust_socks);
1118 } else {
1119 memset(receive_header, 0,
1120 sizeof(*receive_header));
1121 memset(receive_buffer, 0,
1122 sizeof(receive_buffer));
1123 result = ustcomm_recv(epoll_sock->fd,
1124 receive_header,
1125 receive_buffer);
1126 if (result == 0) {
1127 ustcomm_del_sock(epoll_sock, 0);
1128 } else {
1129 process_client_cmd(receive_header,
1130 receive_buffer,
1131 epoll_sock->fd);
1132 }
1133 }
1134 pthread_cleanup_pop(1); /* release listener mutex */
1135 }
1136 }
1137
1138 pthread_cleanup_pop(1);
1139 }
1140
1141 /* These should only be accessed in the parent thread,
1142 * not the listener.
1143 */
1144 static volatile sig_atomic_t have_listener = 0;
1145 static pthread_t listener_thread;
1146
1147 void create_listener(void)
1148 {
1149 int result;
1150 sigset_t sig_all_blocked;
1151 sigset_t orig_parent_mask;
1152
1153 if (have_listener) {
1154 WARN("not creating listener because we already had one");
1155 return;
1156 }
1157
1158 /* A new thread created by pthread_create inherits the signal mask
1159 * from the parent. To avoid any signal being received by the
1160 * listener thread, we block all signals temporarily in the parent,
1161 * while we create the listener thread.
1162 */
1163
1164 sigfillset(&sig_all_blocked);
1165
1166 result = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
1167 if (result) {
1168 PERROR("pthread_sigmask: %s", strerror(result));
1169 }
1170
1171 result = pthread_create(&listener_thread, NULL, listener_main, NULL);
1172 if (result == -1) {
1173 PERROR("pthread_create");
1174 }
1175
1176 /* Restore original signal mask in parent */
1177 result = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
1178 if (result) {
1179 PERROR("pthread_sigmask: %s", strerror(result));
1180 } else {
1181 have_listener = 1;
1182 }
1183 }
1184
1185 #define AUTOPROBE_DISABLED 0
1186 #define AUTOPROBE_ENABLE_ALL 1
1187 #define AUTOPROBE_ENABLE_REGEX 2
1188 static int autoprobe_method = AUTOPROBE_DISABLED;
1189 static regex_t autoprobe_regex;
1190
/* Connect the "default" probe to marker m if autoprobe is enabled —
 * either unconditionally (AUTOPROBE_ENABLE_ALL) or when
 * "channel/name" matches the compiled regex
 * (AUTOPROBE_ENABLE_REGEX). Used both as the new-marker callback and
 * for markers already registered at constructor time.
 */
static void auto_probe_connect(struct marker *m)
{
	int result;

	char* concat_name = NULL;
	const char *probe_name = "default";

	if (autoprobe_method == AUTOPROBE_DISABLED) {
		return;
	} else if (autoprobe_method == AUTOPROBE_ENABLE_REGEX) {
		result = asprintf(&concat_name, "%s/%s", m->channel, m->name);
		if (result == -1) {
			ERR("auto_probe_connect: asprintf failed (marker %s/%s)",
				m->channel, m->name);
			return;
		}
		/* regexec() returns 0 on match; a non-zero return means
		 * this marker is not auto-probed. */
		if (regexec(&autoprobe_regex, concat_name, 0, NULL, 0)) {
			free(concat_name);
			return;
		}
		free(concat_name);
	}

	/* -EEXIST (already connected) is not an error here. */
	result = ltt_marker_connect(m->channel, m->name, probe_name);
	if (result && result != -EEXIST)
		ERR("ltt_marker_connect (marker = %s/%s, errno = %d)", m->channel, m->name, -result);

	DBG("auto connected marker %s (addr: %p) %s to probe default", m->channel, m, m->name);

}
1221
/* Create this process's named application socket, <user sock dir>/<pid>,
 * and register it with the given epoll instance.
 *
 * Returns the new socket, or NULL on any failure. On success the
 * code deliberately falls through the cleanup labels so the two
 * temporary name strings are always freed.
 */
static struct ustcomm_sock * init_app_socket(int epoll_fd)
{
	char *dir_name, *sock_name;
	int result;
	struct ustcomm_sock *sock = NULL;

	dir_name = ustcomm_user_sock_dir();
	if (!dir_name)
		return NULL;

	result = asprintf(&sock_name, "%s/%d", dir_name, (int)getpid());
	if (result < 0) {
		/* asprintf leaves sock_name undefined on failure, so only
		 * dir_name is freed on this path. */
		ERR("string overflow allocating socket name, "
		    "UST thread bailing");
		goto free_dir_name;
	}

	result = ensure_dir_exists(dir_name);
	if (result == -1) {
		ERR("Unable to create socket directory %s, UST thread bailing",
		    dir_name);
		goto free_sock_name;
	}

	sock = ustcomm_init_named_socket(sock_name, epoll_fd);
	if (!sock) {
		ERR("Error initializing named socket (%s). Check that directory"
		    "exists and that it is writable. UST thread bailing", sock_name);
		goto free_sock_name;
	}

free_sock_name:
	free(sock_name);
free_dir_name:
	free(dir_name);

	return sock;
}
1260
1261 static void __attribute__((constructor)) init()
1262 {
1263 struct timespec ts;
1264 int result;
1265 char* autoprobe_val = NULL;
1266 char* subbuffer_size_val = NULL;
1267 char* subbuffer_count_val = NULL;
1268 unsigned int subbuffer_size;
1269 unsigned int subbuffer_count;
1270 unsigned int power;
1271
1272 /* Assign the pidunique, to be able to differentiate the processes with same
1273 * pid, (before and after an exec).
1274 */
1275 pidunique = make_pidunique();
1276 processpid = getpid();
1277
1278 DBG("Tracectl constructor");
1279
1280 /* Set up epoll */
1281 epoll_fd = epoll_create(MAX_EVENTS);
1282 if (epoll_fd == -1) {
1283 ERR("epoll_create failed, tracing shutting down");
1284 return;
1285 }
1286
1287 /* Create the socket */
1288 listen_sock = init_app_socket(epoll_fd);
1289 if (!listen_sock) {
1290 ERR("failed to create application socket,"
1291 " tracing shutting down");
1292 return;
1293 }
1294
1295 create_listener();
1296
1297 /* Get clock the clock source type */
1298
1299 /* Default clock source */
1300 ust_clock_source = CLOCK_TRACE;
1301 if (clock_gettime(ust_clock_source, &ts) != 0) {
1302 ust_clock_source = CLOCK_MONOTONIC;
1303 DBG("UST traces will not be synchronized with LTTng traces");
1304 }
1305
1306 autoprobe_val = getenv("UST_AUTOPROBE");
1307 if (autoprobe_val) {
1308 struct marker_iter iter;
1309
1310 DBG("Autoprobe enabled.");
1311
1312 /* Ensure markers are initialized */
1313 //init_markers();
1314
1315 /* Ensure marker control is initialized, for the probe */
1316 init_marker_control();
1317
1318 /* first, set the callback that will connect the
1319 * probe on new markers
1320 */
1321 if (autoprobe_val[0] == '/') {
1322 result = regcomp(&autoprobe_regex, autoprobe_val+1, 0);
1323 if (result) {
1324 char regexerr[150];
1325
1326 regerror(result, &autoprobe_regex, regexerr, sizeof(regexerr));
1327 ERR("cannot parse regex %s (%s), will ignore UST_AUTOPROBE", autoprobe_val, regexerr);
1328 /* don't crash the application just for this */
1329 } else {
1330 autoprobe_method = AUTOPROBE_ENABLE_REGEX;
1331 }
1332 } else {
1333 /* just enable all instrumentation */
1334 autoprobe_method = AUTOPROBE_ENABLE_ALL;
1335 }
1336
1337 marker_set_new_marker_cb(auto_probe_connect);
1338
1339 /* Now, connect the probes that were already registered. */
1340 marker_iter_reset(&iter);
1341 marker_iter_start(&iter);
1342
1343 DBG("now iterating on markers already registered");
1344 while (iter.marker) {
1345 DBG("now iterating on marker %s", (*iter.marker)->name);
1346 auto_probe_connect(*iter.marker);
1347 marker_iter_next(&iter);
1348 }
1349 }
1350
1351 if (getenv("UST_OVERWRITE")) {
1352 int val = atoi(getenv("UST_OVERWRITE"));
1353 if (val == 0 || val == 1) {
1354 CMM_STORE_SHARED(ust_channels_overwrite_by_default, val);
1355 } else {
1356 WARN("invalid value for UST_OVERWRITE");
1357 }
1358 }
1359
1360 if (getenv("UST_AUTOCOLLECT")) {
1361 int val = atoi(getenv("UST_AUTOCOLLECT"));
1362 if (val == 0 || val == 1) {
1363 CMM_STORE_SHARED(ust_channels_request_collection_by_default, val);
1364 } else {
1365 WARN("invalid value for UST_AUTOCOLLECT");
1366 }
1367 }
1368
1369 subbuffer_size_val = getenv("UST_SUBBUF_SIZE");
1370 if (subbuffer_size_val) {
1371 sscanf(subbuffer_size_val, "%u", &subbuffer_size);
1372 power = pow2_higher_or_eq(subbuffer_size);
1373 if (power != subbuffer_size)
1374 WARN("using the next power of two for buffer size = %u\n", power);
1375 chan_infos[LTT_CHANNEL_UST].def_subbufsize = power;
1376 }
1377
1378 subbuffer_count_val = getenv("UST_SUBBUF_NUM");
1379 if (subbuffer_count_val) {
1380 sscanf(subbuffer_count_val, "%u", &subbuffer_count);
1381 if (subbuffer_count < 2)
1382 subbuffer_count = 2;
1383 chan_infos[LTT_CHANNEL_UST].def_subbufcount = subbuffer_count;
1384 }
1385
1386 if (getenv("UST_TRACE")) {
1387 char trace_name[] = "auto";
1388 char trace_type[] = "ustrelay";
1389
1390 DBG("starting early tracing");
1391
1392 /* Ensure marker control is initialized */
1393 init_marker_control();
1394
1395 /* Ensure markers are initialized */
1396 init_markers();
1397
1398 /* Ensure buffers are initialized, for the transport to be available.
1399 * We are about to set a trace type and it will fail without this.
1400 */
1401 init_ustrelay_transport();
1402
1403 /* FIXME: When starting early tracing (here), depending on the
1404 * order of constructors, it is very well possible some marker
1405 * sections are not yet registered. Because of this, some
1406 * channels may not be registered. Yet, we are about to ask the
1407 * daemon to collect the channels. Channels which are not yet
1408 * registered will not be collected.
1409 *
1410 * Currently, in LTTng, there is no way to add a channel after
1411 * trace start. The reason for this is that it induces complex
1412 * concurrency issues on the trace structures, which can only
1413 * be resolved using RCU. This has not been done yet. As a
1414 * workaround, we are forcing the registration of the "ust"
1415 * channel here. This is the only channel (apart from metadata)
1416 * that can be reliably used in early tracing.
1417 *
1418 * Non-early tracing does not have this problem and can use
1419 * arbitrary channel names.
1420 */
1421 ltt_channels_register("ust");
1422
1423 result = ltt_trace_setup(trace_name);
1424 if (result < 0) {
1425 ERR("ltt_trace_setup failed");
1426 return;
1427 }
1428
1429 result = ltt_trace_set_type(trace_name, trace_type);
1430 if (result < 0) {
1431 ERR("ltt_trace_set_type failed");
1432 return;
1433 }
1434
1435 result = ltt_trace_alloc(trace_name);
1436 if (result < 0) {
1437 ERR("ltt_trace_alloc failed");
1438 return;
1439 }
1440
1441 result = ltt_trace_start(trace_name);
1442 if (result < 0) {
1443 ERR("ltt_trace_start failed");
1444 return;
1445 }
1446
1447 /* Do this after the trace is started in order to avoid creating confusion
1448 * if the trace fails to start. */
1449 inform_consumer_daemon(trace_name);
1450 }
1451
1452 return;
1453
1454 /* should decrementally destroy stuff if error */
1455
1456 }
1457
1458 /* This is only called if we terminate normally, not with an unhandled signal,
1459 * so we cannot rely on it. However, for now, LTTV requires that the header of
1460 * the last sub-buffer contain a valid end time for the trace. This is done
1461 * automatically only when the trace is properly stopped.
1462 *
1463 * If the traced program crashed, it is always possible to manually add the
1464 * right value in the header, or to open the trace in text mode.
1465 *
1466 * FIXME: Fix LTTV so it doesn't need this.
1467 */
1468
/* Stop and tear down the automatically created trace ("auto").
 *
 * Runs on the destructor path; failures are logged but otherwise
 * ignored, since the process is on its way out regardless.
 */
static void destroy_traces(void)
{
	/* if trace running, finish it */

	DBG("destructor stopping traces");

	if (ltt_trace_stop("auto") == -1) {
		ERR("ltt_trace_stop error");
	}

	if (ltt_trace_destroy("auto", 0) == -1) {
		ERR("ltt_trace_destroy error");
	}
}
1487
1488 static int trace_recording(void)
1489 {
1490 int retval = 0;
1491 struct ust_trace *trace;
1492
1493 ltt_lock_traces();
1494
1495 cds_list_for_each_entry(trace, &ltt_traces.head, list) {
1496 if (trace->active) {
1497 retval = 1;
1498 break;
1499 }
1500 }
1501
1502 ltt_unlock_traces();
1503
1504 return retval;
1505 }
1506
1507 int restarting_usleep(useconds_t usecs)
1508 {
1509 struct timespec tv;
1510 int result;
1511
1512 tv.tv_sec = 0;
1513 tv.tv_nsec = usecs * 1000;
1514
1515 do {
1516 result = nanosleep(&tv, &tv);
1517 } while (result == -1 && errno == EINTR);
1518
1519 return result;
1520 }
1521
1522 static void stop_listener(void)
1523 {
1524 int result;
1525
1526 if (!have_listener)
1527 return;
1528
1529 result = pthread_cancel(listener_thread);
1530 if (result != 0) {
1531 ERR("pthread_cancel: %s", strerror(result));
1532 }
1533 result = pthread_join(listener_thread, NULL);
1534 if (result != 0) {
1535 ERR("pthread_join: %s", strerror(result));
1536 }
1537 }
1538
1539 /* This destructor keeps the process alive for a few seconds in order
1540 * to leave time for ustconsumer to connect to its buffers. This is necessary
1541 * for programs whose execution is very short. It is also useful in all
1542 * programs when tracing is started close to the end of the program
1543 * execution.
1544 *
1545 * FIXME: For now, this only works for the first trace created in a
1546 * process.
1547 */
1548
1549 static void __attribute__((destructor)) keepalive()
1550 {
1551 if (processpid != getpid()) {
1552 return;
1553 }
1554
1555 if (trace_recording() && CMM_LOAD_SHARED(buffers_to_export)) {
1556 int total = 0;
1557 DBG("Keeping process alive for consumer daemon...");
1558 while (CMM_LOAD_SHARED(buffers_to_export)) {
1559 const int interv = 200000;
1560 restarting_usleep(interv);
1561 total += interv;
1562
1563 if (total >= 3000000) {
1564 WARN("non-consumed buffers remaining after wait limit; not waiting anymore");
1565 break;
1566 }
1567 }
1568 DBG("Finally dying...");
1569 }
1570
1571 destroy_traces();
1572
1573 /* Ask the listener to stop and clean up. */
1574 stop_listener();
1575 }
1576
1577 void ust_potential_exec(void)
1578 {
1579 trace_mark(ust, potential_exec, MARK_NOARGS);
1580
1581 DBG("test");
1582
1583 keepalive();
1584 }
1585
1586 /* Notify ust that there was a fork. This needs to be called inside
1587 * the new process, anytime a process whose memory is not shared with
1588 * the parent is created. If this function is not called, the events
1589 * of the new process will not be collected.
1590 *
1591 * Signals should be disabled before the fork and reenabled only after
1592 * this call in order to guarantee tracing is not started before ust_fork()
1593 * sanitizes the new process.
1594 */
1595
1596 static void ust_fork(void)
1597 {
1598 struct ustcomm_sock *sock, *sock_tmp;
1599 struct ust_trace *trace, *trace_tmp;
1600 int result;
1601
1602 /* FIXME: technically, the locks could have been taken before the fork */
1603 DBG("ust: forking");
1604
1605 /* Get the pid of the new process */
1606 processpid = getpid();
1607
1608 /*
1609 * FIXME: This could be prettier, we loop over the list twice and
1610 * following good locking practice should lock around the loop
1611 */
1612 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1613 ltt_trace_stop(trace->trace_name);
1614 }
1615
1616 /* Delete all active connections, but leave them in the epoll set */
1617 cds_list_for_each_entry_safe(sock, sock_tmp, &ust_socks, list) {
1618 ustcomm_del_sock(sock, 1);
1619 }
1620
1621 /*
1622 * FIXME: This could be prettier, we loop over the list twice and
1623 * following good locking practice should lock around the loop
1624 */
1625 cds_list_for_each_entry_safe(trace, trace_tmp, &ltt_traces.head, list) {
1626 ltt_trace_destroy(trace->trace_name, 1);
1627 }
1628
1629 /* Clean up the listener socket and epoll, keeping the socket file */
1630 if (listen_sock) {
1631 ustcomm_del_named_sock(listen_sock, 1);
1632 listen_sock = NULL;
1633 }
1634 close(epoll_fd);
1635
1636 /* Re-start the launch sequence */
1637 CMM_STORE_SHARED(buffers_to_export, 0);
1638 have_listener = 0;
1639
1640 /* Set up epoll */
1641 epoll_fd = epoll_create(MAX_EVENTS);
1642 if (epoll_fd == -1) {
1643 ERR("epoll_create failed, tracing shutting down");
1644 return;
1645 }
1646
1647 /* Create the socket */
1648 listen_sock = init_app_socket(epoll_fd);
1649 if (!listen_sock) {
1650 ERR("failed to create application socket,"
1651 " tracing shutting down");
1652 return;
1653 }
1654 create_listener();
1655 ltt_trace_setup("auto");
1656 result = ltt_trace_set_type("auto", "ustrelay");
1657 if (result < 0) {
1658 ERR("ltt_trace_set_type failed");
1659 return;
1660 }
1661
1662 ltt_trace_alloc("auto");
1663 ltt_trace_start("auto");
1664 inform_consumer_daemon("auto");
1665 }
1666
1667 void ust_before_fork(ust_fork_info_t *fork_info)
1668 {
1669 /* Disable signals. This is to avoid that the child
1670 * intervenes before it is properly setup for tracing. It is
1671 * safer to disable all signals, because then we know we are not
1672 * breaking anything by restoring the original mask.
1673 */
1674 sigset_t all_sigs;
1675 int result;
1676
1677 /* FIXME:
1678 - only do this if tracing is active
1679 */
1680
1681 /* Disable signals */
1682 sigfillset(&all_sigs);
1683 result = sigprocmask(SIG_BLOCK, &all_sigs, &fork_info->orig_sigs);
1684 if (result == -1) {
1685 PERROR("sigprocmask");
1686 return;
1687 }
1688
1689 /*
1690 * Take the fork lock to make sure we are not in the middle of
1691 * something in the listener thread.
1692 */
1693 pthread_mutex_lock(&listener_thread_data_mutex);
1694 /*
1695 * Hold listen_sock_mutex to protect from listen_sock teardown.
1696 */
1697 pthread_mutex_lock(&listen_sock_mutex);
1698 rcu_bp_before_fork();
1699 }
1700
1701 /* Don't call this function directly in a traced program */
1702 static void ust_after_fork_common(ust_fork_info_t *fork_info)
1703 {
1704 int result;
1705
1706 pthread_mutex_unlock(&listen_sock_mutex);
1707 pthread_mutex_unlock(&listener_thread_data_mutex);
1708
1709 /* Restore signals */
1710 result = sigprocmask(SIG_SETMASK, &fork_info->orig_sigs, NULL);
1711 if (result == -1) {
1712 PERROR("sigprocmask");
1713 return;
1714 }
1715 }
1716
1717 void ust_after_fork_parent(ust_fork_info_t *fork_info)
1718 {
1719 rcu_bp_after_fork_parent();
1720 /* Release mutexes and reenable signals */
1721 ust_after_fork_common(fork_info);
1722 }
1723
1724 void ust_after_fork_child(ust_fork_info_t *fork_info)
1725 {
1726 /* Release urcu mutexes */
1727 rcu_bp_after_fork_child();
1728
1729 /* Sanitize the child */
1730 ust_fork();
1731
1732 /* Then release mutexes and reenable signals */
1733 ust_after_fork_common(fork_info);
1734 }
1735
This page took 0.118886 seconds and 4 git commands to generate.