/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <limits.h>
#include <poll.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <urcu/list.h>

#include "libkernelctl.h"
#include "liblttsessiondcomm.h"
#include "kconsumerd.h"
45 /* Init the list of FDs */
46 static struct ltt_kconsumerd_fd_list kconsumerd_fd_list
= {
47 .head
= CDS_LIST_HEAD_INIT(kconsumerd_fd_list
.head
),
50 /* Number of element for the list below. */
51 static unsigned int fds_count
;
53 /* If the local array of FDs needs update in the poll function */
54 static unsigned int update_fd_array
= 1;
56 /* lock the fd array and structures */
57 static pthread_mutex_t kconsumerd_lock_fds
;
59 /* the two threads (receive fd and poll) */
60 static pthread_t threads
[2];
62 /* communication with splice */
63 static int thread_pipe
[2];
65 /* pipe to wake the poll thread when necessary */
66 static int poll_pipe
[2];
68 /* socket to communicate errors with sessiond */
69 static int error_socket
= -1;
71 /* to count the number of time the user pressed ctrl+c */
72 static int sigintcount
= 0;
74 /* flag to inform the polling thread to quit when all fd hung up */
77 /* Argument variables */
80 static int opt_daemon
;
81 static const char *progname
;
82 static char command_sock_path
[PATH_MAX
]; /* Global command socket path */
83 static char error_sock_path
[PATH_MAX
]; /* Global error path */
88 * Remove a fd from the global list protected by a mutex
90 static void del_fd(struct ltt_kconsumerd_fd
*lcf
)
92 DBG("Removing %d", lcf
->consumerd_fd
);
93 pthread_mutex_lock(&kconsumerd_lock_fds
);
94 cds_list_del(&lcf
->list
);
97 DBG("Removed ltt_kconsumerd_fd");
100 close(lcf
->consumerd_fd
);
105 pthread_mutex_unlock(&kconsumerd_lock_fds
);
111 * Cleanup the daemon's socket on exit
113 static void cleanup()
115 struct ltt_kconsumerd_fd
*iter
;
117 /* remove the socket file */
118 unlink(command_sock_path
);
120 /* unblock the threads */
121 WARN("Terminating the threads before exiting");
122 pthread_cancel(threads
[0]);
123 pthread_cancel(threads
[1]);
125 /* close all outfd */
126 cds_list_for_each_entry(iter
, &kconsumerd_fd_list
.head
, list
) {
134 * send return code to ltt-sessiond
136 static int send_error(enum lttcomm_return_code cmd
)
138 if (error_socket
> 0) {
139 return lttcomm_send_unix_sock(error_socket
, &cmd
,
140 sizeof(enum lttcomm_sessiond_command
));
149 * Add a fd to the global list protected by a mutex
151 static int add_fd(struct lttcomm_kconsumerd_msg
*buf
, int consumerd_fd
)
153 struct ltt_kconsumerd_fd
*tmp_fd
;
156 tmp_fd
= malloc(sizeof(struct ltt_kconsumerd_fd
));
157 tmp_fd
->sessiond_fd
= buf
->fd
;
158 tmp_fd
->consumerd_fd
= consumerd_fd
;
159 tmp_fd
->state
= buf
->state
;
160 tmp_fd
->max_sb_size
= buf
->max_sb_size
;
161 strncpy(tmp_fd
->path_name
, buf
->path_name
, PATH_MAX
);
163 /* Opening the tracefile in write mode */
164 DBG("Opening %s for writing", tmp_fd
->path_name
);
165 ret
= open(tmp_fd
->path_name
,
166 O_WRONLY
|O_CREAT
|O_TRUNC
, S_IRWXU
|S_IRWXG
|S_IRWXO
);
168 ERR("Opening %s", tmp_fd
->path_name
);
172 tmp_fd
->out_fd
= ret
;
173 tmp_fd
->out_fd_offset
= 0;
175 DBG("Adding %s (%d, %d, %d)", tmp_fd
->path_name
,
176 tmp_fd
->sessiond_fd
, tmp_fd
->consumerd_fd
, tmp_fd
->out_fd
);
178 pthread_mutex_lock(&kconsumerd_lock_fds
);
179 cds_list_add(&tmp_fd
->list
, &kconsumerd_fd_list
.head
);
181 pthread_mutex_unlock(&kconsumerd_lock_fds
);
191 * Signal handler for the daemon
193 static void sighandler(int sig
)
195 if (sig
== SIGINT
&& sigintcount
++ == 0) {
196 DBG("ignoring first SIGINT");
208 * Setup signal handler for :
209 * SIGINT, SIGTERM, SIGPIPE
211 static int set_signal_handler(void)
217 if ((ret
= sigemptyset(&sigset
)) < 0) {
218 perror("sigemptyset");
222 sa
.sa_handler
= sighandler
;
225 if ((ret
= sigaction(SIGTERM
, &sa
, NULL
)) < 0) {
230 if ((ret
= sigaction(SIGINT
, &sa
, NULL
)) < 0) {
235 if ((ret
= sigaction(SIGPIPE
, &sa
, NULL
)) < 0) {
244 * on_read_subbuffer_mmap
246 * mmap the ring buffer, read it and write the data to the tracefile.
247 * Returns the number of bytes written
249 static int on_read_subbuffer_mmap(struct ltt_kconsumerd_fd
*kconsumerd_fd
,
252 unsigned long mmap_len
;
253 unsigned long mmap_offset
;
254 unsigned long padded_len
;
255 unsigned long padding_len
;
257 char *padding
= NULL
;
259 off_t orig_offset
= kconsumerd_fd
->out_fd_offset
;
260 int fd
= kconsumerd_fd
->consumerd_fd
;
261 int outfd
= kconsumerd_fd
->out_fd
;
263 /* get the padded subbuffer size to know the padding required */
264 ret
= kernctl_get_padded_subbuf_size(fd
, &padded_len
);
267 perror("kernctl_get_padded_subbuf_size");
270 padding_len
= padded_len
- len
;
271 padding
= malloc(padding_len
* sizeof(char));
272 memset(padding
, '\0', padding_len
);
274 /* get the len of the mmap region */
275 ret
= kernctl_get_mmap_len(fd
, &mmap_len
);
278 perror("kernctl_get_mmap_len");
282 /* get the offset inside the fd to mmap */
283 ret
= kernctl_get_mmap_read_offset(fd
, &mmap_offset
);
286 perror("kernctl_get_mmap_read_offset");
290 mmap_base
= mmap(NULL
, mmap_len
, PROT_READ
, MAP_PRIVATE
, fd
, mmap_offset
);
291 if (mmap_base
== MAP_FAILED
) {
292 perror("Error mmaping");
298 ret
= write(outfd
, mmap_base
, len
);
301 } else if (ret
< 0) {
303 perror("Error in file write");
306 /* This won't block, but will start writeout asynchronously */
307 sync_file_range(outfd
, kconsumerd_fd
->out_fd_offset
, ret
,
308 SYNC_FILE_RANGE_WRITE
);
309 kconsumerd_fd
->out_fd_offset
+= ret
;
312 /* once all the data is written, write the padding to disk */
313 ret
= write(outfd
, padding
, padding_len
);
316 perror("Error writing padding to file");
321 * This does a blocking write-and-wait on any page that belongs to the
322 * subbuffer prior to the one we just wrote.
323 * Don't care about error values, as these are just hints and ways to
324 * limit the amount of page cache used.
326 if (orig_offset
>= kconsumerd_fd
->max_sb_size
) {
327 sync_file_range(outfd
, orig_offset
- kconsumerd_fd
->max_sb_size
,
328 kconsumerd_fd
->max_sb_size
,
329 SYNC_FILE_RANGE_WAIT_BEFORE
330 | SYNC_FILE_RANGE_WRITE
331 | SYNC_FILE_RANGE_WAIT_AFTER
);
333 * Give hints to the kernel about how we access the file:
334 * POSIX_FADV_DONTNEED : we won't re-access data in a near
335 * future after we write it.
336 * We need to call fadvise again after the file grows because
337 * the kernel does not seem to apply fadvise to non-existing
339 * Call fadvise _after_ having waited for the page writeback to
340 * complete because the dirty page writeback semantic is not
341 * well defined. So it can be expected to lead to lower
342 * throughput in streaming.
344 posix_fadvise(outfd
, orig_offset
- kconsumerd_fd
->max_sb_size
,
345 kconsumerd_fd
->max_sb_size
, POSIX_FADV_DONTNEED
);
350 if (padding
!= NULL
) {
359 * Splice the data from the ring buffer to the tracefile.
360 * Returns the number of bytes spliced
362 static int on_read_subbuffer(struct ltt_kconsumerd_fd
*kconsumerd_fd
,
367 off_t orig_offset
= kconsumerd_fd
->out_fd_offset
;
368 int fd
= kconsumerd_fd
->consumerd_fd
;
369 int outfd
= kconsumerd_fd
->out_fd
;
372 DBG("splice chan to pipe offset %lu (fd : %d)",
373 (unsigned long)offset
, fd
);
374 ret
= splice(fd
, &offset
, thread_pipe
[1], NULL
, len
,
375 SPLICE_F_MOVE
| SPLICE_F_MORE
);
376 DBG("splice chan to pipe ret %ld", ret
);
379 perror("Error in relay splice");
383 ret
= splice(thread_pipe
[0], NULL
, outfd
, NULL
, ret
,
384 SPLICE_F_MOVE
| SPLICE_F_MORE
);
385 DBG("splice pipe to file %ld", ret
);
388 perror("Error in file splice");
394 /* This won't block, but will start writeout asynchronously */
395 sync_file_range(outfd
, kconsumerd_fd
->out_fd_offset
, ret
,
396 SYNC_FILE_RANGE_WRITE
);
397 kconsumerd_fd
->out_fd_offset
+= ret
;
401 * This does a blocking write-and-wait on any page that belongs to the
402 * subbuffer prior to the one we just wrote.
403 * Don't care about error values, as these are just hints and ways to
404 * limit the amount of page cache used.
406 if (orig_offset
>= kconsumerd_fd
->max_sb_size
) {
407 sync_file_range(outfd
, orig_offset
- kconsumerd_fd
->max_sb_size
,
408 kconsumerd_fd
->max_sb_size
,
409 SYNC_FILE_RANGE_WAIT_BEFORE
410 | SYNC_FILE_RANGE_WRITE
411 | SYNC_FILE_RANGE_WAIT_AFTER
);
413 * Give hints to the kernel about how we access the file:
414 * POSIX_FADV_DONTNEED : we won't re-access data in a near
415 * future after we write it.
416 * We need to call fadvise again after the file grows because
417 * the kernel does not seem to apply fadvise to non-existing
419 * Call fadvise _after_ having waited for the page writeback to
420 * complete because the dirty page writeback semantic is not
421 * well defined. So it can be expected to lead to lower
422 * throughput in streaming.
424 posix_fadvise(outfd
, orig_offset
- kconsumerd_fd
->max_sb_size
,
425 kconsumerd_fd
->max_sb_size
, POSIX_FADV_DONTNEED
);
430 /* send the appropriate error description to sessiond */
433 send_error(KCONSUMERD_SPLICE_EBADF
);
436 send_error(KCONSUMERD_SPLICE_EINVAL
);
439 send_error(KCONSUMERD_SPLICE_ENOMEM
);
442 send_error(KCONSUMERD_SPLICE_ESPIPE
);
453 * Consume data on a file descriptor and write it on a trace file
455 static int read_subbuffer(struct ltt_kconsumerd_fd
*kconsumerd_fd
)
460 int infd
= kconsumerd_fd
->consumerd_fd
;
462 DBG("In read_subbuffer (infd : %d)", infd
);
463 /* Get the next subbuffer */
464 err
= kernctl_get_next_subbuf(infd
);
467 perror("Reserving sub buffer failed (everything is normal, "
468 "it is due to concurrency)");
472 switch (DEFAULT_KERNEL_CHANNEL_OUTPUT
) {
473 case LTTNG_KERNEL_SPLICE
:
474 /* read the whole subbuffer */
475 err
= kernctl_get_padded_subbuf_size(infd
, &len
);
478 perror("Getting sub-buffer len failed.");
482 /* splice the subbuffer to the tracefile */
483 ret
= on_read_subbuffer(kconsumerd_fd
, len
);
486 * display the error but continue processing to try
487 * to release the subbuffer
489 ERR("Error splicing to tracefile");
492 case LTTNG_KERNEL_MMAP
:
493 /* read the used subbuffer size */
494 err
= kernctl_get_subbuf_size(infd
, &len
);
497 perror("Getting sub-buffer len failed.");
501 /* write the subbuffer to the tracefile */
502 ret
= on_read_subbuffer_mmap(kconsumerd_fd
, len
);
505 * display the error but continue processing to try
506 * to release the subbuffer
508 ERR("Error writing to tracefile");
512 ERR("Unknown output method");
516 err
= kernctl_put_next_subbuf(infd
);
519 if (errno
== EFAULT
) {
520 perror("Error in unreserving sub buffer\n");
521 } else if (errno
== EIO
) {
522 /* Should never happen with newer LTTng versions */
523 perror("Reader has been pushed by the writer, last sub-buffer corrupted.");
535 * Update a fd according to what we just received
537 static void change_fd_state(int sessiond_fd
,
538 enum kconsumerd_fd_state state
)
540 struct ltt_kconsumerd_fd
*iter
;
541 cds_list_for_each_entry(iter
, &kconsumerd_fd_list
.head
, list
) {
542 if (iter
->sessiond_fd
== sessiond_fd
) {
552 * Receives an array of file descriptors and the associated
553 * structures describing each fd (path name).
554 * Returns the size of received data
556 static int consumerd_recv_fd(int sfd
, int size
,
557 enum kconsumerd_command cmd_type
)
561 int ret
= 0, i
, tmp2
;
562 struct cmsghdr
*cmsg
;
564 char recv_fd
[CMSG_SPACE(sizeof(int))];
565 struct lttcomm_kconsumerd_msg lkm
;
567 /* the number of fds we are about to receive */
568 nb_fd
= size
/ sizeof(struct lttcomm_kconsumerd_msg
);
570 for (i
= 0; i
< nb_fd
; i
++) {
571 memset(&msg
, 0, sizeof(msg
));
573 /* Prepare to receive the structures */
574 iov
[0].iov_base
= &lkm
;
575 iov
[0].iov_len
= sizeof(lkm
);
579 msg
.msg_control
= recv_fd
;
580 msg
.msg_controllen
= sizeof(recv_fd
);
582 DBG("Waiting to receive fd");
583 if ((ret
= recvmsg(sfd
, &msg
, 0)) < 0) {
588 if (ret
!= (size
/ nb_fd
)) {
589 ERR("Received only %d, expected %d", ret
, size
);
590 send_error(KCONSUMERD_ERROR_RECV_FD
);
594 cmsg
= CMSG_FIRSTHDR(&msg
);
596 ERR("Invalid control message header");
598 send_error(KCONSUMERD_ERROR_RECV_FD
);
602 /* if we received fds */
603 if (cmsg
->cmsg_level
== SOL_SOCKET
&& cmsg
->cmsg_type
== SCM_RIGHTS
) {
606 DBG("add_fd %s (%d)", lkm
.path_name
, (CMSG_DATA(cmsg
)[0]));
607 ret
= add_fd(&lkm
, (CMSG_DATA(cmsg
)[0]));
609 send_error(KCONSUMERD_OUTFD_ERROR
);
614 change_fd_state(lkm
.fd
, lkm
.state
);
619 /* flag to tell the polling thread to update its fd array */
621 /* signal the poll thread */
622 tmp2
= write(poll_pipe
[1], "4", 1);
624 ERR("Didn't received any fd");
625 send_error(KCONSUMERD_ERROR_RECV_FD
);
632 DBG("consumerd_recv_fd thread exiting");
639 * This thread listens on the consumerd socket and
640 * receives the file descriptors from ltt-sessiond
642 static void *thread_receive_fds(void *data
)
644 int sock
, client_socket
, ret
;
645 struct lttcomm_kconsumerd_header tmp
;
647 DBG("Creating command socket %s", command_sock_path
);
648 unlink(command_sock_path
);
649 client_socket
= lttcomm_create_unix_sock(command_sock_path
);
650 if (client_socket
< 0) {
651 ERR("Cannot create command socket");
655 ret
= lttcomm_listen_unix_sock(client_socket
);
660 DBG("Sending ready command to ltt-sessiond");
661 ret
= send_error(KCONSUMERD_COMMAND_SOCK_READY
);
663 ERR("Error sending ready command to ltt-sessiond");
667 /* Blocking call, waiting for transmission */
668 sock
= lttcomm_accept_unix_sock(client_socket
);
674 /* We first get the number of fd we are about to receive */
675 ret
= lttcomm_recv_unix_sock(sock
, &tmp
,
676 sizeof(struct lttcomm_kconsumerd_header
));
678 ERR("Communication interrupted on command socket");
681 if (tmp
.cmd_type
== STOP
) {
682 DBG("Received STOP command");
685 /* we received a command to add or update fds */
686 ret
= consumerd_recv_fd(sock
, tmp
.payload_size
, tmp
.cmd_type
);
688 ERR("Receiving the FD, exiting");
694 DBG("thread_receive_fds exiting");
696 ret
= write(poll_pipe
[1], "4", 1);
698 perror("poll pipe write");
706 * Allocate the pollfd structure and the local view of the out fds
707 * to avoid doing a lookup in the linked list and concurrency issues
708 * when writing is needed.
709 * Returns the number of fds in the structures
711 static int update_poll_array(struct pollfd
**pollfd
,
712 struct ltt_kconsumerd_fd
**local_kconsumerd_fd
)
714 struct ltt_kconsumerd_fd
*iter
;
718 DBG("Updating poll fd array");
719 pthread_mutex_lock(&kconsumerd_lock_fds
);
721 cds_list_for_each_entry(iter
, &kconsumerd_fd_list
.head
, list
) {
722 DBG("Inside for each");
723 if (iter
->state
== ACTIVE_FD
) {
724 DBG("Active FD %d", iter
->consumerd_fd
);
725 (*pollfd
)[i
].fd
= iter
->consumerd_fd
;
726 (*pollfd
)[i
].events
= POLLIN
| POLLPRI
;
727 local_kconsumerd_fd
[i
] = iter
;
732 * insert the poll_pipe at the end of the array and don't increment i
733 * so nb_fd is the number of real FD
735 (*pollfd
)[i
].fd
= poll_pipe
[0];
736 (*pollfd
)[i
].events
= POLLIN
;
739 pthread_mutex_unlock(&kconsumerd_lock_fds
);
747 * This thread polls the fds in the ltt_fd_list to consume the data
748 * and write it to tracefile if necessary.
750 static void *thread_poll_fds(void *data
)
752 int num_rdy
, num_hup
, high_prio
, ret
, i
;
753 struct pollfd
*pollfd
= NULL
;
754 /* local view of the fds */
755 struct ltt_kconsumerd_fd
**local_kconsumerd_fd
= NULL
;
756 /* local view of fds_count */
761 ret
= pipe(thread_pipe
);
763 perror("Error creating pipe");
767 local_kconsumerd_fd
= malloc(sizeof(struct ltt_kconsumerd_fd
));
774 * the ltt_fd_list has been updated, we need to update our
775 * local array as well
777 if (update_fd_array
== 1) {
778 if (pollfd
!= NULL
) {
782 if (local_kconsumerd_fd
!= NULL
) {
783 free(local_kconsumerd_fd
);
784 local_kconsumerd_fd
= NULL
;
786 /* allocate for all fds + 1 for the poll_pipe */
787 pollfd
= malloc((fds_count
+ 1) * sizeof(struct pollfd
));
788 if (pollfd
== NULL
) {
789 perror("pollfd malloc");
792 /* allocate for all fds + 1 for the poll_pipe */
793 local_kconsumerd_fd
= malloc((fds_count
+ 1) * sizeof(struct ltt_kconsumerd_fd
));
794 if (local_kconsumerd_fd
== NULL
) {
795 perror("local_kconsumerd_fd malloc");
799 ret
= update_poll_array(&pollfd
, local_kconsumerd_fd
);
801 ERR("Error in allocating pollfd or local_outfds");
802 send_error(KCONSUMERD_POLL_ERROR
);
808 /* poll on the array of fds */
809 DBG("polling on %d fd", nb_fd
+ 1);
810 num_rdy
= poll(pollfd
, nb_fd
+ 1, -1);
811 DBG("poll num_rdy : %d", num_rdy
);
813 perror("Poll error");
814 send_error(KCONSUMERD_POLL_ERROR
);
818 /* No FDs and quit, cleanup the thread */
819 if (nb_fd
== 0 && quit
== 1) {
824 * if only the poll_pipe triggered poll to return just return to the
825 * beginning of the loop to update the array
827 if (num_rdy
== 1 && pollfd
[nb_fd
].revents
== POLLIN
) {
828 DBG("poll_pipe wake up");
829 tmp2
= read(poll_pipe
[0], &tmp
, 1);
833 /* Take care of high priority channels first. */
834 for (i
= 0; i
< nb_fd
; i
++) {
835 switch(pollfd
[i
].revents
) {
837 ERR("Error returned in polling fd %d.", pollfd
[i
].fd
);
838 del_fd(local_kconsumerd_fd
[i
]);
843 ERR("Polling fd %d tells it has hung up.", pollfd
[i
].fd
);
844 del_fd(local_kconsumerd_fd
[i
]);
849 ERR("Polling fd %d tells fd is not open.", pollfd
[i
].fd
);
850 del_fd(local_kconsumerd_fd
[i
]);
855 DBG("Urgent read on fd %d", pollfd
[i
].fd
);
857 ret
= read_subbuffer(local_kconsumerd_fd
[i
]);
858 /* it's ok to have an unavailable sub-buffer (FIXME : is it ?) */
866 /* If every buffer FD has hung up, we end the read loop here */
867 if (nb_fd
> 0 && num_hup
== nb_fd
) {
868 DBG("every buffer FD has hung up\n");
875 /* Take care of low priority channels. */
876 if (high_prio
== 0) {
877 for (i
= 0; i
< nb_fd
; i
++) {
878 if (pollfd
[i
].revents
== POLLIN
) {
879 DBG("Normal read on fd %d", pollfd
[i
].fd
);
880 ret
= read_subbuffer(local_kconsumerd_fd
[i
]);
881 /* it's ok to have an unavailable subbuffer (FIXME : is it ?) */
890 DBG("polling thread exiting");
891 if (pollfd
!= NULL
) {
895 if (local_kconsumerd_fd
!= NULL
) {
896 free(local_kconsumerd_fd
);
897 local_kconsumerd_fd
= NULL
;
904 * usage function on stderr
906 static void usage(void)
908 fprintf(stderr
, "Usage: %s OPTIONS\n\nOptions:\n", progname
);
909 fprintf(stderr
, " -h, --help "
910 "Display this usage.\n");
911 fprintf(stderr
, " -c, --kconsumerd-cmd-sock PATH "
912 "Specify path for the command socket\n");
913 fprintf(stderr
, " -e, --kconsumerd-err-sock PATH "
914 "Specify path for the error socket\n");
915 fprintf(stderr
, " -d, --daemonize "
916 "Start as a daemon.\n");
917 fprintf(stderr
, " -q, --quiet "
918 "No output at all.\n");
919 fprintf(stderr
, " -v, --verbose "
920 "Verbose mode. Activate DBG() macro.\n");
921 fprintf(stderr
, " -V, --version "
922 "Show version number.\n");
926 * daemon argument parsing
928 static void parse_args(int argc
, char **argv
)
932 static struct option long_options
[] = {
933 { "kconsumerd-cmd-sock", 1, 0, 'c' },
934 { "kconsumerd-err-sock", 1, 0, 'e' },
935 { "daemonize", 0, 0, 'd' },
936 { "help", 0, 0, 'h' },
937 { "quiet", 0, 0, 'q' },
938 { "verbose", 0, 0, 'v' },
939 { "version", 0, 0, 'V' },
944 int option_index
= 0;
945 c
= getopt_long(argc
, argv
, "dhqvV" "c:e:", long_options
, &option_index
);
952 fprintf(stderr
, "option %s", long_options
[option_index
].name
);
954 fprintf(stderr
, " with arg %s\n", optarg
);
958 snprintf(command_sock_path
, PATH_MAX
, "%s", optarg
);
961 snprintf(error_sock_path
, PATH_MAX
, "%s", optarg
);
976 fprintf(stdout
, "%s\n", VERSION
);
989 int main(int argc
, char **argv
)
995 /* Parse arguments */
997 parse_args(argc
, argv
);
1008 if (strlen(command_sock_path
) == 0) {
1009 snprintf(command_sock_path
, PATH_MAX
,
1010 KCONSUMERD_CMD_SOCK_PATH
);
1012 if (strlen(error_sock_path
) == 0) {
1013 snprintf(error_sock_path
, PATH_MAX
,
1014 KCONSUMERD_ERR_SOCK_PATH
);
1017 if (set_signal_handler() < 0) {
1021 /* create the pipe to wake to polling thread when needed */
1022 ret
= pipe(poll_pipe
);
1024 perror("Error creating poll pipe");
1028 /* Connect to the socket created by ltt-sessiond to report errors */
1029 DBG("Connecting to error socket %s", error_sock_path
);
1030 error_socket
= lttcomm_connect_unix_sock(error_sock_path
);
1031 /* not a fatal error, but all communication with ltt-sessiond will fail */
1032 if (error_socket
< 0) {
1033 WARN("Cannot connect to error socket, is ltt-sessiond started ?");
1036 /* Create the thread to manage the receive of fd */
1037 ret
= pthread_create(&threads
[0], NULL
, thread_receive_fds
, (void *) NULL
);
1039 perror("pthread_create");
1043 /* Create thread to manage the polling/writing of traces */
1044 ret
= pthread_create(&threads
[1], NULL
, thread_poll_fds
, (void *) NULL
);
1046 perror("pthread_create");
1050 for (i
= 0; i
< 2; i
++) {
1051 ret
= pthread_join(threads
[i
], &status
);
1053 perror("pthread_join");
1058 send_error(KCONSUMERD_EXIT_SUCCESS
);
1063 send_error(KCONSUMERD_EXIT_FAILURE
);