tracefile.c: Seek fix
1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <glib/gprintf.h>
36 #include <malloc.h>
37 #include <sys/mman.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <inttypes.h>
41
42 // For realpath
43 #include <limits.h>
44 #include <stdlib.h>
45
46
47 #include <ltt/ltt.h>
48 #include "ltt-private.h"
49 #include <ltt/trace.h>
50 #include <ltt/event.h>
51 #include <ltt/ltt-types.h>
52 #include <ltt/marker.h>
53
54 #define DEFAULT_N_BLOCKS 32
55
56 /* from marker.c */
57 extern long marker_update_fields_offsets(struct marker_info *info, const char *data);
58 extern void marker_update_event_fields_offsets(GArray *fields_offsets,
59 struct marker_info *info);
60
61 /* Tracefile names used in this file */
62
63 GQuark LTT_TRACEFILE_NAME_METADATA;
64
65 #ifndef g_open
66 #define g_open open
67 #endif
68
69
70 #define __UNUSED__ __attribute__((__unused__))
71
72 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
73
74 #ifndef g_debug
75 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
76 #endif
77
78 #define g_close close
79
80 /* Those macros must be called from within a function where page_size is a known
81 * variable */
82 #define PAGE_MASK (~(page_size-1))
83 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
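/* Illustrative arithmetic, assuming page_size == 4096 :
 *   PAGE_ALIGN(1)    == 4096
 *   PAGE_ALIGN(4096) == 4096
 *   PAGE_ALIGN(5000) == 8192
 * i.e. PAGE_ALIGN rounds a length up to the next page boundary, which is what
 * the mmap()/munmap() calls below rely on. */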
84
      85 /* set the offset of the fields belonging to the event,
      86    needs the architecture information */
87 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
88 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
89
      90 /* map a fixed-size block of information from the file (fd) */
91 static gint map_block(LttTracefile * tf, guint block_num);
92
93 /* calculate nsec per cycles for current block */
94 #if 0
95 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
96 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
97 #endif //0
98
99 /* go to the next event */
100 static int ltt_seek_next_event(LttTracefile *tf);
101
102 static int open_tracefiles(LttTrace *trace, gchar *root_path,
103 gchar *relative_path);
104 static int ltt_process_metadata_tracefile(LttTracefile *tf);
105 static void ltt_tracefile_time_span_get(LttTracefile *tf,
106 LttTime *start, LttTime *end);
107 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
108 static gint map_block(LttTracefile * tf, guint block_num);
109 static void ltt_update_event_size(LttTracefile *tf);
110
111 /* Enable event debugging */
112 static int a_event_debug = 0;
113
114 void ltt_event_debug(int state)
115 {
116 a_event_debug = state;
117 }
118
119 /* trace can be NULL
120 *
121 * Return value : 0 success, 1 bad tracefile
122 */
123 static int parse_trace_header(ltt_subbuffer_header_t *header,
124 LttTracefile *tf, LttTrace *t)
125 {
126 if (header->magic_number == LTT_MAGIC_NUMBER)
127 tf->reverse_bo = 0;
128 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
129 tf->reverse_bo = 1;
130 else /* invalid magic number, bad tracefile ! */
131 return 1;
132
133 if(t) {
134 t->ltt_major_version = header->major_version;
135 t->ltt_minor_version = header->minor_version;
136 t->arch_size = header->arch_size;
137 }
138 tf->alignment = header->alignment;
139
140 /* Get float byte order : might be different from int byte order
141 * (or is set to 0 if the trace has no float (kernel trace)) */
142 tf->float_word_order = 0;
143
144 switch(header->major_version) {
145 case 0:
146 case 1:
147 g_warning("Unsupported trace version : %hhu.%hhu",
148 header->major_version, header->minor_version);
149 return 1;
150 break;
151 case 2:
152 switch(header->minor_version) {
153 case 6:
154 {
     155         struct ltt_subbuffer_header_2_6 *vheader =
     		(struct ltt_subbuffer_header_2_6 *)header;
156 tf->buffer_header_size = ltt_subbuffer_header_size();
157 tf->tscbits = 27;
158 tf->eventbits = 5;
159 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
160 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
161
162 if(t) {
163 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
164 &vheader->start_freq);
165 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
166 &vheader->freq_scale);
167 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
168 &vheader->cycle_count_begin);
169 t->start_monotonic = 0;
170 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
171 &vheader->start_time_sec);
172 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
173 &vheader->start_time_usec);
174 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
175
176 t->start_time_from_tsc =
177 ltt_time_from_uint64(tsc_to_uint64(t->freq_scale,
178 t->start_freq, t->start_tsc));
179 }
180 }
181 break;
182 default:
183 g_warning("Unsupported trace version : %hhu.%hhu",
184 header->major_version, header->minor_version);
185 return 1;
186 }
187 break;
188 default:
189 g_warning("Unsupported trace version : %hhu.%hhu",
190 header->major_version, header->minor_version);
191 return 1;
192 }
193 return 0;
194 }
195
196 int get_block_offset_size(LttTracefile *tf, guint block_num,
197 uint64_t *offset, uint32_t *size)
198 {
199 uint64_t offa, offb;
200
201 if (unlikely(block_num >= tf->num_blocks))
202 return -1;
203
204 offa = g_array_index(tf->buf_index, uint64_t, block_num);
205 if (likely(block_num < tf->num_blocks - 1))
206 offb = g_array_index(tf->buf_index, uint64_t, block_num + 1);
207 else
208 offb = tf->file_size;
209 *offset = offa;
210 *size = offb - offa;
211 return 0;
212 }
213
214 int ltt_trace_create_block_index(LttTracefile *tf)
215 {
216 int page_size = getpagesize();
217 uint64_t offset = 0;
218 unsigned long i = 0;
219 unsigned int header_map_size = PAGE_ALIGN(ltt_subbuffer_header_size());
220
221 tf->buf_index = g_array_sized_new(FALSE, TRUE, sizeof(uint64_t),
222 DEFAULT_N_BLOCKS);
223
224 g_assert(tf->buf_index->len == i);
225
226 while (offset < tf->file_size) {
227 ltt_subbuffer_header_t *header;
228 uint64_t *off;
229
230 tf->buf_index = g_array_set_size(tf->buf_index, i + 1);
231 off = &g_array_index(tf->buf_index, uint64_t, i);
232 *off = offset;
233
234 /* map block header */
235 header = mmap(0, header_map_size, PROT_READ,
236 MAP_PRIVATE, tf->fd, (off_t)offset);
237 if(header == MAP_FAILED) {
238 perror("Error in allocating memory for buffer of tracefile");
239 return -1;
240 }
241
242 /* read len, offset += len */
243 offset += ltt_get_uint32(LTT_GET_BO(tf), &header->sb_size);
244
245 /* unmap block header */
246 if(munmap(header, header_map_size)) {
247 g_warning("unmap size : %u\n", header_map_size);
248 perror("munmap error");
249 return -1;
250 }
251 ++i;
252 }
253 tf->num_blocks = i;
254
255 return 0;
256 }
257
258 /*****************************************************************************
259 *Function name
260 * ltt_tracefile_open : open a trace file, construct a LttTracefile
261 *Input params
262 * t : the trace containing the tracefile
263 * fileName : path name of the trace file
264 * tf : the tracefile structure
265 *Return value
266 * : 0 for success, -1 otherwise.
267 ****************************************************************************/
268
269 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
270 {
271 struct stat lTDFStat; /* Trace data file status */
272 ltt_subbuffer_header_t *header;
273 int page_size = getpagesize();
274
275 //open the file
276 tf->long_name = g_quark_from_string(fileName);
277 tf->trace = t;
278 tf->fd = open(fileName, O_RDONLY);
279 tf->buf_index = NULL;
280 if(tf->fd < 0){
281 g_warning("Unable to open input data file %s\n", fileName);
282 goto end;
283 }
284
285 // Get the file's status
286 if(fstat(tf->fd, &lTDFStat) < 0){
287 g_warning("Unable to get the status of the input data file %s\n", fileName);
288 goto close_file;
289 }
290
291 // Is the file large enough to contain a trace
292 if(lTDFStat.st_size <
293 (off_t)(ltt_subbuffer_header_size())){
294 g_print("The input data file %s does not contain a trace\n", fileName);
295 goto close_file;
296 }
297
298 /* Temporarily map the buffer start header to get trace information */
299 /* Multiple of pages aligned head */
300 tf->buffer.head = mmap(0,
301 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
302 MAP_PRIVATE, tf->fd, 0);
303 if(tf->buffer.head == MAP_FAILED) {
304 perror("Error in allocating memory for buffer of tracefile");
305 goto close_file;
306 }
307 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
308
309 header = (ltt_subbuffer_header_t *)tf->buffer.head;
310
311 if(parse_trace_header(header, tf, NULL)) {
312 g_warning("parse_trace_header error");
313 goto unmap_file;
314 }
315
316 //store the size of the file
317 tf->file_size = lTDFStat.st_size;
318 tf->events_lost = 0;
319 tf->subbuf_corrupt = 0;
320
321 if(munmap(tf->buffer.head,
322 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
323 g_warning("unmap size : %zu\n",
324 PAGE_ALIGN(ltt_subbuffer_header_size()));
325 perror("munmap error");
326 g_assert(0);
327 }
328 tf->buffer.head = NULL;
329
330 /* Create block index */
331 ltt_trace_create_block_index(tf);
332
333 //read the first block
334 if(map_block(tf,0)) {
335 perror("Cannot map block for tracefile");
336 goto close_file;
337 }
338
339 /* Create fields offset table */
340 tf->event.fields_offsets = g_array_sized_new(FALSE, FALSE,
341 sizeof(struct LttField), 1);
342 if (!tf->event.fields_offsets)
343 goto close_file;
344
345 return 0;
346
347 /* Error */
348 unmap_file:
349 if(munmap(tf->buffer.head,
350 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
351 g_warning("unmap size : %zu\n",
352 PAGE_ALIGN(ltt_subbuffer_header_size()));
353 perror("munmap error");
354 g_assert(0);
355 }
356 close_file:
357 close(tf->fd);
358 end:
359 if (tf->buf_index)
360 g_array_free(tf->buf_index, TRUE);
361 return -1;
362 }
363
364
365 /*****************************************************************************
366 *Function name
367 * ltt_tracefile_close: close a trace file,
368 *Input params
369 * t : tracefile which will be closed
370 ****************************************************************************/
371
372 static void ltt_tracefile_close(LttTracefile *t)
373 {
374 int page_size = getpagesize();
375
376 if(t->buffer.head != NULL)
377 if(munmap(t->buffer.head, PAGE_ALIGN(t->buffer.size))) {
378 g_warning("unmap size : %u\n",
379 PAGE_ALIGN(t->buffer.size));
380 perror("munmap error");
381 g_assert(0);
382 }
383
384 close(t->fd);
385 if (t->buf_index)
386 g_array_free(t->buf_index, TRUE);
387 g_array_free(t->event.fields_offsets, TRUE);
388 }
389
390 /****************************************************************************
391 * get_absolute_pathname
392 *
393 * return the unique pathname in the system
394 *
395 * MD : Fixed this function so it uses realpath, dealing well with
396 * forgotten cases (.. were not used correctly before).
397 *
398 ****************************************************************************/
399 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
400 {
401 abs_pathname[0] = '\0';
402
403 if (realpath(pathname, abs_pathname) != NULL)
404 return;
405 else
406 {
407 /* error, return the original path unmodified */
408 strcpy(abs_pathname, pathname);
409 return;
410 }
411 return;
412 }
413
414 /* Search for something like : .*_.*
415 *
416 * The left side is the name, the right side is the number.
417 * Exclude leading /.
418 * Exclude flight- prefix.
419 */
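/* Hypothetical examples of names accepted by the parser below (illustrative
 * only, not taken from a real trace) :
 *   "kernel_0"          -> name "kernel", num 0
 *   "flight-kernel_2"   -> name "kernel", num 2 (flight- prefix stripped)
 *   "a/b/app-1234.56.7" -> userspace tracefile : tid 1234, pgid 56,
 *                          creation 7, num 0 (unknown cpu)
 */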
420
421 static int get_tracefile_name_number(gchar *raw_name,
422 GQuark *name,
423 guint *num,
424 gulong *tid,
425 gulong *pgid,
426 guint64 *creation)
427 {
428 guint raw_name_len = strlen(raw_name);
429 gchar char_name[PATH_MAX];
430 int i;
431 int underscore_pos;
432 long int cpu_num;
433 gchar *endptr;
434 gchar *tmpptr;
435
436 /* skip leading / */
437 for(i = 0; i < raw_name_len-1;i++) {
438 if(raw_name[i] != '/')
439 break;
440 }
441 raw_name = &raw_name[i];
442 raw_name_len = strlen(raw_name);
443
444 for(i=raw_name_len-1;i>=0;i--) {
445 if(raw_name[i] == '_') break;
446 }
447 if(i==-1) { /* Either not found or name length is 0 */
448 /* This is a userspace tracefile */
449 strncpy(char_name, raw_name, raw_name_len);
450 char_name[raw_name_len] = '\0';
451 *name = g_quark_from_string(char_name);
452 *num = 0; /* unknown cpu */
453 for(i=0;i<raw_name_len;i++) {
454 if(raw_name[i] == '/') {
455 break;
456 }
457 }
458 i++;
459 for(;i<raw_name_len;i++) {
460 if(raw_name[i] == '/') {
461 break;
462 }
463 }
464 i++;
465 for(;i<raw_name_len;i++) {
466 if(raw_name[i] == '-') {
467 break;
468 }
469 }
470 if(i == raw_name_len) return -1;
471 i++;
472 tmpptr = &raw_name[i];
473 for(;i<raw_name_len;i++) {
474 if(raw_name[i] == '.') {
475 raw_name[i] = ' ';
476 break;
477 }
478 }
479 *tid = strtoul(tmpptr, &endptr, 10);
480 if(endptr == tmpptr)
481 return -1; /* No digit */
482 if(*tid == ULONG_MAX)
483 return -1; /* underflow / overflow */
484 i++;
485 tmpptr = &raw_name[i];
486 for(;i<raw_name_len;i++) {
487 if(raw_name[i] == '.') {
488 raw_name[i] = ' ';
489 break;
490 }
491 }
492 *pgid = strtoul(tmpptr, &endptr, 10);
493 if(endptr == tmpptr)
494 return -1; /* No digit */
495 if(*pgid == ULONG_MAX)
496 return -1; /* underflow / overflow */
497 i++;
498 tmpptr = &raw_name[i];
499 *creation = strtoull(tmpptr, &endptr, 10);
500 if(endptr == tmpptr)
501 return -1; /* No digit */
502 if(*creation == G_MAXUINT64)
503 return -1; /* underflow / overflow */
504 } else {
505 underscore_pos = i;
506
507 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
508
509 if(endptr == raw_name+underscore_pos+1)
510 return -1; /* No digit */
511 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
512 return -1; /* underflow / overflow */
513
514 if (!strncmp(raw_name, "flight-", sizeof("flight-") - 1)) {
515 raw_name += sizeof("flight-") - 1;
516 underscore_pos -= sizeof("flight-") - 1;
517 }
518 strncpy(char_name, raw_name, underscore_pos);
519 char_name[underscore_pos] = '\0';
520 *name = g_quark_from_string(char_name);
521 *num = cpu_num;
522 }
523
524
525 return 0;
526 }
527
528
529 GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
530 {
531 return &trace->tracefiles;
532 }
533
534
535 void compute_tracefile_group(GQuark key_id,
536 GArray *group,
537 struct compute_tracefile_group_args *args)
538 {
539 unsigned int i;
540 LttTracefile *tf;
541
542 for(i=0; i<group->len; i++) {
543 tf = &g_array_index (group, LttTracefile, i);
544 if(tf->cpu_online)
545 args->func(tf, args->func_args);
546 }
547 }
548
549
550 static void ltt_tracefile_group_destroy(gpointer data)
551 {
552 GArray *group = (GArray *)data;
553 unsigned int i;
554 LttTracefile *tf;
555
556 if (group->len > 0)
557 destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
558 for(i=0; i<group->len; i++) {
559 tf = &g_array_index (group, LttTracefile, i);
560 if(tf->cpu_online)
561 ltt_tracefile_close(tf);
562 }
563 g_array_free(group, TRUE);
564 }
565
566 static __attribute__ ((__unused__)) gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
567 {
568 GArray *group = (GArray *)data;
569 unsigned int i;
570 LttTracefile *tf;
571
572 for(i=0; i<group->len; i++) {
573 tf = &g_array_index (group, LttTracefile, i);
574 if(tf->cpu_online)
575 return 1;
576 }
577 return 0;
578 }
579
580
     581 /* Open each tracefile under a specific directory. Put them in a
     582  * GData : this permits accessing them by their tracefile group pathname,
     583  * i.e. the control/modules tracefile group is accessed with the key
     584  * "control/module".
585 *
586 * relative path is the path relative to the trace root
587 * root path is the full path
588 *
589 * A tracefile group is simply an array where all the per cpu tracefiles sit.
590 */
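/* A minimal, hypothetical lookup sketch (the group name "kernel" and the
 * variable cpu are only examples) showing how a group built here is accessed
 * afterwards :
 *
 *   LttTracefile *tf = NULL;
 *   GArray *group = g_datalist_id_get_data(&trace->tracefiles,
 *                                          g_quark_from_string("kernel"));
 *   if (group && cpu < group->len)
 *     tf = &g_array_index(group, LttTracefile, cpu);
 */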
591
592 static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
593 {
594 DIR *dir = opendir(root_path);
595 struct dirent *entry;
596 struct stat stat_buf;
597 int ret, i;
598 struct marker_data *mdata;
599
600 gchar path[PATH_MAX];
601 int path_len;
602 gchar *path_ptr;
603
604 int rel_path_len;
605 gchar rel_path[PATH_MAX];
606 gchar *rel_path_ptr;
607 LttTracefile tmp_tf;
608
609 if(dir == NULL) {
610 perror(root_path);
611 return ENOENT;
612 }
613
614 strncpy(path, root_path, PATH_MAX-1);
615 path_len = strlen(path);
616 path[path_len] = '/';
617 path_len++;
618 path_ptr = path + path_len;
619
620 strncpy(rel_path, relative_path, PATH_MAX-1);
621 rel_path_len = strlen(rel_path);
622 rel_path[rel_path_len] = '/';
623 rel_path_len++;
624 rel_path_ptr = rel_path + rel_path_len;
625
626 while((entry = readdir(dir)) != NULL) {
627
628 if(entry->d_name[0] == '.') continue;
629
630 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
631 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
632
633 ret = stat(path, &stat_buf);
634 if(ret == -1) {
635 perror(path);
636 continue;
637 }
638
639 g_debug("Tracefile file or directory : %s\n", path);
640
641 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
642
643 if(S_ISDIR(stat_buf.st_mode)) {
644
645 g_debug("Entering subdirectory...\n");
646 ret = open_tracefiles(trace, path, rel_path);
647 if(ret < 0) continue;
648 } else if(S_ISREG(stat_buf.st_mode)) {
649 GQuark name;
650 guint num;
651 gulong tid, pgid;
652 guint64 creation;
653 GArray *group;
654 num = 0;
655 tid = pgid = 0;
656 creation = 0;
657 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
658 continue; /* invalid name */
659
660 g_debug("Opening file.\n");
661 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
662 g_info("Error opening tracefile %s", path);
663
664 continue; /* error opening the tracefile : bad magic number ? */
665 }
666
667 g_debug("Tracefile name is %s and number is %u",
668 g_quark_to_string(name), num);
669
670 mdata = NULL;
671 tmp_tf.cpu_online = 1;
672 tmp_tf.cpu_num = num;
673 tmp_tf.name = name;
674 tmp_tf.tid = tid;
675 tmp_tf.pgid = pgid;
676 tmp_tf.creation = creation;
677 group = g_datalist_id_get_data(&trace->tracefiles, name);
678 if(group == NULL) {
679 /* Elements are automatically cleared when the array is allocated.
680 * It makes the cpu_online variable set to 0 : cpu offline, by default.
681 */
682 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
683 g_datalist_id_set_data_full(&trace->tracefiles, name,
684 group, ltt_tracefile_group_destroy);
685 mdata = allocate_marker_data();
686 if (!mdata)
687 g_error("Error in allocating marker data");
688 }
689
690 /* Add the per cpu tracefile to the named group */
691 unsigned int old_len = group->len;
692 if(num+1 > old_len)
693 group = g_array_set_size(group, num+1);
694
695 g_assert(group->len > 0);
696 if (!mdata)
697 mdata = g_array_index (group, LttTracefile, 0).mdata;
698
699 g_array_index (group, LttTracefile, num) = tmp_tf;
700 g_array_index (group, LttTracefile, num).event.tracefile =
701 &g_array_index (group, LttTracefile, num);
702 for (i = 0; i < group->len; i++)
703 g_array_index (group, LttTracefile, i).mdata = mdata;
704 }
705 }
706
707 closedir(dir);
708
709 return 0;
710 }
711
712
     713 /* Presumes the tracefile has already been seeked to the beginning. This makes
     714  * sense because it must be done just after opening. */
715 static int ltt_process_metadata_tracefile(LttTracefile *tf)
716 {
717 int err;
718
719 while(1) {
720 err = ltt_tracefile_read_seek(tf);
721 if(err == EPERM) goto seek_error;
722 else if(err == ERANGE) break; /* End of tracefile */
723
724 err = ltt_tracefile_read_update_event(tf);
725 if(err) goto update_error;
726
727 /* The rules are :
728 * It contains only core events :
729 * 0 : set_marker_id
730 * 1 : set_marker_format
731 */
732 if(tf->event.event_id >= MARKER_CORE_IDS) {
733 /* Should only contain core events */
734 g_warning("Error in processing metadata file %s, "
735 "should not contain event id %u.", g_quark_to_string(tf->name),
736 tf->event.event_id);
737 err = EPERM;
738 goto event_id_error;
739 } else {
740 char *pos;
741 const char *channel_name, *marker_name, *format;
742 uint16_t id;
743 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
744
745 switch((enum marker_id)tf->event.event_id) {
746 case MARKER_ID_SET_MARKER_ID:
747 channel_name = pos = tf->event.data;
748 pos += strlen(channel_name) + 1;
749 marker_name = pos;
750 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
751 channel_name, marker_name);
752 pos += strlen(marker_name) + 1;
753 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
754 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
755 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
756 channel_name, marker_name, id);
757 pos += sizeof(guint16);
758 int_size = *(guint8*)pos;
759 pos += sizeof(guint8);
760 long_size = *(guint8*)pos;
761 pos += sizeof(guint8);
762 pointer_size = *(guint8*)pos;
763 pos += sizeof(guint8);
764 size_t_size = *(guint8*)pos;
765 pos += sizeof(guint8);
766 alignment = *(guint8*)pos;
767 pos += sizeof(guint8);
768 marker_id_event(tf->trace,
769 g_quark_from_string(channel_name),
770 g_quark_from_string(marker_name),
771 id, int_size, long_size,
772 pointer_size, size_t_size, alignment);
773 break;
774 case MARKER_ID_SET_MARKER_FORMAT:
775 channel_name = pos = tf->event.data;
776 pos += strlen(channel_name) + 1;
777 marker_name = pos;
778 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
779 channel_name, marker_name);
780 pos += strlen(marker_name) + 1;
781 format = pos;
782 pos += strlen(format) + 1;
783 marker_format_event(tf->trace,
784 g_quark_from_string(channel_name),
785 g_quark_from_string(marker_name),
786 format);
787 /* get information from dictionary TODO */
788 break;
789 default:
790 g_warning("Error in processing metadata file %s, "
791 "unknown event id %hhu.",
792 g_quark_to_string(tf->name),
793 tf->event.event_id);
794 err = EPERM;
795 goto event_id_error;
796 }
797 }
798 }
799 return 0;
800
801 /* Error handling */
802 event_id_error:
803 update_error:
804 seek_error:
     805   g_warning("An error occurred in metadata tracefile parsing");
806 return err;
807 }
808
809 /*
810 * Open a trace and return its LttTrace handle.
811 *
812 * pathname must be the directory of the trace
813 */
814
815 LttTrace *ltt_trace_open(const gchar *pathname)
816 {
817 gchar abs_path[PATH_MAX];
818 LttTrace * t;
819 LttTracefile *tf;
820 GArray *group;
821 unsigned int i;
822 int ret;
823 ltt_subbuffer_header_t *header;
824 DIR *dir;
825 struct dirent *entry;
826 struct stat stat_buf;
827 gchar path[PATH_MAX];
828
829 t = g_new(LttTrace, 1);
830 if(!t) goto alloc_error;
831
832 get_absolute_pathname(pathname, abs_path);
833 t->pathname = g_quark_from_string(abs_path);
834
835 g_datalist_init(&t->tracefiles);
836
837 /* Test to see if it looks like a trace */
838 dir = opendir(abs_path);
839 if(dir == NULL) {
840 perror(abs_path);
841 goto open_error;
842 }
843 while((entry = readdir(dir)) != NULL) {
844 strcpy(path, abs_path);
845 strcat(path, "/");
846 strcat(path, entry->d_name);
847 ret = stat(path, &stat_buf);
848 if(ret == -1) {
849 perror(path);
850 continue;
851 }
852 }
853 closedir(dir);
854
855 /* Open all the tracefiles */
856 t->start_freq= 0;
857 if(open_tracefiles(t, abs_path, "")) {
858 g_warning("Error opening tracefile %s", abs_path);
859 goto find_error;
860 }
861
862 /* Parse each trace metadata_N files : get runtime fac. info */
863 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
864 if(group == NULL) {
865 g_warning("Trace %s has no metadata tracefile", abs_path);
866 goto find_error;
867 }
868
869 /*
870 * Get the trace information for the metadata_0 tracefile.
     871    * Getting a correct trace start_time and start_tsc is ensured by the fact
872 * that no subbuffers are supposed to be lost in the metadata channel.
873 * Therefore, the first subbuffer contains the start_tsc timestamp in its
874 * buffer header.
875 */
876 g_assert(group->len > 0);
877 tf = &g_array_index (group, LttTracefile, 0);
878 header = (ltt_subbuffer_header_t *)tf->buffer.head;
879 ret = parse_trace_header(header, tf, t);
880 g_assert(!ret);
881
882 t->num_cpu = group->len;
883 t->drift = 1.;
884 t->offset = 0.;
885
886 //ret = allocate_marker_data(t);
887 //if (ret)
888 // g_error("Error in allocating marker data");
889
890 for(i=0; i<group->len; i++) {
891 tf = &g_array_index (group, LttTracefile, i);
892 if (tf->cpu_online)
893 if(ltt_process_metadata_tracefile(tf))
894 goto find_error;
895 // goto metadata_error;
896 }
897
898 return t;
899
900 /* Error handling */
901 //metadata_error:
902 // destroy_marker_data(t);
903 find_error:
904 g_datalist_clear(&t->tracefiles);
905 open_error:
906 g_free(t);
907 alloc_error:
908 return NULL;
909
910 }
911
     912 /* Open another, completely independent, instance of a trace.
     913  *
     914  * A read on this new instance will read the first event of the trace.
     915  *
     916  * When we copy a trace, we want all the opening actions to happen again :
     917  * the trace will be reopened and totally independent from the original.
918 * That's why we call ltt_trace_open.
919 */
920 LttTrace *ltt_trace_copy(LttTrace *self)
921 {
922 return ltt_trace_open(g_quark_to_string(self->pathname));
923 }
924
925 /*
926 * Close a trace
927 */
928
929 void ltt_trace_close(LttTrace *t)
930 {
931 g_datalist_clear(&t->tracefiles);
932 g_free(t);
933 }
934
935
936 /*****************************************************************************
937 * Get the start time and end time of the trace
938 ****************************************************************************/
939
940 void ltt_tracefile_time_span_get(LttTracefile *tf,
941 LttTime *start, LttTime *end)
942 {
943 int err;
944
945 err = map_block(tf, 0);
946 if(unlikely(err)) {
947 g_error("Can not map block");
948 *start = ltt_time_infinite;
949 } else
950 *start = tf->buffer.begin.timestamp;
951
952 err = map_block(tf, tf->num_blocks - 1); /* Last block */
953 if(unlikely(err)) {
954 g_error("Can not map block");
955 *end = ltt_time_zero;
956 } else
957 *end = tf->buffer.end.timestamp;
958
959 g_assert(end->tv_sec <= G_MAXUINT);
960 }
961
962 struct tracefile_time_span_get_args {
963 LttTrace *t;
964 LttTime *start;
965 LttTime *end;
966 };
967
968 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
969 {
970 struct tracefile_time_span_get_args *args =
971 (struct tracefile_time_span_get_args*)user_data;
972
973 GArray *group = (GArray *)data;
974 unsigned int i;
975 LttTracefile *tf;
976 LttTime tmp_start;
977 LttTime tmp_end;
978
979 for(i=0; i<group->len; i++) {
980 tf = &g_array_index (group, LttTracefile, i);
981 if(tf->cpu_online) {
982 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
983 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
984 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
985 }
986 }
987 }
988
989 /* return the start and end time of a trace */
990
991 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
992 {
993 LttTime min_start = ltt_time_infinite;
994 LttTime max_end = ltt_time_zero;
995 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
996
997 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
998
999 if(start != NULL) *start = min_start;
1000 if(end != NULL) *end = max_end;
1001
1002 }
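/* Minimal usage sketch (kept out of the build) of the trace-level API defined
 * above : open a trace, query its time span, close it.  The trace path and
 * the function name are placeholders. */
#if 0
static void example_trace_span(const gchar *trace_path)
{
  LttTrace *trace = ltt_trace_open(trace_path);
  LttTime start, end;

  if (!trace)
    return; /* not a valid trace directory */

  ltt_trace_time_span_get(trace, &start, &end);
  g_info("trace spans %lu.%09lu -> %lu.%09lu",
         (unsigned long)start.tv_sec, (unsigned long)start.tv_nsec,
         (unsigned long)end.tv_sec, (unsigned long)end.tv_nsec);

  ltt_trace_close(trace);
}
#endif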
1003
1004
1005 /* Seek to the first event in a tracefile that has a time equal or greater than
1006 * the time passed in parameter.
1007 *
    1008  * If the time parameter is outside the tracefile time span, seek to the first
    1009  * event if it lies before the span, or return ERANGE if it lies after.
    1010  *
    1011  * If the time parameter is before the first event, we seek directly to that
    1012  * first event.
    1013  *
    1014  * If the time is after the end of the trace, return ERANGE.
1015 *
1016 * Do a binary search to find the right block, then a sequential search in the
1017 * block to find the event.
1018 *
1019 * In the special case where the time requested fits inside a block that has no
1020 * event corresponding to the requested time, the first event of the next block
1021 * will be seeked.
1022 *
1023 * IMPORTANT NOTE : // FIXME everywhere...
1024 *
1025 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
1026 * you will jump over an event if you do.
1027 *
1028 * Return value : 0 : no error, the tf->event can be used
    1029  *         ERANGE : the time is after the last event of the trace
1030 * otherwise : this is an error.
1031 *
1032 * */
1033
1034 int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
1035 {
1036 int ret = 0;
1037 int err;
1038 unsigned int block_num, high, low;
1039
1040 /* seek at the beginning of trace */
1041 err = map_block(tf, 0); /* First block */
1042 if(unlikely(err)) {
1043 g_error("Can not map block");
1044 goto fail;
1045 }
1046
1047 /* If the time is lower or equal the beginning of the trace,
1048 * go to the first event. */
1049 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
1050 ret = ltt_tracefile_read(tf);
1051 if(ret == ERANGE) goto range;
1052 else if (ret) goto fail;
1053 goto found; /* There is either no event in the trace or the event points
1054 to the first event in the trace */
1055 }
1056
1057 err = map_block(tf, tf->num_blocks - 1); /* Last block */
1058 if(unlikely(err)) {
1059 g_error("Can not map block");
1060 goto fail;
1061 }
1062
1063 /* If the time is after the end of the trace, return ERANGE. */
1064 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1065 goto range;
1066 }
1067
1068 /* Binary search the block */
1069 high = tf->num_blocks - 1;
1070 low = 0;
1071
1072 while(1) {
1073 block_num = ((high-low) / 2) + low;
1074
1075 err = map_block(tf, block_num);
1076 if(unlikely(err)) {
1077 g_error("Can not map block");
1078 goto fail;
1079 }
1080 if(high == low) {
    1081       /* We cannot divide anymore : this is what would happen if the time
    1082        * requested was exactly between two consecutive buffers' end and start
    1083        * timestamps. This is also what would happen if we didn't deal with the
    1084        * out of span cases earlier in this function. */
1085 /* The event is right in the buffer!
1086 * (or in the next buffer first event) */
1087 while(1) {
1088 ret = ltt_tracefile_read(tf);
1089 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1090 else if(ret) goto fail;
1091
1092 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1093 goto found;
1094 }
1095
1096 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
1097 /*
1098 * Go to lower part. We don't want block_num - 1 since block_num
1099 * can equal low , in which case high < low.
1100 */
1101 high = block_num;
1102 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1103 /* go to higher part */
1104 low = block_num + 1;
1105 } else {/* The event is right in the buffer!
1106 (or in the next buffer first event) */
1107 while(1) {
1108 ret = ltt_tracefile_read(tf);
1109 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1110 else if(ret) goto fail;
1111
1112 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1113 break;
1114 }
1115 goto found;
1116 }
1117 }
1118
1119 found:
1120 return 0;
1121 range:
1122 return ERANGE;
1123
1124 /* Error handling */
1125 fail:
1126 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1127 g_quark_to_string(tf->name));
1128 return EPERM;
1129 }
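/* Minimal usage sketch (kept out of the build) : seek a tracefile to a given
 * time, then walk the following events.  It processes tf->event before
 * calling ltt_tracefile_read again, as required by the note above.  The
 * function name is a placeholder. */
#if 0
static void example_walk_from_time(LttTracefile *tf, LttTime from)
{
  int err;

  err = ltt_tracefile_seek_time(tf, from);
  if (err == ERANGE)
    return; /* requested time is after the last event */
  if (err)
    return; /* EPERM : seek error */

  do {
    LttEvent *ev = ltt_tracefile_get_event(tf);
    g_debug("event id %u at %lu.%09lu", (guint)ev->event_id,
            (unsigned long)ev->event_time.tv_sec,
            (unsigned long)ev->event_time.tv_nsec);
  } while (ltt_tracefile_read(tf) == 0);
}
#endif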
1130
1131 /* Seek to a position indicated by an LttEventPosition
1132 */
1133
1134 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1135 {
1136 int err;
1137
1138 if(ep->tracefile != tf) {
1139 goto fail;
1140 }
1141
1142 err = map_block(tf, ep->block);
1143 if(unlikely(err)) {
1144 g_error("Can not map block");
1145 goto fail;
1146 }
1147
1148 tf->event.offset = ep->offset;
1149
1150 /* Put back the event real tsc */
1151 tf->event.tsc = ep->tsc;
1152 tf->buffer.tsc = ep->tsc;
1153
1154 err = ltt_tracefile_read_update_event(tf);
1155 if(err) goto fail;
1156
1157 /* deactivate this, as it does nothing for now
1158 err = ltt_tracefile_read_op(tf);
1159 if(err) goto fail;
1160 */
1161
1162 return 0;
1163
1164 fail:
1165 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1166 g_quark_to_string(tf->name));
1167 return 1;
1168 }
1169
1170 /*
1171 * Convert a value in "TSC scale" to a value in nanoseconds
1172 */
1173 guint64 tsc_to_uint64(guint32 freq_scale, uint64_t start_freq, guint64 tsc)
1174 {
1175 return (double) tsc * NANOSECONDS_PER_SECOND * freq_scale / start_freq;
1176 }
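/* Worked example with hypothetical numbers : freq_scale == 1 and
 * start_freq == 1000000000 (a 1 GHz clock) give
 * tsc_to_uint64(1, 1000000000, 3000000000ULL) == 3000000000 ns, i.e. 3 s. */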
1177
1178 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1179 * corresponds to.
1180 */
1181 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1182 {
1183 return ltt_time_from_uint64(tsc_to_uint64(tf->trace->freq_scale,
1184 tf->trace->start_freq, tf->trace->drift * tsc +
1185 tf->trace->offset));
1186 }
1187
1188 /* Calculate the real event time based on the buffer boundaries */
1189 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1190 {
1191 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1192 }
1193
1194
1195 /* Get the current event of the tracefile : valid until the next read */
1196 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1197 {
1198 return &tf->event;
1199 }
1200
1201
1202
1203 /*****************************************************************************
1204 *Function name
1205 * ltt_tracefile_read : Read the next event in the tracefile
1206 *Input params
1207 * t : tracefile
1208 *Return value
1209 *
1210 * Returns 0 if an event can be used in tf->event.
1211 * Returns ERANGE on end of trace. The event in tf->event still can be used
1212 * (if the last block was not empty).
1213 * Returns EPERM on error.
1214 *
    1215  * This function makes the tracefile event structure (tf->event) point to the
    1216  * event that has just been read.
    1217  *
    1218  * Note : you must call ltt_tracefile_seek to the beginning of the trace to
    1219  * reinitialize it after an error if you want coherent results.
    1220  * Such an error happens if the last buffer of the trace has no event : the end
    1221  * of trace would not be returned, but an error.
1222 * We make the assumption there is at least one event per buffer.
1223 ****************************************************************************/
1224
1225 int ltt_tracefile_read(LttTracefile *tf)
1226 {
1227 int err;
1228
1229 err = ltt_tracefile_read_seek(tf);
1230 if(err) return err;
1231 err = ltt_tracefile_read_update_event(tf);
1232 if(err) return err;
1233
1234 /* deactivate this, as it does nothing for now
1235 err = ltt_tracefile_read_op(tf);
1236 if(err) return err;
1237 */
1238
1239 return 0;
1240 }
1241
1242 int ltt_tracefile_read_seek(LttTracefile *tf)
1243 {
1244 int err;
1245
1246 /* Get next buffer until we finally have an event, or end of trace */
1247 while(1) {
1248 err = ltt_seek_next_event(tf);
1249 if(unlikely(err == ENOPROTOOPT)) {
1250 return EPERM;
1251 }
1252
1253 /* Are we at the end of the buffer ? */
1254 if(err == ERANGE) {
1255 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1256 return ERANGE;
1257 } else {
1258 /* get next block */
1259 err = map_block(tf, tf->buffer.index + 1);
1260 if(unlikely(err)) {
1261 g_error("Can not map block");
1262 return EPERM;
1263 }
1264 }
1265 } else break; /* We found an event ! */
1266 }
1267
1268 return 0;
1269 }
1270
1271 /* do an operation when reading a new event */
1272
1273 /* This function does nothing for now */
1274 #if 0
1275 int ltt_tracefile_read_op(LttTracefile *tf)
1276 {
1277 LttEvent *event;
1278
1279 event = &tf->event;
1280
1281 /* do event specific operation */
1282
1283 /* nothing */
1284
1285 return 0;
1286 }
1287 #endif
1288
1289 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1290 {
1291 unsigned int offset = 0;
1292 int i, j;
1293
1294 g_printf("Event header (tracefile %s offset %" PRIx64 "):\n",
1295 g_quark_to_string(ev->tracefile->long_name),
1296 (uint64_t)ev->tracefile->buffer.offset +
1297 (long)start_pos - (long)ev->tracefile->buffer.head);
1298
1299 while (offset < (long)end_pos - (long)start_pos) {
1300 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1301 g_printf(" ");
1302
1303 for (i = 0; i < 4 ; i++) {
1304 for (j = 0; j < 4; j++) {
1305 if (offset + ((i * 4) + j) <
1306 (long)end_pos - (long)start_pos)
1307 g_printf("%02hhX",
1308 ((char*)start_pos)[offset + ((i * 4) + j)]);
1309 else
1310 g_printf(" ");
1311 g_printf(" ");
1312 }
1313 if (i < 4)
1314 g_printf(" ");
1315 }
1316 offset+=16;
1317 g_printf("\n");
1318 }
1319 }
1320
1321
1322 /* same as ltt_tracefile_read, but does not seek to the next event nor call
1323 * event specific operation. */
1324 int ltt_tracefile_read_update_event(LttTracefile *tf)
1325 {
1326 void * pos;
1327 LttEvent *event;
1328 void *pos_aligned;
    1329   guint16 packed_evid;	/* event id read from the 5 bits in the header */
1330
1331 event = &tf->event;
1332 pos = tf->buffer.head + event->offset;
1333
1334 /* Read event header */
1335
1336 /* Align the head */
1337 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1338 pos_aligned = pos;
1339
1340 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1341 event->event_id = packed_evid = event->timestamp >> tf->tscbits;
1342 event->timestamp = event->timestamp & tf->tsc_mask;
1343 pos += sizeof(guint32);
1344
1345 switch (packed_evid) {
1346 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1347 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1348 pos += sizeof(guint16);
1349 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1350 pos += sizeof(guint16);
1351 if (event->event_size == 0xFFFF) {
1352 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1353 pos += sizeof(guint32);
1354 }
1355 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1356 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1357 pos += sizeof(guint64);
1358 break;
1359 case 30: /* LTT_RFLAG_ID_SIZE */
1360 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1361 pos += sizeof(guint16);
1362 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1363 pos += sizeof(guint16);
1364 if (event->event_size == 0xFFFF) {
1365 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1366 pos += sizeof(guint32);
1367 }
1368 break;
1369 case 31: /* LTT_RFLAG_ID */
1370 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1371 pos += sizeof(guint16);
1372 event->event_size = G_MAXUINT;
1373 break;
1374 default:
1375 event->event_size = G_MAXUINT;
1376 break;
1377 }
1378
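  /*
   * The event header carries only the low tf->tscbits bits of the TSC (27 for
   * a 2.6 trace).  Extend it against the running buffer TSC : if the truncated
   * value is smaller than the current low bits, the counter wrapped around and
   * one unit of the next bit is added.  Illustrative 27-bit values : buffer
   * low bits 0x7FFFFF0 followed by an event timestamp of 0x0000010 means one
   * wrap occurred.
   */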
1379 if (likely(packed_evid != 29)) {
1380 /* No extended timestamp */
1381 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1382 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1383 + tf->tsc_mask_next_bit)
1384 | (guint64)event->timestamp;
1385 else
1386 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1387 | (guint64)event->timestamp;
1388 }
1389 event->tsc = tf->buffer.tsc;
1390
1391 event->event_time = ltt_interpolate_time(tf, event);
1392
1393 if (a_event_debug)
1394 print_debug_event_header(event, pos_aligned, pos);
1395
1396 event->data = pos;
1397
1398 /*
1399 * Let ltt_update_event_size update event->data according to the largest
1400 * alignment within the payload.
1401 * Get the data size and update the event fields with the current
1402 * information. */
1403 ltt_update_event_size(tf);
1404
1405 return 0;
1406 }
1407
1408
1409 /****************************************************************************
1410 *Function name
1411 * map_block : map a block from the file
1412 *Input Params
1413 * lttdes : ltt trace file
1414 * whichBlock : the block which will be read
1415 *return value
1416 * 0 : success
1417 * EINVAL : lseek fail
1418 * EIO : can not read from the file
1419 ****************************************************************************/
1420
1421 static gint map_block(LttTracefile * tf, guint block_num)
1422 {
1423 int page_size = getpagesize();
1424 ltt_subbuffer_header_t *header;
1425 uint64_t offset;
1426 uint32_t size;
1427 int ret;
1428
1429 g_assert(block_num < tf->num_blocks);
1430
1431 if(tf->buffer.head != NULL) {
1432 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buffer.size))) {
1433 g_warning("unmap size : %u\n",
1434 PAGE_ALIGN(tf->buffer.size));
1435 perror("munmap error");
1436 g_assert(0);
1437 }
1438 }
1439
1440 ret = get_block_offset_size(tf, block_num, &offset, &size);
1441 g_assert(!ret);
1442
1443 g_debug("Map block %u, offset %llu, size %u\n", block_num,
1444 (unsigned long long)offset, (unsigned int)size);
1445
1446 /* Multiple of pages aligned head */
1447 tf->buffer.head = mmap(0, (size_t)size, PROT_READ, MAP_PRIVATE,
1448 tf->fd, (off_t)offset);
1449
1450 if(tf->buffer.head == MAP_FAILED) {
1451 perror("Error in allocating memory for buffer of tracefile");
1452 g_assert(0);
1453 goto map_error;
1454 }
1455 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1456
1457 tf->buffer.index = block_num;
1458
1459 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1460
1461 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1462 &header->cycle_count_begin);
1463 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1464 &header->cycle_count_end);
1465 tf->buffer.offset = offset;
1466 tf->buffer.size = ltt_get_uint32(LTT_GET_BO(tf),
1467 &header->sb_size);
1468 tf->buffer.data_size = ltt_get_uint32(LTT_GET_BO(tf),
1469 &header->data_size);
1470 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1471 tf->event.tsc = tf->buffer.tsc;
1472 tf->buffer.freq = tf->buffer.begin.freq;
1473
1474 g_assert(size == tf->buffer.size);
1475 g_assert(tf->buffer.data_size <= tf->buffer.size);
1476
1477 if (tf->trace->start_freq)
1478 {
1479 tf->buffer.begin.freq = tf->trace->start_freq;
1480 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1481 tf->buffer.begin.cycle_count);
1482 tf->buffer.end.freq = tf->trace->start_freq;
1483 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1484 tf->buffer.end.cycle_count);
1485 }
1486
1487 /* Make the current event point to the beginning of the buffer :
1488 * it means that the event read must get the first event. */
1489 tf->event.tracefile = tf;
1490 tf->event.block = block_num;
1491 tf->event.offset = 0;
1492
1493 if (header->events_lost) {
1494 g_warning("%d events lost so far in tracefile %s at block %u",
1495 (guint)header->events_lost,
1496 g_quark_to_string(tf->long_name),
1497 block_num);
1498 tf->events_lost = header->events_lost;
1499 }
1500 if (header->subbuf_corrupt) {
1501 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1502 (guint)header->subbuf_corrupt,
1503 g_quark_to_string(tf->long_name),
1504 block_num);
1505 tf->subbuf_corrupt = header->subbuf_corrupt;
1506 }
1507
1508 return 0;
1509
1510 map_error:
1511 return -errno;
1512 }
1513
1514 static void print_debug_event_data(LttEvent *ev)
1515 {
1516 unsigned int offset = 0;
1517 int i, j;
1518
1519 if (!max(ev->event_size, ev->data_size))
1520 return;
1521
1522 g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
1523 g_quark_to_string(ev->tracefile->long_name),
1524 (uint64_t)ev->tracefile->buffer.offset
1525 + (long)ev->data - (long)ev->tracefile->buffer.head);
1526
1527 while (offset < max(ev->event_size, ev->data_size)) {
1528 g_printf("%8lx", (long)ev->data + offset
1529 - (long)ev->tracefile->buffer.head);
1530 g_printf(" ");
1531
1532 for (i = 0; i < 4 ; i++) {
1533 for (j = 0; j < 4; j++) {
1534 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1535 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1536 else
1537 g_printf(" ");
1538 g_printf(" ");
1539 }
1540 if (i < 4)
1541 g_printf(" ");
1542 }
1543
1544 g_printf(" ");
1545
1546 for (i = 0; i < 4; i++) {
1547 for (j = 0; j < 4; j++) {
1548 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1549 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1550 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1551 else
1552 g_printf(".");
1553 } else
1554 g_printf(" ");
1555 }
1556 }
1557 offset+=16;
1558 g_printf("\n");
1559 }
1560 }
1561
1562 /* It will update the fields offsets too */
1563 void ltt_update_event_size(LttTracefile *tf)
1564 {
1565 off_t size = 0;
1566 struct marker_info *info;
1567
1568 if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
1569 switch((enum marker_id)tf->event.event_id) {
1570 case MARKER_ID_SET_MARKER_ID:
1571 size = strlen((char*)tf->event.data) + 1;
1572 g_debug("marker %s id set", (char*)tf->event.data + size);
1573 size += strlen((char*)tf->event.data + size) + 1;
1574 size += ltt_align(size, sizeof(guint16), tf->alignment);
1575 size += sizeof(guint16);
1576 size += sizeof(guint8);
1577 size += sizeof(guint8);
1578 size += sizeof(guint8);
1579 size += sizeof(guint8);
1580 size += sizeof(guint8);
1581 break;
1582 case MARKER_ID_SET_MARKER_FORMAT:
1583 size = strlen((char*)tf->event.data) + 1;
1584 g_debug("marker %s format set", (char*)tf->event.data);
1585 size += strlen((char*)tf->event.data + size) + 1;
1586 size += strlen((char*)tf->event.data + size) + 1;
1587 break;
1588 }
1589 }
1590
1591 info = marker_get_info_from_id(tf->mdata, tf->event.event_id);
1592
1593 if (tf->event.event_id >= MARKER_CORE_IDS)
1594 g_assert(info != NULL);
1595
1596 /* Do not update field offsets of core markers when initially reading the
1597 * metadata tracefile when the infos about these markers do not exist yet.
1598 */
1599 if (likely(info && info->fields)) {
1600 /* alignment */
1601 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1602 info->largest_align,
1603 info->alignment);
1604 /* size, dynamically computed */
1605 if (info->size != -1)
1606 size = info->size;
1607 else
1608 size = marker_update_fields_offsets(info, tf->event.data);
1609 /* Update per-tracefile offsets */
1610 marker_update_event_fields_offsets(tf->event.fields_offsets, info);
1611 }
1612
1613 tf->event.data_size = size;
1614
1615 /* Check consistency between kernel and LTTV structure sizes */
1616 if(tf->event.event_size == G_MAXUINT) {
1617 /* Event size too big to fit in the event size field */
1618 tf->event.event_size = tf->event.data_size;
1619 }
1620
1621 if (a_event_debug)
1622 print_debug_event_data(&tf->event);
1623
1624 if (tf->event.data_size != tf->event.event_size) {
1625 struct marker_info *info = marker_get_info_from_id(tf->mdata,
1626 tf->event.event_id);
1627 if (!info)
1628 g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
1629 g_quark_to_string(tf->name));
1630 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1631 g_quark_to_string(info->name),
1632 tf->event.event_size, tf->event.data_size);
1633 exit(-1);
1634 }
1635 }
1636
1637
    1638 /* Take the tf current event offset and use the event id to figure out where
    1639  * the next event offset is.
    1640  *
    1641  * This is an internal function not meant to be used elsewhere : it will
    1642  * not jump over the current block limits. Please consider using
    1643  * ltt_tracefile_read to do this.
1644 *
1645 * Returns 0 on success
1646 * ERANGE if we are at the end of the buffer.
    1647  * ENOPROTOOPT if an error occurred when getting the current event size.
1648 */
1649 static int ltt_seek_next_event(LttTracefile *tf)
1650 {
1651 int ret = 0;
1652 void *pos;
1653
1654 /* seek over the buffer header if we are at the buffer start */
1655 if(tf->event.offset == 0) {
1656 tf->event.offset += tf->buffer_header_size;
1657
1658 if(tf->event.offset == tf->buffer.data_size) {
1659 ret = ERANGE;
1660 }
1661 goto found;
1662 }
1663
1664 pos = tf->event.data;
1665
1666 if(tf->event.data_size < 0) goto error;
1667
1668 pos += (size_t)tf->event.data_size;
1669
1670 tf->event.offset = pos - tf->buffer.head;
1671
1672 if(tf->event.offset == tf->buffer.data_size) {
1673 ret = ERANGE;
1674 goto found;
1675 }
1676 g_assert(tf->event.offset < tf->buffer.data_size);
1677
1678 found:
1679 return ret;
1680
1681 error:
1682 g_error("Error in ltt_seek_next_event for tracefile %s",
1683 g_quark_to_string(tf->name));
1684 return ENOPROTOOPT;
1685 }
1686
1687
1688 /*****************************************************************************
1689 *Function name
1690 * ltt_get_int : get an integer number
1691 *Input params
1692 * reverse_byte_order: must we reverse the byte order ?
1693 * size : the size of the integer
1694 * ptr : the data pointer
1695 *Return value
1696 * gint64 : a 64 bits integer
1697 ****************************************************************************/
1698
1699 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
1700 {
1701 gint64 val;
1702
1703 switch(size) {
1704 case 1: val = *((gint8*)data); break;
1705 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
1706 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
1707 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
1708 default: val = ltt_get_int64(reverse_byte_order, data);
1709 g_critical("get_int : integer size %d unknown", size);
1710 break;
1711 }
1712
1713 return val;
1714 }
1715
1716 /*****************************************************************************
1717 *Function name
1718 * ltt_get_uint : get an unsigned integer number
1719 *Input params
1720 * reverse_byte_order: must we reverse the byte order ?
1721 * size : the size of the integer
1722 * ptr : the data pointer
1723 *Return value
1724 * guint64 : a 64 bits unsigned integer
1725 ****************************************************************************/
1726
1727 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
1728 {
1729 guint64 val;
1730
1731 switch(size) {
    1732   case 1: val = *((guint8*)data); break;
1733 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
1734 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
1735 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
1736 default: val = ltt_get_uint64(reverse_byte_order, data);
1737 g_critical("get_uint : unsigned integer size %d unknown",
1738 size);
1739 break;
1740 }
1741
1742 return val;
1743 }
1744
1745
1746 /* get the node name of the system */
1747
1748 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
1749 {
1750 return s->node_name;
1751 }
1752
1753
1754 /* get the domain name of the system */
1755
1756 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
1757 {
1758 return s->domain_name;
1759 }
1760
1761
1762 /* get the description of the system */
1763
1764 char * ltt_trace_system_description_description (LttSystemDescription * s)
1765 {
1766 return s->description;
1767 }
1768
1769
1770 /* get the NTP corrected start time of the trace */
1771 LttTime ltt_trace_start_time(LttTrace *t)
1772 {
1773 return t->start_time;
1774 }
1775
1776 /* get the monotonic start time of the trace */
1777 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
1778 {
1779 return t->start_time_from_tsc;
1780 }
1781
1782 static __attribute__ ((__unused__)) LttTracefile *ltt_tracefile_new()
1783 {
1784 LttTracefile *tf;
1785 tf = g_new(LttTracefile, 1);
1786 tf->event.tracefile = tf;
1787 return tf;
1788 }
1789
1790 static __attribute__ ((__unused__)) void ltt_tracefile_destroy(LttTracefile *tf)
1791 {
1792 g_free(tf);
1793 }
1794
1795 static __attribute__ ((__unused__)) void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
1796 {
1797 *dest = *src;
1798 }
1799
1800 /* Before library loading... */
1801
1802 static __attribute__((constructor)) void init(void)
1803 {
1804 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
1805 }
1806
1807 /*****************************************************************************
1808 *Function name
1809 * ltt_tracefile_open_header : based on ltt_tracefile_open but it stops
1810 * when it gets the header
1811 *Input params
1812 * fileName : path to the tracefile
1813 * tf : the tracefile (metadata_0) where the header will be read
1814 *Return value
1815 * ltt_subbuffer_header_t : the header containing the version number
1816 ****************************************************************************/
1817 static ltt_subbuffer_header_t * ltt_tracefile_open_header(gchar *fileName, LttTracefile *tf)
1818 {
1819 struct stat lTDFStat; /* Trace data file status */
1820 ltt_subbuffer_header_t *header;
1821 int page_size = getpagesize();
1822
1823 /* open the file */
1824 tf->long_name = g_quark_from_string(fileName);
1825 tf->fd = open(fileName, O_RDONLY);
1826 if(tf->fd < 0){
1827 g_warning("Unable to open input data file %s\n", fileName);
1828 goto end;
1829 }
1830
1831 /* Get the file's status */
1832 if(fstat(tf->fd, &lTDFStat) < 0){
1833 g_warning("Unable to get the status of the input data file %s\n", fileName);
1834 goto close_file;
1835 }
1836
1837 /* Is the file large enough to contain a trace */
1838 if(lTDFStat.st_size < (off_t)(ltt_subbuffer_header_size())) {
1839 g_print("The input data file %s does not contain a trace\n", fileName);
1840 goto close_file;
1841 }
1842
1843 /* Temporarily map the buffer start header to get trace information */
1844 /* Multiple of pages aligned head */
1845 tf->buffer.head = mmap(0,PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ, MAP_PRIVATE, tf->fd, 0);
1846
1847 if(tf->buffer.head == MAP_FAILED) {
1848 perror("Error in allocating memory for buffer of tracefile");
1849 goto close_file;
1850 }
1851 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1852
1853 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1854
1855 return header;
1856
1857 close_file:
1858 close(tf->fd);
1859 end:
1860 return 0;
1861 }
1862
1863
1864 /*****************************************************************************
1865 *Function name
1866 * get_version : get the trace version from a metadata_0 trace file
1867 *Input params
1868 * pathname : path to the trace
1869 * version_number : the struct that will get the version number
1870 *Return value
    1871  *      int : 0 on success, -1 on error
1872 ****************************************************************************/
1873 int ltt_get_trace_version(const gchar *pathname, struct LttTraceVersion *version_number)
1874 {
1875 gchar abs_path[PATH_MAX];
1876 int ret = 0;
1877 DIR *dir;
1878 struct dirent *entry;
1879 struct stat stat_buf;
1880 gchar path[PATH_MAX];
1881
1882 LttTracefile tmp_tf;
1883 LttTrace * t;
1884 ltt_subbuffer_header_t *header;
1885
1886 t = g_new(LttTrace, 1);
1887
1888 get_absolute_pathname(pathname, abs_path);
1889
1890 /* Test to see if it looks like a trace */
1891 dir = opendir(abs_path);
1892
1893 if(dir == NULL) {
1894 perror(abs_path);
1895 goto open_error;
1896 }
1897
1898 while((entry = readdir(dir)) != NULL) {
1899 strcpy(path, abs_path);
1900 strcat(path, "/");
1901 strcat(path, entry->d_name);
1902 ret = stat(path, &stat_buf);
1903 if(ret == -1) {
1904 perror(path);
1905 continue;
1906 }
1907 }
1908
1909 closedir(dir);
1910 dir = opendir(abs_path);
1911
1912 while((entry = readdir(dir)) != NULL) {
1913 if(entry->d_name[0] == '.') continue;
1914 if(g_strcmp0(entry->d_name, "metadata_0") != 0) continue;
1915
1916 strcpy(path, abs_path);
1917 strcat(path, "/");
1918 strcat(path, entry->d_name);
    1919     if(stat(path, &stat_buf) == -1) { /* stat the metadata_0 candidate */
1920 perror(path);
1921 continue;
1922 }
1923
1924 header = ltt_tracefile_open_header(path, &tmp_tf);
1925
1926 if(header == NULL) {
1927 g_info("Error getting the header %s", path);
1928 continue; /* error opening the tracefile : bad magic number ? */
1929 }
1930
1931 version_number->ltt_major_version = header->major_version;
1932 version_number->ltt_minor_version = header->minor_version;
1933 }
    1934   closedir(dir);
1935 return 0;
1936
1937 open_error:
1938 g_free(t);
1939 return -1;
1940 }