/*
 * Add members to LttTrace to support time adjustments
 * [lttv.git] / ltt / tracefile.c
 */
1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <glib/gprintf.h>
36 #include <malloc.h>
37 #include <sys/mman.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <inttypes.h>
41
42 // For realpath
43 #include <limits.h>
44 #include <stdlib.h>
45
46
47 #include <ltt/ltt.h>
48 #include "ltt-private.h"
49 #include <ltt/trace.h>
50 #include <ltt/event.h>
51 #include <ltt/ltt-types.h>
52 #include <ltt/marker.h>
53
54 #define DEFAULT_N_BLOCKS 32
55
56 /* from marker.c */
57 extern long marker_update_fields_offsets(struct marker_info *info, const char *data);
58
59 /* Tracefile names used in this file */
60
61 GQuark LTT_TRACEFILE_NAME_METADATA;
62
63 #ifndef g_open
64 #define g_open open
65 #endif
66
67
68 #define __UNUSED__ __attribute__((__unused__))
69
70 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
71
72 #ifndef g_debug
73 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
74 #endif
75
76 #define g_close close
77
78 /* Those macros must be called from within a function where page_size is a known
79 * variable */
80 #define PAGE_MASK (~(page_size-1))
81 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
82
83 /* set the offset of the fields belonging to the event,
84 need the information of the archecture */
85 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
86 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
87
88 /* map a fixed size or a block information from the file (fd) */
89 static gint map_block(LttTracefile * tf, guint block_num);
90
91 /* calculate nsec per cycles for current block */
92 #if 0
93 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
94 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
95 #endif //0
96
97 /* go to the next event */
98 static int ltt_seek_next_event(LttTracefile *tf);
99
100 static int open_tracefiles(LttTrace *trace, gchar *root_path,
101 gchar *relative_path);
102 static int ltt_process_metadata_tracefile(LttTracefile *tf);
103 static void ltt_tracefile_time_span_get(LttTracefile *tf,
104 LttTime *start, LttTime *end);
105 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
106 static gint map_block(LttTracefile * tf, guint block_num);
107 static void ltt_update_event_size(LttTracefile *tf);
108
109 /* Enable event debugging */
110 static int a_event_debug = 0;
111
/* Enable (state != 0) or disable (state == 0) event debugging output
 * for this module; read by the event-dump code paths. */
void ltt_event_debug(int state)
{
  a_event_debug = state;
}
116
117 /* trace can be NULL
118 *
119 * Return value : 0 success, 1 bad tracefile
120 */
121 static int parse_trace_header(ltt_subbuffer_header_t *header,
122 LttTracefile *tf, LttTrace *t)
123 {
124 if (header->magic_number == LTT_MAGIC_NUMBER)
125 tf->reverse_bo = 0;
126 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
127 tf->reverse_bo = 1;
128 else /* invalid magic number, bad tracefile ! */
129 return 1;
130
131 if(t) {
132 t->ltt_major_version = header->major_version;
133 t->ltt_minor_version = header->minor_version;
134 t->arch_size = header->arch_size;
135 }
136 tf->alignment = header->alignment;
137
138 /* Get float byte order : might be different from int byte order
139 * (or is set to 0 if the trace has no float (kernel trace)) */
140 tf->float_word_order = 0;
141
142 switch(header->major_version) {
143 case 0:
144 case 1:
145 g_warning("Unsupported trace version : %hhu.%hhu",
146 header->major_version, header->minor_version);
147 return 1;
148 break;
149 case 2:
150 switch(header->minor_version) {
151 case 5:
152 {
153 struct ltt_subbuffer_header_2_5 *vheader = header;
154 tf->buffer_header_size = ltt_subbuffer_header_size();
155 tf->tscbits = 27;
156 tf->eventbits = 5;
157 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
158 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
159
160 if(t) {
161 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
162 &vheader->start_freq);
163 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
164 &vheader->freq_scale);
165 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
166 &vheader->cycle_count_begin);
167 t->start_monotonic = 0;
168 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
169 &vheader->start_time_sec);
170 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
171 &vheader->start_time_usec);
172 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
173
174 t->start_time_from_tsc =
175 ltt_time_from_uint64(tsc_to_uint64(t->freq_scale,
176 t->start_freq, t->start_tsc));
177 }
178 }
179 break;
180 default:
181 g_warning("Unsupported trace version : %hhu.%hhu",
182 header->major_version, header->minor_version);
183 return 1;
184 }
185 break;
186 default:
187 g_warning("Unsupported trace version : %hhu.%hhu",
188 header->major_version, header->minor_version);
189 return 1;
190 }
191 return 0;
192 }
193
194 int get_block_offset_size(LttTracefile *tf, guint block_num,
195 uint64_t *offset, uint32_t *size)
196 {
197 uint64_t offa, offb;
198
199 if (unlikely(block_num >= tf->num_blocks))
200 return -1;
201
202 offa = g_array_index(tf->buf_index, uint64_t, block_num);
203 if (likely(block_num < tf->num_blocks - 1))
204 offb = g_array_index(tf->buf_index, uint64_t, block_num + 1);
205 else
206 offb = tf->file_size;
207 *offset = offa;
208 *size = offb - offa;
209 return 0;
210 }
211
212 int ltt_trace_create_block_index(LttTracefile *tf)
213 {
214 int page_size = getpagesize();
215 uint64_t offset = 0;
216 unsigned long i = 0;
217 unsigned int header_map_size = PAGE_ALIGN(ltt_subbuffer_header_size());
218
219 tf->buf_index = g_array_sized_new(FALSE, TRUE, sizeof(uint64_t),
220 DEFAULT_N_BLOCKS);
221
222 g_assert(tf->buf_index->len == i);
223
224 while (offset < tf->file_size) {
225 ltt_subbuffer_header_t *header;
226 uint64_t *off;
227
228 tf->buf_index = g_array_set_size(tf->buf_index, i + 1);
229 off = &g_array_index(tf->buf_index, uint64_t, i);
230 *off = offset;
231
232 /* map block header */
233 header = mmap(0, header_map_size, PROT_READ,
234 MAP_PRIVATE, tf->fd, (off_t)offset);
235 if(header == MAP_FAILED) {
236 perror("Error in allocating memory for buffer of tracefile");
237 return -1;
238 }
239
240 /* read len, offset += len */
241 offset += ltt_get_uint32(LTT_GET_BO(tf), &header->sb_size);
242
243 /* unmap block header */
244 if(munmap(header, header_map_size)) {
245 g_warning("unmap size : %u\n", header_map_size);
246 perror("munmap error");
247 return -1;
248 }
249 ++i;
250 }
251 tf->num_blocks = i;
252
253 return 0;
254 }
255
256 /*****************************************************************************
257 *Function name
258 * ltt_tracefile_open : open a trace file, construct a LttTracefile
259 *Input params
260 * t : the trace containing the tracefile
261 * fileName : path name of the trace file
262 * tf : the tracefile structure
263 *Return value
264 * : 0 for success, -1 otherwise.
265 ****************************************************************************/
266
267 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
268 {
269 struct stat lTDFStat; /* Trace data file status */
270 ltt_subbuffer_header_t *header;
271 int page_size = getpagesize();
272
273 //open the file
274 tf->long_name = g_quark_from_string(fileName);
275 tf->trace = t;
276 tf->fd = open(fileName, O_RDONLY);
277 tf->buf_index = NULL;
278 if(tf->fd < 0){
279 g_warning("Unable to open input data file %s\n", fileName);
280 goto end;
281 }
282
283 // Get the file's status
284 if(fstat(tf->fd, &lTDFStat) < 0){
285 g_warning("Unable to get the status of the input data file %s\n", fileName);
286 goto close_file;
287 }
288
289 // Is the file large enough to contain a trace
290 if(lTDFStat.st_size <
291 (off_t)(ltt_subbuffer_header_size())){
292 g_print("The input data file %s does not contain a trace\n", fileName);
293 goto close_file;
294 }
295
296 /* Temporarily map the buffer start header to get trace information */
297 /* Multiple of pages aligned head */
298 tf->buffer.head = mmap(0,
299 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
300 MAP_PRIVATE, tf->fd, 0);
301 if(tf->buffer.head == MAP_FAILED) {
302 perror("Error in allocating memory for buffer of tracefile");
303 goto close_file;
304 }
305 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
306
307 header = (ltt_subbuffer_header_t *)tf->buffer.head;
308
309 if(parse_trace_header(header, tf, NULL)) {
310 g_warning("parse_trace_header error");
311 goto unmap_file;
312 }
313
314 //store the size of the file
315 tf->file_size = lTDFStat.st_size;
316 tf->events_lost = 0;
317 tf->subbuf_corrupt = 0;
318
319 if(munmap(tf->buffer.head,
320 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
321 g_warning("unmap size : %zu\n",
322 PAGE_ALIGN(ltt_subbuffer_header_size()));
323 perror("munmap error");
324 g_assert(0);
325 }
326 tf->buffer.head = NULL;
327
328 /* Create block index */
329 ltt_trace_create_block_index(tf);
330
331 //read the first block
332 if(map_block(tf,0)) {
333 perror("Cannot map block for tracefile");
334 goto close_file;
335 }
336
337 return 0;
338
339 /* Error */
340 unmap_file:
341 if(munmap(tf->buffer.head,
342 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
343 g_warning("unmap size : %zu\n",
344 PAGE_ALIGN(ltt_subbuffer_header_size()));
345 perror("munmap error");
346 g_assert(0);
347 }
348 close_file:
349 close(tf->fd);
350 end:
351 if (tf->buf_index)
352 g_array_free(tf->buf_index, TRUE);
353 return -1;
354 }
355
356
357 /*****************************************************************************
358 *Function name
359 * ltt_tracefile_close: close a trace file,
360 *Input params
361 * t : tracefile which will be closed
362 ****************************************************************************/
363
364 static void ltt_tracefile_close(LttTracefile *t)
365 {
366 int page_size = getpagesize();
367
368 if(t->buffer.head != NULL)
369 if(munmap(t->buffer.head, PAGE_ALIGN(t->buffer.size))) {
370 g_warning("unmap size : %u\n",
371 PAGE_ALIGN(t->buffer.size));
372 perror("munmap error");
373 g_assert(0);
374 }
375
376 close(t->fd);
377 if (t->buf_index)
378 g_array_free(t->buf_index, TRUE);
379 }
380
381 /****************************************************************************
382 * get_absolute_pathname
383 *
384 * return the unique pathname in the system
385 *
386 * MD : Fixed this function so it uses realpath, dealing well with
387 * forgotten cases (.. were not used correctly before).
388 *
389 ****************************************************************************/
390 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
391 {
392 abs_pathname[0] = '\0';
393
394 if (realpath(pathname, abs_pathname) != NULL)
395 return;
396 else
397 {
398 /* error, return the original path unmodified */
399 strcpy(abs_pathname, pathname);
400 return;
401 }
402 return;
403 }
404
405 /* Search for something like : .*_.*
406 *
407 * The left side is the name, the right side is the number.
408 * Exclude leading /.
409 * Exclude flight- prefix.
410 */
411
/* Split a tracefile's relative path into its components.
 *
 * Kernel tracefiles look like "name_N" (N = cpu number); a "flight-"
 * prefix is stripped. Paths with no '_' are treated as userspace
 * tracefiles of the form ".../.../name-TID.PGID.CREATION".
 *
 * raw_name : relative path; MUTATED for userspace names ('.' separators
 *            are overwritten with ' ' while parsing).
 * name     : out, tracefile group name as a quark.
 * num      : out, cpu number (0 for userspace tracefiles).
 * tid/pgid/creation : out, parsed only for userspace tracefiles.
 *
 * Return value : 0 on success, -1 if the name does not parse.
 */
static int get_tracefile_name_number(gchar *raw_name,
                                     GQuark *name,
                                     guint *num,
                                     gulong *tid,
                                     gulong *pgid,
                                     guint64 *creation)
{
  guint raw_name_len = strlen(raw_name);
  gchar char_name[PATH_MAX];
  int i;
  int underscore_pos;
  long int cpu_num;
  gchar *endptr;
  gchar *tmpptr;

  /* skip leading / */
  for(i = 0; i < raw_name_len-1;i++) {
    if(raw_name[i] != '/')
      break;
  }
  raw_name = &raw_name[i];
  raw_name_len = strlen(raw_name);

  /* Locate the last '_' : it separates the group name from the cpu
   * number in kernel tracefile names. */
  for(i=raw_name_len-1;i>=0;i--) {
    if(raw_name[i] == '_') break;
  }
  if(i==-1) { /* Either not found or name length is 0 */
    /* This is a userspace tracefile */
    strncpy(char_name, raw_name, raw_name_len);
    char_name[raw_name_len] = '\0';
    *name = g_quark_from_string(char_name);
    *num = 0;  /* unknown cpu */
    /* Skip the first two '/'-separated path components... */
    for(i=0;i<raw_name_len;i++) {
      if(raw_name[i] == '/') {
        break;
      }
    }
    i++;
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '/') {
        break;
      }
    }
    i++;
    /* ...then find the '-' that precedes the "TID.PGID.CREATION" tail. */
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '-') {
        break;
      }
    }
    if(i == raw_name_len) return -1;
    i++;
    /* Parse TID up to the first '.' (the '.' is blanked in place so
     * strtoul stops there). */
    tmpptr = &raw_name[i];
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '.') {
        raw_name[i] = ' ';
        break;
      }
    }
    *tid = strtoul(tmpptr, &endptr, 10);
    if(endptr == tmpptr)
      return -1; /* No digit */
    if(*tid == ULONG_MAX)
      return -1; /* underflow / overflow */
    i++;
    /* Parse PGID up to the next '.' (same in-place blanking). */
    tmpptr = &raw_name[i];
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '.') {
        raw_name[i] = ' ';
        break;
      }
    }
    *pgid = strtoul(tmpptr, &endptr, 10);
    if(endptr == tmpptr)
      return -1; /* No digit */
    if(*pgid == ULONG_MAX)
      return -1; /* underflow / overflow */
    i++;
    /* The remainder of the name is the creation timestamp. */
    tmpptr = &raw_name[i];
    *creation = strtoull(tmpptr, &endptr, 10);
    if(endptr == tmpptr)
      return -1; /* No digit */
    if(*creation == G_MAXUINT64)
      return -1; /* underflow / overflow */
  } else {
    /* Kernel tracefile: "name_N", optionally "flight-name_N". */
    underscore_pos = i;

    cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);

    if(endptr == raw_name+underscore_pos+1)
      return -1; /* No digit */
    if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
      return -1; /* underflow / overflow */

    if (!strncmp(raw_name, "flight-", sizeof("flight-") - 1)) {
      raw_name += sizeof("flight-") - 1;
      underscore_pos -= sizeof("flight-") - 1;
    }
    strncpy(char_name, raw_name, underscore_pos);
    char_name[underscore_pos] = '\0';
    *name = g_quark_from_string(char_name);
    *num = cpu_num;
  }


  return 0;
}
518
519
/* Return a pointer to the trace's tracefile groups GData, keyed by
 * tracefile group name (e.g. "control/metadata"). */
GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
{
  return &trace->tracefiles;
}
524
525
526 void compute_tracefile_group(GQuark key_id,
527 GArray *group,
528 struct compute_tracefile_group_args *args)
529 {
530 unsigned int i;
531 LttTracefile *tf;
532
533 for(i=0; i<group->len; i++) {
534 tf = &g_array_index (group, LttTracefile, i);
535 if(tf->cpu_online)
536 args->func(tf, args->func_args);
537 }
538 }
539
540
/* GData destroy notifier for a tracefile group: free the marker data
 * (shared by all tracefiles of the group, so freed once via element 0),
 * close every online tracefile, then free the array itself. */
static void ltt_tracefile_group_destroy(gpointer data)
{
  GArray *group = (GArray *)data;
  unsigned int i;
  LttTracefile *tf;

  /* mdata is shared across the group; destroy it exactly once. */
  if (group->len > 0)
    destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
  for(i=0; i<group->len; i++) {
    tf = &g_array_index (group, LttTracefile, i);
    if(tf->cpu_online)
      ltt_tracefile_close(tf);
  }
  g_array_free(group, TRUE);
}
556
557 static __attribute__ ((__unused__)) gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
558 {
559 GArray *group = (GArray *)data;
560 unsigned int i;
561 LttTracefile *tf;
562
563 for(i=0; i<group->len; i++) {
564 tf = &g_array_index (group, LttTracefile, i);
565 if(tf->cpu_online)
566 return 1;
567 }
568 return 0;
569 }
570
571
572 /* Open each tracefile under a specific directory. Put them in a
573 * GData : permits to access them using their tracefile group pathname.
574 * i.e. access control/modules tracefile group by index :
575 * "control/module".
576 *
577 * relative path is the path relative to the trace root
578 * root path is the full path
579 *
580 * A tracefile group is simply an array where all the per cpu tracefiles sit.
581 */
582
/* Recursively open every regular file under root_path as a tracefile
 * and register it into trace->tracefiles, grouped by its parsed group
 * name (one GArray per group, indexed by cpu number).
 *
 * root_path     : full path of the directory to scan.
 * relative_path : same directory relative to the trace root; used to
 *                 derive group names.
 *
 * Return value : 0 on success, ENOENT if root_path cannot be opened.
 * Per-file failures (bad name, bad magic) are skipped, not fatal.
 */
static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
{
  DIR *dir = opendir(root_path);
  struct dirent *entry;
  struct stat stat_buf;
  int ret, i;
  struct marker_data *mdata;

  gchar path[PATH_MAX];
  int path_len;
  gchar *path_ptr;

  int rel_path_len;
  gchar rel_path[PATH_MAX];
  gchar *rel_path_ptr;
  LttTracefile tmp_tf;

  if(dir == NULL) {
    perror(root_path);
    return ENOENT;
  }

  /* Build "root_path/" once and keep path_ptr just past the slash so
   * each directory entry name can be appended in place.
   * NOTE(review): no explicit bound check before appending the '/';
   * assumes root_path is shorter than PATH_MAX-1 — confirm callers. */
  strncpy(path, root_path, PATH_MAX-1);
  path_len = strlen(path);
  path[path_len] = '/';
  path_len++;
  path_ptr = path + path_len;

  /* Same scheme for the trace-relative path. */
  strncpy(rel_path, relative_path, PATH_MAX-1);
  rel_path_len = strlen(rel_path);
  rel_path[rel_path_len] = '/';
  rel_path_len++;
  rel_path_ptr = rel_path + rel_path_len;

  while((entry = readdir(dir)) != NULL) {

    /* Skip hidden entries, "." and "..". */
    if(entry->d_name[0] == '.') continue;

    strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
    strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);

    ret = stat(path, &stat_buf);
    if(ret == -1) {
      perror(path);
      continue;
    }

    g_debug("Tracefile file or directory : %s\n", path);

  //  if(strcmp(rel_path, "/eventdefs") == 0) continue;

    if(S_ISDIR(stat_buf.st_mode)) {

      g_debug("Entering subdirectory...\n");
      ret = open_tracefiles(trace, path, rel_path);
      if(ret < 0) continue;
    } else if(S_ISREG(stat_buf.st_mode)) {
      GQuark name;
      guint num;
      gulong tid, pgid;
      guint64 creation;
      GArray *group;
      num = 0;
      tid = pgid = 0;
      creation = 0;
      /* Derive group name / cpu number (and tid/pgid/creation for
       * userspace tracefiles) from the relative path. */
      if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
        continue; /* invalid name */

      g_debug("Opening file.\n");
      if(ltt_tracefile_open(trace, path, &tmp_tf)) {
        g_info("Error opening tracefile %s", path);

        continue; /* error opening the tracefile : bad magic number ? */
      }

      g_debug("Tracefile name is %s and number is %u",
          g_quark_to_string(name), num);

      mdata = NULL;
      tmp_tf.cpu_online = 1;
      tmp_tf.cpu_num = num;
      tmp_tf.name = name;
      tmp_tf.tid = tid;
      tmp_tf.pgid = pgid;
      tmp_tf.creation = creation;
      group = g_datalist_id_get_data(&trace->tracefiles, name);
      if(group == NULL) {
        /* Elements are automatically cleared when the array is allocated.
         * It makes the cpu_online variable set to 0 : cpu offline, by default.
         */
        group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
        g_datalist_id_set_data_full(&trace->tracefiles, name,
                                    group, ltt_tracefile_group_destroy);
        /* First tracefile of the group allocates the shared marker data. */
        mdata = allocate_marker_data();
        if (!mdata)
          g_error("Error in allocating marker data");
      }

      /* Add the per cpu tracefile to the named group */
      unsigned int old_len = group->len;
      if(num+1 > old_len)
        group = g_array_set_size(group, num+1);

      g_assert(group->len > 0);
      /* Existing group: reuse the marker data already attached to it. */
      if (!mdata)
        mdata = g_array_index (group, LttTracefile, 0).mdata;

      g_array_index (group, LttTracefile, num) = tmp_tf;
      /* The event keeps a back-pointer to its (final, in-array) tracefile. */
      g_array_index (group, LttTracefile, num).event.tracefile =
        &g_array_index (group, LttTracefile, num);
      /* Propagate the shared marker data to every slot of the group. */
      for (i = 0; i < group->len; i++)
        g_array_index (group, LttTracefile, i).mdata = mdata;
    }
  }

  closedir(dir);

  return 0;
}
702
703
704 /* Presumes the tracefile is already seeked at the beginning. It makes sense,
705 * because it must be done just after the opening */
/* Presumes the tracefile is already seeked at the beginning. It makes sense,
 * because it must be done just after the opening.
 *
 * Read every event of a metadata tracefile and feed the marker tables:
 * MARKER_ID_SET_MARKER_ID events register a (channel, marker) -> id
 * mapping with type sizes, MARKER_ID_SET_MARKER_FORMAT events attach the
 * format string. Any other event id is an error.
 *
 * Return value : 0 on success, an errno-style code on failure.
 */
static int ltt_process_metadata_tracefile(LttTracefile *tf)
{
  int err;

  while(1) {
    err = ltt_tracefile_read_seek(tf);
    if(err == EPERM) goto seek_error;
    else if(err == ERANGE) break; /* End of tracefile */

    err = ltt_tracefile_read_update_event(tf);
    if(err) goto update_error;

    /* The rules are :
     * It contains only core events :
     *  0 : set_marker_id
     *  1 : set_marker_format
     */
    if(tf->event.event_id >= MARKER_CORE_IDS) {
      /* Should only contain core events */
      g_warning("Error in processing metadata file %s, "
          "should not contain event id %u.", g_quark_to_string(tf->name),
          tf->event.event_id);
      err = EPERM;
      goto event_id_error;
    } else {
      char *pos;
      const char *channel_name, *marker_name, *format;
      uint16_t id;
      guint8 int_size, long_size, pointer_size, size_t_size, alignment;

      switch((enum marker_id)tf->event.event_id) {
        case MARKER_ID_SET_MARKER_ID:
          /* Payload layout: NUL-terminated channel name, NUL-terminated
           * marker name, alignment padding, u16 id, then five u8 type
           * sizes (int, long, pointer, size_t) and the alignment. */
          channel_name = pos = tf->event.data;
          pos += strlen(channel_name) + 1;
          marker_name = pos;
          g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
              channel_name, marker_name);
          pos += strlen(marker_name) + 1;
          /* Skip padding so the u16 id is read at its aligned offset. */
          pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
          id = ltt_get_uint16(LTT_GET_BO(tf), pos);
          g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
              channel_name, marker_name, id);
          pos += sizeof(guint16);
          int_size = *(guint8*)pos;
          pos += sizeof(guint8);
          long_size = *(guint8*)pos;
          pos += sizeof(guint8);
          pointer_size = *(guint8*)pos;
          pos += sizeof(guint8);
          size_t_size = *(guint8*)pos;
          pos += sizeof(guint8);
          alignment = *(guint8*)pos;
          pos += sizeof(guint8);
          marker_id_event(tf->trace,
                          g_quark_from_string(channel_name),
                          g_quark_from_string(marker_name),
                          id, int_size, long_size,
                          pointer_size, size_t_size, alignment);
          break;
        case MARKER_ID_SET_MARKER_FORMAT:
          /* Payload layout: channel name, marker name, format string,
           * each NUL-terminated. */
          channel_name = pos = tf->event.data;
          pos += strlen(channel_name) + 1;
          marker_name = pos;
          g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
              channel_name, marker_name);
          pos += strlen(marker_name) + 1;
          format = pos;
          pos += strlen(format) + 1;
          marker_format_event(tf->trace,
                              g_quark_from_string(channel_name),
                              g_quark_from_string(marker_name),
                              format);
          /* get information from dictionary TODO */
          break;
        default:
          g_warning("Error in processing metadata file %s, "
              "unknown event id %hhu.",
              g_quark_to_string(tf->name),
              tf->event.event_id);
          err = EPERM;
          goto event_id_error;
      }
    }
  }
  return 0;

  /* Error handling */
event_id_error:
update_error:
seek_error:
  g_warning("An error occured in metadata tracefile parsing");
  return err;
}
799
800 /*
801 * Open a trace and return its LttTrace handle.
802 *
803 * pathname must be the directory of the trace
804 */
805
806 LttTrace *ltt_trace_open(const gchar *pathname)
807 {
808 gchar abs_path[PATH_MAX];
809 LttTrace * t;
810 LttTracefile *tf;
811 GArray *group;
812 unsigned int i;
813 int ret;
814 ltt_subbuffer_header_t *header;
815 DIR *dir;
816 struct dirent *entry;
817 struct stat stat_buf;
818 gchar path[PATH_MAX];
819
820 t = g_new(LttTrace, 1);
821 if(!t) goto alloc_error;
822
823 get_absolute_pathname(pathname, abs_path);
824 t->pathname = g_quark_from_string(abs_path);
825
826 g_datalist_init(&t->tracefiles);
827
828 /* Test to see if it looks like a trace */
829 dir = opendir(abs_path);
830 if(dir == NULL) {
831 perror(abs_path);
832 goto open_error;
833 }
834 while((entry = readdir(dir)) != NULL) {
835 strcpy(path, abs_path);
836 strcat(path, "/");
837 strcat(path, entry->d_name);
838 ret = stat(path, &stat_buf);
839 if(ret == -1) {
840 perror(path);
841 continue;
842 }
843 }
844 closedir(dir);
845
846 /* Open all the tracefiles */
847 t->start_freq= 0;
848 if(open_tracefiles(t, abs_path, "")) {
849 g_warning("Error opening tracefile %s", abs_path);
850 goto find_error;
851 }
852
853 /* Parse each trace metadata_N files : get runtime fac. info */
854 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
855 if(group == NULL) {
856 g_warning("Trace %s has no metadata tracefile", abs_path);
857 goto find_error;
858 }
859
860 /*
861 * Get the trace information for the metadata_0 tracefile.
862 * Getting a correct trace start_time and start_tsc is insured by the fact
863 * that no subbuffers are supposed to be lost in the metadata channel.
864 * Therefore, the first subbuffer contains the start_tsc timestamp in its
865 * buffer header.
866 */
867 g_assert(group->len > 0);
868 tf = &g_array_index (group, LttTracefile, 0);
869 header = (ltt_subbuffer_header_t *)tf->buffer.head;
870 ret = parse_trace_header(header, tf, t);
871 g_assert(!ret);
872
873 t->num_cpu = group->len;
874 t->drift = 1.;
875 t->offset = 0.;
876
877 //ret = allocate_marker_data(t);
878 //if (ret)
879 // g_error("Error in allocating marker data");
880
881 for(i=0; i<group->len; i++) {
882 tf = &g_array_index (group, LttTracefile, i);
883 if (tf->cpu_online)
884 if(ltt_process_metadata_tracefile(tf))
885 goto find_error;
886 // goto metadata_error;
887 }
888
889 return t;
890
891 /* Error handling */
892 //metadata_error:
893 // destroy_marker_data(t);
894 find_error:
895 g_datalist_clear(&t->tracefiles);
896 open_error:
897 g_free(t);
898 alloc_error:
899 return NULL;
900
901 }
902
903 /* Open another, completely independant, instance of a trace.
904 *
905 * A read on this new instance will read the first event of the trace.
906 *
907 * When we copy a trace, we want all the opening actions to happen again :
908 * the trace will be reopened and totally independant from the original.
909 * That's why we call ltt_trace_open.
910 */
/* Open another, completely independant, instance of a trace.
 *
 * A read on this new instance will read the first event of the trace.
 *
 * When we copy a trace, we want all the opening actions to happen again :
 * the trace will be reopened and totally independant from the original.
 * That's why we call ltt_trace_open. Returns NULL on failure. */
LttTrace *ltt_trace_copy(LttTrace *self)
{
  return ltt_trace_open(g_quark_to_string(self->pathname));
}
915
916 /*
917 * Close a trace
918 */
919
/* Close a trace : clearing the tracefiles GData triggers
 * ltt_tracefile_group_destroy on every group, which closes the
 * individual tracefiles before the handle itself is freed. */
void ltt_trace_close(LttTrace *t)
{
  g_datalist_clear(&t->tracefiles);
  g_free(t);
}
925
926
927 /*****************************************************************************
928 * Get the start time and end time of the trace
929 ****************************************************************************/
930
931 void ltt_tracefile_time_span_get(LttTracefile *tf,
932 LttTime *start, LttTime *end)
933 {
934 int err;
935
936 err = map_block(tf, 0);
937 if(unlikely(err)) {
938 g_error("Can not map block");
939 *start = ltt_time_infinite;
940 } else
941 *start = tf->buffer.begin.timestamp;
942
943 err = map_block(tf, tf->num_blocks - 1); /* Last block */
944 if(unlikely(err)) {
945 g_error("Can not map block");
946 *end = ltt_time_zero;
947 } else
948 *end = tf->buffer.end.timestamp;
949
950 g_assert(end->tv_sec <= G_MAXUINT);
951 }
952
/* Closure passed through g_datalist_foreach to group_time_span_get:
 * accumulates the earliest start and latest end over all groups. */
struct tracefile_time_span_get_args {
  LttTrace *t;      /* trace being inspected (currently unused by callback) */
  LttTime *start;   /* in/out: minimum start time seen so far */
  LttTime *end;     /* in/out: maximum end time seen so far */
};
958
959 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
960 {
961 struct tracefile_time_span_get_args *args =
962 (struct tracefile_time_span_get_args*)user_data;
963
964 GArray *group = (GArray *)data;
965 unsigned int i;
966 LttTracefile *tf;
967 LttTime tmp_start;
968 LttTime tmp_end;
969
970 for(i=0; i<group->len; i++) {
971 tf = &g_array_index (group, LttTracefile, i);
972 if(tf->cpu_online) {
973 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
974 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
975 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
976 }
977 }
978 }
979
980 /* return the start and end time of a trace */
981
982 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
983 {
984 LttTime min_start = ltt_time_infinite;
985 LttTime max_end = ltt_time_zero;
986 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
987
988 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
989
990 if(start != NULL) *start = min_start;
991 if(end != NULL) *end = max_end;
992
993 }
994
995
996 /* Seek to the first event in a tracefile that has a time equal or greater than
997 * the time passed in parameter.
998 *
999 * If the time parameter is outside the tracefile time span, seek to the first
1000 * event or if after, return ERANGE.
1001 *
1002 * If the time parameter is before the first event, we have to seek specially to
1003 * there.
1004 *
1005 * If the time is after the end of the trace, return ERANGE.
1006 *
1007 * Do a binary search to find the right block, then a sequential search in the
1008 * block to find the event.
1009 *
1010 * In the special case where the time requested fits inside a block that has no
1011 * event corresponding to the requested time, the first event of the next block
1012 * will be seeked.
1013 *
1014 * IMPORTANT NOTE : // FIXME everywhere...
1015 *
1016 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
1017 * you will jump over an event if you do.
1018 *
1019 * Return value : 0 : no error, the tf->event can be used
1020 * ERANGE : time if after the last event of the trace
1021 * otherwise : this is an error.
1022 *
1023 * */
1024
/* Seek to the first event with event_time >= time.
 *
 * Strategy: handle the out-of-span cases first (before first event ->
 * read first event; after last event -> ERANGE), then binary search the
 * block whose [begin, end] timestamps bracket the requested time, then
 * scan that block sequentially.
 *
 * Return value : 0 : no error, the tf->event can be used
 *                ERANGE : time is after the last event of the trace
 *                otherwise : this is an error.
 */
int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
{
  int ret = 0;
  int err;
  unsigned int block_num, high, low;

  /* seek at the beginning of trace */
  err = map_block(tf, 0);  /* First block */
  if(unlikely(err)) {
    /* NOTE(review): g_error() aborts by default, so the goto below is
     * normally unreachable — confirm whether a non-fatal log level was
     * intended. */
    g_error("Can not map block");
    goto fail;
  }

  /* If the time is lower or equal the beginning of the trace,
   * go to the first event. */
  if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
    ret = ltt_tracefile_read(tf);
    if(ret == ERANGE) goto range;
    else if (ret) goto fail;
    goto found; /* There is either no event in the trace or the event points
                   to the first event in the trace */
  }

  err = map_block(tf, tf->num_blocks - 1);  /* Last block */
  if(unlikely(err)) {
    g_error("Can not map block");
    goto fail;
  }

  /* If the time is after the end of the trace, return ERANGE. */
  if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
    goto range;
  }

  /* Binary search the block */
  high = tf->num_blocks - 1;
  low = 0;

  while(1) {
    block_num = ((high-low) / 2) + low;

    err = map_block(tf, block_num);
    if(unlikely(err)) {
      g_error("Can not map block");
      goto fail;
    }
    if(high == low) {
      /* We cannot divide anymore : this is what would happen if the time
       * requested was exactly between two consecutive buffers'end and start
       * timestamps. This is also what would happend if we didn't deal with out
       * of span cases prior in this function. */
      /* The event is right in the buffer!
         (or in the next buffer first event) */
      while(1) {
        ret = ltt_tracefile_read(tf);
        if(ret == ERANGE) goto range; /* ERANGE or EPERM */
        else if(ret) goto fail;

        if(ltt_time_compare(time, tf->event.event_time) <= 0)
          goto found;
      }

    } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
      /* go to lower part */
      high = block_num - 1;
    } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
      /* go to higher part */
      low = block_num + 1;
    } else {/* The event is right in the buffer!
               (or in the next buffer first event) */
      while(1) {
        ret = ltt_tracefile_read(tf);
        if(ret == ERANGE) goto range; /* ERANGE or EPERM */
        else if(ret) goto fail;

        if(ltt_time_compare(time, tf->event.event_time) <= 0)
          break;
      }
      goto found;
    }
  }

found:
  return 0;
range:
  return ERANGE;

  /* Error handling */
fail:
  g_error("ltt_tracefile_seek_time failed on tracefile %s",
      g_quark_to_string(tf->name));
  return EPERM;
}
1118
1119 /* Seek to a position indicated by an LttEventPosition
1120 */
1121
1122 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1123 {
1124 int err;
1125
1126 if(ep->tracefile != tf) {
1127 goto fail;
1128 }
1129
1130 err = map_block(tf, ep->block);
1131 if(unlikely(err)) {
1132 g_error("Can not map block");
1133 goto fail;
1134 }
1135
1136 tf->event.offset = ep->offset;
1137
1138 /* Put back the event real tsc */
1139 tf->event.tsc = ep->tsc;
1140 tf->buffer.tsc = ep->tsc;
1141
1142 err = ltt_tracefile_read_update_event(tf);
1143 if(err) goto fail;
1144
1145 /* deactivate this, as it does nothing for now
1146 err = ltt_tracefile_read_op(tf);
1147 if(err) goto fail;
1148 */
1149
1150 return 0;
1151
1152 fail:
1153 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1154 g_quark_to_string(tf->name));
1155 return 1;
1156 }
1157
/*
 * Convert a value in "TSC scale" to a value in nanoseconds
 *
 * ns = tsc * NANOSECONDS_PER_SECOND * freq_scale / start_freq
 *
 * The computation is done in double precision, so cycle counts above 2^53
 * lose low-order bits (53-bit mantissa); the evaluation order of the
 * expression is deliberately left as-is to keep rounding identical.
 */
guint64 tsc_to_uint64(guint32 freq_scale, uint64_t start_freq, guint64 tsc)
{
  return (double) tsc * NANOSECONDS_PER_SECOND * freq_scale / start_freq;
}
1165
1166 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1167 * corresponds to.
1168 */
1169 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1170 {
1171 return ltt_time_from_uint64(tsc_to_uint64(tf->trace->freq_scale,
1172 tf->trace->start_freq, tf->trace->drift * tsc +
1173 tf->trace->offset));
1174 }
1175
/* Calculate the real event time based on the buffer boundaries */
/* NOTE(review): the event parameter is currently unused; the time is
 * derived from the tracefile's running buffer TSC (tf->buffer.tsc). */
LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
{
  return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
}
1181
1182
/* Get the current event of the tracefile : valid until the next read.
 * Returns a pointer into the tracefile structure itself; the caller must
 * not free it, and its contents are overwritten by the next read/seek. */
LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
{
  return &tf->event;
}
1188
1189
1190
1191 /*****************************************************************************
1192 *Function name
1193 * ltt_tracefile_read : Read the next event in the tracefile
1194 *Input params
1195 * t : tracefile
1196 *Return value
1197 *
1198 * Returns 0 if an event can be used in tf->event.
1199 * Returns ERANGE on end of trace. The event in tf->event still can be used
1200 * (if the last block was not empty).
1201 * Returns EPERM on error.
1202 *
1203 * This function does make the tracefile event structure point to the event
1204 * currently pointed to by the tf->event.
1205 *
1206 * Note : you must call a ltt_tracefile_seek to the beginning of the trace to
1207 * reinitialize it after an error if you want results to be coherent.
1208 * It would be the case if a end of trace last buffer has no event : the end
1209 * of trace wouldn't be returned, but an error.
1210 * We make the assumption there is at least one event per buffer.
1211 ****************************************************************************/
1212
1213 int ltt_tracefile_read(LttTracefile *tf)
1214 {
1215 int err;
1216
1217 err = ltt_tracefile_read_seek(tf);
1218 if(err) return err;
1219 err = ltt_tracefile_read_update_event(tf);
1220 if(err) return err;
1221
1222 /* deactivate this, as it does nothing for now
1223 err = ltt_tracefile_read_op(tf);
1224 if(err) return err;
1225 */
1226
1227 return 0;
1228 }
1229
1230 int ltt_tracefile_read_seek(LttTracefile *tf)
1231 {
1232 int err;
1233
1234 /* Get next buffer until we finally have an event, or end of trace */
1235 while(1) {
1236 err = ltt_seek_next_event(tf);
1237 if(unlikely(err == ENOPROTOOPT)) {
1238 return EPERM;
1239 }
1240
1241 /* Are we at the end of the buffer ? */
1242 if(err == ERANGE) {
1243 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1244 return ERANGE;
1245 } else {
1246 /* get next block */
1247 err = map_block(tf, tf->buffer.index + 1);
1248 if(unlikely(err)) {
1249 g_error("Can not map block");
1250 return EPERM;
1251 }
1252 }
1253 } else break; /* We found an event ! */
1254 }
1255
1256 return 0;
1257 }
1258
/* do an operation when reading a new event */

/* This function does nothing for now */
/* Kept compiled out (#if 0) as a placeholder for a future per-event read
 * hook; callers reference it only inside matching deactivated blocks. */
#if 0
int ltt_tracefile_read_op(LttTracefile *tf)
{
  LttEvent *event;

  event = &tf->event;

  /* do event specific operation */

  /* nothing */

  return 0;
}
#endif
1276
/* Hex-dump the raw event header bytes between start_pos and end_pos,
 * 16 bytes per row, each row prefixed with its offset within the mapped
 * buffer. Debug aid, enabled by the a_event_debug flag. */
static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
{
  unsigned int offset = 0;
  int i, j;

  g_printf("Event header (tracefile %s offset %" PRIx64 "):\n",
    g_quark_to_string(ev->tracefile->long_name),
    (uint64_t)ev->tracefile->buffer.offset +
    (long)start_pos - (long)ev->tracefile->buffer.head);

  while (offset < (long)end_pos - (long)start_pos) {
    /* Row prefix: offset of this 16-byte row within the mapped buffer. */
    g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
    g_printf(" ");

    /* Four groups of four bytes; pad with spaces past end_pos. */
    for (i = 0; i < 4 ; i++) {
      for (j = 0; j < 4; j++) {
        if (offset + ((i * 4) + j) <
            (long)end_pos - (long)start_pos)
          g_printf("%02hhX",
            ((char*)start_pos)[offset + ((i * 4) + j)]);
        else
          g_printf(" ");
        g_printf(" ");
      }
      /* NOTE(review): i < 4 is always true inside this loop, so a group
       * separator is printed after every group including the last —
       * possibly intended to be i < 3; confirm desired output format. */
      if (i < 4)
        g_printf(" ");
    }
    offset+=16;
    g_printf("\n");
  }
}
1308
1309
/* same as ltt_tracefile_read, but does not seek to the next event nor call
 * event specific operation.
 *
 * Decodes the event header located at tf->event.offset in the current
 * buffer. The first 32 bits pack the compact timestamp (low tf->tscbits
 * bits) with the event id (remaining high bits); reserved ids 29/30/31
 * announce extended header layouts. Updates tf->event (id, sizes, tsc,
 * event_time, data pointer) and the running tf->buffer.tsc.
 * Always returns 0. */
int ltt_tracefile_read_update_event(LttTracefile *tf)
{
  void * pos;
  LttEvent *event;
  void *pos_aligned;
  guint16 packed_evid; /* event id reader from the 5 bits in header */

  event = &tf->event;
  pos = tf->buffer.head + event->offset;

  /* Read event header */

  /* Align the head */
  pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
  pos_aligned = pos;

  /* Split the packed 32-bit word: high bits are the id, low bits the
   * compact timestamp. */
  event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
  event->event_id = packed_evid = event->timestamp >> tf->tscbits;
  event->timestamp = event->timestamp & tf->tsc_mask;
  pos += sizeof(guint32);

  switch (packed_evid) {
  case 29: /* LTT_RFLAG_ID_SIZE_TSC : 16-bit id, 16/32-bit size, full 64-bit TSC */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    if (event->event_size == 0xFFFF) {
      /* 0xFFFF is an escape value: the real size follows as 32 bits. */
      event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
      pos += sizeof(guint32);
    }
    pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
    tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
    pos += sizeof(guint64);
    break;
  case 30: /* LTT_RFLAG_ID_SIZE : 16-bit id and 16/32-bit size, no full TSC */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    if (event->event_size == 0xFFFF) {
      /* Same 0xFFFF escape as above. */
      event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
      pos += sizeof(guint32);
    }
    break;
  case 31: /* LTT_RFLAG_ID : 16-bit id only; size computed from marker info */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = G_MAXUINT;
    break;
  default:
    /* Compact event: id already extracted from the packed word;
     * size computed later from marker info. */
    event->event_size = G_MAXUINT;
    break;
  }

  if (likely(packed_evid != 29)) {
    /* No extended timestamp */
    /* Extrapolate the full 64-bit TSC from the compact timestamp: if the
     * compact value went backwards, the masked counter wrapped once. */
    if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
      tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
                          + tf->tsc_mask_next_bit)
                              | (guint64)event->timestamp;
    else
      tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
                          | (guint64)event->timestamp;
  }
  event->tsc = tf->buffer.tsc;

  event->event_time = ltt_interpolate_time(tf, event);

  if (a_event_debug)
    print_debug_event_header(event, pos_aligned, pos);

  event->data = pos;

  /*
   * Let ltt_update_event_size update event->data according to the largest
   * alignment within the payload.
   * Get the data size and update the event fields with the current
   * information. */
  ltt_update_event_size(tf);

  return 0;
}
1395
1396
1397 /****************************************************************************
1398 *Function name
1399 * map_block : map a block from the file
1400 *Input Params
1401 * lttdes : ltt trace file
1402 * whichBlock : the block which will be read
1403 *return value
1404 * 0 : success
1405 * EINVAL : lseek fail
1406 * EIO : can not read from the file
1407 ****************************************************************************/
1408
1409 static gint map_block(LttTracefile * tf, guint block_num)
1410 {
1411 int page_size = getpagesize();
1412 ltt_subbuffer_header_t *header;
1413 uint64_t offset;
1414 uint32_t size;
1415 int ret;
1416
1417 g_assert(block_num < tf->num_blocks);
1418
1419 if(tf->buffer.head != NULL) {
1420 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buffer.size))) {
1421 g_warning("unmap size : %u\n",
1422 PAGE_ALIGN(tf->buffer.size));
1423 perror("munmap error");
1424 g_assert(0);
1425 }
1426 }
1427
1428 ret = get_block_offset_size(tf, block_num, &offset, &size);
1429 g_assert(!ret);
1430
1431 g_debug("Map block %u, offset %llu, size %u\n", block_num,
1432 (unsigned long long)offset, (unsigned int)size);
1433
1434 /* Multiple of pages aligned head */
1435 tf->buffer.head = mmap(0, (size_t)size, PROT_READ, MAP_PRIVATE,
1436 tf->fd, (off_t)offset);
1437
1438 if(tf->buffer.head == MAP_FAILED) {
1439 perror("Error in allocating memory for buffer of tracefile");
1440 g_assert(0);
1441 goto map_error;
1442 }
1443 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1444
1445 tf->buffer.index = block_num;
1446
1447 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1448
1449 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1450 &header->cycle_count_begin);
1451 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1452 &header->cycle_count_end);
1453 tf->buffer.offset = offset;
1454 tf->buffer.size = ltt_get_uint32(LTT_GET_BO(tf),
1455 &header->sb_size);
1456 tf->buffer.data_size = ltt_get_uint32(LTT_GET_BO(tf),
1457 &header->data_size);
1458 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1459 tf->event.tsc = tf->buffer.tsc;
1460 tf->buffer.freq = tf->buffer.begin.freq;
1461
1462 g_assert(size == tf->buffer.size);
1463 g_assert(tf->buffer.data_size <= tf->buffer.size);
1464
1465 if (tf->trace->start_freq)
1466 {
1467 tf->buffer.begin.freq = tf->trace->start_freq;
1468 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1469 tf->buffer.begin.cycle_count);
1470 tf->buffer.end.freq = tf->trace->start_freq;
1471 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1472 tf->buffer.end.cycle_count);
1473 }
1474
1475 /* Make the current event point to the beginning of the buffer :
1476 * it means that the event read must get the first event. */
1477 tf->event.tracefile = tf;
1478 tf->event.block = block_num;
1479 tf->event.offset = 0;
1480
1481 if (header->events_lost) {
1482 g_warning("%d events lost so far in tracefile %s at block %u",
1483 (guint)header->events_lost,
1484 g_quark_to_string(tf->long_name),
1485 block_num);
1486 tf->events_lost = header->events_lost;
1487 }
1488 if (header->subbuf_corrupt) {
1489 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1490 (guint)header->subbuf_corrupt,
1491 g_quark_to_string(tf->long_name),
1492 block_num);
1493 tf->subbuf_corrupt = header->subbuf_corrupt;
1494 }
1495
1496 return 0;
1497
1498 map_error:
1499 return -errno;
1500 }
1501
/* Hex + ASCII dump of the current event's payload, 16 bytes per row:
 * row offset, hex bytes in four groups of four, then an ASCII column
 * (non-printable bytes shown as '.'). Debug aid, enabled by a_event_debug. */
static void print_debug_event_data(LttEvent *ev)
{
  unsigned int offset = 0;
  int i, j;

  /* Nothing to print for an empty payload. */
  if (!max(ev->event_size, ev->data_size))
    return;

  g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
    g_quark_to_string(ev->tracefile->long_name),
    (uint64_t)ev->tracefile->buffer.offset
    + (long)ev->data - (long)ev->tracefile->buffer.head);

  while (offset < max(ev->event_size, ev->data_size)) {
    /* Row prefix: offset of this row within the mapped buffer. */
    g_printf("%8lx", (long)ev->data + offset
      - (long)ev->tracefile->buffer.head);
    g_printf(" ");

    /* Hex column: four groups of four bytes, space-padded past the end. */
    for (i = 0; i < 4 ; i++) {
      for (j = 0; j < 4; j++) {
        if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
          g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
        else
          g_printf(" ");
        g_printf(" ");
      }
      /* NOTE(review): i < 4 is always true here; a group separator is
       * printed after every group including the last — confirm whether
       * i < 3 was intended. */
      if (i < 4)
        g_printf(" ");
    }

    g_printf(" ");

    /* ASCII column: printable bytes as-is, everything else as '.'. */
    for (i = 0; i < 4; i++) {
      for (j = 0; j < 4; j++) {
        if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
          if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
            g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
          else
            g_printf(".");
        } else
          g_printf(" ");
      }
    }
    offset+=16;
    g_printf("\n");
  }
}
1549
/* It will update the fields offsets too */
/* Compute the payload size of the current event, adjust tf->event.data for
 * the payload's largest alignment, and set tf->event.data_size. Aborts (via
 * g_error) when the size recorded by the kernel disagrees with the size
 * LTTV computes from the marker description. */
void ltt_update_event_size(LttTracefile *tf)
{
  off_t size = 0;
  struct marker_info *info;

  /* Metadata channel: the core marker events carry variable-length strings,
   * so their size must be computed by hand before marker infos exist. */
  if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
    switch((enum marker_id)tf->event.event_id) {
    case MARKER_ID_SET_MARKER_ID:
      /* channel name + marker name, then a 16-bit id (aligned) and five
       * byte-sized format-description fields. */
      size = strlen((char*)tf->event.data) + 1;
      g_debug("marker %s id set", (char*)tf->event.data + size);
      size += strlen((char*)tf->event.data + size) + 1;
      size += ltt_align(size, sizeof(guint16), tf->alignment);
      size += sizeof(guint16);
      size += sizeof(guint8);
      size += sizeof(guint8);
      size += sizeof(guint8);
      size += sizeof(guint8);
      size += sizeof(guint8);
      break;
    case MARKER_ID_SET_MARKER_FORMAT:
      /* channel name + marker name + format string */
      size = strlen((char*)tf->event.data) + 1;
      g_debug("marker %s format set", (char*)tf->event.data);
      size += strlen((char*)tf->event.data + size) + 1;
      size += strlen((char*)tf->event.data + size) + 1;
      break;
    }
    /* NOTE(review): other marker ids leave size == 0 here — presumably
     * handled by the generic marker-info path below; confirm. */
  }

  info = marker_get_info_from_id(tf->mdata, tf->event.event_id);

  if (tf->event.event_id >= MARKER_CORE_IDS)
    g_assert(info != NULL);

  /* Do not update field offsets of core markers when initially reading the
   * metadata tracefile when the infos about these markers do not exist yet.
   */
  if (likely(info && info->fields)) {
    /* alignment */
    tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
        info->largest_align,
        info->alignment);
    /* size, dynamically computed */
    if (info->size != -1)
      size = info->size;
    else
      size = marker_update_fields_offsets(marker_get_info_from_id(tf->mdata,
          tf->event.event_id), tf->event.data);
  }

  tf->event.data_size = size;

  /* Check consistency between kernel and LTTV structure sizes */
  if(tf->event.event_size == G_MAXUINT) {
    /* Event size too big to fit in the event size field */
    tf->event.event_size = tf->event.data_size;
  }

  if (a_event_debug)
    print_debug_event_data(&tf->event);

  if (tf->event.data_size != tf->event.event_size) {
    /* Inner 'info' intentionally shadows the outer one for the message. */
    struct marker_info *info = marker_get_info_from_id(tf->mdata,
        tf->event.event_id);
    if (!info)
      g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
          g_quark_to_string(tf->name));
    /* NOTE(review): g_error() aborts by default, making the exit(-1) below
     * unreachable; %hhu also truncates event_id if it is wider than a
     * byte — confirm the field's type. */
    g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
        g_quark_to_string(info->name),
        tf->event.event_size, tf->event.data_size);
    exit(-1);
  }
}
1623
1624
/* Take the tf current event offset and use the event id to figure out where is
 * the next event offset.
 *
 * This is an internal function not aiming at being used elsewhere : it will
 * not jump over the current block limits. Please consider using
 * ltt_tracefile_read to do this.
 *
 * Returns 0 on success
 *         ERANGE if we are at the end of the buffer.
 *         ENOPROTOOPT if an error occured when getting the current event size.
 */
static int ltt_seek_next_event(LttTracefile *tf)
{
  int ret = 0;
  void *pos;

  /* seek over the buffer header if we are at the buffer start */
  if(tf->event.offset == 0) {
    tf->event.offset += tf->buffer_header_size;

    /* Header-only buffer: nothing after the header means end of buffer. */
    if(tf->event.offset == tf->buffer.data_size) {
      ret = ERANGE;
    }
    goto found;
  }

  pos = tf->event.data;

  /* NOTE(review): if data_size is an unsigned field this check can never
   * fire — confirm its type before relying on this error path. */
  if(tf->event.data_size < 0) goto error;

  /* The next event starts right after the current event's payload. */
  pos += (size_t)tf->event.data_size;

  tf->event.offset = pos - tf->buffer.head;

  /* Landing exactly on data_size means the buffer is exhausted. */
  if(tf->event.offset == tf->buffer.data_size) {
    ret = ERANGE;
    goto found;
  }
  g_assert(tf->event.offset < tf->buffer.data_size);

found:
  return ret;

error:
  g_error("Error in ltt_seek_next_event for tracefile %s",
      g_quark_to_string(tf->name));
  return ENOPROTOOPT;
}
1673
1674
1675 /*****************************************************************************
1676 *Function name
1677 * ltt_get_int : get an integer number
1678 *Input params
1679 * reverse_byte_order: must we reverse the byte order ?
1680 * size : the size of the integer
1681 * ptr : the data pointer
1682 *Return value
1683 * gint64 : a 64 bits integer
1684 ****************************************************************************/
1685
1686 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
1687 {
1688 gint64 val;
1689
1690 switch(size) {
1691 case 1: val = *((gint8*)data); break;
1692 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
1693 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
1694 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
1695 default: val = ltt_get_int64(reverse_byte_order, data);
1696 g_critical("get_int : integer size %d unknown", size);
1697 break;
1698 }
1699
1700 return val;
1701 }
1702
1703 /*****************************************************************************
1704 *Function name
1705 * ltt_get_uint : get an unsigned integer number
1706 *Input params
1707 * reverse_byte_order: must we reverse the byte order ?
1708 * size : the size of the integer
1709 * ptr : the data pointer
1710 *Return value
1711 * guint64 : a 64 bits unsigned integer
1712 ****************************************************************************/
1713
1714 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
1715 {
1716 guint64 val;
1717
1718 switch(size) {
1719 case 1: val = *((gint8*)data); break;
1720 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
1721 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
1722 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
1723 default: val = ltt_get_uint64(reverse_byte_order, data);
1724 g_critical("get_uint : unsigned integer size %d unknown",
1725 size);
1726 break;
1727 }
1728
1729 return val;
1730 }
1731
1732
/* get the node name of the system */
/* Accessor: returns the node_name field; pointer owned by the structure. */
char * ltt_trace_system_description_node_name (LttSystemDescription * s)
{
  return s->node_name;
}
1739
1740
/* get the domain name of the system */
/* Accessor: returns the domain_name field; pointer owned by the structure. */
char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
{
  return s->domain_name;
}
1747
1748
/* get the description of the system */
/* Accessor: returns the description field; pointer owned by the structure. */
char * ltt_trace_system_description_description (LttSystemDescription * s)
{
  return s->description;
}
1755
1756
/* get the NTP corrected start time of the trace */
/* Accessor: returns the trace's start_time member by value. */
LttTime ltt_trace_start_time(LttTrace *t)
{
  return t->start_time;
}
1762
/* get the monotonic start time of the trace */
/* Accessor: returns the TSC-derived start_time_from_tsc member by value. */
LttTime ltt_trace_start_time_monotonic(LttTrace *t)
{
  return t->start_time_from_tsc;
}
1768
1769 static __attribute__ ((__unused__)) LttTracefile *ltt_tracefile_new()
1770 {
1771 LttTracefile *tf;
1772 tf = g_new(LttTracefile, 1);
1773 tf->event.tracefile = tf;
1774 return tf;
1775 }
1776
/* Free a tracefile allocated with ltt_tracefile_new. Does not release any
 * mapped buffer or file descriptor the structure may reference. */
static __attribute__ ((__unused__)) void ltt_tracefile_destroy(LttTracefile *tf)
{
  g_free(tf);
}
1781
/* Shallow structure copy: pointer members (buffer head, event.tracefile)
 * are copied as-is and still refer to the source's resources. */
static __attribute__ ((__unused__)) void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
{
  *dest = *src;
}
1786
/* Before library loading... */

/* Constructor run at shared-library load time: interns the quark used to
 * recognize the "metadata" tracefile by name throughout this file. */
static __attribute__((constructor)) void init(void)
{
  LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
}
This page took 0.105199 seconds and 4 git commands to generate.