1d6f2a8646619430a3751ff316a08eedf173aae3
[lttv.git] / ltt / tracefile.c
1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <glib/gprintf.h>
36 #include <malloc.h>
37 #include <sys/mman.h>
38 #include <string.h>
39 #include <ctype.h>
40 #include <inttypes.h>
41
42 // For realpath
43 #include <limits.h>
44 #include <stdlib.h>
45
46
47 #include <ltt/ltt.h>
48 #include "ltt-private.h"
49 #include <ltt/trace.h>
50 #include <ltt/event.h>
51 #include <ltt/ltt-types.h>
52 #include <ltt/marker.h>
53
54 #define DEFAULT_N_BLOCKS 32
55
56 /* from marker.c */
57 extern long marker_update_fields_offsets(struct marker_info *info, const char *data);
58
59 /* Tracefile names used in this file */
60
61 GQuark LTT_TRACEFILE_NAME_METADATA;
62
63 #ifndef g_open
64 #define g_open open
65 #endif
66
67
68 #define __UNUSED__ __attribute__((__unused__))
69
70 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
71
72 #ifndef g_debug
73 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
74 #endif
75
76 #define g_close close
77
78 /* Those macros must be called from within a function where page_size is a known
79 * variable */
80 #define PAGE_MASK (~(page_size-1))
81 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
82
83 /* set the offset of the fields belonging to the event,
84 need the information of the archecture */
85 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
86 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
87
88 /* map a fixed size or a block information from the file (fd) */
89 static gint map_block(LttTracefile * tf, guint block_num);
90
91 /* calculate nsec per cycles for current block */
92 #if 0
93 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
94 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
95 #endif //0
96
97 /* go to the next event */
98 static int ltt_seek_next_event(LttTracefile *tf);
99
100 static int open_tracefiles(LttTrace *trace, gchar *root_path,
101 gchar *relative_path);
102 static int ltt_process_metadata_tracefile(LttTracefile *tf);
103 static void ltt_tracefile_time_span_get(LttTracefile *tf,
104 LttTime *start, LttTime *end);
105 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
106 static gint map_block(LttTracefile * tf, guint block_num);
107 static void ltt_update_event_size(LttTracefile *tf);
108
109 /* Enable event debugging */
110 static int a_event_debug = 0;
111
/* Globally enable (non-zero state) or disable (0) event debugging output,
 * by setting the file-scope a_event_debug flag. */
void ltt_event_debug(int state)
{
  a_event_debug = state;
}
116
117 /* trace can be NULL
118 *
119 * Return value : 0 success, 1 bad tracefile
120 */
121 static int parse_trace_header(ltt_subbuffer_header_t *header,
122 LttTracefile *tf, LttTrace *t)
123 {
124 if (header->magic_number == LTT_MAGIC_NUMBER)
125 tf->reverse_bo = 0;
126 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
127 tf->reverse_bo = 1;
128 else /* invalid magic number, bad tracefile ! */
129 return 1;
130
131 if(t) {
132 t->ltt_major_version = header->major_version;
133 t->ltt_minor_version = header->minor_version;
134 t->arch_size = header->arch_size;
135 }
136 tf->alignment = header->alignment;
137
138 /* Get float byte order : might be different from int byte order
139 * (or is set to 0 if the trace has no float (kernel trace)) */
140 tf->float_word_order = 0;
141
142 switch(header->major_version) {
143 case 0:
144 case 1:
145 g_warning("Unsupported trace version : %hhu.%hhu",
146 header->major_version, header->minor_version);
147 return 1;
148 break;
149 case 2:
150 switch(header->minor_version) {
151 case 5:
152 {
153 struct ltt_subbuffer_header_2_5 *vheader = header;
154 tf->buffer_header_size = ltt_subbuffer_header_size();
155 tf->tscbits = 27;
156 tf->eventbits = 5;
157 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
158 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
159
160 if(t) {
161 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
162 &vheader->start_freq);
163 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
164 &vheader->freq_scale);
165 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
166 &vheader->cycle_count_begin);
167 t->start_monotonic = 0;
168 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
169 &vheader->start_time_sec);
170 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
171 &vheader->start_time_usec);
172 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
173
174 t->start_time_from_tsc = ltt_time_from_uint64(
175 (double)t->start_tsc
176 * 1000000000.0 * tf->trace->freq_scale
177 / (double)t->start_freq);
178 }
179 }
180 break;
181 default:
182 g_warning("Unsupported trace version : %hhu.%hhu",
183 header->major_version, header->minor_version);
184 return 1;
185 }
186 break;
187 default:
188 g_warning("Unsupported trace version : %hhu.%hhu",
189 header->major_version, header->minor_version);
190 return 1;
191 }
192 return 0;
193 }
194
195 int get_block_offset_size(LttTracefile *tf, guint block_num,
196 uint64_t *offset, uint32_t *size)
197 {
198 uint64_t offa, offb;
199
200 if (unlikely(block_num >= tf->num_blocks))
201 return -1;
202
203 offa = g_array_index(tf->buf_index, uint64_t, block_num);
204 if (likely(block_num < tf->num_blocks - 1))
205 offb = g_array_index(tf->buf_index, uint64_t, block_num + 1);
206 else
207 offb = tf->file_size;
208 *offset = offa;
209 *size = offb - offa;
210 return 0;
211 }
212
213 int ltt_trace_create_block_index(LttTracefile *tf)
214 {
215 int page_size = getpagesize();
216 uint64_t offset = 0;
217 unsigned long i = 0;
218 unsigned int header_map_size = PAGE_ALIGN(ltt_subbuffer_header_size());
219
220 tf->buf_index = g_array_sized_new(FALSE, TRUE, sizeof(uint64_t),
221 DEFAULT_N_BLOCKS);
222
223 g_assert(tf->buf_index->len == i);
224
225 while (offset < tf->file_size) {
226 ltt_subbuffer_header_t *header;
227 uint64_t *off;
228
229 tf->buf_index = g_array_set_size(tf->buf_index, i + 1);
230 off = &g_array_index(tf->buf_index, uint64_t, i);
231 *off = offset;
232
233 /* map block header */
234 header = mmap(0, header_map_size, PROT_READ,
235 MAP_PRIVATE, tf->fd, (off_t)offset);
236 if(header == MAP_FAILED) {
237 perror("Error in allocating memory for buffer of tracefile");
238 return -1;
239 }
240
241 /* read len, offset += len */
242 offset += ltt_get_uint32(LTT_GET_BO(tf), &header->sb_size);
243
244 /* unmap block header */
245 if(munmap(header, header_map_size)) {
246 g_warning("unmap size : %u\n", header_map_size);
247 perror("munmap error");
248 return -1;
249 }
250 ++i;
251 }
252 tf->num_blocks = i;
253
254 return 0;
255 }
256
257 /*****************************************************************************
258 *Function name
259 * ltt_tracefile_open : open a trace file, construct a LttTracefile
260 *Input params
261 * t : the trace containing the tracefile
262 * fileName : path name of the trace file
263 * tf : the tracefile structure
264 *Return value
265 * : 0 for success, -1 otherwise.
266 ****************************************************************************/
267
268 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
269 {
270 struct stat lTDFStat; /* Trace data file status */
271 ltt_subbuffer_header_t *header;
272 int page_size = getpagesize();
273
274 //open the file
275 tf->long_name = g_quark_from_string(fileName);
276 tf->trace = t;
277 tf->fd = open(fileName, O_RDONLY);
278 tf->buf_index = NULL;
279 if(tf->fd < 0){
280 g_warning("Unable to open input data file %s\n", fileName);
281 goto end;
282 }
283
284 // Get the file's status
285 if(fstat(tf->fd, &lTDFStat) < 0){
286 g_warning("Unable to get the status of the input data file %s\n", fileName);
287 goto close_file;
288 }
289
290 // Is the file large enough to contain a trace
291 if(lTDFStat.st_size <
292 (off_t)(ltt_subbuffer_header_size())){
293 g_print("The input data file %s does not contain a trace\n", fileName);
294 goto close_file;
295 }
296
297 /* Temporarily map the buffer start header to get trace information */
298 /* Multiple of pages aligned head */
299 tf->buffer.head = mmap(0,
300 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
301 MAP_PRIVATE, tf->fd, 0);
302 if(tf->buffer.head == MAP_FAILED) {
303 perror("Error in allocating memory for buffer of tracefile");
304 goto close_file;
305 }
306 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
307
308 header = (ltt_subbuffer_header_t *)tf->buffer.head;
309
310 if(parse_trace_header(header, tf, NULL)) {
311 g_warning("parse_trace_header error");
312 goto unmap_file;
313 }
314
315 //store the size of the file
316 tf->file_size = lTDFStat.st_size;
317 tf->events_lost = 0;
318 tf->subbuf_corrupt = 0;
319
320 if(munmap(tf->buffer.head,
321 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
322 g_warning("unmap size : %zu\n",
323 PAGE_ALIGN(ltt_subbuffer_header_size()));
324 perror("munmap error");
325 g_assert(0);
326 }
327 tf->buffer.head = NULL;
328
329 /* Create block index */
330 ltt_trace_create_block_index(tf);
331
332 //read the first block
333 if(map_block(tf,0)) {
334 perror("Cannot map block for tracefile");
335 goto close_file;
336 }
337
338 return 0;
339
340 /* Error */
341 unmap_file:
342 if(munmap(tf->buffer.head,
343 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
344 g_warning("unmap size : %zu\n",
345 PAGE_ALIGN(ltt_subbuffer_header_size()));
346 perror("munmap error");
347 g_assert(0);
348 }
349 close_file:
350 close(tf->fd);
351 end:
352 if (tf->buf_index)
353 g_array_free(tf->buf_index, TRUE);
354 return -1;
355 }
356
357
358 /*****************************************************************************
359 *Function name
360 * ltt_tracefile_close: close a trace file,
361 *Input params
362 * t : tracefile which will be closed
363 ****************************************************************************/
364
365 static void ltt_tracefile_close(LttTracefile *t)
366 {
367 int page_size = getpagesize();
368
369 if(t->buffer.head != NULL)
370 if(munmap(t->buffer.head, PAGE_ALIGN(t->buffer.size))) {
371 g_warning("unmap size : %u\n",
372 PAGE_ALIGN(t->buffer.size));
373 perror("munmap error");
374 g_assert(0);
375 }
376
377 close(t->fd);
378 if (t->buf_index)
379 g_array_free(t->buf_index, TRUE);
380 }
381
382 /****************************************************************************
383 * get_absolute_pathname
384 *
385 * return the unique pathname in the system
386 *
387 * MD : Fixed this function so it uses realpath, dealing well with
388 * forgotten cases (.. were not used correctly before).
389 *
390 ****************************************************************************/
391 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
392 {
393 abs_pathname[0] = '\0';
394
395 if (realpath(pathname, abs_pathname) != NULL)
396 return;
397 else
398 {
399 /* error, return the original path unmodified */
400 strcpy(abs_pathname, pathname);
401 return;
402 }
403 return;
404 }
405
406 /* Search for something like : .*_.*
407 *
408 * The left side is the name, the right side is the number.
409 * Exclude leading /.
410 * Exclude flight- prefix.
411 */
412
/* Search for something like : .*_.*
 *
 * The left side is the name, the right side is the number.
 * Exclude leading /.
 * Exclude flight- prefix.
 *
 * Kernel tracefiles look like "name_cpunum". Names without a trailing
 * "_num" are treated as userspace tracefiles, whose path encodes a
 * "...-tid.pgid.creation" suffix instead.
 * On success fills name and num (plus tid/pgid/creation for the userspace
 * form) and returns 0; returns -1 when the name cannot be parsed.
 *
 * NOTE(review): raw_name is modified in place ('.' separators are replaced
 * by spaces while parsing the userspace form).
 */
static int get_tracefile_name_number(gchar *raw_name,
                                     GQuark *name,
                                     guint *num,
                                     gulong *tid,
                                     gulong *pgid,
                                     guint64 *creation)
{
  guint raw_name_len = strlen(raw_name);
  gchar char_name[PATH_MAX];
  int i;
  int underscore_pos;
  long int cpu_num;
  gchar *endptr;
  gchar *tmpptr;

  /* skip leading / */
  for(i = 0; i < raw_name_len-1;i++) {
    if(raw_name[i] != '/')
      break;
  }
  raw_name = &raw_name[i];
  raw_name_len = strlen(raw_name);

  /* find the last underscore, which separates the name from the cpu number */
  for(i=raw_name_len-1;i>=0;i--) {
    if(raw_name[i] == '_') break;
  }
  if(i==-1) { /* Either not found or name length is 0 */
    /* This is a userspace tracefile */
    strncpy(char_name, raw_name, raw_name_len);
    char_name[raw_name_len] = '\0';
    *name = g_quark_from_string(char_name);
    *num = 0;  /* unknown cpu */
    /* skip two path components, then find the '-' introducing the
     * tid.pgid.creation suffix */
    for(i=0;i<raw_name_len;i++) {
      if(raw_name[i] == '/') {
        break;
      }
    }
    i++;
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '/') {
        break;
      }
    }
    i++;
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '-') {
        break;
      }
    }
    if(i == raw_name_len) return -1;
    i++;
    tmpptr = &raw_name[i];
    /* tid : digits up to the next '.' (replaced by a space to terminate
     * strtoul's scan) */
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '.') {
        raw_name[i] = ' ';
        break;
      }
    }
    *tid = strtoul(tmpptr, &endptr, 10);
    if(endptr == tmpptr)
      return -1; /* No digit */
    if(*tid == ULONG_MAX)
      return -1; /* underflow / overflow */
    i++;
    tmpptr = &raw_name[i];
    /* pgid : digits up to the next '.' */
    for(;i<raw_name_len;i++) {
      if(raw_name[i] == '.') {
        raw_name[i] = ' ';
        break;
      }
    }
    *pgid = strtoul(tmpptr, &endptr, 10);
    if(endptr == tmpptr)
      return -1; /* No digit */
    if(*pgid == ULONG_MAX)
      return -1; /* underflow / overflow */
    i++;
    tmpptr = &raw_name[i];
    /* creation timestamp : the remaining digits */
    *creation = strtoull(tmpptr, &endptr, 10);
    if(endptr == tmpptr)
      return -1; /* No digit */
    if(*creation == G_MAXUINT64)
      return -1; /* underflow / overflow */
  } else {
    /* Kernel tracefile : "name_cpunum" */
    underscore_pos = i;

    cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);

    if(endptr == raw_name+underscore_pos+1)
      return -1; /* No digit */
    if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
      return -1; /* underflow / overflow */

    /* strip the flight-recorder prefix from the group name */
    if (!strncmp(raw_name, "flight-", sizeof("flight-") - 1)) {
      raw_name += sizeof("flight-") - 1;
      underscore_pos -= sizeof("flight-") - 1;
    }
    strncpy(char_name, raw_name, underscore_pos);
    char_name[underscore_pos] = '\0';
    *name = g_quark_from_string(char_name);
    *num = cpu_num;
  }


  return 0;
}
519
520
/* Return a pointer to the trace's tracefile-group datalist (groups keyed by
 * tracefile group name, each value a GArray of per-cpu LttTracefile). */
GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
{
  return &trace->tracefiles;
}
525
526
527 void compute_tracefile_group(GQuark key_id,
528 GArray *group,
529 struct compute_tracefile_group_args *args)
530 {
531 unsigned int i;
532 LttTracefile *tf;
533
534 for(i=0; i<group->len; i++) {
535 tf = &g_array_index (group, LttTracefile, i);
536 if(tf->cpu_online)
537 args->func(tf, args->func_args);
538 }
539 }
540
541
542 static void ltt_tracefile_group_destroy(gpointer data)
543 {
544 GArray *group = (GArray *)data;
545 unsigned int i;
546 LttTracefile *tf;
547
548 if (group->len > 0)
549 destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
550 for(i=0; i<group->len; i++) {
551 tf = &g_array_index (group, LttTracefile, i);
552 if(tf->cpu_online)
553 ltt_tracefile_close(tf);
554 }
555 g_array_free(group, TRUE);
556 }
557
558 static __attribute__ ((__unused__)) gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
559 {
560 GArray *group = (GArray *)data;
561 unsigned int i;
562 LttTracefile *tf;
563
564 for(i=0; i<group->len; i++) {
565 tf = &g_array_index (group, LttTracefile, i);
566 if(tf->cpu_online)
567 return 1;
568 }
569 return 0;
570 }
571
572
573 /* Open each tracefile under a specific directory. Put them in a
574 * GData : permits to access them using their tracefile group pathname.
575 * i.e. access control/modules tracefile group by index :
576 * "control/module".
577 *
578 * relative path is the path relative to the trace root
579 * root path is the full path
580 *
581 * A tracefile group is simply an array where all the per cpu tracefiles sit.
582 */
583
/* Recursively open every tracefile under root_path, grouping per-cpu files
 * into GArrays stored in trace->tracefiles keyed by group name.
 *
 * root_path     : full path of the directory to scan
 * relative_path : same directory relative to the trace root (used for
 *                 name parsing and group keys)
 *
 * Returns 0 on success; files that fail to parse or open are skipped.
 */
static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
{
  DIR *dir = opendir(root_path);
  struct dirent *entry;
  struct stat stat_buf;
  int ret, i;
  struct marker_data *mdata;

  gchar path[PATH_MAX];      /* absolute path of the current entry */
  int path_len;
  gchar *path_ptr;

  int rel_path_len;
  gchar rel_path[PATH_MAX];  /* entry path relative to the trace root */
  gchar *rel_path_ptr;
  LttTracefile tmp_tf;

  if(dir == NULL) {
    perror(root_path);
    /* NOTE(review): returns positive ENOENT while the recursive call site
     * below only treats ret < 0 as an error — confirm intended. */
    return ENOENT;
  }

  /* Build "root_path/" and "relative_path/" prefixes once; the entry name
   * is copied after the trailing slash on each iteration. */
  strncpy(path, root_path, PATH_MAX-1);
  path_len = strlen(path);
  path[path_len] = '/';
  path_len++;
  path_ptr = path + path_len;

  strncpy(rel_path, relative_path, PATH_MAX-1);
  rel_path_len = strlen(rel_path);
  rel_path[rel_path_len] = '/';
  rel_path_len++;
  rel_path_ptr = rel_path + rel_path_len;

  while((entry = readdir(dir)) != NULL) {

    /* skip hidden files and the "." / ".." entries */
    if(entry->d_name[0] == '.') continue;

    strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
    strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);

    ret = stat(path, &stat_buf);
    if(ret == -1) {
      perror(path);
      continue;
    }

    g_debug("Tracefile file or directory : %s\n", path);

//    if(strcmp(rel_path, "/eventdefs") == 0) continue;

    if(S_ISDIR(stat_buf.st_mode)) {

      g_debug("Entering subdirectory...\n");
      ret = open_tracefiles(trace, path, rel_path);
      if(ret < 0) continue;
    } else if(S_ISREG(stat_buf.st_mode)) {
      GQuark name;
      guint num;
      gulong tid, pgid;
      guint64 creation;
      GArray *group;
      num = 0;
      tid = pgid = 0;
      creation = 0;
      /* Parse group name + cpu number (or userspace tid/pgid/creation)
       * out of the relative path; skip files that do not match. */
      if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
        continue; /* invalid name */

      g_debug("Opening file.\n");
      if(ltt_tracefile_open(trace, path, &tmp_tf)) {
        g_info("Error opening tracefile %s", path);

        continue; /* error opening the tracefile : bad magic number ? */
      }

      g_debug("Tracefile name is %s and number is %u",
          g_quark_to_string(name), num);

      mdata = NULL;
      tmp_tf.cpu_online = 1;
      tmp_tf.cpu_num = num;
      tmp_tf.name = name;
      tmp_tf.tid = tid;
      tmp_tf.pgid = pgid;
      tmp_tf.creation = creation;
      group = g_datalist_id_get_data(&trace->tracefiles, name);
      if(group == NULL) {
        /* Elements are automatically cleared when the array is allocated.
         * It makes the cpu_online variable set to 0 : cpu offline, by default.
         */
        group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
        g_datalist_id_set_data_full(&trace->tracefiles, name,
            group, ltt_tracefile_group_destroy);
        /* First tracefile of this group : allocate the marker data shared
         * by the whole group. */
        mdata = allocate_marker_data();
        if (!mdata)
          g_error("Error in allocating marker data");
      }

      /* Add the per cpu tracefile to the named group */
      unsigned int old_len = group->len;
      if(num+1 > old_len)
        group = g_array_set_size(group, num+1);

      g_assert(group->len > 0);
      /* Existing group : reuse the marker data already attached to slot 0. */
      if (!mdata)
        mdata = g_array_index (group, LttTracefile, 0).mdata;

      g_array_index (group, LttTracefile, num) = tmp_tf;
      g_array_index (group, LttTracefile, num).event.tracefile =
        &g_array_index (group, LttTracefile, num);
      /* Propagate the shared marker data to every slot of the group. */
      for (i = 0; i < group->len; i++)
        g_array_index (group, LttTracefile, i).mdata = mdata;
    }
  }

  closedir(dir);

  return 0;
}
703
704
705 /* Presumes the tracefile is already seeked at the beginning. It makes sense,
706 * because it must be done just after the opening */
/* Read the whole metadata tracefile and feed each core marker event
 * (marker id / marker format declarations) to the marker registry.
 *
 * Presumes the tracefile is already seeked at the beginning. It makes sense,
 * because it must be done just after the opening.
 *
 * Returns 0 on success, or an errno-style code (EPERM, ...) on failure.
 */
static int ltt_process_metadata_tracefile(LttTracefile *tf)
{
  int err;

  while(1) {
    err = ltt_tracefile_read_seek(tf);
    if(err == EPERM) goto seek_error;
    else if(err == ERANGE) break; /* End of tracefile */

    err = ltt_tracefile_read_update_event(tf);
    if(err) goto update_error;

    /* The rules are :
     * It contains only core events :
     *  0 : set_marker_id
     *  1 : set_marker_format
     */
    if(tf->event.event_id >= MARKER_CORE_IDS) {
      /* Should only contain core events */
      g_warning("Error in processing metadata file %s, "
          "should not contain event id %u.", g_quark_to_string(tf->name),
          tf->event.event_id);
      err = EPERM;
      goto event_id_error;
    } else {
      char *pos;
      const char *channel_name, *marker_name, *format;
      uint16_t id;
      guint8 int_size, long_size, pointer_size, size_t_size, alignment;

      switch((enum marker_id)tf->event.event_id) {
        case MARKER_ID_SET_MARKER_ID:
          /* Payload layout : "channel\0marker\0", then an alignment-padded
           * u16 id followed by five u8 type sizes / alignment. */
          channel_name = pos = tf->event.data;
          pos += strlen(channel_name) + 1;
          marker_name = pos;
          g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
              channel_name, marker_name);
          pos += strlen(marker_name) + 1;
          pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
          id = ltt_get_uint16(LTT_GET_BO(tf), pos);
          g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
              channel_name, marker_name, id);
          pos += sizeof(guint16);
          int_size = *(guint8*)pos;
          pos += sizeof(guint8);
          long_size = *(guint8*)pos;
          pos += sizeof(guint8);
          pointer_size = *(guint8*)pos;
          pos += sizeof(guint8);
          size_t_size = *(guint8*)pos;
          pos += sizeof(guint8);
          alignment = *(guint8*)pos;
          pos += sizeof(guint8);
          marker_id_event(tf->trace,
              g_quark_from_string(channel_name),
              g_quark_from_string(marker_name),
              id, int_size, long_size,
              pointer_size, size_t_size, alignment);
          break;
        case MARKER_ID_SET_MARKER_FORMAT:
          /* Payload layout : "channel\0marker\0format\0". */
          channel_name = pos = tf->event.data;
          pos += strlen(channel_name) + 1;
          marker_name = pos;
          g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
              channel_name, marker_name);
          pos += strlen(marker_name) + 1;
          format = pos;
          pos += strlen(format) + 1;
          marker_format_event(tf->trace,
              g_quark_from_string(channel_name),
              g_quark_from_string(marker_name),
              format);
          /* get information from dictionary TODO */
          break;
        default:
          g_warning("Error in processing metadata file %s, "
              "unknown event id %hhu.",
              g_quark_to_string(tf->name),
              tf->event.event_id);
          err = EPERM;
          goto event_id_error;
      }
    }
  }
  return 0;

  /* Error handling */
event_id_error:
update_error:
seek_error:
  g_warning("An error occured in metadata tracefile parsing");
  return err;
}
800
801 /*
802 * Open a trace and return its LttTrace handle.
803 *
804 * pathname must be the directory of the trace
805 */
806
/*
 * Open a trace and return its LttTrace handle.
 *
 * pathname must be the directory of the trace.
 * Returns the new LttTrace, or NULL on failure.
 */
LttTrace *ltt_trace_open(const gchar *pathname)
{
  gchar abs_path[PATH_MAX];
  LttTrace  * t;
  LttTracefile *tf;
  GArray *group;
  unsigned int i;
  int ret;
  ltt_subbuffer_header_t *header;
  DIR *dir;
  struct dirent *entry;
  struct stat stat_buf;
  gchar path[PATH_MAX];

  t = g_new(LttTrace, 1);
  if(!t) goto alloc_error;

  get_absolute_pathname(pathname, abs_path);
  t->pathname = g_quark_from_string(abs_path);

  g_datalist_init(&t->tracefiles);

  /* Test to see if it looks like a trace */
  /* NOTE(review): this loop only stat()s each entry and prints errors; the
   * stat results are otherwise unused — it validates readability only. */
  dir = opendir(abs_path);
  if(dir == NULL) {
    perror(abs_path);
    goto open_error;
  }
  while((entry = readdir(dir)) != NULL) {
    strcpy(path, abs_path);
    strcat(path, "/");
    strcat(path, entry->d_name);
    ret = stat(path, &stat_buf);
    if(ret == -1) {
      perror(path);
      continue;
    }
  }
  closedir(dir);

  /* Open all the tracefiles */
  t->start_freq= 0;
  if(open_tracefiles(t, abs_path, "")) {
    g_warning("Error opening tracefile %s", abs_path);
    goto find_error;
  }

  /* Parse each trace metadata_N files : get runtime fac. info */
  group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
  if(group == NULL) {
    g_warning("Trace %s has no metadata tracefile", abs_path);
    goto find_error;
  }

  /*
   * Get the trace information for the metadata_0 tracefile.
   * Getting a correct trace start_time and start_tsc is insured by the fact
   * that no subbuffers are supposed to be lost in the metadata channel.
   * Therefore, the first subbuffer contains the start_tsc timestamp in its
   * buffer header.
   */
  g_assert(group->len > 0);
  tf = &g_array_index (group, LttTracefile, 0);
  header = (ltt_subbuffer_header_t *)tf->buffer.head;
  ret = parse_trace_header(header, tf, t);
  g_assert(!ret);

  t->num_cpu = group->len;

  //ret = allocate_marker_data(t);
  //if (ret)
  //  g_error("Error in allocating marker data");

  /* Process the metadata tracefile of every online cpu to register all
   * marker ids and formats. */
  for(i=0; i<group->len; i++) {
    tf = &g_array_index (group, LttTracefile, i);
    if (tf->cpu_online)
      if(ltt_process_metadata_tracefile(tf))
        goto find_error;
      //  goto metadata_error;
  }

  return t;

  /* Error handling */
//metadata_error:
//  destroy_marker_data(t);
find_error:
  g_datalist_clear(&t->tracefiles);
open_error:
  g_free(t);
alloc_error:
  return NULL;

}
901
902 /* Open another, completely independant, instance of a trace.
903 *
904 * A read on this new instance will read the first event of the trace.
905 *
906 * When we copy a trace, we want all the opening actions to happen again :
907 * the trace will be reopened and totally independant from the original.
908 * That's why we call ltt_trace_open.
909 */
/* Re-open the trace by its pathname so the copy is a fully independent
 * instance (see the explanatory comment block above). */
LttTrace *ltt_trace_copy(LttTrace *self)
{
  return ltt_trace_open(g_quark_to_string(self->pathname));
}
914
915 /*
916 * Close a trace
917 */
918
/* Close a trace : clearing the tracefiles datalist runs
 * ltt_tracefile_group_destroy on each group, then the trace is freed. */
void ltt_trace_close(LttTrace *t)
{
  g_datalist_clear(&t->tracefiles);
  g_free(t);
}
924
925
926 /*****************************************************************************
927 * Get the start time and end time of the trace
928 ****************************************************************************/
929
/* Get the time span [start, end] covered by one tracefile, by mapping its
 * first and last blocks and reading the buffer boundary timestamps.
 *
 * NOTE(review): g_error() aborts the program by default, so the fallback
 * assignments below (*start = ltt_time_infinite / *end = ltt_time_zero) are
 * unreachable unless the log handler is overridden — confirm intent.
 */
void ltt_tracefile_time_span_get(LttTracefile *tf,
                                 LttTime *start, LttTime *end)
{
  int err;

  err = map_block(tf, 0);
  if(unlikely(err)) {
    g_error("Can not map block");
    *start = ltt_time_infinite;
  } else
    *start = tf->buffer.begin.timestamp;

  err = map_block(tf, tf->num_blocks - 1);  /* Last block */
  if(unlikely(err)) {
    g_error("Can not map block");
    *end = ltt_time_zero;
  } else
    *end = tf->buffer.end.timestamp;

  g_assert(end->tv_sec <= G_MAXUINT);
}
951
/* Aggregation context for group_time_span_get() : accumulates the earliest
 * start and latest end time over all tracefile groups of a trace. */
struct tracefile_time_span_get_args {
  LttTrace *t;    /* trace being scanned (not read by the callback itself) */
  LttTime *start; /* in/out : running minimum start time */
  LttTime *end;   /* in/out : running maximum end time */
};
957
958 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
959 {
960 struct tracefile_time_span_get_args *args =
961 (struct tracefile_time_span_get_args*)user_data;
962
963 GArray *group = (GArray *)data;
964 unsigned int i;
965 LttTracefile *tf;
966 LttTime tmp_start;
967 LttTime tmp_end;
968
969 for(i=0; i<group->len; i++) {
970 tf = &g_array_index (group, LttTracefile, i);
971 if(tf->cpu_online) {
972 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
973 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
974 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
975 }
976 }
977 }
978
979 /* return the start and end time of a trace */
980
981 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
982 {
983 LttTime min_start = ltt_time_infinite;
984 LttTime max_end = ltt_time_zero;
985 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
986
987 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
988
989 if(start != NULL) *start = min_start;
990 if(end != NULL) *end = max_end;
991
992 }
993
994
995 /* Seek to the first event in a tracefile that has a time equal or greater than
996 * the time passed in parameter.
997 *
998 * If the time parameter is outside the tracefile time span, seek to the first
999 * event or if after, return ERANGE.
1000 *
1001 * If the time parameter is before the first event, we have to seek specially to
1002 * there.
1003 *
1004 * If the time is after the end of the trace, return ERANGE.
1005 *
1006 * Do a binary search to find the right block, then a sequential search in the
1007 * block to find the event.
1008 *
1009 * In the special case where the time requested fits inside a block that has no
1010 * event corresponding to the requested time, the first event of the next block
1011 * will be seeked.
1012 *
1013 * IMPORTANT NOTE : // FIXME everywhere...
1014 *
1015 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
1016 * you will jump over an event if you do.
1017 *
1018 * Return value : 0 : no error, the tf->event can be used
1019 * ERANGE : time if after the last event of the trace
1020 * otherwise : this is an error.
1021 *
1022 * */
1023
/* Seek to the first event with a time equal or greater than the requested
 * time. Binary search over blocks, then a sequential scan inside the block.
 * (See the full contract in the comment block above.)
 *
 * Returns 0 on success (tf->event is usable), ERANGE when the time is after
 * the last event, EPERM on error.
 */
int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
{
  int ret = 0;
  int err;
  unsigned int block_num, high, low;

  /* seek at the beginning of trace */
  err = map_block(tf, 0);  /* First block */
  if(unlikely(err)) {
    g_error("Can not map block");
    goto fail;
  }

  /* If the time is lower or equal the beginning of the trace,
   * go to the first event. */
  if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
    ret = ltt_tracefile_read(tf);
    if(ret == ERANGE) goto range;
    else if (ret) goto fail;
    goto found; /* There is either no event in the trace or the event points
                   to the first event in the trace */
  }

  err = map_block(tf, tf->num_blocks - 1);  /* Last block */
  if(unlikely(err)) {
    g_error("Can not map block");
    goto fail;
  }

  /* If the time is after the end of the trace, return ERANGE. */
  if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
    goto range;
  }

  /* Binary search the block */
  high = tf->num_blocks - 1;
  low = 0;

  while(1) {
    block_num = ((high-low) / 2) + low;

    err = map_block(tf, block_num);
    if(unlikely(err)) {
      g_error("Can not map block");
      goto fail;
    }
    if(high == low) {
      /* We cannot divide anymore : this is what would happen if the time
       * requested was exactly between two consecutive buffers'end and start
       * timestamps. This is also what would happend if we didn't deal with out
       * of span cases prior in this function. */
      /* The event is right in the buffer!
       * (or in the next buffer first event) */
      while(1) {
        ret = ltt_tracefile_read(tf);
        if(ret == ERANGE) goto range; /* ERANGE or EPERM */
        else if(ret) goto fail;

        if(ltt_time_compare(time, tf->event.event_time) <= 0)
          goto found;
      }

    } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
      /* go to lower part */
      high = block_num - 1;
    } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
      /* go to higher part */
      low = block_num + 1;
    } else {/* The event is right in the buffer!
               (or in the next buffer first event) */
      /* sequential scan inside the block for the first event at or after
       * the requested time */
      while(1) {
        ret = ltt_tracefile_read(tf);
        if(ret == ERANGE) goto range; /* ERANGE or EPERM */
        else if(ret) goto fail;

        if(ltt_time_compare(time, tf->event.event_time) <= 0)
          break;
      }
      goto found;
    }
  }

found:
  return 0;
range:
  return ERANGE;

  /* Error handling */
fail:
  g_error("ltt_tracefile_seek_time failed on tracefile %s",
      g_quark_to_string(tf->name));
  return EPERM;
}
1117
1118 /* Seek to a position indicated by an LttEventPosition
1119 */
1120
1121 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1122 {
1123 int err;
1124
1125 if(ep->tracefile != tf) {
1126 goto fail;
1127 }
1128
1129 err = map_block(tf, ep->block);
1130 if(unlikely(err)) {
1131 g_error("Can not map block");
1132 goto fail;
1133 }
1134
1135 tf->event.offset = ep->offset;
1136
1137 /* Put back the event real tsc */
1138 tf->event.tsc = ep->tsc;
1139 tf->buffer.tsc = ep->tsc;
1140
1141 err = ltt_tracefile_read_update_event(tf);
1142 if(err) goto fail;
1143
1144 /* deactivate this, as it does nothing for now
1145 err = ltt_tracefile_read_op(tf);
1146 if(err) goto fail;
1147 */
1148
1149 return 0;
1150
1151 fail:
1152 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1153 g_quark_to_string(tf->name));
1154 return 1;
1155 }
1156
1157 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1158 * corresponds to.
1159 */
1160
1161 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1162 {
1163 LttTime time;
1164
1165 if(tsc > tf->trace->start_tsc) {
1166 time = ltt_time_from_uint64(
1167 (double)(tsc - tf->trace->start_tsc)
1168 * 1000000000.0 * tf->trace->freq_scale
1169 / (double)tf->trace->start_freq);
1170 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1171 } else {
1172 time = ltt_time_from_uint64(
1173 (double)(tf->trace->start_tsc - tsc)
1174 * 1000000000.0 * tf->trace->freq_scale
1175 / (double)tf->trace->start_freq);
1176 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1177 }
1178 return time;
1179 }
1180
1181 /* Calculate the real event time based on the buffer boundaries */
1182 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1183 {
1184 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1185 }
1186
1187
1188 /* Get the current event of the tracefile : valid until the next read */
1189 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1190 {
1191 return &tf->event;
1192 }
1193
1194
1195
1196 /*****************************************************************************
1197 *Function name
1198 * ltt_tracefile_read : Read the next event in the tracefile
1199 *Input params
1200 * t : tracefile
1201 *Return value
1202 *
1203 * Returns 0 if an event can be used in tf->event.
1204 * Returns ERANGE on end of trace. The event in tf->event still can be used
1205 * (if the last block was not empty).
1206 * Returns EPERM on error.
1207 *
1208 * This function does make the tracefile event structure point to the event
1209 * currently pointed to by the tf->event.
1210 *
1211 * Note : you must call a ltt_tracefile_seek to the beginning of the trace to
1212 * reinitialize it after an error if you want results to be coherent.
1213 * It would be the case if a end of trace last buffer has no event : the end
1214 * of trace wouldn't be returned, but an error.
1215 * We make the assumption there is at least one event per buffer.
1216 ****************************************************************************/
1217
1218 int ltt_tracefile_read(LttTracefile *tf)
1219 {
1220 int err;
1221
1222 err = ltt_tracefile_read_seek(tf);
1223 if(err) return err;
1224 err = ltt_tracefile_read_update_event(tf);
1225 if(err) return err;
1226
1227 /* deactivate this, as it does nothing for now
1228 err = ltt_tracefile_read_op(tf);
1229 if(err) return err;
1230 */
1231
1232 return 0;
1233 }
1234
1235 int ltt_tracefile_read_seek(LttTracefile *tf)
1236 {
1237 int err;
1238
1239 /* Get next buffer until we finally have an event, or end of trace */
1240 while(1) {
1241 err = ltt_seek_next_event(tf);
1242 if(unlikely(err == ENOPROTOOPT)) {
1243 return EPERM;
1244 }
1245
1246 /* Are we at the end of the buffer ? */
1247 if(err == ERANGE) {
1248 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1249 return ERANGE;
1250 } else {
1251 /* get next block */
1252 err = map_block(tf, tf->buffer.index + 1);
1253 if(unlikely(err)) {
1254 g_error("Can not map block");
1255 return EPERM;
1256 }
1257 }
1258 } else break; /* We found an event ! */
1259 }
1260
1261 return 0;
1262 }
1263
/* do an operation when reading a new event */

/* This function does nothing for now */
/* NOTE(review): dead code kept under #if 0; the commented-out call sites in
 * ltt_tracefile_read and ltt_tracefile_seek_position still reference it.
 * Consider removing both together. */
#if 0
int ltt_tracefile_read_op(LttTracefile *tf)
{
  LttEvent *event;

  event = &tf->event;

  /* do event specific operation */

  /* nothing */

  return 0;
}
#endif
1281
1282 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1283 {
1284 unsigned int offset = 0;
1285 int i, j;
1286
1287 g_printf("Event header (tracefile %s offset %" PRIx64 "):\n",
1288 g_quark_to_string(ev->tracefile->long_name),
1289 (uint64_t)ev->tracefile->buffer.offset +
1290 (long)start_pos - (long)ev->tracefile->buffer.head);
1291
1292 while (offset < (long)end_pos - (long)start_pos) {
1293 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1294 g_printf(" ");
1295
1296 for (i = 0; i < 4 ; i++) {
1297 for (j = 0; j < 4; j++) {
1298 if (offset + ((i * 4) + j) <
1299 (long)end_pos - (long)start_pos)
1300 g_printf("%02hhX",
1301 ((char*)start_pos)[offset + ((i * 4) + j)]);
1302 else
1303 g_printf(" ");
1304 g_printf(" ");
1305 }
1306 if (i < 4)
1307 g_printf(" ");
1308 }
1309 offset+=16;
1310 g_printf("\n");
1311 }
1312 }
1313
1314
/* same as ltt_tracefile_read, but does not seek to the next event nor call
 * event specific operation.
 *
 * Decodes the event header located at tf->buffer.head + tf->event.offset and
 * updates tf->event : event_id, event_size, tsc, event_time and the payload
 * pointer (event->data, later refined by ltt_update_event_size).
 * Always returns 0. */
int ltt_tracefile_read_update_event(LttTracefile *tf)
{
  void * pos;
  LttEvent *event;
  void *pos_aligned;
  guint16 packed_evid; /* event id reader from the 5 bits in header */

  event = &tf->event;
  pos = tf->buffer.head + event->offset;

  /* Read event header */

  /* Align the head */
  pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
  pos_aligned = pos;

  /* First 32-bit word: event id packed above tf->tscbits, truncated
   * timestamp (low TSC bits under tf->tsc_mask) below it. */
  event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
  event->event_id = packed_evid = event->timestamp >> tf->tscbits;
  event->timestamp = event->timestamp & tf->tsc_mask;
  pos += sizeof(guint32);

  /* Packed ids 29-31 are escape codes announcing extended header fields. */
  switch (packed_evid) {
  case 29: /* LTT_RFLAG_ID_SIZE_TSC: 16-bit id, 16-bit size, full 64-bit TSC */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    /* 0xFFFF is itself an escape: the real size follows as 32 bits. */
    if (event->event_size == 0xFFFF) {
      event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
      pos += sizeof(guint32);
    }
    pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
    tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
    pos += sizeof(guint64);
    break;
  case 30: /* LTT_RFLAG_ID_SIZE: 16-bit id and 16-bit size follow */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    if (event->event_size == 0xFFFF) {
      event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
      pos += sizeof(guint32);
    }
    break;
  case 31: /* LTT_RFLAG_ID: only a 16-bit id follows; the size will come
            * from the marker description (see ltt_update_event_size). */
    event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
    pos += sizeof(guint16);
    event->event_size = G_MAXUINT;
    break;
  default: /* compact event: the packed value is the id itself */
    event->event_size = G_MAXUINT;
    break;
  }

  if (likely(packed_evid != 29)) {
    /* No extended timestamp */
    /* Extend the truncated timestamp to 64 bits using the buffer's running
     * TSC, detecting a wrap of the low tsc_mask bits. */
    if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
      tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
                          + tf->tsc_mask_next_bit)
                              | (guint64)event->timestamp;
    else
      tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
                          | (guint64)event->timestamp;
  }
  event->tsc = tf->buffer.tsc;

  event->event_time = ltt_interpolate_time(tf, event);

  if (a_event_debug)
    print_debug_event_header(event, pos_aligned, pos);

  event->data = pos;

  /*
   * Let ltt_update_event_size update event->data according to the largest
   * alignment within the payload.
   * Get the data size and update the event fields with the current
   * information. */
  ltt_update_event_size(tf);

  return 0;
}
1400
1401
/****************************************************************************
 *Function name
 *    map_block       : mmap a block (sub-buffer) of the trace file
 *Input Params
 *    tf              : ltt trace file
 *    block_num       : the block which will be mapped
 *return value
 *    0               : success
 *    -errno          : mmap of the block failed
 * (Note: other failures — munmap error, bad offset/size — abort via
 *  g_assert rather than returning EINVAL/EIO as an older comment claimed.)
 ****************************************************************************/

static gint map_block(LttTracefile * tf, guint block_num)
{
  /* NOTE(review): page_size appears unused here unless the PAGE_ALIGN macro
   * (from ltt-private.h) expands to it — verify before removing. */
  int page_size = getpagesize();
  ltt_subbuffer_header_t *header;
  uint64_t offset;
  uint32_t size;
  int ret;

  g_assert(block_num < tf->num_blocks);

  /* Release the previously mapped block, if any. */
  if(tf->buffer.head != NULL) {
    if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buffer.size))) {
      g_warning("unmap size : %u\n",
          PAGE_ALIGN(tf->buffer.size));
      perror("munmap error");
      g_assert(0);
    }
  }

  ret = get_block_offset_size(tf, block_num, &offset, &size);
  g_assert(!ret);

  g_debug("Map block %u, offset %llu, size %u\n", block_num,
      (unsigned long long)offset, (unsigned int)size);

  /* Multiple of pages aligned head */
  tf->buffer.head = mmap(0, (size_t)size, PROT_READ, MAP_PRIVATE,
      tf->fd, (off_t)offset);

  if(tf->buffer.head == MAP_FAILED) {
    perror("Error in allocating memory for buffer of tracefile");
    g_assert(0);
    goto map_error;
  }
  g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.

  tf->buffer.index = block_num;

  header = (ltt_subbuffer_header_t *)tf->buffer.head;

  /* Pull the buffer boundaries (begin/end cycle counters and sizes) out of
   * the sub-buffer header, byte-swapping as needed. */
  tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
      &header->cycle_count_begin);
  tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
      &header->cycle_count_end);
  tf->buffer.offset = offset;
  tf->buffer.size = ltt_get_uint32(LTT_GET_BO(tf),
      &header->sb_size);
  tf->buffer.data_size = ltt_get_uint32(LTT_GET_BO(tf),
      &header->data_size);
  /* Reset the running TSC to the buffer start. */
  tf->buffer.tsc = tf->buffer.begin.cycle_count;
  tf->event.tsc = tf->buffer.tsc;
  /* NOTE(review): begin.freq is only assigned below (and only when
   * trace->start_freq is set), so this reads a possibly stale or
   * uninitialized value — confirm the intended ordering. */
  tf->buffer.freq = tf->buffer.begin.freq;

  g_assert(size == tf->buffer.size);
  g_assert(tf->buffer.data_size <= tf->buffer.size);

  if (tf->trace->start_freq)
  {
    tf->buffer.begin.freq = tf->trace->start_freq;
    tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
        tf->buffer.begin.cycle_count);
    tf->buffer.end.freq = tf->trace->start_freq;
    tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
        tf->buffer.end.cycle_count);
  }

  /* Make the current event point to the beginning of the buffer :
   * it means that the event read must get the first event. */
  tf->event.tracefile = tf;
  tf->event.block = block_num;
  tf->event.offset = 0;

  if (header->events_lost) {
    g_warning("%d events lost so far in tracefile %s at block %u",
        (guint)header->events_lost,
        g_quark_to_string(tf->long_name),
        block_num);
    tf->events_lost = header->events_lost;
  }
  if (header->subbuf_corrupt) {
    g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
        (guint)header->subbuf_corrupt,
        g_quark_to_string(tf->long_name),
        block_num);
    tf->subbuf_corrupt = header->subbuf_corrupt;
  }

  return 0;

map_error:
  return -errno;
}
1506
1507 static void print_debug_event_data(LttEvent *ev)
1508 {
1509 unsigned int offset = 0;
1510 int i, j;
1511
1512 if (!max(ev->event_size, ev->data_size))
1513 return;
1514
1515 g_printf("Event data (tracefile %s offset %" PRIx64 "):\n",
1516 g_quark_to_string(ev->tracefile->long_name),
1517 (uint64_t)ev->tracefile->buffer.offset
1518 + (long)ev->data - (long)ev->tracefile->buffer.head);
1519
1520 while (offset < max(ev->event_size, ev->data_size)) {
1521 g_printf("%8lx", (long)ev->data + offset
1522 - (long)ev->tracefile->buffer.head);
1523 g_printf(" ");
1524
1525 for (i = 0; i < 4 ; i++) {
1526 for (j = 0; j < 4; j++) {
1527 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1528 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1529 else
1530 g_printf(" ");
1531 g_printf(" ");
1532 }
1533 if (i < 4)
1534 g_printf(" ");
1535 }
1536
1537 g_printf(" ");
1538
1539 for (i = 0; i < 4; i++) {
1540 for (j = 0; j < 4; j++) {
1541 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1542 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1543 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1544 else
1545 g_printf(".");
1546 } else
1547 g_printf(" ");
1548 }
1549 }
1550 offset+=16;
1551 g_printf("\n");
1552 }
1553 }
1554
1555 /* It will update the fields offsets too */
1556 void ltt_update_event_size(LttTracefile *tf)
1557 {
1558 off_t size = 0;
1559 struct marker_info *info;
1560
1561 if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
1562 switch((enum marker_id)tf->event.event_id) {
1563 case MARKER_ID_SET_MARKER_ID:
1564 size = strlen((char*)tf->event.data) + 1;
1565 g_debug("marker %s id set", (char*)tf->event.data + size);
1566 size += strlen((char*)tf->event.data + size) + 1;
1567 size += ltt_align(size, sizeof(guint16), tf->alignment);
1568 size += sizeof(guint16);
1569 size += sizeof(guint8);
1570 size += sizeof(guint8);
1571 size += sizeof(guint8);
1572 size += sizeof(guint8);
1573 size += sizeof(guint8);
1574 break;
1575 case MARKER_ID_SET_MARKER_FORMAT:
1576 size = strlen((char*)tf->event.data) + 1;
1577 g_debug("marker %s format set", (char*)tf->event.data);
1578 size += strlen((char*)tf->event.data + size) + 1;
1579 size += strlen((char*)tf->event.data + size) + 1;
1580 break;
1581 }
1582 }
1583
1584 info = marker_get_info_from_id(tf->mdata, tf->event.event_id);
1585
1586 if (tf->event.event_id >= MARKER_CORE_IDS)
1587 g_assert(info != NULL);
1588
1589 /* Do not update field offsets of core markers when initially reading the
1590 * metadata tracefile when the infos about these markers do not exist yet.
1591 */
1592 if (likely(info && info->fields)) {
1593 /* alignment */
1594 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1595 info->largest_align,
1596 info->alignment);
1597 /* size, dynamically computed */
1598 if (info->size != -1)
1599 size = info->size;
1600 else
1601 size = marker_update_fields_offsets(marker_get_info_from_id(tf->mdata,
1602 tf->event.event_id), tf->event.data);
1603 }
1604
1605 tf->event.data_size = size;
1606
1607 /* Check consistency between kernel and LTTV structure sizes */
1608 if(tf->event.event_size == G_MAXUINT) {
1609 /* Event size too big to fit in the event size field */
1610 tf->event.event_size = tf->event.data_size;
1611 }
1612
1613 if (a_event_debug)
1614 print_debug_event_data(&tf->event);
1615
1616 if (tf->event.data_size != tf->event.event_size) {
1617 struct marker_info *info = marker_get_info_from_id(tf->mdata,
1618 tf->event.event_id);
1619 if (!info)
1620 g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
1621 g_quark_to_string(tf->name));
1622 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1623 g_quark_to_string(info->name),
1624 tf->event.event_size, tf->event.data_size);
1625 exit(-1);
1626 }
1627 }
1628
1629
/* Take the tf current event offset and use the event id to figure out where is
 * the next event offset.
 *
 * This is an internal function not aiming at being used elsewhere : it will
 * not jump over the current block limits. Please consider using
 * ltt_tracefile_read to do this.
 *
 * Returns 0 on success
 * ERANGE if we are at the end of the buffer.
 * ENOPROTOOPT if an error occured when getting the current event size.
 */
static int ltt_seek_next_event(LttTracefile *tf)
{
  int ret = 0;
  void *pos;

  /* seek over the buffer header if we are at the buffer start */
  if(tf->event.offset == 0) {
    tf->event.offset += tf->buffer_header_size;

    /* Header-only (empty) buffer: report end-of-buffer immediately. */
    if(tf->event.offset == tf->buffer.data_size) {
      ret = ERANGE;
    }
    goto found;
  }

  pos = tf->event.data;

  /* NOTE(review): if LttEvent.data_size is an unsigned type this check is
   * always false — confirm against the LttEvent declaration. */
  if(tf->event.data_size < 0) goto error;

  /* Skip the current event's payload to land on the next header. */
  pos += (size_t)tf->event.data_size;

  tf->event.offset = pos - tf->buffer.head;

  if(tf->event.offset == tf->buffer.data_size) {
    ret = ERANGE;
    goto found;
  }
  g_assert(tf->event.offset < tf->buffer.data_size);

found:
  return ret;

error:
  g_error("Error in ltt_seek_next_event for tracefile %s",
      g_quark_to_string(tf->name));
  return ENOPROTOOPT;
}
1678
1679
1680 /*****************************************************************************
1681 *Function name
1682 * ltt_get_int : get an integer number
1683 *Input params
1684 * reverse_byte_order: must we reverse the byte order ?
1685 * size : the size of the integer
1686 * ptr : the data pointer
1687 *Return value
1688 * gint64 : a 64 bits integer
1689 ****************************************************************************/
1690
1691 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
1692 {
1693 gint64 val;
1694
1695 switch(size) {
1696 case 1: val = *((gint8*)data); break;
1697 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
1698 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
1699 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
1700 default: val = ltt_get_int64(reverse_byte_order, data);
1701 g_critical("get_int : integer size %d unknown", size);
1702 break;
1703 }
1704
1705 return val;
1706 }
1707
1708 /*****************************************************************************
1709 *Function name
1710 * ltt_get_uint : get an unsigned integer number
1711 *Input params
1712 * reverse_byte_order: must we reverse the byte order ?
1713 * size : the size of the integer
1714 * ptr : the data pointer
1715 *Return value
1716 * guint64 : a 64 bits unsigned integer
1717 ****************************************************************************/
1718
1719 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
1720 {
1721 guint64 val;
1722
1723 switch(size) {
1724 case 1: val = *((gint8*)data); break;
1725 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
1726 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
1727 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
1728 default: val = ltt_get_uint64(reverse_byte_order, data);
1729 g_critical("get_uint : unsigned integer size %d unknown",
1730 size);
1731 break;
1732 }
1733
1734 return val;
1735 }
1736
1737
1738 /* get the node name of the system */
1739
1740 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
1741 {
1742 return s->node_name;
1743 }
1744
1745
1746 /* get the domain name of the system */
1747
1748 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
1749 {
1750 return s->domain_name;
1751 }
1752
1753
1754 /* get the description of the system */
1755
1756 char * ltt_trace_system_description_description (LttSystemDescription * s)
1757 {
1758 return s->description;
1759 }
1760
1761
1762 /* get the NTP corrected start time of the trace */
1763 LttTime ltt_trace_start_time(LttTrace *t)
1764 {
1765 return t->start_time;
1766 }
1767
1768 /* get the monotonic start time of the trace */
1769 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
1770 {
1771 return t->start_time_from_tsc;
1772 }
1773
1774 static __attribute__ ((__unused__)) LttTracefile *ltt_tracefile_new()
1775 {
1776 LttTracefile *tf;
1777 tf = g_new(LttTracefile, 1);
1778 tf->event.tracefile = tf;
1779 return tf;
1780 }
1781
1782 static __attribute__ ((__unused__)) void ltt_tracefile_destroy(LttTracefile *tf)
1783 {
1784 g_free(tf);
1785 }
1786
1787 static __attribute__ ((__unused__)) void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
1788 {
1789 *dest = *src;
1790 }
1791
/* Before library loading... */

/* Library constructor: runs once when the shared object is loaded, before
 * any API call. Interns the metadata tracefile name as a GQuark so later
 * name comparisons are simple integer compares. */
static __attribute__((constructor)) void init(void)
{
  LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
}
This page took 0.105697 seconds and 3 git commands to generate.