Update LTTV to trace format 2.3
[lttv.git] / trunk / lttv / ltt / tracefile.c
1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <malloc.h>
36 #include <sys/mman.h>
37 #include <string.h>
38
39 // For realpath
40 #include <limits.h>
41 #include <stdlib.h>
42
43
44 #include <ltt/ltt.h>
45 #include "ltt-private.h"
46 #include <ltt/trace.h>
47 #include <ltt/event.h>
48 #include <ltt/ltt-types.h>
49 #include <ltt/marker.h>
50
51 /* Tracefile names used in this file */
52
53 GQuark LTT_TRACEFILE_NAME_METADATA;
54
55 #ifndef g_open
56 #define g_open open
57 #endif
58
59
60 #define __UNUSED__ __attribute__((__unused__))
61
62 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
63
64 #ifndef g_debug
65 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
66 #endif
67
68 #define g_close close
69
70 /* Those macros must be called from within a function where page_size is a known
71 * variable */
72 #define PAGE_MASK (~(page_size-1))
73 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
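/*
 * Worked example (editor's illustration, not part of the original code) :
 * with page_size == 4096, PAGE_MASK == ~0xFFF, so
 *   PAGE_ALIGN(1)    == 4096
 *   PAGE_ALIGN(4096) == 4096
 *   PAGE_ALIGN(4097) == 8192
 * i.e. PAGE_ALIGN rounds a size or address up to the next page boundary.
 */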
74
75 LttTrace *father_trace = NULL;
76
77 /* set the offset of the fields belonging to the event,
78    needs the architecture information */
79 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
80 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
81
82 #if 0
83 /* get the size of the field type according to
84 * The facility size information. */
85 static inline void preset_field_type_size(LttTracefile *tf,
86 LttEventType *event_type,
87 off_t offset_root, off_t offset_parent,
88 enum field_status *fixed_root, enum field_status *fixed_parent,
89 LttField *field);
90 #endif //0
91
92 /* map a fixed size or a block information from the file (fd) */
93 static gint map_block(LttTracefile * tf, guint block_num);
94
95 /* calculate nsec per cycles for current block */
96 #if 0
97 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
98 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
99 #endif //0
100
101 /* go to the next event */
102 static int ltt_seek_next_event(LttTracefile *tf);
103
104 static int open_tracefiles(LttTrace *trace, gchar *root_path,
105 gchar *relative_path);
106 static int ltt_process_metadata_tracefile(LttTracefile *tf);
107 static void ltt_tracefile_time_span_get(LttTracefile *tf,
108 LttTime *start, LttTime *end);
109 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
110 static gint map_block(LttTracefile * tf, guint block_num);
111 static void ltt_update_event_size(LttTracefile *tf);
112
113 /* Enable event debugging */
114 static int a_event_debug = 0;
115
116 void ltt_event_debug(int state)
117 {
118 a_event_debug = state;
119 }
120
121 /* trace can be NULL
122 *
123 * Return value : 0 success, 1 bad tracefile
124 */
125 static int parse_trace_header(ltt_subbuffer_header_t *header,
126 LttTracefile *tf, LttTrace *t)
127 {
128 if (header->magic_number == LTT_MAGIC_NUMBER)
129 tf->reverse_bo = 0;
130 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
131 tf->reverse_bo = 1;
132 else /* invalid magic number, bad tracefile ! */
133 return 1;
134
135 if(t) {
136 t->ltt_major_version = header->major_version;
137 t->ltt_minor_version = header->minor_version;
138 t->arch_size = header->arch_size;
139 }
140 tf->alignment = header->alignment;
141
142 /* Get float byte order : might be different from int byte order
143 * (or is set to 0 if the trace has no float (kernel trace)) */
144 tf->float_word_order = 0;
145
146 switch(header->major_version) {
147 case 0:
148 case 1:
149 g_warning("Unsupported trace version : %hhu.%hhu",
150 header->major_version, header->minor_version);
151 return 1;
152 break;
153 case 2:
154 switch(header->minor_version) {
155 case 3:
156 {
157 struct ltt_subbuffer_header_2_3 *vheader = header;
158 tf->buffer_header_size = ltt_subbuffer_header_size();
159 tf->tscbits = 27;
160 tf->eventbits = 5;
161 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
162 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
163
164 if(t) {
165 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
166 &vheader->start_freq);
167 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
168 &vheader->freq_scale);
169 if(father_trace) {
170 t->start_freq = father_trace->start_freq;
171 t->freq_scale = father_trace->freq_scale;
172 } else {
173 father_trace = t;
174 }
175 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
176 &vheader->cycle_count_begin);
177 t->start_monotonic = 0;
178 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
179 &vheader->start_time_sec);
180 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
181 &vheader->start_time_usec);
182 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
183
184 t->start_time_from_tsc = ltt_time_from_uint64(
185 (double)t->start_tsc
186 * 1000000000.0 * tf->trace->freq_scale
187 / (double)t->start_freq);
188 }
189 }
190 break;
191 default:
192 g_warning("Unsupported trace version : %hhu.%hhu",
193 header->major_version, header->minor_version);
194 return 1;
195 }
196 break;
197 default:
198 g_warning("Unsupported trace version : %hhu.%hhu",
199 header->major_version, header->minor_version);
200 return 1;
201 }
202 return 0;
203 }
204
205
206
207 /*****************************************************************************
208 *Function name
209 * ltt_tracefile_open : open a trace file, construct a LttTracefile
210 *Input params
211 * t : the trace containing the tracefile
212 * fileName : path name of the trace file
213 * tf : the tracefile structure
214 *Return value
215 * : 0 for success, -1 otherwise.
216 ****************************************************************************/
217
218 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
219 {
220 struct stat lTDFStat; /* Trace data file status */
221 ltt_subbuffer_header_t *header;
222 int page_size = getpagesize();
223
224 //open the file
225 tf->long_name = g_quark_from_string(fileName);
226 tf->trace = t;
227 tf->fd = open(fileName, O_RDONLY);
228 if(tf->fd < 0){
229 g_warning("Unable to open input data file %s\n", fileName);
230 goto end;
231 }
232
233 // Get the file's status
234 if(fstat(tf->fd, &lTDFStat) < 0){
235 g_warning("Unable to get the status of the input data file %s\n", fileName);
236 goto close_file;
237 }
238
239 // Is the file large enough to contain a trace
240 if(lTDFStat.st_size <
241 (off_t)(ltt_subbuffer_header_size())){
242 g_print("The input data file %s does not contain a trace\n", fileName);
243 goto close_file;
244 }
245
246 /* Temporarily map the buffer start header to get trace information */
247 /* Multiple of pages aligned head */
248 tf->buffer.head = mmap(0,
249 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
250 MAP_PRIVATE, tf->fd, 0);
251 if(tf->buffer.head == MAP_FAILED) {
252 perror("Error in allocating memory for buffer of tracefile");
253 goto close_file;
254 }
255 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
256
257 header = (ltt_subbuffer_header_t *)tf->buffer.head;
258
259 if(parse_trace_header(header, tf, NULL)) {
260 g_warning("parse_trace_header error");
261 goto unmap_file;
262 }
263
264 //store the size of the file
265 tf->file_size = lTDFStat.st_size;
266 tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
267 tf->num_blocks = tf->file_size / tf->buf_size;
268 tf->events_lost = 0;
269 tf->subbuf_corrupt = 0;
270
271 if(munmap(tf->buffer.head,
272 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
273 g_warning("unmap size : %u\n",
274 PAGE_ALIGN(ltt_subbuffer_header_size()));
275 perror("munmap error");
276 g_assert(0);
277 }
278 tf->buffer.head = NULL;
279
280 //read the first block
281 if(map_block(tf,0)) {
282 perror("Cannot map block for tracefile");
283 goto close_file;
284 }
285
286 return 0;
287
288 /* Error */
289 unmap_file:
290 if(munmap(tf->buffer.head,
291 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
292 g_warning("unmap size : %u\n",
293 PAGE_ALIGN(ltt_subbuffer_header_size()));
294 perror("munmap error");
295 g_assert(0);
296 }
297 close_file:
298 close(tf->fd);
299 end:
300 return -1;
301 }
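
/*
 * Hypothetical usage sketch (kept under #if 0, not part of the original
 * code) : open a single tracefile, inspect its geometry, then close it.
 * "parent_trace" and "tracefile_path" are placeholder names, and
 * ltt_tracefile_close is only defined further below in this file.
 */
#if 0
static void example_tracefile_open_close(LttTrace *parent_trace,
		gchar *tracefile_path)
{
	LttTracefile tf;

	if (ltt_tracefile_open(parent_trace, tracefile_path, &tf) != 0)
		return;	/* bad magic number, unreadable file, ... */

	g_debug("tracefile %s : %u blocks of %u bytes", tracefile_path,
		(guint)tf.num_blocks, (guint)tf.buf_size);

	ltt_tracefile_close(&tf);
}
#endif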
302
303
304 /*****************************************************************************
305 *Function name
306 * ltt_tracefile_close: close a trace file,
307 *Input params
308 * t : tracefile which will be closed
309 ****************************************************************************/
310
311 static void ltt_tracefile_close(LttTracefile *t)
312 {
313 int page_size = getpagesize();
314
315 if(t->buffer.head != NULL)
316 if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
317 g_warning("unmap size : %u\n",
318 PAGE_ALIGN(t->buf_size));
319 perror("munmap error");
320 g_assert(0);
321 }
322
323 close(t->fd);
324 }
325
326 /****************************************************************************
327 * get_absolute_pathname
328 *
329 * return the unique pathname in the system
330 *
331 * MD : Fixed this function so it uses realpath, which correctly handles
332 * previously mishandled cases (".." was not handled correctly before).
333 *
334 ****************************************************************************/
335 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
336 {
337 abs_pathname[0] = '\0';
338
339 if (realpath(pathname, abs_pathname) != NULL)
340 return;
341 else
342 {
343 /* error, return the original path unmodified */
344 strcpy(abs_pathname, pathname);
345 return;
346 }
347 return;
348 }
349
350 /* Search for something like : .*_.*
351 *
352 * The left side is the name, the right side is the number.
353 * Exclude leading /.
354 */
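/*
 * Example (editor's illustration) : a kernel tracefile named "cpu_0" is
 * split into name "cpu" and number 0, while a name without a trailing
 * "_<number>" is treated as a userspace tracefile and parsed for its
 * tid, pgid and creation timestamp instead.
 */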
355
356 static int get_tracefile_name_number(gchar *raw_name,
357 GQuark *name,
358 guint *num,
359 gulong *tid,
360 gulong *pgid,
361 guint64 *creation)
362 {
363 guint raw_name_len = strlen(raw_name);
364 gchar char_name[PATH_MAX];
365 int i;
366 int underscore_pos;
367 long int cpu_num;
368 gchar *endptr;
369 gchar *tmpptr;
370
371 /* skip leading / */
372 for(i = 0; i < raw_name_len-1;i++) {
373 if(raw_name[i] != '/')
374 break;
375 }
376 raw_name = &raw_name[i];
377 raw_name_len = strlen(raw_name);
378
379 for(i=raw_name_len-1;i>=0;i--) {
380 if(raw_name[i] == '_') break;
381 }
382 if(i==-1) { /* Either not found or name length is 0 */
383 /* This is a userspace tracefile */
384 strncpy(char_name, raw_name, raw_name_len);
385 char_name[raw_name_len] = '\0';
386 *name = g_quark_from_string(char_name);
387 *num = 0; /* unknown cpu */
388 for(i=0;i<raw_name_len;i++) {
389 if(raw_name[i] == '/') {
390 break;
391 }
392 }
393 i++;
394 for(;i<raw_name_len;i++) {
395 if(raw_name[i] == '/') {
396 break;
397 }
398 }
399 i++;
400 for(;i<raw_name_len;i++) {
401 if(raw_name[i] == '-') {
402 break;
403 }
404 }
405 if(i == raw_name_len) return -1;
406 i++;
407 tmpptr = &raw_name[i];
408 for(;i<raw_name_len;i++) {
409 if(raw_name[i] == '.') {
410 raw_name[i] = ' ';
411 break;
412 }
413 }
414 *tid = strtoul(tmpptr, &endptr, 10);
415 if(endptr == tmpptr)
416 return -1; /* No digit */
417 if(*tid == ULONG_MAX)
418 return -1; /* underflow / overflow */
419 i++;
420 tmpptr = &raw_name[i];
421 for(;i<raw_name_len;i++) {
422 if(raw_name[i] == '.') {
423 raw_name[i] = ' ';
424 break;
425 }
426 }
427 *pgid = strtoul(tmpptr, &endptr, 10);
428 if(endptr == tmpptr)
429 return -1; /* No digit */
430 if(*pgid == ULONG_MAX)
431 return -1; /* underflow / overflow */
432 i++;
433 tmpptr = &raw_name[i];
434 *creation = strtoull(tmpptr, &endptr, 10);
435 if(endptr == tmpptr)
436 return -1; /* No digit */
437 if(*creation == G_MAXUINT64)
438 return -1; /* underflow / overflow */
439 } else {
440 underscore_pos = i;
441
442 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
443
444 if(endptr == raw_name+underscore_pos+1)
445 return -1; /* No digit */
446 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
447 return -1; /* underflow / overflow */
448
449 strncpy(char_name, raw_name, underscore_pos);
450 char_name[underscore_pos] = '\0';
451
452 *name = g_quark_from_string(char_name);
453 *num = cpu_num;
454 }
455
456
457 return 0;
458 }
459
460
461 GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
462 {
463 return &trace->tracefiles;
464 }
465
466
467 void compute_tracefile_group(GQuark key_id,
468 GArray *group,
469 struct compute_tracefile_group_args *args)
470 {
471 int i;
472 LttTracefile *tf;
473
474 for(i=0; i<group->len; i++) {
475 tf = &g_array_index (group, LttTracefile, i);
476 if(tf->cpu_online)
477 args->func(tf, args->func_args);
478 }
479 }
480
481
482 static void ltt_tracefile_group_destroy(gpointer data)
483 {
484 GArray *group = (GArray *)data;
485 int i;
486 LttTracefile *tf;
487
488 if (group->len > 0)
489 destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
490 for(i=0; i<group->len; i++) {
491 tf = &g_array_index (group, LttTracefile, i);
492 if(tf->cpu_online)
493 ltt_tracefile_close(tf);
494 }
495 g_array_free(group, TRUE);
496 }
497
498 static gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
499 {
500 GArray *group = (GArray *)data;
501 int i;
502 LttTracefile *tf;
503
504 for(i=0; i<group->len; i++) {
505 tf = &g_array_index (group, LttTracefile, i);
506 if(tf->cpu_online)
507 return 1;
508 }
509 return 0;
510 }
511
512
513 /* Open each tracefile under a specific directory. Put them in a
514 * GData : this permits accessing them by their tracefile group pathname,
515 * e.g. the control/module tracefile group is accessed with the key
516 * "control/module".
517 *
518 * relative path is the path relative to the trace root
519 * root path is the full path
520 *
521 * A tracefile group is simply an array where all the per cpu tracefiles sit.
522 */
523
524 static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
525 {
526 DIR *dir = opendir(root_path);
527 struct dirent *entry;
528 struct stat stat_buf;
529 int ret, i;
530 struct marker_data *mdata;
531
532 gchar path[PATH_MAX];
533 int path_len;
534 gchar *path_ptr;
535
536 int rel_path_len;
537 gchar rel_path[PATH_MAX];
538 gchar *rel_path_ptr;
539 LttTracefile tmp_tf;
540
541 if(dir == NULL) {
542 perror(root_path);
543 return ENOENT;
544 }
545
546 strncpy(path, root_path, PATH_MAX-1);
547 path_len = strlen(path);
548 path[path_len] = '/';
549 path_len++;
550 path_ptr = path + path_len;
551
552 strncpy(rel_path, relative_path, PATH_MAX-1);
553 rel_path_len = strlen(rel_path);
554 rel_path[rel_path_len] = '/';
555 rel_path_len++;
556 rel_path_ptr = rel_path + rel_path_len;
557
558 while((entry = readdir(dir)) != NULL) {
559
560 if(entry->d_name[0] == '.') continue;
561
562 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
563 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
564
565 ret = stat(path, &stat_buf);
566 if(ret == -1) {
567 perror(path);
568 continue;
569 }
570
571 g_debug("Tracefile file or directory : %s\n", path);
572
573 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
574
575 if(S_ISDIR(stat_buf.st_mode)) {
576
577 g_debug("Entering subdirectory...\n");
578 ret = open_tracefiles(trace, path, rel_path);
579 if(ret < 0) continue;
580 } else if(S_ISREG(stat_buf.st_mode)) {
581 GQuark name;
582 guint num;
583 gulong tid, pgid;
584 guint64 creation;
585 GArray *group;
586 num = 0;
587 tid = pgid = 0;
588 creation = 0;
589 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
590 continue; /* invalid name */
591
592 g_debug("Opening file.\n");
593 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
594 g_info("Error opening tracefile %s", path);
595
596 continue; /* error opening the tracefile : bad magic number ? */
597 }
598
599 g_debug("Tracefile name is %s and number is %u",
600 g_quark_to_string(name), num);
601
602 mdata = NULL;
603 tmp_tf.cpu_online = 1;
604 tmp_tf.cpu_num = num;
605 tmp_tf.name = name;
606 tmp_tf.tid = tid;
607 tmp_tf.pgid = pgid;
608 tmp_tf.creation = creation;
609 group = g_datalist_id_get_data(&trace->tracefiles, name);
610 if(group == NULL) {
611 /* Elements are automatically cleared when the array is allocated.
612 * This sets the cpu_online variable to 0 : cpu offline, by default.
613 */
614 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
615 g_datalist_id_set_data_full(&trace->tracefiles, name,
616 group, ltt_tracefile_group_destroy);
617 mdata = allocate_marker_data();
618 if (!mdata)
619 g_error("Error in allocating marker data");
620 }
621
622 /* Add the per cpu tracefile to the named group */
623 unsigned int old_len = group->len;
624 if(num+1 > old_len)
625 group = g_array_set_size(group, num+1);
626
627 g_assert(group->len > 0);
628 if (!mdata)
629 mdata = g_array_index (group, LttTracefile, 0).mdata;
630
631 g_array_index (group, LttTracefile, num) = tmp_tf;
632 g_array_index (group, LttTracefile, num).event.tracefile =
633 &g_array_index (group, LttTracefile, num);
634 for (i = 0; i < group->len; i++)
635 g_array_index (group, LttTracefile, i).mdata = mdata;
636 }
637 }
638
639 closedir(dir);
640
641 return 0;
642 }
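
/*
 * Hypothetical lookup sketch (kept under #if 0, not part of the original
 * code) : once a trace is open, a tracefile group can be fetched from the
 * GData by its pathname and a given per-cpu tracefile picked from the
 * array.  "group_name" and "cpu" are placeholder parameters.
 */
#if 0
static LttTracefile *example_get_tracefile(LttTrace *trace,
		const gchar *group_name, guint cpu)
{
	GData **groups = ltt_trace_get_tracefiles_groups(trace);
	GArray *group = g_datalist_get_data(groups, group_name);

	if (group == NULL || cpu >= group->len)
		return NULL;
	return &g_array_index(group, LttTracefile, cpu);
}
#endif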
643
644
645 /* Presumes the tracefile is already positioned at the beginning. This makes
646 * sense, because it must be done just after opening the tracefile. */
647 static int ltt_process_metadata_tracefile(LttTracefile *tf)
648 {
649 int err;
650 guint i;
651
652 while(1) {
653 err = ltt_tracefile_read_seek(tf);
654 if(err == EPERM) goto seek_error;
655 else if(err == ERANGE) break; /* End of tracefile */
656
657 err = ltt_tracefile_read_update_event(tf);
658 if(err) goto update_error;
659
660 /* The rules are :
661 * It contains only core events :
662 * 0 : set_marker_id
663 * 1 : set_marker_format
664 */
665 if(tf->event.event_id >= MARKER_CORE_IDS) {
666 /* Should only contain core events */
667 g_warning("Error in processing metadata file %s, "
668 "should not contain event id %u.", g_quark_to_string(tf->name),
669 tf->event.event_id);
670 err = EPERM;
671 goto event_id_error;
672 } else {
673 char *pos;
674 const char *channel_name, *marker_name, *format;
675 uint16_t id;
676 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
677
678 switch((enum marker_id)tf->event.event_id) {
679 case MARKER_ID_SET_MARKER_ID:
680 channel_name = pos = tf->event.data;
681 pos += strlen(channel_name) + 1;
682 marker_name = pos;
683 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
684 channel_name, marker_name);
685 pos += strlen(marker_name) + 1;
686 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
687 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
688 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
689 channel_name, marker_name, id);
690 pos += sizeof(guint16);
691 int_size = *(guint8*)pos;
692 pos += sizeof(guint8);
693 long_size = *(guint8*)pos;
694 pos += sizeof(guint8);
695 pointer_size = *(guint8*)pos;
696 pos += sizeof(guint8);
697 size_t_size = *(guint8*)pos;
698 pos += sizeof(guint8);
699 alignment = *(guint8*)pos;
700 pos += sizeof(guint8);
701 marker_id_event(tf->trace,
702 g_quark_from_string(channel_name),
703 g_quark_from_string(marker_name),
704 id, int_size, long_size,
705 pointer_size, size_t_size, alignment);
706 break;
707 case MARKER_ID_SET_MARKER_FORMAT:
708 channel_name = pos = tf->event.data;
709 pos += strlen(channel_name) + 1;
710 marker_name = pos;
711 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
712 channel_name, marker_name);
713 pos += strlen(marker_name) + 1;
714 format = pos;
715 pos += strlen(format) + 1;
716 marker_format_event(tf->trace,
717 g_quark_from_string(channel_name),
718 g_quark_from_string(marker_name),
719 format);
720 /* get information from dictionary TODO */
721 break;
722 default:
723 g_warning("Error in processing metadata file %s, "
724 "unknown event id %hhu.",
725 g_quark_to_string(tf->name),
726 tf->event.event_id);
727 err = EPERM;
728 goto event_id_error;
729 }
730 }
731 }
732 return 0;
733
734 /* Error handling */
735 event_id_error:
736 update_error:
737 seek_error:
738 g_warning("An error occured in metadata tracefile parsing");
739 return err;
740 }
741
742 /*
743 * Open a trace and return its LttTrace handle.
744 *
745 * pathname must be the directory of the trace
746 */
747
748 LttTrace *ltt_trace_open(const gchar *pathname)
749 {
750 gchar abs_path[PATH_MAX];
751 LttTrace * t;
752 LttTracefile *tf;
753 GArray *group;
754 int i, ret;
755 ltt_subbuffer_header_t *header;
756 DIR *dir;
757 struct dirent *entry;
758 struct stat stat_buf;
759 gchar path[PATH_MAX];
760
761 t = g_new(LttTrace, 1);
762 if(!t) goto alloc_error;
763
764 get_absolute_pathname(pathname, abs_path);
765 t->pathname = g_quark_from_string(abs_path);
766
767 g_datalist_init(&t->tracefiles);
768
769 /* Test to see if it looks like a trace */
770 dir = opendir(abs_path);
771 if(dir == NULL) {
772 perror(abs_path);
773 goto open_error;
774 }
775 while((entry = readdir(dir)) != NULL) {
776 strcpy(path, abs_path);
777 strcat(path, "/");
778 strcat(path, entry->d_name);
779 ret = stat(path, &stat_buf);
780 if(ret == -1) {
781 perror(path);
782 continue;
783 }
784 }
785 closedir(dir);
786
787 /* Open all the tracefiles */
788 if(open_tracefiles(t, abs_path, "")) {
789 g_warning("Error opening tracefile %s", abs_path);
790 goto find_error;
791 }
792
793 /* Parse each trace metadata_N file : get runtime facility info */
794 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
795 if(group == NULL) {
796 g_error("Trace %s has no metadata tracefile", abs_path);
797 g_assert(0);
798 goto find_error;
799 }
800
801 /*
802 * Get the trace information for the metadata_0 tracefile.
803 * Getting a correct trace start_time and start_tsc is ensured by the fact
804 * that no subbuffers are supposed to be lost in the metadata channel.
805 * Therefore, the first subbuffer contains the start_tsc timestamp in its
806 * buffer header.
807 */
808 g_assert(group->len > 0);
809 tf = &g_array_index (group, LttTracefile, 0);
810 header = (ltt_subbuffer_header_t *)tf->buffer.head;
811 ret = parse_trace_header(header, tf, t);
812 g_assert(!ret);
813
814 t->num_cpu = group->len;
815
816 //ret = allocate_marker_data(t);
817 //if (ret)
818 // g_error("Error in allocating marker data");
819
820 for(i=0; i<group->len; i++) {
821 tf = &g_array_index (group, LttTracefile, i);
822 if (tf->cpu_online)
823 if(ltt_process_metadata_tracefile(tf))
824 goto find_error;
825 // goto metadata_error;
826 }
827
828 return t;
829
830 /* Error handling */
831 //metadata_error:
832 // destroy_marker_data(t);
833 find_error:
834 g_datalist_clear(&t->tracefiles);
835 open_error:
836 g_free(t);
837 alloc_error:
838 return NULL;
839
840 }
841
842 /* Open another, completely independent, instance of a trace.
843 *
844 * A read on this new instance will read the first event of the trace.
845 *
846 * When we copy a trace, we want all the opening actions to happen again :
847 * the trace will be reopened and totally independent of the original.
848 * That's why we call ltt_trace_open.
849 */
850 LttTrace *ltt_trace_copy(LttTrace *self)
851 {
852 return ltt_trace_open(g_quark_to_string(self->pathname));
853 }
854
855 /*
856 * Close a trace
857 */
858
859 void ltt_trace_close(LttTrace *t)
860 {
861 g_datalist_clear(&t->tracefiles);
862 g_free(t);
863 }
864
865
866 /*****************************************************************************
867 * Get the start time and end time of the trace
868 ****************************************************************************/
869
870 void ltt_tracefile_time_span_get(LttTracefile *tf,
871 LttTime *start, LttTime *end)
872 {
873 int err;
874
875 err = map_block(tf, 0);
876 if(unlikely(err)) {
877 g_error("Can not map block");
878 *start = ltt_time_infinite;
879 } else
880 *start = tf->buffer.begin.timestamp;
881
882 err = map_block(tf, tf->num_blocks - 1); /* Last block */
883 if(unlikely(err)) {
884 g_error("Can not map block");
885 *end = ltt_time_zero;
886 } else
887 *end = tf->buffer.end.timestamp;
888 }
889
890 struct tracefile_time_span_get_args {
891 LttTrace *t;
892 LttTime *start;
893 LttTime *end;
894 };
895
896 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
897 {
898 struct tracefile_time_span_get_args *args =
899 (struct tracefile_time_span_get_args*)user_data;
900
901 GArray *group = (GArray *)data;
902 int i;
903 LttTracefile *tf;
904 LttTime tmp_start;
905 LttTime tmp_end;
906
907 for(i=0; i<group->len; i++) {
908 tf = &g_array_index (group, LttTracefile, i);
909 if(tf->cpu_online) {
910 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
911 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
912 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
913 }
914 }
915 }
916
917 /* return the start and end time of a trace */
918
919 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
920 {
921 LttTime min_start = ltt_time_infinite;
922 LttTime max_end = ltt_time_zero;
923 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
924
925 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
926
927 if(start != NULL) *start = min_start;
928 if(end != NULL) *end = max_end;
929
930 }
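
/*
 * Hypothetical end-to-end sketch (kept under #if 0, not part of the
 * original code) : open a trace directory, print its time span, then close
 * it.  "/tmp/trace1" is a placeholder path.
 */
#if 0
static void example_trace_time_span(void)
{
	LttTrace *trace;
	LttTime start, end;

	trace = ltt_trace_open("/tmp/trace1");
	if (trace == NULL)
		return;

	ltt_trace_time_span_get(trace, &start, &end);
	g_debug("trace spans %lu.%09lu to %lu.%09lu",
		(unsigned long)start.tv_sec, (unsigned long)start.tv_nsec,
		(unsigned long)end.tv_sec, (unsigned long)end.tv_nsec);

	ltt_trace_close(trace);
}
#endif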
931
932
933 /* Seek to the first event in a tracefile that has a time equal or greater than
934 * the time passed in parameter.
935 *
936 * If the time parameter is before the tracefile time span, seek to the first
937 * event; if it is after the time span, return ERANGE.
938 *
939 * If the time parameter is before the first event, we have to handle that
940 * seek specially.
941 *
942 * If the time is after the end of the trace, return ERANGE.
943 *
944 * Do a binary search to find the right block, then a sequential search in the
945 * block to find the event.
946 *
947 * In the special case where the requested time falls inside a block that has
948 * no event corresponding to it, we seek to the first event of the next
949 * block.
950 *
951 * IMPORTANT NOTE : // FIXME everywhere...
952 *
953 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
954 * you will jump over an event if you do.
955 *
956 * Return value : 0 : no error, the tf->event can be used
957 * ERANGE : time is after the last event of the trace
958 * otherwise : this is an error.
959 *
960 * */
961
962 int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
963 {
964 int ret = 0;
965 int err;
966 unsigned int block_num, high, low;
967
968 /* seek at the beginning of trace */
969 err = map_block(tf, 0); /* First block */
970 if(unlikely(err)) {
971 g_error("Can not map block");
972 goto fail;
973 }
974
975 /* If the time is lower or equal the beginning of the trace,
976 * go to the first event. */
977 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
978 ret = ltt_tracefile_read(tf);
979 if(ret == ERANGE) goto range;
980 else if (ret) goto fail;
981 goto found; /* There is either no event in the trace or tf->event now
982 points to the first event in the trace */
983 }
984
985 err = map_block(tf, tf->num_blocks - 1); /* Last block */
986 if(unlikely(err)) {
987 g_error("Can not map block");
988 goto fail;
989 }
990
991 /* If the time is after the end of the trace, return ERANGE. */
992 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
993 goto range;
994 }
995
996 /* Binary search the block */
997 high = tf->num_blocks - 1;
998 low = 0;
999
1000 while(1) {
1001 block_num = ((high-low) / 2) + low;
1002
1003 err = map_block(tf, block_num);
1004 if(unlikely(err)) {
1005 g_error("Can not map block");
1006 goto fail;
1007 }
1008 if(high == low) {
1009 /* We cannot divide anymore : this is what would happen if the time
1010 * requested was exactly between two consecutive buffers' end and start
1011 * timestamps. This is also what would happen if we didn't deal with
1012 * out-of-span cases earlier in this function. */
1013 /* The event is right in the buffer!
1014 * (or in the next buffer first event) */
1015 while(1) {
1016 ret = ltt_tracefile_read(tf);
1017 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1018 else if(ret) goto fail;
1019
1020 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1021 goto found;
1022 }
1023
1024 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
1025 /* go to lower part */
1026 high = block_num - 1;
1027 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1028 /* go to higher part */
1029 low = block_num + 1;
1030 } else {/* The event is right in the buffer!
1031 (or in the next buffer first event) */
1032 while(1) {
1033 ret = ltt_tracefile_read(tf);
1034 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1035 else if(ret) goto fail;
1036
1037 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1038 break;
1039 }
1040 goto found;
1041 }
1042 }
1043
1044 found:
1045 return 0;
1046 range:
1047 return ERANGE;
1048
1049 /* Error handling */
1050 fail:
1051 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1052 g_quark_to_string(tf->name));
1053 return EPERM;
1054 }
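
/*
 * Hypothetical iteration sketch (kept under #if 0, not part of the original
 * code) : position a tracefile at a given time and walk the following
 * events.  As noted above, the event found by ltt_tracefile_seek_time is
 * already valid, so ltt_tracefile_read is only used to advance to the next
 * one.
 */
#if 0
static void example_iterate_from(LttTracefile *tf, LttTime start)
{
	LttEvent *ev;

	if (ltt_tracefile_seek_time(tf, start) != 0)
		return;	/* ERANGE : start is after the last event, or error */

	do {
		ev = ltt_tracefile_get_event(tf);
		g_debug("event id %u at %lu.%09lu", (guint)ev->event_id,
			(unsigned long)ev->event_time.tv_sec,
			(unsigned long)ev->event_time.tv_nsec);
	} while (ltt_tracefile_read(tf) == 0);
}
#endif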
1055
1056 /* Seek to a position indicated by an LttEventPosition
1057 */
1058
1059 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1060 {
1061 int err;
1062
1063 if(ep->tracefile != tf) {
1064 goto fail;
1065 }
1066
1067 err = map_block(tf, ep->block);
1068 if(unlikely(err)) {
1069 g_error("Can not map block");
1070 goto fail;
1071 }
1072
1073 tf->event.offset = ep->offset;
1074
1075 /* Put back the event real tsc */
1076 tf->event.tsc = ep->tsc;
1077 tf->buffer.tsc = ep->tsc;
1078
1079 err = ltt_tracefile_read_update_event(tf);
1080 if(err) goto fail;
1081
1082 /* deactivate this, as it does nothing for now
1083 err = ltt_tracefile_read_op(tf);
1084 if(err) goto fail;
1085 */
1086
1087 return 0;
1088
1089 fail:
1090 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1091 g_quark_to_string(tf->name));
1092 return 1;
1093 }
1094
1095 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1096 * corresponds to.
1097 */
1098
1099 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1100 {
1101 LttTime time;
1102
1103 if(tsc > tf->trace->start_tsc) {
1104 time = ltt_time_from_uint64(
1105 (double)(tsc - tf->trace->start_tsc)
1106 * 1000000000.0 * tf->trace->freq_scale
1107 / (double)tf->trace->start_freq);
1108 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1109 } else {
1110 time = ltt_time_from_uint64(
1111 (double)(tf->trace->start_tsc - tsc)
1112 * 1000000000.0 * tf->trace->freq_scale
1113 / (double)tf->trace->start_freq);
1114 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1115 }
1116 return time;
1117 }
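/*
 * Worked example (editor's illustration, with made-up numbers) : with
 * start_freq == 1000000000 (1 GHz) and freq_scale == 1, an event whose tsc
 * is 2000000 cycles past start_tsc lands
 *   2000000 * 1000000000.0 * 1 / 1000000000 = 2000000 ns = 2 ms
 * after start_time_from_tsc.
 */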
1118
1119 /* Calculate the real event time based on the buffer boundaries */
1120 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1121 {
1122 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1123 }
1124
1125
1126 /* Get the current event of the tracefile : valid until the next read */
1127 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1128 {
1129 return &tf->event;
1130 }
1131
1132
1133
1134 /*****************************************************************************
1135 *Function name
1136 * ltt_tracefile_read : Read the next event in the tracefile
1137 *Input params
1138 * t : tracefile
1139 *Return value
1140 *
1141 * Returns 0 if an event can be used in tf->event.
1142 * Returns ERANGE on end of trace. The event in tf->event still can be used
1143 * (if the last block was not empty).
1144 * Returns EPERM on error.
1145 *
1146 * This function updates the tracefile event structure (tf->event) so that it
1147 * describes the event the tracefile is currently positioned on.
1148 *
1149 * Note : you must call an ltt_tracefile_seek to the beginning of the trace to
1150 * reinitialize it after an error if you want coherent results.
1151 * This would be the case if the last buffer of the trace has no event : the
1152 * end of trace wouldn't be returned, but an error.
1153 * We make the assumption there is at least one event per buffer.
1154 ****************************************************************************/
1155
1156 int ltt_tracefile_read(LttTracefile *tf)
1157 {
1158 int err;
1159
1160 err = ltt_tracefile_read_seek(tf);
1161 if(err) return err;
1162 err = ltt_tracefile_read_update_event(tf);
1163 if(err) return err;
1164
1165 /* deactivate this, as it does nothing for now
1166 err = ltt_tracefile_read_op(tf);
1167 if(err) return err;
1168 */
1169
1170 return 0;
1171 }
1172
1173 int ltt_tracefile_read_seek(LttTracefile *tf)
1174 {
1175 int err;
1176
1177 /* Get next buffer until we finally have an event, or end of trace */
1178 while(1) {
1179 err = ltt_seek_next_event(tf);
1180 if(unlikely(err == ENOPROTOOPT)) {
1181 return EPERM;
1182 }
1183
1184 /* Are we at the end of the buffer ? */
1185 if(err == ERANGE) {
1186 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1187 return ERANGE;
1188 } else {
1189 /* get next block */
1190 err = map_block(tf, tf->buffer.index + 1);
1191 if(unlikely(err)) {
1192 g_error("Can not map block");
1193 return EPERM;
1194 }
1195 }
1196 } else break; /* We found an event ! */
1197 }
1198
1199 return 0;
1200 }
1201
1202 /* do an operation when reading a new event */
1203
1204 /* This function does nothing for now */
1205 #if 0
1206 int ltt_tracefile_read_op(LttTracefile *tf)
1207 {
1208 LttEvent *event;
1209
1210 event = &tf->event;
1211
1212 /* do event specific operation */
1213
1214 /* nothing */
1215
1216 return 0;
1217 }
1218 #endif
1219
1220 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1221 {
1222 unsigned int offset = 0;
1223 int i, j;
1224
1225 g_printf("Event header (tracefile %s offset %llx):\n",
1226 g_quark_to_string(ev->tracefile->long_name),
1227 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1228 + (long)start_pos - (long)ev->tracefile->buffer.head);
1229
1230 while (offset < (long)end_pos - (long)start_pos) {
1231 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1232 g_printf(" ");
1233
1234 for (i = 0; i < 4 ; i++) {
1235 for (j = 0; j < 4; j++) {
1236 if (offset + ((i * 4) + j) <
1237 (long)end_pos - (long)start_pos)
1238 g_printf("%02hhX",
1239 ((char*)start_pos)[offset + ((i * 4) + j)]);
1240 else
1241 g_printf(" ");
1242 g_printf(" ");
1243 }
1244 if (i < 4)
1245 g_printf(" ");
1246 }
1247 offset+=16;
1248 g_printf("\n");
1249 }
1250 }
1251
1252
1253 /* Same as ltt_tracefile_read, but does not seek to the next event nor call
1254 * the event-specific operation. */
1255 int ltt_tracefile_read_update_event(LttTracefile *tf)
1256 {
1257 void * pos;
1258 LttEvent *event;
1259 void *pos_aligned;
1260
1261 event = &tf->event;
1262 pos = tf->buffer.head + event->offset;
1263
1264 /* Read event header */
1265
1266 /* Align the head */
1267 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1268 pos_aligned = pos;
1269
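/*
 * Editor's note (not from the original code) : each event starts with a
 * 32-bit word that packs the event id in its upper tf->eventbits (5) bits
 * and the low tf->tscbits (27) bits of the TSC in the remaining bits; the
 * two fields are split apart just below.
 */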
1270 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1271 event->event_id = event->timestamp >> tf->tscbits;
1272 event->timestamp = event->timestamp & tf->tsc_mask;
1273 pos += sizeof(guint32);
1274
1275 switch (event->event_id) {
1276 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1277 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1278 pos += sizeof(guint16);
1279 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1280 pos += sizeof(guint16);
1281 if (event->event_size == 0xFFFF) {
1282 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1283 pos += sizeof(guint32);
1284 }
1285 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1286 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1287 pos += sizeof(guint64);
1288 break;
1289 case 30: /* LTT_RFLAG_ID_SIZE */
1290 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1291 pos += sizeof(guint16);
1292 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1293 pos += sizeof(guint16);
1294 if (event->event_size == 0xFFFF) {
1295 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1296 pos += sizeof(guint32);
1297 }
1298 break;
1299 case 31: /* LTT_RFLAG_ID */
1300 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1301 pos += sizeof(guint16);
1302 event->event_size = G_MAXUINT;
1303 break;
1304 default:
1305 event->event_size = G_MAXUINT;
1306 break;
1307 }
1308
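/*
 * Editor's note (not from the original code) : the 27-bit timestamp carried
 * by the event header is merged into the running buffer TSC below.  If it is
 * smaller than the low bits of the previous TSC, the 27-bit counter wrapped,
 * so the upper bits are bumped by tsc_mask_next_bit (2^27); otherwise only
 * the low bits are replaced.
 */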
1309 if (likely(event->event_id != 29)) {
1310 /* No extended timestamp */
1311 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1312 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1313 + tf->tsc_mask_next_bit)
1314 | (guint64)event->timestamp;
1315 else
1316 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1317 | (guint64)event->timestamp;
1318 }
1319 event->tsc = tf->buffer.tsc;
1320
1321 event->event_time = ltt_interpolate_time(tf, event);
1322
1323 if (a_event_debug)
1324 print_debug_event_header(event, pos_aligned, pos);
1325
1326 event->data = pos;
1327
1328 /*
1329 * Let ltt_update_event_size update event->data according to the largest
1330 * alignment within the payload.
1331 * Get the data size and update the event fields with the current
1332 * information. */
1333 ltt_update_event_size(tf);
1334
1335 return 0;
1336 }
1337
1338
1339 /****************************************************************************
1340 *Function name
1341 * map_block : map a block from the file
1342 *Input Params
1343 * lttdes : ltt trace file
1344 * whichBlock : the block which will be read
1345 *return value
1346 * 0 : success
1347 * negative errno value (e.g. -EINVAL, -EIO) : the block could not be
1348 * mapped from the file
1349 ****************************************************************************/
1350
1351 static gint map_block(LttTracefile * tf, guint block_num)
1352 {
1353 int page_size = getpagesize();
1354 ltt_subbuffer_header_t *header;
1355
1356 g_assert(block_num < tf->num_blocks);
1357
1358 if(tf->buffer.head != NULL) {
1359 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
1360 g_warning("unmap size : %u\n",
1361 PAGE_ALIGN(tf->buf_size));
1362 perror("munmap error");
1363 g_assert(0);
1364 }
1365 }
1366
1367 /* Multiple of pages aligned head */
1368 tf->buffer.head = mmap(0,
1369 PAGE_ALIGN(tf->buf_size),
1370 PROT_READ, MAP_PRIVATE, tf->fd,
1371 PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
1372
1373 if(tf->buffer.head == MAP_FAILED) {
1374 perror("Error in allocating memory for buffer of tracefile");
1375 g_assert(0);
1376 goto map_error;
1377 }
1378 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1379
1380
1381 tf->buffer.index = block_num;
1382
1383 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1384
1385 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1386 &header->cycle_count_begin);
1387 tf->buffer.begin.freq = tf->trace->start_freq;
1388
1389 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1390 tf->buffer.begin.cycle_count);
1391 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1392 &header->cycle_count_end);
1393 tf->buffer.end.freq = tf->trace->start_freq;
1394
1395 tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
1396 &header->lost_size);
1397 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1398 tf->buffer.end.cycle_count);
1399 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1400 tf->event.tsc = tf->buffer.tsc;
1401 tf->buffer.freq = tf->buffer.begin.freq;
1402
1403 /* FIXME
1404 * eventually support variable buffer size : will need a partial pre-read of
1405 * the headers to create an index when we open the trace... eventually. */
1406 g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
1407 &header->buf_size));
1408
1409 /* Make the current event point to the beginning of the buffer :
1410 * it means that the event read must get the first event. */
1411 tf->event.tracefile = tf;
1412 tf->event.block = block_num;
1413 tf->event.offset = 0;
1414
1415 if (header->events_lost) {
1416 g_warning("%d events lost so far in tracefile %s at block %u",
1417 (guint)header->events_lost,
1418 g_quark_to_string(tf->long_name),
1419 block_num);
1420 tf->events_lost = header->events_lost;
1421 }
1422 if (header->subbuf_corrupt) {
1423 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1424 (guint)header->subbuf_corrupt,
1425 g_quark_to_string(tf->long_name),
1426 block_num);
1427 tf->subbuf_corrupt = header->subbuf_corrupt;
1428 }
1429
1430 return 0;
1431
1432 map_error:
1433 return -errno;
1434 }
1435
1436 static void print_debug_event_data(LttEvent *ev)
1437 {
1438 unsigned int offset = 0;
1439 int i, j;
1440
1441 if (!max(ev->event_size, ev->data_size))
1442 return;
1443
1444 g_printf("Event data (tracefile %s offset %llx):\n",
1445 g_quark_to_string(ev->tracefile->long_name),
1446 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1447 + (long)ev->data - (long)ev->tracefile->buffer.head);
1448
1449 while (offset < max(ev->event_size, ev->data_size)) {
1450 g_printf("%8lx", (long)ev->data + offset
1451 - (long)ev->tracefile->buffer.head);
1452 g_printf(" ");
1453
1454 for (i = 0; i < 4 ; i++) {
1455 for (j = 0; j < 4; j++) {
1456 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1457 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1458 else
1459 g_printf(" ");
1460 g_printf(" ");
1461 }
1462 if (i < 4)
1463 g_printf(" ");
1464 }
1465
1466 g_printf(" ");
1467
1468 for (i = 0; i < 4; i++) {
1469 for (j = 0; j < 4; j++) {
1470 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1471 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1472 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1473 else
1474 g_printf(".");
1475 } else
1476 g_printf(" ");
1477 }
1478 }
1479 offset+=16;
1480 g_printf("\n");
1481 }
1482 }
1483
1484 /* It will update the fields offsets too */
1485 void ltt_update_event_size(LttTracefile *tf)
1486 {
1487 off_t size = 0;
1488 char *tscdata;
1489 struct marker_info *info;
1490
1491 if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
1492 switch((enum marker_id)tf->event.event_id) {
1493 case MARKER_ID_SET_MARKER_ID:
1494 size = strlen((char*)tf->event.data) + 1;
1495 g_debug("marker %s id set", (char*)tf->event.data + size);
1496 size += strlen((char*)tf->event.data + size) + 1;
1497 size += ltt_align(size, sizeof(guint16), tf->alignment);
1498 size += sizeof(guint16);
1499 size += sizeof(guint8);
1500 size += sizeof(guint8);
1501 size += sizeof(guint8);
1502 size += sizeof(guint8);
1503 size += sizeof(guint8);
1504 break;
1505 case MARKER_ID_SET_MARKER_FORMAT:
1506 size = strlen((char*)tf->event.data) + 1;
1507 g_debug("marker %s format set", (char*)tf->event.data);
1508 size += strlen((char*)tf->event.data + size) + 1;
1509 size += strlen((char*)tf->event.data + size) + 1;
1510 break;
1511 }
1512 }
1513
1514 info = marker_get_info_from_id(tf->mdata, tf->event.event_id);
1515
1516 if (tf->event.event_id >= MARKER_CORE_IDS)
1517 g_assert(info != NULL);
1518
1519 /* Do not update field offsets of core markers when initially reading the
1520 * metadata tracefile when the info about these markers does not exist yet.
1521 */
1522 if (likely(info && info->fields)) {
1523 /* alignment */
1524 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1525 info->largest_align,
1526 info->alignment);
1527 /* size, dynamically computed */
1528 if (info->size != -1)
1529 size = info->size;
1530 else
1531 size = marker_update_fields_offsets(marker_get_info_from_id(tf->mdata,
1532 tf->event.event_id), tf->event.data);
1533 }
1534
1535 tf->event.data_size = size;
1536
1537 /* Check consistency between kernel and LTTV structure sizes */
1538 if(tf->event.event_size == G_MAXUINT) {
1539 /* Event size too big to fit in the event size field */
1540 tf->event.event_size = tf->event.data_size;
1541 }
1542
1543 if (a_event_debug)
1544 print_debug_event_data(&tf->event);
1545
1546 if (tf->event.data_size != tf->event.event_size) {
1547 struct marker_info *info = marker_get_info_from_id(tf->mdata,
1548 tf->event.event_id);
1549 if (!info)
1550 g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
1551 g_quark_to_string(tf->name));
1552 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1553 g_quark_to_string(info->name),
1554 tf->event.event_size, tf->event.data_size);
1555 exit(-1);
1556 }
1557 }
1558
1559
1560 /* Take the tf current event offset and use the event id to figure out where
1561 * the next event offset is.
1562 *
1563 * This is an internal function not meant to be used elsewhere : it will
1564 * not jump over the current block limits. Please consider using
1565 * ltt_tracefile_read to do this.
1566 *
1567 * Returns 0 on success
1568 * ERANGE if we are at the end of the buffer.
1569 * ENOPROTOOPT if an error occurred when getting the current event size.
1570 */
1571 static int ltt_seek_next_event(LttTracefile *tf)
1572 {
1573 int ret = 0;
1574 void *pos;
1575
1576 /* seek over the buffer header if we are at the buffer start */
1577 if(tf->event.offset == 0) {
1578 tf->event.offset += tf->buffer_header_size;
1579
1580 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1581 ret = ERANGE;
1582 }
1583 goto found;
1584 }
1585
1586 pos = tf->event.data;
1587
1588 if(tf->event.data_size < 0) goto error;
1589
1590 pos += (size_t)tf->event.data_size;
1591
1592 tf->event.offset = pos - tf->buffer.head;
1593
1594 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1595 ret = ERANGE;
1596 goto found;
1597 }
1598 g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
1599
1600 found:
1601 return ret;
1602
1603 error:
1604 g_error("Error in ltt_seek_next_event for tracefile %s",
1605 g_quark_to_string(tf->name));
1606 return ENOPROTOOPT;
1607 }
1608
1609 #if 0
1610 /*****************************************************************************
1611 *Function name
1612 * set_fields_offsets : set the precomputable offset of the fields
1613 *Input params
1614 * tracefile : opened trace file
1615 * event_type : the event type
1616 ****************************************************************************/
1617
1618 void set_fields_offsets(LttTracefile *tf, LttEventType *event_type)
1619 {
1620 LttField *field = event_type->root_field;
1621 enum field_status fixed_root = FIELD_FIXED, fixed_parent = FIELD_FIXED;
1622
1623 if(likely(field))
1624 preset_field_type_size(tf, event_type, 0, 0,
1625 &fixed_root, &fixed_parent,
1626 field);
1627
1628 }
1629 #endif //0
1630
1631
1632 /*****************************************************************************
1633 *Function name
1634 * get_alignment : Get the alignment needed for a field.
1635 *Input params
1636 * field : field
1637 *
1638 * returns : The size on which it must be aligned.
1639 *
1640 ****************************************************************************/
1641 #if 0
1642 off_t get_alignment(LttField *field)
1643 {
1644 LttType *type = &field->field_type;
1645
1646 switch(type->type_class) {
1647 case LTT_INT_FIXED:
1648 case LTT_UINT_FIXED:
1649 case LTT_POINTER:
1650 case LTT_CHAR:
1651 case LTT_UCHAR:
1652 case LTT_SHORT:
1653 case LTT_USHORT:
1654 case LTT_INT:
1655 case LTT_UINT:
1656 case LTT_LONG:
1657 case LTT_ULONG:
1658 case LTT_SIZE_T:
1659 case LTT_SSIZE_T:
1660 case LTT_OFF_T:
1661 case LTT_FLOAT:
1662 case LTT_ENUM:
1663 /* Align offset on type size */
1664 g_assert(field->field_size != 0);
1665 return field->field_size;
1666 break;
1667 case LTT_STRING:
1668 return 1;
1669 break;
1670 case LTT_ARRAY:
1671 g_assert(type->fields->len == 1);
1672 {
1673 LttField *child = &g_array_index(type->fields, LttField, 0);
1674 return get_alignment(child);
1675 }
1676 break;
1677 case LTT_SEQUENCE:
1678 g_assert(type->fields->len == 2);
1679 {
1680 off_t localign = 1;
1681 LttField *child = &g_array_index(type->fields, LttField, 0);
1682
1683 localign = max(localign, get_alignment(child));
1684
1685 child = &g_array_index(type->fields, LttField, 1);
1686 localign = max(localign, get_alignment(child));
1687
1688 return localign;
1689 }
1690 break;
1691 case LTT_STRUCT:
1692 case LTT_UNION:
1693 {
1694 guint i;
1695 off_t localign = 1;
1696
1697 for(i=0; i<type->fields->len; i++) {
1698 LttField *child = &g_array_index(type->fields, LttField, i);
1699 localign = max(localign, get_alignment(child));
1700 }
1701 return localign;
1702 }
1703 break;
1704 case LTT_NONE:
1705 default:
1706 g_error("get_alignment : unknown type");
1707 return -1;
1708 }
1709 }
1710
1711 #endif //0
1712
1713 /*****************************************************************************
1714 *Function name
1715 * field_compute_static_size : Determine the size of fields known by their
1716 * sole definition. Unions, arrays and struct sizes might be known, but
1717 * the parser does not give that information.
1718 *Input params
1719 * tf : tracefile
1720 * field : field
1721 *
1722 ****************************************************************************/
1723 #if 0
1724 void field_compute_static_size(LttFacility *fac, LttField *field)
1725 {
1726 LttType *type = &field->field_type;
1727
1728 switch(type->type_class) {
1729 case LTT_INT_FIXED:
1730 case LTT_UINT_FIXED:
1731 case LTT_POINTER:
1732 case LTT_CHAR:
1733 case LTT_UCHAR:
1734 case LTT_SHORT:
1735 case LTT_USHORT:
1736 case LTT_INT:
1737 case LTT_UINT:
1738 case LTT_LONG:
1739 case LTT_ULONG:
1740 case LTT_SIZE_T:
1741 case LTT_SSIZE_T:
1742 case LTT_OFF_T:
1743 case LTT_FLOAT:
1744 case LTT_ENUM:
1745 case LTT_STRING:
1746 /* nothing to do */
1747 break;
1748 case LTT_ARRAY:
1749 /* note this : array type size is the number of elements in the array,
1750 * while array field size is the length of the array in bytes */
1751 g_assert(type->fields->len == 1);
1752 {
1753 LttField *child = &g_array_index(type->fields, LttField, 0);
1754 field_compute_static_size(fac, child);
1755
1756 if(child->field_size != 0) {
1757 field->field_size = type->size * child->field_size;
1758 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1759 sizeof(off_t), type->size);
1760 } else {
1761 field->field_size = 0;
1762 }
1763 }
1764 break;
1765 case LTT_SEQUENCE:
1766 g_assert(type->fields->len == 2);
1767 {
1768 off_t local_offset = 0;
1769 LttField *child = &g_array_index(type->fields, LttField, 1);
1770 field_compute_static_size(fac, child);
1771 field->field_size = 0;
1772 type->size = 0;
1773 if(child->field_size != 0) {
1774 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1775 sizeof(off_t), SEQUENCE_AVG_ELEMENTS);
1776 }
1777 }
1778 break;
1779 case LTT_STRUCT:
1780 case LTT_UNION:
1781 {
1782 guint i;
1783 for(i=0;i<type->fields->len;i++) {
1784 LttField *child = &g_array_index(type->fields, LttField, i);
1785 field_compute_static_size(fac, child);
1786 if(child->field_size != 0) {
1787 type->size += ltt_align(type->size, get_alignment(child),
1788 fac->alignment);
1789 type->size += child->field_size;
1790 } else {
1791 /* As soon as we find a child with variable size, we have
1792 * a variable size */
1793 type->size = 0;
1794 break;
1795 }
1796 }
1797 field->field_size = type->size;
1798 }
1799 break;
1800 default:
1801 g_error("field_static_size : unknown type");
1802 }
1803
1804 }
1805 #endif //0
1806
1807
1808 /*****************************************************************************
1809 *Function name
1810 * precompute_fields_offsets : set the precomputable offset of the fields
1811 *Input params
1812 * fac : facility
1813 * field : the field
1814 * offset : pointer to the current offset, must be incremented
1815 *
1816 * return : 1 : found a variable length field, stop the processing.
1817 * 0 otherwise.
1818 ****************************************************************************/
1819
1820 #if 0
1821 gint precompute_fields_offsets(LttFacility *fac, LttField *field, off_t *offset, gint is_compact)
1822 {
1823 LttType *type = &field->field_type;
1824
1825 if(unlikely(is_compact)) {
1826 g_assert(field->field_size != 0);
1827 /* FIXME THIS IS A HUUUUUGE hack :
1828 * offset is between the compact_data field in struct LttEvent
1829 * and the address of the field root in the memory map.
1830 * ark. Both will stay at the same addresses while the event
1831 * is readable, so it's ok.
1832 */
1833 field->offset_root = 0;
1834 field->fixed_root = FIELD_FIXED;
1835 return 0;
1836 }
1837
1838 switch(type->type_class) {
1839 case LTT_INT_FIXED:
1840 case LTT_UINT_FIXED:
1841 case LTT_POINTER:
1842 case LTT_CHAR:
1843 case LTT_UCHAR:
1844 case LTT_SHORT:
1845 case LTT_USHORT:
1846 case LTT_INT:
1847 case LTT_UINT:
1848 case LTT_LONG:
1849 case LTT_ULONG:
1850 case LTT_SIZE_T:
1851 case LTT_SSIZE_T:
1852 case LTT_OFF_T:
1853 case LTT_FLOAT:
1854 case LTT_ENUM:
1855 g_assert(field->field_size != 0);
1856 /* Align offset on type size */
1857 *offset += ltt_align(*offset, get_alignment(field),
1858 fac->alignment);
1859 /* remember offset */
1860 field->offset_root = *offset;
1861 field->fixed_root = FIELD_FIXED;
1862 /* Increment offset */
1863 *offset += field->field_size;
1864 return 0;
1865 break;
1866 case LTT_STRING:
1867 field->offset_root = *offset;
1868 field->fixed_root = FIELD_FIXED;
1869 return 1;
1870 break;
1871 case LTT_ARRAY:
1872 g_assert(type->fields->len == 1);
1873 {
1874 LttField *child = &g_array_index(type->fields, LttField, 0);
1875
1876 *offset += ltt_align(*offset, get_alignment(field),
1877 fac->alignment);
1878
1879 /* remember offset */
1880 field->offset_root = *offset;
1881 field->array_offset = *offset;
1882 field->fixed_root = FIELD_FIXED;
1883
1884 /* Let the child be variable */
1885 //precompute_fields_offsets(tf, child, offset);
1886
1887 if(field->field_size != 0) {
1888 /* Increment offset */
1889 /* field_size is the array size in bytes */
1890 *offset += field->field_size;
1891 return 0;
1892 } else {
1893 return 1;
1894 }
1895 }
1896 break;
1897 case LTT_SEQUENCE:
1898 g_assert(type->fields->len == 2);
1899 {
1900 LttField *child;
1901 guint ret;
1902
1903 *offset += ltt_align(*offset, get_alignment(field),
1904 fac->alignment);
1905
1906 /* remember offset */
1907 field->offset_root = *offset;
1908 field->fixed_root = FIELD_FIXED;
1909
1910 child = &g_array_index(type->fields, LttField, 0);
1911 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1912 g_assert(ret == 0); /* Seq len cannot have variable len */
1913
1914 child = &g_array_index(type->fields, LttField, 1);
1915 *offset += ltt_align(*offset, get_alignment(child),
1916 fac->alignment);
1917 field->array_offset = *offset;
1918 /* Let the child be variable. */
1919 //ret = precompute_fields_offsets(fac, child, offset);
1920
1921 /* Cannot precompute fields offsets of sequence members, and has
1922 * variable length. */
1923 return 1;
1924 }
1925 break;
1926 case LTT_STRUCT:
1927 {
1928 LttField *child;
1929 guint i;
1930 gint ret=0;
1931
1932 *offset += ltt_align(*offset, get_alignment(field),
1933 fac->alignment);
1934 /* remember offset */
1935 field->offset_root = *offset;
1936 field->fixed_root = FIELD_FIXED;
1937
1938 for(i=0; i< type->fields->len; i++) {
1939 child = &g_array_index(type->fields, LttField, i);
1940 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1941
1942 if(ret) break;
1943 }
1944 return ret;
1945 }
1946 break;
1947 case LTT_UNION:
1948 {
1949 LttField *child;
1950 guint i;
1951 gint ret=0;
1952
1953 *offset += ltt_align(*offset, get_alignment(field),
1954 fac->alignment);
1955 /* remember offset */
1956 field->offset_root = *offset;
1957 field->fixed_root = FIELD_FIXED;
1958
1959 for(i=0; i< type->fields->len; i++) {
1960 *offset = field->offset_root;
1961 child = &g_array_index(type->fields, LttField, i);
1962 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1963
1964 if(ret) break;
1965 }
1966 *offset = field->offset_root + field->field_size;
1967 return ret;
1968 }
1969
1970 break;
1971 case LTT_NONE:
1972 default:
1973 g_error("precompute_fields_offsets : unknown type");
1974 return 1;
1975 }
1976
1977 }
1978
1979 #endif //0
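
/* Illustration (not built): a minimal sketch of the offset walk performed by
 * precompute_fields_offsets() above. The padding helper below is a hypothetical
 * stand-in for the ltt_align()/get_alignment() pair used by the real code. */
#if 0
/* Round 'offset' up to the next multiple of 'align' (assumed power of two). */
static size_t example_pad_to_alignment(size_t offset, size_t align)
{
  return (offset + align - 1) & ~(align - 1);
}

/* A guint8 followed by a guint32 with natural alignment lands at offsets
 * 0 and 4: the walk pads from 1 up to 4 before adding the second size. */
static void example_offset_walk(void)
{
  size_t offset = 0;

  /* guint8 at offset 0 */
  offset += sizeof(guint8);
  /* pad 1 -> 4, guint32 at offset 4 */
  offset = example_pad_to_alignment(offset, sizeof(guint32));
  offset += sizeof(guint32);
  /* next free offset is 8 */
}
#endif //0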
1980
1981 #if 0
1982 /*****************************************************************************
1983 *Function name
1984 * precompute_offsets : set the precomputable offset of an event type
1985 *Input params
1986 * tf : tracefile
1987 * event : event type
1988 *
1989 ****************************************************************************/
1990 void precompute_offsets(LttFacility *fac, LttEventType *event)
1991 {
1992 guint i;
1993 off_t offset = 0;
1994 gint ret;
1995
1996 /* First, compute the size of the fixed-size fields. This also determines the
1997 * size of arrays, structs and unions, which the parser does not do. */
1998 for(i=0; i<event->fields->len; i++) {
1999 LttField *field = &g_array_index(event->fields, LttField, i);
2000 field_compute_static_size(fac, field);
2001 }
2002
2003 /* Precompute all known offsets */
2004 for(i=0; i<event->fields->len; i++) {
2005 LttField *field = &g_array_index(event->fields, LttField, i);
2006 if(event->has_compact_data && i == 0)
2007 ret = precompute_fields_offsets(fac, field, &offset, 1);
2008 else
2009 ret = precompute_fields_offsets(fac, field, &offset, 0);
2010 if(ret) break;
2011 }
2012 }
2013 #endif //0
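
/* Illustration (not built): the "stop at the first variable-size field" rule
 * applied by precompute_offsets() above, using hypothetical field descriptors.
 * Alignment is left out here for brevity; see the real code for it. */
#if 0
struct example_field_desc {
  size_t size;        /* 0 means variable size (string, sequence, ...) */
  size_t offset_root; /* only meaningful for fields reached before the break */
};

static void example_precompute(struct example_field_desc *fields, guint len)
{
  size_t offset = 0;
  guint i;

  for(i = 0; i < len; i++) {
    /* The start offset of this field is still known... */
    fields[i].offset_root = offset;
    /* ...but everything after a variable-size field must be recomputed
     * for every event while it is being parsed. */
    if(fields[i].size == 0)
      break;
    offset += fields[i].size;
  }
}
#endif //0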
2014
2015
2016
2017 /*****************************************************************************
2018 *Function name
2019 * preset_field_type_size : set the fixed sizes of the field type
2020 *Input params
2021 * tf : tracefile
2022 * event_type : event type
2023 * offset_root : offset from the root
2024 * offset_parent : offset from the parent
2025 * fixed_root : Do we know a fixed offset to the root ?
2026 * fixed_parent : Do we know a fixed offset to the parent ?
2027 * field : field
2028 ****************************************************************************/
2029
2030
2031
2032 // Preset the fixed-size offsets. They are calculated just as genevent-new does:
2033 // by incrementing a *to value that represents the offset from the start of the
2034 // event data.
2035 // The preset information is: offsets up to (and including) the first element of
2036 // variable size. All subsequent fields must be flagged "VARIABLE OFFSET".
2037 #if 0
2038 void preset_field_type_size(LttTracefile *tf, LttEventType *event_type,
2039 off_t offset_root, off_t offset_parent,
2040 enum field_status *fixed_root, enum field_status *fixed_parent,
2041 LttField *field)
2042 {
2043 enum field_status local_fixed_root, local_fixed_parent;
2044 guint i;
2045 LttType *type;
2046
2047 g_assert(field->fixed_root == FIELD_UNKNOWN);
2048 g_assert(field->fixed_parent == FIELD_UNKNOWN);
2049 g_assert(field->fixed_size == FIELD_UNKNOWN);
2050
2051 type = field->field_type;
2052
2053 field->fixed_root = *fixed_root;
2054 if(field->fixed_root == FIELD_FIXED)
2055 field->offset_root = offset_root;
2056 else
2057 field->offset_root = 0;
2058
2059 field->fixed_parent = *fixed_parent;
2060 if(field->fixed_parent == FIELD_FIXED)
2061 field->offset_parent = offset_parent;
2062 else
2063 field->offset_parent = 0;
2064
2065 size_t current_root_offset;
2066 size_t current_offset;
2067 enum field_status current_child_status, final_child_status;
2068 size_t max_size;
2069
2070 switch(type->type_class) {
2071 case LTT_INT_FIXED:
2072 case LTT_UINT_FIXED:
2073 case LTT_CHAR:
2074 case LTT_UCHAR:
2075 case LTT_SHORT:
2076 case LTT_USHORT:
2077 case LTT_INT:
2078 case LTT_UINT:
2079 case LTT_FLOAT:
2080 case LTT_ENUM:
2081 field->field_size = ltt_type_size(tf->trace, type);
2082 field->fixed_size = FIELD_FIXED;
2083 break;
2084 case LTT_POINTER:
2085 field->field_size = (off_t)event_type->facility->pointer_size;
2086 field->fixed_size = FIELD_FIXED;
2087 break;
2088 case LTT_LONG:
2089 case LTT_ULONG:
2090 field->field_size = (off_t)event_type->facility->long_size;
2091 field->fixed_size = FIELD_FIXED;
2092 break;
2093 case LTT_SIZE_T:
2094 case LTT_SSIZE_T:
2095 case LTT_OFF_T:
2096 field->field_size = (off_t)event_type->facility->size_t_size;
2097 field->fixed_size = FIELD_FIXED;
2098 break;
2099 case LTT_SEQUENCE:
2100 local_fixed_root = FIELD_VARIABLE;
2101 local_fixed_parent = FIELD_VARIABLE;
2102 preset_field_type_size(tf, event_type,
2103 0, 0,
2104 &local_fixed_root, &local_fixed_parent,
2105 field->child[0]);
2106 field->fixed_size = FIELD_VARIABLE;
2107 field->field_size = 0;
2108 *fixed_root = FIELD_VARIABLE;
2109 *fixed_parent = FIELD_VARIABLE;
2110 break;
2111 case LTT_STRING:
2112 field->fixed_size = FIELD_VARIABLE;
2113 field->field_size = 0;
2114 *fixed_root = FIELD_VARIABLE;
2115 *fixed_parent = FIELD_VARIABLE;
2116 break;
2117 case LTT_ARRAY:
2118 local_fixed_root = FIELD_VARIABLE;
2119 local_fixed_parent = FIELD_VARIABLE;
2120 preset_field_type_size(tf, event_type,
2121 0, 0,
2122 &local_fixed_root, &local_fixed_parent,
2123 field->child[0]);
2124 field->fixed_size = field->child[0]->fixed_size;
2125 if(field->fixed_size == FIELD_FIXED) {
2126 field->field_size = type->element_number * field->child[0]->field_size;
2127 } else {
2128 field->field_size = 0;
2129 *fixed_root = FIELD_VARIABLE;
2130 *fixed_parent = FIELD_VARIABLE;
2131 }
2132 break;
2133 case LTT_STRUCT:
2134 current_root_offset = field->offset_root;
2135 current_offset = 0;
2136 current_child_status = FIELD_FIXED;
2137 for(i=0;i<type->element_number;i++) {
2138 preset_field_type_size(tf, event_type,
2139 current_root_offset, current_offset,
2140 fixed_root, &current_child_status,
2141 field->child[i]);
2142 if(current_child_status == FIELD_FIXED) {
2143 current_root_offset += field->child[i]->field_size;
2144 current_offset += field->child[i]->field_size;
2145 } else {
2146 current_root_offset = 0;
2147 current_offset = 0;
2148 }
2149 }
2150 if(current_child_status != FIELD_FIXED) {
2151 *fixed_parent = current_child_status;
2152 field->field_size = 0;
2153 field->fixed_size = current_child_status;
2154 } else {
2155 field->field_size = current_offset;
2156 field->fixed_size = FIELD_FIXED;
2157 }
2158 break;
2159 case LTT_UNION:
2160 current_root_offset = field->offset_root;
2161 current_offset = 0;
2162 max_size = 0;
2163 final_child_status = FIELD_FIXED;
2164 for(i=0;i<type->element_number;i++) {
2165 enum field_status current_root_child_status = FIELD_FIXED;
2166 enum field_status current_child_status = FIELD_FIXED;
2167 preset_field_type_size(tf, event_type,
2168 current_root_offset, current_offset,
2169 &current_root_child_status, &current_child_status,
2170 field->child[i]);
2171 if(current_child_status != FIELD_FIXED)
2172 final_child_status = current_child_status;
2173 else
2174 max_size = max(max_size, field->child[i]->field_size);
2175 }
2176 if(final_child_status != FIELD_FIXED) {
2177 g_error("LTTV does not support variable size fields in unions.");
2178 /* This will stop the application. */
2179 *fixed_root = final_child_status;
2180 *fixed_parent = final_child_status;
2181 field->field_size = 0;
2182 field->fixed_size = final_child_status;
2183 } else {
2184 field->field_size = max_size;
2185 field->fixed_size = FIELD_FIXED;
2186 }
2187 break;
2188 case LTT_NONE:
2189 g_error("unexpected type NONE");
2190 break;
2191 }
2192
2193 }
2194 #endif //0
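
/* Illustration (not built) of the union rule enforced above: the precomputed
 * size of a union is the largest fixed member size, and any variable-size
 * member is rejected with g_error(). The union below is hypothetical. */
#if 0
union example_payload {
  guint32 status;   /* 4 bytes */
  guint64 address;  /* 8 bytes */
  char    tag[3];   /* 3 bytes */
};
/* max(4, 8, 3) == 8, which matches sizeof(union example_payload); adding a
 * string or sequence member would make the whole union unsupported. */
#endif //0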
2195
2196 /*****************************************************************************
2197 *Function name
2198 * check_fields_compatibility : Check for compatibility between two fields :
2199 * do they use the same inner structure ?
2200 *Input params
2201 * event_type1 : event type
2202 * event_type2 : event type
2203 * field1 : field
2204 * field2 : field
2205 *Returns : 0 if identical
2206 * 1 if not.
2207 ****************************************************************************/
2208 // This function checks field types for equality; therefore, it does not compare
2209 // offsets per se. For instance, an aligned version of a structure is
2210 // compatible with an unaligned version of the same structure.
2211 #if 0
2212 gint check_fields_compatibility(LttEventType *event_type1,
2213 LttEventType *event_type2,
2214 LttField *field1, LttField *field2)
2215 {
2216 guint different = 0;
2217 LttType *type1;
2218 LttType *type2;
2219
2220 if(field1 == NULL) {
2221 if(field2 == NULL) goto end;
2222 else {
2223 different = 1;
2224 goto end;
2225 }
2226 } else if(field2 == NULL) {
2227 different = 1;
2228 goto end;
2229 }
2230
2231 type1 = &field1->field_type;
2232 type2 = &field2->field_type;
2233
2234 if(type1->type_class != type2->type_class) {
2235 different = 1;
2236 goto end;
2237 }
2238 if(type1->network != type2->network) {
2239 different = 1;
2240 goto end;
2241 }
2242
2243 switch(type1->type_class) {
2244 case LTT_INT_FIXED:
2245 case LTT_UINT_FIXED:
2246 case LTT_POINTER:
2247 case LTT_CHAR:
2248 case LTT_UCHAR:
2249 case LTT_SHORT:
2250 case LTT_USHORT:
2251 case LTT_INT:
2252 case LTT_UINT:
2253 case LTT_LONG:
2254 case LTT_ULONG:
2255 case LTT_SIZE_T:
2256 case LTT_SSIZE_T:
2257 case LTT_OFF_T:
2258 case LTT_FLOAT:
2259 case LTT_ENUM:
2260 if(field1->field_size != field2->field_size)
2261 different = 1;
2262 break;
2263 case LTT_STRING:
2264 break;
2265 case LTT_ARRAY:
2266 {
2267 LttField *child1 = &g_array_index(type1->fields, LttField, 0);
2268 LttField *child2 = &g_array_index(type2->fields, LttField, 0);
2269
2270 if(type1->size != type2->size)
2271 different = 1;
2272 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2273 different = 1;
2274 }
2275 break;
2276 case LTT_SEQUENCE:
2277 {
2278 LttField *child1 = &g_array_index(type1->fields, LttField, 1);
2279 LttField *child2 = &g_array_index(type2->fields, LttField, 1);
2280
2281 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2282 different = 1;
2283 }
2284 break;
2285 case LTT_STRUCT:
2286 case LTT_UNION:
2287 {
2288 LttField *child;
2289 guint i;
2290
2291 if(type1->fields->len != type2->fields->len) {
2292 different = 1;
2293 goto end;
2294 }
2295
2296 for(i=0; i< type1->fields->len; i++) {
2297 LttField *child1;
2298 LttField *child2;
2299 child1 = &g_array_index(type1->fields, LttField, i);
2300 child2 = &g_array_index(type2->fields, LttField, i);
2301 different = check_fields_compatibility(event_type1,
2302 event_type2, child1, child2);
2303
2304 if(different) break;
2305 }
2306 }
2307 break;
2308 case LTT_NONE:
2309 default:
2310 g_error("check_fields_compatibility : unknown type");
2311 }
2312
2313 end:
2314 return different;
2315 }
2316 #endif //0
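
/* Illustration (not built): the compatibility check above compares type
 * classes, sizes and children recursively, but never offsets, so an aligned
 * and a packed layout of the same fields are reported as identical (0).
 * Both structures below are hypothetical. */
#if 0
struct example_evt_aligned {
  guint8  cpu;
  guint32 count;   /* sits at offset 4 because of padding */
};

struct example_evt_packed {
  guint8  cpu;
  guint32 count;   /* sits at offset 1, no padding */
} __attribute__((packed));
/* Field by field, both carry a 1-byte then a 4-byte integer, so the
 * corresponding LttField trees would compare as compatible. */
#endif //0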
2317
2318 #if 0
2319 gint check_fields_compatibility(LttEventType *event_type1,
2320 LttEventType *event_type2,
2321 LttField *field1, LttField *field2)
2322 {
2323 guint different = 0;
2324 guint i;
2325 LttType *type1;
2326 LttType *type2;
2327
2328 if(field1 == NULL) {
2329 if(field2 == NULL) goto end;
2330 else {
2331 different = 1;
2332 goto end;
2333 }
2334 } else if(field2 == NULL) {
2335 different = 1;
2336 goto end;
2337 }
2338
2339 g_assert(field1->fixed_root != FIELD_UNKNOWN);
2340 g_assert(field2->fixed_root != FIELD_UNKNOWN);
2341 g_assert(field1->fixed_parent != FIELD_UNKNOWN);
2342 g_assert(field2->fixed_parent != FIELD_UNKNOWN);
2343 g_assert(field1->fixed_size != FIELD_UNKNOWN);
2344 g_assert(field2->fixed_size != FIELD_UNKNOWN);
2345
2346 type1 = field1->field_type;
2347 type2 = field2->field_type;
2348
2349 if(type1->type_class != type2->type_class) {
2350 different = 1;
2351 goto end;
2352 }
2353 if(type1->element_name != type2->element_name) {
2354 different = 1;
2355 goto end;
2356 }
2357
2358 switch(type1->type_class) {
2359 case LTT_INT_FIXED:
2360 case LTT_UINT_FIXED:
2361 case LTT_POINTER:
2362 case LTT_CHAR:
2363 case LTT_UCHAR:
2364 case LTT_SHORT:
2365 case LTT_USHORT:
2366 case LTT_INT:
2367 case LTT_UINT:
2368 case LTT_FLOAT:
2370 case LTT_LONG:
2371 case LTT_ULONG:
2372 case LTT_SIZE_T:
2373 case LTT_SSIZE_T:
2374 case LTT_OFF_T:
2375 if(field1->field_size != field2->field_size) {
2376 different = 1;
2377 goto end;
2378 }
2379 break;
2380 case LTT_ENUM:
2381 if(type1->element_number != type2->element_number) {
2382 different = 1;
2383 goto end;
2384 }
2385 for(i=0;i<type1->element_number;i++) {
2386 if(type1->enum_strings[i] != type2->enum_strings[i]) {
2387 different = 1;
2388 goto end;
2389 }
2390 }
2391 break;
2392 case LTT_SEQUENCE:
2393 /* Two elements : size and child */
2394 g_assert(type1->element_number == type2->element_number);
2395 for(i=0;i<type1->element_number;i++) {
2396 if(check_fields_compatibility(event_type1, event_type2,
2397 field1->child[0], field2->child[0])) {
2398 different = 1;
2399 goto end;
2400 }
2401 }
2402 break;
2403 case LTT_STRING:
2404 break;
2405 case LTT_ARRAY:
2406 if(field1->field_size != field2->field_size) {
2407 different = 1;
2408 goto end;
2409 }
2410 /* Two elements : size and child */
2411 g_assert(type1->element_number == type2->element_number);
2412 for(i=0;i<type1->element_number;i++) {
2413 if(check_fields_compatibility(event_type1, event_type2,
2414 field1->child[0], field2->child[0])) {
2415 different = 1;
2416 goto end;
2417 }
2418 }
2419 break;
2420 case LTT_STRUCT:
2421 case LTT_UNION:
2422 if(type1->element_number != type2->element_number) {
2423 different = 1;
2424 break;
2425 }
2426 for(i=0;i<type1->element_number;i++) {
2427 if(check_fields_compatibility(event_type1, event_type2,
2428 field1->child[i], field2->child[i])) {
2429 different = 1;
2430 goto end;
2431 }
2432 }
2433 break;
2434 }
2435 end:
2436 return different;
2437 }
2438 #endif //0
2439
2440
2441 /*****************************************************************************
2442 *Function name
2443 * ltt_get_int : get an integer number
2444 *Input params
2445 * reverse_byte_order: must we reverse the byte order ?
2446 * size : the size of the integer
2447 * data : the data pointer
2448 *Return value
2449 * gint64 : a 64-bit integer
2450 ****************************************************************************/
2451
2452 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
2453 {
2454 gint64 val;
2455
2456 switch(size) {
2457 case 1: val = *((gint8*)data); break;
2458 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
2459 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
2460 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
2461 default: val = ltt_get_int64(reverse_byte_order, data);
2462 g_critical("get_int : integer size %d unknown", size);
2463 break;
2464 }
2465
2466 return val;
2467 }
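
/* Usage sketch (not built): reading a 2-byte signed counter from raw event
 * data. The buffer and byte-order handling below are illustrative; it is
 * assumed that ltt_get_int16() sign-extends, as its name suggests. */
#if 0
static void example_get_int(void)
{
  /* 0xFFFE is -2 as a 16-bit two's complement value, stored big-endian. */
  guint8 buf[2] = { 0xFF, 0xFE };
  /* Swap only when the trace byte order differs from the host byte order. */
  gboolean reverse = (G_BYTE_ORDER == G_LITTLE_ENDIAN);
  gint64 v = ltt_get_int(reverse, 2, buf);
  /* v is expected to be -2: the sign bit propagates to the 64-bit result. */
}
#endif //0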
2468
2469 /*****************************************************************************
2470 *Function name
2471 * ltt_get_uint : get an unsigned integer number
2472 *Input params
2473 * reverse_byte_order: must we reverse the byte order ?
2474 * size : the size of the integer
2475 * data : the data pointer
2476 *Return value
2477 * guint64 : a 64-bit unsigned integer
2478 ****************************************************************************/
2479
2480 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
2481 {
2482 guint64 val;
2483
2484 switch(size) {
2485 case 1: val = *((guint8*)data); break;
2486 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
2487 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
2488 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
2489 default: val = ltt_get_uint64(reverse_byte_order, data);
2490 g_critical("get_uint : unsigned integer size %d unknown",
2491 size);
2492 break;
2493 }
2494
2495 return val;
2496 }
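
/* Usage sketch (not built): single-byte reads need no byte swapping but must
 * be zero-extended, so 0xFF comes back as 255 rather than a sign-extended
 * 0xFFFFFFFFFFFFFFFF. Values below are illustrative only. */
#if 0
static void example_get_uint(void)
{
  guint8 byte = 0xFF;
  guint32 word = 0x12345678;

  guint64 b = ltt_get_uint(FALSE, 1, &byte); /* 255 */
  guint64 w = ltt_get_uint(FALSE, 4, &word); /* 0x12345678 when no swap is needed */
}
#endif //0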
2497
2498
2499 /* get the node name of the system */
2500
2501 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
2502 {
2503 return s->node_name;
2504 }
2505
2506
2507 /* get the domain name of the system */
2508
2509 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
2510 {
2511 return s->domain_name;
2512 }
2513
2514
2515 /* get the description of the system */
2516
2517 char * ltt_trace_system_description_description (LttSystemDescription * s)
2518 {
2519 return s->description;
2520 }
2521
2522
2523 /* get the NTP corrected start time of the trace */
2524 LttTime ltt_trace_start_time(LttTrace *t)
2525 {
2526 return t->start_time;
2527 }
2528
2529 /* get the monotonic start time of the trace */
2530 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
2531 {
2532 return t->start_time_from_tsc;
2533 }
2534
2535 static LttTracefile *ltt_tracefile_new()
2536 {
2537 LttTracefile *tf;
2538 tf = g_new(LttTracefile, 1);
2539 tf->event.tracefile = tf;
2540 return tf;
2541 }
2542
2543 static void ltt_tracefile_destroy(LttTracefile *tf)
2544 {
2545 g_free(tf);
2546 }
2547
2548 static void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
2549 {
2550 *dest = *src;
2551 }
2552
2553 /* Before library loading... */
2554
2555 static __attribute__((constructor)) void init(void)
2556 {
2557 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
2558 }
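
/* Usage sketch (not built): because the quark is interned before anything
 * else in this library runs, a tracefile name can later be tested against it
 * with a plain integer comparison. The helper name is hypothetical. */
#if 0
static gboolean example_is_metadata(GQuark tracefile_name)
{
  return tracefile_name == LTT_TRACEFILE_NAME_METADATA;
}
#endif //0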