[lttv.git] / trunk / lttv / ltt / tracefile.c
1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <malloc.h>
36 #include <sys/mman.h>
37 #include <ctype.h>        /* isprint(), used by print_debug_event_data() */
38 #include <glib/gprintf.h> /* g_printf() */
39 // For realpath
40 #include <limits.h>
41 #include <stdlib.h>
42
43
44 #include <ltt/ltt.h>
45 #include "ltt-private.h"
46 #include <ltt/trace.h>
47 #include <ltt/event.h>
48 #include <ltt/ltt-types.h>
49 #include <ltt/marker.h>
50
51 /* Tracefile names used in this file */
52
53 GQuark LTT_TRACEFILE_NAME_METADATA;
54
55 #ifndef g_open
56 #define g_open open
57 #endif
58
59
60 #define __UNUSED__ __attribute__((__unused__))
61
62 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
63
64 #ifndef g_debug
65 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
66 #endif
67
68 #define g_close close
69
70 /* These macros must be used from within a function where page_size is a known
71  * variable (e.g. int page_size = getpagesize();) */
72 #define PAGE_MASK (~(page_size-1))
73 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
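/* Illustrative arithmetic (not from the original source) : with page_size ==
 * 4096, PAGE_MASK == ~0xFFF, so PAGE_ALIGN(1) == PAGE_ALIGN(4096) == 4096 and
 * PAGE_ALIGN(4097) == 8192, i.e. sizes are rounded up to a page multiple. */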
74
75 LttTrace *father_trace = NULL;
76
77 /* set the offsets of the fields belonging to the event;
78    this needs information about the architecture */
79 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
80 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
81
82 #if 0
83 /* get the size of the field type according to
84 * The facility size information. */
85 static inline void preset_field_type_size(LttTracefile *tf,
86 LttEventType *event_type,
87 off_t offset_root, off_t offset_parent,
88 enum field_status *fixed_root, enum field_status *fixed_parent,
89 LttField *field);
90 #endif //0
91
92 /* map a fixed size or a block information from the file (fd) */
93 static gint map_block(LttTracefile * tf, guint block_num);
94
95 /* calculate nsec per cycles for current block */
96 #if 0
97 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
98 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
99 #endif //0
100
101 /* go to the next event */
102 static int ltt_seek_next_event(LttTracefile *tf);
103
104 static int open_tracefiles(LttTrace *trace, gchar *root_path,
105 gchar *relative_path);
106 static int ltt_process_metadata_tracefile(LttTracefile *tf);
107 static void ltt_tracefile_time_span_get(LttTracefile *tf,
108 LttTime *start, LttTime *end);
109 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
110 static gint map_block(LttTracefile * tf, guint block_num);
111 static void ltt_update_event_size(LttTracefile *tf);
112
113 /* Enable event debugging */
114 static int a_event_debug = 0;
115
116 void ltt_event_debug(int state)
117 {
118 a_event_debug = state;
119 }
120
121 /* trace can be NULL
122 *
123 * Return value : 0 success, 1 bad tracefile
124 */
125 static int parse_trace_header(ltt_subbuffer_header_t *header,
126 LttTracefile *tf, LttTrace *t)
127 {
128 if (header->magic_number == LTT_MAGIC_NUMBER)
129 tf->reverse_bo = 0;
130 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
131 tf->reverse_bo = 1;
132 else /* invalid magic number, bad tracefile ! */
133 return 1;
134
135 if(t) {
136 t->ltt_major_version = header->major_version;
137 t->ltt_minor_version = header->minor_version;
138 t->arch_size = header->arch_size;
139 }
140 tf->alignment = header->alignment;
141
142 /* Get float byte order : might be different from int byte order
143 * (or is set to 0 if the trace has no float (kernel trace)) */
144 tf->float_word_order = 0;
145
146 switch(header->major_version) {
147 case 0:
148 case 1:
149 g_warning("Unsupported trace version : %hhu.%hhu",
150 header->major_version, header->minor_version);
151 return 1;
152 break;
153 case 2:
154 switch(header->minor_version) {
155 case 2:
156 {
157 struct ltt_subbuffer_header_2_2 *vheader = (struct ltt_subbuffer_header_2_2 *)header;
158 tf->buffer_header_size = ltt_subbuffer_header_size();
159 tf->tscbits = 27;
160 tf->eventbits = 5;
161 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
162 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
163
164 if(t) {
165 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
166 &vheader->start_freq);
167 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
168 &vheader->freq_scale);
169 if(father_trace) {
170 t->start_freq = father_trace->start_freq;
171 t->freq_scale = father_trace->freq_scale;
172 } else {
173 father_trace = t;
174 }
175 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
176 &vheader->cycle_count_begin);
177 t->start_monotonic = 0;
178 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
179 &vheader->start_time_sec);
180 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
181 &vheader->start_time_usec);
182 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
183
184 t->start_time_from_tsc = ltt_time_from_uint64(
185 (double)t->start_tsc
186 * (1000000000.0 / tf->trace->freq_scale)
187 / (double)t->start_freq);
188 }
189 }
190 break;
191 default:
192 g_warning("Unsupported trace version : %hhu.%hhu",
193 header->major_version, header->minor_version);
194 return 1;
195 }
196 break;
197 default:
198 g_warning("Unsupported trace version : %hhu.%hhu",
199 header->major_version, header->minor_version);
200 return 1;
201 }
202 return 0;
203 }
204
205
206
207 /*****************************************************************************
208 *Function name
209 * ltt_tracefile_open : open a trace file, construct a LttTracefile
210 *Input params
211 * t : the trace containing the tracefile
212 * fileName : path name of the trace file
213 * tf : the tracefile structure
214 *Return value
215 * : 0 for success, -1 otherwise.
216 ****************************************************************************/
217
218 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
219 {
220 struct stat lTDFStat; /* Trace data file status */
221 ltt_subbuffer_header_t *header;
222 int page_size = getpagesize();
223
224 //open the file
225 tf->long_name = g_quark_from_string(fileName);
226 tf->trace = t;
227 tf->fd = open(fileName, O_RDONLY);
228 if(tf->fd < 0){
229 g_warning("Unable to open input data file %s\n", fileName);
230 goto end;
231 }
232
233 // Get the file's status
234 if(fstat(tf->fd, &lTDFStat) < 0){
235 g_warning("Unable to get the status of the input data file %s\n", fileName);
236 goto close_file;
237 }
238
239 // Is the file large enough to contain a trace
240 if(lTDFStat.st_size <
241 (off_t)(ltt_subbuffer_header_size())){
242 g_print("The input data file %s does not contain a trace\n", fileName);
243 goto close_file;
244 }
245
246 /* Temporarily map the buffer start header to get trace information */
247 /* Multiple of pages aligned head */
248 tf->buffer.head = mmap(0,
249 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
250 MAP_PRIVATE, tf->fd, 0);
251 if(tf->buffer.head == MAP_FAILED) {
252 perror("Error in allocating memory for buffer of tracefile");
253 goto close_file;
254 }
255 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
256
257 header = (ltt_subbuffer_header_t *)tf->buffer.head;
258
259 if(parse_trace_header(header, tf, NULL)) {
260 g_warning("parse_trace_header error");
261 goto unmap_file;
262 }
263
264 //store the size of the file
265 tf->file_size = lTDFStat.st_size;
266 tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
267 tf->num_blocks = tf->file_size / tf->buf_size;
268 tf->events_lost = 0;
269 tf->subbuf_corrupt = 0;
270
271 if(munmap(tf->buffer.head,
272 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
273 g_warning("unmap size : %u\n",
274 PAGE_ALIGN(ltt_subbuffer_header_size()));
275 perror("munmap error");
276 g_assert(0);
277 }
278 tf->buffer.head = NULL;
279
280 //read the first block
281 if(map_block(tf,0)) {
282 perror("Cannot map block for tracefile");
283 goto close_file;
284 }
285
286 return 0;
287
288 /* Error */
289 unmap_file:
290 if(munmap(tf->buffer.head,
291 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
292 g_warning("unmap size : %u\n",
293 PAGE_ALIGN(ltt_subbuffer_header_size()));
294 perror("munmap error");
295 g_assert(0);
296 }
297 close_file:
298 close(tf->fd);
299 end:
300 return -1;
301 }
302
303
304 /*****************************************************************************
305 *Function name
306 * ltt_tracefile_close: close a trace file,
307 *Input params
308 * t : tracefile which will be closed
309 ****************************************************************************/
310
311 static void ltt_tracefile_close(LttTracefile *t)
312 {
313 int page_size = getpagesize();
314
315 if(t->buffer.head != NULL)
316 if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
317 g_warning("unmap size : %u\n",
318 PAGE_ALIGN(t->buf_size));
319 perror("munmap error");
320 g_assert(0);
321 }
322
323 close(t->fd);
324 }
325
326 /****************************************************************************
327 * get_absolute_pathname
328 *
329 * Return the unique (canonical) pathname in the system.
330 *
331 * MD : Fixed this function so it uses realpath, handling previously
332 * overlooked cases (".." was not resolved correctly before).
333 *
334 ****************************************************************************/
335 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
336 {
337 abs_pathname[0] = '\0';
338
339 if (realpath(pathname, abs_pathname) != NULL)
340 return;
341 else
342 {
343 /* error, return the original path unmodified */
344 strcpy(abs_pathname, pathname);
345 return;
346 }
347 return;
348 }
349
350 /* Search for something like : .*_.*
351 *
352 * The left side is the name, the right side is the number.
353 */
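/* Hedged examples of the forms accepted by get_tracefile_name_number() below
 * (names and numbers are illustrative, inferred from the parsing code) :
 *   "kernel_0"                    -> name "kernel", cpu number 0
 *   "dir1/dir2/prog-1234.5678.42" -> userspace form : tid 1234, pgid 5678,
 *                                    creation 42, cpu number unknown (0)
 */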
354
355 static int get_tracefile_name_number(gchar *raw_name,
356 GQuark *name,
357 guint *num,
358 gulong *tid,
359 gulong *pgid,
360 guint64 *creation)
361 {
362 guint raw_name_len = strlen(raw_name);
363 gchar char_name[PATH_MAX];
364 int i;
365 int underscore_pos;
366 long int cpu_num;
367 gchar *endptr;
368 gchar *tmpptr;
369
370 for(i=raw_name_len-1;i>=0;i--) {
371 if(raw_name[i] == '_') break;
372 }
373 if(i==-1) { /* Either not found or name length is 0 */
374 /* This is a userspace tracefile */
375 strncpy(char_name, raw_name, raw_name_len);
376 char_name[raw_name_len] = '\0';
377 *name = g_quark_from_string(char_name);
378 *num = 0; /* unknown cpu */
379 for(i=0;i<raw_name_len;i++) {
380 if(raw_name[i] == '/') {
381 break;
382 }
383 }
384 i++;
385 for(;i<raw_name_len;i++) {
386 if(raw_name[i] == '/') {
387 break;
388 }
389 }
390 i++;
391 for(;i<raw_name_len;i++) {
392 if(raw_name[i] == '-') {
393 break;
394 }
395 }
396 if(i == raw_name_len) return -1;
397 i++;
398 tmpptr = &raw_name[i];
399 for(;i<raw_name_len;i++) {
400 if(raw_name[i] == '.') {
401 raw_name[i] = ' ';
402 break;
403 }
404 }
405 *tid = strtoul(tmpptr, &endptr, 10);
406 if(endptr == tmpptr)
407 return -1; /* No digit */
408 if(*tid == ULONG_MAX)
409 return -1; /* underflow / overflow */
410 i++;
411 tmpptr = &raw_name[i];
412 for(;i<raw_name_len;i++) {
413 if(raw_name[i] == '.') {
414 raw_name[i] = ' ';
415 break;
416 }
417 }
418 *pgid = strtoul(tmpptr, &endptr, 10);
419 if(endptr == tmpptr)
420 return -1; /* No digit */
421 if(*pgid == ULONG_MAX)
422 return -1; /* underflow / overflow */
423 i++;
424 tmpptr = &raw_name[i];
425 *creation = strtoull(tmpptr, &endptr, 10);
426 if(endptr == tmpptr)
427 return -1; /* No digit */
428 if(*creation == G_MAXUINT64)
429 return -1; /* underflow / overflow */
430 } else {
431 underscore_pos = i;
432
433 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
434
435 if(endptr == raw_name+underscore_pos+1)
436 return -1; /* No digit */
437 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
438 return -1; /* underflow / overflow */
439
440 strncpy(char_name, raw_name, underscore_pos);
441 char_name[underscore_pos] = '\0';
442
443 *name = g_quark_from_string(char_name);
444 *num = cpu_num;
445 }
446
447
448 return 0;
449 }
450
451
452 GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
453 {
454 return &trace->tracefiles;
455 }
456
457
458 void compute_tracefile_group(GQuark key_id,
459 GArray *group,
460 struct compute_tracefile_group_args *args)
461 {
462 int i;
463 LttTracefile *tf;
464
465 for(i=0; i<group->len; i++) {
466 tf = &g_array_index (group, LttTracefile, i);
467 if(tf->cpu_online)
468 args->func(tf, args->func_args);
469 }
470 }
471
472
473 static void ltt_tracefile_group_destroy(gpointer data)
474 {
475 GArray *group = (GArray *)data;
476 int i;
477 LttTracefile *tf;
478
479 for(i=0; i<group->len; i++) {
480 tf = &g_array_index (group, LttTracefile, i);
481 if(tf->cpu_online)
482 ltt_tracefile_close(tf);
483 }
484 g_array_free(group, TRUE);
485 }
486
487 static gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
488 {
489 GArray *group = (GArray *)data;
490 int i;
491 LttTracefile *tf;
492
493 for(i=0; i<group->len; i++) {
494 tf = &g_array_index (group, LttTracefile, i);
495 if(tf->cpu_online)
496 return 1;
497 }
498 return 0;
499 }
500
501
502 /* Open each tracefile under a specific directory. Put them in a
503 * GData : this allows accessing them by their tracefile group pathname,
504 * e.g. accessing the control/modules tracefile group by the index
505 * "control/modules".
506 *
507 * relative path is the path relative to the trace root
508 * root path is the full path
509 *
510 * A tracefile group is simply an array where all the per cpu tracefiles sit.
511 */
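/* A minimal lookup sketch, assuming the layout described above (the group name
 * is illustrative) :
 *
 *   GArray *group = g_datalist_id_get_data(&trace->tracefiles,
 *                                   g_quark_from_string("control/modules"));
 *   if (group != NULL && group->len > 0) {
 *       LttTracefile *tf = &g_array_index(group, LttTracefile, 0);
 *       ...
 *   }
 */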
512
513 static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
514 {
515 DIR *dir = opendir(root_path);
516 struct dirent *entry;
517 struct stat stat_buf;
518 int ret;
519
520 gchar path[PATH_MAX];
521 int path_len;
522 gchar *path_ptr;
523
524 int rel_path_len;
525 gchar rel_path[PATH_MAX];
526 gchar *rel_path_ptr;
527 LttTracefile tmp_tf;
528
529 if(dir == NULL) {
530 perror(root_path);
531 return ENOENT;
532 }
533
534 strncpy(path, root_path, PATH_MAX-1);
535 path_len = strlen(path);
536 path[path_len] = '/';
537 path_len++;
538 path_ptr = path + path_len;
539
540 strncpy(rel_path, relative_path, PATH_MAX-1);
541 rel_path_len = strlen(rel_path);
542 rel_path[rel_path_len] = '/';
543 rel_path_len++;
544 rel_path_ptr = rel_path + rel_path_len;
545
546 while((entry = readdir(dir)) != NULL) {
547
548 if(entry->d_name[0] == '.') continue;
549
550 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
551 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
552
553 ret = stat(path, &stat_buf);
554 if(ret == -1) {
555 perror(path);
556 continue;
557 }
558
559 g_debug("Tracefile file or directory : %s\n", path);
560
561 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
562
563 if(S_ISDIR(stat_buf.st_mode)) {
564
565 g_debug("Entering subdirectory...\n");
566 ret = open_tracefiles(trace, path, rel_path);
567 if(ret < 0) continue;
568 } else if(S_ISREG(stat_buf.st_mode)) {
569 GQuark name;
570 guint num;
571 gulong tid, pgid;
572 guint64 creation;
573 GArray *group;
574 num = 0;
575 tid = pgid = 0;
576 creation = 0;
577 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
578 continue; /* invalid name */
579
580 g_debug("Opening file.\n");
581 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
582 g_info("Error opening tracefile %s", path);
583
584 continue; /* error opening the tracefile : bad magic number ? */
585 }
586
587 g_debug("Tracefile name is %s and number is %u",
588 g_quark_to_string(name), num);
589
590 tmp_tf.cpu_online = 1;
591 tmp_tf.cpu_num = num;
592 tmp_tf.name = name;
593 tmp_tf.tid = tid;
594 tmp_tf.pgid = pgid;
595 tmp_tf.creation = creation;
596 group = g_datalist_id_get_data(&trace->tracefiles, name);
597 if(group == NULL) {
598 /* Elements are automatically cleared when the array is allocated.
599 * This leaves the cpu_online variable set to 0 : cpu offline, by default.
600 */
601 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
602 g_datalist_id_set_data_full(&trace->tracefiles, name,
603 group, ltt_tracefile_group_destroy);
604 }
605
606 /* Add the per cpu tracefile to the named group */
607 unsigned int old_len = group->len;
608 if(num+1 > old_len)
609 group = g_array_set_size(group, num+1);
610 g_array_index (group, LttTracefile, num) = tmp_tf;
611 g_array_index (group, LttTracefile, num).event.tracefile =
612 &g_array_index (group, LttTracefile, num);
613 }
614 }
615
616 closedir(dir);
617
618 return 0;
619 }
620
621
622 /* Presumes the tracefile is already positioned at the beginning. This makes
623  * sense, because it must be done just after opening the tracefile. */
624 static int ltt_process_metadata_tracefile(LttTracefile *tf)
625 {
626 int err;
627 guint i;
628
629 while(1) {
630 err = ltt_tracefile_read_seek(tf);
631 if(err == EPERM) goto seek_error;
632 else if(err == ERANGE) break; /* End of tracefile */
633
634 err = ltt_tracefile_read_update_event(tf);
635 if(err) goto update_error;
636
637 /* The rules are :
638 * It contains only core events :
639 * 0 : set_marker_id
640 * 1 : set_marker_format
641 */
642 if(tf->event.event_id >= MARKER_CORE_IDS) {
643 /* Should only contain core events */
644 g_warning("Error in processing metadata file %s, "
645 "should not contain event id %u.", g_quark_to_string(tf->name),
646 tf->event.event_id);
647 err = EPERM;
648 goto event_id_error;
649 } else {
650 char *pos;
651 const char *marker_name, *format;
652 uint16_t id;
653 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
654
655 switch((enum marker_id)tf->event.event_id) {
656 case MARKER_ID_SET_MARKER_ID:
657 marker_name = pos = tf->event.data;
658 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s", marker_name);
659 pos += strlen(marker_name) + 1;
660 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
661 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
662 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s id %hu",
663 marker_name, id);
664 pos += sizeof(guint16);
665 int_size = *(guint8*)pos;
666 pos += sizeof(guint8);
667 long_size = *(guint8*)pos;
668 pos += sizeof(guint8);
669 pointer_size = *(guint8*)pos;
670 pos += sizeof(guint8);
671 size_t_size = *(guint8*)pos;
672 pos += sizeof(guint8);
673 alignment = *(guint8*)pos;
674 pos += sizeof(guint8);
675 marker_id_event(tf->trace, g_quark_from_string(marker_name),
676 id, int_size, long_size,
677 pointer_size, size_t_size, alignment);
678 break;
679 case MARKER_ID_SET_MARKER_FORMAT:
680 marker_name = pos = tf->event.data;
681 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s",
682 marker_name);
683 pos += strlen(marker_name) + 1;
684 format = pos;
685 pos += strlen(format) + 1;
686 marker_format_event(tf->trace, g_quark_from_string(marker_name),
687 format);
688 /* get information from dictionary TODO */
689 break;
690 default:
691 g_warning("Error in processing metadata file %s, "
692 "unknown event id %hhu.",
693 g_quark_to_string(tf->name),
694 tf->event.event_id);
695 err = EPERM;
696 goto event_id_error;
697 }
698 }
699 }
700 return 0;
701
702 /* Error handling */
703 event_id_error:
704 update_error:
705 seek_error:
706 g_warning("An error occured in metadata tracefile parsing");
707 return err;
708 }
709
710 /*
711 * Open a trace and return its LttTrace handle.
712 *
713 * pathname must be the directory of the trace
714 */
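/* Minimal usage sketch (hedged; the path and error handling are illustrative) :
 *
 *   LttTrace *trace = ltt_trace_open("/path/to/trace");
 *   if (trace != NULL) {
 *       ... iterate the tracefile groups and read events ...
 *       ltt_trace_close(trace);
 *   }
 */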
715
716 LttTrace *ltt_trace_open(const gchar *pathname)
717 {
718 gchar abs_path[PATH_MAX];
719 LttTrace * t;
720 LttTracefile *tf;
721 GArray *group;
722 int i, ret;
723 ltt_subbuffer_header_t *header;
724 DIR *dir;
725 struct dirent *entry;
726 guint control_found = 0;
727 struct stat stat_buf;
728 gchar path[PATH_MAX];
729
730 t = g_new(LttTrace, 1);
731 if(!t) goto alloc_error;
732
733 get_absolute_pathname(pathname, abs_path);
734 t->pathname = g_quark_from_string(abs_path);
735
736 g_datalist_init(&t->tracefiles);
737
738 /* Test to see if it looks like a trace */
739 dir = opendir(abs_path);
740 if(dir == NULL) {
741 perror(abs_path);
742 goto open_error;
743 }
744 while((entry = readdir(dir)) != NULL) {
745 strcpy(path, abs_path);
746 strcat(path, "/");
747 strcat(path, entry->d_name);
748 ret = stat(path, &stat_buf);
749 if(ret == -1) {
750 perror(path);
751 continue;
752 }
753 if(S_ISDIR(stat_buf.st_mode)) {
754 if(strcmp(entry->d_name, "control") == 0) {
755 control_found = 1;
756 }
757 }
758 }
759 closedir(dir);
760
761 if(!control_found) goto find_error;
762
763 /* Open all the tracefiles */
764 if(open_tracefiles(t, abs_path, "")) {
765 g_warning("Error opening tracefile %s", abs_path);
766 goto find_error;
767 }
768
769 /* Parse each trace control/metadata_N file : get runtime facility info */
770 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
771 if(group == NULL) {
772 g_error("Trace %s has no metadata tracefile", abs_path);
773 g_assert(0);
774 goto metadata_error;
775 }
776
777 /*
778 * Get the trace information for the control/metadata_0 tracefile.
779 * Getting a correct trace start_time and start_tsc is ensured by the fact
780 * that no subbuffers are supposed to be lost in the metadata channel.
781 * Therefore, the first subbuffer contains the start_tsc timestamp in its
782 * buffer header.
783 */
784 g_assert(group->len > 0);
785 tf = &g_array_index (group, LttTracefile, 0);
786 header = (ltt_subbuffer_header_t *)tf->buffer.head;
787 ret = parse_trace_header(header, tf, t);
788 g_assert(!ret);
789
790 t->num_cpu = group->len;
791
792 ret = allocate_marker_data(t);
793 if (ret)
794 g_error("Error in allocating marker data");
795
796 for(i=0; i<group->len; i++) {
797 tf = &g_array_index (group, LttTracefile, i);
798 if (tf->cpu_online)
799 if(ltt_process_metadata_tracefile(tf))
800 goto metadata_error;
801 }
802
803 return t;
804
805 /* Error handling */
806 metadata_error:
807 destroy_marker_data(t);
808 find_error:
809 g_datalist_clear(&t->tracefiles);
810 open_error:
811 g_free(t);
812 alloc_error:
813 return NULL;
814
815 }
816
817 /* Open another, completely independent, instance of a trace.
818 *
819 * A read on this new instance will read the first event of the trace.
820 *
821 * When we copy a trace, we want all the opening actions to happen again :
822 * the trace will be reopened and totally independent from the original.
823 * That's why we call ltt_trace_open.
824 */
825 LttTrace *ltt_trace_copy(LttTrace *self)
826 {
827 return ltt_trace_open(g_quark_to_string(self->pathname));
828 }
829
830 /*
831 * Close a trace
832 */
833
834 void ltt_trace_close(LttTrace *t)
835 {
836 g_datalist_clear(&t->tracefiles);
837 g_free(t);
838 }
839
840
841 /*****************************************************************************
842 * Get the start time and end time of the trace
843 ****************************************************************************/
844
845 void ltt_tracefile_time_span_get(LttTracefile *tf,
846 LttTime *start, LttTime *end)
847 {
848 int err;
849
850 err = map_block(tf, 0);
851 if(unlikely(err)) {
852 g_error("Can not map block");
853 *start = ltt_time_infinite;
854 } else
855 *start = tf->buffer.begin.timestamp;
856
857 err = map_block(tf, tf->num_blocks - 1); /* Last block */
858 if(unlikely(err)) {
859 g_error("Can not map block");
860 *end = ltt_time_zero;
861 } else
862 *end = tf->buffer.end.timestamp;
863 }
864
865 struct tracefile_time_span_get_args {
866 LttTrace *t;
867 LttTime *start;
868 LttTime *end;
869 };
870
871 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
872 {
873 struct tracefile_time_span_get_args *args =
874 (struct tracefile_time_span_get_args*)user_data;
875
876 GArray *group = (GArray *)data;
877 int i;
878 LttTracefile *tf;
879 LttTime tmp_start;
880 LttTime tmp_end;
881
882 for(i=0; i<group->len; i++) {
883 tf = &g_array_index (group, LttTracefile, i);
884 if(tf->cpu_online) {
885 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
886 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
887 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
888 }
889 }
890 }
891
892 /* return the start and end time of a trace */
893
894 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
895 {
896 LttTime min_start = ltt_time_infinite;
897 LttTime max_end = ltt_time_zero;
898 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
899
900 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
901
902 if(start != NULL) *start = min_start;
903 if(end != NULL) *end = max_end;
904
905 }
906
907
908 /* Seek to the first event in a tracefile whose time is equal to or greater
909  * than the time passed as parameter.
910  *
911  * If the time parameter is outside the tracefile time span : if it is before,
912  * seek to the first event ; if it is after, return ERANGE.
913  *
914  * If the time parameter is before the first event, we have to seek specially
915  * to get there.
916 *
917 * If the time is after the end of the trace, return ERANGE.
918 *
919 * Do a binary search to find the right block, then a sequential search in the
920 * block to find the event.
921 *
922 * In the special case where the time requested fits inside a block that has no
923 * event corresponding to the requested time, the first event of the next block
924 * will be selected.
925 *
926 * IMPORTANT NOTE : // FIXME everywhere...
927 *
928 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
929 * you will jump over an event if you do.
930 *
931 * Return value : 0 : no error, the tf->event can be used
932 * ERANGE : time is after the last event of the trace
933 * otherwise : this is an error.
934 *
935 * */
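/* A hedged usage sketch (the target time value is illustrative) :
 *
 *   LttTime target = ltt_time_from_uint64(123456789ULL);
 *   if (ltt_tracefile_seek_time(tf, target) == 0) {
 *       LttEvent *ev = ltt_tracefile_get_event(tf);
 *       ... ev is the first event at or after 'target' ...
 *   }
 */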
936
937 int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
938 {
939 int ret = 0;
940 int err;
941 unsigned int block_num, high, low;
942
943 /* seek at the beginning of trace */
944 err = map_block(tf, 0); /* First block */
945 if(unlikely(err)) {
946 g_error("Can not map block");
947 goto fail;
948 }
949
950 /* If the time is lower than or equal to the beginning of the trace,
951 * go to the first event. */
952 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
953 ret = ltt_tracefile_read(tf);
954 if(ret == ERANGE) goto range;
955 else if (ret) goto fail;
956 goto found; /* There is either no event in the trace or the event points
957 to the first event in the trace */
958 }
959
960 err = map_block(tf, tf->num_blocks - 1); /* Last block */
961 if(unlikely(err)) {
962 g_error("Can not map block");
963 goto fail;
964 }
965
966 /* If the time is after the end of the trace, return ERANGE. */
967 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
968 goto range;
969 }
970
971 /* Binary search the block */
972 high = tf->num_blocks - 1;
973 low = 0;
974
975 while(1) {
976 block_num = ((high-low) / 2) + low;
977
978 err = map_block(tf, block_num);
979 if(unlikely(err)) {
980 g_error("Can not map block");
981 goto fail;
982 }
983 if(high == low) {
984 /* We cannot divide anymore : this is what would happen if the time
985  * requested was exactly between two consecutive buffers' end and start
986  * timestamps. This is also what would happen if we had not dealt with the
987  * out-of-span cases earlier in this function. */
988 /* The event is right in the buffer!
989 * (or in the next buffer first event) */
990 while(1) {
991 ret = ltt_tracefile_read(tf);
992 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
993 else if(ret) goto fail;
994
995 if(ltt_time_compare(time, tf->event.event_time) <= 0)
996 goto found;
997 }
998
999 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
1000 /* go to lower part */
1001 high = block_num - 1;
1002 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1003 /* go to higher part */
1004 low = block_num + 1;
1005 } else {/* The event is right in the buffer!
1006 (or in the next buffer first event) */
1007 while(1) {
1008 ret = ltt_tracefile_read(tf);
1009 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1010 else if(ret) goto fail;
1011
1012 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1013 break;
1014 }
1015 goto found;
1016 }
1017 }
1018
1019 found:
1020 return 0;
1021 range:
1022 return ERANGE;
1023
1024 /* Error handling */
1025 fail:
1026 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1027 g_quark_to_string(tf->name));
1028 return EPERM;
1029 }
1030
1031 /* Seek to a position indicated by an LttEventPosition
1032 */
1033
1034 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1035 {
1036 int err;
1037
1038 if(ep->tracefile != tf) {
1039 goto fail;
1040 }
1041
1042 err = map_block(tf, ep->block);
1043 if(unlikely(err)) {
1044 g_error("Can not map block");
1045 goto fail;
1046 }
1047
1048 tf->event.offset = ep->offset;
1049
1050 /* Put back the event real tsc */
1051 tf->event.tsc = ep->tsc;
1052 tf->buffer.tsc = ep->tsc;
1053
1054 err = ltt_tracefile_read_update_event(tf);
1055 if(err) goto fail;
1056
1057 /* deactivate this, as it does nothing for now
1058 err = ltt_tracefile_read_op(tf);
1059 if(err) goto fail;
1060 */
1061
1062 return 0;
1063
1064 fail:
1065 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1066 g_quark_to_string(tf->name));
1067 return 1;
1068 }
1069
1070 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1071 * corresponds to.
1072 */
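/* Worked example with illustrative numbers (assuming freq_scale == 1 and
 * start_freq == 1 GHz) : a delta of 2 000 000 000 cycles gives
 * 2e9 * (1e9 / 1) / 1e9 = 2e9 ns = 2 s, which is added to start_time_from_tsc
 * (or subtracted from it when tsc < start_tsc). */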
1073
1074 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1075 {
1076 LttTime time;
1077
1078 if(tsc > tf->trace->start_tsc) {
1079 time = ltt_time_from_uint64(
1080 (double)(tsc - tf->trace->start_tsc)
1081 * (1000000000.0 / tf->trace->freq_scale)
1082 / (double)tf->trace->start_freq);
1083 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1084 } else {
1085 time = ltt_time_from_uint64(
1086 (double)(tf->trace->start_tsc - tsc)
1087 * (1000000000.0 / tf->trace->freq_scale)
1088 / (double)tf->trace->start_freq);
1089 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1090 }
1091 return time;
1092 }
1093
1094 /* Calculate the real event time based on the buffer boundaries */
1095 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1096 {
1097 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1098 }
1099
1100
1101 /* Get the current event of the tracefile : valid until the next read */
1102 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1103 {
1104 return &tf->event;
1105 }
1106
1107
1108
1109 /*****************************************************************************
1110 *Function name
1111 * ltt_tracefile_read : Read the next event in the tracefile
1112 *Input params
1113 * t : tracefile
1114 *Return value
1115 *
1116 * Returns 0 if an event can be used in tf->event.
1117 * Returns ERANGE on end of trace. The event in tf->event still can be used
1118 * (if the last block was not empty).
1119 * Returns EPERM on error.
1120 *
1121 * This function makes the tracefile event structure (tf->event) describe the
1122 * event at the current read position.
1123 *
1124 * Note : after an error, you must seek back to the beginning of the trace to
1125 * reinitialize it if you want coherent results.
1126 * This can happen if the last buffer of the trace contains no event : instead
1127 * of the end of trace, an error would be returned.
1128 * We make the assumption there is at least one event per buffer.
1129 ****************************************************************************/
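/* A hedged read-loop sketch (the ERANGE end-of-trace case, where tf->event may
 * still be valid, is ignored here for brevity) :
 *
 *   while (ltt_tracefile_read(tf) == 0) {
 *       LttEvent *ev = ltt_tracefile_get_event(tf);
 *       ... consume ev ...
 *   }
 */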
1130
1131 int ltt_tracefile_read(LttTracefile *tf)
1132 {
1133 int err;
1134
1135 err = ltt_tracefile_read_seek(tf);
1136 if(err) return err;
1137 err = ltt_tracefile_read_update_event(tf);
1138 if(err) return err;
1139
1140 /* deactivate this, as it does nothing for now
1141 err = ltt_tracefile_read_op(tf);
1142 if(err) return err;
1143 */
1144
1145 return 0;
1146 }
1147
1148 int ltt_tracefile_read_seek(LttTracefile *tf)
1149 {
1150 int err;
1151
1152 /* Get next buffer until we finally have an event, or end of trace */
1153 while(1) {
1154 err = ltt_seek_next_event(tf);
1155 if(unlikely(err == ENOPROTOOPT)) {
1156 return EPERM;
1157 }
1158
1159 /* Are we at the end of the buffer ? */
1160 if(err == ERANGE) {
1161 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1162 return ERANGE;
1163 } else {
1164 /* get next block */
1165 err = map_block(tf, tf->buffer.index + 1);
1166 if(unlikely(err)) {
1167 g_error("Can not map block");
1168 return EPERM;
1169 }
1170 }
1171 } else break; /* We found an event ! */
1172 }
1173
1174 return 0;
1175 }
1176
1177 /* do an operation when reading a new event */
1178
1179 /* This function does nothing for now */
1180 #if 0
1181 int ltt_tracefile_read_op(LttTracefile *tf)
1182 {
1183 LttEvent *event;
1184
1185 event = &tf->event;
1186
1187 /* do event specific operation */
1188
1189 /* nothing */
1190
1191 return 0;
1192 }
1193 #endif
1194
1195 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1196 {
1197 unsigned int offset = 0;
1198 int i, j;
1199
1200 g_printf("Event header (tracefile %s offset %llx):\n",
1201 g_quark_to_string(ev->tracefile->long_name),
1202 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1203 + (long)start_pos - (long)ev->tracefile->buffer.head);
1204
1205 while (offset < (long)end_pos - (long)start_pos) {
1206 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1207 g_printf(" ");
1208
1209 for (i = 0; i < 4 ; i++) {
1210 for (j = 0; j < 4; j++) {
1211 if (offset + ((i * 4) + j) <
1212 (long)end_pos - (long)start_pos)
1213 g_printf("%02hhX",
1214 ((char*)start_pos)[offset + ((i * 4) + j)]);
1215 else
1216 g_printf(" ");
1217 g_printf(" ");
1218 }
1219 if (i < 4)
1220 g_printf(" ");
1221 }
1222 offset+=16;
1223 g_printf("\n");
1224 }
1225 }
1226
1227
1228 /* same as ltt_tracefile_read, but does not seek to the next event nor call
1229 * event specific operation. */
1230 int ltt_tracefile_read_update_event(LttTracefile *tf)
1231 {
1232 void * pos;
1233 LttEvent *event;
1234 void *pos_aligned;
1235
1236 event = &tf->event;
1237 pos = tf->buffer.head + event->offset;
1238
1239 /* Read event header */
1240
1241 /* Align the head */
1242 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1243 pos_aligned = pos;
1244
1245 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1246 event->event_id = event->timestamp >> tf->tscbits;
1247 event->timestamp = event->timestamp & tf->tsc_mask;
1248 pos += sizeof(guint32);
1249
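	/* The low tf->tscbits bits of the 32-bit word read above hold the
	 * timestamp; the top bits hold either the event id directly or one of
	 * the reserved values handled below, which indicate that the real
	 * 16-bit id, and possibly a 16/32-bit size and a full 64-bit TSC,
	 * follow as extended header fields. */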
1250 switch (event->event_id) {
1251 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1252 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1253 pos += sizeof(guint16);
1254 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1255 pos += sizeof(guint16);
1256 if (event->event_size == 0xFFFF) {
1257 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1258 pos += sizeof(guint32);
1259 }
1260 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1261 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1262 pos += sizeof(guint64);
1263 break;
1264 case 30: /* LTT_RFLAG_ID_SIZE */
1265 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1266 pos += sizeof(guint16);
1267 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1268 pos += sizeof(guint16);
1269 if (event->event_size == 0xFFFF) {
1270 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1271 pos += sizeof(guint32);
1272 }
1273 break;
1274 case 31: /* LTT_RFLAG_ID */
1275 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1276 pos += sizeof(guint16);
1277 event->event_size = G_MAXUINT;
1278 break;
1279 default:
1280 event->event_size = G_MAXUINT;
1281 break;
1282 }
1283
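	/* Reconstruct the full TSC from the truncated per-event timestamp :
	 * unless a full 64-bit TSC was already read above (the
	 * LTT_RFLAG_ID_SIZE_TSC case), a timestamp smaller than the previous
	 * low bits means the counter wrapped, so the next higher bit is added
	 * to the buffer TSC before merging in the new low bits. */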
1284 if (likely(event->event_id != 29)) {
1285 /* No extended timestamp */
1286 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1287 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1288 + tf->tsc_mask_next_bit)
1289 | (guint64)event->timestamp;
1290 else
1291 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1292 | (guint64)event->timestamp;
1293 }
1294 event->tsc = tf->buffer.tsc;
1295
1296 event->event_time = ltt_interpolate_time(tf, event);
1297
1298 if (a_event_debug)
1299 print_debug_event_header(event, pos_aligned, pos);
1300
1301 event->data = pos;
1302
1303 /*
1304 * Let ltt_update_event_size update event->data according to the largest
1305 * alignment within the payload.
1306 * Get the data size and update the event fields with the current
1307 * information. */
1308 ltt_update_event_size(tf);
1309
1310 return 0;
1311 }
1312
1313
1314 /****************************************************************************
1315 *Function name
1316 * map_block : map a block from the file
1317 *Input Params
1318 * lttdes : ltt trace file
1319 * whichBlock : the block which will be read
1320 *return value
1321 * 0 : success
1322 * -errno : mapping (mmap) the block failed
1324 ****************************************************************************/
1325
1326 static gint map_block(LttTracefile * tf, guint block_num)
1327 {
1328 int page_size = getpagesize();
1329 ltt_subbuffer_header_t *header;
1330
1331 g_assert(block_num < tf->num_blocks);
1332
1333 if(tf->buffer.head != NULL) {
1334 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
1335 g_warning("unmap size : %u\n",
1336 PAGE_ALIGN(tf->buf_size));
1337 perror("munmap error");
1338 g_assert(0);
1339 }
1340 }
1341
1342 /* Multiple of pages aligned head */
1343 tf->buffer.head = mmap(0,
1344 PAGE_ALIGN(tf->buf_size),
1345 PROT_READ, MAP_PRIVATE, tf->fd,
1346 PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
1347
1348 if(tf->buffer.head == MAP_FAILED) {
1349 perror("Error in allocating memory for buffer of tracefile");
1350 g_assert(0);
1351 goto map_error;
1352 }
1353 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1354
1355
1356 tf->buffer.index = block_num;
1357
1358 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1359
1360 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1361 &header->cycle_count_begin);
1362 tf->buffer.begin.freq = tf->trace->start_freq;
1363
1364 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1365 tf->buffer.begin.cycle_count);
1366 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1367 &header->cycle_count_end);
1368 tf->buffer.end.freq = tf->trace->start_freq;
1369
1370 tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
1371 &header->lost_size);
1372 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1373 tf->buffer.end.cycle_count);
1374 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1375 tf->event.tsc = tf->buffer.tsc;
1376 tf->buffer.freq = tf->buffer.begin.freq;
1377
1378 /* FIXME
1379 * eventually support variable buffer size : will need a partial pre-read of
1380 * the headers to create an index when we open the trace... eventually. */
1381 g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
1382 &header->buf_size));
1383
1384 /* Make the current event point to the beginning of the buffer :
1385 * it means that the event read must get the first event. */
1386 tf->event.tracefile = tf;
1387 tf->event.block = block_num;
1388 tf->event.offset = 0;
1389
1390 if (header->events_lost) {
1391 g_warning("%d events lost so far in tracefile %s at block %u",
1392 tf->events_lost - header->events_lost,
1393 g_quark_to_string(tf->long_name),
1394 block_num);
1395 tf->events_lost = header->events_lost;
1396 }
1397 if (header->subbuf_corrupt) {
1398 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1399 tf->subbuf_corrupt - header->subbuf_corrupt,
1400 g_quark_to_string(tf->long_name),
1401 block_num);
1402 tf->subbuf_corrupt = header->subbuf_corrupt;
1403 }
1404
1405 return 0;
1406
1407 map_error:
1408 return -errno;
1409 }
1410
1411 static void print_debug_event_data(LttEvent *ev)
1412 {
1413 unsigned int offset = 0;
1414 int i, j;
1415
1416 if (!max(ev->event_size, ev->data_size))
1417 return;
1418
1419 g_printf("Event data (tracefile %s offset %llx):\n",
1420 g_quark_to_string(ev->tracefile->long_name),
1421 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1422 + (long)ev->data - (long)ev->tracefile->buffer.head);
1423
1424 while (offset < max(ev->event_size, ev->data_size)) {
1425 g_printf("%8lx", (long)ev->data + offset
1426 - (long)ev->tracefile->buffer.head);
1427 g_printf(" ");
1428
1429 for (i = 0; i < 4 ; i++) {
1430 for (j = 0; j < 4; j++) {
1431 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1432 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1433 else
1434 g_printf(" ");
1435 g_printf(" ");
1436 }
1437 if (i < 4)
1438 g_printf(" ");
1439 }
1440
1441 g_printf(" ");
1442
1443 for (i = 0; i < 4; i++) {
1444 for (j = 0; j < 4; j++) {
1445 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1446 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1447 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1448 else
1449 g_printf(".");
1450 } else
1451 g_printf(" ");
1452 }
1453 }
1454 offset+=16;
1455 g_printf("\n");
1456 }
1457 }
1458
1459 /* It will update the fields offsets too */
1460 void ltt_update_event_size(LttTracefile *tf)
1461 {
1462 off_t size = 0;
1463 char *tscdata;
1464 struct marker_info *info;
1465
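  /* For the two core metadata events, compute the payload size by hand,
   * mirroring the layout parsed in ltt_process_metadata_tracefile() :
   * set_marker_id : name string + aligned guint16 id + five guint8 size and
   * alignment fields ; set_marker_format : name string + format string. */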
1466 switch((enum marker_id)tf->event.event_id) {
1467 case MARKER_ID_SET_MARKER_ID:
1468 size = strlen((char*)tf->event.data) + 1;
1469 g_debug("marker %s id set", (char*)tf->event.data);
1470 size += ltt_align(size, sizeof(guint16), tf->alignment);
1471 size += sizeof(guint16);
1472 size += sizeof(guint8);
1473 size += sizeof(guint8);
1474 size += sizeof(guint8);
1475 size += sizeof(guint8);
1476 size += sizeof(guint8);
1477 break;
1478 case MARKER_ID_SET_MARKER_FORMAT:
1479 g_debug("marker %s format set", (char*)tf->event.data);
1480 size = strlen((char*)tf->event.data) + 1;
1481 size += strlen((char*)tf->event.data + size) + 1;
1482 break;
1483 }
1484
1485 info = marker_get_info_from_id(tf->trace, tf->event.event_id);
1486
1487 if (tf->event.event_id >= MARKER_CORE_IDS)
1488 g_assert(info != NULL);
1489
1490 /* Do not update field offsets of core markers while initially reading the
1491  * metadata tracefile, when the marker information does not exist yet.
1492 */
1493 if (likely(info && info->fields)) {
1494 /* alignment */
1495 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1496 info->largest_align,
1497 info->alignment);
1498 /* size, dynamically computed */
1499 if (info->size != -1)
1500 size = info->size;
1501 else
1502 size = marker_update_fields_offsets(marker_get_info_from_id(tf->trace,
1503 tf->event.event_id), tf->event.data);
1504 }
1505
1506 tf->event.data_size = size;
1507
1508 /* Check consistency between kernel and LTTV structure sizes */
1509 if(tf->event.event_size == G_MAXUINT) {
1510 /* Event size too big to fit in the event size field */
1511 tf->event.event_size = tf->event.data_size;
1512 }
1513
1514 if (a_event_debug)
1515 print_debug_event_data(&tf->event);
1516
1517 /* Having a marker id or marker format event outside of the metadata
1518 * tracefiles is a serious bug. */
1519 switch((enum marker_id)tf->event.event_id) {
1520 case MARKER_ID_SET_MARKER_ID:
1521 case MARKER_ID_SET_MARKER_FORMAT:
1522 if (tf->name != g_quark_from_string("/control/metadata"))
1523 g_error("Trace inconsistency : metadata event found in data "
1524 "tracefile %s", g_quark_to_string(tf->long_name));
1525 }
1526
1527 if (tf->event.data_size != tf->event.event_size) {
1528 struct marker_info *info = marker_get_info_from_id(tf->trace,
1529 tf->event.event_id);
1530 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1531 g_quark_to_string(info->name),
1532 tf->event.event_size, tf->event.data_size);
1533 exit(-1);
1534 }
1535 }
1536
1537
1538 /* Take the tf current event offset and use the event id to figure out where is
1539 * the next event offset.
1540 *
1541 * This is an internal function not meant to be used elsewhere : it will
1542 * not jump over the current block limits. Please consider using
1543 * ltt_tracefile_read to do this.
1544 *
1545 * Returns 0 on success
1546 * ERANGE if we are at the end of the buffer.
1547 * ENOPROTOOPT if an error occurred when getting the current event size.
1548 */
1549 static int ltt_seek_next_event(LttTracefile *tf)
1550 {
1551 int ret = 0;
1552 void *pos;
1553
1554 /* seek over the buffer header if we are at the buffer start */
1555 if(tf->event.offset == 0) {
1556 tf->event.offset += tf->buffer_header_size;
1557
1558 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1559 ret = ERANGE;
1560 }
1561 goto found;
1562 }
1563
1564 pos = tf->event.data;
1565
1566 if(tf->event.data_size < 0) goto error;
1567
1568 pos += (size_t)tf->event.data_size;
1569
1570 tf->event.offset = pos - tf->buffer.head;
1571
1572 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1573 ret = ERANGE;
1574 goto found;
1575 }
1576 g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
1577
1578 found:
1579 return ret;
1580
1581 error:
1582 g_error("Error in ltt_seek_next_event for tracefile %s",
1583 g_quark_to_string(tf->name));
1584 return ENOPROTOOPT;
1585 }
1586
1587 #if 0
1588 /*****************************************************************************
1589 *Function name
1590 * set_fields_offsets : set the precomputable offset of the fields
1591 *Input params
1592 * tracefile : opened trace file
1593 * event_type : the event type
1594 ****************************************************************************/
1595
1596 void set_fields_offsets(LttTracefile *tf, LttEventType *event_type)
1597 {
1598 LttField *field = event_type->root_field;
1599 enum field_status fixed_root = FIELD_FIXED, fixed_parent = FIELD_FIXED;
1600
1601 if(likely(field))
1602 preset_field_type_size(tf, event_type, 0, 0,
1603 &fixed_root, &fixed_parent,
1604 field);
1605
1606 }
1607 #endif //0
1608
1609
1610 /*****************************************************************************
1611 *Function name
1612 * get_alignment : Get the alignment needed for a field.
1613 *Input params
1614 * field : field
1615 *
1616 * returns : The size on which it must be aligned.
1617 *
1618 ****************************************************************************/
1619 #if 0
1620 off_t get_alignment(LttField *field)
1621 {
1622 LttType *type = &field->field_type;
1623
1624 switch(type->type_class) {
1625 case LTT_INT_FIXED:
1626 case LTT_UINT_FIXED:
1627 case LTT_POINTER:
1628 case LTT_CHAR:
1629 case LTT_UCHAR:
1630 case LTT_SHORT:
1631 case LTT_USHORT:
1632 case LTT_INT:
1633 case LTT_UINT:
1634 case LTT_LONG:
1635 case LTT_ULONG:
1636 case LTT_SIZE_T:
1637 case LTT_SSIZE_T:
1638 case LTT_OFF_T:
1639 case LTT_FLOAT:
1640 case LTT_ENUM:
1641 /* Align offset on type size */
1642 g_assert(field->field_size != 0);
1643 return field->field_size;
1644 break;
1645 case LTT_STRING:
1646 return 1;
1647 break;
1648 case LTT_ARRAY:
1649 g_assert(type->fields->len == 1);
1650 {
1651 LttField *child = &g_array_index(type->fields, LttField, 0);
1652 return get_alignment(child);
1653 }
1654 break;
1655 case LTT_SEQUENCE:
1656 g_assert(type->fields->len == 2);
1657 {
1658 off_t localign = 1;
1659 LttField *child = &g_array_index(type->fields, LttField, 0);
1660
1661 localign = max(localign, get_alignment(child));
1662
1663 child = &g_array_index(type->fields, LttField, 1);
1664 localign = max(localign, get_alignment(child));
1665
1666 return localign;
1667 }
1668 break;
1669 case LTT_STRUCT:
1670 case LTT_UNION:
1671 {
1672 guint i;
1673 off_t localign = 1;
1674
1675 for(i=0; i<type->fields->len; i++) {
1676 LttField *child = &g_array_index(type->fields, LttField, i);
1677 localign = max(localign, get_alignment(child));
1678 }
1679 return localign;
1680 }
1681 break;
1682 case LTT_NONE:
1683 default:
1684 g_error("get_alignment : unknown type");
1685 return -1;
1686 }
1687 }
1688
1689 #endif //0
1690
1691 /*****************************************************************************
1692 *Function name
1693 * field_compute_static_size : Determine the size of fields known by their
1694 * sole definition. Unions, arrays and struct sizes might be known, but
1695 * the parser does not give that information.
1696 *Input params
1697 * tf : tracefile
1698 * field : field
1699 *
1700 ****************************************************************************/
1701 #if 0
1702 void field_compute_static_size(LttFacility *fac, LttField *field)
1703 {
1704 LttType *type = &field->field_type;
1705
1706 switch(type->type_class) {
1707 case LTT_INT_FIXED:
1708 case LTT_UINT_FIXED:
1709 case LTT_POINTER:
1710 case LTT_CHAR:
1711 case LTT_UCHAR:
1712 case LTT_SHORT:
1713 case LTT_USHORT:
1714 case LTT_INT:
1715 case LTT_UINT:
1716 case LTT_LONG:
1717 case LTT_ULONG:
1718 case LTT_SIZE_T:
1719 case LTT_SSIZE_T:
1720 case LTT_OFF_T:
1721 case LTT_FLOAT:
1722 case LTT_ENUM:
1723 case LTT_STRING:
1724 /* nothing to do */
1725 break;
1726 case LTT_ARRAY:
1727 /* note this : the array type size is the number of elements in the array,
1728  * while the array field size is the length of the array in bytes */
1729 g_assert(type->fields->len == 1);
1730 {
1731 LttField *child = &g_array_index(type->fields, LttField, 0);
1732 field_compute_static_size(fac, child);
1733
1734 if(child->field_size != 0) {
1735 field->field_size = type->size * child->field_size;
1736 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1737 sizeof(off_t), type->size);
1738 } else {
1739 field->field_size = 0;
1740 }
1741 }
1742 break;
1743 case LTT_SEQUENCE:
1744 g_assert(type->fields->len == 2);
1745 {
1746 off_t local_offset = 0;
1747 LttField *child = &g_array_index(type->fields, LttField, 1);
1748 field_compute_static_size(fac, child);
1749 field->field_size = 0;
1750 type->size = 0;
1751 if(child->field_size != 0) {
1752 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1753 sizeof(off_t), SEQUENCE_AVG_ELEMENTS);
1754 }
1755 }
1756 break;
1757 case LTT_STRUCT:
1758 case LTT_UNION:
1759 {
1760 guint i;
1761 for(i=0;i<type->fields->len;i++) {
1762 LttField *child = &g_array_index(type->fields, LttField, i);
1763 field_compute_static_size(fac, child);
1764 if(child->field_size != 0) {
1765 type->size += ltt_align(type->size, get_alignment(child),
1766 fac->alignment);
1767 type->size += child->field_size;
1768 } else {
1769 /* As soon as we find a child with variable size, we have
1770 * a variable size */
1771 type->size = 0;
1772 break;
1773 }
1774 }
1775 field->field_size = type->size;
1776 }
1777 break;
1778 default:
1779 g_error("field_static_size : unknown type");
1780 }
1781
1782 }
1783 #endif //0
1784
1785
1786 /*****************************************************************************
1787 *Function name
1788 * precompute_fields_offsets : set the precomputable offset of the fields
1789 *Input params
1790 * fac : facility
1791 * field : the field
1792 * offset : pointer to the current offset, must be incremented
1793 *
1794 * return : 1 : found a variable length field, stop the processing.
1795 * 0 otherwise.
1796 ****************************************************************************/
1797
1798 #if 0
1799 gint precompute_fields_offsets(LttFacility *fac, LttField *field, off_t *offset, gint is_compact)
1800 {
1801 LttType *type = &field->field_type;
1802
1803 if(unlikely(is_compact)) {
1804 g_assert(field->field_size != 0);
1805 /* FIXME THIS IS A HUUUUUGE hack :
1806 * offset is between the compact_data field in struct LttEvent
1807 * and the address of the field root in the memory map.
1808 * ark. Both will stay at the same addresses while the event
1809 * is readable, so it's ok.
1810 */
1811 field->offset_root = 0;
1812 field->fixed_root = FIELD_FIXED;
1813 return 0;
1814 }
1815
1816 switch(type->type_class) {
1817 case LTT_INT_FIXED:
1818 case LTT_UINT_FIXED:
1819 case LTT_POINTER:
1820 case LTT_CHAR:
1821 case LTT_UCHAR:
1822 case LTT_SHORT:
1823 case LTT_USHORT:
1824 case LTT_INT:
1825 case LTT_UINT:
1826 case LTT_LONG:
1827 case LTT_ULONG:
1828 case LTT_SIZE_T:
1829 case LTT_SSIZE_T:
1830 case LTT_OFF_T:
1831 case LTT_FLOAT:
1832 case LTT_ENUM:
1833 g_assert(field->field_size != 0);
1834 /* Align offset on type size */
1835 *offset += ltt_align(*offset, get_alignment(field),
1836 fac->alignment);
1837 /* remember offset */
1838 field->offset_root = *offset;
1839 field->fixed_root = FIELD_FIXED;
1840 /* Increment offset */
1841 *offset += field->field_size;
1842 return 0;
1843 break;
1844 case LTT_STRING:
1845 field->offset_root = *offset;
1846 field->fixed_root = FIELD_FIXED;
1847 return 1;
1848 break;
1849 case LTT_ARRAY:
1850 g_assert(type->fields->len == 1);
1851 {
1852 LttField *child = &g_array_index(type->fields, LttField, 0);
1853
1854 *offset += ltt_align(*offset, get_alignment(field),
1855 fac->alignment);
1856
1857 /* remember offset */
1858 field->offset_root = *offset;
1859 field->array_offset = *offset;
1860 field->fixed_root = FIELD_FIXED;
1861
1862 /* Let the child be variable */
1863 //precompute_fields_offsets(tf, child, offset);
1864
1865 if(field->field_size != 0) {
1866 /* Increment offset */
1867 /* field_size is the array size in bytes */
1868 *offset += field->field_size;
1869 return 0;
1870 } else {
1871 return 1;
1872 }
1873 }
1874 break;
1875 case LTT_SEQUENCE:
1876 g_assert(type->fields->len == 2);
1877 {
1878 LttField *child;
1879 guint ret;
1880
1881 *offset += ltt_align(*offset, get_alignment(field),
1882 fac->alignment);
1883
1884 /* remember offset */
1885 field->offset_root = *offset;
1886 field->fixed_root = FIELD_FIXED;
1887
1888 child = &g_array_index(type->fields, LttField, 0);
1889 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1890 g_assert(ret == 0); /* Seq len cannot have variable len */
1891
1892 child = &g_array_index(type->fields, LttField, 1);
1893 *offset += ltt_align(*offset, get_alignment(child),
1894 fac->alignment);
1895 field->array_offset = *offset;
1896 /* Let the child be variable. */
1897 //ret = precompute_fields_offsets(fac, child, offset);
1898
1899 /* Cannot precompute fields offsets of sequence members, and has
1900 * variable length. */
1901 return 1;
1902 }
1903 break;
1904 case LTT_STRUCT:
1905 {
1906 LttField *child;
1907 guint i;
1908 gint ret=0;
1909
1910 *offset += ltt_align(*offset, get_alignment(field),
1911 fac->alignment);
1912 /* remember offset */
1913 field->offset_root = *offset;
1914 field->fixed_root = FIELD_FIXED;
1915
1916 for(i=0; i< type->fields->len; i++) {
1917 child = &g_array_index(type->fields, LttField, i);
1918 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1919
1920 if(ret) break;
1921 }
1922 return ret;
1923 }
1924 break;
1925 case LTT_UNION:
1926 {
1927 LttField *child;
1928 guint i;
1929 gint ret=0;
1930
1931 *offset += ltt_align(*offset, get_alignment(field),
1932 fac->alignment);
1933 /* remember offset */
1934 field->offset_root = *offset;
1935 field->fixed_root = FIELD_FIXED;
1936
1937 for(i=0; i< type->fields->len; i++) {
1938 *offset = field->offset_root;
1939 child = &g_array_index(type->fields, LttField, i);
1940 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1941
1942 if(ret) break;
1943 }
1944 *offset = field->offset_root + field->field_size;
1945 return ret;
1946 }
1947
1948 break;
1949 case LTT_NONE:
1950 default:
1951 g_error("precompute_fields_offsets : unknown type");
1952 return 1;
1953 }
1954
1955 }
1956
1957 #endif //0
1958
1959 #if 0
1960 /*****************************************************************************
1961 *Function name
1962 * precompute_offsets : set the precomputable offset of an event type
1963 *Input params
1964 * tf : tracefile
1965 * event : event type
1966 *
1967 ****************************************************************************/
1968 void precompute_offsets(LttFacility *fac, LttEventType *event)
1969 {
1970 guint i;
1971 off_t offset = 0;
1972 gint ret;
1973
1974 /* First, compute the size of fixed size fields. Will determine size for
1975 * arrays, struct and unions, which is not done by the parser */
1976 for(i=0; i<event->fields->len; i++) {
1977 LttField *field = &g_array_index(event->fields, LttField, i);
1978 field_compute_static_size(fac, field);
1979 }
1980
1981 /* Precompute all known offsets */
1982 for(i=0; i<event->fields->len; i++) {
1983 LttField *field = &g_array_index(event->fields, LttField, i);
1984 if(event->has_compact_data && i == 0)
1985 ret = precompute_fields_offsets(fac, field, &offset, 1);
1986 else
1987 ret = precompute_fields_offsets(fac, field, &offset, 0);
1988 if(ret) break;
1989 }
1990 }
1991 #endif //0
1992
1993
1994
1995 /*****************************************************************************
1996 *Function name
1997 * preset_field_type_size : set the fixed sizes of the field type
1998 *Input params
1999 * tf : tracefile
2000 * event_type : event type
2001 * offset_root : offset from the root
2002 * offset_parent : offset from the parent
2003 * fixed_root : Do we know a fixed offset to the root ?
2004 * fixed_parent : Do we know a fixed offset to the parent ?
2005 * field : field
2006 ****************************************************************************/
2007
2008
2009
2010 // preset the fixed size offsets. Calculate them just like genevent-new : an
2011 // increment of a *to value that represents the offset from the start of the
2012 // event data.
2013 // The preset information is : offsets up to (and including) the first element
2014 // of variable size. All subsequent fields must be flagged "VARIABLE OFFSET".
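// Hypothetical illustration of that rule (not taken from a real facility) :
// for an event declared as { uint32 pid; string comm; uint32 prio; },
//   pid  : fixed_root = FIELD_FIXED,    offset_root = 0, field_size = 4
//   comm : fixed_root = FIELD_FIXED,    offset_root = 4 (the offset of the
//          first variable-size element is still presettable), but
//          fixed_size = FIELD_VARIABLE
//   prio : fixed_root = FIELD_VARIABLE, its offset depends on the string
//          length and is only known while parsing each event.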
2015 #if 0
2016 void preset_field_type_size(LttTracefile *tf, LttEventType *event_type,
2017 off_t offset_root, off_t offset_parent,
2018 enum field_status *fixed_root, enum field_status *fixed_parent,
2019 LttField *field)
2020 {
2021 enum field_status local_fixed_root, local_fixed_parent;
2022 guint i;
2023 LttType *type;
2024
2025 g_assert(field->fixed_root == FIELD_UNKNOWN);
2026 g_assert(field->fixed_parent == FIELD_UNKNOWN);
2027 g_assert(field->fixed_size == FIELD_UNKNOWN);
2028
2029 type = field->field_type;
2030
2031 field->fixed_root = *fixed_root;
2032 if(field->fixed_root == FIELD_FIXED)
2033 field->offset_root = offset_root;
2034 else
2035 field->offset_root = 0;
2036
2037 field->fixed_parent = *fixed_parent;
2038 if(field->fixed_parent == FIELD_FIXED)
2039 field->offset_parent = offset_parent;
2040 else
2041 field->offset_parent = 0;
2042
2043 size_t current_root_offset;
2044 size_t current_offset;
2045 enum field_status current_child_status, final_child_status;
2046 size_t max_size;
2047
2048 switch(type->type_class) {
2049 case LTT_INT_FIXED:
2050 case LTT_UINT_FIXED:
2051 case LTT_CHAR:
2052 case LTT_UCHAR:
2053 case LTT_SHORT:
2054 case LTT_USHORT:
2055 case LTT_INT:
2056 case LTT_UINT:
2057 case LTT_FLOAT:
2058 case LTT_ENUM:
2059 field->field_size = ltt_type_size(tf->trace, type);
2060 field->fixed_size = FIELD_FIXED;
2061 break;
2062 case LTT_POINTER:
2063 field->field_size = (off_t)event_type->facility->pointer_size;
2064 field->fixed_size = FIELD_FIXED;
2065 break;
2066 case LTT_LONG:
2067 case LTT_ULONG:
2068 field->field_size = (off_t)event_type->facility->long_size;
2069 field->fixed_size = FIELD_FIXED;
2070 break;
2071 case LTT_SIZE_T:
2072 case LTT_SSIZE_T:
2073 case LTT_OFF_T:
2074 field->field_size = (off_t)event_type->facility->size_t_size;
2075 field->fixed_size = FIELD_FIXED;
2076 break;
2077 case LTT_SEQUENCE:
2078 local_fixed_root = FIELD_VARIABLE;
2079 local_fixed_parent = FIELD_VARIABLE;
2080 preset_field_type_size(tf, event_type,
2081 0, 0,
2082 &local_fixed_root, &local_fixed_parent,
2083 field->child[0]);
2084 field->fixed_size = FIELD_VARIABLE;
2085 field->field_size = 0;
2086 *fixed_root = FIELD_VARIABLE;
2087 *fixed_parent = FIELD_VARIABLE;
2088 break;
2089 case LTT_STRING:
2090 field->fixed_size = FIELD_VARIABLE;
2091 field->field_size = 0;
2092 *fixed_root = FIELD_VARIABLE;
2093 *fixed_parent = FIELD_VARIABLE;
2094 break;
2095 case LTT_ARRAY:
2096 local_fixed_root = FIELD_VARIABLE;
2097 local_fixed_parent = FIELD_VARIABLE;
2098 preset_field_type_size(tf, event_type,
2099 0, 0,
2100 &local_fixed_root, &local_fixed_parent,
2101 field->child[0]);
2102 field->fixed_size = field->child[0]->fixed_size;
2103 if(field->fixed_size == FIELD_FIXED) {
2104 field->field_size = type->element_number * field->child[0]->field_size;
2105 } else {
2106 field->field_size = 0;
2107 *fixed_root = FIELD_VARIABLE;
2108 *fixed_parent = FIELD_VARIABLE;
2109 }
2110 break;
2111 case LTT_STRUCT:
2112 current_root_offset = field->offset_root;
2113 current_offset = 0;
2114 current_child_status = FIELD_FIXED;
2115 for(i=0;i<type->element_number;i++) {
2116 preset_field_type_size(tf, event_type,
2117 current_root_offset, current_offset,
2118 fixed_root, &current_child_status,
2119 field->child[i]);
2120 if(current_child_status == FIELD_FIXED) {
2121 current_root_offset += field->child[i]->field_size;
2122 current_offset += field->child[i]->field_size;
2123 } else {
2124 current_root_offset = 0;
2125 current_offset = 0;
2126 }
2127 }
2128 if(current_child_status != FIELD_FIXED) {
2129 *fixed_parent = current_child_status;
2130 field->field_size = 0;
2131 field->fixed_size = current_child_status;
2132 } else {
2133 field->field_size = current_offset;
2134 field->fixed_size = FIELD_FIXED;
2135 }
2136 break;
2137 case LTT_UNION:
2138 current_root_offset = field->offset_root;
2139 current_offset = 0;
2140 max_size = 0;
2141 final_child_status = FIELD_FIXED;
2142 for(i=0;i<type->element_number;i++) {
2143 enum field_status current_root_child_status = FIELD_FIXED;
2144 enum field_status current_child_status = FIELD_FIXED;
2145 preset_field_type_size(tf, event_type,
2146 current_root_offset, current_offset,
2147 &current_root_child_status, &current_child_status,
2148 field->child[i]);
2149 if(current_child_status != FIELD_FIXED)
2150 final_child_status = current_child_status;
2151 else
2152 max_size = MAX(max_size, field->child[i]->field_size);
2153 }
2154 if(final_child_status != FIELD_FIXED) {
2155 g_error("LTTV does not support variable size fields in unions.");
2156 /* This will stop the application. */
2157 *fixed_root = final_child_status;
2158 *fixed_parent = final_child_status;
2159 field->field_size = 0;
2160 field->fixed_size = final_child_status;
2161 } else {
2162 field->field_size = max_size;
2163 field->fixed_size = FIELD_FIXED;
2164 }
2165 break;
2166 case LTT_NONE:
2167 g_error("unexpected type NONE");
2168 break;
2169 }
2170
2171 }
2172 #endif //0
2173
2174 /*****************************************************************************
2175 *Function name
2176 * check_fields_compatibility : Check for compatibility between two fields :
2177 * do they use the same inner structure ?
2178 *Input params
2179 * event_type1 : event type
2180 * event_type2 : event type
2181 * field1 : field
2182 * field2 : field
2183 *Returns : 0 if identical
2184 * 1 if not.
2185 ****************************************************************************/
2186 // this function checks for equality of field types. Therefore, it does not use
2187 // per se offsets. For instance, an aligned version of a structure is
2188 // compatible with an unaligned version of the same structure.
2189 #if 0
2190 gint check_fields_compatibility(LttEventType *event_type1,
2191 LttEventType *event_type2,
2192 LttField *field1, LttField *field2)
2193 {
2194 guint different = 0;
2195 LttType *type1;
2196 LttType *type2;
2197
2198 if(field1 == NULL) {
2199 if(field2 == NULL) goto end;
2200 else {
2201 different = 1;
2202 goto end;
2203 }
2204 } else if(field2 == NULL) {
2205 different = 1;
2206 goto end;
2207 }
2208
2209 type1 = &field1->field_type;
2210 type2 = &field2->field_type;
2211
2212 if(type1->type_class != type2->type_class) {
2213 different = 1;
2214 goto end;
2215 }
2216 if(type1->network != type2->network) {
2217 different = 1;
2218 goto end;
2219 }
2220
2221 switch(type1->type_class) {
2222 case LTT_INT_FIXED:
2223 case LTT_UINT_FIXED:
2224 case LTT_POINTER:
2225 case LTT_CHAR:
2226 case LTT_UCHAR:
2227 case LTT_SHORT:
2228 case LTT_USHORT:
2229 case LTT_INT:
2230 case LTT_UINT:
2231 case LTT_LONG:
2232 case LTT_ULONG:
2233 case LTT_SIZE_T:
2234 case LTT_SSIZE_T:
2235 case LTT_OFF_T:
2236 case LTT_FLOAT:
2237 case LTT_ENUM:
2238 if(field1->field_size != field2->field_size)
2239 different = 1;
2240 break;
2241 case LTT_STRING:
2242 break;
2243 case LTT_ARRAY:
2244 {
2245 LttField *child1 = &g_array_index(type1->fields, LttField, 0);
2246 LttField *child2 = &g_array_index(type2->fields, LttField, 0);
2247
2248 if(type1->size != type2->size)
2249 different = 1;
2250 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2251 different = 1;
2252 }
2253 break;
2254 case LTT_SEQUENCE:
2255 {
2256 LttField *child1 = &g_array_index(type1->fields, LttField, 1);
2257 LttField *child2 = &g_array_index(type2->fields, LttField, 1);
2258
2259 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2260 different = 1;
2261 }
2262 break;
2263 case LTT_STRUCT:
2264 case LTT_UNION:
2265 {
2266 LttField *child;
2267 guint i;
2268
2269 if(type1->fields->len != type2->fields->len) {
2270 different = 1;
2271 goto end;
2272 }
2273
2274 for(i=0; i< type1->fields->len; i++) {
2275 LttField *child1;
2276 LttField *child2;
2277 child1 = &g_array_index(type1->fields, LttField, i);
2278 child2 = &g_array_index(type2->fields, LttField, i);
2279 different = check_fields_compatibility(event_type1,
2280 event_type2, child1, child2);
2281
2282 if(different) break;
2283 }
2284 }
2285 break;
2286 case LTT_NONE:
2287 default:
2288 g_error("check_fields_compatibility : unknown type");
2289 }
2290
2291 end:
2292 return different;
2293 }
2294 #endif //0
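/* Hedged usage sketch for the compatibility check above.  The event types
 * are hypothetical; the only contract relied upon is the documented return
 * value (0 when the two fields share the same inner structure, 1 otherwise),
 * so an aligned and an unaligned declaration of the same structure compare
 * as identical.  Kept compiled out, like the function it illustrates. */
#if 0
static gboolean example_same_inner_structure(LttEventType *et1,
    LttEventType *et2)
{
  LttField *f1 = &g_array_index(et1->fields, LttField, 0);
  LttField *f2 = &g_array_index(et2->fields, LttField, 0);

  return check_fields_compatibility(et1, et2, f1, f2) == 0;
}
#endif //0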
2295
2296 #if 0
2297 gint check_fields_compatibility(LttEventType *event_type1,
2298 LttEventType *event_type2,
2299 LttField *field1, LttField *field2)
2300 {
2301 guint different = 0;
2302 guint i;
2303 LttType *type1;
2304 LttType *type2;
2305
2306 if(field1 == NULL) {
2307 if(field2 == NULL) goto end;
2308 else {
2309 different = 1;
2310 goto end;
2311 }
2312 } else if(field2 == NULL) {
2313 different = 1;
2314 goto end;
2315 }
2316
2317 g_assert(field1->fixed_root != FIELD_UNKNOWN);
2318 g_assert(field2->fixed_root != FIELD_UNKNOWN);
2319 g_assert(field1->fixed_parent != FIELD_UNKNOWN);
2320 g_assert(field2->fixed_parent != FIELD_UNKNOWN);
2321 g_assert(field1->fixed_size != FIELD_UNKNOWN);
2322 g_assert(field2->fixed_size != FIELD_UNKNOWN);
2323
2324 type1 = field1->field_type;
2325 type2 = field2->field_type;
2326
2327 if(type1->type_class != type2->type_class) {
2328 different = 1;
2329 goto end;
2330 }
2331 if(type1->element_name != type2->element_name) {
2332 different = 1;
2333 goto end;
2334 }
2335
2336 switch(type1->type_class) {
2337 case LTT_INT_FIXED:
2338 case LTT_UINT_FIXED:
2339 case LTT_POINTER:
2340 case LTT_CHAR:
2341 case LTT_UCHAR:
2342 case LTT_SHORT:
2343 case LTT_USHORT:
2344 case LTT_INT:
2345 case LTT_UINT:
2346 case LTT_FLOAT:
2348 case LTT_LONG:
2349 case LTT_ULONG:
2350 case LTT_SIZE_T:
2351 case LTT_SSIZE_T:
2352 case LTT_OFF_T:
2353 if(field1->field_size != field2->field_size) {
2354 different = 1;
2355 goto end;
2356 }
2357 break;
2358 case LTT_ENUM:
2359 if(type1->element_number != type2->element_number) {
2360 different = 1;
2361 goto end;
2362 }
2363 for(i=0;i<type1->element_number;i++) {
2364 if(type1->enum_strings[i] != type2->enum_strings[i]) {
2365 different = 1;
2366 goto end;
2367 }
2368 }
2369 break;
2370 case LTT_SEQUENCE:
2371 /* Two elements : size and child */
2372 g_assert(type1->element_number == type2->element_number);
2373 for(i=0;i<type1->element_number;i++) {
2374 if(check_fields_compatibility(event_type1, event_type2,
2375 field1->child[0], field2->child[0])) {
2376 different = 1;
2377 goto end;
2378 }
2379 }
2380 break;
2381 case LTT_STRING:
2382 break;
2383 case LTT_ARRAY:
2384 if(field1->field_size != field2->field_size) {
2385 different = 1;
2386 goto end;
2387 }
2388 /* Two elements : size and child */
2389 g_assert(type1->element_number == type2->element_number);
2390 for(i=0;i<type1->element_number;i++) {
2391 if(check_fields_compatibility(event_type1, event_type2,
2392 field1->child[0], field2->child[0])) {
2393 different = 1;
2394 goto end;
2395 }
2396 }
2397 break;
2398 case LTT_STRUCT:
2399 case LTT_UNION:
2400 if(type1->element_number != type2->element_number) {
2401 different = 1;
2402 break;
2403 }
2404 for(i=0;i<type1->element_number;i++) {
2405 if(check_fields_compatibility(event_type1, event_type2,
2406 field1->child[i], field2->child[i])) {
2407 different = 1;
2408 goto end;
2409 }
2410 }
2411 break;
2412 }
2413 end:
2414 return different;
2415 }
2416 #endif //0
2417
2418
2419 /*****************************************************************************
2420 *Function name
2421 * ltt_get_int : get an integer number
2422 *Input params
2423 * reverse_byte_order: must we reverse the byte order ?
2424 * size : the size of the integer
2425 * data : the data pointer
2426 *Return value
2427 * gint64 : a 64-bit integer
2428 ****************************************************************************/
2429
2430 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
2431 {
2432 gint64 val;
2433
2434 switch(size) {
2435 case 1: val = *((gint8*)data); break;
2436 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
2437 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
2438 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
2439 default: val = ltt_get_int64(reverse_byte_order, data);
2440 g_critical("get_int : integer size %d unknown", size);
2441 break;
2442 }
2443
2444 return val;
2445 }
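/* Hedged usage sketch for ltt_get_int() : decode a 4-byte signed value from
 * a raw payload.  The buffer content is made up and the byte-order flag
 * would normally come from the tracefile rather than be hard-coded. */
#if 0
static void example_read_int32(void)
{
  guint8 buf[4] = { 0xff, 0xff, 0xff, 0xfe };

  /* With reverse_byte_order == FALSE the bytes are read in host order;
   * with TRUE they are byte-swapped first.  In both cases the result is
   * sign extended into the returned gint64. */
  gint64 v = ltt_get_int(FALSE, 4, buf);

  g_debug("decoded value = %lld", (long long)v);
}
#endif //0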
2446
2447 /*****************************************************************************
2448 *Function name
2449 * ltt_get_uint : get an unsigned integer number
2450 *Input params
2451 * reverse_byte_order: must we reverse the byte order ?
2452 * size : the size of the integer
2453 * data : the data pointer
2454 *Return value
2455 * guint64 : a 64-bit unsigned integer
2456 ****************************************************************************/
2457
2458 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
2459 {
2460 guint64 val;
2461
2462 switch(size) {
2463 case 1: val = *((guint8*)data); break; /* unsigned : no sign extension */
2464 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
2465 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
2466 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
2467 default: val = ltt_get_uint64(reverse_byte_order, data);
2468 g_critical("get_uint : unsigned integer size %d unknown",
2469 size);
2470 break;
2471 }
2472
2473 return val;
2474 }
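/* Hedged sketch contrasting ltt_get_int() and ltt_get_uint() on a one-byte
 * payload : the signed helper sign extends, the unsigned one does not.  The
 * value is hypothetical. */
#if 0
static void example_one_byte(void)
{
  guint8 byte = 0xff;

  gint64  s = ltt_get_int(FALSE, 1, &byte);   /* -1  : sign extended */
  guint64 u = ltt_get_uint(FALSE, 1, &byte);  /* 255 : zero extended */

  g_debug("signed=%lld unsigned=%llu", (long long)s, (unsigned long long)u);
}
#endif //0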
2475
2476
2477 /* get the node name of the system */
2478
2479 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
2480 {
2481 return s->node_name;
2482 }
2483
2484
2485 /* get the domain name of the system */
2486
2487 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
2488 {
2489 return s->domain_name;
2490 }
2491
2492
2493 /* get the description of the system */
2494
2495 char * ltt_trace_system_description_description (LttSystemDescription * s)
2496 {
2497 return s->description;
2498 }
2499
2500
2501 /* get the NTP corrected start time of the trace */
2502 LttTime ltt_trace_start_time(LttTrace *t)
2503 {
2504 return t->start_time;
2505 }
2506
2507 /* get the monotonic start time of the trace */
2508 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
2509 {
2510 return t->start_time_from_tsc;
2511 }
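/* Hedged sketch using the two start times above.  It assumes LttTime exposes
 * tv_sec / tv_nsec members; the trace pointer is supplied by the caller. */
#if 0
static void example_print_start_times(LttTrace *t)
{
  LttTime ntp  = ltt_trace_start_time(t);
  LttTime mono = ltt_trace_start_time_monotonic(t);

  g_debug("NTP-corrected start : %lu.%09lu s", ntp.tv_sec, ntp.tv_nsec);
  g_debug("monotonic start     : %lu.%09lu s", mono.tv_sec, mono.tv_nsec);
}
#endif //0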
2512
2513 static LttTracefile *ltt_tracefile_new()
2514 {
2515 LttTracefile *tf;
2516 tf = g_new(LttTracefile, 1);
2517 tf->event.tracefile = tf;
2518 return tf;
2519 }
2520
2521 static void ltt_tracefile_destroy(LttTracefile *tf)
2522 {
2523 g_free(tf);
2524 }
2525
2526 static void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
2527 {
2528 *dest = *src;
2529 }
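/* Hedged sketch of the helpers above.  Because ltt_tracefile_copy() is a
 * plain structure copy, the copy's embedded event still points back at the
 * source tracefile; a caller using the copy on its own would have to
 * re-point it, as shown below (illustrative only). */
#if 0
static void example_tracefile_clone(const LttTracefile *src)
{
  LttTracefile *copy = ltt_tracefile_new();

  ltt_tracefile_copy(copy, src);
  copy->event.tracefile = copy;  /* make the clone self-referencing again */

  /* ... use the clone ... */

  ltt_tracefile_destroy(copy);
}
#endif //0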
2530
2531 /* Runs automatically at library load time, before the library is used. */
2532
2533 static __attribute__((constructor)) void init(void)
2534 {
2535 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("/control/metadata");
2536 }
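/* Hedged sketch of why the metadata name is interned as a GQuark at load
 * time : identifying the metadata tracefile becomes a simple integer
 * comparison.  The tf->name member is an assumption of this sketch. */
#if 0
static gboolean example_is_metadata(const LttTracefile *tf)
{
  return tf->name == LTT_TRACEFILE_NAME_METADATA;
}
#endif //0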