1 /* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21 #ifdef HAVE_CONFIG_H
22 #include <config.h>
23 #endif
24
25 #include <stdio.h>
26 #include <fcntl.h>
27 #include <string.h>
28 #include <dirent.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <errno.h>
32 #include <unistd.h>
33 #include <math.h>
34 #include <glib.h>
35 #include <malloc.h>
36 #include <sys/mman.h>
37 #include <string.h>
38
39 // For realpath
40 #include <limits.h>
41 #include <stdlib.h>
42
43
44 #include <ltt/ltt.h>
45 #include "ltt-private.h"
46 #include <ltt/trace.h>
47 #include <ltt/event.h>
48 #include <ltt/ltt-types.h>
49 #include <ltt/marker.h>
50
51 /* Tracefile names used in this file */
52
53 GQuark LTT_TRACEFILE_NAME_METADATA;
54
55 #ifndef g_open
56 #define g_open open
57 #endif
58
59
60 #define __UNUSED__ __attribute__((__unused__))
61
62 #define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
63
64 #ifndef g_debug
65 #define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
66 #endif
67
68 #define g_close close
69
70 /* These macros must be used within a function where page_size is a defined
71 * variable. */
72 #define PAGE_MASK (~(page_size-1))
73 #define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
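/* For example, with a page size of 4096 bytes : PAGE_ALIGN(1) == 4096,
 * PAGE_ALIGN(4096) == 4096 and PAGE_ALIGN(4097) == 8192. */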
74
75 LttTrace *father_trace = NULL;
76
77 /* set the offset of the fields belonging to the event;
78 this requires information about the architecture */
79 //void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
80 //size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
81
82 #if 0
83 /* get the size of the field type according to
84 * The facility size information. */
85 static inline void preset_field_type_size(LttTracefile *tf,
86 LttEventType *event_type,
87 off_t offset_root, off_t offset_parent,
88 enum field_status *fixed_root, enum field_status *fixed_parent,
89 LttField *field);
90 #endif //0
91
92 /* map a fixed-size block of information from the file (fd) */
93 static gint map_block(LttTracefile * tf, guint block_num);
94
95 /* calculate nsecs per cycle for the current block */
96 #if 0
97 static guint32 calc_nsecs_per_cycle(LttTracefile * t);
98 static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
99 #endif //0
100
101 /* go to the next event */
102 static int ltt_seek_next_event(LttTracefile *tf);
103
104 static int open_tracefiles(LttTrace *trace, gchar *root_path,
105 gchar *relative_path);
106 static int ltt_process_metadata_tracefile(LttTracefile *tf);
107 static void ltt_tracefile_time_span_get(LttTracefile *tf,
108 LttTime *start, LttTime *end);
109 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
110 static gint map_block(LttTracefile * tf, guint block_num);
111 static void ltt_update_event_size(LttTracefile *tf);
112
113 /* Enable event debugging */
114 static int a_event_debug = 0;
115
116 void ltt_event_debug(int state)
117 {
118 a_event_debug = state;
119 }
120
121 /* trace can be NULL
122 *
123 * Return value : 0 success, 1 bad tracefile
124 */
125 static int parse_trace_header(ltt_subbuffer_header_t *header,
126 LttTracefile *tf, LttTrace *t)
127 {
128 if (header->magic_number == LTT_MAGIC_NUMBER)
129 tf->reverse_bo = 0;
130 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
131 tf->reverse_bo = 1;
132 else /* invalid magic number, bad tracefile ! */
133 return 1;
134
135 if(t) {
136 t->ltt_major_version = header->major_version;
137 t->ltt_minor_version = header->minor_version;
138 t->arch_size = header->arch_size;
139 }
140 tf->alignment = header->alignment;
141
142 /* Get float byte order : might be different from int byte order
143 * (or is set to 0 if the trace has no float (kernel trace)) */
144 tf->float_word_order = 0;
145
146 switch(header->major_version) {
147 case 0:
148 case 1:
149 g_warning("Unsupported trace version : %hhu.%hhu",
150 header->major_version, header->minor_version);
151 return 1;
152 break;
153 case 2:
154 switch(header->minor_version) {
155 case 0:
156 {
157 struct ltt_subbuffer_header_2_0 *vheader = header;
158 tf->buffer_header_size = sizeof(struct ltt_subbuffer_header_2_0) ;
159 tf->tscbits = 27;
160 tf->eventbits = 5;
161 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
162 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
163
164 if(t) {
165 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
166 &vheader->start_freq);
167 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
168 &vheader->freq_scale);
169 if(father_trace) {
170 t->start_freq = father_trace->start_freq;
171 t->freq_scale = father_trace->freq_scale;
172 } else {
173 father_trace = t;
174 }
175 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
176 &vheader->cycle_count_begin);
177 t->start_monotonic = 0;
178 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
179 &vheader->start_time_sec);
180 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
181 &vheader->start_time_usec);
182 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
183
184 t->start_time_from_tsc = ltt_time_from_uint64(
185 (double)t->start_tsc
186 * (1000000000.0 / tf->trace->freq_scale)
187 / (double)t->start_freq);
188 }
189 }
190 break;
191 default:
192 g_warning("Unsupported trace version : %hhu.%hhu",
193 header->major_version, header->minor_version);
194 return 1;
195 }
196 break;
197 default:
198 g_warning("Unsupported trace version : %hhu.%hhu",
199 header->major_version, header->minor_version);
200 return 1;
201 }
202 return 0;
203 }
204
205
206
207 /*****************************************************************************
208 *Function name
209 * ltt_tracefile_open : open a trace file, construct a LttTracefile
210 *Input params
211 * t : the trace containing the tracefile
212 * fileName : path name of the trace file
213 * tf : the tracefile structure
214 *Return value
215 * : 0 for success, -1 otherwise.
216 ****************************************************************************/
217
218 static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
219 {
220 struct stat lTDFStat; /* Trace data file status */
221 ltt_subbuffer_header_t *header;
222 int page_size = getpagesize();
223
224 //open the file
225 tf->long_name = g_quark_from_string(fileName);
226 tf->trace = t;
227 tf->fd = open(fileName, O_RDONLY);
228 if(tf->fd < 0){
229 g_warning("Unable to open input data file %s\n", fileName);
230 goto end;
231 }
232
233 // Get the file's status
234 if(fstat(tf->fd, &lTDFStat) < 0){
235 g_warning("Unable to get the status of the input data file %s\n", fileName);
236 goto close_file;
237 }
238
239 // Is the file large enough to contain a trace?
240 if(lTDFStat.st_size <
241 (off_t)(sizeof(ltt_subbuffer_header_t))){
242 g_print("The input data file %s does not contain a trace\n", fileName);
243 goto close_file;
244 }
245
246 /* Temporarily map the buffer start header to get trace information */
247 /* Head aligned on a multiple of the page size */
248 tf->buffer.head = mmap(0,
249 PAGE_ALIGN(sizeof(ltt_subbuffer_header_t)), PROT_READ,
250 MAP_PRIVATE, tf->fd, 0);
251 if(tf->buffer.head == MAP_FAILED) {
252 perror("Error in allocating memory for buffer of tracefile");
253 goto close_file;
254 }
255 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
256
257 header = (ltt_subbuffer_header_t *)tf->buffer.head;
258
259 if(parse_trace_header(header, tf, NULL)) {
260 g_warning("parse_trace_header error");
261 goto unmap_file;
262 }
263
264 //store the size of the file
265 tf->file_size = lTDFStat.st_size;
266 tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
267 tf->num_blocks = tf->file_size / tf->buf_size;
268
269 if(munmap(tf->buffer.head,
270 PAGE_ALIGN(sizeof(ltt_subbuffer_header_t)))) {
271 g_warning("unmap size : %u\n",
272 PAGE_ALIGN(sizeof(ltt_subbuffer_header_t)));
273 perror("munmap error");
274 g_assert(0);
275 }
276 tf->buffer.head = NULL;
277
278 //read the first block
279 if(map_block(tf,0)) {
280 perror("Cannot map block for tracefile");
281 goto close_file;
282 }
283
284 return 0;
285
286 /* Error */
287 unmap_file:
288 if(munmap(tf->buffer.head,
289 PAGE_ALIGN(sizeof(ltt_subbuffer_header_t)))) {
290 g_warning("unmap size : %u\n",
291 PAGE_ALIGN(sizeof(ltt_subbuffer_header_t)));
292 perror("munmap error");
293 g_assert(0);
294 }
295 close_file:
296 close(tf->fd);
297 end:
298 return -1;
299 }
300
301
302 /*****************************************************************************
303 *Function name
304 * ltt_tracefile_close: close a trace file,
305 *Input params
306 * t : tracefile which will be closed
307 ****************************************************************************/
308
309 static void ltt_tracefile_close(LttTracefile *t)
310 {
311 int page_size = getpagesize();
312
313 if(t->buffer.head != NULL)
314 if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
315 g_warning("unmap size : %u\n",
316 PAGE_ALIGN(t->buf_size));
317 perror("munmap error");
318 g_assert(0);
319 }
320
321 close(t->fd);
322 }
323
324 /****************************************************************************
325 * get_absolute_pathname
326 *
327 * Return the unique (canonical) pathname in the system.
328 *
329 * MD : Fixed this function so it uses realpath, which properly handles
330 * previously overlooked cases ('..' was not resolved correctly before).
331 *
332 ****************************************************************************/
333 void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
334 {
335 abs_pathname[0] = '\0';
336
337 if (realpath(pathname, abs_pathname) != NULL)
338 return;
339 else
340 {
341 /* error, return the original path unmodified */
342 strcpy(abs_pathname, pathname);
343 return;
344 }
345 return;
346 }
347
348 /* Parse a tracefile name of the form .*_.* : the left part is the group name
349 * and the right part is the CPU number (e.g. "metadata_0" -> "metadata", 0).
350 * When no underscore is found, the name is treated as a userspace tracefile
351 * of the form dir1/dir2/name-tid.pgid.creation. */
352
353 static int get_tracefile_name_number(gchar *raw_name,
354 GQuark *name,
355 guint *num,
356 gulong *tid,
357 gulong *pgid,
358 guint64 *creation)
359 {
360 guint raw_name_len = strlen(raw_name);
361 gchar char_name[PATH_MAX];
362 int i;
363 int underscore_pos;
364 long int cpu_num;
365 gchar *endptr;
366 gchar *tmpptr;
367
368 for(i=raw_name_len-1;i>=0;i--) {
369 if(raw_name[i] == '_') break;
370 }
371 if(i==-1) { /* Either not found or name length is 0 */
372 /* This is a userspace tracefile */
373 strncpy(char_name, raw_name, raw_name_len);
374 char_name[raw_name_len] = '\0';
375 *name = g_quark_from_string(char_name);
376 *num = 0; /* unknown cpu */
377 for(i=0;i<raw_name_len;i++) {
378 if(raw_name[i] == '/') {
379 break;
380 }
381 }
382 i++;
383 for(;i<raw_name_len;i++) {
384 if(raw_name[i] == '/') {
385 break;
386 }
387 }
388 i++;
389 for(;i<raw_name_len;i++) {
390 if(raw_name[i] == '-') {
391 break;
392 }
393 }
394 if(i == raw_name_len) return -1;
395 i++;
396 tmpptr = &raw_name[i];
397 for(;i<raw_name_len;i++) {
398 if(raw_name[i] == '.') {
399 raw_name[i] = ' ';
400 break;
401 }
402 }
403 *tid = strtoul(tmpptr, &endptr, 10);
404 if(endptr == tmpptr)
405 return -1; /* No digit */
406 if(*tid == ULONG_MAX)
407 return -1; /* underflow / overflow */
408 i++;
409 tmpptr = &raw_name[i];
410 for(;i<raw_name_len;i++) {
411 if(raw_name[i] == '.') {
412 raw_name[i] = ' ';
413 break;
414 }
415 }
416 *pgid = strtoul(tmpptr, &endptr, 10);
417 if(endptr == tmpptr)
418 return -1; /* No digit */
419 if(*pgid == ULONG_MAX)
420 return -1; /* underflow / overflow */
421 i++;
422 tmpptr = &raw_name[i];
423 *creation = strtoull(tmpptr, &endptr, 10);
424 if(endptr == tmpptr)
425 return -1; /* No digit */
426 if(*creation == G_MAXUINT64)
427 return -1; /* underflow / overflow */
428 } else {
429 underscore_pos = i;
430
431 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
432
433 if(endptr == raw_name+underscore_pos+1)
434 return -1; /* No digit */
435 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
436 return -1; /* underflow / overflow */
437
438 strncpy(char_name, raw_name, underscore_pos);
439 char_name[underscore_pos] = '\0';
440
441 *name = g_quark_from_string(char_name);
442 *num = cpu_num;
443 }
444
445
446 return 0;
447 }
448
449
450 GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
451 {
452 return &trace->tracefiles;
453 }
454
455
456 void compute_tracefile_group(GQuark key_id,
457 GArray *group,
458 struct compute_tracefile_group_args *args)
459 {
460 int i;
461 LttTracefile *tf;
462
463 for(i=0; i<group->len; i++) {
464 tf = &g_array_index (group, LttTracefile, i);
465 if(tf->cpu_online)
466 args->func(tf, args->func_args);
467 }
468 }
469
470
471 static void ltt_tracefile_group_destroy(gpointer data)
472 {
473 GArray *group = (GArray *)data;
474 int i;
475 LttTracefile *tf;
476
477 for(i=0; i<group->len; i++) {
478 tf = &g_array_index (group, LttTracefile, i);
479 if(tf->cpu_online)
480 ltt_tracefile_close(tf);
481 }
482 g_array_free(group, TRUE);
483 }
484
485 static gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
486 {
487 GArray *group = (GArray *)data;
488 int i;
489 LttTracefile *tf;
490
491 for(i=0; i<group->len; i++) {
492 tf = &g_array_index (group, LttTracefile, i);
493 if(tf->cpu_online)
494 return 1;
495 }
496 return 0;
497 }
498
499
500 /* Open each tracefile under a specific directory and put them in a
501 * GData, which permits accessing them by their tracefile group pathname,
502 * e.g. the control/modules tracefile group is accessed by the key
503 * "control/modules".
504 *
505 * relative_path is the path relative to the trace root;
506 * root_path is the full path.
507 *
508 * A tracefile group is simply an array holding all the per-cpu tracefiles.
509 */
510
511 static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
512 {
513 DIR *dir = opendir(root_path);
514 struct dirent *entry;
515 struct stat stat_buf;
516 int ret;
517
518 gchar path[PATH_MAX];
519 int path_len;
520 gchar *path_ptr;
521
522 int rel_path_len;
523 gchar rel_path[PATH_MAX];
524 gchar *rel_path_ptr;
525 LttTracefile tmp_tf;
526
527 if(dir == NULL) {
528 perror(root_path);
529 return ENOENT;
530 }
531
532 strncpy(path, root_path, PATH_MAX-1);
533 path_len = strlen(path);
534 path[path_len] = '/';
535 path_len++;
536 path_ptr = path + path_len;
537
538 strncpy(rel_path, relative_path, PATH_MAX-1);
539 rel_path_len = strlen(rel_path);
540 rel_path[rel_path_len] = '/';
541 rel_path_len++;
542 rel_path_ptr = rel_path + rel_path_len;
543
544 while((entry = readdir(dir)) != NULL) {
545
546 if(entry->d_name[0] == '.') continue;
547
548 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
549 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
550
551 ret = stat(path, &stat_buf);
552 if(ret == -1) {
553 perror(path);
554 continue;
555 }
556
557 g_debug("Tracefile file or directory : %s\n", path);
558
559 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
560
561 if(S_ISDIR(stat_buf.st_mode)) {
562
563 g_debug("Entering subdirectory...\n");
564 ret = open_tracefiles(trace, path, rel_path);
565 if(ret < 0) continue;
566 } else if(S_ISREG(stat_buf.st_mode)) {
567 GQuark name;
568 guint num;
569 gulong tid, pgid;
570 guint64 creation;
571 GArray *group;
572 num = 0;
573 tid = pgid = 0;
574 creation = 0;
575 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
576 continue; /* invalid name */
577
578 g_debug("Opening file.\n");
579 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
580 g_info("Error opening tracefile %s", path);
581
582 continue; /* error opening the tracefile : bad magic number ? */
583 }
584
585 g_debug("Tracefile name is %s and number is %u",
586 g_quark_to_string(name), num);
587
588 tmp_tf.cpu_online = 1;
589 tmp_tf.cpu_num = num;
590 tmp_tf.name = name;
591 tmp_tf.tid = tid;
592 tmp_tf.pgid = pgid;
593 tmp_tf.creation = creation;
594 group = g_datalist_id_get_data(&trace->tracefiles, name);
595 if(group == NULL) {
596 /* Elements are automatically cleared when the array is allocated.
597 * It makes the cpu_online variable set to 0 : cpu offline, by default.
598 */
599 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
600 g_datalist_id_set_data_full(&trace->tracefiles, name,
601 group, ltt_tracefile_group_destroy);
602 }
603
604 /* Add the per cpu tracefile to the named group */
605 unsigned int old_len = group->len;
606 if(num+1 > old_len)
607 group = g_array_set_size(group, num+1);
608 g_array_index (group, LttTracefile, num) = tmp_tf;
609 g_array_index (group, LttTracefile, num).event.tracefile =
610 &g_array_index (group, LttTracefile, num);
611 }
612 }
613
614 closedir(dir);
615
616 return 0;
617 }
618
619
620 /* Presumes the tracefile is already positioned at the beginning. This makes
621 * sense because it must be done right after opening. */
622 static int ltt_process_metadata_tracefile(LttTracefile *tf)
623 {
624 int err;
625 guint i;
626
627 while(1) {
628 err = ltt_tracefile_read_seek(tf);
629 if(err == EPERM) goto seek_error;
630 else if(err == ERANGE) break; /* End of tracefile */
631
632 err = ltt_tracefile_read_update_event(tf);
633 if(err) goto update_error;
634
635 /* The rules are :
636 * It contains only core events :
637 * 0 : set_marker_id
638 * 1 : set_marker_format
639 */
640 if(tf->event.event_id >= MARKER_CORE_IDS) {
641 /* Should only contain core events */
642 g_warning("Error in processing metadata file %s, "
643 "should not contain event id %u.", g_quark_to_string(tf->name),
644 tf->event.event_id);
645 err = EPERM;
646 goto event_id_error;
647 } else {
648 char *pos;
649 const char *marker_name, *format;
650 uint16_t id;
651 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
652
653 switch((enum marker_id)tf->event.event_id) {
654 case MARKER_ID_SET_MARKER_ID:
655 marker_name = pos = tf->event.data;
656 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s", marker_name);
657 pos += strlen(marker_name) + 1;
658 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
659 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
660 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s id %hu",
661 marker_name, id);
662 pos += sizeof(guint16);
663 int_size = *(guint8*)pos;
664 pos += sizeof(guint8);
665 long_size = *(guint8*)pos;
666 pos += sizeof(guint8);
667 pointer_size = *(guint8*)pos;
668 pos += sizeof(guint8);
669 size_t_size = *(guint8*)pos;
670 pos += sizeof(guint8);
671 alignment = *(guint8*)pos;
672 pos += sizeof(guint8);
673 marker_id_event(tf->trace, g_quark_from_string(marker_name),
674 id, int_size, long_size,
675 pointer_size, size_t_size, alignment);
676 break;
677 case MARKER_ID_SET_MARKER_FORMAT:
678 marker_name = pos = tf->event.data;
679 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s",
680 marker_name);
681 pos += strlen(marker_name) + 1;
682 format = pos;
683 pos += strlen(format) + 1;
684 marker_format_event(tf->trace, g_quark_from_string(marker_name),
685 format);
686 /* get information from dictionary TODO */
687 break;
688 default:
689 g_warning("Error in processing metadata file %s, "
690 "unknown event id %hhu.",
691 g_quark_to_string(tf->name),
692 tf->event.event_id);
693 err = EPERM;
694 goto event_id_error;
695 }
696 }
697 }
698 return 0;
699
700 /* Error handling */
701 event_id_error:
702 update_error:
703 seek_error:
704 g_warning("An error occured in metadata tracefile parsing");
705 return err;
706 }
707
708 /*
709 * Open a trace and return its LttTrace handle.
710 *
711 * pathname must be the directory of the trace
712 */
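/* Illustrative usage sketch (not taken from an actual caller) : open a trace,
 * query its time span, then close it.
 *
 *   LttTrace *trace = ltt_trace_open("/path/to/trace");
 *   if (trace != NULL) {
 *     LttTime start, end;
 *     ltt_trace_time_span_get(trace, &start, &end);
 *     ... use the trace ...
 *     ltt_trace_close(trace);
 *   }
 */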
713
714 LttTrace *ltt_trace_open(const gchar *pathname)
715 {
716 gchar abs_path[PATH_MAX];
717 LttTrace * t;
718 LttTracefile *tf;
719 GArray *group;
720 int i, ret;
721 ltt_subbuffer_header_t *header;
722 DIR *dir;
723 struct dirent *entry;
724 guint control_found = 0;
725 struct stat stat_buf;
726 gchar path[PATH_MAX];
727
728 t = g_new(LttTrace, 1);
729 if(!t) goto alloc_error;
730
731 get_absolute_pathname(pathname, abs_path);
732 t->pathname = g_quark_from_string(abs_path);
733
734 g_datalist_init(&t->tracefiles);
735
736 /* Test to see if it looks like a trace */
737 dir = opendir(abs_path);
738 if(dir == NULL) {
739 perror(abs_path);
740 goto open_error;
741 }
742 while((entry = readdir(dir)) != NULL) {
743 strcpy(path, abs_path);
744 strcat(path, "/");
745 strcat(path, entry->d_name);
746 ret = stat(path, &stat_buf);
747 if(ret == -1) {
748 perror(path);
749 continue;
750 }
751 if(S_ISDIR(stat_buf.st_mode)) {
752 if(strcmp(entry->d_name, "control") == 0) {
753 control_found = 1;
754 }
755 }
756 }
757 closedir(dir);
758
759 if(!control_found) goto find_error;
760
761 /* Open all the tracefiles */
762 if(open_tracefiles(t, abs_path, "")) {
763 g_warning("Error opening tracefile %s", abs_path);
764 goto find_error;
765 }
766
767 /* Parse each control/metadata_N tracefile : get runtime facility info */
768 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
769 if(group == NULL) {
770 g_error("Trace %s has no metadata tracefile", abs_path);
771 g_assert(0);
772 goto metadata_error;
773 }
774
775 /* Get the trace information for the control/metadata_0 tracefile */
776 g_assert(group->len > 0);
777 tf = &g_array_index (group, LttTracefile, 0);
778 header = (ltt_subbuffer_header_t *)tf->buffer.head;
779 g_assert(parse_trace_header(header, tf, t) == 0);
780
781 t->num_cpu = group->len;
782
783 ret = allocate_marker_data(t);
784 if (ret)
785 g_error("Error in allocating marker data");
786
787 for(i=0; i<group->len; i++) {
788 tf = &g_array_index (group, LttTracefile, i);
789 if (tf->cpu_online)
790 if(ltt_process_metadata_tracefile(tf))
791 goto metadata_error;
792 }
793
794 return t;
795
796 /* Error handling */
797 metadata_error:
798 destroy_marker_data(t);
799 find_error:
800 g_datalist_clear(&t->tracefiles);
801 open_error:
802 g_free(t);
803 alloc_error:
804 return NULL;
805
806 }
807
808 /* Open another, completely independent, instance of a trace.
809 *
810 * A read on this new instance will read the first event of the trace.
811 *
812 * When we copy a trace, we want all the opening actions to happen again :
813 * the trace is reopened and totally independent from the original.
814 * That's why we call ltt_trace_open.
815 */
816 LttTrace *ltt_trace_copy(LttTrace *self)
817 {
818 return ltt_trace_open(g_quark_to_string(self->pathname));
819 }
820
821 /*
822 * Close a trace
823 */
824
825 void ltt_trace_close(LttTrace *t)
826 {
827 g_datalist_clear(&t->tracefiles);
828 g_free(t);
829 }
830
831
832 /*****************************************************************************
833 * Get the start time and end time of the trace
834 ****************************************************************************/
835
836 void ltt_tracefile_time_span_get(LttTracefile *tf,
837 LttTime *start, LttTime *end)
838 {
839 int err;
840
841 err = map_block(tf, 0);
842 if(unlikely(err)) {
843 g_error("Can not map block");
844 *start = ltt_time_infinite;
845 } else
846 *start = tf->buffer.begin.timestamp;
847
848 err = map_block(tf, tf->num_blocks - 1); /* Last block */
849 if(unlikely(err)) {
850 g_error("Can not map block");
851 *end = ltt_time_zero;
852 } else
853 *end = tf->buffer.end.timestamp;
854 }
855
856 struct tracefile_time_span_get_args {
857 LttTrace *t;
858 LttTime *start;
859 LttTime *end;
860 };
861
862 static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
863 {
864 struct tracefile_time_span_get_args *args =
865 (struct tracefile_time_span_get_args*)user_data;
866
867 GArray *group = (GArray *)data;
868 int i;
869 LttTracefile *tf;
870 LttTime tmp_start;
871 LttTime tmp_end;
872
873 for(i=0; i<group->len; i++) {
874 tf = &g_array_index (group, LttTracefile, i);
875 if(tf->cpu_online) {
876 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
877 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
878 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
879 }
880 }
881 }
882
883 /* return the start and end time of a trace */
884
885 void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
886 {
887 LttTime min_start = ltt_time_infinite;
888 LttTime max_end = ltt_time_zero;
889 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
890
891 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
892
893 if(start != NULL) *start = min_start;
894 if(end != NULL) *end = max_end;
895
896 }
897
898
899 /* Seek to the first event in a tracefile that has a time equal or greater than
900 * the time passed in parameter.
901 *
902 * If the time parameter is outside the tracefile time span : when it is
903 * before the first event, seek to the first event; when it is after the
904 * last event, return ERANGE.
905 *
906 * The before-the-first-event case requires a special seek to that event.
907 *
908 * If the time is after the end of the trace, return ERANGE.
909 *
910 * Do a binary search to find the right block, then a sequential search in the
911 * block to find the event.
912 *
913 * In the special case where the requested time falls inside a block that has
914 * no event at or after that time, the seek lands on the first event of the
915 * next block.
916 *
917 * IMPORTANT NOTE : // FIXME everywhere...
918 *
919 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
920 * you will jump over an event if you do.
921 *
922 * Return value : 0 : no error, the tf->event can be used
923 * ERANGE : time is after the last event of the trace
924 * otherwise : this is an error.
925 *
926 */
927
928 int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
929 {
930 int ret = 0;
931 int err;
932 unsigned int block_num, high, low;
933
934 /* seek at the beginning of trace */
935 err = map_block(tf, 0); /* First block */
936 if(unlikely(err)) {
937 g_error("Can not map block");
938 goto fail;
939 }
940
941 /* If the time is lower or equal the beginning of the trace,
942 * go to the first event. */
943 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
944 ret = ltt_tracefile_read(tf);
945 if(ret == ERANGE) goto range;
946 else if (ret) goto fail;
947 goto found; /* There is either no event in the trace or the event points
948 to the first event in the trace */
949 }
950
951 err = map_block(tf, tf->num_blocks - 1); /* Last block */
952 if(unlikely(err)) {
953 g_error("Can not map block");
954 goto fail;
955 }
956
957 /* If the time is after the end of the trace, return ERANGE. */
958 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
959 goto range;
960 }
961
962 /* Binary search the block */
963 high = tf->num_blocks - 1;
964 low = 0;
965
966 while(1) {
967 block_num = ((high-low) / 2) + low;
968
969 err = map_block(tf, block_num);
970 if(unlikely(err)) {
971 g_error("Can not map block");
972 goto fail;
973 }
974 if(high == low) {
975 /* We cannot divide anymore : this is what happens when the requested time
976 * falls exactly between the end and start timestamps of two consecutive
977 * buffers. It is also what would happen if we had not handled the
978 * out-of-span cases earlier in this function. */
979 /* The event is right in the buffer!
980 * (or in the next buffer first event) */
981 while(1) {
982 ret = ltt_tracefile_read(tf);
983 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
984 else if(ret) goto fail;
985
986 if(ltt_time_compare(time, tf->event.event_time) <= 0)
987 goto found;
988 }
989
990 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
991 /* go to lower part */
992 high = block_num - 1;
993 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
994 /* go to higher part */
995 low = block_num + 1;
996 } else {/* The event is right in the buffer!
997 (or in the next buffer first event) */
998 while(1) {
999 ret = ltt_tracefile_read(tf);
1000 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1001 else if(ret) goto fail;
1002
1003 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1004 break;
1005 }
1006 goto found;
1007 }
1008 }
1009
1010 found:
1011 return 0;
1012 range:
1013 return ERANGE;
1014
1015 /* Error handling */
1016 fail:
1017 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1018 g_quark_to_string(tf->name));
1019 return EPERM;
1020 }
1021
1022 /* Seek to a position indicated by an LttEventPosition
1023 */
1024
1025 int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1026 {
1027 int err;
1028
1029 if(ep->tracefile != tf) {
1030 goto fail;
1031 }
1032
1033 err = map_block(tf, ep->block);
1034 if(unlikely(err)) {
1035 g_error("Can not map block");
1036 goto fail;
1037 }
1038
1039 tf->event.offset = ep->offset;
1040
1041 /* Put back the event real tsc */
1042 tf->event.tsc = ep->tsc;
1043 tf->buffer.tsc = ep->tsc;
1044
1045 err = ltt_tracefile_read_update_event(tf);
1046 if(err) goto fail;
1047
1048 /* deactivate this, as it does nothing for now
1049 err = ltt_tracefile_read_op(tf);
1050 if(err) goto fail;
1051 */
1052
1053 return 0;
1054
1055 fail:
1056 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1057 g_quark_to_string(tf->name));
1058 return 1;
1059 }
1060
1061 /* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1062 * corresponds to.
1063 */
1064
1065 LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1066 {
1067 LttTime time;
1068
1069 if(tsc > tf->trace->start_tsc) {
1070 time = ltt_time_from_uint64(
1071 (double)(tsc - tf->trace->start_tsc)
1072 * (1000000000.0 / tf->trace->freq_scale)
1073 / (double)tf->trace->start_freq);
1074 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1075 } else {
1076 time = ltt_time_from_uint64(
1077 (double)(tf->trace->start_tsc - tsc)
1078 * (1000000000.0 / tf->trace->freq_scale)
1079 / (double)tf->trace->start_freq);
1080 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1081 }
1082 return time;
1083 }
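/* For instance, assuming a start_freq of 1000000000 (1 GHz) and a freq_scale
 * of 1, a tsc that is 2000000000 cycles past start_tsc maps to
 * start_time_from_tsc plus 2 seconds :
 * 2000000000 * (1000000000.0 / 1) / 1000000000.0 = 2000000000 ns. */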
1084
1085 /* Calculate the real event time based on the buffer boundaries */
1086 LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1087 {
1088 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1089 }
1090
1091
1092 /* Get the current event of the tracefile : valid until the next read */
1093 LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1094 {
1095 return &tf->event;
1096 }
1097
1098
1099
1100 /*****************************************************************************
1101 *Function name
1102 * ltt_tracefile_read : Read the next event in the tracefile
1103 *Input params
1104 * t : tracefile
1105 *Return value
1106 *
1107 * Returns 0 if an event can be used in tf->event.
1108 * Returns ERANGE on end of trace. The event in tf->event still can be used
1109 * (if the last block was not empty).
1110 * Returns EPERM on error.
1111 *
1112 * This function does make the tracefile event structure point to the event
1113 * currently pointed to by the tf->event.
1114 *
1115 * Note : you must call a ltt_tracefile_seek to the beginning of the trace to
1116 * reinitialize it after an error if you want results to be coherent.
1117 * It would be the case if a end of trace last buffer has no event : the end
1118 * of trace wouldn't be returned, but an error.
1119 * We make the assumption there is at least one event per buffer.
1120 ****************************************************************************/
1121
1122 int ltt_tracefile_read(LttTracefile *tf)
1123 {
1124 int err;
1125
1126 err = ltt_tracefile_read_seek(tf);
1127 if(err) return err;
1128 err = ltt_tracefile_read_update_event(tf);
1129 if(err) return err;
1130
1131 /* deactivate this, as it does nothing for now
1132 err = ltt_tracefile_read_op(tf);
1133 if(err) return err;
1134 */
1135
1136 return 0;
1137 }
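/* Illustrative read loop (a sketch, not taken from an actual caller). Note
 * that after ltt_tracefile_seek_time() the current event is already valid,
 * so it is consumed before the next ltt_tracefile_read() call :
 *
 *   int err = ltt_tracefile_seek_time(tf, ltt_time_zero);
 *   while (err == 0) {
 *     LttEvent *ev = ltt_tracefile_get_event(tf);
 *     ... process ev ...
 *     err = ltt_tracefile_read(tf);
 *   }
 *
 * err == ERANGE means the end of the trace was reached; any other non-zero
 * value is an error. */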
1138
1139 int ltt_tracefile_read_seek(LttTracefile *tf)
1140 {
1141 int err;
1142
1143 /* Get next buffer until we finally have an event, or end of trace */
1144 while(1) {
1145 err = ltt_seek_next_event(tf);
1146 if(unlikely(err == ENOPROTOOPT)) {
1147 return EPERM;
1148 }
1149
1150 /* Are we at the end of the buffer ? */
1151 if(err == ERANGE) {
1152 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1153 return ERANGE;
1154 } else {
1155 /* get next block */
1156 err = map_block(tf, tf->buffer.index + 1);
1157 if(unlikely(err)) {
1158 g_error("Can not map block");
1159 return EPERM;
1160 }
1161 }
1162 } else break; /* We found an event ! */
1163 }
1164
1165 return 0;
1166 }
1167
1168 /* do an operation when reading a new event */
1169
1170 /* This function does nothing for now */
1171 #if 0
1172 int ltt_tracefile_read_op(LttTracefile *tf)
1173 {
1174 LttEvent *event;
1175
1176 event = &tf->event;
1177
1178 /* do event specific operation */
1179
1180 /* nothing */
1181
1182 return 0;
1183 }
1184 #endif
1185
1186 static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1187 {
1188 unsigned int offset = 0;
1189 int i, j;
1190
1191 g_printf("Event header (tracefile %s offset %llx):\n",
1192 g_quark_to_string(ev->tracefile->long_name),
1193 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1194 + (long)start_pos - (long)ev->tracefile->buffer.head);
1195
1196 while (offset < (long)end_pos - (long)start_pos) {
1197 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1198 g_printf(" ");
1199
1200 for (i = 0; i < 4 ; i++) {
1201 for (j = 0; j < 4; j++) {
1202 if (offset + ((i * 4) + j) <
1203 (long)end_pos - (long)start_pos)
1204 g_printf("%02hhX",
1205 ((char*)start_pos)[offset + ((i * 4) + j)]);
1206 else
1207 g_printf(" ");
1208 g_printf(" ");
1209 }
1210 if (i < 4)
1211 g_printf(" ");
1212 }
1213 offset+=16;
1214 g_printf("\n");
1215 }
1216 }
1217
1218
1219 /* same as ltt_tracefile_read, but does not seek to the next event nor call
1220 * event specific operation. */
1221 int ltt_tracefile_read_update_event(LttTracefile *tf)
1222 {
1223 void * pos;
1224 LttEvent *event;
1225 void *pos_aligned;
1226
1227 event = &tf->event;
1228 pos = tf->buffer.head + event->offset;
1229
1230 /* Read event header */
1231
1232 /* Align the head */
1233 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1234 pos_aligned = pos;
1235
1236 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1237 event->event_id = event->timestamp >> tf->tscbits;
1238 event->timestamp = event->timestamp & tf->tsc_mask;
1239 pos += sizeof(guint32);
1240
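  /* The 32-bit word just read packs the event id in the top tf->eventbits
   * bits and a truncated timestamp in the low tf->tscbits bits. The largest
   * id values (cases 29 to 31 below) are reserved flags announcing an
   * extended header carrying the real 16-bit event id and, depending on the
   * flag, an explicit size and a full 64-bit TSC. */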
1241 switch (event->event_id) {
1242 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1243 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1244 pos += sizeof(guint16);
1245 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1246 pos += sizeof(guint16);
1247 if (event->event_size == 0xFFFF) {
1248 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1249 pos += sizeof(guint32);
1250 }
1251 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1252 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1253 pos += sizeof(guint64);
1254 break;
1255 case 30: /* LTT_RFLAG_ID_SIZE */
1256 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1257 pos += sizeof(guint16);
1258 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1259 pos += sizeof(guint16);
1260 if (event->event_size == 0xFFFF) {
1261 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1262 pos += sizeof(guint32);
1263 }
1264 break;
1265 case 31: /* LTT_RFLAG_ID */
1266 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1267 pos += sizeof(guint16);
1268 event->event_size = G_MAXUINT;
1269 break;
1270 default:
1271 event->event_size = G_MAXUINT;
1272 break;
1273 }
1274
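  /* Extend the truncated tf->tscbits-bit timestamp to a full 64-bit TSC by
   * reusing the high-order bits of the last known buffer TSC. If the
   * truncated value is smaller than the previous one, the counter wrapped
   * around and the next higher bit (tsc_mask_next_bit) is added. */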
1275 if (likely(event->event_id != 29)) {
1276 /* No extended timestamp */
1277 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1278 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1279 + tf->tsc_mask_next_bit)
1280 | (guint64)event->timestamp;
1281 else
1282 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1283 | (guint64)event->timestamp;
1284 }
1285 event->tsc = tf->buffer.tsc;
1286
1287 event->event_time = ltt_interpolate_time(tf, event);
1288
1289 if (a_event_debug)
1290 print_debug_event_header(event, pos_aligned, pos);
1291
1292 event->data = pos;
1293
1294 /*
1295 * Let ltt_update_event_size update event->data according to the largest
1296 * alignment within the payload.
1297 * Get the data size and update the event fields with the current
1298 * information. */
1299 ltt_update_event_size(tf);
1300
1301 return 0;
1302 }
1303
1304
1305 /****************************************************************************
1306 *Function name
1307 * map_block : map a block from the file
1308 *Input Params
1309 * lttdes : ltt trace file
1310 * whichBlock : the block which will be read
1311 *return value
1312 * 0 : success
1313 * EINVAL : lseek fail
1314 * EIO : can not read from the file
1315 ****************************************************************************/
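/* Layout note (as relied upon by map_block and ltt_seek_next_event) : the
 * trace file is a sequence of num_blocks sub-buffers of buf_size bytes each.
 * Every sub-buffer starts with an ltt_subbuffer_header_t giving, among other
 * fields, the begin/end cycle counts and the lost (unused) size at its tail;
 * the packed events follow the header. */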
1316
1317 static gint map_block(LttTracefile * tf, guint block_num)
1318 {
1319 int page_size = getpagesize();
1320 ltt_subbuffer_header_t *header;
1321
1322 g_assert(block_num < tf->num_blocks);
1323
1324 if(tf->buffer.head != NULL) {
1325 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
1326 g_warning("unmap size : %u\n",
1327 PAGE_ALIGN(tf->buf_size));
1328 perror("munmap error");
1329 g_assert(0);
1330 }
1331 }
1332
1333 /* Head aligned on a multiple of the page size */
1334 tf->buffer.head = mmap(0,
1335 PAGE_ALIGN(tf->buf_size),
1336 PROT_READ, MAP_PRIVATE, tf->fd,
1337 PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
1338
1339 if(tf->buffer.head == MAP_FAILED) {
1340 perror("Error in allocating memory for buffer of tracefile");
1341 g_assert(0);
1342 goto map_error;
1343 }
1344 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1345
1346
1347 tf->buffer.index = block_num;
1348
1349 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1350
1351 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1352 &header->cycle_count_begin);
1353 tf->buffer.begin.freq = tf->trace->start_freq;
1354
1355 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1356 tf->buffer.begin.cycle_count);
1357 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1358 &header->cycle_count_end);
1359 tf->buffer.end.freq = tf->trace->start_freq;
1360
1361 tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
1362 &header->lost_size);
1363 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1364 tf->buffer.end.cycle_count);
1365 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1366 tf->event.tsc = tf->buffer.tsc;
1367 tf->buffer.freq = tf->buffer.begin.freq;
1368
1369 /* FIXME
1370 * Eventually support variable buffer sizes : this will need a partial
1371 * pre-read of the headers to build an index when the trace is opened. */
1372 g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
1373 &header->buf_size));
1374
1375 /* Make the current event point to the beginning of the buffer :
1376 * it means that the event read must get the first event. */
1377 tf->event.tracefile = tf;
1378 tf->event.block = block_num;
1379 tf->event.offset = 0;
1380
1381 return 0;
1382
1383 map_error:
1384 return -errno;
1385 }
1386
1387 static void print_debug_event_data(LttEvent *ev)
1388 {
1389 unsigned int offset = 0;
1390 int i, j;
1391
1392 if (!max(ev->event_size, ev->data_size))
1393 return;
1394
1395 g_printf("Event data (tracefile %s offset %llx):\n",
1396 g_quark_to_string(ev->tracefile->long_name),
1397 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1398 + (long)ev->data - (long)ev->tracefile->buffer.head);
1399
1400 while (offset < max(ev->event_size, ev->data_size)) {
1401 g_printf("%8lx", (long)ev->data + offset
1402 - (long)ev->tracefile->buffer.head);
1403 g_printf(" ");
1404
1405 for (i = 0; i < 4 ; i++) {
1406 for (j = 0; j < 4; j++) {
1407 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1408 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1409 else
1410 g_printf(" ");
1411 g_printf(" ");
1412 }
1413 if (i < 4)
1414 g_printf(" ");
1415 }
1416
1417 g_printf(" ");
1418
1419 for (i = 0; i < 4; i++) {
1420 for (j = 0; j < 4; j++) {
1421 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1422 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1423 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1424 else
1425 g_printf(".");
1426 } else
1427 g_printf(" ");
1428 }
1429 }
1430 offset+=16;
1431 g_printf("\n");
1432 }
1433 }
1434
1435 /* Also updates the field offsets */
1436 void ltt_update_event_size(LttTracefile *tf)
1437 {
1438 off_t size = 0;
1439 char *tscdata;
1440 struct marker_info *info;
1441
1442 switch((enum marker_id)tf->event.event_id) {
1443 case MARKER_ID_SET_MARKER_ID:
1444 size = strlen((char*)tf->event.data) + 1;
1445 g_debug("marker %s id set", (char*)tf->event.data);
1446 size += ltt_align(size, sizeof(guint16), tf->alignment);
1447 size += sizeof(guint16);
1448 size += sizeof(guint8);
1449 size += sizeof(guint8);
1450 size += sizeof(guint8);
1451 size += sizeof(guint8);
1452 size += sizeof(guint8);
1453 break;
1454 case MARKER_ID_SET_MARKER_FORMAT:
1455 g_debug("marker %s format set", (char*)tf->event.data);
1456 size = strlen((char*)tf->event.data) + 1;
1457 size += strlen((char*)tf->event.data + size) + 1;
1458 break;
1459 }
1460
1461 info = marker_get_info_from_id(tf->trace, tf->event.event_id);
1462
1463 if (tf->event.event_id >= MARKER_CORE_IDS)
1464 g_assert(info != NULL);
1465
1466 /* Do not update the field offsets of core markers while initially reading
1467 * the metadata tracefile, when the information about these markers does
1468 * not exist yet. */
1469 if (likely(info && info->fields)) {
1470 /* alignment */
1471 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1472 info->largest_align,
1473 info->alignment);
1474 /* size, dynamically computed */
1475 if (info->size != -1)
1476 size = info->size;
1477 else
1478 size = marker_update_fields_offsets(marker_get_info_from_id(tf->trace,
1479 tf->event.event_id), tf->event.data);
1480 }
1481
1482 tf->event.data_size = size;
1483
1484 /* Check consistency between kernel and LTTV structure sizes */
1485 if(tf->event.event_size == G_MAXUINT) {
1486 /* Event size too big to fit in the event size field */
1487 tf->event.event_size = tf->event.data_size;
1488 }
1489
1490 if (a_event_debug)
1491 print_debug_event_data(&tf->event);
1492
1493 /* Having a marker id or marker format event outside of the metadata
1494 * tracefile is a serious bug. */
1495 switch((enum marker_id)tf->event.event_id) {
1496 case MARKER_ID_SET_MARKER_ID:
1497 case MARKER_ID_SET_MARKER_FORMAT:
1498 if (tf->name != g_quark_from_string("/control/metadata"))
1499 g_error("Trace inconsistency : metadata event found in data "
1500 "tracefile %s", g_quark_to_string(tf->long_name));
1501 }
1502
1503 if (tf->event.data_size != tf->event.event_size) {
1504 struct marker_info *info = marker_get_info_from_id(tf->trace,
1505 tf->event.event_id);
1506 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1507 g_quark_to_string(info->name),
1508 tf->event.event_size, tf->event.data_size);
1509 exit(-1);
1510 }
1511 }
1512
1513
1514 /* Take the current event offset of tf and use the event id to figure out
1515 * where the next event offset is.
1516 *
1517 * This is an internal function not meant to be used elsewhere : it will
1518 * not jump over the current block limits. Please consider using
1519 * ltt_tracefile_read to do this.
1520 *
1521 * Returns 0 on success
1522 * ERANGE if we are at the end of the buffer.
1523 * ENOPROTOOPT if an error occurred when getting the current event size.
1524 */
1525 static int ltt_seek_next_event(LttTracefile *tf)
1526 {
1527 int ret = 0;
1528 void *pos;
1529
1530 /* seek over the buffer header if we are at the buffer start */
1531 if(tf->event.offset == 0) {
1532 tf->event.offset += tf->buffer_header_size;
1533
1534 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1535 ret = ERANGE;
1536 }
1537 goto found;
1538 }
1539
1540 pos = tf->event.data;
1541
1542 if(tf->event.data_size < 0) goto error;
1543
1544 pos += (size_t)tf->event.data_size;
1545
1546 tf->event.offset = pos - tf->buffer.head;
1547
1548 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1549 ret = ERANGE;
1550 goto found;
1551 }
1552 g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
1553
1554 found:
1555 return ret;
1556
1557 error:
1558 g_error("Error in ltt_seek_next_event for tracefile %s",
1559 g_quark_to_string(tf->name));
1560 return ENOPROTOOPT;
1561 }
1562
1563 #if 0
1564 /*****************************************************************************
1565 *Function name
1566 * set_fields_offsets : set the precomputable offset of the fields
1567 *Input params
1568 * tracefile : opened trace file
1569 * event_type : the event type
1570 ****************************************************************************/
1571
1572 void set_fields_offsets(LttTracefile *tf, LttEventType *event_type)
1573 {
1574 LttField *field = event_type->root_field;
1575 enum field_status fixed_root = FIELD_FIXED, fixed_parent = FIELD_FIXED;
1576
1577 if(likely(field))
1578 preset_field_type_size(tf, event_type, 0, 0,
1579 &fixed_root, &fixed_parent,
1580 field);
1581
1582 }
1583 #endif //0
1584
1585
1586 /*****************************************************************************
1587 *Function name
1588 * get_alignment : Get the alignment needed for a field.
1589 *Input params
1590 * field : field
1591 *
1592 * returns : The size on which it must be aligned.
1593 *
1594 ****************************************************************************/
1595 #if 0
1596 off_t get_alignment(LttField *field)
1597 {
1598 LttType *type = &field->field_type;
1599
1600 switch(type->type_class) {
1601 case LTT_INT_FIXED:
1602 case LTT_UINT_FIXED:
1603 case LTT_POINTER:
1604 case LTT_CHAR:
1605 case LTT_UCHAR:
1606 case LTT_SHORT:
1607 case LTT_USHORT:
1608 case LTT_INT:
1609 case LTT_UINT:
1610 case LTT_LONG:
1611 case LTT_ULONG:
1612 case LTT_SIZE_T:
1613 case LTT_SSIZE_T:
1614 case LTT_OFF_T:
1615 case LTT_FLOAT:
1616 case LTT_ENUM:
1617 /* Align offset on type size */
1618 g_assert(field->field_size != 0);
1619 return field->field_size;
1620 break;
1621 case LTT_STRING:
1622 return 1;
1623 break;
1624 case LTT_ARRAY:
1625 g_assert(type->fields->len == 1);
1626 {
1627 LttField *child = &g_array_index(type->fields, LttField, 0);
1628 return get_alignment(child);
1629 }
1630 break;
1631 case LTT_SEQUENCE:
1632 g_assert(type->fields->len == 2);
1633 {
1634 off_t localign = 1;
1635 LttField *child = &g_array_index(type->fields, LttField, 0);
1636
1637 localign = max(localign, get_alignment(child));
1638
1639 child = &g_array_index(type->fields, LttField, 1);
1640 localign = max(localign, get_alignment(child));
1641
1642 return localign;
1643 }
1644 break;
1645 case LTT_STRUCT:
1646 case LTT_UNION:
1647 {
1648 guint i;
1649 off_t localign = 1;
1650
1651 for(i=0; i<type->fields->len; i++) {
1652 LttField *child = &g_array_index(type->fields, LttField, i);
1653 localign = max(localign, get_alignment(child));
1654 }
1655 return localign;
1656 }
1657 break;
1658 case LTT_NONE:
1659 default:
1660 g_error("get_alignment : unknown type");
1661 return -1;
1662 }
1663 }
1664
1665 #endif //0
1666
1667 /*****************************************************************************
1668 *Function name
1669 * field_compute_static_size : Determine the size of fields known by their
1670 * sole definition. Unions, arrays and struct sizes might be known, but
1671 * the parser does not give that information.
1672 *Input params
1673 * tf : tracefile
1674 * field : field
1675 *
1676 ****************************************************************************/
1677 #if 0
1678 void field_compute_static_size(LttFacility *fac, LttField *field)
1679 {
1680 LttType *type = &field->field_type;
1681
1682 switch(type->type_class) {
1683 case LTT_INT_FIXED:
1684 case LTT_UINT_FIXED:
1685 case LTT_POINTER:
1686 case LTT_CHAR:
1687 case LTT_UCHAR:
1688 case LTT_SHORT:
1689 case LTT_USHORT:
1690 case LTT_INT:
1691 case LTT_UINT:
1692 case LTT_LONG:
1693 case LTT_ULONG:
1694 case LTT_SIZE_T:
1695 case LTT_SSIZE_T:
1696 case LTT_OFF_T:
1697 case LTT_FLOAT:
1698 case LTT_ENUM:
1699 case LTT_STRING:
1700 /* nothing to do */
1701 break;
1702 case LTT_ARRAY:
1703 /* note this : the array type size is the number of elements in the array,
1704 * while the array field size is the length of the array in bytes */
1705 g_assert(type->fields->len == 1);
1706 {
1707 LttField *child = &g_array_index(type->fields, LttField, 0);
1708 field_compute_static_size(fac, child);
1709
1710 if(child->field_size != 0) {
1711 field->field_size = type->size * child->field_size;
1712 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1713 sizeof(off_t), type->size);
1714 } else {
1715 field->field_size = 0;
1716 }
1717 }
1718 break;
1719 case LTT_SEQUENCE:
1720 g_assert(type->fields->len == 2);
1721 {
1722 off_t local_offset = 0;
1723 LttField *child = &g_array_index(type->fields, LttField, 1);
1724 field_compute_static_size(fac, child);
1725 field->field_size = 0;
1726 type->size = 0;
1727 if(child->field_size != 0) {
1728 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1729 sizeof(off_t), SEQUENCE_AVG_ELEMENTS);
1730 }
1731 }
1732 break;
1733 case LTT_STRUCT:
1734 case LTT_UNION:
1735 {
1736 guint i;
1737 for(i=0;i<type->fields->len;i++) {
1738 LttField *child = &g_array_index(type->fields, LttField, i);
1739 field_compute_static_size(fac, child);
1740 if(child->field_size != 0) {
1741 type->size += ltt_align(type->size, get_alignment(child),
1742 fac->alignment);
1743 type->size += child->field_size;
1744 } else {
1745 /* As soon as we find a child with variable size, we have
1746 * a variable size */
1747 type->size = 0;
1748 break;
1749 }
1750 }
1751 field->field_size = type->size;
1752 }
1753 break;
1754 default:
1755 g_error("field_static_size : unknown type");
1756 }
1757
1758 }
1759 #endif //0
1760
1761
1762 /*****************************************************************************
1763 *Function name
1764 * precompute_fields_offsets : set the precomputable offset of the fields
1765 *Input params
1766 * fac : facility
1767 * field : the field
1768 * offset : pointer to the current offset, must be incremented
1769 *
1770 * return : 1 : found a variable length field, stop the processing.
1771 * 0 otherwise.
1772 ****************************************************************************/
1773
1774 #if 0
1775 gint precompute_fields_offsets(LttFacility *fac, LttField *field, off_t *offset, gint is_compact)
1776 {
1777 LttType *type = &field->field_type;
1778
1779 if(unlikely(is_compact)) {
1780 g_assert(field->field_size != 0);
1781 /* FIXME THIS IS A HUUUUUGE hack :
1782 * offset is between the compact_data field in struct LttEvent
1783 * and the address of the field root in the memory map.
1784 * ark. Both will stay at the same addresses while the event
1785 * is readable, so it's ok.
1786 */
1787 field->offset_root = 0;
1788 field->fixed_root = FIELD_FIXED;
1789 return 0;
1790 }
1791
1792 switch(type->type_class) {
1793 case LTT_INT_FIXED:
1794 case LTT_UINT_FIXED:
1795 case LTT_POINTER:
1796 case LTT_CHAR:
1797 case LTT_UCHAR:
1798 case LTT_SHORT:
1799 case LTT_USHORT:
1800 case LTT_INT:
1801 case LTT_UINT:
1802 case LTT_LONG:
1803 case LTT_ULONG:
1804 case LTT_SIZE_T:
1805 case LTT_SSIZE_T:
1806 case LTT_OFF_T:
1807 case LTT_FLOAT:
1808 case LTT_ENUM:
1809 g_assert(field->field_size != 0);
1810 /* Align offset on type size */
1811 *offset += ltt_align(*offset, get_alignment(field),
1812 fac->alignment);
1813 /* remember offset */
1814 field->offset_root = *offset;
1815 field->fixed_root = FIELD_FIXED;
1816 /* Increment offset */
1817 *offset += field->field_size;
1818 return 0;
1819 break;
1820 case LTT_STRING:
1821 field->offset_root = *offset;
1822 field->fixed_root = FIELD_FIXED;
1823 return 1;
1824 break;
1825 case LTT_ARRAY:
1826 g_assert(type->fields->len == 1);
1827 {
1828 LttField *child = &g_array_index(type->fields, LttField, 0);
1829
1830 *offset += ltt_align(*offset, get_alignment(field),
1831 fac->alignment);
1832
1833 /* remember offset */
1834 field->offset_root = *offset;
1835 field->array_offset = *offset;
1836 field->fixed_root = FIELD_FIXED;
1837
1838 /* Let the child be variable */
1839 //precompute_fields_offsets(tf, child, offset);
1840
1841 if(field->field_size != 0) {
1842 /* Increment offset */
1843 /* field_size is the array size in bytes */
1844 *offset += field->field_size;
1845 return 0;
1846 } else {
1847 return 1;
1848 }
1849 }
1850 break;
1851 case LTT_SEQUENCE:
1852 g_assert(type->fields->len == 2);
1853 {
1854 LttField *child;
1855 guint ret;
1856
1857 *offset += ltt_align(*offset, get_alignment(field),
1858 fac->alignment);
1859
1860 /* remember offset */
1861 field->offset_root = *offset;
1862 field->fixed_root = FIELD_FIXED;
1863
1864 child = &g_array_index(type->fields, LttField, 0);
1865 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1866 g_assert(ret == 0); /* Seq len cannot have variable len */
1867
1868 child = &g_array_index(type->fields, LttField, 1);
1869 *offset += ltt_align(*offset, get_alignment(child),
1870 fac->alignment);
1871 field->array_offset = *offset;
1872 /* Let the child be variable. */
1873 //ret = precompute_fields_offsets(fac, child, offset);
1874
1875 /* Cannot precompute fields offsets of sequence members, and has
1876 * variable length. */
1877 return 1;
1878 }
1879 break;
1880 case LTT_STRUCT:
1881 {
1882 LttField *child;
1883 guint i;
1884 gint ret=0;
1885
1886 *offset += ltt_align(*offset, get_alignment(field),
1887 fac->alignment);
1888 /* remember offset */
1889 field->offset_root = *offset;
1890 field->fixed_root = FIELD_FIXED;
1891
1892 for(i=0; i< type->fields->len; i++) {
1893 child = &g_array_index(type->fields, LttField, i);
1894 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1895
1896 if(ret) break;
1897 }
1898 return ret;
1899 }
1900 break;
1901 case LTT_UNION:
1902 {
1903 LttField *child;
1904 guint i;
1905 gint ret=0;
1906
1907 *offset += ltt_align(*offset, get_alignment(field),
1908 fac->alignment);
1909 /* remember offset */
1910 field->offset_root = *offset;
1911 field->fixed_root = FIELD_FIXED;
1912
1913 for(i=0; i< type->fields->len; i++) {
1914 *offset = field->offset_root;
1915 child = &g_array_index(type->fields, LttField, i);
1916 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1917
1918 if(ret) break;
1919 }
1920 *offset = field->offset_root + field->field_size;
1921 return ret;
1922 }
1923
1924 break;
1925 case LTT_NONE:
1926 default:
1927 g_error("precompute_fields_offsets : unknown type");
1928 return 1;
1929 }
1930
1931 }
1932
1933 #endif //0
1934
1935 #if 0
1936 /*****************************************************************************
1937 *Function name
1938 * precompute_offsets : set the precomputable offset of an event type
1939 *Input params
1940 * tf : tracefile
1941 * event : event type
1942 *
1943 ****************************************************************************/
1944 void precompute_offsets(LttFacility *fac, LttEventType *event)
1945 {
1946 guint i;
1947 off_t offset = 0;
1948 gint ret;
1949
1950 /* First, compute the size of fixed size fields. Will determine size for
1951 * arrays, struct and unions, which is not done by the parser */
1952 for(i=0; i<event->fields->len; i++) {
1953 LttField *field = &g_array_index(event->fields, LttField, i);
1954 field_compute_static_size(fac, field);
1955 }
1956
1957 /* Precompute all known offsets */
1958 for(i=0; i<event->fields->len; i++) {
1959 LttField *field = &g_array_index(event->fields, LttField, i);
1960 if(event->has_compact_data && i == 0)
1961 ret = precompute_fields_offsets(fac, field, &offset, 1);
1962 else
1963 ret = precompute_fields_offsets(fac, field, &offset, 0);
1964 if(ret) break;
1965 }
1966 }
1967 #endif //0
1968
1969
1970
1971 /*****************************************************************************
1972 *Function name
1973 * preset_field_type_size : set the fixed sizes of the field type
1974 *Input params
1975 * tf : tracefile
1976 * event_type : event type
1977 * offset_root : offset from the root
1978 * offset_parent : offset from the parent
1979 * fixed_root : Do we know a fixed offset to the root ?
1980 * fixed_parent : Do we know a fixed offset to the parent ?
1981 * field : field
1982 ****************************************************************************/
1983
1984
1985
1986 // Preset the fixed size offsets. Calculate them just like genevent-new does:
1987 // by incrementing an offset value that represents the offset from the start
1988 // of the event data.
1989 // The preset information is : offsets up to (and including) the first element
1990 // of variable size. All subsequent fields must be flagged "VARIABLE OFFSET".
1991 #if 0
1992 void preset_field_type_size(LttTracefile *tf, LttEventType *event_type,
1993 off_t offset_root, off_t offset_parent,
1994 enum field_status *fixed_root, enum field_status *fixed_parent,
1995 LttField *field)
1996 {
1997 enum field_status local_fixed_root, local_fixed_parent;
1998 guint i;
1999 LttType *type;
2000
2001 g_assert(field->fixed_root == FIELD_UNKNOWN);
2002 g_assert(field->fixed_parent == FIELD_UNKNOWN);
2003 g_assert(field->fixed_size == FIELD_UNKNOWN);
2004
2005 type = field->field_type;
2006
2007 field->fixed_root = *fixed_root;
2008 if(field->fixed_root == FIELD_FIXED)
2009 field->offset_root = offset_root;
2010 else
2011 field->offset_root = 0;
2012
2013 field->fixed_parent = *fixed_parent;
2014 if(field->fixed_parent == FIELD_FIXED)
2015 field->offset_parent = offset_parent;
2016 else
2017 field->offset_parent = 0;
2018
2019 size_t current_root_offset;
2020 size_t current_offset;
2021 enum field_status current_child_status, final_child_status;
2022 size_t max_size;
2023
2024 switch(type->type_class) {
2025 case LTT_INT_FIXED:
2026 case LTT_UINT_FIXED:
2027 case LTT_CHAR:
2028 case LTT_UCHAR:
2029 case LTT_SHORT:
2030 case LTT_USHORT:
2031 case LTT_INT:
2032 case LTT_UINT:
2033 case LTT_FLOAT:
2034 case LTT_ENUM:
2035 field->field_size = ltt_type_size(tf->trace, type);
2036 field->fixed_size = FIELD_FIXED;
2037 break;
2038 case LTT_POINTER:
2039 field->field_size = (off_t)event_type->facility->pointer_size;
2040 field->fixed_size = FIELD_FIXED;
2041 break;
2042 case LTT_LONG:
2043 case LTT_ULONG:
2044 field->field_size = (off_t)event_type->facility->long_size;
2045 field->fixed_size = FIELD_FIXED;
2046 break;
2047 case LTT_SIZE_T:
2048 case LTT_SSIZE_T:
2049 case LTT_OFF_T:
2050 field->field_size = (off_t)event_type->facility->size_t_size;
2051 field->fixed_size = FIELD_FIXED;
2052 break;
2053 case LTT_SEQUENCE:
2054 local_fixed_root = FIELD_VARIABLE;
2055 local_fixed_parent = FIELD_VARIABLE;
2056 preset_field_type_size(tf, event_type,
2057 0, 0,
2058 &local_fixed_root, &local_fixed_parent,
2059 field->child[0]);
2060 field->fixed_size = FIELD_VARIABLE;
2061 field->field_size = 0;
2062 *fixed_root = FIELD_VARIABLE;
2063 *fixed_parent = FIELD_VARIABLE;
2064 break;
2065 case LTT_STRING:
2066 field->fixed_size = FIELD_VARIABLE;
2067 field->field_size = 0;
2068 *fixed_root = FIELD_VARIABLE;
2069 *fixed_parent = FIELD_VARIABLE;
2070 break;
2071 case LTT_ARRAY:
2072 local_fixed_root = FIELD_VARIABLE;
2073 local_fixed_parent = FIELD_VARIABLE;
2074 preset_field_type_size(tf, event_type,
2075 0, 0,
2076 &local_fixed_root, &local_fixed_parent,
2077 field->child[0]);
2078 field->fixed_size = field->child[0]->fixed_size;
2079 if(field->fixed_size == FIELD_FIXED) {
2080 field->field_size = type->element_number * field->child[0]->field_size;
2081 } else {
2082 field->field_size = 0;
2083 *fixed_root = FIELD_VARIABLE;
2084 *fixed_parent = FIELD_VARIABLE;
2085 }
2086 break;
2087 case LTT_STRUCT:
2088 current_root_offset = field->offset_root;
2089 current_offset = 0;
2090 current_child_status = FIELD_FIXED;
2091 for(i=0;i<type->element_number;i++) {
2092 preset_field_type_size(tf, event_type,
2093 current_root_offset, current_offset,
2094 fixed_root, &current_child_status,
2095 field->child[i]);
2096 if(current_child_status == FIELD_FIXED) {
2097 current_root_offset += field->child[i]->field_size;
2098 current_offset += field->child[i]->field_size;
2099 } else {
2100 current_root_offset = 0;
2101 current_offset = 0;
2102 }
2103 }
2104 if(current_child_status != FIELD_FIXED) {
2105 *fixed_parent = current_child_status;
2106 field->field_size = 0;
2107 field->fixed_size = current_child_status;
2108 } else {
2109 field->field_size = current_offset;
2110 field->fixed_size = FIELD_FIXED;
2111 }
2112 break;
2113 case LTT_UNION:
2114 current_root_offset = field->offset_root;
2115 current_offset = 0;
2116 max_size = 0;
2117 final_child_status = FIELD_FIXED;
2118 for(i=0;i<type->element_number;i++) {
2119 enum field_status current_root_child_status = FIELD_FIXED;
2120 enum field_status current_child_status = FIELD_FIXED;
2121 preset_field_type_size(tf, event_type,
2122 current_root_offset, current_offset,
2123 &current_root_child_status, &current_child_status,
2124 field->child[i]);
2125 if(current_child_status != FIELD_FIXED)
2126 final_child_status = current_child_status;
2127 else
2128 max_size = max(max_size, field->child[i]->field_size);
2129 }
2130 if(final_child_status != FIELD_FIXED) {
2131 g_error("LTTV does not support variable size fields in unions.");
2132 /* This will stop the application. */
2133 *fixed_root = final_child_status;
2134 *fixed_parent = final_child_status;
2135 field->field_size = 0;
2136       field->fixed_size = final_child_status;
2137 } else {
2138 field->field_size = max_size;
2139 field->fixed_size = FIELD_FIXED;
2140 }
2141 break;
2142 case LTT_NONE:
2143 g_error("unexpected type NONE");
2144 break;
2145 }
2146
2147 }
2148 #endif //0
2149
2150 /*****************************************************************************
2151 *Function name
2152 * check_fields_compatibility : Check for compatibility between two fields :
2153 * do they use the same inner structure ?
2154 *Input params
2155 * event_type1 : event type
2156 * event_type2 : event type
2157 * field1 : field
2158 * field2 : field
2159 *Returns : 0 if identical
2160 * 1 if not.
2161 ****************************************************************************/
2162 // This function checks for equality of field types. Therefore, it does not
2163 // compare offsets per se. For instance, an aligned version of a structure is
2164 // compatible with an unaligned version of the same structure.
2165 #if 0
2166 gint check_fields_compatibility(LttEventType *event_type1,
2167 LttEventType *event_type2,
2168 LttField *field1, LttField *field2)
2169 {
2170 guint different = 0;
2171 LttType *type1;
2172 LttType *type2;
2173
2174 if(field1 == NULL) {
2175 if(field2 == NULL) goto end;
2176 else {
2177 different = 1;
2178 goto end;
2179 }
2180 } else if(field2 == NULL) {
2181 different = 1;
2182 goto end;
2183 }
2184
2185 type1 = &field1->field_type;
2186 type2 = &field2->field_type;
2187
2188 if(type1->type_class != type2->type_class) {
2189 different = 1;
2190 goto end;
2191 }
2192 if(type1->network != type2->network) {
2193 different = 1;
2194 goto end;
2195 }
2196
2197 switch(type1->type_class) {
2198 case LTT_INT_FIXED:
2199 case LTT_UINT_FIXED:
2200 case LTT_POINTER:
2201 case LTT_CHAR:
2202 case LTT_UCHAR:
2203 case LTT_SHORT:
2204 case LTT_USHORT:
2205 case LTT_INT:
2206 case LTT_UINT:
2207 case LTT_LONG:
2208 case LTT_ULONG:
2209 case LTT_SIZE_T:
2210 case LTT_SSIZE_T:
2211 case LTT_OFF_T:
2212 case LTT_FLOAT:
2213 case LTT_ENUM:
2214 if(field1->field_size != field2->field_size)
2215 different = 1;
2216 break;
2217 case LTT_STRING:
2218 break;
2219 case LTT_ARRAY:
2220 {
2221 LttField *child1 = &g_array_index(type1->fields, LttField, 0);
2222 LttField *child2 = &g_array_index(type2->fields, LttField, 0);
2223
2224 if(type1->size != type2->size)
2225 different = 1;
2226 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2227 different = 1;
2228 }
2229 break;
2230 case LTT_SEQUENCE:
2231 {
2232 LttField *child1 = &g_array_index(type1->fields, LttField, 1);
2233 LttField *child2 = &g_array_index(type2->fields, LttField, 1);
2234
2235 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2236 different = 1;
2237 }
2238 break;
2239 case LTT_STRUCT:
2240 case LTT_UNION:
2241 {
2242 LttField *child;
2243 guint i;
2244
2245 if(type1->fields->len != type2->fields->len) {
2246 different = 1;
2247 goto end;
2248 }
2249
2250 for(i=0; i< type1->fields->len; i++) {
2251 LttField *child1;
2252 LttField *child2;
2253 child1 = &g_array_index(type1->fields, LttField, i);
2254 child2 = &g_array_index(type2->fields, LttField, i);
2255 different = check_fields_compatibility(event_type1,
2256 event_type2, child1, child2);
2257
2258 if(different) break;
2259 }
2260 }
2261 break;
2262 case LTT_NONE:
2263 default:
2264 g_error("check_fields_compatibility : unknown type");
2265 }
2266
2267 end:
2268 return different;
2269 }
2270 #endif //0
2271
2272 #if 0
2273 gint check_fields_compatibility(LttEventType *event_type1,
2274 LttEventType *event_type2,
2275 LttField *field1, LttField *field2)
2276 {
2277 guint different = 0;
2278 guint i;
2279 LttType *type1;
2280 LttType *type2;
2281
2282 if(field1 == NULL) {
2283 if(field2 == NULL) goto end;
2284 else {
2285 different = 1;
2286 goto end;
2287 }
2288 } else if(field2 == NULL) {
2289 different = 1;
2290 goto end;
2291 }
2292
2293 g_assert(field1->fixed_root != FIELD_UNKNOWN);
2294 g_assert(field2->fixed_root != FIELD_UNKNOWN);
2295 g_assert(field1->fixed_parent != FIELD_UNKNOWN);
2296 g_assert(field2->fixed_parent != FIELD_UNKNOWN);
2297 g_assert(field1->fixed_size != FIELD_UNKNOWN);
2298 g_assert(field2->fixed_size != FIELD_UNKNOWN);
2299
2300 type1 = field1->field_type;
2301 type2 = field2->field_type;
2302
2303 if(type1->type_class != type2->type_class) {
2304 different = 1;
2305 goto end;
2306 }
2307 if(type1->element_name != type2->element_name) {
2308 different = 1;
2309 goto end;
2310 }
2311
2312 switch(type1->type_class) {
2313 case LTT_INT_FIXED:
2314 case LTT_UINT_FIXED:
2315 case LTT_POINTER:
2316 case LTT_CHAR:
2317 case LTT_UCHAR:
2318 case LTT_SHORT:
2319 case LTT_USHORT:
2320 case LTT_INT:
2321 case LTT_UINT:
2322 case LTT_FLOAT:
2324 case LTT_LONG:
2325 case LTT_ULONG:
2326 case LTT_SIZE_T:
2327 case LTT_SSIZE_T:
2328 case LTT_OFF_T:
2329 if(field1->field_size != field2->field_size) {
2330 different = 1;
2331 goto end;
2332 }
2333 break;
2334 case LTT_ENUM:
2335 if(type1->element_number != type2->element_number) {
2336 different = 1;
2337 goto end;
2338 }
2339 for(i=0;i<type1->element_number;i++) {
2340 if(type1->enum_strings[i] != type2->enum_strings[i]) {
2341 different = 1;
2342 goto end;
2343 }
2344 }
2345 break;
2346 case LTT_SEQUENCE:
2347 /* Two elements : size and child */
2348       g_assert(type1->element_number == type2->element_number);
2349 for(i=0;i<type1->element_number;i++) {
2350 if(check_fields_compatibility(event_type1, event_type2,
2351 field1->child[0], field2->child[0])) {
2352 different = 1;
2353 goto end;
2354 }
2355 }
2356 break;
2357 case LTT_STRING:
2358 break;
2359 case LTT_ARRAY:
2360 if(field1->field_size != field2->field_size) {
2361 different = 1;
2362 goto end;
2363 }
2364 /* Two elements : size and child */
2365       g_assert(type1->element_number == type2->element_number);
2366 for(i=0;i<type1->element_number;i++) {
2367 if(check_fields_compatibility(event_type1, event_type2,
2368 field1->child[0], field2->child[0])) {
2369 different = 1;
2370 goto end;
2371 }
2372 }
2373 break;
2374 case LTT_STRUCT:
2375 case LTT_UNION:
2376 if(type1->element_number != type2->element_number) {
2377 different = 1;
2378 break;
2379 }
2380 for(i=0;i<type1->element_number;i++) {
2381 if(check_fields_compatibility(event_type1, event_type2,
2382                                       field1->child[i], field2->child[i])) {
2383 different = 1;
2384 goto end;
2385 }
2386 }
2387 break;
2388 }
2389 end:
2390 return different;
2391 }
2392 #endif //0
2393
2394
2395 /*****************************************************************************
2396 *Function name
2397 * ltt_get_int : get an integer number
2398 *Input params
2399 * reverse_byte_order: must we reverse the byte order ?
2400 * size : the size of the integer
2401 *    data               : the data pointer
2402 *Return value
2403 *    gint64             : a 64-bit integer
2404 ****************************************************************************/
2405
2406 gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
2407 {
2408 gint64 val;
2409
2410 switch(size) {
2411 case 1: val = *((gint8*)data); break;
2412 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
2413 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
2414 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
2415 default: val = ltt_get_int64(reverse_byte_order, data);
2416 g_critical("get_int : integer size %d unknown", size);
2417 break;
2418 }
2419
2420 return val;
2421 }
2422
2423 /*****************************************************************************
2424 *Function name
2425 * ltt_get_uint : get an unsigned integer number
2426 *Input params
2427 * reverse_byte_order: must we reverse the byte order ?
2428 * size : the size of the integer
2429 *    data               : the data pointer
2430 *Return value
2431 *    guint64            : a 64-bit unsigned integer
2432 ****************************************************************************/
2433
2434 guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
2435 {
2436 guint64 val;
2437
2438 switch(size) {
2439     case 1: val = *((guint8*)data); break;
2440 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
2441 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
2442 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
2443 default: val = ltt_get_uint64(reverse_byte_order, data);
2444 g_critical("get_uint : unsigned integer size %d unknown",
2445 size);
2446 break;
2447 }
2448
2449 return val;
2450 }
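
/* Usage sketch (illustrative only, hence kept disabled like the other
 * examples in this file): reading a 4-byte unsigned field from a raw event
 * payload with ltt_get_uint(). The payload pointer, the field offset and the
 * reverse_byte_order flag are hypothetical parameters; in the reader code the
 * byte-order flag is normally derived from the tracefile header. The same
 * pattern applies to ltt_get_int() for signed fields. */
#if 0
static guint64 example_read_u32_field(gboolean reverse_byte_order,
    void *payload, size_t field_offset)
{
  /* ltt_get_uint() widens the 4-byte value to 64 bits and reverses the
   * byte order when requested. */
  return ltt_get_uint(reverse_byte_order, 4, (char *)payload + field_offset);
}
#endif //0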
2451
2452
2453 /* get the node name of the system */
2454
2455 char * ltt_trace_system_description_node_name (LttSystemDescription * s)
2456 {
2457 return s->node_name;
2458 }
2459
2460
2461 /* get the domain name of the system */
2462
2463 char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
2464 {
2465 return s->domain_name;
2466 }
2467
2468
2469 /* get the description of the system */
2470
2471 char * ltt_trace_system_description_description (LttSystemDescription * s)
2472 {
2473 return s->description;
2474 }
2475
2476
2477 /* get the NTP corrected start time of the trace */
2478 LttTime ltt_trace_start_time(LttTrace *t)
2479 {
2480 return t->start_time;
2481 }
2482
2483 /* get the monotonic start time of the trace */
2484 LttTime ltt_trace_start_time_monotonic(LttTrace *t)
2485 {
2486 return t->start_time_from_tsc;
2487 }
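
/* Usage sketch (illustrative only, hence disabled): computing the elapsed
 * time between an event timestamp and the NTP-corrected trace start time.
 * This assumes the ltt_time_sub() helper from ltt/time.h; the event_time
 * parameter is hypothetical. */
#if 0
static LttTime example_time_since_start(LttTrace *t, LttTime event_time)
{
  return ltt_time_sub(event_time, ltt_trace_start_time(t));
}
#endif //0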
2488
2489 static LttTracefile *ltt_tracefile_new()
2490 {
2491 LttTracefile *tf;
2492 tf = g_new(LttTracefile, 1);
2493 tf->event.tracefile = tf;
2494 return tf;
2495 }
2496
2497 static void ltt_tracefile_destroy(LttTracefile *tf)
2498 {
2499 g_free(tf);
2500 }
2501
2502 static void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
2503 {
2504 *dest = *src;
2505 }
2506
2507 /* Before library loading... */
2508
2509 static __attribute__((constructor)) void init(void)
2510 {
2511 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("/control/metadata");
2512 }
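
/* Usage sketch (illustrative only, hence disabled): the quark registered by
 * the constructor above can be compared directly against a tracefile name
 * quark to recognize the metadata channel. The "name" parameter is
 * hypothetical; callers would obtain it from the tracefile being inspected. */
#if 0
static gboolean example_is_metadata_tracefile(GQuark name)
{
  return name == LTT_TRACEFILE_NAME_METADATA;
}
#endif //0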