update quickstart and flight
[lttv.git] / trunk / lttv / ltt / tracefile.c
1/* This file is part of the Linux Trace Toolkit viewer
2 * Copyright (C) 2005 Mathieu Desnoyers
3 *
4 * Complete rewrite from the original version made by XangXiu Yang.
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License Version 2.1 as published by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, write to the
17 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
18 * Boston, MA 02111-1307, USA.
19 */
20
21#ifdef HAVE_CONFIG_H
22#include <config.h>
23#endif
24
25#include <stdio.h>
26#include <fcntl.h>
27#include <string.h>
28#include <dirent.h>
29#include <sys/stat.h>
30#include <sys/types.h>
31#include <errno.h>
32#include <unistd.h>
33#include <math.h>
34#include <glib.h>
35#include <malloc.h>
36#include <sys/mman.h>
37#include <string.h>
38
39// For realpath
40#include <limits.h>
41#include <stdlib.h>
42
43
44#include <ltt/ltt.h>
45#include "ltt-private.h"
46#include <ltt/trace.h>
47#include <ltt/event.h>
48#include <ltt/ltt-types.h>
49#include <ltt/marker.h>
50
51/* Tracefile names used in this file */
52
53GQuark LTT_TRACEFILE_NAME_METADATA;
54
55#ifndef g_open
56#define g_open open
57#endif
58
59
60#define __UNUSED__ __attribute__((__unused__))
61
62#define g_info(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_INFO, format)
63
64#ifndef g_debug
65#define g_debug(format...) g_log (G_LOG_DOMAIN, G_LOG_LEVEL_DEBUG, format)
66#endif
67
68#define g_close close
69
70/* Those macros must be called from within a function where page_size is a known
71 * variable */
72#define PAGE_MASK (~(page_size-1))
73#define PAGE_ALIGN(addr) (((addr)+page_size-1)&PAGE_MASK)
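/* For example, with page_size = 4096 :
 *   PAGE_ALIGN(1)    == 4096
 *   PAGE_ALIGN(4096) == 4096
 *   PAGE_ALIGN(4097) == 8192
 * i.e. PAGE_ALIGN rounds an address up to the next page-size multiple. */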
74
75LttTrace *father_trace = NULL;
76
77/* set the offset of the fields belonging to the event,
 78 needs information about the architecture */
79//void set_fields_offsets(LttTracefile *tf, LttEventType *event_type);
80//size_t get_fields_offsets(LttTracefile *tf, LttEventType *event_type, void *data);
81
82#if 0
83/* get the size of the field type according to
 84 * the facility size information. */
85static inline void preset_field_type_size(LttTracefile *tf,
86 LttEventType *event_type,
87 off_t offset_root, off_t offset_parent,
88 enum field_status *fixed_root, enum field_status *fixed_parent,
89 LttField *field);
90#endif //0
91
92/* map a fixed size or a block information from the file (fd) */
93static gint map_block(LttTracefile * tf, guint block_num);
94
95/* calculate nsec per cycles for current block */
96#if 0
97static guint32 calc_nsecs_per_cycle(LttTracefile * t);
98static guint64 cycles_2_ns(LttTracefile *tf, guint64 cycles);
99#endif //0
100
101/* go to the next event */
102static int ltt_seek_next_event(LttTracefile *tf);
103
104static int open_tracefiles(LttTrace *trace, gchar *root_path,
105 gchar *relative_path);
106static int ltt_process_metadata_tracefile(LttTracefile *tf);
107static void ltt_tracefile_time_span_get(LttTracefile *tf,
108 LttTime *start, LttTime *end);
109static void group_time_span_get(GQuark name, gpointer data, gpointer user_data);
110static gint map_block(LttTracefile * tf, guint block_num);
111static void ltt_update_event_size(LttTracefile *tf);
112
113/* Enable event debugging */
114static int a_event_debug = 0;
115
116void ltt_event_debug(int state)
117{
118 a_event_debug = state;
119}
120
121/* trace can be NULL
122 *
123 * Return value : 0 success, 1 bad tracefile
124 */
125static int parse_trace_header(ltt_subbuffer_header_t *header,
126 LttTracefile *tf, LttTrace *t)
127{
128 if (header->magic_number == LTT_MAGIC_NUMBER)
129 tf->reverse_bo = 0;
130 else if(header->magic_number == LTT_REV_MAGIC_NUMBER)
131 tf->reverse_bo = 1;
132 else /* invalid magic number, bad tracefile ! */
133 return 1;
134
135 if(t) {
136 t->ltt_major_version = header->major_version;
137 t->ltt_minor_version = header->minor_version;
138 t->arch_size = header->arch_size;
139 }
140 tf->alignment = header->alignment;
141
142 /* Get float byte order : might be different from int byte order
143 * (or is set to 0 if the trace has no float (kernel trace)) */
144 tf->float_word_order = 0;
145
146 switch(header->major_version) {
147 case 0:
148 case 1:
149 g_warning("Unsupported trace version : %hhu.%hhu",
150 header->major_version, header->minor_version);
151 return 1;
152 break;
153 case 2:
154 switch(header->minor_version) {
155 case 3:
156 {
157 struct ltt_subbuffer_header_2_3 *vheader = header;
158 tf->buffer_header_size = ltt_subbuffer_header_size();
159 tf->tscbits = 27;
160 tf->eventbits = 5;
161 tf->tsc_mask = ((1ULL << tf->tscbits) - 1);
162 tf->tsc_mask_next_bit = (1ULL << tf->tscbits);
163
164 if(t) {
165 t->start_freq = ltt_get_uint64(LTT_GET_BO(tf),
166 &vheader->start_freq);
167 t->freq_scale = ltt_get_uint32(LTT_GET_BO(tf),
168 &vheader->freq_scale);
169 if(father_trace) {
170 t->start_freq = father_trace->start_freq;
171 t->freq_scale = father_trace->freq_scale;
172 } else {
173 father_trace = t;
174 }
175 t->start_tsc = ltt_get_uint64(LTT_GET_BO(tf),
176 &vheader->cycle_count_begin);
177 t->start_monotonic = 0;
178 t->start_time.tv_sec = ltt_get_uint64(LTT_GET_BO(tf),
179 &vheader->start_time_sec);
180 t->start_time.tv_nsec = ltt_get_uint64(LTT_GET_BO(tf),
181 &vheader->start_time_usec);
182 t->start_time.tv_nsec *= 1000; /* microsec to nanosec */
183
184 t->start_time_from_tsc = ltt_time_from_uint64(
185 (double)t->start_tsc
186 * 1000000000.0 * tf->trace->freq_scale
187 / (double)t->start_freq);
188 }
189 }
190 break;
191 default:
192 g_warning("Unsupported trace version : %hhu.%hhu",
193 header->major_version, header->minor_version);
194 return 1;
195 }
196 break;
197 default:
198 g_warning("Unsupported trace version : %hhu.%hhu",
199 header->major_version, header->minor_version);
200 return 1;
201 }
202 return 0;
203}
204
205
206
207/*****************************************************************************
208 *Function name
209 * ltt_tracefile_open : open a trace file, construct a LttTracefile
210 *Input params
211 * t : the trace containing the tracefile
212 * fileName : path name of the trace file
213 * tf : the tracefile structure
214 *Return value
215 * : 0 for success, -1 otherwise.
216 ****************************************************************************/
217
218static gint ltt_tracefile_open(LttTrace *t, gchar * fileName, LttTracefile *tf)
219{
220 struct stat lTDFStat; /* Trace data file status */
221 ltt_subbuffer_header_t *header;
222 int page_size = getpagesize();
223
224 //open the file
225 tf->long_name = g_quark_from_string(fileName);
226 tf->trace = t;
227 tf->fd = open(fileName, O_RDONLY);
228 if(tf->fd < 0){
229 g_warning("Unable to open input data file %s\n", fileName);
230 goto end;
231 }
232
233 // Get the file's status
234 if(fstat(tf->fd, &lTDFStat) < 0){
235 g_warning("Unable to get the status of the input data file %s\n", fileName);
236 goto close_file;
237 }
238
239 // Is the file large enough to contain a trace
240 if(lTDFStat.st_size <
241 (off_t)(ltt_subbuffer_header_size())){
242 g_print("The input data file %s does not contain a trace\n", fileName);
243 goto close_file;
244 }
245
246 /* Temporarily map the buffer start header to get trace information */
 247 /* Head aligned on a multiple of the page size */
248 tf->buffer.head = mmap(0,
249 PAGE_ALIGN(ltt_subbuffer_header_size()), PROT_READ,
250 MAP_PRIVATE, tf->fd, 0);
251 if(tf->buffer.head == MAP_FAILED) {
252 perror("Error in allocating memory for buffer of tracefile");
253 goto close_file;
254 }
255 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
256
257 header = (ltt_subbuffer_header_t *)tf->buffer.head;
258
259 if(parse_trace_header(header, tf, NULL)) {
260 g_warning("parse_trace_header error");
261 goto unmap_file;
262 }
263
264 //store the size of the file
265 tf->file_size = lTDFStat.st_size;
266 tf->buf_size = ltt_get_uint32(LTT_GET_BO(tf), &header->buf_size);
267 tf->num_blocks = tf->file_size / tf->buf_size;
268 tf->events_lost = 0;
269 tf->subbuf_corrupt = 0;
270
271 if(munmap(tf->buffer.head,
272 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
273 g_warning("unmap size : %u\n",
274 PAGE_ALIGN(ltt_subbuffer_header_size()));
275 perror("munmap error");
276 g_assert(0);
277 }
278 tf->buffer.head = NULL;
279
280 //read the first block
281 if(map_block(tf,0)) {
282 perror("Cannot map block for tracefile");
283 goto close_file;
284 }
285
286 return 0;
287
288 /* Error */
289unmap_file:
290 if(munmap(tf->buffer.head,
291 PAGE_ALIGN(ltt_subbuffer_header_size()))) {
292 g_warning("unmap size : %u\n",
293 PAGE_ALIGN(ltt_subbuffer_header_size()));
294 perror("munmap error");
295 g_assert(0);
296 }
297close_file:
298 close(tf->fd);
299end:
300 return -1;
301}
302
303
304/*****************************************************************************
305 *Function name
306 * ltt_tracefile_close: close a trace file,
307 *Input params
308 * t : tracefile which will be closed
309 ****************************************************************************/
310
311static void ltt_tracefile_close(LttTracefile *t)
312{
313 int page_size = getpagesize();
314
315 if(t->buffer.head != NULL)
316 if(munmap(t->buffer.head, PAGE_ALIGN(t->buf_size))) {
317 g_warning("unmap size : %u\n",
318 PAGE_ALIGN(t->buf_size));
319 perror("munmap error");
320 g_assert(0);
321 }
322
323 close(t->fd);
324}
325
326/****************************************************************************
327 * get_absolute_pathname
328 *
 329 * return the unique (canonical absolute) pathname in the system
 330 *
 331 * MD : Fixed this function so it uses realpath, handling previously
 332 * forgotten cases ('..' was not handled correctly before).
333 *
334 ****************************************************************************/
335void get_absolute_pathname(const gchar *pathname, gchar * abs_pathname)
336{
337 abs_pathname[0] = '\0';
338
339 if (realpath(pathname, abs_pathname) != NULL)
340 return;
341 else
342 {
343 /* error, return the original path unmodified */
344 strcpy(abs_pathname, pathname);
345 return;
346 }
347 return;
348}
349
350/* Search for something like : .*_.*
351 *
 352 * The left side is the name, the right side is the CPU number.
353 * Exclude leading /.
354 * Exclude flight- prefix.
355 */
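/* For example (illustrative only) : a per-cpu tracefile named "kernel_2" or
 * "flight-kernel_2" yields the name "kernel" and the number 2, whereas a
 * name with no trailing "_<cpu>" is handled as a userspace tracefile whose
 * tid, pgid and creation time are parsed out of the path instead. */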
356
357static int get_tracefile_name_number(gchar *raw_name,
358 GQuark *name,
359 guint *num,
360 gulong *tid,
361 gulong *pgid,
362 guint64 *creation)
363{
364 guint raw_name_len = strlen(raw_name);
365 gchar char_name[PATH_MAX];
366 int i;
367 int underscore_pos;
368 long int cpu_num;
369 gchar *endptr;
370 gchar *tmpptr;
371
372 /* skip leading / */
373 for(i = 0; i < raw_name_len-1;i++) {
374 if(raw_name[i] != '/')
375 break;
376 }
377 raw_name = &raw_name[i];
378 raw_name_len = strlen(raw_name);
379
380 for(i=raw_name_len-1;i>=0;i--) {
381 if(raw_name[i] == '_') break;
382 }
383 if(i==-1) { /* Either not found or name length is 0 */
384 /* This is a userspace tracefile */
385 strncpy(char_name, raw_name, raw_name_len);
386 char_name[raw_name_len] = '\0';
387 *name = g_quark_from_string(char_name);
388 *num = 0; /* unknown cpu */
389 for(i=0;i<raw_name_len;i++) {
390 if(raw_name[i] == '/') {
391 break;
392 }
393 }
394 i++;
395 for(;i<raw_name_len;i++) {
396 if(raw_name[i] == '/') {
397 break;
398 }
399 }
400 i++;
401 for(;i<raw_name_len;i++) {
402 if(raw_name[i] == '-') {
403 break;
404 }
405 }
406 if(i == raw_name_len) return -1;
407 i++;
408 tmpptr = &raw_name[i];
409 for(;i<raw_name_len;i++) {
410 if(raw_name[i] == '.') {
411 raw_name[i] = ' ';
412 break;
413 }
414 }
415 *tid = strtoul(tmpptr, &endptr, 10);
416 if(endptr == tmpptr)
417 return -1; /* No digit */
418 if(*tid == ULONG_MAX)
419 return -1; /* underflow / overflow */
420 i++;
421 tmpptr = &raw_name[i];
422 for(;i<raw_name_len;i++) {
423 if(raw_name[i] == '.') {
424 raw_name[i] = ' ';
425 break;
426 }
427 }
428 *pgid = strtoul(tmpptr, &endptr, 10);
429 if(endptr == tmpptr)
430 return -1; /* No digit */
431 if(*pgid == ULONG_MAX)
432 return -1; /* underflow / overflow */
433 i++;
434 tmpptr = &raw_name[i];
435 *creation = strtoull(tmpptr, &endptr, 10);
436 if(endptr == tmpptr)
437 return -1; /* No digit */
438 if(*creation == G_MAXUINT64)
439 return -1; /* underflow / overflow */
440 } else {
441 underscore_pos = i;
442
443 cpu_num = strtol(raw_name+underscore_pos+1, &endptr, 10);
444
445 if(endptr == raw_name+underscore_pos+1)
446 return -1; /* No digit */
447 if(cpu_num == LONG_MIN || cpu_num == LONG_MAX)
448 return -1; /* underflow / overflow */
449
450 if (!strncmp(raw_name, "flight-", sizeof("flight-") - 1)) {
451 raw_name += sizeof("flight-") - 1;
452 underscore_pos -= sizeof("flight-") - 1;
453 }
454 strncpy(char_name, raw_name, underscore_pos);
455 char_name[underscore_pos] = '\0';
456 *name = g_quark_from_string(char_name);
457 *num = cpu_num;
458 }
459
460
461 return 0;
462}
463
464
465GData **ltt_trace_get_tracefiles_groups(LttTrace *trace)
466{
467 return &trace->tracefiles;
468}
469
470
471void compute_tracefile_group(GQuark key_id,
472 GArray *group,
473 struct compute_tracefile_group_args *args)
474{
475 int i;
476 LttTracefile *tf;
477
478 for(i=0; i<group->len; i++) {
479 tf = &g_array_index (group, LttTracefile, i);
480 if(tf->cpu_online)
481 args->func(tf, args->func_args);
482 }
483}
484
485
486static void ltt_tracefile_group_destroy(gpointer data)
487{
488 GArray *group = (GArray *)data;
489 int i;
490 LttTracefile *tf;
491
492 if (group->len > 0)
493 destroy_marker_data(g_array_index (group, LttTracefile, 0).mdata);
494 for(i=0; i<group->len; i++) {
495 tf = &g_array_index (group, LttTracefile, i);
496 if(tf->cpu_online)
497 ltt_tracefile_close(tf);
498 }
499 g_array_free(group, TRUE);
500}
501
502static gboolean ltt_tracefile_group_has_cpu_online(gpointer data)
503{
504 GArray *group = (GArray *)data;
505 int i;
506 LttTracefile *tf;
507
508 for(i=0; i<group->len; i++) {
509 tf = &g_array_index (group, LttTracefile, i);
510 if(tf->cpu_online)
511 return 1;
512 }
513 return 0;
514}
515
516
 517/* Open each tracefile under a specific directory. Put them in a
 518 * GData : this permits accessing them by their tracefile group pathname,
 519 * e.g. the control/module tracefile group is accessed with the key
 520 * "control/module".
521 *
522 * relative path is the path relative to the trace root
523 * root path is the full path
524 *
525 * A tracefile group is simply an array where all the per cpu tracefiles sit.
526 */
527
528static int open_tracefiles(LttTrace *trace, gchar *root_path, gchar *relative_path)
529{
530 DIR *dir = opendir(root_path);
531 struct dirent *entry;
532 struct stat stat_buf;
533 int ret, i;
534 struct marker_data *mdata;
535
536 gchar path[PATH_MAX];
537 int path_len;
538 gchar *path_ptr;
539
540 int rel_path_len;
541 gchar rel_path[PATH_MAX];
542 gchar *rel_path_ptr;
543 LttTracefile tmp_tf;
544
545 if(dir == NULL) {
546 perror(root_path);
547 return ENOENT;
548 }
549
550 strncpy(path, root_path, PATH_MAX-1);
551 path_len = strlen(path);
552 path[path_len] = '/';
553 path_len++;
554 path_ptr = path + path_len;
555
556 strncpy(rel_path, relative_path, PATH_MAX-1);
557 rel_path_len = strlen(rel_path);
558 rel_path[rel_path_len] = '/';
559 rel_path_len++;
560 rel_path_ptr = rel_path + rel_path_len;
561
562 while((entry = readdir(dir)) != NULL) {
563
564 if(entry->d_name[0] == '.') continue;
565
566 strncpy(path_ptr, entry->d_name, PATH_MAX - path_len);
567 strncpy(rel_path_ptr, entry->d_name, PATH_MAX - rel_path_len);
568
569 ret = stat(path, &stat_buf);
570 if(ret == -1) {
571 perror(path);
572 continue;
573 }
574
575 g_debug("Tracefile file or directory : %s\n", path);
576
577 // if(strcmp(rel_path, "/eventdefs") == 0) continue;
578
579 if(S_ISDIR(stat_buf.st_mode)) {
580
581 g_debug("Entering subdirectory...\n");
582 ret = open_tracefiles(trace, path, rel_path);
583 if(ret < 0) continue;
584 } else if(S_ISREG(stat_buf.st_mode)) {
585 GQuark name;
586 guint num;
587 gulong tid, pgid;
588 guint64 creation;
589 GArray *group;
590 num = 0;
591 tid = pgid = 0;
592 creation = 0;
593 if(get_tracefile_name_number(rel_path, &name, &num, &tid, &pgid, &creation))
594 continue; /* invalid name */
595
596 g_debug("Opening file.\n");
597 if(ltt_tracefile_open(trace, path, &tmp_tf)) {
598 g_info("Error opening tracefile %s", path);
599
600 continue; /* error opening the tracefile : bad magic number ? */
601 }
602
603 g_debug("Tracefile name is %s and number is %u",
604 g_quark_to_string(name), num);
605
606 mdata = NULL;
607 tmp_tf.cpu_online = 1;
608 tmp_tf.cpu_num = num;
609 tmp_tf.name = name;
610 tmp_tf.tid = tid;
611 tmp_tf.pgid = pgid;
612 tmp_tf.creation = creation;
613 group = g_datalist_id_get_data(&trace->tracefiles, name);
614 if(group == NULL) {
615 /* Elements are automatically cleared when the array is allocated.
 616 * This sets the cpu_online variable to 0 : cpu offline, by default.
617 */
618 group = g_array_sized_new (FALSE, TRUE, sizeof(LttTracefile), 10);
619 g_datalist_id_set_data_full(&trace->tracefiles, name,
620 group, ltt_tracefile_group_destroy);
621 mdata = allocate_marker_data();
622 if (!mdata)
623 g_error("Error in allocating marker data");
624 }
625
626 /* Add the per cpu tracefile to the named group */
627 unsigned int old_len = group->len;
628 if(num+1 > old_len)
629 group = g_array_set_size(group, num+1);
630
631 g_assert(group->len > 0);
632 if (!mdata)
633 mdata = g_array_index (group, LttTracefile, 0).mdata;
634
635 g_array_index (group, LttTracefile, num) = tmp_tf;
636 g_array_index (group, LttTracefile, num).event.tracefile =
637 &g_array_index (group, LttTracefile, num);
638 for (i = 0; i < group->len; i++)
639 g_array_index (group, LttTracefile, i).mdata = mdata;
640 }
641 }
642
643 closedir(dir);
644
645 return 0;
646}
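/* Illustrative sketch (not compiled) : once open_tracefiles() has populated
 * trace->tracefiles, a tracefile group can be looked up by its group pathname
 * and its per-cpu members iterated. The "kernel" group name below is only an
 * example. */
#if 0
static void example_iterate_group(LttTrace *trace)
{
  GArray *group;
  guint i;

  group = g_datalist_id_get_data(&trace->tracefiles,
                                 g_quark_from_string("kernel"));
  if (group == NULL)
    return;
  for (i = 0; i < group->len; i++) {
    LttTracefile *tf = &g_array_index(group, LttTracefile, i);
    if (tf->cpu_online)
      g_debug("cpu %u of group kernel is online", tf->cpu_num);
  }
}
#endif //0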
647
648
 649/* Assumes the tracefile is already positioned at the beginning. This makes
 650 * sense because it must be done right after opening the tracefile. */
651static int ltt_process_metadata_tracefile(LttTracefile *tf)
652{
653 int err;
654 guint i;
655
656 while(1) {
657 err = ltt_tracefile_read_seek(tf);
658 if(err == EPERM) goto seek_error;
659 else if(err == ERANGE) break; /* End of tracefile */
660
661 err = ltt_tracefile_read_update_event(tf);
662 if(err) goto update_error;
663
664 /* The rules are :
665 * It contains only core events :
666 * 0 : set_marker_id
667 * 1 : set_marker_format
668 */
669 if(tf->event.event_id >= MARKER_CORE_IDS) {
670 /* Should only contain core events */
671 g_warning("Error in processing metadata file %s, "
672 "should not contain event id %u.", g_quark_to_string(tf->name),
673 tf->event.event_id);
674 err = EPERM;
675 goto event_id_error;
676 } else {
677 char *pos;
678 const char *channel_name, *marker_name, *format;
679 uint16_t id;
680 guint8 int_size, long_size, pointer_size, size_t_size, alignment;
681
682 switch((enum marker_id)tf->event.event_id) {
683 case MARKER_ID_SET_MARKER_ID:
684 channel_name = pos = tf->event.data;
685 pos += strlen(channel_name) + 1;
686 marker_name = pos;
687 g_debug("Doing MARKER_ID_SET_MARKER_ID of marker %s.%s",
688 channel_name, marker_name);
689 pos += strlen(marker_name) + 1;
690 pos += ltt_align((size_t)pos, sizeof(guint16), tf->alignment);
691 id = ltt_get_uint16(LTT_GET_BO(tf), pos);
692 g_debug("In MARKER_ID_SET_MARKER_ID of marker %s.%s id %hu",
693 channel_name, marker_name, id);
694 pos += sizeof(guint16);
695 int_size = *(guint8*)pos;
696 pos += sizeof(guint8);
697 long_size = *(guint8*)pos;
698 pos += sizeof(guint8);
699 pointer_size = *(guint8*)pos;
700 pos += sizeof(guint8);
701 size_t_size = *(guint8*)pos;
702 pos += sizeof(guint8);
703 alignment = *(guint8*)pos;
704 pos += sizeof(guint8);
705 marker_id_event(tf->trace,
706 g_quark_from_string(channel_name),
707 g_quark_from_string(marker_name),
708 id, int_size, long_size,
709 pointer_size, size_t_size, alignment);
710 break;
711 case MARKER_ID_SET_MARKER_FORMAT:
712 channel_name = pos = tf->event.data;
713 pos += strlen(channel_name) + 1;
714 marker_name = pos;
715 g_debug("Doing MARKER_ID_SET_MARKER_FORMAT of marker %s.%s",
716 channel_name, marker_name);
717 pos += strlen(marker_name) + 1;
718 format = pos;
719 pos += strlen(format) + 1;
720 marker_format_event(tf->trace,
721 g_quark_from_string(channel_name),
722 g_quark_from_string(marker_name),
723 format);
724 /* get information from dictionary TODO */
725 break;
726 default:
727 g_warning("Error in processing metadata file %s, "
728 "unknown event id %hhu.",
729 g_quark_to_string(tf->name),
730 tf->event.event_id);
731 err = EPERM;
732 goto event_id_error;
733 }
734 }
735 }
736 return 0;
737
738 /* Error handling */
739event_id_error:
740update_error:
741seek_error:
 742 g_warning("An error occurred in metadata tracefile parsing");
743 return err;
744}
745
746/*
747 * Open a trace and return its LttTrace handle.
748 *
749 * pathname must be the directory of the trace
750 */
751
752LttTrace *ltt_trace_open(const gchar *pathname)
753{
754 gchar abs_path[PATH_MAX];
755 LttTrace * t;
756 LttTracefile *tf;
757 GArray *group;
758 int i, ret;
759 ltt_subbuffer_header_t *header;
760 DIR *dir;
761 struct dirent *entry;
762 struct stat stat_buf;
763 gchar path[PATH_MAX];
764
765 t = g_new(LttTrace, 1);
766 if(!t) goto alloc_error;
767
768 get_absolute_pathname(pathname, abs_path);
769 t->pathname = g_quark_from_string(abs_path);
770
771 g_datalist_init(&t->tracefiles);
772
773 /* Test to see if it looks like a trace */
774 dir = opendir(abs_path);
775 if(dir == NULL) {
776 perror(abs_path);
777 goto open_error;
778 }
779 while((entry = readdir(dir)) != NULL) {
780 strcpy(path, abs_path);
781 strcat(path, "/");
782 strcat(path, entry->d_name);
783 ret = stat(path, &stat_buf);
784 if(ret == -1) {
785 perror(path);
786 continue;
787 }
788 }
789 closedir(dir);
790
791 /* Open all the tracefiles */
792 if(open_tracefiles(t, abs_path, "")) {
793 g_warning("Error opening tracefile %s", abs_path);
794 goto find_error;
795 }
796
797 /* Parse each trace metadata_N files : get runtime fac. info */
798 group = g_datalist_id_get_data(&t->tracefiles, LTT_TRACEFILE_NAME_METADATA);
799 if(group == NULL) {
800 g_error("Trace %s has no metadata tracefile", abs_path);
801 g_assert(0);
802 goto find_error;
803 }
804
805 /*
806 * Get the trace information for the metadata_0 tracefile.
 807 * Getting a correct trace start_time and start_tsc is ensured by the fact
808 * that no subbuffers are supposed to be lost in the metadata channel.
809 * Therefore, the first subbuffer contains the start_tsc timestamp in its
810 * buffer header.
811 */
812 g_assert(group->len > 0);
813 tf = &g_array_index (group, LttTracefile, 0);
814 header = (ltt_subbuffer_header_t *)tf->buffer.head;
815 ret = parse_trace_header(header, tf, t);
816 g_assert(!ret);
817
818 t->num_cpu = group->len;
819
820 //ret = allocate_marker_data(t);
821 //if (ret)
822 // g_error("Error in allocating marker data");
823
824 for(i=0; i<group->len; i++) {
825 tf = &g_array_index (group, LttTracefile, i);
826 if (tf->cpu_online)
827 if(ltt_process_metadata_tracefile(tf))
828 goto find_error;
829 // goto metadata_error;
830 }
831
832 return t;
833
834 /* Error handling */
835//metadata_error:
836// destroy_marker_data(t);
837find_error:
838 g_datalist_clear(&t->tracefiles);
839open_error:
840 g_free(t);
841alloc_error:
842 return NULL;
843
844}
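/* Illustrative usage sketch (not compiled) : open a trace directory, query
 * its time span and close it. The path below is only an example. */
#if 0
static void example_open_trace(void)
{
  LttTrace *trace;
  LttTime start, end;

  trace = ltt_trace_open("/tmp/trace1");
  if (trace == NULL)
    return;
  ltt_trace_time_span_get(trace, &start, &end);
  g_info("trace spans %lu.%09lu to %lu.%09lu",
         (unsigned long)start.tv_sec, (unsigned long)start.tv_nsec,
         (unsigned long)end.tv_sec, (unsigned long)end.tv_nsec);
  ltt_trace_close(trace);
}
#endif //0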
845
 846/* Open another, completely independent, instance of a trace.
847 *
848 * A read on this new instance will read the first event of the trace.
849 *
850 * When we copy a trace, we want all the opening actions to happen again :
 851 * the trace will be reopened and totally independent from the original.
852 * That's why we call ltt_trace_open.
853 */
854LttTrace *ltt_trace_copy(LttTrace *self)
855{
856 return ltt_trace_open(g_quark_to_string(self->pathname));
857}
858
859/*
860 * Close a trace
861 */
862
863void ltt_trace_close(LttTrace *t)
864{
865 g_datalist_clear(&t->tracefiles);
866 g_free(t);
867}
868
869
870/*****************************************************************************
871 * Get the start time and end time of the trace
872 ****************************************************************************/
873
874void ltt_tracefile_time_span_get(LttTracefile *tf,
875 LttTime *start, LttTime *end)
876{
877 int err;
878
879 err = map_block(tf, 0);
880 if(unlikely(err)) {
881 g_error("Can not map block");
882 *start = ltt_time_infinite;
883 } else
884 *start = tf->buffer.begin.timestamp;
885
886 err = map_block(tf, tf->num_blocks - 1); /* Last block */
887 if(unlikely(err)) {
888 g_error("Can not map block");
889 *end = ltt_time_zero;
890 } else
891 *end = tf->buffer.end.timestamp;
892}
893
894struct tracefile_time_span_get_args {
895 LttTrace *t;
896 LttTime *start;
897 LttTime *end;
898};
899
900static void group_time_span_get(GQuark name, gpointer data, gpointer user_data)
901{
902 struct tracefile_time_span_get_args *args =
903 (struct tracefile_time_span_get_args*)user_data;
904
905 GArray *group = (GArray *)data;
906 int i;
907 LttTracefile *tf;
908 LttTime tmp_start;
909 LttTime tmp_end;
910
911 for(i=0; i<group->len; i++) {
912 tf = &g_array_index (group, LttTracefile, i);
913 if(tf->cpu_online) {
914 ltt_tracefile_time_span_get(tf, &tmp_start, &tmp_end);
915 if(ltt_time_compare(*args->start, tmp_start)>0) *args->start = tmp_start;
916 if(ltt_time_compare(*args->end, tmp_end)<0) *args->end = tmp_end;
917 }
918 }
919}
920
921/* return the start and end time of a trace */
922
923void ltt_trace_time_span_get(LttTrace *t, LttTime *start, LttTime *end)
924{
925 LttTime min_start = ltt_time_infinite;
926 LttTime max_end = ltt_time_zero;
927 struct tracefile_time_span_get_args args = { t, &min_start, &max_end };
928
929 g_datalist_foreach(&t->tracefiles, &group_time_span_get, &args);
930
931 if(start != NULL) *start = min_start;
932 if(end != NULL) *end = max_end;
933
934}
935
936
937/* Seek to the first event in a tracefile that has a time equal or greater than
938 * the time passed in parameter.
939 *
 940 * If the time parameter is outside the tracefile time span, seek to the first
 941 * event, or return ERANGE if it is after the end.
 942 *
 943 * If the time parameter is before the first event, we have to seek specially
 944 * to it.
945 *
946 * If the time is after the end of the trace, return ERANGE.
947 *
948 * Do a binary search to find the right block, then a sequential search in the
949 * block to find the event.
950 *
 951 * In the special case where the requested time falls inside a block that has
 952 * no event corresponding to it, the first event of the next block will be
 953 * used instead.
954 *
955 * IMPORTANT NOTE : // FIXME everywhere...
956 *
957 * You MUST NOT do a ltt_tracefile_read right after a ltt_tracefile_seek_time :
958 * you will jump over an event if you do.
959 *
960 * Return value : 0 : no error, the tf->event can be used
 961 * ERANGE : time is after the last event of the trace
962 * otherwise : this is an error.
963 *
964 * */
965
966int ltt_tracefile_seek_time(LttTracefile *tf, LttTime time)
967{
968 int ret = 0;
969 int err;
970 unsigned int block_num, high, low;
971
972 /* seek at the beginning of trace */
973 err = map_block(tf, 0); /* First block */
974 if(unlikely(err)) {
975 g_error("Can not map block");
976 goto fail;
977 }
978
 979 /* If the time is lower than or equal to the beginning of the trace,
980 * go to the first event. */
981 if(ltt_time_compare(time, tf->buffer.begin.timestamp) <= 0) {
982 ret = ltt_tracefile_read(tf);
983 if(ret == ERANGE) goto range;
984 else if (ret) goto fail;
985 goto found; /* There is either no event in the trace or the event points
986 to the first event in the trace */
987 }
988
989 err = map_block(tf, tf->num_blocks - 1); /* Last block */
990 if(unlikely(err)) {
991 g_error("Can not map block");
992 goto fail;
993 }
994
995 /* If the time is after the end of the trace, return ERANGE. */
996 if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
997 goto range;
998 }
999
1000 /* Binary search the block */
1001 high = tf->num_blocks - 1;
1002 low = 0;
1003
1004 while(1) {
1005 block_num = ((high-low) / 2) + low;
1006
1007 err = map_block(tf, block_num);
1008 if(unlikely(err)) {
1009 g_error("Can not map block");
1010 goto fail;
1011 }
1012 if(high == low) {
1013 /* We cannot divide anymore : this is what would happen if the time
1014 * requested was exactly between two consecutive buffers' end and start
1015 * timestamps. This is also what would happen if we didn't deal with
1016 * out-of-span cases earlier in this function. */
1017 /* The event is right in the buffer!
1018 * (or in the next buffer first event) */
1019 while(1) {
1020 ret = ltt_tracefile_read(tf);
1021 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1022 else if(ret) goto fail;
1023
1024 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1025 goto found;
1026 }
1027
1028 } else if(ltt_time_compare(time, tf->buffer.begin.timestamp) < 0) {
1029 /* go to lower part */
1030 high = block_num - 1;
1031 } else if(ltt_time_compare(time, tf->buffer.end.timestamp) > 0) {
1032 /* go to higher part */
1033 low = block_num + 1;
1034 } else {/* The event is right in the buffer!
1035 (or in the next buffer first event) */
1036 while(1) {
1037 ret = ltt_tracefile_read(tf);
1038 if(ret == ERANGE) goto range; /* ERANGE or EPERM */
1039 else if(ret) goto fail;
1040
1041 if(ltt_time_compare(time, tf->event.event_time) <= 0)
1042 break;
1043 }
1044 goto found;
1045 }
1046 }
1047
1048found:
1049 return 0;
1050range:
1051 return ERANGE;
1052
1053 /* Error handling */
1054fail:
1055 g_error("ltt_tracefile_seek_time failed on tracefile %s",
1056 g_quark_to_string(tf->name));
1057 return EPERM;
1058}
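/* Illustrative sketch (not compiled) : position a tracefile at a given time.
 * As noted above, tf->event is already valid after a successful seek, so do
 * not call ltt_tracefile_read() right away or an event would be skipped. */
#if 0
static void example_seek_time(LttTracefile *tf)
{
  LttTime target = ltt_time_from_uint64(1000000000ULL); /* 1 s, arbitrary */
  int ret = ltt_tracefile_seek_time(tf, target);

  if (ret == 0) {
    LttEvent *ev = ltt_tracefile_get_event(tf); /* first event >= target */
    g_debug("event id %u at %lu.%09lu", (guint)ev->event_id,
            (unsigned long)ev->event_time.tv_sec,
            (unsigned long)ev->event_time.tv_nsec);
  } else if (ret == ERANGE) {
    g_debug("requested time is after the last event of the tracefile");
  }
}
#endif //0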
1059
1060/* Seek to a position indicated by an LttEventPosition
1061 */
1062
1063int ltt_tracefile_seek_position(LttTracefile *tf, const LttEventPosition *ep)
1064{
1065 int err;
1066
1067 if(ep->tracefile != tf) {
1068 goto fail;
1069 }
1070
1071 err = map_block(tf, ep->block);
1072 if(unlikely(err)) {
1073 g_error("Can not map block");
1074 goto fail;
1075 }
1076
1077 tf->event.offset = ep->offset;
1078
1079 /* Put back the event real tsc */
1080 tf->event.tsc = ep->tsc;
1081 tf->buffer.tsc = ep->tsc;
1082
1083 err = ltt_tracefile_read_update_event(tf);
1084 if(err) goto fail;
1085
1086 /* deactivate this, as it does nothing for now
1087 err = ltt_tracefile_read_op(tf);
1088 if(err) goto fail;
1089 */
1090
1091 return 0;
1092
1093fail:
1094 g_error("ltt_tracefile_seek_position failed on tracefile %s",
1095 g_quark_to_string(tf->name));
1096 return 1;
1097}
1098
1099/* Given a TSC value, return the LttTime (seconds,nanoseconds) it
1100 * corresponds to.
1101 */
1102
1103LttTime ltt_interpolate_time_from_tsc(LttTracefile *tf, guint64 tsc)
1104{
1105 LttTime time;
1106
1107 if(tsc > tf->trace->start_tsc) {
1108 time = ltt_time_from_uint64(
1109 (double)(tsc - tf->trace->start_tsc)
1110 * 1000000000.0 * tf->trace->freq_scale
1111 / (double)tf->trace->start_freq);
1112 time = ltt_time_add(tf->trace->start_time_from_tsc, time);
1113 } else {
1114 time = ltt_time_from_uint64(
1115 (double)(tf->trace->start_tsc - tsc)
1116 * 1000000000.0 * tf->trace->freq_scale
1117 / (double)tf->trace->start_freq);
1118 time = ltt_time_sub(tf->trace->start_time_from_tsc, time);
1119 }
1120 return time;
1121}
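/* In other words, for tsc >= start_tsc :
 *   time = start_time_from_tsc
 *          + (tsc - start_tsc) * 10^9 * freq_scale / start_freq  [nanoseconds]
 * For example, with start_freq = 1 GHz and freq_scale = 1, a delta of
 * 2000000000 cycles corresponds to 2.0 s after start_time_from_tsc. */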
1122
1123/* Calculate the real event time based on the buffer boundaries */
1124LttTime ltt_interpolate_time(LttTracefile *tf, LttEvent *event)
1125{
1126 return ltt_interpolate_time_from_tsc(tf, tf->buffer.tsc);
1127}
1128
1129
1130/* Get the current event of the tracefile : valid until the next read */
1131LttEvent *ltt_tracefile_get_event(LttTracefile *tf)
1132{
1133 return &tf->event;
1134}
1135
1136
1137
1138/*****************************************************************************
1139 *Function name
1140 * ltt_tracefile_read : Read the next event in the tracefile
1141 *Input params
1142 * t : tracefile
1143 *Return value
1144 *
1145 * Returns 0 if an event can be used in tf->event.
1146 * Returns ERANGE on end of trace. The event in tf->event still can be used
1147 * (if the last block was not empty).
1148 * Returns EPERM on error.
1149 *
1150 * This function updates the tracefile event structure (tf->event) so that it
1151 * describes the event the tracefile currently points to.
1152 *
1153 * Note : you must seek back to the beginning of the trace to reinitialize the
1154 * tracefile after an error if you want coherent results. Such an error
1155 * happens, for example, when the last buffer of the trace contains no event :
1156 * the end of trace would not be returned, but an error.
1157 * We assume there is at least one event per buffer.
1158 ****************************************************************************/
1159
1160int ltt_tracefile_read(LttTracefile *tf)
1161{
1162 int err;
1163
1164 err = ltt_tracefile_read_seek(tf);
1165 if(err) return err;
1166 err = ltt_tracefile_read_update_event(tf);
1167 if(err) return err;
1168
1169 /* deactivate this, as it does nothing for now
1170 err = ltt_tracefile_read_op(tf);
1171 if(err) return err;
1172 */
1173
1174 return 0;
1175}
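/* Illustrative sketch (not compiled) : iterate over every event of a
 * tracefile, assuming it is positioned at the beginning of the trace
 * (as it is right after opening or after a seek to the trace start). */
#if 0
static void example_read_all(LttTracefile *tf)
{
  int err;

  while ((err = ltt_tracefile_read(tf)) == 0) {
    LttEvent *ev = ltt_tracefile_get_event(tf);
    /* process ev->event_id, ev->event_time, ev->data ... */
  }
  if (err == ERANGE)
    g_debug("end of tracefile reached");
  else
    g_warning("error %d while reading tracefile", err);
}
#endif //0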
1176
1177int ltt_tracefile_read_seek(LttTracefile *tf)
1178{
1179 int err;
1180
1181 /* Get next buffer until we finally have an event, or end of trace */
1182 while(1) {
1183 err = ltt_seek_next_event(tf);
1184 if(unlikely(err == ENOPROTOOPT)) {
1185 return EPERM;
1186 }
1187
1188 /* Are we at the end of the buffer ? */
1189 if(err == ERANGE) {
1190 if(unlikely(tf->buffer.index == tf->num_blocks-1)){ /* end of trace ? */
1191 return ERANGE;
1192 } else {
1193 /* get next block */
1194 err = map_block(tf, tf->buffer.index + 1);
1195 if(unlikely(err)) {
1196 g_error("Can not map block");
1197 return EPERM;
1198 }
1199 }
1200 } else break; /* We found an event ! */
1201 }
1202
1203 return 0;
1204}
1205
1206/* do an operation when reading a new event */
1207
1208/* This function does nothing for now */
1209#if 0
1210int ltt_tracefile_read_op(LttTracefile *tf)
1211{
1212 LttEvent *event;
1213
1214 event = &tf->event;
1215
1216 /* do event specific operation */
1217
1218 /* nothing */
1219
1220 return 0;
1221}
1222#endif
1223
1224static void print_debug_event_header(LttEvent *ev, void *start_pos, void *end_pos)
1225{
1226 unsigned int offset = 0;
1227 int i, j;
1228
1229 g_printf("Event header (tracefile %s offset %llx):\n",
1230 g_quark_to_string(ev->tracefile->long_name),
1231 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1232 + (long)start_pos - (long)ev->tracefile->buffer.head);
1233
1234 while (offset < (long)end_pos - (long)start_pos) {
1235 g_printf("%8lx", (long)start_pos - (long)ev->tracefile->buffer.head + offset);
1236 g_printf(" ");
1237
1238 for (i = 0; i < 4 ; i++) {
1239 for (j = 0; j < 4; j++) {
1240 if (offset + ((i * 4) + j) <
1241 (long)end_pos - (long)start_pos)
1242 g_printf("%02hhX",
1243 ((char*)start_pos)[offset + ((i * 4) + j)]);
1244 else
1245 g_printf(" ");
1246 g_printf(" ");
1247 }
1248 if (i < 4)
1249 g_printf(" ");
1250 }
1251 offset+=16;
1252 g_printf("\n");
1253 }
1254}
1255
1256
1257/* same as ltt_tracefile_read, but does not seek to the next event nor call
1258 * event specific operation. */
1259int ltt_tracefile_read_update_event(LttTracefile *tf)
1260{
1261 void * pos;
1262 LttEvent *event;
1263 void *pos_aligned;
1264
1265 event = &tf->event;
1266 pos = tf->buffer.head + event->offset;
1267
1268 /* Read event header */
1269
1270 /* Align the head */
1271 pos += ltt_align((size_t)pos, sizeof(guint32), tf->alignment);
1272 pos_aligned = pos;
1273
1274 event->timestamp = ltt_get_uint32(LTT_GET_BO(tf), pos);
1275 event->event_id = event->timestamp >> tf->tscbits;
1276 event->timestamp = event->timestamp & tf->tsc_mask;
1277 pos += sizeof(guint32);
1278
1279 switch (event->event_id) {
1280 case 29: /* LTT_RFLAG_ID_SIZE_TSC */
1281 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1282 pos += sizeof(guint16);
1283 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1284 pos += sizeof(guint16);
1285 if (event->event_size == 0xFFFF) {
1286 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1287 pos += sizeof(guint32);
1288 }
1289 pos += ltt_align((size_t)pos, sizeof(guint64), tf->alignment);
1290 tf->buffer.tsc = ltt_get_uint64(LTT_GET_BO(tf), pos);
1291 pos += sizeof(guint64);
1292 break;
1293 case 30: /* LTT_RFLAG_ID_SIZE */
1294 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1295 pos += sizeof(guint16);
1296 event->event_size = ltt_get_uint16(LTT_GET_BO(tf), pos);
1297 pos += sizeof(guint16);
1298 if (event->event_size == 0xFFFF) {
1299 event->event_size = ltt_get_uint32(LTT_GET_BO(tf), pos);
1300 pos += sizeof(guint32);
1301 }
1302 break;
1303 case 31: /* LTT_RFLAG_ID */
1304 event->event_id = ltt_get_uint16(LTT_GET_BO(tf), pos);
1305 pos += sizeof(guint16);
1306 event->event_size = G_MAXUINT;
1307 break;
1308 default:
1309 event->event_size = G_MAXUINT;
1310 break;
1311 }
1312
1313 if (likely(event->event_id != 29)) {
1314 /* No extended timestamp */
1315 if (event->timestamp < (tf->buffer.tsc & tf->tsc_mask))
1316 tf->buffer.tsc = ((tf->buffer.tsc & ~tf->tsc_mask) /* overflow */
1317 + tf->tsc_mask_next_bit)
1318 | (guint64)event->timestamp;
1319 else
1320 tf->buffer.tsc = (tf->buffer.tsc & ~tf->tsc_mask) /* no overflow */
1321 | (guint64)event->timestamp;
1322 }
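 /* Example of the timestamp extension above : with tscbits = 27,
 * tsc_mask = 0x7ffffff and tsc_mask_next_bit = 0x8000000. If the previous
 * full tsc was 0xf000000 (low 27 bits 0x7000000) and the new 27-bit
 * timestamp is 0x0000100, the timestamp is smaller than the previous low
 * bits, so a 27-bit overflow occurred and the full tsc becomes
 * (0x8000000 + 0x8000000) | 0x0000100 = 0x10000100. A timestamp of
 * 0x7100000 would instead give 0x8000000 | 0x7100000 = 0xf100000. */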
1323 event->tsc = tf->buffer.tsc;
1324
1325 event->event_time = ltt_interpolate_time(tf, event);
1326
1327 if (a_event_debug)
1328 print_debug_event_header(event, pos_aligned, pos);
1329
1330 event->data = pos;
1331
1332 /*
1333 * Let ltt_update_event_size update event->data according to the largest
1334 * alignment within the payload.
1335 * Get the data size and update the event fields with the current
1336 * information. */
1337 ltt_update_event_size(tf);
1338
1339 return 0;
1340}
1341
1342
1343/****************************************************************************
1344 *Function name
1345 * map_block : map a block from the file
1346 *Input Params
 1347 * tf : ltt tracefile
 1348 * block_num : the block which will be read
1349 *return value
1350 * 0 : success
1351 * EINVAL : lseek fail
1352 * EIO : can not read from the file
1353 ****************************************************************************/
1354
1355static gint map_block(LttTracefile * tf, guint block_num)
1356{
1357 int page_size = getpagesize();
1358 ltt_subbuffer_header_t *header;
1359
1360 g_assert(block_num < tf->num_blocks);
1361
1362 if(tf->buffer.head != NULL) {
1363 if(munmap(tf->buffer.head, PAGE_ALIGN(tf->buf_size))) {
1364 g_warning("unmap size : %u\n",
1365 PAGE_ALIGN(tf->buf_size));
1366 perror("munmap error");
1367 g_assert(0);
1368 }
1369 }
1370
 1371 /* Head aligned on a multiple of the page size */
1372 tf->buffer.head = mmap(0,
1373 PAGE_ALIGN(tf->buf_size),
1374 PROT_READ, MAP_PRIVATE, tf->fd,
1375 PAGE_ALIGN((off_t)tf->buf_size * (off_t)block_num));
1376
1377 if(tf->buffer.head == MAP_FAILED) {
1378 perror("Error in allocating memory for buffer of tracefile");
1379 g_assert(0);
1380 goto map_error;
1381 }
1382 g_assert( ( (gulong)tf->buffer.head&(8-1) ) == 0); // make sure it's aligned.
1383
1384
1385 tf->buffer.index = block_num;
1386
1387 header = (ltt_subbuffer_header_t *)tf->buffer.head;
1388
1389 tf->buffer.begin.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1390 &header->cycle_count_begin);
1391 tf->buffer.begin.freq = tf->trace->start_freq;
1392
1393 tf->buffer.begin.timestamp = ltt_interpolate_time_from_tsc(tf,
1394 tf->buffer.begin.cycle_count);
1395 tf->buffer.end.cycle_count = ltt_get_uint64(LTT_GET_BO(tf),
1396 &header->cycle_count_end);
1397 tf->buffer.end.freq = tf->trace->start_freq;
1398
1399 tf->buffer.lost_size = ltt_get_uint32(LTT_GET_BO(tf),
1400 &header->lost_size);
1401 tf->buffer.end.timestamp = ltt_interpolate_time_from_tsc(tf,
1402 tf->buffer.end.cycle_count);
1403 tf->buffer.tsc = tf->buffer.begin.cycle_count;
1404 tf->event.tsc = tf->buffer.tsc;
1405 tf->buffer.freq = tf->buffer.begin.freq;
1406
1407 /* FIXME
1408 * eventually support variable buffer size : will need a partial pre-read of
1409 * the headers to create an index when we open the trace... eventually. */
1410 g_assert(tf->buf_size == ltt_get_uint32(LTT_GET_BO(tf),
1411 &header->buf_size));
1412
1413 /* Make the current event point to the beginning of the buffer :
 1414 * this means that the next event read will return the first event. */
1415 tf->event.tracefile = tf;
1416 tf->event.block = block_num;
1417 tf->event.offset = 0;
1418
1419 if (header->events_lost) {
1420 g_warning("%d events lost so far in tracefile %s at block %u",
1421 (guint)header->events_lost,
1422 g_quark_to_string(tf->long_name),
1423 block_num);
1424 tf->events_lost = header->events_lost;
1425 }
1426 if (header->subbuf_corrupt) {
1427 g_warning("%d subbuffer(s) corrupted so far in tracefile %s at block %u",
1428 (guint)header->subbuf_corrupt,
1429 g_quark_to_string(tf->long_name),
1430 block_num);
1431 tf->subbuf_corrupt = header->subbuf_corrupt;
1432 }
1433
1434 return 0;
1435
1436map_error:
1437 return -errno;
1438}
1439
1440static void print_debug_event_data(LttEvent *ev)
1441{
1442 unsigned int offset = 0;
1443 int i, j;
1444
1445 if (!max(ev->event_size, ev->data_size))
1446 return;
1447
1448 g_printf("Event data (tracefile %s offset %llx):\n",
1449 g_quark_to_string(ev->tracefile->long_name),
1450 ((uint64_t)ev->tracefile->buffer.index * ev->tracefile->buf_size)
1451 + (long)ev->data - (long)ev->tracefile->buffer.head);
1452
1453 while (offset < max(ev->event_size, ev->data_size)) {
1454 g_printf("%8lx", (long)ev->data + offset
1455 - (long)ev->tracefile->buffer.head);
1456 g_printf(" ");
1457
1458 for (i = 0; i < 4 ; i++) {
1459 for (j = 0; j < 4; j++) {
1460 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size))
1461 g_printf("%02hhX", ((char*)ev->data)[offset + ((i * 4) + j)]);
1462 else
1463 g_printf(" ");
1464 g_printf(" ");
1465 }
1466 if (i < 4)
1467 g_printf(" ");
1468 }
1469
1470 g_printf(" ");
1471
1472 for (i = 0; i < 4; i++) {
1473 for (j = 0; j < 4; j++) {
1474 if (offset + ((i * 4) + j) < max(ev->event_size, ev->data_size)) {
1475 if (isprint(((char*)ev->data)[offset + ((i * 4) + j)]))
1476 g_printf("%c", ((char*)ev->data)[offset + ((i * 4) + j)]);
1477 else
1478 g_printf(".");
1479 } else
1480 g_printf(" ");
1481 }
1482 }
1483 offset+=16;
1484 g_printf("\n");
1485 }
1486}
1487
1488/* It will update the fields offsets too */
1489void ltt_update_event_size(LttTracefile *tf)
1490{
1491 off_t size = 0;
1492 char *tscdata;
1493 struct marker_info *info;
1494
1495 if (tf->name == LTT_TRACEFILE_NAME_METADATA) {
1496 switch((enum marker_id)tf->event.event_id) {
1497 case MARKER_ID_SET_MARKER_ID:
1498 size = strlen((char*)tf->event.data) + 1;
1499 g_debug("marker %s id set", (char*)tf->event.data + size);
1500 size += strlen((char*)tf->event.data + size) + 1;
1501 size += ltt_align(size, sizeof(guint16), tf->alignment);
1502 size += sizeof(guint16);
1503 size += sizeof(guint8);
1504 size += sizeof(guint8);
1505 size += sizeof(guint8);
1506 size += sizeof(guint8);
1507 size += sizeof(guint8);
1508 break;
1509 case MARKER_ID_SET_MARKER_FORMAT:
1510 size = strlen((char*)tf->event.data) + 1;
1511 g_debug("marker %s format set", (char*)tf->event.data);
1512 size += strlen((char*)tf->event.data + size) + 1;
1513 size += strlen((char*)tf->event.data + size) + 1;
1514 break;
1515 }
1516 }
1517
1518 info = marker_get_info_from_id(tf->mdata, tf->event.event_id);
1519
1520 if (tf->event.event_id >= MARKER_CORE_IDS)
1521 g_assert(info != NULL);
1522
1523 /* Do not update field offsets of core markers when initially reading the
1524 * metadata tracefile when the infos about these markers do not exist yet.
1525 */
1526 if (likely(info && info->fields)) {
1527 /* alignment */
1528 tf->event.data += ltt_align((off_t)(unsigned long)tf->event.data,
1529 info->largest_align,
1530 info->alignment);
1531 /* size, dynamically computed */
1532 if (info->size != -1)
1533 size = info->size;
1534 else
1535 size = marker_update_fields_offsets(marker_get_info_from_id(tf->mdata,
1536 tf->event.event_id), tf->event.data);
1537 }
1538
1539 tf->event.data_size = size;
1540
1541 /* Check consistency between kernel and LTTV structure sizes */
1542 if(tf->event.event_size == G_MAXUINT) {
1543 /* Event size too big to fit in the event size field */
1544 tf->event.event_size = tf->event.data_size;
1545 }
1546
1547 if (a_event_debug)
1548 print_debug_event_data(&tf->event);
1549
1550 if (tf->event.data_size != tf->event.event_size) {
1551 struct marker_info *info = marker_get_info_from_id(tf->mdata,
1552 tf->event.event_id);
1553 if (!info)
1554 g_error("Undescribed event %hhu in channel %s", tf->event.event_id,
1555 g_quark_to_string(tf->name));
1556 g_error("Kernel/LTTV event size differs for event %s: kernel %u, LTTV %u",
1557 g_quark_to_string(info->name),
1558 tf->event.event_size, tf->event.data_size);
1559 exit(-1);
1560 }
1561}
1562
1563
1564/* Take the tf current event offset and use the event id to figure out where is
1565 * the next event offset.
1566 *
 1567 * This is an internal function not meant to be used elsewhere : it will
1568 * not jump over the current block limits. Please consider using
1569 * ltt_tracefile_read to do this.
1570 *
1571 * Returns 0 on success
1572 * ERANGE if we are at the end of the buffer.
 1573 * ENOPROTOOPT if an error occurred when getting the current event size.
1574 */
1575static int ltt_seek_next_event(LttTracefile *tf)
1576{
1577 int ret = 0;
1578 void *pos;
1579
1580 /* seek over the buffer header if we are at the buffer start */
1581 if(tf->event.offset == 0) {
1582 tf->event.offset += tf->buffer_header_size;
1583
1584 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1585 ret = ERANGE;
1586 }
1587 goto found;
1588 }
1589
1590 pos = tf->event.data;
1591
1592 if(tf->event.data_size < 0) goto error;
1593
1594 pos += (size_t)tf->event.data_size;
1595
1596 tf->event.offset = pos - tf->buffer.head;
1597
1598 if(tf->event.offset == tf->buf_size - tf->buffer.lost_size) {
1599 ret = ERANGE;
1600 goto found;
1601 }
1602 g_assert(tf->event.offset < tf->buf_size - tf->buffer.lost_size);
1603
1604found:
1605 return ret;
1606
1607error:
1608 g_error("Error in ltt_seek_next_event for tracefile %s",
1609 g_quark_to_string(tf->name));
1610 return ENOPROTOOPT;
1611}
1612
1613#if 0
1614/*****************************************************************************
1615 *Function name
1616 * set_fields_offsets : set the precomputable offset of the fields
1617 *Input params
1618 * tracefile : opened trace file
1619 * event_type : the event type
1620 ****************************************************************************/
1621
1622void set_fields_offsets(LttTracefile *tf, LttEventType *event_type)
1623{
1624 LttField *field = event_type->root_field;
1625 enum field_status fixed_root = FIELD_FIXED, fixed_parent = FIELD_FIXED;
1626
1627 if(likely(field))
1628 preset_field_type_size(tf, event_type, 0, 0,
1629 &fixed_root, &fixed_parent,
1630 field);
1631
1632}
1633#endif //0
1634
1635
1636/*****************************************************************************
1637 *Function name
1638 * get_alignment : Get the alignment needed for a field.
1639 *Input params
1640 * field : field
1641 *
1642 * returns : The size on which it must be aligned.
1643 *
1644 ****************************************************************************/
1645#if 0
1646off_t get_alignment(LttField *field)
1647{
1648 LttType *type = &field->field_type;
1649
1650 switch(type->type_class) {
1651 case LTT_INT_FIXED:
1652 case LTT_UINT_FIXED:
1653 case LTT_POINTER:
1654 case LTT_CHAR:
1655 case LTT_UCHAR:
1656 case LTT_SHORT:
1657 case LTT_USHORT:
1658 case LTT_INT:
1659 case LTT_UINT:
1660 case LTT_LONG:
1661 case LTT_ULONG:
1662 case LTT_SIZE_T:
1663 case LTT_SSIZE_T:
1664 case LTT_OFF_T:
1665 case LTT_FLOAT:
1666 case LTT_ENUM:
1667 /* Align offset on type size */
1668 g_assert(field->field_size != 0);
1669 return field->field_size;
1670 break;
1671 case LTT_STRING:
1672 return 1;
1673 break;
1674 case LTT_ARRAY:
1675 g_assert(type->fields->len == 1);
1676 {
1677 LttField *child = &g_array_index(type->fields, LttField, 0);
1678 return get_alignment(child);
1679 }
1680 break;
1681 case LTT_SEQUENCE:
1682 g_assert(type->fields->len == 2);
1683 {
1684 off_t localign = 1;
1685 LttField *child = &g_array_index(type->fields, LttField, 0);
1686
1687 localign = max(localign, get_alignment(child));
1688
1689 child = &g_array_index(type->fields, LttField, 1);
1690 localign = max(localign, get_alignment(child));
1691
1692 return localign;
1693 }
1694 break;
1695 case LTT_STRUCT:
1696 case LTT_UNION:
1697 {
1698 guint i;
1699 off_t localign = 1;
1700
1701 for(i=0; i<type->fields->len; i++) {
1702 LttField *child = &g_array_index(type->fields, LttField, i);
1703 localign = max(localign, get_alignment(child));
1704 }
1705 return localign;
1706 }
1707 break;
1708 case LTT_NONE:
1709 default:
1710 g_error("get_alignment : unknown type");
1711 return -1;
1712 }
1713}
1714
1715#endif //0
1716
1717/*****************************************************************************
1718 *Function name
1719 * field_compute_static_size : Determine the size of fields known by their
1720 * sole definition. Unions, arrays and struct sizes might be known, but
1721 * the parser does not give that information.
1722 *Input params
1723 * tf : tracefile
1724 * field : field
1725 *
1726 ****************************************************************************/
1727#if 0
1728void field_compute_static_size(LttFacility *fac, LttField *field)
1729{
1730 LttType *type = &field->field_type;
1731
1732 switch(type->type_class) {
1733 case LTT_INT_FIXED:
1734 case LTT_UINT_FIXED:
1735 case LTT_POINTER:
1736 case LTT_CHAR:
1737 case LTT_UCHAR:
1738 case LTT_SHORT:
1739 case LTT_USHORT:
1740 case LTT_INT:
1741 case LTT_UINT:
1742 case LTT_LONG:
1743 case LTT_ULONG:
1744 case LTT_SIZE_T:
1745 case LTT_SSIZE_T:
1746 case LTT_OFF_T:
1747 case LTT_FLOAT:
1748 case LTT_ENUM:
1749 case LTT_STRING:
1750 /* nothing to do */
1751 break;
1752 case LTT_ARRAY:
1753 /* note this : array type size is the number of elements in the array,
 1754 * while the array field size is the length of the array in bytes */
1755 g_assert(type->fields->len == 1);
1756 {
1757 LttField *child = &g_array_index(type->fields, LttField, 0);
1758 field_compute_static_size(fac, child);
1759
1760 if(child->field_size != 0) {
1761 field->field_size = type->size * child->field_size;
1762 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1763 sizeof(off_t), type->size);
1764 } else {
1765 field->field_size = 0;
1766 }
1767 }
1768 break;
1769 case LTT_SEQUENCE:
1770 g_assert(type->fields->len == 2);
1771 {
1772 off_t local_offset = 0;
1773 LttField *child = &g_array_index(type->fields, LttField, 1);
1774 field_compute_static_size(fac, child);
1775 field->field_size = 0;
1776 type->size = 0;
1777 if(child->field_size != 0) {
1778 field->dynamic_offsets = g_array_sized_new(FALSE, TRUE,
1779 sizeof(off_t), SEQUENCE_AVG_ELEMENTS);
1780 }
1781 }
1782 break;
1783 case LTT_STRUCT:
1784 case LTT_UNION:
1785 {
1786 guint i;
1787 for(i=0;i<type->fields->len;i++) {
1788 LttField *child = &g_array_index(type->fields, LttField, i);
1789 field_compute_static_size(fac, child);
1790 if(child->field_size != 0) {
1791 type->size += ltt_align(type->size, get_alignment(child),
1792 fac->alignment);
1793 type->size += child->field_size;
1794 } else {
1795 /* As soon as we find a child with variable size, we have
1796 * a variable size */
1797 type->size = 0;
1798 break;
1799 }
1800 }
1801 field->field_size = type->size;
1802 }
1803 break;
1804 default:
1805 g_error("field_static_size : unknown type");
1806 }
1807
1808}
1809#endif //0
1810
1811
1812/*****************************************************************************
1813 *Function name
1814 * precompute_fields_offsets : set the precomputable offset of the fields
1815 *Input params
1816 * fac : facility
1817 * field : the field
1818 * offset : pointer to the current offset, must be incremented
1819 *
1820 * return : 1 : found a variable length field, stop the processing.
1821 * 0 otherwise.
1822 ****************************************************************************/
1823
1824#if 0
1825gint precompute_fields_offsets(LttFacility *fac, LttField *field, off_t *offset, gint is_compact)
1826{
1827 LttType *type = &field->field_type;
1828
1829 if(unlikely(is_compact)) {
1830 g_assert(field->field_size != 0);
1831 /* FIXME THIS IS A HUUUUUGE hack :
1832 * offset is between the compact_data field in struct LttEvent
1833 * and the address of the field root in the memory map.
1834 * ark. Both will stay at the same addresses while the event
1835 * is readable, so it's ok.
1836 */
1837 field->offset_root = 0;
1838 field->fixed_root = FIELD_FIXED;
1839 return 0;
1840 }
1841
1842 switch(type->type_class) {
1843 case LTT_INT_FIXED:
1844 case LTT_UINT_FIXED:
1845 case LTT_POINTER:
1846 case LTT_CHAR:
1847 case LTT_UCHAR:
1848 case LTT_SHORT:
1849 case LTT_USHORT:
1850 case LTT_INT:
1851 case LTT_UINT:
1852 case LTT_LONG:
1853 case LTT_ULONG:
1854 case LTT_SIZE_T:
1855 case LTT_SSIZE_T:
1856 case LTT_OFF_T:
1857 case LTT_FLOAT:
1858 case LTT_ENUM:
1859 g_assert(field->field_size != 0);
1860 /* Align offset on type size */
1861 *offset += ltt_align(*offset, get_alignment(field),
1862 fac->alignment);
1863 /* remember offset */
1864 field->offset_root = *offset;
1865 field->fixed_root = FIELD_FIXED;
1866 /* Increment offset */
1867 *offset += field->field_size;
1868 return 0;
1869 break;
1870 case LTT_STRING:
1871 field->offset_root = *offset;
1872 field->fixed_root = FIELD_FIXED;
1873 return 1;
1874 break;
1875 case LTT_ARRAY:
1876 g_assert(type->fields->len == 1);
1877 {
1878 LttField *child = &g_array_index(type->fields, LttField, 0);
1879
1880 *offset += ltt_align(*offset, get_alignment(field),
1881 fac->alignment);
1882
1883 /* remember offset */
1884 field->offset_root = *offset;
1885 field->array_offset = *offset;
1886 field->fixed_root = FIELD_FIXED;
1887
1888 /* Let the child be variable */
1889 //precompute_fields_offsets(tf, child, offset);
1890
1891 if(field->field_size != 0) {
1892 /* Increment offset */
1893 /* field_size is the array size in bytes */
1894 *offset += field->field_size;
1895 return 0;
1896 } else {
1897 return 1;
1898 }
1899 }
1900 break;
1901 case LTT_SEQUENCE:
1902 g_assert(type->fields->len == 2);
1903 {
1904 LttField *child;
1905 guint ret;
1906
1907 *offset += ltt_align(*offset, get_alignment(field),
1908 fac->alignment);
1909
1910 /* remember offset */
1911 field->offset_root = *offset;
1912 field->fixed_root = FIELD_FIXED;
1913
1914 child = &g_array_index(type->fields, LttField, 0);
1915 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1916 g_assert(ret == 0); /* Seq len cannot have variable len */
1917
1918 child = &g_array_index(type->fields, LttField, 1);
1919 *offset += ltt_align(*offset, get_alignment(child),
1920 fac->alignment);
1921 field->array_offset = *offset;
1922 /* Let the child be variable. */
1923 //ret = precompute_fields_offsets(fac, child, offset);
1924
1925 /* Cannot precompute fields offsets of sequence members, and has
1926 * variable length. */
1927 return 1;
1928 }
1929 break;
1930 case LTT_STRUCT:
1931 {
1932 LttField *child;
1933 guint i;
1934 gint ret=0;
1935
1936 *offset += ltt_align(*offset, get_alignment(field),
1937 fac->alignment);
1938 /* remember offset */
1939 field->offset_root = *offset;
1940 field->fixed_root = FIELD_FIXED;
1941
1942 for(i=0; i< type->fields->len; i++) {
1943 child = &g_array_index(type->fields, LttField, i);
1944 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1945
1946 if(ret) break;
1947 }
1948 return ret;
1949 }
1950 break;
1951 case LTT_UNION:
1952 {
1953 LttField *child;
1954 guint i;
1955 gint ret=0;
1956
1957 *offset += ltt_align(*offset, get_alignment(field),
1958 fac->alignment);
1959 /* remember offset */
1960 field->offset_root = *offset;
1961 field->fixed_root = FIELD_FIXED;
1962
1963 for(i=0; i< type->fields->len; i++) {
1964 *offset = field->offset_root;
1965 child = &g_array_index(type->fields, LttField, i);
1966 ret = precompute_fields_offsets(fac, child, offset, is_compact);
1967
1968 if(ret) break;
1969 }
1970 *offset = field->offset_root + field->field_size;
1971 return ret;
1972 }
1973
1974 break;
1975 case LTT_NONE:
1976 default:
1977 g_error("precompute_fields_offsets : unknown type");
1978 return 1;
1979 }
1980
1981}
1982
1983#endif //0
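
/* A minimal, standalone sketch (not part of the library) of the
 * align-then-advance idiom used by precompute_fields_offsets() above: pad
 * the running offset up to the field's alignment, record the aligned
 * offset, then advance by the field size.  The helper assumes power-of-two
 * alignments; all names are illustrative only. */
#if 0
static size_t example_align_then_advance(size_t *offset, size_t align,
    size_t field_size)
{
  size_t aligned = (*offset + (align - 1)) & ~(align - 1); /* round up */
  size_t field_offset = aligned;  /* what offset_root would record */

  *offset = aligned + field_size; /* where the next field may start */
  return field_offset;
}
#endif //0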
1984
1985#if 0
1986/*****************************************************************************
1987 *Function name
1988 * precompute_offsets : set the precomputable offset of an event type
1989 *Input params
1990 * tf : tracefile
1991 * event : event type
1992 *
1993 ****************************************************************************/
1994void precompute_offsets(LttFacility *fac, LttEventType *event)
1995{
1996 guint i;
1997 off_t offset = 0;
1998 gint ret;
1999
2000 /* First, compute the size of fixed size fields. Will determine size for
2001 * arrays, struct and unions, which is not done by the parser */
2002 for(i=0; i<event->fields->len; i++) {
2003 LttField *field = &g_array_index(event->fields, LttField, i);
2004 field_compute_static_size(fac, field);
2005 }
2006
2007 /* Precompute all known offsets */
2008 for(i=0; i<event->fields->len; i++) {
2009 LttField *field = &g_array_index(event->fields, LttField, i);
2010 if(event->has_compact_data && i == 0)
2011 ret = precompute_fields_offsets(fac, field, &offset, 1);
2012 else
2013 ret = precompute_fields_offsets(fac, field, &offset, 0);
2014 if(ret) break;
2015 }
2016}
2017#endif //0
2018
2019
2020
2021/*****************************************************************************
2022 *Function name
2023 * preset_field_type_size : set the fixed sizes of the field type
2024 *Input params
2025 * tf : tracefile
2026 * event_type : event type
2027 * offset_root : offset from the root
2028 * offset_parent : offset from the parent
2029 * fixed_root : Do we know a fixed offset to the root ?
2030 * fixed_parent : Do we know a fixed offset to the parent ?
2031 * field : field
2032 ****************************************************************************/
2033
2034
2035
2036 // Preset the fixed-size offsets. They are calculated just like genevent-new
2037 // does it: by incrementing an offset value that represents the distance from
2038 // the start of the event data.
2039 // The preset information consists of the offsets up to (and including) the
2040 // first variable-size element. All subsequent fields must be flagged "VARIABLE OFFSET".
2041#if 0
2042void preset_field_type_size(LttTracefile *tf, LttEventType *event_type,
2043 off_t offset_root, off_t offset_parent,
2044 enum field_status *fixed_root, enum field_status *fixed_parent,
2045 LttField *field)
2046{
2047 enum field_status local_fixed_root, local_fixed_parent;
2048 guint i;
2049 LttType *type;
2050
2051 g_assert(field->fixed_root == FIELD_UNKNOWN);
2052 g_assert(field->fixed_parent == FIELD_UNKNOWN);
2053 g_assert(field->fixed_size == FIELD_UNKNOWN);
2054
2055 type = field->field_type;
2056
2057 field->fixed_root = *fixed_root;
2058 if(field->fixed_root == FIELD_FIXED)
2059 field->offset_root = offset_root;
2060 else
2061 field->offset_root = 0;
2062
2063 field->fixed_parent = *fixed_parent;
2064 if(field->fixed_parent == FIELD_FIXED)
2065 field->offset_parent = offset_parent;
2066 else
2067 field->offset_parent = 0;
2068
2069 size_t current_root_offset;
2070 size_t current_offset;
2071 enum field_status current_child_status, final_child_status;
2072 size_t max_size;
2073
2074 switch(type->type_class) {
2075 case LTT_INT_FIXED:
2076 case LTT_UINT_FIXED:
2077 case LTT_CHAR:
2078 case LTT_UCHAR:
2079 case LTT_SHORT:
2080 case LTT_USHORT:
2081 case LTT_INT:
2082 case LTT_UINT:
2083 case LTT_FLOAT:
2084 case LTT_ENUM:
2085 field->field_size = ltt_type_size(tf->trace, type);
2086 field->fixed_size = FIELD_FIXED;
2087 break;
2088 case LTT_POINTER:
2089 field->field_size = (off_t)event_type->facility->pointer_size;
2090 field->fixed_size = FIELD_FIXED;
2091 break;
2092 case LTT_LONG:
2093 case LTT_ULONG:
2094 field->field_size = (off_t)event_type->facility->long_size;
2095 field->fixed_size = FIELD_FIXED;
2096 break;
2097 case LTT_SIZE_T:
2098 case LTT_SSIZE_T:
2099 case LTT_OFF_T:
2100 field->field_size = (off_t)event_type->facility->size_t_size;
2101 field->fixed_size = FIELD_FIXED;
2102 break;
2103 case LTT_SEQUENCE:
2104 local_fixed_root = FIELD_VARIABLE;
2105 local_fixed_parent = FIELD_VARIABLE;
2106 preset_field_type_size(tf, event_type,
2107 0, 0,
2108 &local_fixed_root, &local_fixed_parent,
2109 field->child[0]);
2110 field->fixed_size = FIELD_VARIABLE;
2111 field->field_size = 0;
2112 *fixed_root = FIELD_VARIABLE;
2113 *fixed_parent = FIELD_VARIABLE;
2114 break;
2115 case LTT_STRING:
2116 field->fixed_size = FIELD_VARIABLE;
2117 field->field_size = 0;
2118 *fixed_root = FIELD_VARIABLE;
2119 *fixed_parent = FIELD_VARIABLE;
2120 break;
2121 case LTT_ARRAY:
2122 local_fixed_root = FIELD_VARIABLE;
2123 local_fixed_parent = FIELD_VARIABLE;
2124 preset_field_type_size(tf, event_type,
2125 0, 0,
2126 &local_fixed_root, &local_fixed_parent,
2127 field->child[0]);
2128 field->fixed_size = field->child[0]->fixed_size;
2129 if(field->fixed_size == FIELD_FIXED) {
2130 field->field_size = type->element_number * field->child[0]->field_size;
2131 } else {
2132 field->field_size = 0;
2133 *fixed_root = FIELD_VARIABLE;
2134 *fixed_parent = FIELD_VARIABLE;
2135 }
2136 break;
2137 case LTT_STRUCT:
2138 current_root_offset = field->offset_root;
2139 current_offset = 0;
2140 current_child_status = FIELD_FIXED;
2141 for(i=0;i<type->element_number;i++) {
2142 preset_field_type_size(tf, event_type,
2143 current_root_offset, current_offset,
2144 fixed_root, &current_child_status,
2145 field->child[i]);
2146 if(current_child_status == FIELD_FIXED) {
2147 current_root_offset += field->child[i]->field_size;
2148 current_offset += field->child[i]->field_size;
2149 } else {
2150 current_root_offset = 0;
2151 current_offset = 0;
2152 }
2153 }
2154 if(current_child_status != FIELD_FIXED) {
2155 *fixed_parent = current_child_status;
2156 field->field_size = 0;
2157 field->fixed_size = current_child_status;
2158 } else {
2159 field->field_size = current_offset;
2160 field->fixed_size = FIELD_FIXED;
2161 }
2162 break;
2163 case LTT_UNION:
2164 current_root_offset = field->offset_root;
2165 current_offset = 0;
2166 max_size = 0;
2167 final_child_status = FIELD_FIXED;
2168 for(i=0;i<type->element_number;i++) {
2169 enum field_status current_root_child_status = FIELD_FIXED;
2170 enum field_status current_child_status = FIELD_FIXED;
2171 preset_field_type_size(tf, event_type,
2172 current_root_offset, current_offset,
2173 &current_root_child_status, &current_child_status,
2174 field->child[i]);
2175 if(current_child_status != FIELD_FIXED)
2176 final_child_status = current_child_status;
2177 else
2178 max_size = MAX(max_size, field->child[i]->field_size);
2179 }
2180 if(final_child_status != FIELD_FIXED) {
2181 g_error("LTTV does not support variable size fields in unions.");
2182 /* This will stop the application. */
2183 *fixed_root = final_child_status;
2184 *fixed_parent = final_child_status;
2185 field->field_size = 0;
2186 field->fixed_size = final_child_status;
2187 } else {
2188 field->field_size = max_size;
2189 field->fixed_size = FIELD_FIXED;
2190 }
2191 break;
2192 case LTT_NONE:
2193 g_error("unexpected type NONE");
2194 break;
2195 }
2196
2197}
2198#endif //0
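
/* Hedged sketch of the "fixed until the first variable-size field" rule
 * that preset_field_type_size() above implements: walk the fields in
 * order, keep a running offset while every field so far has a known size,
 * and flag everything after the first variable-size field as having a
 * variable offset.  Types and names are illustrative only, not library
 * API. */
#if 0
enum example_status { EXAMPLE_FIXED, EXAMPLE_VARIABLE };

static void example_preset_offsets(const size_t *sizes, /* 0 == variable */
    size_t n, size_t *offsets, enum example_status *status)
{
  size_t i, offset = 0;
  enum example_status state = EXAMPLE_FIXED;

  for (i = 0; i < n; i++) {
    status[i] = state;
    offsets[i] = (state == EXAMPLE_FIXED) ? offset : 0;
    if (sizes[i] == 0)
      state = EXAMPLE_VARIABLE; /* every later field has a variable offset */
    else if (state == EXAMPLE_FIXED)
      offset += sizes[i];
  }
}
#endif //0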
2199
2200/*****************************************************************************
2201 *Function name
2202 * check_fields_compatibility : Check for compatibility between two fields :
2203 * do they use the same inner structure ?
2204 *Input params
2205 * event_type1 : event type
2206 * event_type2 : event type
2207 * field1 : field
2208 * field2 : field
2209 *Returns : 0 if identical
2210 * 1 if not.
2211 ****************************************************************************/
2212 // This function checks for equality of field types; it therefore does not
2213 // compare offsets as such. For instance, an aligned version of a structure is
2214 // compatible with an unaligned version of the same structure.
2215#if 0
2216gint check_fields_compatibility(LttEventType *event_type1,
2217 LttEventType *event_type2,
2218 LttField *field1, LttField *field2)
2219{
2220 guint different = 0;
2221 LttType *type1;
2222 LttType *type2;
2223
2224 if(field1 == NULL) {
2225 if(field2 == NULL) goto end;
2226 else {
2227 different = 1;
2228 goto end;
2229 }
2230 } else if(field2 == NULL) {
2231 different = 1;
2232 goto end;
2233 }
2234
2235 type1 = &field1->field_type;
2236 type2 = &field2->field_type;
2237
2238 if(type1->type_class != type2->type_class) {
2239 different = 1;
2240 goto end;
2241 }
2242 if(type1->network != type2->network) {
2243 different = 1;
2244 goto end;
2245 }
2246
2247 switch(type1->type_class) {
2248 case LTT_INT_FIXED:
2249 case LTT_UINT_FIXED:
2250 case LTT_POINTER:
2251 case LTT_CHAR:
2252 case LTT_UCHAR:
2253 case LTT_SHORT:
2254 case LTT_USHORT:
2255 case LTT_INT:
2256 case LTT_UINT:
2257 case LTT_LONG:
2258 case LTT_ULONG:
2259 case LTT_SIZE_T:
2260 case LTT_SSIZE_T:
2261 case LTT_OFF_T:
2262 case LTT_FLOAT:
2263 case LTT_ENUM:
2264 if(field1->field_size != field2->field_size)
2265 different = 1;
2266 break;
2267 case LTT_STRING:
2268 break;
2269 case LTT_ARRAY:
2270 {
2271 LttField *child1 = &g_array_index(type1->fields, LttField, 0);
2272 LttField *child2 = &g_array_index(type2->fields, LttField, 0);
2273
2274 if(type1->size != type2->size)
2275 different = 1;
2276 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2277 different = 1;
2278 }
2279 break;
2280 case LTT_SEQUENCE:
2281 {
2282 LttField *child1 = &g_array_index(type1->fields, LttField, 1);
2283 LttField *child2 = &g_array_index(type2->fields, LttField, 1);
2284
2285 if(check_fields_compatibility(event_type1, event_type2, child1, child2))
2286 different = 1;
2287 }
2288 break;
2289 case LTT_STRUCT:
2290 case LTT_UNION:
2291 {
2292 LttField *child;
2293 guint i;
2294
2295 if(type1->fields->len != type2->fields->len) {
2296 different = 1;
2297 goto end;
2298 }
2299
2300 for(i=0; i< type1->fields->len; i++) {
2301 LttField *child1;
2302 LttField *child2;
2303 child1 = &g_array_index(type1->fields, LttField, i);
2304 child2 = &g_array_index(type2->fields, LttField, i);
2305 different = check_fields_compatibility(event_type1,
2306 event_type2, child1, child2);
2307
2308 if(different) break;
2309 }
2310 }
2311 break;
2312 case LTT_NONE:
2313 default:
2314 g_error("check_fields_compatibility : unknown type");
2315 }
2316
2317end:
2318 return different;
2319}
2320#endif //0
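
/* Hedged, standalone sketch of the recursive strategy used by the two
 * check_fields_compatibility() variants above: compare the type class and
 * size at the current level, then recurse into the children.  The struct
 * and helper below are illustrative only and not part of the library. */
#if 0
struct example_type_node {
  int type_class;               /* stands in for the LTT type class */
  size_t size;
  unsigned int n_children;
  struct example_type_node **children;
};

static int example_types_differ(const struct example_type_node *a,
    const struct example_type_node *b)
{
  unsigned int i;

  if (a == NULL || b == NULL)
    return a != b;              /* exactly one side missing => different */
  if (a->type_class != b->type_class || a->size != b->size
      || a->n_children != b->n_children)
    return 1;
  for (i = 0; i < a->n_children; i++)
    if (example_types_differ(a->children[i], b->children[i]))
      return 1;
  return 0;
}
#endif //0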
2321
2322#if 0
2323gint check_fields_compatibility(LttEventType *event_type1,
2324 LttEventType *event_type2,
2325 LttField *field1, LttField *field2)
2326{
2327 guint different = 0;
2328 guint i;
2329 LttType *type1;
2330 LttType *type2;
2331
2332 if(field1 == NULL) {
2333 if(field2 == NULL) goto end;
2334 else {
2335 different = 1;
2336 goto end;
2337 }
2338 } else if(field2 == NULL) {
2339 different = 1;
2340 goto end;
2341 }
2342
2343 g_assert(field1->fixed_root != FIELD_UNKNOWN);
2344 g_assert(field2->fixed_root != FIELD_UNKNOWN);
2345 g_assert(field1->fixed_parent != FIELD_UNKNOWN);
2346 g_assert(field2->fixed_parent != FIELD_UNKNOWN);
2347 g_assert(field1->fixed_size != FIELD_UNKNOWN);
2348 g_assert(field2->fixed_size != FIELD_UNKNOWN);
2349
2350 type1 = field1->field_type;
2351 type2 = field2->field_type;
2352
2353 if(type1->type_class != type2->type_class) {
2354 different = 1;
2355 goto end;
2356 }
2357 if(type1->element_name != type2->element_name) {
2358 different = 1;
2359 goto end;
2360 }
2361
2362 switch(type1->type_class) {
2363 case LTT_INT_FIXED:
2364 case LTT_UINT_FIXED:
2365 case LTT_POINTER:
2366 case LTT_CHAR:
2367 case LTT_UCHAR:
2368 case LTT_SHORT:
2369 case LTT_USHORT:
2370 case LTT_INT:
2371 case LTT_UINT:
2372 case LTT_FLOAT:
2374 case LTT_LONG:
2375 case LTT_ULONG:
2376 case LTT_SIZE_T:
2377 case LTT_SSIZE_T:
2378 case LTT_OFF_T:
2379 if(field1->field_size != field2->field_size) {
2380 different = 1;
2381 goto end;
2382 }
2383 break;
2384 case LTT_ENUM:
2385 if(type1->element_number != type2->element_number) {
2386 different = 1;
2387 goto end;
2388 }
2389 for(i=0;i<type1->element_number;i++) {
2390 if(type1->enum_strings[i] != type2->enum_strings[i]) {
2391 different = 1;
2392 goto end;
2393 }
2394 }
2395 break;
2396 case LTT_SEQUENCE:
2397 /* Two elements : size and child */
2398 g_assert(type1->element_number == type2->element_number);
2399 for(i=0;i<type1->element_number;i++) {
2400 if(check_fields_compatibility(event_type1, event_type2,
2401 field1->child[0], field2->child[0])) {
2402 different = 1;
2403 goto end;
2404 }
2405 }
2406 break;
2407 case LTT_STRING:
2408 break;
2409 case LTT_ARRAY:
2410 if(field1->field_size != field2->field_size) {
2411 different = 1;
2412 goto end;
2413 }
2414 /* Two elements : size and child */
2415 g_assert(type1->element_number == type2->element_number);
2416 for(i=0;i<type1->element_number;i++) {
2417 if(check_fields_compatibility(event_type1, event_type2,
2418 field1->child[0], field2->child[0])) {
2419 different = 1;
2420 goto end;
2421 }
2422 }
2423 break;
2424 case LTT_STRUCT:
2425 case LTT_UNION:
2426 if(type1->element_number != type2->element_number) {
2427 different = 1;
2428 break;
2429 }
2430 for(i=0;i<type1->element_number;i++) {
2431 if(check_fields_compatibility(event_type1, event_type2,
2432 field1->child[i], field2->child[i])) {
2433 different = 1;
2434 goto end;
2435 }
2436 }
2437 break;
2438 }
2439end:
2440 return different;
2441}
2442#endif //0
2443
2444
2445/*****************************************************************************
2446 *Function name
2447 * ltt_get_int : get an integer number
2448 *Input params
2449 * reverse_byte_order: must we reverse the byte order ?
2450 * size : the size of the integer
2451 * data : the data pointer
2452 *Return value
2453 * gint64 : a 64-bit integer
2454 ****************************************************************************/
2455
2456gint64 ltt_get_int(gboolean reverse_byte_order, gint size, void *data)
2457{
2458 gint64 val;
2459
2460 switch(size) {
2461 case 1: val = *((gint8*)data); break;
2462 case 2: val = ltt_get_int16(reverse_byte_order, data); break;
2463 case 4: val = ltt_get_int32(reverse_byte_order, data); break;
2464 case 8: val = ltt_get_int64(reverse_byte_order, data); break;
2465 default: val = ltt_get_int64(reverse_byte_order, data);
2466 g_critical("get_int : integer size %d unknown", size);
2467 break;
2468 }
2469
2470 return val;
2471}
2472
2473/*****************************************************************************
2474 *Function name
2475 * ltt_get_uint : get an unsigned integer number
2476 *Input params
2477 * reverse_byte_order: must we reverse the byte order ?
2478 * size : the size of the integer
2479 * data : the data pointer
2480 *Return value
2481 * guint64 : a 64-bit unsigned integer
2482 ****************************************************************************/
2483
2484guint64 ltt_get_uint(gboolean reverse_byte_order, gint size, void *data)
2485{
2486 guint64 val;
2487
2488 switch(size) {
2489 case 1: val = *((guint8*)data); break;
2490 case 2: val = ltt_get_uint16(reverse_byte_order, data); break;
2491 case 4: val = ltt_get_uint32(reverse_byte_order, data); break;
2492 case 8: val = ltt_get_uint64(reverse_byte_order, data); break;
2493 default: val = ltt_get_uint64(reverse_byte_order, data);
2494 g_critical("get_uint : unsigned integer size %d unknown",
2495 size);
2496 break;
2497 }
2498
2499 return val;
2500}
2501
2502
2503/* get the node name of the system */
2504
2505char * ltt_trace_system_description_node_name (LttSystemDescription * s)
2506{
2507 return s->node_name;
2508}
2509
2510
2511/* get the domain name of the system */
2512
2513char * ltt_trace_system_description_domain_name (LttSystemDescription * s)
2514{
2515 return s->domain_name;
2516}
2517
2518
2519/* get the description of the system */
2520
2521char * ltt_trace_system_description_description (LttSystemDescription * s)
2522{
2523 return s->description;
2524}
2525
2526
2527/* get the NTP corrected start time of the trace */
2528LttTime ltt_trace_start_time(LttTrace *t)
2529{
2530 return t->start_time;
2531}
2532
2533/* get the monotonic start time of the trace */
2534LttTime ltt_trace_start_time_monotonic(LttTrace *t)
2535{
2536 return t->start_time_from_tsc;
2537}
2538
2539static LttTracefile *ltt_tracefile_new()
2540{
2541 LttTracefile *tf;
2542 tf = g_new(LttTracefile, 1);
2543 tf->event.tracefile = tf;
2544 return tf;
2545}
2546
2547static void ltt_tracefile_destroy(LttTracefile *tf)
2548{
2549 g_free(tf);
2550}
2551
2552static void ltt_tracefile_copy(LttTracefile *dest, const LttTracefile *src)
2553{
2554 *dest = *src;
2555}
2556
2557/* Before library loading... */
2558
2559static __attribute__((constructor)) void init(void)
2560{
2561 LTT_TRACEFILE_NAME_METADATA = g_quark_from_string("metadata");
2562}