fix trace number for 64 bits
[lttv.git] / tests / kernel / trace-event.h
#include <stdarg.h>
/* Assumed kernel headers (not in the original line) for isdigit(),
 * strcpy()/memcpy() and the fixed-size integer types used below; the LTT
 * declarations (ltt_align, ltt_traces, ltt_reserve_slot, ...) are expected
 * to come from headers included by the caller. */
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/types.h>

/* LTT flags
 *
 * LTT_FLAG_TRACE : first arg contains trace to write into.
 * (type : struct ltt_trace_struct *)
 * LTT_FLAG_CHANNEL : following arg contains channel index to write into.
 * (type : uint8_t)
 * LTT_FLAG_FORCE : Force write in disabled traces (internal ltt use)
 */

#define _LTT_FLAG_TRACE 0
#define _LTT_FLAG_CHANNEL 1
#define _LTT_FLAG_FORCE 2

#define LTT_FLAG_TRACE (1 << _LTT_FLAG_TRACE)
#define LTT_FLAG_CHANNEL (1 << _LTT_FLAG_CHANNEL)
#define LTT_FLAG_FORCE (1 << _LTT_FLAG_FORCE)

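/*
 * Illustrative note (not in the original header): when a flag is set, the
 * corresponding value is expected as a leading variadic argument, before the
 * arguments described by the format string.  A hypothetical call could look
 * like:
 *
 *   _trace(fID, eID, LTT_FLAG_TRACE | LTT_FLAG_CHANNEL, "%d",
 *          my_trace, my_channel_index, value);
 *
 * where my_trace, my_channel_index and value are caller variables used only
 * for this example.
 */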

typedef char *(*ltt_serialize_cb)(char *buffer, const char *fmt, va_list args);


static int skip_atoi(const char **s)
{
	int i=0;

	while (isdigit(**s))
		i = i*10 + *((*s)++) - '0';
	return i;
}

/* Inspired from vsnprintf */
/* New types :
 * %r : serialized pointer.
 */
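/*
 * Added note (summarizing the parsing code below, not from the original
 * comment): conversions may take an optional element size and alignment,
 * "%<size>.<alignment><conv>", where either number can be '*' to fetch the
 * value from the argument list.  Besides the standard integer conversions,
 * the serializer understands %r (fixed-size data), %v (sequence: an int
 * length followed by the data), %k (serialization callback) and %n (byte
 * count written so far).  For example, "%8.8r" would copy an 8-byte element
 * aligned on an 8-byte boundary.
 */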
static inline __attribute__((no_instrument_function))
char *ltt_serialize_data(char *buffer, const char *fmt, va_list args)
{
	int len;
	const char *s;
	int elem_size;		/* Size of the integer for 'b' */
				/* Size of the data contained by 'r' */
	int elem_alignment;	/* Element alignment for 'r' */
	int qualifier;		/* 'h', 'l', or 'L' for integer fields */
				/* 'z' support added 23/7/1999 S.H.    */
				/* 'z' changed to 'Z' --davidm 1/25/99 */
				/* 't' added for ptrdiff_t */
	char *str;		/* Pointer to write to */
	ltt_serialize_cb cb;

	str = buffer;
	for (; *fmt ; ++fmt) {
		if (*fmt != '%') {
			/* Skip text */
			continue;
		}

		/* process flags : ignore standard print formats for now. */
		repeat:
			++fmt;		/* this also skips first '%' */
			switch (*fmt) {
			case '-':
			case '+':
			case ' ':
			case '#':
			case '0': goto repeat;
			}

		/* get element size */
		elem_size = -1;
		if (isdigit(*fmt))
			elem_size = skip_atoi(&fmt);
		else if (*fmt == '*') {
			++fmt;
			/* it's the next argument */
			elem_size = va_arg(args, int);
		}

		/* get the alignment */
		elem_alignment = -1;
		if (*fmt == '.') {
			++fmt;
			if (isdigit(*fmt))
				elem_alignment = skip_atoi(&fmt);
			else if (*fmt == '*') {
				++fmt;
				/* it's the next argument */
				elem_alignment = va_arg(args, int);
			}
		}

		/* get the conversion qualifier */
		qualifier = -1;
		if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
		    *fmt == 'Z' || *fmt == 'z' || *fmt == 't') {
			qualifier = *fmt;
			++fmt;
			if (qualifier == 'l' && *fmt == 'l') {
				qualifier = 'L';
				++fmt;
			}
		}

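		/* Added note: each conversion below advances str by the
		 * aligned size of the element it represents, and only
		 * dereferences str when a non-NULL buffer was supplied
		 * (i.e. not in size-computation mode). */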
		switch (*fmt) {
		case 'c':
			if (buffer)
				*str = (char) va_arg(args, int);
			str += sizeof(char);
			continue;

		case 's':
			s = va_arg(args, char *);
			if ((unsigned long)s < PAGE_SIZE)
				s = "<NULL>";
			if (buffer)
				strcpy(str, s);
			str += strlen(s);
			continue;

		case 'p':
			str += ltt_align(str, sizeof(void*));
			if (buffer)
				*(void**)str = va_arg(args, void *);
			str += sizeof(void*);
			continue;

		case 'r':
			/* For array, struct, union */
			if (elem_alignment < 0)
				elem_alignment = sizeof(void*);
			str += ltt_align(str, elem_alignment);
			if (elem_size > 0) {
				const char *src = va_arg(args, const char *);
				if (buffer)
					memcpy(str, src, elem_size);
				str += elem_size;
			}
			continue;

		case 'v':
			/* For sequence */
			str += ltt_align(str, sizeof(int));
			if (buffer)
				*(int*)str = elem_size;
			str += sizeof(int);
			if (elem_alignment > 0)
				str += ltt_align(str, elem_alignment);
			if (elem_size > 0) {
				const char *src = va_arg(args, const char *);
				if (buffer)
					memcpy(str, src, elem_size);
				str += elem_size;
			}
			continue;

		case 'k':
			/* For callback */
			cb = va_arg(args, ltt_serialize_cb);
			/* The callback will take as many arguments
			 * as it needs from args. They won't be
			 * type verified. */
			str = cb(str, fmt, args);
			continue;

		case 'n':
			/* FIXME:
			 * What does C99 say about the overflow case here? */
			if (qualifier == 'l') {
				long *ip = va_arg(args, long *);
				*ip = (str - buffer);
			} else if (qualifier == 'Z' || qualifier == 'z') {
				size_t *ip = va_arg(args, size_t *);
				*ip = (str - buffer);
			} else {
				int *ip = va_arg(args, int *);
				*ip = (str - buffer);
			}
			continue;

		case '%':
			continue;

		case 'o':
		case 'X':
		case 'x':
		case 'd':
		case 'i':
		case 'u':
			break;

		default:
			if (!*fmt)
				--fmt;
			continue;
		}
		switch (qualifier) {
		case 'L':
			str += ltt_align(str, sizeof(long long));
			if (buffer)
				*(long long*)str = va_arg(args, long long);
			str += sizeof(long long);
			break;
		case 'l':
			str += ltt_align(str, sizeof(long));
			if (buffer)
				*(long*)str = va_arg(args, long);
			str += sizeof(long);
			break;
		case 'Z':
		case 'z':
			str += ltt_align(str, sizeof(size_t));
			if (buffer)
				*(size_t*)str = va_arg(args, size_t);
			str += sizeof(size_t);
			break;
		case 't':
			str += ltt_align(str, sizeof(ptrdiff_t));
			if (buffer)
				*(ptrdiff_t*)str = va_arg(args, ptrdiff_t);
			str += sizeof(ptrdiff_t);
			break;
		case 'h':
			str += ltt_align(str, sizeof(short));
			if (buffer)
				*(short*)str = (short) va_arg(args, int);
			str += sizeof(short);
			break;
		case 'b':
			if (elem_size > 0)
				str += ltt_align(str, elem_size);
			if (buffer)
				switch (elem_size) {
				case 1:
					*(int8_t*)str = (int8_t)va_arg(args, int);
					break;
				case 2:
					*(int16_t*)str = (int16_t)va_arg(args, int);
					break;
				case 4:
					*(int32_t*)str = va_arg(args, int32_t);
					break;
				case 8:
					*(int64_t*)str = va_arg(args, int64_t);
					break;
				}
			str += elem_size;
			break;
		default:
			str += ltt_align(str, sizeof(int));
			if (buffer)
				*(int*)str = va_arg(args, int);
			str += sizeof(int);
		}
	}
	return str;
}

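/*
 * Added note (describing the behaviour of ltt_serialize_data() above): when
 * it is called with a NULL buffer, nothing is written and the returned
 * pointer value is simply the number of bytes the serialized data would
 * occupy, which is how the size is computed below before reserving a slot.
 */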
/* Calculate data size */
/* Assume that the padding for alignment starts at a
 * sizeof(void *) address. */
static inline __attribute__((no_instrument_function))
size_t ltt_get_data_size(ltt_facility_t fID, uint8_t eID,
		const char *fmt, va_list args)
{
	return (size_t)ltt_serialize_data(NULL, fmt, args);
}

static inline __attribute__((no_instrument_function))
void ltt_write_event_data(char *buffer,
		ltt_facility_t fID, uint8_t eID,
		const char *fmt, va_list args)
{
	ltt_serialize_data(buffer, fmt, args);
}


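/*
 * Added summary (derived from the code below): for every active trace,
 * _vtrace() reserves a slot sized from ltt_get_data_size(), writes the event
 * header and the serialized arguments into it, then commits the slot.
 * Writes are guarded by preempt_disable() and the per-CPU ltt_nesting count.
 */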
__attribute__((no_instrument_function))
void _vtrace(ltt_facility_t fID, uint8_t eID, long flags,
		const char *fmt, va_list args)
{
	size_t data_size, slot_size;
	int channel_index;
	struct ltt_channel_struct *channel;
	struct ltt_trace_struct *trace, *dest_trace = NULL;
	void *transport_data;
	uint64_t tsc;
	char *buffer;
	va_list args_copy;

	/* This test is useful for quickly exiting static tracing when no
	 * trace is active. */
	if (likely(ltt_traces.num_active_traces == 0
			&& !(flags & LTT_FLAG_FORCE)))
		return;

	preempt_disable();
	ltt_nesting[smp_processor_id()]++;

	if (unlikely(flags & LTT_FLAG_TRACE))
		dest_trace = va_arg(args, struct ltt_trace_struct *);
	if (unlikely(flags & LTT_FLAG_CHANNEL))
		channel_index = va_arg(args, int);
	else
		channel_index = ltt_get_channel_index(fID, eID);

	/* Check : the trace/channel args (if any) were already consumed by
	 * va_arg above, so the copy starts at the format arguments. */
	va_copy(args_copy, args);
	data_size = ltt_get_data_size(fID, eID, fmt, args_copy);
	va_end(args_copy);

	/* Iterate over each trace */
	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
		if (unlikely(!trace->active && !(flags & LTT_FLAG_FORCE)))
			continue;
		if (unlikely(flags & LTT_FLAG_TRACE && trace != dest_trace))
			continue;
		channel = ltt_get_channel_from_index(trace, channel_index);
		/* reserve space : header and data */
		buffer = ltt_reserve_slot(trace, channel, &transport_data,
				data_size, &slot_size, &tsc);
		if (unlikely(!buffer))
			continue; /* buffer full */
		/* Out-of-order write : header and data */
		buffer = ltt_write_event_header(trace, channel, buffer,
				fID, eID, data_size, tsc);
		va_copy(args_copy, args);
		ltt_write_event_data(buffer, fID, eID, fmt, args_copy);
		va_end(args_copy);
		/* Out-of-order commit */
		ltt_commit_slot(channel, &transport_data, buffer, slot_size);
	}

	ltt_nesting[smp_processor_id()]--;
	preempt_enable();
}

__attribute__((no_instrument_function))
void _trace(ltt_facility_t fID, uint8_t eID, long flags, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	_vtrace(fID, eID, flags, fmt, args);
	va_end(args);
}

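/*
 * Illustrative example (not part of the original header; the facility and
 * event identifiers are hypothetical): a probe would typically record an
 * event into every active trace with something like
 *
 *   _trace(ltt_facility_mine, MY_EVENT_ID, 0, "%d %s", pid, name);
 *
 * which serializes an aligned int followed by the string bytes, in the
 * layout produced by ltt_serialize_data().
 */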