Add block layer trace event support
lttng-modules.git: probes/lttng-events.h
#include <lttng.h>
#include <lttng-types.h>
#include <linux/debugfs.h>
#include "../ltt-tracer-core.h"

#if 0

/* keep for a later stage (copy stage) */
/*
 * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy()
 * to strcpy().
 */
#undef tp_assign
#define tp_assign(dest, src)						\
	lib_ring_buffer_align_ctx(config, &ctx, sizeof(src));		\
	lib_ring_buffer_write(config, &ctx, &src, sizeof(src));

#undef tp_memcpy
#define tp_memcpy(dest, src, len)					\
	lib_ring_buffer_align_ctx(config, &ctx, sizeof(*(src)));	\
	lib_ring_buffer_write(config, &ctx, &src, len);

/* TODO: tp_memcpy_dyn */

/* TODO */
#undef tp_strcpy
#define tp_strcpy(dest, src)	__assign_str(dest, src);

#endif /* 0 */

/* TODO: deal with DEFINE_EVENT vs event class */

struct lttng_event_field {
	const char *name;
	const struct lttng_type type;
};

struct lttng_event_desc {
	const struct lttng_event_field *fields;
	const char *name;
	unsigned int nr_fields;
};

/*
 * Macro declarations used for all stages.
 */

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers for
 * events. That is, if all events have the same parameters and just have
 * distinct trace points. Each tracepoint can be defined with DEFINE_EVENT
 * and that will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
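
/*
 * For illustration only (this event and its assignment macros are
 * hypothetical, not defined by this header), a tracepoint declared as:
 *
 *	TRACE_EVENT(sample_event,
 *		TP_PROTO(int foo),
 *		TP_ARGS(foo),
 *		TP_STRUCT__entry(__field(int, foo)),
 *		TP_fast_assign(tp_assign(foo, foo)),
 *		TP_printk("foo %d", __entry->foo))
 *
 * is treated below as a DECLARE_EVENT_CLASS named sample_event plus a
 * DEFINE_EVENT that maps the sample_event tracepoint onto that class.
 */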

#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			     PARAMS(proto),			\
			     PARAMS(args),			\
			     PARAMS(tstruct),			\
			     PARAMS(assign),			\
			     PARAMS(print))			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to LTTng. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

/*
 * Stage 1 of the trace events.
 *
 * Create event field type metadata section.
 * Each event produces an array of fields.
 */

#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/* Named field types must be defined in lttng-types.h */

#undef __field
#define __field(_type, _item)						\
	{ .name = #_item, .type = { .atype = atype_integer, .name = #_type } },

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)	__field(_type, _item)

#undef __array
#define __array(_type, _item, _length)					\
	{								\
		.name = #_item,						\
		.type = {						\
			.atype = atype_array,				\
			.name = NULL,					\
			.u.array.elem_type = #_type,			\
			.u.array.length = _length,			\
		},							\
	},

#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)				\
	{								\
		.name = #_item,						\
		.type = {						\
			.atype = atype_sequence,			\
			.name = NULL,					\
			.u.sequence.elem_type = #_type,			\
			.u.sequence.length_type = "u32",		\
		},							\
	},

#undef __string
#define __string(_item, _src)						\
	{								\
		.name = #_item,						\
		.type = {						\
			.atype = atype_string,				\
			.name = NULL,					\
			.u.string.encoding = lttng_encode_UTF8,		\
		},							\
	},

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...)	args	/* Only one used in this phase */

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
	static const struct lttng_event_field __event_fields___##_name[] = { \
		_tstruct						\
	};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
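
/*
 * For the hypothetical sample_event above, this pass would emit:
 *
 *	static const struct lttng_event_field __event_fields___sample_event[] = {
 *		{ .name = "foo", .type = { .atype = atype_integer, .name = "int" } },
 *	};
 */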

/*
 * Stage 2 of the trace events.
 *
 * Create an array of events.
 */

/* Named field types must be defined in lttng-types.h */

#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

#undef DEFINE_EVENT
#define DEFINE_EVENT(_template, _name, _proto, _args)			\
	{								\
		.fields = __event_fields___##_template,			\
		.name = #_name,						\
		.nr_fields = ARRAY_SIZE(__event_fields___##_template),	\
	},

#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)

static const struct lttng_event_desc TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
};
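
/*
 * E.g., with a hypothetical TRACE_SYSTEM named "sample", the array above
 * expands to __event_desc___sample[], holding one entry per DEFINE_EVENT:
 *
 *	{
 *		.fields = __event_fields___sample_event,
 *		.name = "sample_event",
 *		.nr_fields = 1,
 *	},
 */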

#undef TP_ID1
#undef TP_ID

/*
 * Stage 3 of the trace events.
 *
 * Create seq file metadata output.
 */

#define TP_ID1(_token, _system)	_token##_system
#define TP_ID(_token, _system)	TP_ID1(_token, _system)
#define module_init_eval1(_token, _system)	module_init(_token##_system)
#define module_init_eval(_token, _system)	module_init_eval1(_token, _system)
#define module_exit_eval1(_token, _system)	module_exit(_token##_system)
#define module_exit_eval(_token, _system)	module_exit_eval1(_token, _system)

static void *TP_ID(__lttng_seq_start__, TRACE_SYSTEM)(struct seq_file *m,
						      loff_t *pos)
{
	const struct lttng_event_desc *desc =
		&TP_ID(__event_desc___, TRACE_SYSTEM)[*pos];

	if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)
			[ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1])
		return NULL;
	return (void *) desc;
}

static void *TP_ID(__lttng_seq_next__, TRACE_SYSTEM)(struct seq_file *m,
						     void *p, loff_t *ppos)
{
	const struct lttng_event_desc *desc =
		&TP_ID(__event_desc___, TRACE_SYSTEM)[++(*ppos)];

	if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)
			[ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1])
		return NULL;
	return (void *) desc;
}

static void TP_ID(__lttng_seq_stop__, TRACE_SYSTEM)(struct seq_file *m,
						    void *p)
{
}

static int TP_ID(__lttng_seq_show__, TRACE_SYSTEM)(struct seq_file *m,
						   void *p)
{
	const struct lttng_event_desc *desc = p;
	int i;

	seq_printf(m,	"event {\n"
			"\tname = %s;\n"
			"\tid = UNKNOWN;\n"
			"\tstream = UNKNOWN;\n"
			"\tfields = {\n",
			desc->name);
	for (i = 0; i < desc->nr_fields; i++) {
		if (desc->fields[i].type.name)	/* Named type */
			seq_printf(m, "\t\t%s",
				   desc->fields[i].type.name);
		else				/* Nameless type */
			lttng_print_event_type(m, 2, &desc->fields[i].type);
		seq_printf(m, " %s;\n", desc->fields[i].name);
	}
	seq_printf(m, "\t};\n");
	seq_printf(m, "};\n");
	return 0;
}
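
/*
 * For the hypothetical sample_event, the show callback above would print:
 *
 *	event {
 *		name = sample_event;
 *		id = UNKNOWN;
 *		stream = UNKNOWN;
 *		fields = {
 *			int foo;
 *		};
 *	};
 */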

static const
struct seq_operations TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM) = {
	.start = TP_ID(__lttng_seq_start__, TRACE_SYSTEM),
	.next = TP_ID(__lttng_seq_next__, TRACE_SYSTEM),
	.stop = TP_ID(__lttng_seq_stop__, TRACE_SYSTEM),
	.show = TP_ID(__lttng_seq_show__, TRACE_SYSTEM),
};

static int
TP_ID(__lttng_types_open__, TRACE_SYSTEM)(struct inode *inode, struct file *file)
{
	return seq_open(file, &TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM));
}

static const
struct file_operations TP_ID(__lttng_types_fops__, TRACE_SYSTEM) = {
	.open = TP_ID(__lttng_types_open__, TRACE_SYSTEM),
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static struct dentry *TP_ID(__lttng_types_dentry__, TRACE_SYSTEM);

static int TP_ID(__lttng_types_init__, TRACE_SYSTEM)(void)
{
	int ret = 0;

	TP_ID(__lttng_types_dentry__, TRACE_SYSTEM) =
		debugfs_create_file("lttng-events-" __stringify(TRACE_SYSTEM),
				    S_IRUSR, NULL, NULL,
				    &TP_ID(__lttng_types_fops__, TRACE_SYSTEM));
	if (IS_ERR(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM))
	    || !TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) {
		printk(KERN_ERR "Error creating LTTng type export file\n");
		ret = -ENOMEM;
		goto error;
	}
error:
	return ret;
}

module_init_eval(__lttng_types_init__, TRACE_SYSTEM);

static void TP_ID(__lttng_types_exit__, TRACE_SYSTEM)(void)
{
	debugfs_remove(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM));
}

module_exit_eval(__lttng_types_exit__, TRACE_SYSTEM);

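/*
 * The generated metadata can then be read from userspace through debugfs
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/lttng-events-<TRACE_SYSTEM>
 */
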
#undef module_init_eval
#undef module_exit_eval
#undef TP_ID1
#undef TP_ID

/*
 * Stage 4 of the trace events.
 *
 * Create a static inline function that calculates the event size.
 */

#include "lttng-events-reset.h"	/* Reset all macros within TRACE_EVENT */

/* Named field types must be defined in lttng-types.h */

#undef __field
#define __field(_type, _item)						\
	__event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \
	__event_len += sizeof(_type);

#undef __field_ext
#define __field_ext(_type, _item, _filter_type)	__field(_type, _item)

#undef __array
#define __array(_type, _item, _length)					\
	__event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \
	__event_len += sizeof(_type) * (_length);

#undef __dynamic_array
#define __dynamic_array(_type, _item, _length)				\
	__event_len += lib_ring_buffer_align(__event_len, sizeof(u32)); \
	__event_len += sizeof(u32);					\
	__event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \
	__event_len += sizeof(_type) * (_length);

#undef __string
#define __string(_item, _src)						\
	__event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1;

#undef TP_PROTO
#define TP_PROTO(args...)	args

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...)	args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \
static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \
{									\
	size_t __event_len = 0;						\
	unsigned int __dynamic_len_idx = 0;				\
									\
	if (0)								\
		(void) __dynamic_len_idx;	/* don't warn if unused */ \
	_tstruct							\
	return __event_len;						\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
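
/*
 * Continuing the hypothetical sample_event, this pass would emit (modulo
 * the unused-variable silencer):
 *
 *	static inline size_t __event_get_size__sample_event(
 *			size_t *__dynamic_len, int foo)
 *	{
 *		size_t __event_len = 0;
 *		unsigned int __dynamic_len_idx = 0;
 *
 *		__event_len += lib_ring_buffer_align(__event_len, sizeof(int));
 *		__event_len += sizeof(int);
 *		return __event_len;
 *	}
 */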

#if 0

/*
 * Stage 4 of the trace events.
 *
 * Create the probe function: call the event size calculation and write the
 * event data into the buffer.
 */

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function handlers for
 * events. That is, if all events have the same parameters and just have
 * distinct trace points. Each tracepoint can be defined with DEFINE_EVENT
 * and that will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one-to-one mapping between tracepoint and template.
 */
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name,				\
			     PARAMS(proto),			\
			     PARAMS(args),			\
			     PARAMS(tstruct),			\
			     PARAMS(assign),			\
			     PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#undef __field
#define __field(type, item)		type	item;

#undef __field_ext
#define __field_ext(type, item, filter_type)	type	item;

#undef __array
#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 __data_loc_##item;

#undef __string
#define __string(item, src)	__dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...)	args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)	\
	struct ftrace_raw_##name {					\
		struct trace_entry	ent;				\
		tstruct							\
		char			__data[0];			\
	};								\
									\
	static struct ftrace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)		\
	static struct ftrace_event_call	__used			\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)


/*
 * Stage 2 of the trace events.
 *
 * Create the struct ftrace_data_offsets_<call> that holds a u32 offset
 * for each dynamic array and string in the event.
 */

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#undef __string
#define __string(item, src)	__dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {				\
		tstruct;						\
	};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
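
/*
 * For an event with a single __string(msg, src) field (hypothetical), the
 * fixed-size field macros expand to nothing here, so the struct reduces to:
 *
 *	struct ftrace_data_offsets_<call> {
 *		u32 msg;
 *	};
 */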

/*
 * Stage 3 of the trace events.
 *
 * Create the output functions, ftrace_raw_output_<call>(), that format the
 * event for printing.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...)	fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field)	(char *)__get_dynamic_array(field)

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_hex
#define __print_hex(buf, buf_len)	ftrace_print_hex_seq(p, buf, buf_len)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct ftrace_event_call *event;				\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##call *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	event = container_of(trace_event, struct ftrace_event_call,	\
			     event);					\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event->event.type) {				\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", event->name);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
	int ret;							\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	ret = trace_seq_printf(s, "%s: ", #call);			\
	if (ret)							\
		ret = trace_seq_printf(s, print);			\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#undef __field
#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)					\
	BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);				\
	ret = trace_define_field(event_call, #type "[" #len "]", #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
	if (ret)							\
		return ret;

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item),	\
				 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src)	__dynamic_array(char, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace							\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
									\
	tstruct;							\
									\
	return ret;							\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= (len * sizeof(type)) << 16;		\
	__data_size += (len) * sizeof(type);

#undef __string
#define __string(item, src)	__dynamic_array(char, item, strlen(src) + 1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
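
/*
 * For the hypothetical __string(msg, src) field above, and starting from
 * __data_size == 0, ftrace_get_offsets_<call> computes:
 *
 *	__data_offsets->msg = 0 + offsetof(typeof(*entry), __data);
 *	__data_offsets->msg |= ((strlen(src) + 1) * sizeof(char)) << 16;
 *	__data_size += (strlen(src) + 1) * sizeof(char);
 */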

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_call *event_call = __data;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_current_buffer_lock_reserve(&buffer,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; } <-- Here we assign the entries by the __field and
 *			  __array macros.
 *
 *	if (!filter_current_check_discard(buffer, event_call, entry, event))
 *		trace_current_buffer_unlock_commit(buffer,
 *						   event, irq_flags, pc);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 2
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call __used
 * __attribute__((__aligned__(4)))
 * __attribute__((section("_ftrace_events"))) event_<call> = {
 *	.name			= "<call>",
 *	.class			= event_class_<template>,
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 * };
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)					\
	static notrace void						\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)						\
	.perf_probe		= perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src)	__dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)						\
	strcpy(__get_str(dst), src);

#undef TP_fast_assign
#define TP_fast_assign(args...)	args

#undef TP_perf_assign
#define TP_perf_assign(args...)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
									\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ring_buffer_event *event;				\
	struct ftrace_raw_##call *entry;				\
	struct ring_buffer *buffer;					\
	unsigned long irq_flags;					\
	int __data_size;						\
	int pc;								\
									\
	local_save_flags(irq_flags);					\
	pc = preempt_count();						\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	event = trace_current_buffer_lock_reserve(&buffer,		\
				 event_call->event.type,		\
				 sizeof(*entry) + __data_size,		\
				 irq_flags, pc);			\
	if (!event)							\
		return;							\
	entry = ring_buffer_event_data(event);				\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	if (!filter_current_check_discard(buffer, event_call, entry, event)) \
		trace_nowake_buffer_unlock_commit(buffer,		\
						  event, irq_flags, pc); \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_str

#undef TP_printk
#define TP_printk(fmt, args...)	"\"" fmt "\", " __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used event_class_##call = {		\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used					\
__attribute__((__aligned__(4)))						\
__attribute__((section("_ftrace_events"))) event_##call = {		\
	.name			= #call,				\
	.class			= &event_class_##template,		\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Define the insertion callback to perf events.
 *
 * The job is very similar to ftrace_raw_event_<call> except that we don't
 * insert in the ring buffer but in a perf counter.
 *
 * static void ftrace_perf_<call>(proto)
 * {
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	struct ftrace_event_call *event_call = &event_<call>;
 *	extern void perf_tp_event(int, u64, u64, void *, int);
 *	struct ftrace_raw_##call *entry;
 *	struct perf_trace_buf *trace_buf;
 *	u64 __addr = 0, __count = 1;
 *	unsigned long irq_flags;
 *	struct trace_entry *ent;
 *	int __entry_size;
 *	int __data_size;
 *	int __cpu;
 *	int pc;
 *
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	// Below we want to get the aligned size by taking into account
 *	// the u32 field that will later store the buffer size
 *	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),
 *			     sizeof(u64));
 *	__entry_size -= sizeof(u32);
 *
 *	// Protect the non nmi buffer
 *	// This also protects the rcu read side
 *	local_irq_save(irq_flags);
 *	__cpu = smp_processor_id();
 *
 *	if (in_nmi())
 *		trace_buf = rcu_dereference_sched(perf_trace_buf_nmi);
 *	else
 *		trace_buf = rcu_dereference_sched(perf_trace_buf);
 *
 *	if (!trace_buf)
 *		goto end;
 *
 *	trace_buf = per_cpu_ptr(trace_buf, __cpu);
 *
 *	// Avoid recursion from perf that could mess up the buffer
 *	if (trace_buf->recursion++)
 *		goto end_recursion;
 *
 *	raw_data = trace_buf->buf;
 *
 *	// Make recursion update visible before entering perf_tp_event
 *	// so that we protect from perf recursions.
 *
 *	barrier();
 *
 *	// Zero dead bytes from alignment to avoid stack leak to userspace:
 *	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
 *	entry = (struct ftrace_raw_<call> *)raw_data;
 *	ent = &entry->ent;
 *	tracing_generic_entry_update(ent, irq_flags, pc);
 *	ent->type = event_call->id;
 *
 *	<tstruct> <- do some jobs with dynamic arrays
 *
 *	<assign>  <- affect our values
 *
 *	perf_tp_event(event_call->id, __addr, __count, entry,
 *		      __entry_size); <- submit them to perf counter
 *
 * }
 */

#ifdef CONFIG_PERF_EVENTS

#undef __entry
#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
		((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_str
#define __get_str(field)	(char *)__get_dynamic_array(field)

#undef __perf_addr
#define __perf_addr(a)	__addr = (a)

#undef __perf_count
#define __perf_count(c)	__count = (c)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	perf_fetch_caller_regs(&__regs);				\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE,		\
		      "profile buffer not large enough"))		\
		return;							\
									\
	entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare(	\
		__entry_size, event_call->event.type, &__regs, &rctx);	\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head);				\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */

#undef _TRACE_PROFILE_INIT
#endif /* 0 */