Commit | Line | Data |
---|---|---|
40652b65 | 1 | #include <lttng.h> |
299338c8 | 2 | #include <lttng-types.h> |
d0dd2ecb | 3 | #include <linux/debugfs.h> |
40652b65 MD |
4 | |
5 | /* | |
6 | * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to | |
7 | * strcpy(). | |
8 | */ | |
1d12cebd | 9 | #undef tp_assign |
40652b65 MD |
10 | #define tp_assign(dest, src) \ |
11 | lib_ring_buffer_align_ctx(config, &ctx, sizeof(src)); \ | |
12 | lib_ring_buffer_write(config, &ctx, &src, sizeof(src)); | |
13 | ||
1d12cebd | 14 | #undef tp_memcpy |
40652b65 MD |
15 | #define tp_memcpy(dest, src, len) \ |
16 | lib_ring_buffer_align_ctx(config, &ctx, sizeof(*(src))); \ | |
17 | lib_ring_buffer_write(config, &ctx, &src, len); | |
18 | ||
19 | /* TODO */ | |
1d12cebd | 20 | #undef tp_strcpy |
299338c8 MD |
21 | #define tp_strcpy(dest, src) __assign_str(dest, src); |
22 | ||
23 | struct lttng_event_field { | |
24 | const char *name; | |
25 | const struct lttng_type type; | |
26 | }; | |
27 | ||
28 | struct lttng_event_desc { | |
29 | const struct lttng_event_field *fields; | |
d0dd2ecb MD |
30 | const char *name; |
31 | unsigned int nr_fields; | |
299338c8 | 32 | }; |
40652b65 MD |
33 | |
34 | /* | |
35 | * Stage 1 of the trace events. | |
36 | * | |
37 | * Create event field type metadata section. | |
299338c8 | 38 | * Each event produces an array of fields. |
40652b65 MD |
39 | */ |
40 | ||
41 | /* | |
42 | * DECLARE_EVENT_CLASS can be used to add generic function |
43 | * handlers for events. That is, if all events have the same |
44 | * parameters and just have distinct trace points. | |
45 | * Each tracepoint can be defined with DEFINE_EVENT and that | |
46 | * will map the DECLARE_EVENT_CLASS to the tracepoint. | |
47 | * | |
48 | * TRACE_EVENT is a one to one mapping between tracepoint and template. | |
49 | */ | |
50 | #undef TRACE_EVENT | |
51 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | |
52 | DECLARE_EVENT_CLASS(name, \ | |
53 | PARAMS(proto), \ | |
54 | PARAMS(args), \ | |
55 | PARAMS(tstruct), \ | |
56 | PARAMS(assign), \ | |
299338c8 MD |
57 | PARAMS(print)) \ |
58 | DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)) | |
40652b65 | 59 | |
1d12cebd MD |
60 | /* Named field types must be defined in lttng-types.h */ |
61 | ||
40652b65 | 62 | #undef __field |
299338c8 MD |
63 | #define __field(_type, _item) \ |
64 | { .name = #_item, .type = { .atype = atype_integer, .name = #_type} }, | |
40652b65 MD |
65 | |
66 | #undef __field_ext | |
299338c8 MD |
67 | #define __field_ext(_type, _item, _filter_type) \ |
68 | { .name = #_item, .type = { .atype = atype_integer, .name = #_type} }, | |
40652b65 MD |
69 | |
70 | #undef __array | |
299338c8 MD |
71 | #define __array(_type, _item, _length) \ |
72 | { \ | |
73 | .name = #_item, \ | |
74 | .type = { \ | |
75 | .atype = atype_array, \ | |
76 | .name = NULL, \ | |
77 | .u.array.elem_type = #_type, \ | |
78 | .u.array.length = _length, \ | |
79 | }, \ | |
80 | }, | |
40652b65 MD |
81 | |
82 | #undef __dynamic_array | |
299338c8 MD |
83 | #define __dynamic_array(_type, _item, _length) \ |
84 | { \ | |
85 | .name = #_item, \ | |
86 | .type = { \ | |
87 | .atype = atype_sequence, \ | |
88 | .name = NULL, \ | |
89 | .u.sequence.elem_type = #_type, \ | |
90 | .u.sequence.length_type = "u32", \ | |
91 | }, \ | |
92 | }, | |
40652b65 MD |
93 | |
94 | #undef __string | |
1d12cebd | 95 | #define __string(_item, _src) \ |
299338c8 MD |
96 | { \ |
97 | .name = _item, \ | |
98 | .type = { \ | |
99 | .atype = atype_string, \ | |
100 | .name = NULL, \ | |
101 | .u.string.encoding = lttng_encode_UTF8, \ | |
102 | }, \ | |
103 | }, | |
1d12cebd MD |
104 | |
105 | #undef TP_PROTO | |
106 | #define TP_PROTO(args...) | |
107 | ||
108 | #undef TP_ARGS | |
109 | #define TP_ARGS(args...) | |
40652b65 MD |
110 | |
111 | #undef TP_STRUCT__entry | |
1d12cebd MD |
112 | #define TP_STRUCT__entry(args...) args /* Only one used in this phase */ |
113 | ||
114 | #undef TP_fast_assign | |
115 | #define TP_fast_assign(args...) | |
116 | ||
117 | #undef TP_printk | |
118 | #define TP_printk(args...) | |
40652b65 MD |
119 | |
120 | #undef DECLARE_EVENT_CLASS | |
121 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ | |
299338c8 MD |
122 | static const struct lttng_event_field __event_fields___##name[] = { \ |
123 | tstruct \ | |
124 | }; | |
125 | ||
126 | #undef DEFINE_EVENT | |
127 | #define DEFINE_EVENT(template, name, proto, args) | |
128 | ||
129 | #undef DEFINE_EVENT_PRINT | |
130 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
131 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
132 | ||
133 | /* Callbacks are meaningless to LTTng. */ | |
134 | #undef TRACE_EVENT_FN | |
135 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ | |
136 | assign, print, reg, unreg) \ | |
137 | TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ | |
138 | PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ | |
139 | ||
140 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
141 | ||
142 | /* | |
143 | * Stage 2 of the trace events. | |
144 | * | |
145 | * Create an array of events. | |
146 | */ | |
147 | ||
148 | /* | |
149 | * DECLARE_EVENT_CLASS can be used to add generic function |
150 | * handlers for events. That is, if all events have the same |
151 | * parameters and just have distinct trace points. | |
152 | * Each tracepoint can be defined with DEFINE_EVENT and that | |
153 | * will map the DECLARE_EVENT_CLASS to the tracepoint. | |
154 | * | |
155 | * TRACE_EVENT is a one to one mapping between tracepoint and template. | |
156 | */ | |
157 | #undef TRACE_EVENT | |
158 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | |
159 | DECLARE_EVENT_CLASS(name, \ | |
160 | PARAMS(proto), \ | |
161 | PARAMS(args), \ | |
162 | PARAMS(tstruct), \ | |
163 | PARAMS(assign), \ | |
164 | PARAMS(print)) \ | |
165 | DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)) | |
166 | ||
167 | /* Named field types must be defined in lttng-types.h */ | |
168 | ||
169 | #undef __field | |
170 | #define __field(_type, _item) | |
171 | ||
172 | #undef __field_ext | |
173 | #define __field_ext(_type, _item, _filter_type) | |
174 | ||
175 | #undef __array | |
176 | #define __array(_type, _item, _length) | |
177 | ||
178 | #undef __dynamic_array | |
179 | #define __dynamic_array(_type, _item, _length) | |
180 | ||
181 | #undef __string | |
182 | #define __string(_item, _src) | |
183 | ||
184 | #undef TP_PROTO | |
185 | #define TP_PROTO(args...) | |
186 | ||
187 | #undef TP_ARGS | |
188 | #define TP_ARGS(args...) | |
189 | ||
190 | #undef TP_STRUCT__entry | |
191 | #define TP_STRUCT__entry(args...) | |
192 | ||
193 | #undef TP_fast_assign | |
194 | #define TP_fast_assign(args...) | |
195 | ||
196 | #undef TP_printk | |
197 | #define TP_printk(args...) | |
198 | ||
199 | #undef DECLARE_EVENT_CLASS | |
d0dd2ecb MD |
200 | #define DECLARE_EVENT_CLASS(_name, proto, args, tstruct, assign, print) \ |
201 | { \ | |
202 | .fields = __event_fields___##_name, \ | |
203 | .name = #_name, \ | |
204 | .nr_fields = ARRAY_SIZE(__event_fields___##_name), \ | |
205 | }, | |
40652b65 MD |
206 | |
207 | #undef DEFINE_EVENT | |
1d12cebd | 208 | #define DEFINE_EVENT(template, name, proto, args) |
40652b65 MD |
209 | |
210 | #undef DEFINE_EVENT_PRINT | |
211 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
212 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
213 | ||
1d12cebd | 214 | /* Callbacks are meaningless to LTTng. */ |
40652b65 MD |
215 | #undef TRACE_EVENT_FN |
216 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ | |
217 | assign, print, reg, unreg) \ | |
218 | TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ | |
219 | PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ | |
220 | ||
d0dd2ecb MD |
221 | #define TP_ID1(_token, _system) _token##_system |
222 | #define TP_ID(_token, _system) TP_ID1(_token, _system) | |
40652b65 | 223 | |
d0dd2ecb | 224 | static const struct lttng_event_desc TP_ID(__event_desc___, TRACE_SYSTEM)[] = { |
40652b65 | 225 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) |
299338c8 MD |
226 | }; |
227 | ||
d0dd2ecb MD |
228 | #undef TP_ID1 |
229 | #undef TP_ID | |
230 | ||
231 | /* | |
232 | * Stage 3 of the trace events. | |
233 | * | |
234 | * Create seq file metadata output. | |
235 | */ | |
236 | ||
237 | /* | |
238 | * DECLARE_EVENT_CLASS can be used to add generic function |
239 | * handlers for events. That is, if all events have the same |
240 | * parameters and just have distinct trace points. | |
241 | * Each tracepoint can be defined with DEFINE_EVENT and that | |
242 | * will map the DECLARE_EVENT_CLASS to the tracepoint. | |
243 | * | |
244 | * TRACE_EVENT is a one to one mapping between tracepoint and template. | |
245 | */ | |
246 | #undef TRACE_EVENT | |
247 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | |
248 | DECLARE_EVENT_CLASS(name, \ | |
249 | PARAMS(proto), \ | |
250 | PARAMS(args), \ | |
251 | PARAMS(tstruct), \ | |
252 | PARAMS(assign), \ | |
253 | PARAMS(print)) \ | |
254 | DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)) | |
255 | ||
256 | /* Named field types must be defined in lttng-types.h */ | |
257 | ||
258 | #undef __field | |
259 | #define __field(_type, _item) | |
260 | ||
261 | #undef __field_ext | |
262 | #define __field_ext(_type, _item, _filter_type) | |
263 | ||
264 | #undef __array | |
265 | #define __array(_type, _item, _length) | |
266 | ||
267 | #undef __dynamic_array | |
268 | #define __dynamic_array(_type, _item, _length) | |
269 | ||
270 | #undef __string | |
271 | #define __string(_item, _src) | |
272 | ||
273 | #undef TP_PROTO | |
274 | #define TP_PROTO(args...) | |
275 | ||
276 | #undef TP_ARGS | |
277 | #define TP_ARGS(args...) | |
278 | ||
279 | #undef TP_STRUCT__entry | |
280 | #define TP_STRUCT__entry(args...) | |
281 | ||
282 | #undef TP_fast_assign | |
283 | #define TP_fast_assign(args...) | |
284 | ||
285 | #undef TP_printk | |
286 | #define TP_printk(args...) | |
287 | ||
288 | #undef DECLARE_EVENT_CLASS | |
289 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ | |
290 | static void () | |
291 | ||
292 | #undef DEFINE_EVENT | |
293 | #define DEFINE_EVENT(template, name, proto, args) | |
294 | ||
295 | #undef DEFINE_EVENT_PRINT | |
296 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
297 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
298 | ||
299 | /* Callbacks are meaningless to LTTng. */ | |
300 | #undef TRACE_EVENT_FN | |
301 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ | |
302 | assign, print, reg, unreg) \ | |
303 | TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ | |
304 | PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ | |
305 | ||
306 | #define TP_ID1(_token, _system) _token##_system | |
307 | #define TP_ID(_token, _system) TP_ID1(_token, _system) | |
308 | #define module_init_eval1(_token, _system) module_init(_token##_system) | |
309 | #define module_init_eval(_token, _system) module_init_eval1(_token, _system) | |
310 | #define module_exit_eval1(_token, _system) module_exit(_token##_system) | |
311 | #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system) | |
312 | ||
313 | static void *TP_ID(__lttng_seq_start__, TRACE_SYSTEM)(struct seq_file *m, | |
314 | loff_t *pos) | |
315 | { | |
316 | const struct lttng_event_desc *desc = &TP_ID(__event_desc___, TRACE_SYSTEM)[*pos]; | |
317 | ||
318 | if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)[ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1]) | |
319 | return NULL; | |
320 | return (void *) desc; | |
321 | } | |
322 | ||
323 | static void *TP_ID(__lttng_seq_next__, TRACE_SYSTEM)(struct seq_file *m, | |
324 | void *p, loff_t *ppos) | |
325 | { | |
326 | const struct lttng_event_desc *desc = &TP_ID(__event_desc___, TRACE_SYSTEM)[++(*ppos)]; | |
327 | ||
328 | if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM)[ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1]) | |
329 | return NULL; | |
330 | return (void *) desc; | |
331 | } | |
332 | ||
333 | static void TP_ID(__lttng_seq_stop__, TRACE_SYSTEM)(struct seq_file *m, | |
334 | void *p) | |
335 | { | |
336 | } | |
337 | ||
338 | static int TP_ID(__lttng_seq_show__, TRACE_SYSTEM)(struct seq_file *m, | |
339 | void *p) | |
340 | { | |
341 | const struct lttng_event_desc *desc = p; | |
342 | int i; | |
343 | ||
344 | seq_printf(m, "event {\n" | |
345 | "\tname = %s;\n" | |
346 | "\tid = UNKNOWN;\n" | |
347 | "\tstream = UNKNOWN;\n" | |
348 | "\tfields = {\n", | |
349 | desc->name); | |
350 | for (i = 0; i < desc->nr_fields; i++) { | |
351 | if (desc->fields[i].type.name) /* Named type */ | |
352 | seq_printf(m, "\t\t%s", | |
353 | desc->fields[i].type.name); | |
354 | else /* Nameless type */ | |
355 | lttng_print_event_type(m, 2, &desc->fields[i].type); | |
356 | seq_printf(m, " %s;\n", desc->fields[i].name); | |
357 | } | |
358 | seq_printf(m, "\t};\n"); | |
359 | seq_printf(m, "};\n"); | |
360 | return 0; | |
361 | } | |
362 | ||
363 | static const | |
364 | struct seq_operations TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM) = { | |
365 | .start = TP_ID(__lttng_seq_start__, TRACE_SYSTEM), | |
366 | .next = TP_ID(__lttng_seq_next__, TRACE_SYSTEM), | |
367 | .stop = TP_ID(__lttng_seq_stop__, TRACE_SYSTEM), | |
368 | .show = TP_ID(__lttng_seq_show__, TRACE_SYSTEM), | |
369 | }; | |
370 | ||
371 | static int | |
372 | TP_ID(__lttng_types_open__, TRACE_SYSTEM)(struct inode *inode, struct file *file) | |
373 | { | |
374 | return seq_open(file, &TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM)); | |
375 | } | |
376 | ||
377 | static const struct file_operations TP_ID(__lttng_types_fops__, TRACE_SYSTEM) = { | |
378 | .open = TP_ID(__lttng_types_open__, TRACE_SYSTEM), | |
379 | .read = seq_read, | |
380 | .llseek = seq_lseek, | |
381 | .release = seq_release_private, | |
382 | }; | |
383 | ||
384 | static struct dentry *TP_ID(__lttng_types_dentry__, TRACE_SYSTEM); | |
385 | ||
386 | static int TP_ID(__lttng_types_init__, TRACE_SYSTEM)(void) | |
387 | { | |
388 | int ret = 0; | |
389 | ||
390 | TP_ID(__lttng_types_dentry__, TRACE_SYSTEM) = | |
391 | debugfs_create_file("lttng-events-" __stringify(TRACE_SYSTEM), S_IWUSR, | |
392 | NULL, NULL, &TP_ID(__lttng_types_fops__, TRACE_SYSTEM)); | |
393 | if (IS_ERR(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) | |
394 | || !TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) { | |
395 | printk(KERN_ERR "Error creating LTTng type export file\n"); | |
396 | ret = -ENOMEM; | |
397 | goto error; | |
398 | } | |
399 | error: | |
400 | return ret; | |
401 | } | |
402 | ||
403 | module_init_eval(__lttng_types_init__, TRACE_SYSTEM); | |
404 | ||
405 | static void TP_ID(__lttng_types_exit__, TRACE_SYSTEM)(void) | |
406 | { | |
407 | debugfs_remove(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)); | |
408 | } | |
409 | ||
410 | module_exit_eval(__lttng_types_exit__, TRACE_SYSTEM); | |
411 | ||
412 | #undef module_init_eval | |
413 | #undef module_exit_eval | |
414 | #undef TP_ID1 | |
415 | #undef TP_ID | |
416 | ||
1d12cebd MD |
417 | |
418 | #if 0 | |
40652b65 MD |
419 | |
420 | /* | |
299338c8 | 421 | * Stage 3 of the trace events. |
40652b65 MD |
422 | * |
423 | * Create a static inline function that calculates the event size. |
424 | */ | |
425 | ||
426 | ||
427 | ||
428 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
429 | ||
430 | /* | |
299338c8 | 431 | * Stage 4 of the trace events. |
40652b65 MD |
432 | * |
433 | * Create the probe function: call the event size calculation and write event data |
434 | * into the buffer. |
435 | */ | |
436 | ||
437 | ||
438 | ||
439 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
440 | ||
441 | ||
442 | ||
443 | ||
444 | #include <linux/ftrace_event.h> | |
445 | ||
446 | /* | |
447 | * DECLARE_EVENT_CLASS can be used to add generic function |
448 | * handlers for events. That is, if all events have the same |
449 | * parameters and just have distinct trace points. | |
450 | * Each tracepoint can be defined with DEFINE_EVENT and that | |
451 | * will map the DECLARE_EVENT_CLASS to the tracepoint. | |
452 | * | |
453 | * TRACE_EVENT is a one to one mapping between tracepoint and template. | |
454 | */ | |
455 | #undef TRACE_EVENT | |
456 | #define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ | |
457 | DECLARE_EVENT_CLASS(name, \ | |
458 | PARAMS(proto), \ | |
459 | PARAMS(args), \ | |
460 | PARAMS(tstruct), \ | |
461 | PARAMS(assign), \ | |
462 | PARAMS(print)); \ | |
463 | DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); | |
464 | ||
465 | ||
466 | #undef __field | |
467 | #define __field(type, item) type item; | |
468 | ||
469 | #undef __field_ext | |
470 | #define __field_ext(type, item, filter_type) type item; | |
471 | ||
472 | #undef __array | |
473 | #define __array(type, item, len) type item[len]; | |
474 | ||
475 | #undef __dynamic_array | |
476 | #define __dynamic_array(type, item, len) u32 __data_loc_##item; | |
477 | ||
478 | #undef __string | |
479 | #define __string(item, src) __dynamic_array(char, item, -1) | |
480 | ||
481 | #undef TP_STRUCT__entry | |
482 | #define TP_STRUCT__entry(args...) args | |
483 | ||
484 | #undef DECLARE_EVENT_CLASS | |
485 | #define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ | |
486 | struct ftrace_raw_##name { \ | |
487 | struct trace_entry ent; \ | |
488 | tstruct \ | |
489 | char __data[0]; \ | |
490 | }; \ | |
491 | \ | |
492 | static struct ftrace_event_class event_class_##name; | |
493 | ||
494 | #undef DEFINE_EVENT | |
495 | #define DEFINE_EVENT(template, name, proto, args) \ | |
496 | static struct ftrace_event_call __used \ | |
497 | __attribute__((__aligned__(4))) event_##name | |
498 | ||
499 | #undef DEFINE_EVENT_PRINT | |
500 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
501 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
502 | ||
503 | /* Callbacks are meaningless to ftrace. */ | |
504 | #undef TRACE_EVENT_FN | |
505 | #define TRACE_EVENT_FN(name, proto, args, tstruct, \ | |
506 | assign, print, reg, unreg) \ | |
507 | TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ | |
508 | PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ | |
509 | ||
510 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
511 | ||
512 | ||
513 | /* | |
514 | * Stage 2 of the trace events. | |
515 | * | |
516 | * Create a static inline function that calculates the event size. |
517 | */ | |
518 | ||
519 | #undef __field | |
520 | #define __field(type, item) | |
521 | ||
522 | #undef __field_ext | |
523 | #define __field_ext(type, item, filter_type) | |
524 | ||
525 | #undef __array | |
526 | #define __array(type, item, len) | |
527 | ||
528 | #undef __dynamic_array | |
529 | #define __dynamic_array(type, item, len) u32 item; | |
530 | ||
531 | #undef __string | |
532 | #define __string(item, src) __dynamic_array(char, item, -1) | |
533 | ||
534 | #undef DECLARE_EVENT_CLASS | |
535 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
536 | struct ftrace_data_offsets_##call { \ | |
537 | tstruct; \ | |
538 | }; | |
539 | ||
540 | #undef DEFINE_EVENT | |
541 | #define DEFINE_EVENT(template, name, proto, args) | |
542 | ||
543 | #undef DEFINE_EVENT_PRINT | |
544 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
545 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
546 | ||
547 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
548 | ||
549 | /* | |
550 | * Stage 3 of the trace events. | |
551 | * | |
552 | * Create the probe function: call the event size calculation and write event data |
553 | * into the buffer. |
554 | */ | |
555 | ||
556 | #undef __entry | |
557 | #define __entry field | |
558 | ||
559 | #undef TP_printk | |
560 | #define TP_printk(fmt, args...) fmt "\n", args | |
561 | ||
562 | #undef __get_dynamic_array | |
563 | #define __get_dynamic_array(field) \ | |
564 | ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) | |
565 | ||
566 | #undef __get_str | |
567 | #define __get_str(field) (char *)__get_dynamic_array(field) | |
568 | ||
569 | #undef __print_flags | |
570 | #define __print_flags(flag, delim, flag_array...) \ | |
571 | ({ \ | |
572 | static const struct trace_print_flags __flags[] = \ | |
573 | { flag_array, { -1, NULL }}; \ | |
574 | ftrace_print_flags_seq(p, delim, flag, __flags); \ | |
575 | }) | |
576 | ||
577 | #undef __print_symbolic | |
578 | #define __print_symbolic(value, symbol_array...) \ | |
579 | ({ \ | |
580 | static const struct trace_print_flags symbols[] = \ | |
581 | { symbol_array, { -1, NULL }}; \ | |
582 | ftrace_print_symbols_seq(p, value, symbols); \ | |
583 | }) | |
584 | ||
585 | #undef __print_hex | |
586 | #define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) | |
587 | ||
588 | #undef DECLARE_EVENT_CLASS | |
589 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
590 | static notrace enum print_line_t \ | |
591 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ | |
592 | struct trace_event *trace_event) \ | |
593 | { \ | |
594 | struct ftrace_event_call *event; \ | |
595 | struct trace_seq *s = &iter->seq; \ | |
596 | struct ftrace_raw_##call *field; \ | |
597 | struct trace_entry *entry; \ | |
598 | struct trace_seq *p = &iter->tmp_seq; \ | |
599 | int ret; \ | |
600 | \ | |
601 | event = container_of(trace_event, struct ftrace_event_call, \ | |
602 | event); \ | |
603 | \ | |
604 | entry = iter->ent; \ | |
605 | \ | |
606 | if (entry->type != event->event.type) { \ | |
607 | WARN_ON_ONCE(1); \ | |
608 | return TRACE_TYPE_UNHANDLED; \ | |
609 | } \ | |
610 | \ | |
611 | field = (typeof(field))entry; \ | |
612 | \ | |
613 | trace_seq_init(p); \ | |
614 | ret = trace_seq_printf(s, "%s: ", event->name); \ | |
615 | if (ret) \ | |
616 | ret = trace_seq_printf(s, print); \ | |
617 | if (!ret) \ | |
618 | return TRACE_TYPE_PARTIAL_LINE; \ | |
619 | \ | |
620 | return TRACE_TYPE_HANDLED; \ | |
621 | } \ | |
622 | static struct trace_event_functions ftrace_event_type_funcs_##call = { \ | |
623 | .trace = ftrace_raw_output_##call, \ | |
624 | }; | |
625 | ||
626 | #undef DEFINE_EVENT_PRINT | |
627 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | |
628 | static notrace enum print_line_t \ | |
629 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ | |
630 | struct trace_event *event) \ | |
631 | { \ | |
632 | struct trace_seq *s = &iter->seq; \ | |
633 | struct ftrace_raw_##template *field; \ | |
634 | struct trace_entry *entry; \ | |
635 | struct trace_seq *p = &iter->tmp_seq; \ | |
636 | int ret; \ | |
637 | \ | |
638 | entry = iter->ent; \ | |
639 | \ | |
640 | if (entry->type != event_##call.event.type) { \ | |
641 | WARN_ON_ONCE(1); \ | |
642 | return TRACE_TYPE_UNHANDLED; \ | |
643 | } \ | |
644 | \ | |
645 | field = (typeof(field))entry; \ | |
646 | \ | |
647 | trace_seq_init(p); \ | |
648 | ret = trace_seq_printf(s, "%s: ", #call); \ | |
649 | if (ret) \ | |
650 | ret = trace_seq_printf(s, print); \ | |
651 | if (!ret) \ | |
652 | return TRACE_TYPE_PARTIAL_LINE; \ | |
653 | \ | |
654 | return TRACE_TYPE_HANDLED; \ | |
655 | } \ | |
656 | static struct trace_event_functions ftrace_event_type_funcs_##call = { \ | |
657 | .trace = ftrace_raw_output_##call, \ | |
658 | }; | |
659 | ||
660 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
661 | ||
662 | #undef __field_ext | |
663 | #define __field_ext(type, item, filter_type) \ | |
664 | ret = trace_define_field(event_call, #type, #item, \ | |
665 | offsetof(typeof(field), item), \ | |
666 | sizeof(field.item), \ | |
667 | is_signed_type(type), filter_type); \ | |
668 | if (ret) \ | |
669 | return ret; | |
670 | ||
671 | #undef __field | |
672 | #define __field(type, item) __field_ext(type, item, FILTER_OTHER) | |
673 | ||
674 | #undef __array | |
675 | #define __array(type, item, len) \ | |
676 | BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ | |
677 | ret = trace_define_field(event_call, #type "[" #len "]", #item, \ | |
678 | offsetof(typeof(field), item), \ | |
679 | sizeof(field.item), \ | |
680 | is_signed_type(type), FILTER_OTHER); \ | |
681 | if (ret) \ | |
682 | return ret; | |
683 | ||
684 | #undef __dynamic_array | |
685 | #define __dynamic_array(type, item, len) \ | |
686 | ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ | |
687 | offsetof(typeof(field), __data_loc_##item), \ | |
688 | sizeof(field.__data_loc_##item), \ | |
689 | is_signed_type(type), FILTER_OTHER); | |
690 | ||
691 | #undef __string | |
692 | #define __string(item, src) __dynamic_array(char, item, -1) | |
693 | ||
694 | #undef DECLARE_EVENT_CLASS | |
695 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ | |
696 | static int notrace \ | |
697 | ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ | |
698 | { \ | |
699 | struct ftrace_raw_##call field; \ | |
700 | int ret; \ | |
701 | \ | |
702 | tstruct; \ | |
703 | \ | |
704 | return ret; \ | |
705 | } | |
706 | ||
707 | #undef DEFINE_EVENT | |
708 | #define DEFINE_EVENT(template, name, proto, args) | |
709 | ||
710 | #undef DEFINE_EVENT_PRINT | |
711 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
712 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
713 | ||
714 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
715 | ||
716 | /* | |
717 | * remember the offset of each array from the beginning of the event. | |
718 | */ | |
719 | ||
720 | #undef __entry | |
721 | #define __entry entry | |
722 | ||
723 | #undef __field | |
724 | #define __field(type, item) | |
725 | ||
726 | #undef __field_ext | |
727 | #define __field_ext(type, item, filter_type) | |
728 | ||
729 | #undef __array | |
730 | #define __array(type, item, len) | |
731 | ||
732 | #undef __dynamic_array | |
733 | #define __dynamic_array(type, item, len) \ | |
734 | __data_offsets->item = __data_size + \ | |
735 | offsetof(typeof(*entry), __data); \ | |
736 | __data_offsets->item |= (len * sizeof(type)) << 16; \ | |
737 | __data_size += (len) * sizeof(type); | |
738 | ||
739 | #undef __string | |
740 | #define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) | |
741 | ||
742 | #undef DECLARE_EVENT_CLASS | |
743 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
744 | static inline notrace int ftrace_get_offsets_##call( \ | |
745 | struct ftrace_data_offsets_##call *__data_offsets, proto) \ | |
746 | { \ | |
747 | int __data_size = 0; \ | |
748 | struct ftrace_raw_##call __maybe_unused *entry; \ | |
749 | \ | |
750 | tstruct; \ | |
751 | \ | |
752 | return __data_size; \ | |
753 | } | |
754 | ||
755 | #undef DEFINE_EVENT | |
756 | #define DEFINE_EVENT(template, name, proto, args) | |
757 | ||
758 | #undef DEFINE_EVENT_PRINT | |
759 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
760 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
761 | ||
762 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
763 | ||
764 | /* | |
765 | * Stage 4 of the trace events. | |
766 | * | |
767 | * Override the macros in <trace/trace_events.h> to include the following: | |
768 | * | |
769 | * For those macros defined with TRACE_EVENT: | |
770 | * | |
771 | * static struct ftrace_event_call event_<call>; | |
772 | * | |
773 | * static void ftrace_raw_event_<call>(void *__data, proto) | |
774 | * { | |
775 | * struct ftrace_event_call *event_call = __data; | |
776 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | |
777 | * struct ring_buffer_event *event; | |
778 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | |
779 | * struct ring_buffer *buffer; | |
780 | * unsigned long irq_flags; | |
781 | * int __data_size; | |
782 | * int pc; | |
783 | * | |
784 | * local_save_flags(irq_flags); | |
785 | * pc = preempt_count(); | |
786 | * | |
787 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | |
788 | * | |
789 | * event = trace_current_buffer_lock_reserve(&buffer, | |
790 | * event_<call>->event.type, | |
791 | * sizeof(*entry) + __data_size, | |
792 | * irq_flags, pc); | |
793 | * if (!event) | |
794 | * return; | |
795 | * entry = ring_buffer_event_data(event); | |
796 | * | |
797 | * { <assign>; } <-- Here we assign the entries by the __field and | |
798 | * __array macros. | |
799 | * | |
800 | * if (!filter_current_check_discard(buffer, event_call, entry, event)) | |
801 | * trace_current_buffer_unlock_commit(buffer, | |
802 | * event, irq_flags, pc); | |
803 | * } | |
804 | * | |
805 | * static struct trace_event ftrace_event_type_<call> = { | |
806 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | |
807 | * }; | |
808 | * | |
809 | * static const char print_fmt_<call>[] = <TP_printk>; | |
810 | * | |
811 | * static struct ftrace_event_class __used event_class_<template> = { | |
812 | * .system = "<system>", | |
813 | * .define_fields = ftrace_define_fields_<call>, | |
814 | * .fields = LIST_HEAD_INIT(event_class_##call.fields), | |
815 | * .raw_init = trace_event_raw_init, | |
816 | * .probe = ftrace_raw_event_##call, | |
817 | * .reg = ftrace_event_reg, | |
818 | * }; | |
819 | * | |
820 | * static struct ftrace_event_call __used | |
821 | * __attribute__((__aligned__(4))) | |
822 | * __attribute__((section("_ftrace_events"))) event_<call> = { | |
823 | * .name = "<call>", | |
824 | * .class = event_class_<template>, | |
825 | * .event = &ftrace_event_type_<call>, | |
826 | * .print_fmt = print_fmt_<call>, | |
827 | * }; | |
828 | * | |
829 | */ | |
830 | ||
831 | #ifdef CONFIG_PERF_EVENTS | |
832 | ||
833 | #define _TRACE_PERF_PROTO(call, proto) \ | |
834 | static notrace void \ | |
835 | perf_trace_##call(void *__data, proto); | |
836 | ||
837 | #define _TRACE_PERF_INIT(call) \ | |
838 | .perf_probe = perf_trace_##call, | |
839 | ||
840 | #else | |
841 | #define _TRACE_PERF_PROTO(call, proto) | |
842 | #define _TRACE_PERF_INIT(call) | |
843 | #endif /* CONFIG_PERF_EVENTS */ | |
844 | ||
845 | #undef __entry | |
846 | #define __entry entry | |
847 | ||
848 | #undef __field | |
849 | #define __field(type, item) | |
850 | ||
851 | #undef __array | |
852 | #define __array(type, item, len) | |
853 | ||
854 | #undef __dynamic_array | |
855 | #define __dynamic_array(type, item, len) \ | |
856 | __entry->__data_loc_##item = __data_offsets.item; | |
857 | ||
858 | #undef __string | |
859 | #define __string(item, src) __dynamic_array(char, item, -1) \ | |
860 | ||
861 | #undef __assign_str | |
862 | #define __assign_str(dst, src) \ | |
863 | strcpy(__get_str(dst), src); | |
864 | ||
865 | #undef TP_fast_assign | |
866 | #define TP_fast_assign(args...) args | |
867 | ||
868 | #undef TP_perf_assign | |
869 | #define TP_perf_assign(args...) | |
870 | ||
/*
 * Expand DECLARE_EVENT_CLASS into the ftrace ring-buffer probe
 * ftrace_raw_event_<call>():
 *  - '__data' is the struct ftrace_event_call registered for the event,
 *    'proto' the tracepoint's own arguments;
 *  - a record of sizeof(*entry) + __data_size bytes is reserved, where
 *    __data_size covers the variable-size payload computed by
 *    ftrace_get_offsets_<call>();
 *  - the 'tstruct' fragment fills the __data_loc words and the 'assign'
 *    fragment fills the fields, then the record is committed unless the
 *    event filter discards it.
 */
871 | #undef DECLARE_EVENT_CLASS | |
872 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
873 | \ | |
874 | static notrace void \ | |
875 | ftrace_raw_event_##call(void *__data, proto) \ | |
876 | { \ | |
877 | struct ftrace_event_call *event_call = __data; \ | |
878 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | |
879 | struct ring_buffer_event *event; \ | |
880 | struct ftrace_raw_##call *entry; \ | |
881 | struct ring_buffer *buffer; \ | |
882 | unsigned long irq_flags; \ | |
883 | int __data_size; \ | |
884 | int pc; \ | |
885 | \ | |
886 | local_save_flags(irq_flags); \ | |
887 | pc = preempt_count(); \ | |
888 | \ | |
889 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | |
890 | \ | |
891 | event = trace_current_buffer_lock_reserve(&buffer, \ | |
892 | event_call->event.type, \ | |
893 | sizeof(*entry) + __data_size, \ | |
894 | irq_flags, pc); \ | |
895 | if (!event) \ | |
896 | return; \ | |
897 | entry = ring_buffer_event_data(event); \ | |
898 | \ | |
899 | tstruct \ | |
900 | \ | |
901 | { assign; } \ | |
902 | \ | |
903 | if (!filter_current_check_discard(buffer, event_call, entry, event)) \ | |
904 | trace_nowake_buffer_unlock_commit(buffer, \ | |
905 | event, irq_flags, pc); \ | |
906 | } | |
907 | /* | |
908 | * The ftrace_test_probe is compiled out, it is only here as a build time check | |
909 | * to make sure that if the tracepoint handling changes, the ftrace probe will | |
910 | * fail to compile unless it too is updated. | |
911 | */ | |
912 | ||
/*
 * Compile-time only (see the comment above): the inline function is never
 * called, but forces a type check of the class probe against this
 * tracepoint's expected callback signature.
 */
913 | #undef DEFINE_EVENT | |
914 | #define DEFINE_EVENT(template, call, proto, args) \ | |
915 | static inline void ftrace_test_probe_##call(void) \ | |
916 | { \ | |
917 | check_trace_callback_type_##call(ftrace_raw_event_##template); \ | |
918 | } | |
919 | ||
/* Nothing to emit for DEFINE_EVENT_PRINT in this pass; it is handled by the registration pass further down. */
920 | #undef DEFINE_EVENT_PRINT | |
921 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) | |
922 | ||
923 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
924 | ||
/*
 * Print-format pass: __entry expands to the literal token REC so that
 * TP_printk() can turn the event's format into a static string of the
 * form "\"<fmt>\", <stringified args>".  The accessor helpers are
 * undefined because they only make sense inside a probe body.
 */
925 | #undef __entry | |
926 | #define __entry REC | |
927 | ||
928 | #undef __print_flags | |
929 | #undef __print_symbolic | |
930 | #undef __get_dynamic_array | |
931 | #undef __get_str | |
932 | ||
933 | #undef TP_printk | |
934 | #define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args) | |
935 | ||
/*
 * Registration pass, class side: emit one ftrace_event_class per
 * DECLARE_EVENT_CLASS, wiring together the callbacks generated by the
 * earlier passes (field definition, raw init, the ring-buffer probe) and
 * the perf hooks contributed by _TRACE_PERF_INIT when perf is enabled.
 */
936 | #undef DECLARE_EVENT_CLASS | |
937 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
938 | _TRACE_PERF_PROTO(call, PARAMS(proto)); \ | |
939 | static const char print_fmt_##call[] = print; \ | |
940 | static struct ftrace_event_class __used event_class_##call = { \ | |
941 | .system = __stringify(TRACE_SYSTEM), \ | |
942 | .define_fields = ftrace_define_fields_##call, \ | |
943 | .fields = LIST_HEAD_INIT(event_class_##call.fields),\ | |
944 | .raw_init = trace_event_raw_init, \ | |
945 | .probe = ftrace_raw_event_##call, \ | |
946 | .reg = ftrace_event_reg, \ | |
947 | _TRACE_PERF_INIT(call) \ | |
948 | }; | |
949 | ||
/*
 * Registration pass, event side: emit the per-event ftrace_event_call in
 * the "_ftrace_events" linker section, pointing at the template's class,
 * output functions and print format.
 */
950 | #undef DEFINE_EVENT | |
951 | #define DEFINE_EVENT(template, call, proto, args) \ | |
952 | \ | |
953 | static struct ftrace_event_call __used \ | |
954 | __attribute__((__aligned__(4))) \ | |
955 | __attribute__((section("_ftrace_events"))) event_##call = { \ | |
956 | .name = #call, \ | |
957 | .class = &event_class_##template, \ | |
958 | .event.funcs = &ftrace_event_type_funcs_##template, \ | |
959 | .print_fmt = print_fmt_##template, \ | |
960 | }; | |
961 | ||
/*
 * Same as DEFINE_EVENT above, except the event gets its own print format
 * and output functions (##call) rather than the template's (##template).
 * NOTE(review): this initializer ends with '}' while DEFINE_EVENT ends
 * with '};' — presumably the invocation site supplies the ';' here;
 * confirm against the users of DEFINE_EVENT_PRINT before "fixing".
 */
963 | #undef DEFINE_EVENT_PRINT | |
964 | #define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ | |
965 | \ | |
966 | static const char print_fmt_##call[] = print; \ | |
967 | \ | |
968 | static struct ftrace_event_call __used \ | |
969 | __attribute__((__aligned__(4))) \ | |
970 | __attribute__((section("_ftrace_events"))) event_##call = { \ | |
971 | .name = #call, \ | |
972 | .class = &event_class_##template, \ | |
973 | .event.funcs = &ftrace_event_type_funcs_##call, \ | |
974 | .print_fmt = print_fmt_##call, \ | |
975 | } | |
975 | ||
976 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
977 | ||
978 | /* | |
979 | * Define the insertion callback to perf events | |
980 | * | |
981 | * The job is very similar to ftrace_raw_event_<call> except that we don't | |
982 | * insert in the ring buffer but in a perf counter. | |
983 | * | |
984 | * static void ftrace_perf_<call>(proto) | |
985 | * { | |
986 | * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; | |
987 | * struct ftrace_event_call *event_call = &event_<call>; | |
988 | * extern void perf_tp_event(int, u64, u64, void *, int); | |
989 | * struct ftrace_raw_##call *entry; | |
990 | * struct perf_trace_buf *trace_buf; | |
991 | * u64 __addr = 0, __count = 1; | |
992 | * unsigned long irq_flags; | |
993 | * struct trace_entry *ent; | |
994 | * int __entry_size; | |
995 | * int __data_size; | |
996 | * int __cpu; | |
997 | * int pc; | |
998 | * | |
999 | * pc = preempt_count(); | |
1000 | * | |
1001 | * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); | |
1002 | * | |
1003 | * // Below we want to get the aligned size by taking into account | |
1004 | * // the u32 field that will later store the buffer size | |
1005 | * __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32), | |
1006 | * sizeof(u64)); | |
1007 | * __entry_size -= sizeof(u32); | |
1008 | * | |
1009 | * // Protect the non nmi buffer | |
1010 | * // This also protects the rcu read side | |
1011 | * local_irq_save(irq_flags); | |
1012 | * __cpu = smp_processor_id(); | |
1013 | * | |
1014 | * if (in_nmi()) | |
1015 | * trace_buf = rcu_dereference_sched(perf_trace_buf_nmi); | |
1016 | * else | |
1017 | * trace_buf = rcu_dereference_sched(perf_trace_buf); | |
1018 | * | |
1019 | * if (!trace_buf) | |
1020 | * goto end; | |
1021 | * | |
1022 | * trace_buf = per_cpu_ptr(trace_buf, __cpu); | |
1023 | * | |
1024 | * // Avoid recursion from perf that could mess up the buffer | |
1025 | * if (trace_buf->recursion++) | |
1026 | * goto end_recursion; | |
1027 | * | |
1028 | * raw_data = trace_buf->buf; | |
1029 | * | |
1030 | * // Make recursion update visible before entering perf_tp_event | |
1031 | * // so that we protect from perf recursions. | |
1032 | * | |
1033 | * barrier(); | |
1034 | * | |
1035 | * //zero dead bytes from alignment to avoid stack leak to userspace: | |
1036 | * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL; | |
1037 | * entry = (struct ftrace_raw_<call> *)raw_data; | |
1038 | * ent = &entry->ent; | |
1039 | * tracing_generic_entry_update(ent, irq_flags, pc); | |
1040 | * ent->type = event_call->id; | |
1041 | * | |
1042 | * <tstruct> <- do some jobs with dynamic arrays | |
1043 | * | |
1044 | * <assign> <- assign our values | |
1045 | * | |
1046 | * perf_tp_event(event_call->id, __addr, __count, entry, | |
1047 | * __entry_size); <- submit them to perf counter | |
1048 | * | |
1049 | * } | |
1050 | */ | |
1051 | ||
1052 | #ifdef CONFIG_PERF_EVENTS | |
1053 | ||
/*
 * Perf-probe helpers.  A __data_loc word packs the payload offset in its
 * low 16 bits, hence the 0xffff mask when locating a dynamic array
 * relative to the entry.  __perf_addr()/__perf_count() let an event's
 * assign block override the __addr/__count locals that the probe below
 * defaults to 0 and 1.
 */
1054 | #undef __entry | |
1055 | #define __entry entry | |
1056 | ||
1057 | #undef __get_dynamic_array | |
1058 | #define __get_dynamic_array(field) \ | |
1059 | ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) | |
1060 | ||
1061 | #undef __get_str | |
1062 | #define __get_str(field) (char *)__get_dynamic_array(field) | |
1063 | ||
1064 | #undef __perf_addr | |
1065 | #define __perf_addr(a) __addr = (a) | |
1066 | ||
1067 | #undef __perf_count | |
1068 | #define __perf_count(c) __count = (c) | |
1069 | ||
/*
 * Perf flavour of the probe, perf_trace_<call>(): instead of reserving
 * space in the ftrace ring buffer, the record is staged through
 * perf_trace_buf_prepare() and handed to perf_trace_buf_submit() together
 * with the caller's saved registers.  The record size is rounded up to a
 * multiple of u64 after adding room for a u32 size word, which is then
 * subtracted back out — see the commented walkthrough above — and is
 * bounded by PERF_MAX_TRACE_SIZE (WARN_ONCE + drop when exceeded).
 */
1070 | #undef DECLARE_EVENT_CLASS | |
1071 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | |
1072 | static notrace void \ | |
1073 | perf_trace_##call(void *__data, proto) \ | |
1074 | { \ | |
1075 | struct ftrace_event_call *event_call = __data; \ | |
1076 | struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ | |
1077 | struct ftrace_raw_##call *entry; \ | |
1078 | struct pt_regs __regs; \ | |
1079 | u64 __addr = 0, __count = 1; \ | |
1080 | struct hlist_head *head; \ | |
1081 | int __entry_size; \ | |
1082 | int __data_size; \ | |
1083 | int rctx; \ | |
1084 | \ | |
1085 | perf_fetch_caller_regs(&__regs); \ | |
1086 | \ | |
1087 | __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ | |
1088 | __entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\ | |
1089 | sizeof(u64)); \ | |
1090 | __entry_size -= sizeof(u32); \ | |
1091 | \ | |
1092 | if (WARN_ONCE(__entry_size > PERF_MAX_TRACE_SIZE, \ | |
1093 | "profile buffer not large enough")) \ | |
1094 | return; \ | |
1095 | \ | |
1096 | entry = (struct ftrace_raw_##call *)perf_trace_buf_prepare( \ | |
1097 | __entry_size, event_call->event.type, &__regs, &rctx); \ | |
1098 | if (!entry) \ | |
1099 | return; \ | |
1100 | \ | |
1101 | tstruct \ | |
1102 | \ | |
1103 | { assign; } \ | |
1104 | \ | |
1105 | head = this_cpu_ptr(event_call->perf_events); \ | |
1106 | perf_trace_buf_submit(entry, __entry_size, rctx, __addr, \ | |
1107 | __count, &__regs, head); \ | |
1108 | } | |
1109 | ||
1110 | /* | |
1111 | * This part is compiled out, it is only here as a build time check | |
1112 | * to make sure that if the tracepoint handling changes, the | |
1113 | * perf probe will fail to compile unless it too is updated. | |
1114 | */ | |
/*
 * Compile-time only (see the comment above): type-checks the perf probe
 * of the template class against this tracepoint's callback signature.
 */
1115 | #undef DEFINE_EVENT | |
1116 | #define DEFINE_EVENT(template, call, proto, args) \ | |
1117 | static inline void perf_test_probe_##call(void) \ | |
1118 | { \ | |
1119 | check_trace_callback_type_##call(perf_trace_##template); \ | |
1120 | } | |
1121 | ||
1122 | ||
/* The print format is irrelevant to perf: reuse the plain DEFINE_EVENT build check. */
1123 | #undef DEFINE_EVENT_PRINT | |
1124 | #define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ | |
1125 | DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) | |
1126 | ||
1127 | #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) | |
1128 | #endif /* CONFIG_PERF_EVENTS */ | |
1129 | ||
1130 | #undef _TRACE_PROFILE_INIT | |
1d12cebd | 1131 | #endif //0 |