Rename probes/lttng-events.h to probes/lttng-tracepoint-event-impl.h
1 /*
2 * lttng-tracepoint-event-impl.h
3 *
4 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
5 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <linux/uaccess.h>
23 #include <linux/debugfs.h>
24 #include <linux/rculist.h>
25 #include <asm/byteorder.h>
26 #include "lttng.h"
27 #include "lttng-types.h"
28 #include "lttng-probe-user.h"
29 #include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
30 #include "../wrapper/ringbuffer/frontend_types.h"
31 #include "../wrapper/rcu.h"
32 #include "../lttng-events.h"
33 #include "../lttng-tracer-core.h"
34
35 /*
36 * Macro declarations used for all stages.
37 */
38
39 /*
40 * LTTng name mapping macros. LTTng remaps some of the kernel events to
41 * enforce name-spacing.
42 */
43 #undef LTTNG_TRACEPOINT_EVENT_MAP
44 #define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
45 LTTNG_TRACEPOINT_EVENT_CLASS(map, \
46 PARAMS(proto), \
47 PARAMS(args), \
48 PARAMS(fields)) \
49 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
50
51 #undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
52 #define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
53 LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
54 PARAMS(fields)) \
55 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)
56
57 #undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
58 #define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
59 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
60 PARAMS(proto), \
61 PARAMS(args), \
62 PARAMS(_locvar), \
63 PARAMS(_code_pre), \
64 PARAMS(fields), \
65 PARAMS(_code_post)) \
66 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
67
68 #undef LTTNG_TRACEPOINT_EVENT_CODE
69 #define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
70 LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
71 PARAMS(proto), \
72 PARAMS(args), \
73 PARAMS(_locvar), \
74 PARAMS(_code_pre), \
75 PARAMS(fields), \
76 PARAMS(_code_post))
77
78 /*
79 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to declare a generic event
80 * handler shared by several events, that is, when all events have the
81 * same parameters and only differ by their tracepoint. Each tracepoint
82 * is then defined with LTTNG_TRACEPOINT_EVENT_INSTANCE, which maps the
83 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
84 *
85 * LTTNG_TRACEPOINT_EVENT is a one-to-one mapping between a tracepoint
86 * and its template.
87 */
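/*
 * For illustration only: a hypothetical instrumentation header could share
 * one event class between two made-up tracepoints roughly as follows (the
 * ctf_integer() field macro is assumed to be provided by the field
 * definition headers included below):
 *
 *	LTTNG_TRACEPOINT_EVENT_CLASS(sample_class,
 *		TP_PROTO(int foo),
 *		TP_ARGS(foo),
 *		TP_FIELDS(ctf_integer(int, foo, foo))
 *	)
 *	LTTNG_TRACEPOINT_EVENT_INSTANCE(sample_class, sample_first,
 *		TP_PROTO(int foo), TP_ARGS(foo))
 *	LTTNG_TRACEPOINT_EVENT_INSTANCE(sample_class, sample_second,
 *		TP_PROTO(int foo), TP_ARGS(foo))
 */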
88
89 #undef LTTNG_TRACEPOINT_EVENT
90 #define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
91 LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
92 PARAMS(proto), \
93 PARAMS(args), \
94 PARAMS(fields))
95
96 #undef LTTNG_TRACEPOINT_EVENT_NOARGS
97 #define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
98 LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))
99
100 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE
101 #define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
102 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))
103
104 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
105 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
106 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)
107
108 #undef LTTNG_TRACEPOINT_EVENT_CLASS
109 #define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
110 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
111 PARAMS(_fields), )
112
113 #undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
114 #define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
115 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
116
117
118 /*
119 * Stage 1 of the trace events.
120 *
121 * Create dummy trace calls for each event, verifying that the LTTng module
122 * instrumentation headers match the kernel arguments. They will be
123 * optimized out by the compiler.
124 */
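/*
 * For the hypothetical sample_first instance sketched above, this stage
 * would emit roughly:
 *
 *	void trace_sample_first(int foo);
 *
 * which must be compatible with the prototype declared by the corresponding
 * kernel tracepoint header, otherwise the build fails.
 */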
125
126 /* Reset all macros within TRACEPOINT_EVENT */
127 #include "lttng-events-reset.h"
128
129 #undef TP_PROTO
130 #define TP_PROTO(...) __VA_ARGS__
131
132 #undef TP_ARGS
133 #define TP_ARGS(...) __VA_ARGS__
134
135 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
136 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
137 void trace_##_name(_proto);
138
139 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
140 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
141 void trace_##_name(void);
142
143 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
144
145 /*
146 * Stage 1.1 of the trace events.
147 *
148 * Create dummy trace prototypes for each event class, and for each used
149 * template. This will allow checking whether the prototypes from the
150 * class and the instance using the class actually match.
151 */
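/*
 * Continuing the hypothetical example, the class and each of its instances
 * declare the same symbol here, e.g.:
 *
 *	void __event_template_proto___sample_class(int foo);
 *
 * so a prototype mismatch between a class and an instance shows up as a
 * conflicting declaration at compile time.
 */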
152
153 #include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
154
155 #undef TP_PROTO
156 #define TP_PROTO(...) __VA_ARGS__
157
158 #undef TP_ARGS
159 #define TP_ARGS(...) __VA_ARGS__
160
161 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
162 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
163 void __event_template_proto___##_template(_proto);
164
165 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
166 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
167 void __event_template_proto___##_template(void);
168
169 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
170 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
171 void __event_template_proto___##_name(_proto);
172
173 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
174 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
175 void __event_template_proto___##_name(void);
176
177 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
178
179 /*
180 * Stage 2 of the trace events.
181 *
182 * Create event field type metadata section.
183 * Each event produces an array of fields.
184 */
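/*
 * For the hypothetical sample_class, this stage would generate something
 * along the lines of:
 *
 *	static const struct lttng_event_field __event_fields___sample_class[] = {
 *		{
 *			.name = "foo",
 *			.type = __type_integer(int, 0, 0, -1, __BYTE_ORDER, 10, none),
 *			.nowrite = 0,
 *			.user = 0,
 *		},
 *	};
 */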
185
186 /* Reset all macros within TRACEPOINT_EVENT */
187 #include "lttng-events-reset.h"
188 #include "lttng-events-write.h"
189 #include "lttng-events-nowrite.h"
190
191 #undef _ctf_integer_ext
192 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
193 { \
194 .name = #_item, \
195 .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none),\
196 .nowrite = _nowrite, \
197 .user = _user, \
198 },
199
200 #undef _ctf_array_encoded
201 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
202 { \
203 .name = #_item, \
204 .type = \
205 { \
206 .atype = atype_array, \
207 .u = \
208 { \
209 .array = \
210 { \
211 .elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \
212 .length = _length, \
213 } \
214 } \
215 }, \
216 .nowrite = _nowrite, \
217 .user = _user, \
218 },
219
220 #undef _ctf_array_bitfield
221 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
222 { \
223 .name = #_item, \
224 .type = \
225 { \
226 .atype = atype_array, \
227 .u = \
228 { \
229 .array = \
230 { \
231 .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
232 .length = (_length) * sizeof(_type) * CHAR_BIT, \
233 .elem_alignment = lttng_alignof(_type), \
234 } \
235 } \
236 }, \
237 .nowrite = _nowrite, \
238 .user = _user, \
239 },
240
241
242 #undef _ctf_sequence_encoded
243 #define _ctf_sequence_encoded(_type, _item, _src, \
244 _length_type, _src_length, _encoding, \
245 _byte_order, _base, _user, _nowrite) \
246 { \
247 .name = #_item, \
248 .type = \
249 { \
250 .atype = atype_sequence, \
251 .u = \
252 { \
253 .sequence = \
254 { \
255 .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
256 .elem_type = __type_integer(_type, 0, 0, -1, _byte_order, _base, _encoding), \
257 }, \
258 }, \
259 }, \
260 .nowrite = _nowrite, \
261 .user = _user, \
262 },
263
264 #undef _ctf_sequence_bitfield
265 #define _ctf_sequence_bitfield(_type, _item, _src, \
266 _length_type, _src_length, \
267 _user, _nowrite) \
268 { \
269 .name = #_item, \
270 .type = \
271 { \
272 .atype = atype_sequence, \
273 .u = \
274 { \
275 .sequence = \
276 { \
277 .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
278 .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
279 .elem_alignment = lttng_alignof(_type), \
280 }, \
281 }, \
282 }, \
283 .nowrite = _nowrite, \
284 .user = _user, \
285 },
286
287 #undef _ctf_string
288 #define _ctf_string(_item, _src, _user, _nowrite) \
289 { \
290 .name = #_item, \
291 .type = \
292 { \
293 .atype = atype_string, \
294 .u = \
295 { \
296 .basic = { .string = { .encoding = lttng_encode_UTF8 } } \
297 }, \
298 }, \
299 .nowrite = _nowrite, \
300 .user = _user, \
301 },
302
303 #undef TP_FIELDS
304 #define TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
305
306 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
307 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
308 static const struct lttng_event_field __event_fields___##_name[] = { \
309 _fields \
310 };
311
312 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
313 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
314 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)
315
316 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
317
318 /*
319 * Stage 3 of the trace events.
320 *
321 * Create probe callback prototypes.
322 */
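/*
 * For the hypothetical sample_class, this stage declares roughly:
 *
 *	static void __event_probe__sample_class(void *__data, int foo);
 */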
323
324 /* Reset all macros within TRACEPOINT_EVENT */
325 #include "lttng-events-reset.h"
326
327 #undef TP_PROTO
328 #define TP_PROTO(...) __VA_ARGS__
329
330 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
331 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
332 static void __event_probe__##_name(void *__data, _proto);
333
334 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
335 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
336 static void __event_probe__##_name(void *__data);
337
338 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
339
340 /*
341 * Stage 4 of the trace events.
342 *
343 * Create static inline function that calculates event size.
344 */
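/*
 * For the hypothetical sample_class, the generated function would reduce,
 * roughly, to:
 *
 *	static inline size_t __event_get_size__sample_class(size_t *__dynamic_len,
 *			void *__tp_locvar, int foo)
 *	{
 *		size_t __event_len = 0;
 *
 *		__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(int));
 *		__event_len += sizeof(int);
 *		return __event_len;
 *	}
 */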
345
346 /* Reset all macros within TRACEPOINT_EVENT */
347 #include "lttng-events-reset.h"
348 #include "lttng-events-write.h"
349
350 #undef _ctf_integer_ext
351 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
352 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
353 __event_len += sizeof(_type);
354
355 #undef _ctf_array_encoded
356 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
357 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
358 __event_len += sizeof(_type) * (_length);
359
360 #undef _ctf_array_bitfield
361 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
362 _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
363
364 #undef _ctf_sequence_encoded
365 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
366 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
367 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
368 __event_len += sizeof(_length_type); \
369 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
370 __dynamic_len[__dynamic_len_idx] = (_src_length); \
371 __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
372 __dynamic_len_idx++;
373
374 #undef _ctf_sequence_bitfield
375 #define _ctf_sequence_bitfield(_type, _item, _src, \
376 _length_type, _src_length, \
377 _user, _nowrite) \
378 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
379 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
380
381 /*
382 * ctf_user_string includes the \0. If the user-space strlen returns 0, the
383 * access faulted, so we set the size to 1 (\0 only).
384 */
385 #undef _ctf_string
386 #define _ctf_string(_item, _src, _user, _nowrite) \
387 if (_user) \
388 __event_len += __dynamic_len[__dynamic_len_idx++] = \
389 max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
390 else \
391 __event_len += __dynamic_len[__dynamic_len_idx++] = \
392 strlen(_src) + 1;
393
394 #undef TP_PROTO
395 #define TP_PROTO(...) __VA_ARGS__
396
397 #undef TP_FIELDS
398 #define TP_FIELDS(...) __VA_ARGS__
399
400 #undef TP_locvar
401 #define TP_locvar(...) __VA_ARGS__
402
403 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
404 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
405 static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \
406 void *__tp_locvar, _proto) \
407 { \
408 size_t __event_len = 0; \
409 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
410 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
411 \
412 _fields \
413 return __event_len; \
414 }
415
416 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
417 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
418 static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \
419 void *__tp_locvar) \
420 { \
421 size_t __event_len = 0; \
422 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
423 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
424 \
425 _fields \
426 return __event_len; \
427 }
428
429 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
430
431
432 /*
433 * Stage 4.1 of tracepoint event generation.
434 *
435 * Create a static inline function that lays out the filter stack data.
436 * We make both write and nowrite data available to the filter.
437 */
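/*
 * For the hypothetical sample_class, the generated function would be,
 * in simplified form:
 *
 *	static inline
 *	void __event_prepare_filter_stack__sample_class(char *__stack_data,
 *			void *__tp_locvar, int foo)
 *	{
 *		int64_t __ctf_tmp_int64 = (int64_t) foo;
 *
 *		memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t));
 *		__stack_data += sizeof(int64_t);
 *	}
 */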
438
439 /* Reset all macros within TRACEPOINT_EVENT */
440 #include "lttng-events-reset.h"
441 #include "lttng-events-write.h"
442 #include "lttng-events-nowrite.h"
443
444 #undef _ctf_integer_ext_fetched
445 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
446 if (lttng_is_signed_type(_type)) { \
447 int64_t __ctf_tmp_int64; \
448 switch (sizeof(_type)) { \
449 case 1: \
450 { \
451 union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
452 __ctf_tmp_int64 = (int64_t) __tmp.v; \
453 break; \
454 } \
455 case 2: \
456 { \
457 union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
458 __ctf_tmp_int64 = (int64_t) __tmp.v; \
459 break; \
460 } \
461 case 4: \
462 { \
463 union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
464 __ctf_tmp_int64 = (int64_t) __tmp.v; \
465 break; \
466 } \
467 case 8: \
468 { \
469 union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
470 __ctf_tmp_int64 = (int64_t) __tmp.v; \
471 break; \
472 } \
473 default: \
474 BUG_ON(1); \
475 }; \
476 memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
477 } else { \
478 uint64_t __ctf_tmp_uint64; \
479 switch (sizeof(_type)) { \
480 case 1: \
481 { \
482 union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
483 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
484 break; \
485 } \
486 case 2: \
487 { \
488 union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
489 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
490 break; \
491 } \
492 case 4: \
493 { \
494 union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
495 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
496 break; \
497 } \
498 case 8: \
499 { \
500 union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
501 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
502 break; \
503 } \
504 default: \
505 BUG_ON(1); \
506 }; \
507 memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
508 } \
509 __stack_data += sizeof(int64_t);
510
511 #undef _ctf_integer_ext_isuser0
512 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
513 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
514
515 #undef _ctf_integer_ext_isuser1
516 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
517 { \
518 __typeof__(_user_src) _src; \
519 if (get_user(_src, &(_user_src))) \
520 _src = 0; \
521 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
522 }
523
524 #undef _ctf_integer_ext
525 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
526 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
527
528 #undef _ctf_array_encoded
529 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
530 { \
531 unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
532 const void *__ctf_tmp_ptr = (_src); \
533 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
534 __stack_data += sizeof(unsigned long); \
535 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
536 __stack_data += sizeof(void *); \
537 }
538
539 #undef _ctf_array_bitfield
540 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
541 _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
542
543 #undef _ctf_sequence_encoded
544 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
545 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
546 { \
547 unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
548 const void *__ctf_tmp_ptr = (_src); \
549 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
550 __stack_data += sizeof(unsigned long); \
551 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
552 __stack_data += sizeof(void *); \
553 }
554
555 #undef _ctf_sequence_bitfield
556 #define _ctf_sequence_bitfield(_type, _item, _src, \
557 _length_type, _src_length, \
558 _user, _nowrite) \
559 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
560 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
561
562 #undef _ctf_string
563 #define _ctf_string(_item, _src, _user, _nowrite) \
564 { \
565 const void *__ctf_tmp_ptr = (_src); \
566 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
567 __stack_data += sizeof(void *); \
568 }
569
570 #undef TP_PROTO
571 #define TP_PROTO(...) __VA_ARGS__
572
573 #undef TP_FIELDS
574 #define TP_FIELDS(...) __VA_ARGS__
575
576 #undef TP_locvar
577 #define TP_locvar(...) __VA_ARGS__
578
579 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
580 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
581 static inline \
582 void __event_prepare_filter_stack__##_name(char *__stack_data, \
583 void *__tp_locvar) \
584 { \
585 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
586 \
587 _fields \
588 }
589
590 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
591 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
592 static inline \
593 void __event_prepare_filter_stack__##_name(char *__stack_data, \
594 void *__tp_locvar, _proto) \
595 { \
596 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
597 \
598 _fields \
599 }
600
601 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
602
603 /*
604 * Stage 5 of the trace events.
605 *
606 * Create static inline function that calculates event payload alignment.
607 */
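/*
 * For the hypothetical sample_class, the generated function would be
 * roughly:
 *
 *	static inline size_t __event_get_align__sample_class(void *__tp_locvar, int foo)
 *	{
 *		size_t __event_align = 1;
 *
 *		__event_align = max_t(size_t, __event_align, lttng_alignof(int));
 *		return __event_align;
 *	}
 */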
608
609 /* Reset all macros within TRACEPOINT_EVENT */
610 #include "lttng-events-reset.h"
611 #include "lttng-events-write.h"
612
613 #undef _ctf_integer_ext
614 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
615 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
616
617 #undef _ctf_array_encoded
618 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
619 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
620
621 #undef _ctf_array_bitfield
622 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
623 _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
624
625 #undef _ctf_sequence_encoded
626 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
627 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
628 __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
629 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
630
631 #undef _ctf_sequence_bitfield
632 #define _ctf_sequence_bitfield(_type, _item, _src, \
633 _length_type, _src_length, \
634 _user, _nowrite) \
635 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
636 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
637
638 #undef _ctf_string
639 #define _ctf_string(_item, _src, _user, _nowrite)
640
641 #undef TP_PROTO
642 #define TP_PROTO(...) __VA_ARGS__
643
644 #undef TP_FIELDS
645 #define TP_FIELDS(...) __VA_ARGS__
646
647 #undef TP_locvar
648 #define TP_locvar(...) __VA_ARGS__
649
650 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
651 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
652 static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
653 { \
654 size_t __event_align = 1; \
655 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
656 \
657 _fields \
658 return __event_align; \
659 }
660
661 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
662 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
663 static inline size_t __event_get_align__##_name(void *__tp_locvar) \
664 { \
665 size_t __event_align = 1; \
666 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
667 \
668 _fields \
669 return __event_align; \
670 }
671
672 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
673
674 /*
675 * Stage 6 of tracepoint event generation.
676 *
677 * Create the probe function. This function performs the event size
678 * calculation and writes the event data into the ring buffer.
679 */
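/*
 * Sketched for the hypothetical sample_class, the generated probe has the
 * following shape (details elided):
 *
 *	static void __event_probe__sample_class(void *__data, int foo)
 *	{
 *		... check that the session, channel and event are enabled
 *		    and that the PID is not filtered out ...
 *		... if filter bytecode is attached, prepare the filter stack
 *		    and run the filters; bail out unless a record is requested ...
 *		... compute __event_len and __event_align, reserve space ...
 *		... write the fields and commit the event ...
 *	}
 */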
680
681 /* Reset all macros within TRACEPOINT_EVENT */
682 #include "lttng-events-reset.h"
683 #include "lttng-events-write.h"
684
685 #undef _ctf_integer_ext_fetched
686 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
687 { \
688 _type __tmp = _src; \
689 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
690 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
691 }
692
693 #undef _ctf_integer_ext_isuser0
694 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
695 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
696
697 #undef _ctf_integer_ext_isuser1
698 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
699 { \
700 __typeof__(_user_src) _src; \
701 if (get_user(_src, &(_user_src))) \
702 _src = 0; \
703 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
704 }
705
706 #undef _ctf_integer_ext
707 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
708 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
709
710 #undef _ctf_array_encoded
711 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
712 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
713 if (_user) { \
714 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
715 } else { \
716 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
717 }
718
719 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
720 #undef _ctf_array_bitfield
721 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
722 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
723 if (_user) { \
724 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
725 } else { \
726 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
727 }
728 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
729 /*
730 * For big endian, we need to byteswap into little endian.
731 */
732 #undef _ctf_array_bitfield
733 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
734 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
735 { \
736 size_t _i; \
737 \
738 for (_i = 0; _i < (_length); _i++) { \
739 _type _tmp; \
740 \
741 if (_user) { \
742 if (get_user(_tmp, (_type *) _src + _i)) \
743 _tmp = 0; \
744 } else { \
745 _tmp = ((_type *) _src)[_i]; \
746 } \
747 switch (sizeof(_type)) { \
748 case 1: \
749 break; \
750 case 2: \
751 _tmp = cpu_to_le16(_tmp); \
752 break; \
753 case 4: \
754 _tmp = cpu_to_le32(_tmp); \
755 break; \
756 case 8: \
757 _tmp = cpu_to_le64(_tmp); \
758 break; \
759 default: \
760 BUG_ON(1); \
761 } \
762 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
763 } \
764 }
765 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
766
767 #undef _ctf_sequence_encoded
768 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
769 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
770 { \
771 _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \
772 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
773 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
774 } \
775 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
776 if (_user) { \
777 __chan->ops->event_write_from_user(&__ctx, _src, \
778 sizeof(_type) * __get_dynamic_len(dest)); \
779 } else { \
780 __chan->ops->event_write(&__ctx, _src, \
781 sizeof(_type) * __get_dynamic_len(dest)); \
782 }
783
784 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
785 #undef _ctf_sequence_bitfield
786 #define _ctf_sequence_bitfield(_type, _item, _src, \
787 _length_type, _src_length, \
788 _user, _nowrite) \
789 { \
790 _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
791 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
792 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
793 } \
794 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
795 if (_user) { \
796 __chan->ops->event_write_from_user(&__ctx, _src, \
797 sizeof(_type) * __get_dynamic_len(dest)); \
798 } else { \
799 __chan->ops->event_write(&__ctx, _src, \
800 sizeof(_type) * __get_dynamic_len(dest)); \
801 }
802 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
803 /*
804 * For big endian, we need to byteswap into little endian.
805 */
806 #undef _ctf_sequence_bitfield
807 #define _ctf_sequence_bitfield(_type, _item, _src, \
808 _length_type, _src_length, \
809 _user, _nowrite) \
810 { \
811 _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
812 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
813 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
814 } \
815 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
816 { \
817 size_t _i, _length; \
818 \
819 _length = __get_dynamic_len(dest); \
820 for (_i = 0; _i < _length; _i++) { \
821 _type _tmp; \
822 \
823 if (_user) { \
824 if (get_user(_tmp, (_type *) _src + _i)) \
825 _tmp = 0; \
826 } else { \
827 _tmp = ((_type *) _src)[_i]; \
828 } \
829 switch (sizeof(_type)) { \
830 case 1: \
831 break; \
832 case 2: \
833 _tmp = cpu_to_le16(_tmp); \
834 break; \
835 case 4: \
836 _tmp = cpu_to_le32(_tmp); \
837 break; \
838 case 8: \
839 _tmp = cpu_to_le64(_tmp); \
840 break; \
841 default: \
842 BUG_ON(1); \
843 } \
844 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
845 } \
846 }
847 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
848
849 #undef _ctf_string
850 #define _ctf_string(_item, _src, _user, _nowrite) \
851 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
852 if (_user) { \
853 __chan->ops->event_strcpy_from_user(&__ctx, _src, \
854 __get_dynamic_len(dest)); \
855 } else { \
856 __chan->ops->event_strcpy(&__ctx, _src, \
857 __get_dynamic_len(dest)); \
858 }
859
860 /* Beware: each use of this getter consumes one stored len value */
861 #undef __get_dynamic_len
862 #define __get_dynamic_len(field) __stackvar.__dynamic_len[__dynamic_len_idx++]
863
864 #undef TP_PROTO
865 #define TP_PROTO(...) __VA_ARGS__
866
867 #undef TP_ARGS
868 #define TP_ARGS(...) __VA_ARGS__
869
870 #undef TP_FIELDS
871 #define TP_FIELDS(...) __VA_ARGS__
872
873 #undef TP_locvar
874 #define TP_locvar(...) __VA_ARGS__
875
876 #undef TP_code_pre
877 #define TP_code_pre(...) __VA_ARGS__
878
879 #undef TP_code_post
880 #define TP_code_post(...) __VA_ARGS__
881
882 /*
883 * For state dump, check that the "session" argument (mandatory) matches the
884 * session this event belongs to. This ensures that we write state dump data
885 * only into the started session, not into all sessions.
886 */
887 #ifdef TP_SESSION_CHECK
888 #define _TP_SESSION_CHECK(session, csession) (session == csession)
889 #else /* TP_SESSION_CHECK */
890 #define _TP_SESSION_CHECK(session, csession) 1
891 #endif /* TP_SESSION_CHECK */
892
893 /*
894 * The filter stack data uses twice the size needed to hold a size and a
895 * pointer for each field (worst case). For integers, the maximum size
896 * required is 64 bits; the same holds for double-precision floats. Both
897 * fit within 2*sizeof(unsigned long) on all supported architectures.
898 * The results of the filter runtime list are combined with OR (||).
899 */
900 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
901 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
902 static void __event_probe__##_name(void *__data, _proto) \
903 { \
904 struct probe_local_vars { _locvar }; \
905 struct lttng_event *__event = __data; \
906 struct lttng_probe_ctx __lttng_probe_ctx = { \
907 .event = __event, \
908 .interruptible = !irqs_disabled(), \
909 }; \
910 struct lttng_channel *__chan = __event->chan; \
911 struct lttng_session *__session = __chan->session; \
912 struct lib_ring_buffer_ctx __ctx; \
913 size_t __event_len, __event_align; \
914 size_t __dynamic_len_idx __attribute__((unused)) = 0; \
915 union { \
916 size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
917 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
918 } __stackvar; \
919 int __ret; \
920 struct probe_local_vars __tp_locvar; \
921 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
922 &__tp_locvar; \
923 struct lttng_pid_tracker *__lpf; \
924 \
925 if (!_TP_SESSION_CHECK(session, __session)) \
926 return; \
927 if (unlikely(!ACCESS_ONCE(__session->active))) \
928 return; \
929 if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
930 return; \
931 if (unlikely(!ACCESS_ONCE(__event->enabled))) \
932 return; \
933 __lpf = lttng_rcu_dereference(__session->pid_tracker); \
934 if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
935 return; \
936 _code_pre \
937 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
938 struct lttng_bytecode_runtime *bc_runtime; \
939 int __filter_record = __event->has_enablers_without_bytecode; \
940 \
941 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
942 tp_locvar, _args); \
943 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
944 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
945 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
946 __filter_record = 1; \
947 } \
948 if (likely(!__filter_record)) \
949 goto __post; \
950 } \
951 __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \
952 tp_locvar, _args); \
953 __event_align = __event_get_align__##_name(tp_locvar, _args); \
954 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
955 __event_align, -1); \
956 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
957 if (__ret < 0) \
958 goto __post; \
959 _fields \
960 __chan->ops->event_commit(&__ctx); \
961 __post: \
962 _code_post \
963 return; \
964 }
965
966 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
967 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
968 static void __event_probe__##_name(void *__data) \
969 { \
970 struct probe_local_vars { _locvar }; \
971 struct lttng_event *__event = __data; \
972 struct lttng_probe_ctx __lttng_probe_ctx = { \
973 .event = __event, \
974 .interruptible = !irqs_disabled(), \
975 }; \
976 struct lttng_channel *__chan = __event->chan; \
977 struct lttng_session *__session = __chan->session; \
978 struct lib_ring_buffer_ctx __ctx; \
979 size_t __event_len, __event_align; \
980 size_t __dynamic_len_idx __attribute__((unused)) = 0; \
981 union { \
982 size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
983 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
984 } __stackvar; \
985 int __ret; \
986 struct probe_local_vars __tp_locvar; \
987 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
988 &__tp_locvar; \
989 struct lttng_pid_tracker *__lpf; \
990 \
991 if (!_TP_SESSION_CHECK(session, __session)) \
992 return; \
993 if (unlikely(!ACCESS_ONCE(__session->active))) \
994 return; \
995 if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
996 return; \
997 if (unlikely(!ACCESS_ONCE(__event->enabled))) \
998 return; \
999 __lpf = lttng_rcu_dereference(__session->pid_tracker); \
1000 if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
1001 return; \
1002 _code_pre \
1003 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
1004 struct lttng_bytecode_runtime *bc_runtime; \
1005 int __filter_record = __event->has_enablers_without_bytecode; \
1006 \
1007 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
1008 tp_locvar); \
1009 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
1010 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
1011 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
1012 __filter_record = 1; \
1013 } \
1014 if (likely(!__filter_record)) \
1015 goto __post; \
1016 } \
1017 __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \
1018 __event_align = __event_get_align__##_name(tp_locvar); \
1019 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1020 __event_align, -1); \
1021 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1022 if (__ret < 0) \
1023 goto __post; \
1024 _fields \
1025 __chan->ops->event_commit(&__ctx); \
1026 __post: \
1027 _code_post \
1028 return; \
1029 }
1030
1031 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1032
1033 #undef __get_dynamic_len
1034
1035 /*
1036 * Stage 7 of the trace events.
1037 *
1038 * Create event descriptions.
1039 */
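/*
 * For a hypothetical sample_first instance of sample_class, this stage
 * produces roughly:
 *
 *	static const struct lttng_event_desc __event_desc___sample_first = {
 *		.fields = __event_fields___sample_class,
 *		.name = "sample_first",
 *		.kname = "sample_first",
 *		.probe_callback = (void *) &__event_probe__sample_class,
 *		.nr_fields = ARRAY_SIZE(__event_fields___sample_class),
 *		.owner = THIS_MODULE,
 *	};
 */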
1040
1041 /* Named field types must be defined in lttng-types.h */
1042
1043 #include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1044
1045 #ifndef TP_PROBE_CB
1046 #define TP_PROBE_CB(_template) &__event_probe__##_template
1047 #endif
1048
1049 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1050 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1051 static const struct lttng_event_desc __event_desc___##_map = { \
1052 .fields = __event_fields___##_template, \
1053 .name = #_map, \
1054 .kname = #_name, \
1055 .probe_callback = (void *) TP_PROBE_CB(_template), \
1056 .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
1057 .owner = THIS_MODULE, \
1058 };
1059
1060 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1061 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1062 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1063
1064 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1065
1066 /*
1067 * Stage 8 of the trace events.
1068 *
1069 * Create an array of event description pointers.
1070 */
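/*
 * Assuming TRACE_SYSTEM were "sample" and given the hypothetical instances
 * above, the array below would expand to roughly:
 *
 *	static const struct lttng_event_desc *__event_desc___sample[] = {
 *		&__event_desc___sample_first,
 *		&__event_desc___sample_second,
 *	};
 */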
1071
1072 #include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1073
1074 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1075 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1076 &__event_desc___##_map,
1077
1078 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1079 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1080 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1081
1082 #define TP_ID1(_token, _system) _token##_system
1083 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1084
1085 static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
1086 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1087 };
1088
1089 #undef TP_ID1
1090 #undef TP_ID
1091
1092 /*
1093 * Stage 9 of the trace events.
1094 *
1095 * Create a top-level descriptor for the whole probe.
1096 */
1097
1098 #define TP_ID1(_token, _system) _token##_system
1099 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1100
1101 /* non-const because the list head will be modified when registered. */
1102 static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
1103 .provider = __stringify(TRACE_SYSTEM),
1104 .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
1105 .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
1106 .head = { NULL, NULL },
1107 .lazy_init_head = { NULL, NULL },
1108 .lazy = 0,
1109 };
1110
1111 #undef TP_ID1
1112 #undef TP_ID
1113
1114 /*
1115 * Stage 10 of the trace events.
1116 *
1117 * Register/unregister probes at module load/unload.
1118 */
1119
1120 #include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1121
1122 #define TP_ID1(_token, _system) _token##_system
1123 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1124 #define module_init_eval1(_token, _system) module_init(_token##_system)
1125 #define module_init_eval(_token, _system) module_init_eval1(_token, _system)
1126 #define module_exit_eval1(_token, _system) module_exit(_token##_system)
1127 #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
1128
1129 #ifndef TP_MODULE_NOINIT
1130 static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
1131 {
1132 wrapper_vmalloc_sync_all();
1133 return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1134 }
1135
1136 static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
1137 {
1138 lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1139 }
1140
1141 #ifndef TP_MODULE_NOAUTOLOAD
1142 module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
1143 module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
1144 #endif
1145
1146 #endif
1147
1148 #undef module_init_eval
1149 #undef module_exit_eval
1150 #undef TP_ID1
1151 #undef TP_ID
1152
1153 #undef TP_PROTO
1154 #undef TP_ARGS