Use system include paths in probes/lttng-tracepoint-event-impl.h
1 /*
2 * lttng-tracepoint-event-impl.h
3 *
4 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
5 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <linux/uaccess.h>
23 #include <linux/debugfs.h>
24 #include <linux/rculist.h>
25 #include <asm/byteorder.h>
26
27 #include <probes/lttng.h>
28 #include <probes/lttng-types.h>
29 #include <probes/lttng-probe-user.h>
30 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
31 #include <wrapper/ringbuffer/frontend_types.h>
32 #include <wrapper/rcu.h>
33 #include <lttng-events.h>
34 #include <lttng-tracer-core.h>
35
36 /*
37 * Macro declarations used for all stages.
38 */
39
40 /*
41 * LTTng name mapping macros. LTTng remaps some of the kernel events to
42 * enforce name-spacing.
43 */
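
/*
 * Illustrative sketch (not part of this header; event and field names are
 * hypothetical): a kernel event "foo_bar" can be exposed under a
 * namespaced LTTng name:
 *
 *   LTTNG_TRACEPOINT_EVENT_MAP(foo_bar, subsys_foo_bar,
 *       TP_PROTO(int baz),
 *       TP_ARGS(baz),
 *       TP_FIELDS(ctf_integer(int, baz, baz)))
 *
 * "foo_bar" remains the kernel tracepoint being hooked, while
 * "subsys_foo_bar" is the event name recorded in the trace.
 */
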
44 #undef LTTNG_TRACEPOINT_EVENT_MAP
45 #define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
46 LTTNG_TRACEPOINT_EVENT_CLASS(map, \
47 PARAMS(proto), \
48 PARAMS(args), \
49 PARAMS(fields)) \
50 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
51
52 #undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
53 #define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
54 LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
55 PARAMS(fields)) \
56 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)
57
58 #undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
59 #define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
60 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
61 PARAMS(proto), \
62 PARAMS(args), \
63 PARAMS(_locvar), \
64 PARAMS(_code_pre), \
65 PARAMS(fields), \
66 PARAMS(_code_post)) \
67 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
68
69 #undef LTTNG_TRACEPOINT_EVENT_CODE
70 #define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
71 LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
72 PARAMS(proto), \
73 PARAMS(args), \
74 PARAMS(_locvar), \
75 PARAMS(_code_pre), \
76 PARAMS(fields), \
77 PARAMS(_code_post))
78
79 /*
80 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
81 * handler for events, for the case where all events share the same
82 * parameters and just have distinct tracepoints. Each tracepoint can be
83 * defined with LTTNG_TRACEPOINT_EVENT_INSTANCE, which maps the
84 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
85 *
86 * LTTNG_TRACEPOINT_EVENT is a one-to-one mapping between tracepoint and
87 * template.
88 */
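
/*
 * Illustrative sketch of how an instrumentation header would typically
 * use these macros (hypothetical event and field names, not part of this
 * file):
 *
 *   LTTNG_TRACEPOINT_EVENT_CLASS(my_class,
 *       TP_PROTO(int foo, const char *bar),
 *       TP_ARGS(foo, bar),
 *       TP_FIELDS(
 *           ctf_integer(int, foo, foo)
 *           ctf_string(bar, bar)
 *       )
 *   )
 *
 *   LTTNG_TRACEPOINT_EVENT_INSTANCE(my_class, my_event,
 *       TP_PROTO(int foo, const char *bar),
 *       TP_ARGS(foo, bar))
 */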
89
90 #undef LTTNG_TRACEPOINT_EVENT
91 #define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
92 LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
93 PARAMS(proto), \
94 PARAMS(args), \
95 PARAMS(fields))
96
97 #undef LTTNG_TRACEPOINT_EVENT_NOARGS
98 #define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
99 LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))
100
101 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE
102 #define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
103 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))
104
105 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
106 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
107 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)
108
109 #undef LTTNG_TRACEPOINT_EVENT_CLASS
110 #define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
111 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
112 PARAMS(_fields), )
113
114 #undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
115 #define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
116 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
117
118
119 /*
120 * Stage 1 of the trace events.
121 *
122 * Create dummy trace calls for each event, verifying that the LTTng module
123 * instrumentation headers match the kernel arguments. They will be
124 * optimized out by the compiler.
125 */
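
/*
 * For a hypothetical event "my_event" declared with TP_PROTO(int foo),
 * this stage only emits a dummy prototype such as:
 *
 *   void trace_my_event(int foo);
 *
 * so the compiler can check it against the trace_my_event() declared by
 * the kernel tracepoint header; a mismatch fails the build.
 */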
126
127 /* Reset all macros within TRACEPOINT_EVENT */
128 #include "lttng-events-reset.h"
129
130 #undef TP_PROTO
131 #define TP_PROTO(...) __VA_ARGS__
132
133 #undef TP_ARGS
134 #define TP_ARGS(...) __VA_ARGS__
135
136 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
137 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
138 void trace_##_name(_proto);
139
140 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
141 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
142 void trace_##_name(void);
143
144 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
145
146 /*
147 * Stage 1.1 of the trace events.
148 *
149 * Create dummy trace prototypes for each event class, and for each used
150 * template. This will allow checking whether the prototypes from the
151 * class and the instance using the class actually match.
152 */
153
154 #include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */
155
156 #undef TP_PROTO
157 #define TP_PROTO(...) __VA_ARGS__
158
159 #undef TP_ARGS
160 #define TP_ARGS(...) __VA_ARGS__
161
162 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
163 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
164 void __event_template_proto___##_template(_proto);
165
166 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
167 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
168 void __event_template_proto___##_template(void);
169
170 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
171 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
172 void __event_template_proto___##_name(_proto);
173
174 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
175 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
176 void __event_template_proto___##_name(void);
177
178 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
179
180 /*
181 * Stage 2 of the trace events.
182 *
183 * Create the event field type metadata section.
184 * Each event produces an array of fields.
185 */
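
/*
 * Rough sketch of what this stage generates for a hypothetical class
 * "my_class" carrying a single ctf_integer(int, foo, foo) field:
 *
 *   static const struct lttng_event_field __event_fields___my_class[] = {
 *       {
 *           .name = "foo",
 *           .type = __type_integer(int, 0, 0, -1, __BYTE_ORDER, 10, none),
 *           .nowrite = 0,
 *           .user = 0,
 *       },
 *   };
 */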
186
187 /* Reset all macros within TRACEPOINT_EVENT */
188 #include "lttng-events-reset.h"
189 #include "lttng-events-write.h"
190 #include "lttng-events-nowrite.h"
191
192 #undef _ctf_integer_ext
193 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
194 { \
195 .name = #_item, \
196 .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none),\
197 .nowrite = _nowrite, \
198 .user = _user, \
199 },
200
201 #undef _ctf_array_encoded
202 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
203 { \
204 .name = #_item, \
205 .type = \
206 { \
207 .atype = atype_array, \
208 .u = \
209 { \
210 .array = \
211 { \
212 .elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \
213 .length = _length, \
214 } \
215 } \
216 }, \
217 .nowrite = _nowrite, \
218 .user = _user, \
219 },
220
221 #undef _ctf_array_bitfield
222 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
223 { \
224 .name = #_item, \
225 .type = \
226 { \
227 .atype = atype_array, \
228 .u = \
229 { \
230 .array = \
231 { \
232 .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
233 .length = (_length) * sizeof(_type) * CHAR_BIT, \
234 .elem_alignment = lttng_alignof(_type), \
235 } \
236 } \
237 }, \
238 .nowrite = _nowrite, \
239 .user = _user, \
240 },
241
242
243 #undef _ctf_sequence_encoded
244 #define _ctf_sequence_encoded(_type, _item, _src, \
245 _length_type, _src_length, _encoding, \
246 _byte_order, _base, _user, _nowrite) \
247 { \
248 .name = #_item, \
249 .type = \
250 { \
251 .atype = atype_sequence, \
252 .u = \
253 { \
254 .sequence = \
255 { \
256 .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
257 .elem_type = __type_integer(_type, 0, 0, -1, _byte_order, _base, _encoding), \
258 }, \
259 }, \
260 }, \
261 .nowrite = _nowrite, \
262 .user = _user, \
263 },
264
265 #undef _ctf_sequence_bitfield
266 #define _ctf_sequence_bitfield(_type, _item, _src, \
267 _length_type, _src_length, \
268 _user, _nowrite) \
269 { \
270 .name = #_item, \
271 .type = \
272 { \
273 .atype = atype_sequence, \
274 .u = \
275 { \
276 .sequence = \
277 { \
278 .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
279 .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
280 .elem_alignment = lttng_alignof(_type), \
281 }, \
282 }, \
283 }, \
284 .nowrite = _nowrite, \
285 .user = _user, \
286 },
287
288 #undef _ctf_string
289 #define _ctf_string(_item, _src, _user, _nowrite) \
290 { \
291 .name = #_item, \
292 .type = \
293 { \
294 .atype = atype_string, \
295 .u = \
296 { \
297 .basic = { .string = { .encoding = lttng_encode_UTF8 } } \
298 }, \
299 }, \
300 .nowrite = _nowrite, \
301 .user = _user, \
302 },
303
304 #undef TP_FIELDS
305 #define TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
306
307 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
308 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
309 static const struct lttng_event_field __event_fields___##_name[] = { \
310 _fields \
311 };
312
313 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
314 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
315 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)
316
317 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
318
319 /*
320 * Stage 3 of the trace events.
321 *
322 * Create probe callback prototypes.
323 */
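
/*
 * For a hypothetical class "my_class" declared with TP_PROTO(int foo),
 * this stage merely emits:
 *
 *   static void __event_probe__my_class(void *__data, int foo);
 */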
324
325 /* Reset all macros within TRACEPOINT_EVENT */
326 #include "lttng-events-reset.h"
327
328 #undef TP_PROTO
329 #define TP_PROTO(...) __VA_ARGS__
330
331 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
332 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
333 static void __event_probe__##_name(void *__data, _proto);
334
335 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
336 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
337 static void __event_probe__##_name(void *__data);
338
339 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
340
341 /*
342 * Stage 4 of the trace events.
343 *
344 * Create a static inline function that calculates the event size.
345 */
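
/*
 * Rough shape of the function generated for a hypothetical class
 * "my_class" with a single ctf_integer(int, foo, foo) field (bookkeeping
 * locals elided):
 *
 *   static inline size_t __event_get_size__my_class(size_t *__dynamic_len,
 *           void *__tp_locvar, int foo)
 *   {
 *       size_t __event_len = 0;
 *
 *       __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(int));
 *       __event_len += sizeof(int);
 *       return __event_len;
 *   }
 */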
346
347 /* Reset all macros within TRACEPOINT_EVENT */
348 #include "lttng-events-reset.h"
349 #include "lttng-events-write.h"
350
351 #undef _ctf_integer_ext
352 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
353 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
354 __event_len += sizeof(_type);
355
356 #undef _ctf_array_encoded
357 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
358 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
359 __event_len += sizeof(_type) * (_length);
360
361 #undef _ctf_array_bitfield
362 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
363 _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
364
365 #undef _ctf_sequence_encoded
366 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
367 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
368 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
369 __event_len += sizeof(_length_type); \
370 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
371 __dynamic_len[__dynamic_len_idx] = (_src_length); \
372 __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
373 __dynamic_len_idx++;
374
375 #undef _ctf_sequence_bitfield
376 #define _ctf_sequence_bitfield(_type, _item, _src, \
377 _length_type, _src_length, \
378 _user, _nowrite) \
379 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
380 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
381
382 /*
383 * ctf_user_string includes \0. If it returns 0, it faulted, so we set the
384 * size to 1 (\0 only).
385 */
386 #undef _ctf_string
387 #define _ctf_string(_item, _src, _user, _nowrite) \
388 if (_user) \
389 __event_len += __dynamic_len[__dynamic_len_idx++] = \
390 max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
391 else \
392 __event_len += __dynamic_len[__dynamic_len_idx++] = \
393 strlen(_src) + 1;
394
395 #undef TP_PROTO
396 #define TP_PROTO(...) __VA_ARGS__
397
398 #undef TP_FIELDS
399 #define TP_FIELDS(...) __VA_ARGS__
400
401 #undef TP_locvar
402 #define TP_locvar(...) __VA_ARGS__
403
404 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
405 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
406 static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \
407 void *__tp_locvar, _proto) \
408 { \
409 size_t __event_len = 0; \
410 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
411 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
412 \
413 _fields \
414 return __event_len; \
415 }
416
417 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
418 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
419 static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \
420 void *__tp_locvar) \
421 { \
422 size_t __event_len = 0; \
423 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
424 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
425 \
426 _fields \
427 return __event_len; \
428 }
429
430 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
431
432
433 /*
434 * Stage 4.1 of tracepoint event generation.
435 *
436 * Create a static inline function that lays out the filter stack data.
437 * We make both write and nowrite data available to the filter.
438 */
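
/*
 * Sketch of the resulting stack layout, assuming a hypothetical
 * ctf_integer(int, foo, foo) followed by ctf_string(bar, bar):
 *
 *   [ foo widened to int64_t ][ const char * pointing to bar ]
 *
 * Integers are widened to 64-bit, arrays and sequences push a length
 * (unsigned long) followed by a pointer, and strings push only a pointer,
 * as implemented by the macros below.
 */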
439
440 /* Reset all macros within TRACEPOINT_EVENT */
441 #include "lttng-events-reset.h"
442 #include "lttng-events-write.h"
443 #include "lttng-events-nowrite.h"
444
445 #undef _ctf_integer_ext_fetched
446 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
447 if (lttng_is_signed_type(_type)) { \
448 int64_t __ctf_tmp_int64; \
449 switch (sizeof(_type)) { \
450 case 1: \
451 { \
452 union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
453 __ctf_tmp_int64 = (int64_t) __tmp.v; \
454 break; \
455 } \
456 case 2: \
457 { \
458 union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
459 __ctf_tmp_int64 = (int64_t) __tmp.v; \
460 break; \
461 } \
462 case 4: \
463 { \
464 union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
465 __ctf_tmp_int64 = (int64_t) __tmp.v; \
466 break; \
467 } \
468 case 8: \
469 { \
470 union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
471 __ctf_tmp_int64 = (int64_t) __tmp.v; \
472 break; \
473 } \
474 default: \
475 BUG_ON(1); \
476 }; \
477 memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
478 } else { \
479 uint64_t __ctf_tmp_uint64; \
480 switch (sizeof(_type)) { \
481 case 1: \
482 { \
483 union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
484 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
485 break; \
486 } \
487 case 2: \
488 { \
489 union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
490 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
491 break; \
492 } \
493 case 4: \
494 { \
495 union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
496 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
497 break; \
498 } \
499 case 8: \
500 { \
501 union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
502 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
503 break; \
504 } \
505 default: \
506 BUG_ON(1); \
507 }; \
508 memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
509 } \
510 __stack_data += sizeof(int64_t);
511
512 #undef _ctf_integer_ext_isuser0
513 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
514 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
515
516 #undef _ctf_integer_ext_isuser1
517 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
518 { \
519 __typeof__(_user_src) _src; \
520 if (get_user(_src, &(_user_src))) \
521 _src = 0; \
522 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
523 }
524
525 #undef _ctf_integer_ext
526 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
527 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
528
529 #undef _ctf_array_encoded
530 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
531 { \
532 unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
533 const void *__ctf_tmp_ptr = (_src); \
534 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
535 __stack_data += sizeof(unsigned long); \
536 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
537 __stack_data += sizeof(void *); \
538 }
539
540 #undef _ctf_array_bitfield
541 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
542 _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
543
544 #undef _ctf_sequence_encoded
545 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
546 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
547 { \
548 unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
549 const void *__ctf_tmp_ptr = (_src); \
550 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
551 __stack_data += sizeof(unsigned long); \
552 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
553 __stack_data += sizeof(void *); \
554 }
555
556 #undef _ctf_sequence_bitfield
557 #define _ctf_sequence_bitfield(_type, _item, _src, \
558 _length_type, _src_length, \
559 _user, _nowrite) \
560 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
561 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
562
563 #undef _ctf_string
564 #define _ctf_string(_item, _src, _user, _nowrite) \
565 { \
566 const void *__ctf_tmp_ptr = (_src); \
567 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
568 __stack_data += sizeof(void *); \
569 }
570
571 #undef TP_PROTO
572 #define TP_PROTO(...) __VA_ARGS__
573
574 #undef TP_FIELDS
575 #define TP_FIELDS(...) __VA_ARGS__
576
577 #undef TP_locvar
578 #define TP_locvar(...) __VA_ARGS__
579
580 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
581 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
582 static inline \
583 void __event_prepare_filter_stack__##_name(char *__stack_data, \
584 void *__tp_locvar) \
585 { \
586 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
587 \
588 _fields \
589 }
590
591 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
592 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
593 static inline \
594 void __event_prepare_filter_stack__##_name(char *__stack_data, \
595 void *__tp_locvar, _proto) \
596 { \
597 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
598 \
599 _fields \
600 }
601
602 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
603
604 /*
605 * Stage 5 of the trace events.
606 *
607 * Create a static inline function that calculates the event payload alignment.
608 */
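
/*
 * For a hypothetical class with ctf_integer(int, foo, foo) and
 * ctf_integer(uint64_t, bar, bar) fields, the generated
 * __event_get_align__*() function simply returns the largest natural
 * alignment among the written fields, i.e.:
 *
 *   max(1, lttng_alignof(int), lttng_alignof(uint64_t))
 */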
609
610 /* Reset all macros within TRACEPOINT_EVENT */
611 #include "lttng-events-reset.h"
612 #include "lttng-events-write.h"
613
614 #undef _ctf_integer_ext
615 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
616 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
617
618 #undef _ctf_array_encoded
619 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
620 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
621
622 #undef _ctf_array_bitfield
623 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
624 _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)
625
626 #undef _ctf_sequence_encoded
627 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
628 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
629 __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
630 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
631
632 #undef _ctf_sequence_bitfield
633 #define _ctf_sequence_bitfield(_type, _item, _src, \
634 _length_type, _src_length, \
635 _user, _nowrite) \
636 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
637 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
638
639 #undef _ctf_string
640 #define _ctf_string(_item, _src, _user, _nowrite)
641
642 #undef TP_PROTO
643 #define TP_PROTO(...) __VA_ARGS__
644
645 #undef TP_FIELDS
646 #define TP_FIELDS(...) __VA_ARGS__
647
648 #undef TP_locvar
649 #define TP_locvar(...) __VA_ARGS__
650
651 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
652 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
653 static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
654 { \
655 size_t __event_align = 1; \
656 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
657 \
658 _fields \
659 return __event_align; \
660 }
661
662 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
663 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
664 static inline size_t __event_get_align__##_name(void *__tp_locvar) \
665 { \
666 size_t __event_align = 1; \
667 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
668 \
669 _fields \
670 return __event_align; \
671 }
672
673 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
674
675 /*
676 * Stage 6 of tracepoint event generation.
677 *
678 * Create the probe function. This function computes the event size and
679 * writes the event data into the buffer.
680 */
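
/*
 * Rough control flow of each generated __event_probe__*() function (the
 * macros below are authoritative):
 *
 *   1) bail out if the session, channel or event is disabled, or if the
 *      PID tracker filters out the current task;
 *   2) run _code_pre;
 *   3) if filter bytecode is attached, lay out the filter stack and
 *      evaluate each filter, possibly skipping the record;
 *   4) compute the event size and alignment, reserve ring buffer space,
 *      write the fields, commit;
 *   5) run _code_post.
 */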
681
682 /* Reset all macros within TRACEPOINT_EVENT */
683 #include "lttng-events-reset.h"
684 #include "lttng-events-write.h"
685
686 #undef _ctf_integer_ext_fetched
687 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
688 { \
689 _type __tmp = _src; \
690 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
691 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
692 }
693
694 #undef _ctf_integer_ext_isuser0
695 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
696 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
697
698 #undef _ctf_integer_ext_isuser1
699 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
700 { \
701 __typeof__(_user_src) _src; \
702 if (get_user(_src, &(_user_src))) \
703 _src = 0; \
704 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
705 }
706
707 #undef _ctf_integer_ext
708 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
709 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
710
711 #undef _ctf_array_encoded
712 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
713 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
714 if (_user) { \
715 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
716 } else { \
717 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
718 }
719
720 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
721 #undef _ctf_array_bitfield
722 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
723 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
724 if (_user) { \
725 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
726 } else { \
727 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
728 }
729 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
730 /*
731 * For big endian, we need to byteswap into little endian.
732 */
733 #undef _ctf_array_bitfield
734 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
735 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
736 { \
737 size_t _i; \
738 \
739 for (_i = 0; _i < (_length); _i++) { \
740 _type _tmp; \
741 \
742 if (_user) { \
743 if (get_user(_tmp, (_type *) _src + _i)) \
744 _tmp = 0; \
745 } else { \
746 _tmp = ((_type *) _src)[_i]; \
747 } \
748 switch (sizeof(_type)) { \
749 case 1: \
750 break; \
751 case 2: \
752 _tmp = cpu_to_le16(_tmp); \
753 break; \
754 case 4: \
755 _tmp = cpu_to_le32(_tmp); \
756 break; \
757 case 8: \
758 _tmp = cpu_to_le64(_tmp); \
759 break; \
760 default: \
761 BUG_ON(1); \
762 } \
763 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
764 } \
765 }
766 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
767
768 #undef _ctf_sequence_encoded
769 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
770 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
771 { \
772 _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \
773 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
774 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
775 } \
776 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
777 if (_user) { \
778 __chan->ops->event_write_from_user(&__ctx, _src, \
779 sizeof(_type) * __get_dynamic_len(dest)); \
780 } else { \
781 __chan->ops->event_write(&__ctx, _src, \
782 sizeof(_type) * __get_dynamic_len(dest)); \
783 }
784
785 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
786 #undef _ctf_sequence_bitfield
787 #define _ctf_sequence_bitfield(_type, _item, _src, \
788 _length_type, _src_length, \
789 _user, _nowrite) \
790 { \
791 _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
792 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
793 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
794 } \
795 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
796 if (_user) { \
797 __chan->ops->event_write_from_user(&__ctx, _src, \
798 sizeof(_type) * __get_dynamic_len(dest)); \
799 } else { \
800 __chan->ops->event_write(&__ctx, _src, \
801 sizeof(_type) * __get_dynamic_len(dest)); \
802 }
803 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
804 /*
805 * For big endian, we need to byteswap into little endian.
806 */
807 #undef _ctf_sequence_bitfield
808 #define _ctf_sequence_bitfield(_type, _item, _src, \
809 _length_type, _src_length, \
810 _user, _nowrite) \
811 { \
812 _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
813 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
814 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
815 } \
816 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
817 { \
818 size_t _i, _length; \
819 \
820 _length = __get_dynamic_len(dest); \
821 for (_i = 0; _i < _length; _i++) { \
822 _type _tmp; \
823 \
824 if (_user) { \
825 if (get_user(_tmp, (_type *) _src + _i)) \
826 _tmp = 0; \
827 } else { \
828 _tmp = ((_type *) _src)[_i]; \
829 } \
830 switch (sizeof(_type)) { \
831 case 1: \
832 break; \
833 case 2: \
834 _tmp = cpu_to_le16(_tmp); \
835 break; \
836 case 4: \
837 _tmp = cpu_to_le32(_tmp); \
838 break; \
839 case 8: \
840 _tmp = cpu_to_le64(_tmp); \
841 break; \
842 default: \
843 BUG_ON(1); \
844 } \
845 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
846 } \
847 }
848 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
849
850 #undef _ctf_string
851 #define _ctf_string(_item, _src, _user, _nowrite) \
852 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
853 if (_user) { \
854 __chan->ops->event_strcpy_from_user(&__ctx, _src, \
855 __get_dynamic_len(dest)); \
856 } else { \
857 __chan->ops->event_strcpy(&__ctx, _src, \
858 __get_dynamic_len(dest)); \
859 }
860
861 /* Beware: __get_dynamic_len() actually consumes the len value. */
862 #undef __get_dynamic_len
863 #define __get_dynamic_len(field) __stackvar.__dynamic_len[__dynamic_len_idx++]
864
865 #undef TP_PROTO
866 #define TP_PROTO(...) __VA_ARGS__
867
868 #undef TP_ARGS
869 #define TP_ARGS(...) __VA_ARGS__
870
871 #undef TP_FIELDS
872 #define TP_FIELDS(...) __VA_ARGS__
873
874 #undef TP_locvar
875 #define TP_locvar(...) __VA_ARGS__
876
877 #undef TP_code_pre
878 #define TP_code_pre(...) __VA_ARGS__
879
880 #undef TP_code_post
881 #define TP_code_post(...) __VA_ARGS__
882
883 /*
884 * For the state dump, check that the (mandatory) "session" argument matches
885 * the session this event belongs to. This ensures that we write state dump
886 * data only into the started session, not into all sessions.
887 */
888 #ifdef TP_SESSION_CHECK
889 #define _TP_SESSION_CHECK(session, csession) (session == csession)
890 #else /* TP_SESSION_CHECK */
891 #define _TP_SESSION_CHECK(session, csession) 1
892 #endif /* TP_SESSION_CHECK */
893
894 /*
895 * Use twice the size for the filter stack data to hold a size and a pointer
896 * for each field (worst case). For integers, the maximum size required is
897 * 64-bit. Same for double-precision floats. Those fit within
898 * 2*sizeof(unsigned long) for all supported architectures.
899 * Perform the union (||) of the filter runtime list.
900 */
901 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
902 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
903 static void __event_probe__##_name(void *__data, _proto) \
904 { \
905 struct probe_local_vars { _locvar }; \
906 struct lttng_event *__event = __data; \
907 struct lttng_probe_ctx __lttng_probe_ctx = { \
908 .event = __event, \
909 .interruptible = !irqs_disabled(), \
910 }; \
911 struct lttng_channel *__chan = __event->chan; \
912 struct lttng_session *__session = __chan->session; \
913 struct lib_ring_buffer_ctx __ctx; \
914 size_t __event_len, __event_align; \
915 size_t __dynamic_len_idx __attribute__((unused)) = 0; \
916 union { \
917 size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
918 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
919 } __stackvar; \
920 int __ret; \
921 struct probe_local_vars __tp_locvar; \
922 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
923 &__tp_locvar; \
924 struct lttng_pid_tracker *__lpf; \
925 \
926 if (!_TP_SESSION_CHECK(session, __session)) \
927 return; \
928 if (unlikely(!ACCESS_ONCE(__session->active))) \
929 return; \
930 if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
931 return; \
932 if (unlikely(!ACCESS_ONCE(__event->enabled))) \
933 return; \
934 __lpf = lttng_rcu_dereference(__session->pid_tracker); \
935 if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
936 return; \
937 _code_pre \
938 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
939 struct lttng_bytecode_runtime *bc_runtime; \
940 int __filter_record = __event->has_enablers_without_bytecode; \
941 \
942 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
943 tp_locvar, _args); \
944 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
945 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
946 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
947 __filter_record = 1; \
948 } \
949 if (likely(!__filter_record)) \
950 goto __post; \
951 } \
952 __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \
953 tp_locvar, _args); \
954 __event_align = __event_get_align__##_name(tp_locvar, _args); \
955 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
956 __event_align, -1); \
957 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
958 if (__ret < 0) \
959 goto __post; \
960 _fields \
961 __chan->ops->event_commit(&__ctx); \
962 __post: \
963 _code_post \
964 return; \
965 }
966
967 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
968 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
969 static void __event_probe__##_name(void *__data) \
970 { \
971 struct probe_local_vars { _locvar }; \
972 struct lttng_event *__event = __data; \
973 struct lttng_probe_ctx __lttng_probe_ctx = { \
974 .event = __event, \
975 .interruptible = !irqs_disabled(), \
976 }; \
977 struct lttng_channel *__chan = __event->chan; \
978 struct lttng_session *__session = __chan->session; \
979 struct lib_ring_buffer_ctx __ctx; \
980 size_t __event_len, __event_align; \
981 size_t __dynamic_len_idx __attribute__((unused)) = 0; \
982 union { \
983 size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
984 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
985 } __stackvar; \
986 int __ret; \
987 struct probe_local_vars __tp_locvar; \
988 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
989 &__tp_locvar; \
990 struct lttng_pid_tracker *__lpf; \
991 \
992 if (!_TP_SESSION_CHECK(session, __session)) \
993 return; \
994 if (unlikely(!ACCESS_ONCE(__session->active))) \
995 return; \
996 if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
997 return; \
998 if (unlikely(!ACCESS_ONCE(__event->enabled))) \
999 return; \
1000 __lpf = lttng_rcu_dereference(__session->pid_tracker); \
1001 if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
1002 return; \
1003 _code_pre \
1004 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
1005 struct lttng_bytecode_runtime *bc_runtime; \
1006 int __filter_record = __event->has_enablers_without_bytecode; \
1007 \
1008 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
1009 tp_locvar); \
1010 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
1011 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
1012 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
1013 __filter_record = 1; \
1014 } \
1015 if (likely(!__filter_record)) \
1016 goto __post; \
1017 } \
1018 __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \
1019 __event_align = __event_get_align__##_name(tp_locvar); \
1020 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1021 __event_align, -1); \
1022 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1023 if (__ret < 0) \
1024 goto __post; \
1025 _fields \
1026 __chan->ops->event_commit(&__ctx); \
1027 __post: \
1028 _code_post \
1029 return; \
1030 }
1031
1032 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1033
1034 #undef __get_dynamic_len
1035
1036 /*
1037 * Stage 7 of the trace events.
1038 *
1039 * Create event descriptions.
1040 */
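
/*
 * Sketch of the descriptor emitted by the macros below for a hypothetical
 * LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(my_class, kernel_name, my_event, ...):
 *
 *   static const struct lttng_event_desc __event_desc___my_event = {
 *       .fields = __event_fields___my_class,
 *       .name = "my_event",
 *       .kname = "kernel_name",
 *       .probe_callback = (void *) &__event_probe__my_class,
 *       .nr_fields = ARRAY_SIZE(__event_fields___my_class),
 *       .owner = THIS_MODULE,
 *   };
 */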
1041
1042 /* Named field types must be defined in lttng-types.h */
1043
1044 #include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1045
1046 #ifndef TP_PROBE_CB
1047 #define TP_PROBE_CB(_template) &__event_probe__##_template
1048 #endif
1049
1050 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1051 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1052 static const struct lttng_event_desc __event_desc___##_map = { \
1053 .fields = __event_fields___##_template, \
1054 .name = #_map, \
1055 .kname = #_name, \
1056 .probe_callback = (void *) TP_PROBE_CB(_template), \
1057 .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
1058 .owner = THIS_MODULE, \
1059 };
1060
1061 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1062 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1063 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1064
1065 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1066
1067 /*
1068 * Stage 8 of the trace events.
1069 *
1070 * Create an array of event description pointers.
1071 */
1072
1073 #include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1074
1075 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1076 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1077 &__event_desc___##_map,
1078
1079 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1080 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1081 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1082
1083 #define TP_ID1(_token, _system) _token##_system
1084 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1085
1086 static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
1087 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1088 };
1089
1090 #undef TP_ID1
1091 #undef TP_ID
1092
1093 /*
1094 * Stage 9 of the trace events.
1095 *
1096 * Create a toplevel descriptor for the whole probe.
1097 */
1098
1099 #define TP_ID1(_token, _system) _token##_system
1100 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1101
1102 /* non-const because list head will be modified when registered. */
1103 static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
1104 .provider = __stringify(TRACE_SYSTEM),
1105 .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
1106 .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
1107 .head = { NULL, NULL },
1108 .lazy_init_head = { NULL, NULL },
1109 .lazy = 0,
1110 };
1111
1112 #undef TP_ID1
1113 #undef TP_ID
1114
1115 /*
1116 * Stage 10 of the trace events.
1117 *
1118 * Register/unregister probes at module load/unload.
1119 */
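
/*
 * For a hypothetical TRACE_SYSTEM named "sched", the TP_ID() indirection
 * below expands the init path to roughly:
 *
 *   static int __lttng_events_init__sched(void)
 *   {
 *       wrapper_vmalloc_sync_all();
 *       return lttng_probe_register(&__probe_desc___sched);
 *   }
 *   module_init(__lttng_events_init__sched);
 *
 * and similarly for the exit path, unless TP_MODULE_NOINIT is defined
 * (TP_MODULE_NOAUTOLOAD only suppresses the module_init/module_exit hookup).
 */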
1120
1121 #include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1122
1123 #define TP_ID1(_token, _system) _token##_system
1124 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1125 #define module_init_eval1(_token, _system) module_init(_token##_system)
1126 #define module_init_eval(_token, _system) module_init_eval1(_token, _system)
1127 #define module_exit_eval1(_token, _system) module_exit(_token##_system)
1128 #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
1129
1130 #ifndef TP_MODULE_NOINIT
1131 static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
1132 {
1133 wrapper_vmalloc_sync_all();
1134 return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1135 }
1136
1137 static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
1138 {
1139 lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1140 }
1141
1142 #ifndef TP_MODULE_NOAUTOLOAD
1143 module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
1144 module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
1145 #endif
1146
1147 #endif
1148
1149 #undef module_init_eval
1150 #undef module_exit_eval
1151 #undef TP_ID1
1152 #undef TP_ID
1153
1154 #undef TP_PROTO
1155 #undef TP_ARGS