/*
 * Captured from lttng-modules.git, commit "Fix: endianness of integers
 * received by filter" — probes/lttng-tracepoint-event-impl.h
 */
1 /*
2 * lttng-tracepoint-event-impl.h
3 *
4 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
5 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; only
10 * version 2.1 of the License.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 #include <linux/uaccess.h>
23 #include <linux/debugfs.h>
24 #include <linux/rculist.h>
25 #include <asm/byteorder.h>
26 #include <linux/swab.h>
27
28 #include <probes/lttng.h>
29 #include <probes/lttng-types.h>
30 #include <probes/lttng-probe-user.h>
31 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
32 #include <wrapper/ringbuffer/frontend_types.h>
33 #include <wrapper/ringbuffer/backend.h>
34 #include <wrapper/rcu.h>
35 #include <lttng-events.h>
36 #include <lttng-tracer-core.h>
37
38 /*
39 * Macro declarations used for all stages.
40 */
41
/*
 * LTTng name mapping macros. LTTng remaps some of the kernel events to
 * enforce name-spacing.  "name" is the kernel tracepoint name, "map" is
 * the (possibly namespaced) name LTTng exposes; the event class is
 * generated under the mapped name and then instantiated for it.
 */
#undef LTTNG_TRACEPOINT_EVENT_MAP
#define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS(map, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(fields)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

/* Same mapping, for tracepoints that take no arguments. */
#undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
			PARAMS(fields)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)

/*
 * Mapped event variant carrying extra probe-local variables (_locvar)
 * and code hooks run before (_code_pre) / after (_code_post) the fields
 * are recorded.
 */
#undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(_locvar), \
			PARAMS(_code_pre), \
			PARAMS(fields), \
			PARAMS(_code_post)) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

/* Non-mapped variant: the event name is used as its own map name. */
#undef LTTNG_TRACEPOINT_EVENT_CODE
#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(_locvar), \
			PARAMS(_code_pre), \
			PARAMS(fields), \
			PARAMS(_code_post))
80
/*
 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same parameters
 * and just have distinct trace points. Each tracepoint can be defined
 * with LTTNG_TRACEPOINT_EVENT_INSTANCE and that will map the
 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
 *
 * LTTNG_TRACEPOINT_EVENT is a one to one mapping between tracepoint and
 * template.
 *
 * All of the aliases below funnel into the *_MAP / *_CODE forms with
 * identity name mapping, so the later generation stages only need to
 * handle the two canonical macros (CLASS_CODE and CLASS_CODE_NOARGS).
 */

#undef LTTNG_TRACEPOINT_EVENT
#define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
	LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
			PARAMS(proto), \
			PARAMS(args), \
			PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_NOARGS
#define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
	LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE
#define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)

/* CLASS is CLASS_CODE with empty locvar and pre/post code hooks. */
#undef LTTNG_TRACEPOINT_EVENT_CLASS
#define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
		PARAMS(_fields), )

#undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
119
120
/*
 * Stage 1 of the trace events.
 *
 * Create dummy trace calls for each events, verifying that the LTTng module
 * instrumentation headers match the kernel arguments. Will be optimized
 * out by the compiler.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

/*
 * Emit a declaration of trace_<name>(proto); if it disagrees with the
 * kernel's own trace_<name> prototype the compiler flags the conflict.
 */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void trace_##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void trace_##_name(void);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
147
/*
 * Stage 1.1 of the trace events.
 *
 * Create dummy trace prototypes for each event class, and for each used
 * template. This will allow checking whether the prototypes from the
 * class and the instance using the class actually match.
 */

#include <probes/lttng-events-reset.h>	/* Reset all macros within TRACE_EVENT */

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

/*
 * Both the instance and its class declare the same
 * __event_template_proto___<template> symbol; mismatched prototypes
 * become a compile-time redeclaration error.
 */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
void __event_template_proto___##_template(_proto);

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
void __event_template_proto___##_template(void);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(_proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
void __event_template_proto___##_name(void);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
181
/*
 * Stage 2 of the trace events.
 *
 * Create event field type metadata section.
 * Each event produce an array of fields.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>
#include <probes/lttng-events-nowrite.h>

/*
 * Integer field descriptor: byte order and display base come from the
 * instrumentation; __type_integer() is declared in probes/lttng-types.h
 * (out of view here) — presumably (type, signedness override, bitfield
 * params, byte order, base, encoding); TODO confirm against that header.
 */
#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	{					\
	  .name = #_item,			\
	  .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none),\
	  .nowrite = _nowrite,			\
	  .user = _user,			\
	},

/* Fixed-length array of integers; element byte order is host order. */
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	{					\
	  .name = #_item,			\
	  .type =				\
		{				\
		  .atype = atype_array,		\
		  .u =				\
			{			\
			  .array =		\
				{		\
				  .elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \
				  .length = _length, \
				}		\
			}			\
		},				\
	  .nowrite = _nowrite,			\
	  .user = _user,			\
	},

/*
 * Bitfield array: described as little-endian bits; length is expressed
 * in bits ((_length) * sizeof(_type) * CHAR_BIT), with element alignment
 * recorded separately.
 */
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	{					\
	  .name = #_item,			\
	  .type =				\
		{				\
		  .atype = atype_array,		\
		  .u =				\
			{			\
			  .array =		\
				{		\
				  .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
				  .length = (_length) * sizeof(_type) * CHAR_BIT, \
				  .elem_alignment = lttng_alignof(_type), \
				}		\
			}			\
		},				\
	  .nowrite = _nowrite,			\
	  .user = _user,			\
	},


/* Variable-length sequence: host-order length integer + elements. */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src,	\
			_length_type, _src_length, _encoding,	\
			_byte_order, _base, _user, _nowrite)	\
	{					\
	  .name = #_item,			\
	  .type =				\
		{				\
		  .atype = atype_sequence,	\
		  .u =				\
			{			\
			  .sequence =		\
				{		\
				  .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
				  .elem_type = __type_integer(_type, 0, 0, -1, _byte_order, _base, _encoding), \
				},		\
			},			\
		},				\
	  .nowrite = _nowrite,			\
	  .user = _user,			\
	},

/* Variable-length bitfield sequence; elements described little-endian. */
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	{					\
	  .name = #_item,			\
	  .type =				\
		{				\
		  .atype = atype_sequence,	\
		  .u =				\
			{			\
			  .sequence =		\
				{		\
				  .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \
				  .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \
				  .elem_alignment = lttng_alignof(_type), \
				},		\
			},			\
		},				\
	  .nowrite = _nowrite,			\
	  .user = _user,			\
	},

/* NUL-terminated string field, recorded as UTF-8. */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)	\
	{					\
	  .name = #_item,			\
	  .type =				\
		{				\
		  .atype = atype_string,	\
		  .u =				\
			{			\
			  .basic = { .string = { .encoding = lttng_encode_UTF8 } } \
			},			\
		},				\
	  .nowrite = _nowrite,			\
	  .user = _user,			\
	},

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__	/* Only one used in this phase */

/* Each class expands its field list into a static descriptor array. */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
	static const struct lttng_event_field __event_fields___##_name[] = { \
		_fields							     \
	};

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
320
/*
 * Stage 3 of the trace events.
 *
 * Create probe callback prototypes.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

/*
 * Forward-declare the per-class probe; the definitions are generated in
 * stage 6 below.  __data is the struct lttng_event registered for the
 * tracepoint.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto);

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data);

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
342
/*
 * Stage 4 of the trace events.
 *
 * Create static inline function that calculates event size.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>

/* Align to the type's natural alignment, then account for its size. */
#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type);

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__event_len += sizeof(_type) * (_length);

/* Bitfield arrays occupy the same payload bytes as plain arrays. */
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)

/*
 * Sequence size = aligned length header + aligned elements.  The runtime
 * element count (_src_length) is cached in __dynamic_len[] so stage 6 can
 * write exactly the number of elements that were sized here.
 */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,	\
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type));   \
	__event_len += sizeof(_length_type);				       \
	__event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
	__dynamic_len[__dynamic_len_idx] = (_src_length);		       \
	__event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx];       \
	__dynamic_len_idx++;

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
		none, __LITTLE_ENDIAN, 10, _user, _nowrite)

/*
 * ctf_user_string includes \0. If returns 0, it faulted, so we set size to
 * 1 (\0 only).
 */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)			       \
	if (_user)							       \
		__event_len += __dynamic_len[__dynamic_len_idx++] =	       \
			max_t(size_t, lttng_strlen_user_inatomic(_src), 1);    \
	else								       \
		__event_len += __dynamic_len[__dynamic_len_idx++] =	       \
			strlen(_src) + 1;

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

/*
 * Generated sizing function: walks the field list, accumulating aligned
 * sizes and recording dynamic lengths for stage 6 to consume in order.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_size__##_name(size_t *__dynamic_len,	      \
		void *__tp_locvar, _proto)				      \
{									      \
	size_t __event_len = 0;						      \
	unsigned int __dynamic_len_idx __attribute__((unused)) = 0;	      \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
									      \
	_fields								      \
	return __event_len;						      \
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_size__##_name(size_t *__dynamic_len,	      \
		void *__tp_locvar)					      \
{									      \
	size_t __event_len = 0;						      \
	unsigned int __dynamic_len_idx __attribute__((unused)) = 0;	      \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
									      \
	_fields								      \
	return __event_len;						      \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
433
434
/*
 * Stage 4.1 of tracepoint event generation.
 *
 * Create static inline function that layout the filter stack data.
 * We make both write and nowrite data available to the filter.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>
#include <probes/lttng-events-nowrite.h>

/*
 * Push one integer onto the filter stack as a host-endian 64-bit value.
 * The union copy reinterprets the source bits at its exact size, and the
 * value is byte-swapped when the field's declared byte order differs from
 * the host's (__swab*s from <linux/swab.h>) — this is the endianness fix
 * this version carries: the filter interpreter always sees host-order
 * sign-extended int64/uint64 slots.
 */
#undef _ctf_integer_ext_fetched
#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
	if (lttng_is_signed_type(_type)) {				       \
		int64_t __ctf_tmp_int64;				       \
		switch (sizeof(_type)) {				       \
		case 1:							       \
		{							       \
			union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
			__ctf_tmp_int64 = (int64_t) __tmp.v;		       \
			break;						       \
		}							       \
		case 2:							       \
		{							       \
			union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		       \
				__swab16s(&__tmp.v);			       \
			__ctf_tmp_int64 = (int64_t) __tmp.v;		       \
			break;						       \
		}							       \
		case 4:							       \
		{							       \
			union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		       \
				__swab32s(&__tmp.v);			       \
			__ctf_tmp_int64 = (int64_t) __tmp.v;		       \
			break;						       \
		}							       \
		case 8:							       \
		{							       \
			union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		       \
				__swab64s(&__tmp.v);			       \
			__ctf_tmp_int64 = (int64_t) __tmp.v;		       \
			break;						       \
		}							       \
		default:						       \
			BUG_ON(1);					       \
		};							       \
		memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t));       \
	} else {							       \
		uint64_t __ctf_tmp_uint64;				       \
		switch (sizeof(_type)) {				       \
		case 1:							       \
		{							       \
			union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		       \
			break;						       \
		}							       \
		case 2:							       \
		{							       \
			union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		       \
				__swab16s(&__tmp.v);			       \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		       \
			break;						       \
		}							       \
		case 4:							       \
		{							       \
			union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		       \
				__swab32s(&__tmp.v);			       \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		       \
			break;						       \
		}							       \
		case 8:							       \
		{							       \
			union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
			if (_byte_order != __BYTE_ORDER)		       \
				__swab64s(&__tmp.v);			       \
			__ctf_tmp_uint64 = (uint64_t) __tmp.v;		       \
			break;						       \
		}							       \
		default:						       \
			BUG_ON(1);					       \
		};							       \
		memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t));     \
	}								       \
	__stack_data += sizeof(int64_t);

/* Kernel-memory integer: value already fetched, forward directly. */
#undef _ctf_integer_ext_isuser0
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)

/*
 * Userspace integer: fetch through a non-faulting copy first; on fault
 * the value is zeroed rather than aborting the probe.
 */
#undef _ctf_integer_ext_isuser1
#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
{									       \
	union {								       \
		char __array[sizeof(_user_src)];			       \
		__typeof__(_user_src) __v;				       \
	} __tmp_fetch;							       \
	if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array,  \
				&(_user_src), sizeof(_user_src)))	       \
		memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array));   \
	_ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
}

/* Dispatch on _user (0/1) by token pasting to one of the two above. */
#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)

/* Arrays are pushed as (length, pointer) pairs for the interpreter. */
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	{								       \
		unsigned long __ctf_tmp_ulong = (unsigned long) (_length);     \
		const void *__ctf_tmp_ptr = (_src);			       \
		memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
		__stack_data += sizeof(unsigned long);			       \
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *));	       \
		__stack_data += sizeof(void *);				       \
	}

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)

/* Sequences likewise push (runtime length, pointer). */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,	       \
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	{								       \
		unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
		const void *__ctf_tmp_ptr = (_src);			       \
		memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
		__stack_data += sizeof(unsigned long);			       \
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *));	       \
		__stack_data += sizeof(void *);				       \
	}

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
		none, __LITTLE_ENDIAN, 10, _user, _nowrite)

/* Strings are pushed by pointer only; the interpreter walks the bytes. */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)		       \
	{							       \
		const void *__ctf_tmp_ptr = (_src);		       \
		memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *));  \
		__stack_data += sizeof(void *);			       \
	}

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

/* Generated function filling __stack_data for the filter bytecode. */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline								      \
void __event_prepare_filter_stack__##_name(char *__stack_data,		      \
		void *__tp_locvar)					      \
{									      \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
									      \
	_fields								      \
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline								      \
void __event_prepare_filter_stack__##_name(char *__stack_data,		      \
		void *__tp_locvar, _proto)				      \
{									      \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
									      \
	_fields								      \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
621
/*
 * Stage 5 of the trace events.
 *
 * Create static inline function that calculates event payload alignment.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>

/* Event alignment is the max natural alignment over all fields. */
#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	_ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite)

/* Sequences contribute both the length header's and element alignment. */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,	\
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_length_type));	  \
	__event_align = max_t(size_t, __event_align, lttng_alignof(_type));

#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	_ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
		none, __LITTLE_ENDIAN, 10, _user, _nowrite)

/* Strings are byte-aligned: no contribution. */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto)    \
{									      \
	size_t __event_align = 1;					      \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
									      \
	_fields								      \
	return __event_align;						      \
}

#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static inline size_t __event_get_align__##_name(void *__tp_locvar)	      \
{									      \
	size_t __event_align = 1;					      \
	struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar;  \
									      \
	_fields								      \
	return __event_align;						      \
}

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
692
/*
 * Stage 6 of tracepoint event generation.
 *
 * Create the probe function. This function calls event size calculation
 * and writes event data into the buffer.
 */

/* Reset all macros within TRACEPOINT_EVENT */
#include <probes/lttng-events-reset.h>
#include <probes/lttng-events-write.h>

/* Write one already-fetched integer, aligned, in its native byte order. */
#undef _ctf_integer_ext_fetched
#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
	{								\
		_type __tmp = _src;					\
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
		__chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
	}

#undef _ctf_integer_ext_isuser0
#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
	_ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)

/*
 * Userspace integer: non-faulting fetch, zero-fill on fault (same
 * best-effort policy as stage 4.1).
 */
#undef _ctf_integer_ext_isuser1
#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
{									       \
	union {								       \
		char __array[sizeof(_user_src)];			       \
		__typeof__(_user_src) __v;				       \
	} __tmp_fetch;							       \
	if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array,  \
				&(_user_src), sizeof(_user_src)))	       \
		memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array));   \
	_ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
}

#undef _ctf_integer_ext
#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
	_ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)

/* Bulk copy for fixed arrays; user vs kernel source picked at expansion. */
#undef _ctf_array_encoded
#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	if (_user) {							\
		__chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
	} else {							\
		__chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
	}

#if (__BYTE_ORDER == __LITTLE_ENDIAN)
/* Little-endian host: bitfield array bytes can be copied verbatim. */
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	if (_user) {							\
		__chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
	} else {							\
		__chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
	}
#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
/*
 * For big endian, we need to byteswap into little endian.
 */
#undef _ctf_array_bitfield
#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	{								\
		size_t _i;						\
									\
		for (_i = 0; _i < (_length); _i++) {			\
			_type _tmp;					\
									\
			if (_user) {					\
				if (get_user(_tmp, (_type *) _src + _i)) \
					_tmp = 0;			\
			} else {					\
				_tmp = ((_type *) _src)[_i];		\
			}						\
			switch (sizeof(_type)) {			\
			case 1:						\
				break;					\
			case 2:						\
				_tmp = cpu_to_le16(_tmp);		\
				break;					\
			case 4:						\
				_tmp = cpu_to_le32(_tmp);		\
				break;					\
			case 8:						\
				_tmp = cpu_to_le64(_tmp);		\
				break;					\
			default:					\
				BUG_ON(1);				\
			}						\
			__chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
		}							\
	}
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */

/*
 * Sequence: write the length header cached by stage 4
 * (__stackvar.__dynamic_len), then the element payload.  __get_dynamic_len
 * consumes the same cache slot, keeping stages 4 and 6 in lockstep.
 */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type,		\
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	{								\
		_length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	}								\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	if (_user) {							\
		__chan->ops->event_write_from_user(&__ctx, _src,	\
			sizeof(_type) * __get_dynamic_len(dest));	\
	} else {							\
		__chan->ops->event_write(&__ctx, _src,			\
			sizeof(_type) * __get_dynamic_len(dest));	\
	}

#if (__BYTE_ORDER == __LITTLE_ENDIAN)
/* Little-endian host: bitfield sequence copied verbatim after bit-length header. */
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	{								\
		_length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	}								\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	if (_user) {							\
		__chan->ops->event_write_from_user(&__ctx, _src,	\
			sizeof(_type) * __get_dynamic_len(dest));	\
	} else {							\
		__chan->ops->event_write(&__ctx, _src,			\
			sizeof(_type) * __get_dynamic_len(dest));	\
	}
#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
/*
 * For big endian, we need to byteswap into little endian.
 */
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src,		\
			_length_type, _src_length,		\
			_user, _nowrite)			\
	{								\
		_length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	}								\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));	\
	{								\
		size_t _i, _length;					\
									\
		_length = __get_dynamic_len(dest);			\
		for (_i = 0; _i < _length; _i++) {			\
			_type _tmp;					\
									\
			if (_user) {					\
				if (get_user(_tmp, (_type *) _src + _i)) \
					_tmp = 0;			\
			} else {					\
				_tmp = ((_type *) _src)[_i];		\
			}						\
			switch (sizeof(_type)) {			\
			case 1:						\
				break;					\
			case 2:						\
				_tmp = cpu_to_le16(_tmp);		\
				break;					\
			case 4:						\
				_tmp = cpu_to_le32(_tmp);		\
				break;					\
			case 8:						\
				_tmp = cpu_to_le64(_tmp);		\
				break;					\
			default:					\
				BUG_ON(1);				\
			}						\
			__chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
		}							\
	}
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */

/* Strings use the cached length (includes the terminating NUL). */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite)			\
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src)));	\
	if (_user) {							\
		__chan->ops->event_strcpy_from_user(&__ctx, _src,	\
			__get_dynamic_len(dest));			\
	} else {							\
		__chan->ops->event_strcpy(&__ctx, _src,			\
			__get_dynamic_len(dest));			\
	}

/* Beware: this get len actually consumes the len value */
#undef __get_dynamic_len
#define __get_dynamic_len(field)	__stackvar.__dynamic_len[__dynamic_len_idx++]

#undef TP_PROTO
#define TP_PROTO(...)	__VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...)	__VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...)	__VA_ARGS__

#undef TP_locvar
#define TP_locvar(...)	__VA_ARGS__

#undef TP_code_pre
#define TP_code_pre(...)	__VA_ARGS__

#undef TP_code_post
#define TP_code_post(...)	__VA_ARGS__
904
/*
 * For state dump, check that "session" argument (mandatory) matches the
 * session this event belongs to. Ensures that we write state dump data only
 * into the started session, not into all sessions.
 * When TP_SESSION_CHECK is not defined the check compiles to a constant 1
 * and is optimized away.
 */
#ifdef TP_SESSION_CHECK
#define _TP_SESSION_CHECK(session, csession)	(session == csession)
#else /* TP_SESSION_CHECK */
#define _TP_SESSION_CHECK(session, csession)	1
#endif /* TP_SESSION_CHECK */
915
/*
 * Using twice size for filter stack data to hold size and pointer for
 * each field (worse case). For integers, max size required is 64-bit.
 * Same for double-precision floats. Those fit within
 * 2*sizeof(unsigned long) for all supported architectures.
 * Perform UNION (||) of filter runtime list.
 *
 * Probe flow: session/channel/event enabled checks -> PID tracker ->
 * _code_pre hook -> filter bytecode (on the prepared stack) -> size and
 * alignment computation -> ring-buffer reserve/write/commit -> _code_post.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto)		      \
{									      \
	struct probe_local_vars { _locvar };				      \
	struct lttng_event *__event = __data;				      \
	struct lttng_probe_ctx __lttng_probe_ctx = {			      \
		.event = __event,					      \
		.interruptible = !irqs_disabled(),			      \
	};								      \
	struct lttng_channel *__chan = __event->chan;			      \
	struct lttng_session *__session = __chan->session;		      \
	struct lib_ring_buffer_ctx __ctx;				      \
	size_t __event_len, __event_align;				      \
	size_t __dynamic_len_idx __attribute__((unused)) = 0;		      \
	union {								      \
		size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)];   \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar;							      \
	int __ret;							      \
	struct probe_local_vars __tp_locvar;				      \
	struct probe_local_vars *tp_locvar __attribute__((unused)) =	      \
			&__tp_locvar;					      \
	struct lttng_pid_tracker *__lpf;				      \
									      \
	/* "session" resolves from _proto when TP_SESSION_CHECK is set. */    \
	if (!_TP_SESSION_CHECK(session, __session))			      \
		return;							      \
	if (unlikely(!ACCESS_ONCE(__session->active)))			      \
		return;							      \
	if (unlikely(!ACCESS_ONCE(__chan->enabled)))			      \
		return;							      \
	if (unlikely(!ACCESS_ONCE(__event->enabled)))			      \
		return;							      \
	__lpf = lttng_rcu_dereference(__session->pid_tracker);		      \
	if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid)))  \
		return;							      \
	_code_pre							      \
	if (unlikely(!list_empty(&__event->bytecode_runtime_head))) {	      \
		struct lttng_bytecode_runtime *bc_runtime;		      \
		int __filter_record = __event->has_enablers_without_bytecode; \
									      \
		__event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar, _args);			      \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx,	\
					__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
				__filter_record = 1;			      \
		}							      \
		if (likely(!__filter_record))				      \
			goto __post;					      \
	}								      \
	__event_len = __event_get_size__##_name(__stackvar.__dynamic_len,     \
				tp_locvar, _args);			      \
	__event_align = __event_get_align__##_name(tp_locvar, _args);	      \
	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
				 __event_align, -1);			      \
	__ret = __chan->ops->event_reserve(&__ctx, __event->id);	      \
	if (__ret < 0)							      \
		goto __post;						      \
	_fields								      \
	__chan->ops->event_commit(&__ctx);				      \
__post:									      \
	_code_post							      \
	return;								      \
}
988
/*
 * Same probe-generation logic as LTTNG_TRACEPOINT_EVENT_CLASS_CODE, for
 * event classes whose tracepoint takes no arguments: the size, align,
 * filter-stack and probe helpers are invoked without _args.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data) \
{ \
	struct probe_local_vars { _locvar }; \
	struct lttng_event *__event = __data; \
	struct lttng_probe_ctx __lttng_probe_ctx = { \
		.event = __event, \
		.interruptible = !irqs_disabled(), \
	}; \
	struct lttng_channel *__chan = __event->chan; \
	struct lttng_session *__session = __chan->session; \
	struct lib_ring_buffer_ctx __ctx; \
	size_t __event_len, __event_align; \
	size_t __dynamic_len_idx __attribute__((unused)) = 0; \
	/* Overlay: size bookkeeping and filter stack are never live at the same time. */ \
	union { \
		size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar; \
	int __ret; \
	struct probe_local_vars __tp_locvar; \
	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
			&__tp_locvar; \
	struct lttng_pid_tracker *__lpf; \
	\
	/* Bail out early unless session, channel and event are all enabled. */ \
	if (!_TP_SESSION_CHECK(session, __session)) \
		return; \
	if (unlikely(!ACCESS_ONCE(__session->active))) \
		return; \
	if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
		return; \
	if (unlikely(!ACCESS_ONCE(__event->enabled))) \
		return; \
	/* Honor the PID tracker, if one is attached to the session. */ \
	__lpf = lttng_rcu_dereference(__session->pid_tracker); \
	if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
		return; \
	_code_pre \
	/* Run attached filter bytecode; record if any filter accepts, or if \
	 * at least one enabler has no filter at all. */ \
	if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime; \
		int __filter_record = __event->has_enablers_without_bytecode; \
		\
		__event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar); \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
				__filter_record = 1; \
		} \
		if (likely(!__filter_record)) \
			goto __post; \
	} \
	/* Compute payload size/alignment, reserve buffer space, serialize fields. */ \
	__event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \
	__event_align = __event_get_align__##_name(tp_locvar); \
	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
			__event_align, -1); \
	__ret = __chan->ops->event_reserve(&__ctx, __event->id); \
	if (__ret < 0) \
		goto __post; \
	_fields \
	__chan->ops->event_commit(&__ctx); \
__post: \
	_code_post \
	return; \
}
1053
1054 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1055
1056 #undef __get_dynamic_len
1057
1058 /*
1059 * Stage 7 of the trace events.
1060 *
1061 * Create event descriptions.
1062 */
1063
1064 /* Named field types must be defined in lttng-types.h */
1065
1066 #include <probes/lttng-events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1067
/* Default probe callback: address of the generated __event_probe__ function. */
#ifndef TP_PROBE_CB
#define TP_PROBE_CB(_template) &__event_probe__##_template
#endif

/*
 * Emit one event descriptor per instance, binding the (possibly
 * remapped) event name to the template's field array and probe
 * callback. .name is the LTTng-visible name, .kname the kernel
 * tracepoint name.
 */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
static const struct lttng_event_desc __event_desc___##_map = { \
	.fields = __event_fields___##_template, \
	.name = #_map, \
	.kname = #_name, \
	.probe_callback = (void *) TP_PROBE_CB(_template), \
	.nr_fields = ARRAY_SIZE(__event_fields___##_template), \
	.owner = THIS_MODULE, \
};

/* With-arguments form produces the same descriptor as the NOARGS form. */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1086
1087 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1088
1089 /*
1090 * Stage 8 of the trace events.
1091 *
1092 * Create an array of event description pointers.
1093 */
1094
1095 #include <probes/lttng-events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1096
/*
 * Rewire the instance macros so that re-including the TRACE_INCLUDE
 * file expands each event to a single descriptor pointer, populating
 * the per-subsystem descriptor array below.
 */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
		&__event_desc___##_map,

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)

/* Two-level helper so TRACE_SYSTEM is macro-expanded before pasting. */
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)

/* Array of all event descriptors declared by this subsystem. */
static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
};

#undef TP_ID1
#undef TP_ID
1114
1115 /*
1116 * Stage 9 of the trace events.
1117 *
1118 * Create a toplevel descriptor for the whole probe.
1119 */
1120
/* Two-level helper so TRACE_SYSTEM is macro-expanded before pasting. */
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)

/*
 * Top-level probe descriptor for the whole subsystem, referencing the
 * event descriptor array built in stage 8.
 * non-const because list head will be modified when registered.
 */
static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
	.provider = __stringify(TRACE_SYSTEM),
	.event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
	.nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
	.head = { NULL, NULL },
	.lazy_init_head = { NULL, NULL },
	.lazy = 0,
};

#undef TP_ID1
#undef TP_ID
1136
1137 /*
1138 * Stage 10 of the trace events.
1139 *
1140 * Register/unregister probes at module load/unload.
1141 */
1142
1143 #include <probes/lttng-events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1144
/* Expand TRACE_SYSTEM before pasting, giving each subsystem unique symbols. */
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)
#define module_init_eval1(_token, _system) module_init(_token##_system)
#define module_init_eval(_token, _system) module_init_eval1(_token, _system)
#define module_exit_eval1(_token, _system) module_exit(_token##_system)
#define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)

#ifndef TP_MODULE_NOINIT
/*
 * Register this subsystem's probe descriptor with the LTTng core at
 * module load. wrapper_vmalloc_sync_all() presumably syncs vmalloc'd
 * module mappings across page tables before probes can fire — see
 * wrapper/vmalloc.h for the rationale (NOTE(review): confirm there).
 */
static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
{
	wrapper_vmalloc_sync_all();
	return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}

/* Unregister the probe descriptor at module unload. */
static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
{
	lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}

/* Auto-register at load/unload unless the probe opts out. */
#ifndef TP_MODULE_NOAUTOLOAD
module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
#endif

#endif

#undef module_init_eval
#undef module_exit_eval
#undef TP_ID1
#undef TP_ID
1175
1176 #undef TP_PROTO
1177 #undef TP_ARGS
/* This page took 0.07996 seconds and 5 git commands to generate. */