X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=probes%2Flttng-events.h;h=1b7df762b92586b29b90b6301b0f7fbf25b06cf7;hb=f127e61ee231d002fb9a7803643a157e06f6d2e2;hp=b4bac43c7dde81eb814cc43d63fe4ccf7ef1aaa7;hpb=1d12cebd26ab7c1ee4a23714ec71660c5352fbd4;p=lttng-modules.git diff --git a/probes/lttng-events.h b/probes/lttng-events.h index b4bac43c..1b7df762 100644 --- a/probes/lttng-events.h +++ b/probes/lttng-events.h @@ -1,823 +1,908 @@ -#include - /* - * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to - * strcpy(). + * lttng-events.h + * + * Copyright (C) 2009 Steven Rostedt + * Copyright (C) 2009-2014 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ -#undef tp_assign -#define tp_assign(dest, src) \ - lib_ring_buffer_align_ctx(config, &ctx, sizeof(src)); \ - lib_ring_buffer_write(config, &ctx, &src, sizeof(src)); -#undef tp_memcpy -#define tp_memcpy(dest, src, len) \ - lib_ring_buffer_align_ctx(config, &ctx, sizeof(*(src))); \ - lib_ring_buffer_write(config, &ctx, &src, len); - -/* TODO */ -#undef tp_strcpy -#define tp_strcpy(dest, src) __assign_str(dest, src) +#include +#include +#include +#include "lttng.h" +#include "lttng-types.h" +#include "lttng-probe-user.h" +#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */ +#include "../wrapper/ringbuffer/frontend_types.h" +#include "../lttng-events.h" +#include "../lttng-tracer-core.h" /* - * Stage 1 of the trace events. - * - * Create event field type metadata section. + * Macro declarations used for all stages. */ /* - * DECLARE_EVENT_CLASS can be used to add a generic function - * handlers for events. That is, if all events have the same - * parameters and just have distinct trace points. - * Each tracepoint can be defined with DEFINE_EVENT and that - * will map the DECLARE_EVENT_CLASS to the tracepoint. - * - * TRACE_EVENT is a one to one mapping between tracepoint and template. + * LTTng name mapping macros. LTTng remaps some of the kernel events to + * enforce name-spacing. */ -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ - DECLARE_EVENT_CLASS(name, \ - PARAMS(proto), \ - PARAMS(args), \ - PARAMS(tstruct), \ - PARAMS(assign), \ - PARAMS(print)); \ - DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); - -/* Named field types must be defined in lttng-types.h */ - -/* TODO turn into a structure definition ? 
*/ +#undef LTTNG_TRACEPOINT_EVENT_MAP +#define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS(map, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(fields)) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args)) + +#undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \ + PARAMS(fields)) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map) + +#undef LTTNG_TRACEPOINT_EVENT_CODE_MAP +#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code, fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(_locvar), \ + PARAMS(_code), \ + PARAMS(fields)) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args)) + +#undef LTTNG_TRACEPOINT_EVENT_CODE +#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code, fields) \ + LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(_locvar), \ + PARAMS(_code), \ + PARAMS(fields)) -#undef __field -#define __field(_type, _item) #_type " " #_item ";\n" - -#undef __field_ext -#define __field_ext(_type, _item, _filter_type) #_type " " #_item ";\n" +/* + * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function + * handlers for events. That is, if all events have the same parameters + * and just have distinct trace points. Each tracepoint can be defined + * with LTTNG_TRACEPOINT_EVENT_INSTANCE and that will map the + * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint. + * + * LTTNG_TRACEPOINT_EVENT is a one to one mapping between tracepoint and + * template. + */ -#undef __array -#define __array(_type, _item, _len) \ - "type { parent = array; length = " #_len "; elem_type = " #_type "; } " #_item";\n" +#undef LTTNG_TRACEPOINT_EVENT +#define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \ + LTTNG_TRACEPOINT_EVENT_MAP(name, name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(fields)) -#undef __dynamic_array -#define __dynamic_array(_type, _item, _len) \ - "type { parent = sequence; length_type = u32; elem_type = " #_type "; } " #_item ";\n" +#undef LTTNG_TRACEPOINT_EVENT_NOARGS +#define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \ + LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields)) -#undef __string -#define __string(_item, _src) \ - "type { parent = string; encoding = UTF8; } " #_item ";\n" +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE +#define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args)) -#undef TP_PROTO -#define TP_PROTO(args...) +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name) -#undef TP_ARGS -#define TP_ARGS(args...) +#undef LTTNG_TRACEPOINT_EVENT_CLASS +#define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \ + PARAMS(_fields)) -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args /* Only one used in this phase */ +#undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields)) -#undef TP_fast_assign -#define TP_fast_assign(args...) -#undef TP_printk -#define TP_printk(args...) 
+/* + * Stage 1 of the trace events. + * + * Create dummy trace calls for each events, verifying that the LTTng module + * instrumentation headers match the kernel arguments. Will be optimized + * out by the compiler. + */ -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ - tstruct +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef TP_ARGS +#define TP_ARGS(...) __VA_ARGS__ -/* Callbacks are meaningless to LTTng. */ -#undef TRACE_EVENT_FN -#define TRACE_EVENT_FN(name, proto, args, tstruct, \ - assign, print, reg, unreg) \ - TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ - PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \ +void trace_##_name(_proto); -#undef DEFINE_TRACE_EVENT_METADATA -#define DEFINE_TRACE_EVENT_METADATA \ - const char trace_event_metadata_##TRACE_SYSTEM[] +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \ +void trace_##_name(void *__data); -//static DEFINE_TRACE_EVENT_METADATA = -static const char blah[] = #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -; - -#if 0 /* * Stage 2 of the trace events. * - * Create static inline function that calculates event size. + * Create event field type metadata section. + * Each event produce an array of fields. */ +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" +#include "lttng-events-nowrite.h" + +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = __type_integer(_type, _byte_order, _base, none),\ + .nowrite = _nowrite, \ + .user = _user, \ + }, + +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = \ + { \ + .atype = atype_array, \ + .u = \ + { \ + .array = \ + { \ + .elem_type = __type_integer(_type, __BYTE_ORDER, 10, _encoding), \ + .length = _length, \ + } \ + } \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, + +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, \ + _length_type, _src_length, _encoding, \ + _base, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = \ + { \ + .atype = atype_sequence, \ + .u = \ + { \ + .sequence = \ + { \ + .length_type = __type_integer(_length_type, __BYTE_ORDER, 10, none), \ + .elem_type = __type_integer(_type, __BYTE_ORDER, _base, _encoding), \ + }, \ + }, \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, + +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = \ + { \ + .atype = atype_string, \ + .u = \ + { \ + .basic = { .string = { .encoding = lttng_encode_UTF8 } } \ + }, \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, + +#undef TP_FIELDS +#define TP_FIELDS(...) 
__VA_ARGS__ /* Only one used in this phase */ + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ + static const struct lttng_event_field __event_fields___##_name[] = { \ + _fields \ + }; +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, PARAMS(_fields)) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* * Stage 3 of the trace events. * - * Create the probe function : call even size calculation and write event data - * into the buffer. + * Create probe callback prototypes. */ +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static void __event_probe__##_name(void *__data, _proto); +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static void __event_probe__##_name(void *__data); +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +/* + * Stage 4 of the trace events. + * + * Create static inline function that calculates event size. + */ -#include +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" + +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ + __event_len += sizeof(_type); + +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ + __event_len += sizeof(_type) * (_length); + +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ + _src_length, _encoding, _base, _user, _nowrite) \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \ + __event_len += sizeof(_length_type); \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ + __dynamic_len[__dynamic_len_idx] = (_src_length); \ + __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \ + __dynamic_len_idx++; /* - * DECLARE_EVENT_CLASS can be used to add a generic function - * handlers for events. That is, if all events have the same - * parameters and just have distinct trace points. - * Each tracepoint can be defined with DEFINE_EVENT and that - * will map the DECLARE_EVENT_CLASS to the tracepoint. - * - * TRACE_EVENT is a one to one mapping between tracepoint and template. + * ctf_user_string includes \0. If returns 0, it faulted, so we set size to + * 1 (\0 only). 
*/ -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ - DECLARE_EVENT_CLASS(name, \ - PARAMS(proto), \ - PARAMS(args), \ - PARAMS(tstruct), \ - PARAMS(assign), \ - PARAMS(print)); \ - DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); - - -#undef __field -#define __field(type, item) type item; - -#undef __field_ext -#define __field_ext(type, item, filter_type) type item; - -#undef __array -#define __array(type, item, len) type item[len]; - -#undef __dynamic_array -#define __dynamic_array(type, item, len) u32 __data_loc_##item; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ - struct ftrace_raw_##name { \ - struct trace_entry ent; \ - tstruct \ - char __data[0]; \ - }; \ - \ - static struct ftrace_event_class event_class_##name; - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) \ - static struct ftrace_event_call __used \ - __attribute__((__aligned__(4))) event_##name - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - -/* Callbacks are meaningless to ftrace. */ -#undef TRACE_EVENT_FN -#define TRACE_EVENT_FN(name, proto, args, tstruct, \ - assign, print, reg, unreg) \ - TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ - PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) \ + if (_user) \ + __event_len += __dynamic_len[__dynamic_len_idx++] = \ + strlen(_src) + 1; \ + else \ + __event_len += __dynamic_len[__dynamic_len_idx++] = \ + max_t(size_t, lttng_strlen_user_inatomic(_src), 1); + +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ + +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ + +#undef TP_locvar +#define TP_locvar(...) __VA_ARGS__ + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ + void *__tp_locvar, _proto) \ +{ \ + size_t __event_len = 0; \ + unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ + return __event_len; \ +} + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ + void *__tp_locvar) \ +{ \ + size_t __event_len = 0; \ + unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ + return __event_len; \ +} #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Stage 2 of the trace events. + * Stage 4.1 of tracepoint event generation. * - * Create static inline function that calculates event size. + * Create static inline function that layout the filter stack data. + * We make both write and nowrite data available to the filter. 
*/ -#undef __field -#define __field(type, item) +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" +#include "lttng-events-nowrite.h" + +#undef _ctf_integer_ext_fetched +#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ + if (lttng_is_signed_type(_type)) { \ + int64_t __ctf_tmp_int64; \ + switch (sizeof(_type)) { \ + case 1: \ + { \ + union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + case 2: \ + { \ + union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + case 4: \ + { \ + union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + case 8: \ + { \ + union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + default: \ + BUG_ON(1); \ + }; \ + memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \ + } else { \ + uint64_t __ctf_tmp_uint64; \ + switch (sizeof(_type)) { \ + case 1: \ + { \ + union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + case 2: \ + { \ + union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + case 4: \ + { \ + union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + case 8: \ + { \ + union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + default: \ + BUG_ON(1); \ + }; \ + memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \ + } \ + __stack_data += sizeof(int64_t); + +#undef _ctf_integer_ext_isuser0 +#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \ + _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) + +#undef _ctf_integer_ext_isuser1 +#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \ +{ \ + __typeof__(_user_src) _src; \ + if (get_user(_src, &(_user_src))) \ + _src = 0; \ + _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ +} -#undef __field_ext -#define __field_ext(type, item, filter_type) +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \ + _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite) + +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + { \ + unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \ + const void *__ctf_tmp_ptr = (_src); \ + memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \ + __stack_data += sizeof(unsigned long); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void **)); \ + __stack_data += sizeof(void **); \ + } + +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ + _src_length, _encoding, _base, _user, _nowrite) \ + { \ + unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \ + const void *__ctf_tmp_ptr = (_src); \ + memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \ + __stack_data += sizeof(unsigned long); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void **)); \ + __stack_data += sizeof(void **); \ + } + +#undef _ctf_string +#define 
_ctf_string(_item, _src, _user, _nowrite) \ + { \ + const void *__ctf_tmp_ptr = (_src); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void **)); \ + __stack_data += sizeof(void **); \ + } -#undef __array -#define __array(type, item, len) +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ + +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ + +#undef TP_locvar +#define TP_locvar(...) __VA_ARGS__ + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static inline \ +void __event_prepare_filter_stack__##_name(char *__stack_data, \ + void *__tp_locvar) \ +{ \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ +} -#undef __dynamic_array -#define __dynamic_array(type, item, len) u32 item; +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static inline \ +void __event_prepare_filter_stack__##_name(char *__stack_data, \ + void *__tp_locvar, _proto) \ +{ \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ +} -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ - struct ftrace_data_offsets_##call { \ - tstruct; \ - }; +/* + * Stage 5 of the trace events. + * + * Create static inline function that calculates event payload alignment. + */ + +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" + +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); + +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ + _src_length, _encoding, _base, _user, _nowrite) \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) + +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ + +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ + +#undef TP_locvar +#define TP_locvar(...) 
__VA_ARGS__ + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \ +{ \ + size_t __event_align = 1; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ + return __event_align; \ +} + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static inline size_t __event_get_align__##_name(void *__tp_locvar) \ +{ \ + size_t __event_align = 1; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ + return __event_align; \ +} #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Stage 3 of the trace events. + * Stage 6 of tracepoint event generation. * - * Create the probe function : call even size calculation and write event data - * into the buffer. + * Create the probe function. This function calls event size calculation + * and writes event data into the buffer. */ -#undef __entry -#define __entry field - -#undef TP_printk -#define TP_printk(fmt, args...) fmt "\n", args - -#undef __get_dynamic_array -#define __get_dynamic_array(field) \ - ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) - -#undef __get_str -#define __get_str(field) (char *)__get_dynamic_array(field) - -#undef __print_flags -#define __print_flags(flag, delim, flag_array...) \ - ({ \ - static const struct trace_print_flags __flags[] = \ - { flag_array, { -1, NULL }}; \ - ftrace_print_flags_seq(p, delim, flag, __flags); \ - }) - -#undef __print_symbolic -#define __print_symbolic(value, symbol_array...) \ - ({ \ - static const struct trace_print_flags symbols[] = \ - { symbol_array, { -1, NULL }}; \ - ftrace_print_symbols_seq(p, value, symbols); \ - }) - -#undef __print_hex -#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ -static notrace enum print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ - struct trace_event *trace_event) \ -{ \ - struct ftrace_event_call *event; \ - struct trace_seq *s = &iter->seq; \ - struct ftrace_raw_##call *field; \ - struct trace_entry *entry; \ - struct trace_seq *p = &iter->tmp_seq; \ - int ret; \ - \ - event = container_of(trace_event, struct ftrace_event_call, \ - event); \ - \ - entry = iter->ent; \ - \ - if (entry->type != event->event.type) { \ - WARN_ON_ONCE(1); \ - return TRACE_TYPE_UNHANDLED; \ - } \ - \ - field = (typeof(field))entry; \ - \ - trace_seq_init(p); \ - ret = trace_seq_printf(s, "%s: ", event->name); \ - if (ret) \ - ret = trace_seq_printf(s, print); \ - if (!ret) \ - return TRACE_TYPE_PARTIAL_LINE; \ - \ - return TRACE_TYPE_HANDLED; \ -} \ -static struct trace_event_functions ftrace_event_type_funcs_##call = { \ - .trace = ftrace_raw_output_##call, \ -}; +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" + +#undef _ctf_integer_ext_fetched +#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ + { \ + _type __tmp = _src; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\ + __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\ + } + +#undef _ctf_integer_ext_isuser0 +#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \ + _ctf_integer_ext_fetched(_type, _item, _src, 
_byte_order, _base, _nowrite) + +#undef _ctf_integer_ext_isuser1 +#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \ +{ \ + __typeof__(_user_src) _src; \ + if (get_user(_src, &(_user_src))) \ + _src = 0; \ + _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ +} -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ -static notrace enum print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ - struct trace_event *event) \ -{ \ - struct trace_seq *s = &iter->seq; \ - struct ftrace_raw_##template *field; \ - struct trace_entry *entry; \ - struct trace_seq *p = &iter->tmp_seq; \ - int ret; \ - \ - entry = iter->ent; \ - \ - if (entry->type != event_##call.event.type) { \ - WARN_ON_ONCE(1); \ - return TRACE_TYPE_UNHANDLED; \ +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \ + _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite) + +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \ + } + +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ + _src_length, _encoding, _base, _user, _nowrite) \ + { \ + _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ + __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ } \ - \ - field = (typeof(field))entry; \ - \ - trace_seq_init(p); \ - ret = trace_seq_printf(s, "%s: ", #call); \ - if (ret) \ - ret = trace_seq_printf(s, print); \ - if (!ret) \ - return TRACE_TYPE_PARTIAL_LINE; \ - \ - return TRACE_TYPE_HANDLED; \ -} \ -static struct trace_event_functions ftrace_event_type_funcs_##call = { \ - .trace = ftrace_raw_output_##call, \ -}; + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } + +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \ + if (_user) { \ + __chan->ops->event_strcpy_from_user(&__ctx, _src, \ + __get_dynamic_len(dest)); \ + } else { \ + __chan->ops->event_strcpy(&__ctx, _src, \ + __get_dynamic_len(dest)); \ + } + +/* Beware: this get len actually consumes the len value */ +#undef __get_dynamic_len +#define __get_dynamic_len(field) __stackvar.__dynamic_len[__dynamic_len_idx++] -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef TP_PROTO +#define TP_PROTO(...) 
__VA_ARGS__ -#undef __field_ext -#define __field_ext(type, item, filter_type) \ - ret = trace_define_field(event_call, #type, #item, \ - offsetof(typeof(field), item), \ - sizeof(field.item), \ - is_signed_type(type), filter_type); \ - if (ret) \ - return ret; - -#undef __field -#define __field(type, item) __field_ext(type, item, FILTER_OTHER) - -#undef __array -#define __array(type, item, len) \ - BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ - ret = trace_define_field(event_call, #type "[" #len "]", #item, \ - offsetof(typeof(field), item), \ - sizeof(field.item), \ - is_signed_type(type), FILTER_OTHER); \ - if (ret) \ - return ret; - -#undef __dynamic_array -#define __dynamic_array(type, item, len) \ - ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ - offsetof(typeof(field), __data_loc_##item), \ - sizeof(field.__data_loc_##item), \ - is_signed_type(type), FILTER_OTHER); - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ -static int notrace \ -ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ -{ \ - struct ftrace_raw_##call field; \ - int ret; \ - \ - tstruct; \ - \ - return ret; \ -} +#undef TP_ARGS +#define TP_ARGS(...) __VA_ARGS__ -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef TP_locvar +#define TP_locvar(...) __VA_ARGS__ -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef TP_code +#define TP_code(...) __VA_ARGS__ /* - * remember the offset of each array from the beginning of the event. + * For state dump, check that "session" argument (mandatory) matches the + * session this event belongs to. Ensures that we write state dump data only + * into the started session, not into all sessions. */ +#ifdef TP_SESSION_CHECK +#define _TP_SESSION_CHECK(session, csession) (session == csession) +#else /* TP_SESSION_CHECK */ +#define _TP_SESSION_CHECK(session, csession) 1 +#endif /* TP_SESSION_CHECK */ -#undef __entry -#define __entry entry - -#undef __field -#define __field(type, item) - -#undef __field_ext -#define __field_ext(type, item, filter_type) - -#undef __array -#define __array(type, item, len) - -#undef __dynamic_array -#define __dynamic_array(type, item, len) \ - __data_offsets->item = __data_size + \ - offsetof(typeof(*entry), __data); \ - __data_offsets->item |= (len * sizeof(type)) << 16; \ - __data_size += (len) * sizeof(type); - -#undef __string -#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ -static inline notrace int ftrace_get_offsets_##call( \ - struct ftrace_data_offsets_##call *__data_offsets, proto) \ -{ \ - int __data_size = 0; \ - struct ftrace_raw_##call __maybe_unused *entry; \ - \ - tstruct; \ - \ - return __data_size; \ +/* + * Using twice size for filter stack data to hold size and pointer for + * each field (worse case). For integers, max size required is 64-bit. + * Same for double-precision floats. Those fit within + * 2*sizeof(unsigned long) for all supported architectures. + * Perform UNION (||) of filter runtime list. 
+ */ +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static void __event_probe__##_name(void *__data, _proto) \ +{ \ + struct probe_local_vars { _locvar }; \ + struct lttng_event *__event = __data; \ + struct lttng_channel *__chan = __event->chan; \ + struct lttng_session *__session = __chan->session; \ + struct lib_ring_buffer_ctx __ctx; \ + size_t __event_len, __event_align; \ + size_t __dynamic_len_idx __attribute__((unused)) = 0; \ + union { \ + size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \ + char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \ + } __stackvar; \ + int __ret; \ + struct probe_local_vars __tp_locvar; \ + struct probe_local_vars *tp_locvar __attribute__((unused)) = \ + &__tp_locvar; \ + struct lttng_pid_tracker *__lpf; \ + \ + if (!_TP_SESSION_CHECK(session, __session)) \ + return; \ + if (unlikely(!ACCESS_ONCE(__session->active))) \ + return; \ + if (unlikely(!ACCESS_ONCE(__chan->enabled))) \ + return; \ + if (unlikely(!ACCESS_ONCE(__event->enabled))) \ + return; \ + __lpf = rcu_dereference(__session->pid_tracker); \ + if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \ + return; \ + _code \ + if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \ + struct lttng_bytecode_runtime *bc_runtime; \ + int __filter_record = __event->has_enablers_without_bytecode; \ + \ + __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \ + tp_locvar, _args); \ + list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ + if (unlikely(bc_runtime->filter(bc_runtime, \ + __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \ + __filter_record = 1; \ + } \ + if (likely(!__filter_record)) \ + return; \ + } \ + __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \ + tp_locvar, _args); \ + __event_align = __event_get_align__##_name(tp_locvar, _args); \ + lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \ + __event_align, -1); \ + __ret = __chan->ops->event_reserve(&__ctx, __event->id); \ + if (__ret < 0) \ + return; \ + _fields \ + __chan->ops->event_commit(&__ctx); \ } -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static void __event_probe__##_name(void *__data) \ +{ \ + struct probe_local_vars { _locvar }; \ + struct lttng_event *__event = __data; \ + struct lttng_channel *__chan = __event->chan; \ + struct lttng_session *__session = __chan->session; \ + struct lib_ring_buffer_ctx __ctx; \ + size_t __event_len, __event_align; \ + size_t __dynamic_len_idx __attribute__((unused)) = 0; \ + union { \ + size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \ + char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \ + } __stackvar; \ + int __ret; \ + struct probe_local_vars __tp_locvar; \ + struct probe_local_vars *tp_locvar __attribute__((unused)) = \ + &__tp_locvar; \ + struct lttng_pid_tracker *__lpf; \ + \ + if (!_TP_SESSION_CHECK(session, __session)) \ + return; \ + if (unlikely(!ACCESS_ONCE(__session->active))) \ + return; \ + if (unlikely(!ACCESS_ONCE(__chan->enabled))) \ + return; \ + if 
(unlikely(!ACCESS_ONCE(__event->enabled))) \ + return; \ + __lpf = rcu_dereference(__session->pid_tracker); \ + if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \ + return; \ + _code \ + if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \ + struct lttng_bytecode_runtime *bc_runtime; \ + int __filter_record = __event->has_enablers_without_bytecode; \ + \ + __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \ + tp_locvar); \ + list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ + if (unlikely(bc_runtime->filter(bc_runtime, \ + __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \ + __filter_record = 1; \ + } \ + if (likely(!__filter_record)) \ + return; \ + } \ + __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \ + __event_align = __event_get_align__##_name(tp_locvar); \ + lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \ + __event_align, -1); \ + __ret = __chan->ops->event_reserve(&__ctx, __event->id); \ + if (__ret < 0) \ + return; \ + _fields \ + __chan->ops->event_commit(&__ctx); \ +} #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef __get_dynamic_len + /* - * Stage 4 of the trace events. - * - * Override the macros in to include the following: - * - * For those macros defined with TRACE_EVENT: - * - * static struct ftrace_event_call event_; - * - * static void ftrace_raw_event_(void *__data, proto) - * { - * struct ftrace_event_call *event_call = __data; - * struct ftrace_data_offsets_ __maybe_unused __data_offsets; - * struct ring_buffer_event *event; - * struct ftrace_raw_ *entry; <-- defined in stage 1 - * struct ring_buffer *buffer; - * unsigned long irq_flags; - * int __data_size; - * int pc; - * - * local_save_flags(irq_flags); - * pc = preempt_count(); - * - * __data_size = ftrace_get_offsets_(&__data_offsets, args); - * - * event = trace_current_buffer_lock_reserve(&buffer, - * event_->event.type, - * sizeof(*entry) + __data_size, - * irq_flags, pc); - * if (!event) - * return; - * entry = ring_buffer_event_data(event); - * - * { ; } <-- Here we assign the entries by the __field and - * __array macros. - * - * if (!filter_current_check_discard(buffer, event_call, entry, event)) - * trace_current_buffer_unlock_commit(buffer, - * event, irq_flags, pc); - * } - * - * static struct trace_event ftrace_event_type_ = { - * .trace = ftrace_raw_output_, <-- stage 2 - * }; - * - * static const char print_fmt_[] = ; - * - * static struct ftrace_event_class __used event_class_
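
For orientation, the macro layers added by this patch are consumed by per-subsystem instrumentation headers of roughly the following shape. This is a minimal, hypothetical sketch and not part of the patch: the TRACE_SYSTEM value, the include guard, the demo_open event and its filename/flags arguments are invented for illustration, and the ctf_string()/ctf_integer() field helpers are assumed to be the public wrappers around the _ctf_string/_ctf_integer_ext macros provided by the lttng-events-write.h header included above. The real headers under instrumentation/events/lttng-module/ carry additional include and define_trace boilerplate that is omitted here.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(LTTNG_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_DEMO_H

/* The usual LTTng probe includes go here (omitted in this sketch). */

LTTNG_TRACEPOINT_EVENT(demo_open,

	/* Must match the prototype of the kernel tracepoint being probed. */
	TP_PROTO(const char *filename, int flags),

	TP_ARGS(filename, flags),

	/*
	 * Stage 2 above expands this field list into
	 * __event_fields___demo_open[]; stages 4, 5 and 6 generate the
	 * matching __event_get_size__, __event_get_align__ and
	 * __event_probe__ functions from the same list.
	 */
	TP_FIELDS(
		ctf_string(filename, filename)
		ctf_integer(int, flags, flags)
	)
)

#endif /* LTTNG_TRACE_DEMO_H */

When the LTTng event name has to differ from the kernel tracepoint name, the header would invoke LTTNG_TRACEPOINT_EVENT_MAP(name, map, ...) instead, which is the name-spacing remap described at the top of the new lttng-events.h.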