-#undef __entry
-#define __entry field
-
-#undef TP_printk
-#define TP_printk(fmt, args...) fmt "\n", args
-
-#undef __get_dynamic_array
-#define __get_dynamic_array(field) \
- ((void *)__entry + (__entry->__data_loc_##field & 0xffff))
-
-#undef __get_str
-#define __get_str(field) (char *)__get_dynamic_array(field)
-
-#undef __print_flags
-#define __print_flags(flag, delim, flag_array...) \
- ({ \
- static const struct trace_print_flags __flags[] = \
- { flag_array, { -1, NULL }}; \
- ftrace_print_flags_seq(p, delim, flag, __flags); \
- })
-
-#undef __print_symbolic
-#define __print_symbolic(value, symbol_array...) \
- ({ \
- static const struct trace_print_flags symbols[] = \
- { symbol_array, { -1, NULL }}; \
- ftrace_print_symbols_seq(p, value, symbols); \
- })
-
-#undef __print_hex
-#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
-static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
- struct trace_event *trace_event) \
-{ \
- struct ftrace_event_call *event; \
- struct trace_seq *s = &iter->seq; \
- struct ftrace_raw_##call *field; \
- struct trace_entry *entry; \
- struct trace_seq *p = &iter->tmp_seq; \
- int ret; \
- \
- event = container_of(trace_event, struct ftrace_event_call, \
- event); \
- \
- entry = iter->ent; \
- \
- if (entry->type != event->event.type) { \
- WARN_ON_ONCE(1); \
- return TRACE_TYPE_UNHANDLED; \
- } \
- \
- field = (typeof(field))entry; \
- \
- trace_seq_init(p); \
- ret = trace_seq_printf(s, "%s: ", event->name); \
- if (ret) \
- ret = trace_seq_printf(s, print); \
- if (!ret) \
- return TRACE_TYPE_PARTIAL_LINE; \
- \
- return TRACE_TYPE_HANDLED; \
-} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
-};
-
-#undef DEFINE_EVENT_PRINT
-#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \
-static notrace enum print_line_t \
-ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
- struct trace_event *event) \
-{ \
- struct trace_seq *s = &iter->seq; \
- struct ftrace_raw_##template *field; \
- struct trace_entry *entry; \
- struct trace_seq *p = &iter->tmp_seq; \
- int ret; \
- \
- entry = iter->ent; \
- \
- if (entry->type != event_##call.event.type) { \
- WARN_ON_ONCE(1); \
- return TRACE_TYPE_UNHANDLED; \
- } \
- \
- field = (typeof(field))entry; \
- \
- trace_seq_init(p); \
- ret = trace_seq_printf(s, "%s: ", #call); \
- if (ret) \
- ret = trace_seq_printf(s, print); \
- if (!ret) \
- return TRACE_TYPE_PARTIAL_LINE; \
- \
- return TRACE_TYPE_HANDLED; \
-} \
-static struct trace_event_functions ftrace_event_type_funcs_##call = { \
- .trace = ftrace_raw_output_##call, \
-};
-
-#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
-
-#undef __field_ext
-#define __field_ext(type, item, filter_type) \
- ret = trace_define_field(event_call, #type, #item, \
- offsetof(typeof(field), item), \
- sizeof(field.item), \
- is_signed_type(type), filter_type); \
- if (ret) \
- return ret;
-
-#undef __field
-#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
-
-#undef __array
-#define __array(type, item, len) \
- BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- ret = trace_define_field(event_call, #type "[" #len "]", #item, \
- offsetof(typeof(field), item), \
- sizeof(field.item), \
- is_signed_type(type), FILTER_OTHER); \
- if (ret) \
- return ret;
-
-#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
- offsetof(typeof(field), __data_loc_##item), \
- sizeof(field.__data_loc_##item), \
- is_signed_type(type), FILTER_OTHER);
-
-#undef __string
-#define __string(item, src) __dynamic_array(char, item, -1)
-
-#undef DECLARE_EVENT_CLASS
-#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int notrace \
-ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
-{ \
- struct ftrace_raw_##call field; \
- int ret; \
- \
- tstruct; \
- \
- return ret; \
+/*
+ * Using twice the expected size for filter stack data to hold size and
+ * pointer for each field (worst case). For integers, max size required
+ * is 64-bit. Same for double-precision floats. Those fit within
+ * 2*sizeof(unsigned long) for all supported architectures.
+ * The results of the filters in the runtime list are combined with a
+ * logical OR (||).
+ */
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \
+static void __event_probe__##_name(void *__data, _proto) \
+{ \
+ struct probe_local_vars { _locvar }; \
+ struct lttng_event *__event = __data; \
+ struct lttng_channel *__chan = __event->chan; \
+ struct lttng_session *__session = __chan->session; \
+ struct lib_ring_buffer_ctx __ctx; \
+ size_t __event_len, __event_align; \
+ size_t __dynamic_len_idx __attribute__((unused)) = 0; \
+ union { \
+ size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
+ char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+ } __stackvar; \
+ int __ret; \
+ struct probe_local_vars __tp_locvar; \
+ struct probe_local_vars *tp_locvar __attribute__((unused)) = \
+ &__tp_locvar; \
+ struct lttng_pid_tracker *__lpf; \
+ \
+ if (!_TP_SESSION_CHECK(session, __session)) \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__session->active))) \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__event->enabled))) \
+ return; \
+ __lpf = lttng_rcu_dereference(__session->pid_tracker); \
+ if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
+ return; \
+ _code \
+ if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
+ struct lttng_bytecode_runtime *bc_runtime; \
+ int __filter_record = __event->has_enablers_without_bytecode; \
+ \
+ __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+ tp_locvar, _args); \
+ lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
+ if (unlikely(bc_runtime->filter(bc_runtime, \
+ __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+ __filter_record = 1; \
+ } \
+ if (likely(!__filter_record)) \
+ return; \
+ } \
+ __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \
+ tp_locvar, _args); \
+ __event_align = __event_get_align__##_name(tp_locvar, _args); \
+ lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
+ __event_align, -1); \
+ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
+ if (__ret < 0) \
+ return; \
+ _fields \
+ __chan->ops->event_commit(&__ctx); \