+/*
+ * Pass-through expansions: each wrapper expands to its argument list
+ * unchanged, so the event-class macros below receive the raw
+ * TP_ARGS/TP_FIELDS/TP_locvar/TP_code contents verbatim for this
+ * include pass.
+ */
+#undef TP_ARGS
+#define TP_ARGS(...) __VA_ARGS__
+
+#undef TP_FIELDS
+#define TP_FIELDS(...) __VA_ARGS__
+
+#undef TP_locvar
+#define TP_locvar(...) __VA_ARGS__
+
+#undef TP_code
+#define TP_code(...) __VA_ARGS__
+
+/*
+ * For state dump, check that "session" argument (mandatory) matches the
+ * session this event belongs to. Ensures that we write state dump data only
+ * into the started session, not into all sessions.
+ */
+#ifdef TP_SESSION_CHECK
+#define _TP_SESSION_CHECK(session, csession) (session == csession)
+#else /* TP_SESSION_CHECK */
+/* Regular (non state-dump) probes: the check always passes; both
+ * arguments are ignored, so "session" need not exist at the call site. */
+#define _TP_SESSION_CHECK(session, csession) 1
+#endif /* TP_SESSION_CHECK */
+
+/*
+ * Using twice size for filter stack data to hold size and pointer for
+ * each field (worst case). For integers, max size required is 64-bit.
+ * Same for double-precision floats. Those fit within
+ * 2*sizeof(unsigned long) for all supported architectures.
+ * Perform UNION (||) of filter runtime list.
+ */
+/*
+ * Expands to the probe callback generated for an event class whose
+ * tracepoint takes arguments (_proto/_args).  Fast-path checks run in
+ * order — session match (state dump only), session active, channel
+ * enabled, event enabled, PID tracker, bytecode filters — and bail out
+ * as early as possible.  Only then is ring-buffer space reserved, the
+ * payload serialized by _fields, and the record committed.
+ * Inline comments below use block form so they are stripped before
+ * macro expansion and do not alter the generated code.
+ */
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \
+static void __event_probe__##_name(void *__data, _proto) \
+{ \
+ struct probe_local_vars { _locvar }; /* user-declared locals from TP_locvar */ \
+ struct lttng_event *__event = __data; \
+ struct lttng_channel *__chan = __event->chan; \
+ struct lttng_session *__session = __chan->session; \
+ struct lib_ring_buffer_ctx __ctx; \
+ size_t __event_len, __event_align; \
+ size_t __dynamic_len_idx __attribute__((unused)) = 0; \
+ union { /* scratch shared by size computation and filter stack (see sizing comment above) */ \
+ size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
+ char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+ } __stackvar; \
+ int __ret; \
+ struct probe_local_vars __tp_locvar; \
+ struct probe_local_vars *tp_locvar __attribute__((unused)) = \
+ &__tp_locvar; \
+ struct lttng_pid_tracker *__lpf; \
+ \
+ if (!_TP_SESSION_CHECK(session, __session)) /* state dump: only the matching session */ \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__session->active))) \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__event->enabled))) \
+ return; \
+ __lpf = lttng_rcu_dereference(__session->pid_tracker); \
+ if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) /* PID not tracked */ \
+ return; \
+ _code /* user-supplied TP_code runs before filter evaluation */ \
+ if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
+ struct lttng_bytecode_runtime *bc_runtime; \
+ int __filter_record = __event->has_enablers_without_bytecode; \
+ \
+ __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+ tp_locvar, _args); \
+ lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
+ if (unlikely(bc_runtime->filter(bc_runtime, \
+ __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+ __filter_record = 1; \
+ } \
+ if (likely(!__filter_record)) /* no enabler accepted the event */ \
+ return; \
+ } \
+ __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \
+ tp_locvar, _args); \
+ __event_align = __event_get_align__##_name(tp_locvar, _args); \
+ lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
+ __event_align, -1); \
+ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
+ if (__ret < 0) /* reservation failed: drop the event */ \
+ return; \
+ _fields /* serialize the payload into the reserved slot */ \
+ __chan->ops->event_commit(&__ctx); \
+}
+
+/*
+ * Argument-less variant of LTTNG_TRACEPOINT_EVENT_CLASS_CODE: same
+ * probe structure and fast-path check ordering, but the generated
+ * callback takes no tracepoint arguments, so the size/align/filter
+ * helpers are invoked without an _args list.  Inline comments use
+ * block form so they are stripped before macro expansion.
+ */
+#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
+#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \
+static void __event_probe__##_name(void *__data) \
+{ \
+ struct probe_local_vars { _locvar }; /* user-declared locals from TP_locvar */ \
+ struct lttng_event *__event = __data; \
+ struct lttng_channel *__chan = __event->chan; \
+ struct lttng_session *__session = __chan->session; \
+ struct lib_ring_buffer_ctx __ctx; \
+ size_t __event_len, __event_align; \
+ size_t __dynamic_len_idx __attribute__((unused)) = 0; \
+ union { /* scratch shared by size computation and filter stack (see sizing comment above) */ \
+ size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
+ char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
+ } __stackvar; \
+ int __ret; \
+ struct probe_local_vars __tp_locvar; \
+ struct probe_local_vars *tp_locvar __attribute__((unused)) = \
+ &__tp_locvar; \
+ struct lttng_pid_tracker *__lpf; \
+ \
+ if (!_TP_SESSION_CHECK(session, __session)) /* state dump: only the matching session */ \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__session->active))) \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
+ return; \
+ if (unlikely(!ACCESS_ONCE(__event->enabled))) \
+ return; \
+ __lpf = lttng_rcu_dereference(__session->pid_tracker); \
+ if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) /* PID not tracked */ \
+ return; \
+ _code /* user-supplied TP_code runs before filter evaluation */ \
+ if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
+ struct lttng_bytecode_runtime *bc_runtime; \
+ int __filter_record = __event->has_enablers_without_bytecode; \
+ \
+ __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
+ tp_locvar); \
+ lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
+ if (unlikely(bc_runtime->filter(bc_runtime, \
+ __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+ __filter_record = 1; \
+ } \
+ if (likely(!__filter_record)) /* no enabler accepted the event */ \
+ return; \
+ } \
+ __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \
+ __event_align = __event_get_align__##_name(tp_locvar); \
+ lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
+ __event_align, -1); \
+ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
+ if (__ret < 0) /* reservation failed: drop the event */ \
+ return; \
+ _fields /* serialize the payload into the reserved slot */ \
+ __chan->ops->event_commit(&__ctx); \
+}