X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=probes%2Flttng-tracepoint-event-impl.h;h=b304dde368b90e46ba480e9cbcd3ede633221b04;hb=b7cdc18250880cc44edeef4a4b42c8ac7a135a6d;hp=f774dff5594c6fb8998b808fdd3bfe851908d63b;hpb=ba012e22f899425f71dcd4482cc8ff3654aca483;p=lttng-modules.git diff --git a/probes/lttng-tracepoint-event-impl.h b/probes/lttng-tracepoint-event-impl.h index f774dff5..b304dde3 100644 --- a/probes/lttng-tracepoint-event-impl.h +++ b/probes/lttng-tracepoint-event-impl.h @@ -1,37 +1,30 @@ -/* +/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only) + * * lttng-tracepoint-event-impl.h * * Copyright (C) 2009 Steven Rostedt * Copyright (C) 2009-2014 Mathieu Desnoyers - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; only - * version 2.1 of the License. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include #include #include #include +#include #include #include #include #include /* for wrapper_vmalloc_sync_all() */ #include +#include #include +#include #include #include +#include + +#define __LTTNG_NULL_STRING "(null)" /* * Macro declarations used for all stages. @@ -177,6 +170,77 @@ void __event_template_proto___##_name(void); #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +/* + * Stage 1.2 of tracepoint event generation + * + * Unfolding the enums + */ +#include /* Reset all macros within TRACE_EVENT */ + +/* Enumeration entry (single value) */ +#undef ctf_enum_value +#define ctf_enum_value(_string, _value) \ + { \ + .start = { \ + .signedness = lttng_is_signed_type(__typeof__(_value)), \ + .value = lttng_is_signed_type(__typeof__(_value)) ? \ + (long long) (_value) : (_value), \ + }, \ + .end = { \ + .signedness = lttng_is_signed_type(__typeof__(_value)), \ + .value = lttng_is_signed_type(__typeof__(_value)) ? \ + (long long) (_value) : (_value), \ + }, \ + .string = (_string), \ + }, + +/* Enumeration entry (range) */ +#undef ctf_enum_range +#define ctf_enum_range(_string, _range_start, _range_end) \ + { \ + .start = { \ + .signedness = lttng_is_signed_type(__typeof__(_range_start)), \ + .value = lttng_is_signed_type(__typeof__(_range_start)) ? \ + (long long) (_range_start) : (_range_start), \ + }, \ + .end = { \ + .signedness = lttng_is_signed_type(__typeof__(_range_end)), \ + .value = lttng_is_signed_type(__typeof__(_range_end)) ? \ + (long long) (_range_end) : (_range_end), \ + }, \ + .string = (_string), \ + }, + +/* Enumeration entry (automatic value; follows the rules of CTF) */ +#undef ctf_enum_auto +#define ctf_enum_auto(_string) \ + { \ + .start = { \ + .signedness = -1, \ + .value = -1, \ + }, \ + .end = { \ + .signedness = -1, \ + .value = -1, \ + }, \ + .string = (_string), \ + .options = { \ + .is_auto = 1, \ + } \ + }, + +#undef TP_ENUM_VALUES +#define TP_ENUM_VALUES(...) 
\ + __VA_ARGS__ + +#undef LTTNG_TRACEPOINT_ENUM +#define LTTNG_TRACEPOINT_ENUM(_name, _values) \ + const struct lttng_enum_entry __enum_values__##_name[] = { \ + _values \ + }; + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + /* * Stage 2 of the trace events. * @@ -199,7 +263,7 @@ void __event_template_proto___##_name(void); }, #undef _ctf_array_encoded -#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \ { \ .name = #_item, \ .type = \ @@ -209,7 +273,7 @@ void __event_template_proto___##_name(void); { \ .array = \ { \ - .elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \ + .elem_type = __type_integer(_type, 0, 0, 0, _byte_order, _base, _encoding), \ .length = _length, \ } \ } \ @@ -224,7 +288,7 @@ void __event_template_proto___##_name(void); .name = #_item, \ .type = \ { \ - .atype = atype_array, \ + .atype = atype_array_bitfield, \ .u = \ { \ .array = \ @@ -270,7 +334,7 @@ void __event_template_proto___##_name(void); .name = #_item, \ .type = \ { \ - .atype = atype_sequence, \ + .atype = atype_sequence_bitfield, \ .u = \ { \ .sequence = \ @@ -301,6 +365,44 @@ void __event_template_proto___##_name(void); .user = _user, \ }, +#undef _ctf_enum +#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = { \ + .atype = atype_enum, \ + .u = { \ + .basic = { \ + .enumeration = { \ + .desc = &__enum_##_name, \ + .container_type = { \ + .size = sizeof(_type) * CHAR_BIT, \ + .alignment = lttng_alignof(_type) * CHAR_BIT, \ + .signedness = lttng_is_signed_type(_type), \ + .reverse_byte_order = 0, \ + .base = 10, \ + .encoding = lttng_encode_none, \ + }, \ + }, \ + }, \ + }, \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, + +#undef ctf_custom_field +#define ctf_custom_field(_type, _item, _code) \ + { \ + .name = #_item, \ + .type = { _type }, \ + .nowrite = 0, \ + .user = 0, \ + }, + +#undef ctf_custom_type +#define ctf_custom_type(...) __VA_ARGS__ + #undef TP_FIELDS #define TP_FIELDS(...) 
__VA_ARGS__ /* Only one used in this phase */ @@ -314,6 +416,14 @@ void __event_template_proto___##_name(void); #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post) +#undef LTTNG_TRACEPOINT_ENUM +#define LTTNG_TRACEPOINT_ENUM(_name, _values) \ + static const struct lttng_enum_desc __enum_##_name = { \ + .name = #_name, \ + .entries = __enum_values__##_name, \ + .nr_entries = ARRAY_SIZE(__enum_values__##_name), \ + }; + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* @@ -354,13 +464,13 @@ static void __event_probe__##_name(void *__data); __event_len += sizeof(_type); #undef _ctf_array_encoded -#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ __event_len += sizeof(_type) * (_length); #undef _ctf_array_bitfield #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ - _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite) #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ @@ -368,9 +478,15 @@ static void __event_probe__##_name(void *__data); __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \ __event_len += sizeof(_length_type); \ __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ - __dynamic_len[__dynamic_len_idx] = (_src_length); \ - __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \ - __dynamic_len_idx++; + { \ + size_t __seqlen = (_src_length); \ + \ + if (unlikely(++this_cpu_ptr(<tng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \ + goto error; \ + barrier(); /* reserve before use. */ \ + this_cpu_ptr(<tng_dynamic_len_stack)->stack[this_cpu_ptr(<tng_dynamic_len_stack)->offset - 1] = __seqlen; \ + __event_len += sizeof(_type) * __seqlen; \ + } #undef _ctf_sequence_bitfield #define _ctf_sequence_bitfield(_type, _item, _src, \ @@ -385,12 +501,33 @@ static void __event_probe__##_name(void *__data); */ #undef _ctf_string #define _ctf_string(_item, _src, _user, _nowrite) \ - if (_user) \ - __event_len += __dynamic_len[__dynamic_len_idx++] = \ + if (unlikely(++this_cpu_ptr(<tng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \ + goto error; \ + barrier(); /* reserve before use. */ \ + if (_user) { \ + __event_len += this_cpu_ptr(<tng_dynamic_len_stack)->stack[this_cpu_ptr(<tng_dynamic_len_stack)->offset - 1] = \ max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \ - else \ - __event_len += __dynamic_len[__dynamic_len_idx++] = \ - strlen(_src) + 1; + } else { \ + __event_len += this_cpu_ptr(<tng_dynamic_len_stack)->stack[this_cpu_ptr(<tng_dynamic_len_stack)->offset - 1] = \ + strlen((_src) ? (_src) : __LTTNG_NULL_STRING) + 1; \ + } + +#undef _ctf_enum +#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \ + _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite) + +#undef ctf_align +#define ctf_align(_type) \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); + +#undef ctf_custom_field +#define ctf_custom_field(_type, _item, _code) \ + { \ + _code \ + } + +#undef ctf_custom_code +#define ctf_custom_code(...) 
__VA_ARGS__ #undef TP_PROTO #define TP_PROTO(...) __VA_ARGS__ @@ -403,8 +540,7 @@ static void __event_probe__##_name(void *__data); #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ -static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ - void *__tp_locvar, _proto) \ +static inline ssize_t __event_get_size__##_name(void *__tp_locvar, _proto) \ { \ size_t __event_len = 0; \ unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \ @@ -412,12 +548,15 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ \ _fields \ return __event_len; \ + \ +error: \ + __attribute__((unused)); \ + return -1; \ } #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ -static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ - void *__tp_locvar) \ +static inline ssize_t __event_get_size__##_name(void *__tp_locvar) \ { \ size_t __event_len = 0; \ unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \ @@ -425,6 +564,10 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ \ _fields \ return __event_len; \ + \ +error: \ + __attribute__((unused)); \ + return -1; \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -456,18 +599,24 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ case 2: \ { \ union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \ + if (_byte_order != __BYTE_ORDER) \ + __swab16s(&__tmp.v); \ __ctf_tmp_int64 = (int64_t) __tmp.v; \ break; \ } \ case 4: \ { \ union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \ + if (_byte_order != __BYTE_ORDER) \ + __swab32s(&__tmp.v); \ __ctf_tmp_int64 = (int64_t) __tmp.v; \ break; \ } \ case 8: \ { \ union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \ + if (_byte_order != __BYTE_ORDER) \ + __swab64s(&__tmp.v); \ __ctf_tmp_int64 = (int64_t) __tmp.v; \ break; \ } \ @@ -487,18 +636,24 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ case 2: \ { \ union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \ + if (_byte_order != __BYTE_ORDER) \ + __swab16s(&__tmp.v); \ __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ break; \ } \ case 4: \ { \ union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \ + if (_byte_order != __BYTE_ORDER) \ + __swab32s(&__tmp.v); \ __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ break; \ } \ case 8: \ { \ union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \ + if (_byte_order != __BYTE_ORDER) \ + __swab64s(&__tmp.v); \ __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ break; \ } \ @@ -515,11 +670,15 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ #undef _ctf_integer_ext_isuser1 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \ -{ \ - __typeof__(_user_src) _src; \ - if (get_user(_src, &(_user_src))) \ - _src = 0; \ - _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ +{ \ + union { \ + char __array[sizeof(_user_src)]; \ + __typeof__(_user_src) __v; \ + } __tmp_fetch; \ + if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \ + &(_user_src), sizeof(_user_src))) \ + memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \ + _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \ } #undef _ctf_integer_ext @@ -527,7 +686,7 @@ static inline size_t 
__event_get_size__##_name(size_t *__dynamic_len, \ _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite) #undef _ctf_array_encoded -#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \ { \ unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \ const void *__ctf_tmp_ptr = (_src); \ @@ -539,7 +698,7 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ #undef _ctf_array_bitfield #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ - _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite) #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ @@ -563,11 +722,16 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ #undef _ctf_string #define _ctf_string(_item, _src, _user, _nowrite) \ { \ - const void *__ctf_tmp_ptr = (_src); \ + const void *__ctf_tmp_ptr = \ + ((_src) ? (_src) : __LTTNG_NULL_STRING); \ memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \ __stack_data += sizeof(void *); \ } +#undef _ctf_enum +#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \ + _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite) + #undef TP_PROTO #define TP_PROTO(...) __VA_ARGS__ @@ -616,12 +780,12 @@ void __event_prepare_filter_stack__##_name(char *__stack_data, \ __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); #undef _ctf_array_encoded -#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \ __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); #undef _ctf_array_bitfield #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ - _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite) #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ @@ -639,6 +803,14 @@ void __event_prepare_filter_stack__##_name(char *__stack_data, \ #undef _ctf_string #define _ctf_string(_item, _src, _user, _nowrite) +#undef _ctf_enum +#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \ + _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite) + +#undef ctf_align +#define ctf_align(_type) \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); + #undef TP_PROTO #define TP_PROTO(...) __VA_ARGS__ @@ -648,6 +820,15 @@ void __event_prepare_filter_stack__##_name(char *__stack_data, \ #undef TP_locvar #define TP_locvar(...) __VA_ARGS__ +#undef ctf_custom_field +#define ctf_custom_field(_type, _item, _code) _code + +#undef ctf_custom_code +#define ctf_custom_code(...) 
\ + { \ + __VA_ARGS__ \ + } + #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \ @@ -698,10 +879,14 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ #undef _ctf_integer_ext_isuser1 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \ { \ - __typeof__(_user_src) _src; \ - if (get_user(_src, &(_user_src))) \ - _src = 0; \ - _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ + union { \ + char __array[sizeof(_user_src)]; \ + __typeof__(_user_src) __v; \ + } __tmp_fetch; \ + if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \ + &(_user_src), sizeof(_user_src))) \ + memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \ + _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \ } #undef _ctf_integer_ext @@ -709,7 +894,7 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite) #undef _ctf_array_encoded -#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ if (_user) { \ __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \ @@ -769,7 +954,7 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ { \ - _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \ + _length_type __tmpl = this_cpu_ptr(<tng_dynamic_len_stack)->stack[__dynamic_len_idx]; \ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ } \ @@ -788,7 +973,7 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ _length_type, _src_length, \ _user, _nowrite) \ { \ - _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ + _length_type __tmpl = this_cpu_ptr(<tng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ } \ @@ -809,7 +994,7 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ _length_type, _src_length, \ _user, _nowrite) \ { \ - _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ + _length_type __tmpl = this_cpu_ptr(<tng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ } \ @@ -849,18 +1034,39 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ #undef _ctf_string #define _ctf_string(_item, _src, _user, _nowrite) \ - lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \ if (_user) { \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \ __chan->ops->event_strcpy_from_user(&__ctx, _src, \ __get_dynamic_len(dest)); \ } else { \ - __chan->ops->event_strcpy(&__ctx, _src, \ + const char *__ctf_tmp_string = \ + 
((_src) ? (_src) : __LTTNG_NULL_STRING); \ + lib_ring_buffer_align_ctx(&__ctx, \ + lttng_alignof(*__ctf_tmp_string)); \ + __chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \ __get_dynamic_len(dest)); \ } +#undef _ctf_enum +#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \ + _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite) + +#undef ctf_align +#define ctf_align(_type) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); + +#undef ctf_custom_field +#define ctf_custom_field(_type, _item, _code) _code + +#undef ctf_custom_code +#define ctf_custom_code(...) \ + { \ + __VA_ARGS__ \ + } + /* Beware: this get len actually consumes the len value */ #undef __get_dynamic_len -#define __get_dynamic_len(field) __stackvar.__dynamic_len[__dynamic_len_idx++] +#define __get_dynamic_len(field) this_cpu_ptr(<tng_dynamic_len_stack)->stack[__dynamic_len_idx++] #undef TP_PROTO #define TP_PROTO(...) __VA_ARGS__ @@ -911,29 +1117,51 @@ static void __event_probe__##_name(void *__data, _proto) \ struct lttng_channel *__chan = __event->chan; \ struct lttng_session *__session = __chan->session; \ struct lib_ring_buffer_ctx __ctx; \ - size_t __event_len, __event_align; \ - size_t __dynamic_len_idx __attribute__((unused)) = 0; \ + ssize_t __event_len; \ + size_t __event_align; \ + size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \ union { \ - size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \ + size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \ char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \ } __stackvar; \ int __ret; \ struct probe_local_vars __tp_locvar; \ struct probe_local_vars *tp_locvar __attribute__((unused)) = \ &__tp_locvar; \ - struct lttng_pid_tracker *__lpf; \ + struct lttng_id_tracker_rcu *__lf; \ \ if (!_TP_SESSION_CHECK(session, __session)) \ return; \ - if (unlikely(!ACCESS_ONCE(__session->active))) \ + if (unlikely(!READ_ONCE(__session->active))) \ + return; \ + if (unlikely(!READ_ONCE(__chan->enabled))) \ + return; \ + if (unlikely(!READ_ONCE(__event->enabled))) \ + return; \ + __lf = lttng_rcu_dereference(__session->pid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \ + return; \ + __lf = lttng_rcu_dereference(__session->vpid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \ return; \ - if (unlikely(!ACCESS_ONCE(__chan->enabled))) \ + __lf = lttng_rcu_dereference(__session->uid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, \ + lttng_current_uid()))) \ return; \ - if (unlikely(!ACCESS_ONCE(__event->enabled))) \ + __lf = lttng_rcu_dereference(__session->vuid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, \ + lttng_current_vuid()))) \ return; \ - __lpf = lttng_rcu_dereference(__session->pid_tracker); \ - if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \ + __lf = lttng_rcu_dereference(__session->gid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, \ + lttng_current_gid()))) \ return; \ + __lf = lttng_rcu_dereference(__session->vgid_tracker.p); \ + if (__lf && likely(!lttng_id_tracker_lookup(__lf, \ + lttng_current_vgid()))) \ + return; \ + __orig_dynamic_len_offset = this_cpu_ptr(<tng_dynamic_len_stack)->offset; \ + __dynamic_len_idx = __orig_dynamic_len_offset; \ _code_pre \ if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \ struct lttng_bytecode_runtime *bc_runtime; \ @@ -943,14 +1171,19 
@@ static void __event_probe__##_name(void *__data, _proto) \
 tp_locvar, _args); \
 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+ __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
 __filter_record = 1; \
+ break; \
+ } \
 } \
 if (likely(!__filter_record)) \
 goto __post; \
 } \
- __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \
- tp_locvar, _args); \
+ __event_len = __event_get_size__##_name(tp_locvar, _args); \
+ if (unlikely(__event_len < 0)) { \
+ lib_ring_buffer_lost_event_too_big(__chan->chan); \
+ goto __post; \
+ } \
 __event_align = __event_get_align__##_name(tp_locvar, _args); \
 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
 __event_align, -1); \
@@ -961,6 +1194,8 @@ static void __event_probe__##_name(void *__data, _proto) \
 __chan->ops->event_commit(&__ctx); \
 __post: \
 _code_post \
+ barrier(); /* use before un-reserve. */ \
+ this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
 return; \
 }
 
@@ -977,29 +1212,51 @@ static void __event_probe__##_name(void *__data) \
 struct lttng_channel *__chan = __event->chan; \
 struct lttng_session *__session = __chan->session; \
 struct lib_ring_buffer_ctx __ctx; \
- size_t __event_len, __event_align; \
- size_t __dynamic_len_idx __attribute__((unused)) = 0; \
+ ssize_t __event_len; \
+ size_t __event_align; \
+ size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
 union { \
- size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \
+ size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
 } __stackvar; \
 int __ret; \
 struct probe_local_vars __tp_locvar; \
 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
 &__tp_locvar; \
- struct lttng_pid_tracker *__lpf; \
+ struct lttng_id_tracker_rcu *__lf; \
 \
 if (!_TP_SESSION_CHECK(session, __session)) \
 return; \
- if (unlikely(!ACCESS_ONCE(__session->active))) \
+ if (unlikely(!READ_ONCE(__session->active))) \
+ return; \
+ if (unlikely(!READ_ONCE(__chan->enabled))) \
+ return; \
+ if (unlikely(!READ_ONCE(__event->enabled))) \
 return; \
- if (unlikely(!ACCESS_ONCE(__chan->enabled))) \
+ __lf = lttng_rcu_dereference(__session->pid_tracker.p); \
+ if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
 return; \
- if (unlikely(!ACCESS_ONCE(__event->enabled))) \
+ __lf = lttng_rcu_dereference(__session->vpid_tracker.p); \
+ if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
 return; \
- __lpf = lttng_rcu_dereference(__session->pid_tracker); \
- if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \
+ __lf = lttng_rcu_dereference(__session->uid_tracker.p); \
+ if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
+ lttng_current_uid()))) \
 return; \
+ __lf = lttng_rcu_dereference(__session->vuid_tracker.p); \
+ if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
+ lttng_current_vuid()))) \
+ return; \
+ __lf = lttng_rcu_dereference(__session->gid_tracker.p); \
+ if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
+ lttng_current_gid()))) \
+ return; \
+ __lf = lttng_rcu_dereference(__session->vgid_tracker.p); \
+ if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
+ lttng_current_vgid()))) \
+ return; \
+ __orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
+ __dynamic_len_idx = __orig_dynamic_len_offset; \
 _code_pre \
 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
 struct lttng_bytecode_runtime *bc_runtime; \
@@ -1009,13 +1266,19 @@ static void __event_probe__##_name(void *__data) \
 tp_locvar); \
 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
- __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
+ __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
 __filter_record = 1; \
+ break; \
+ } \
 } \
 if (likely(!__filter_record)) \
 goto __post; \
 } \
- __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \
+ __event_len = __event_get_size__##_name(tp_locvar); \
+ if (unlikely(__event_len < 0)) { \
+ lib_ring_buffer_lost_event_too_big(__chan->chan); \
+ goto __post; \
+ } \
 __event_align = __event_get_align__##_name(tp_locvar); \
 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
 __event_align, -1); \
@@ -1026,6 +1289,8 @@ static void __event_probe__##_name(void *__data) \
 __chan->ops->event_commit(&__ctx); \
 __post: \
 _code_post \
+ barrier(); /* use before un-reserve. */ \
+ this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
 return; \
 }
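
For context, the enumeration macros introduced in stage 1.2 of this patch (LTTNG_TRACEPOINT_ENUM, TP_ENUM_VALUES, ctf_enum_value, ctf_enum_range, ctf_enum_auto) and the new ctf_enum field type are consumed from probe definition headers. Below is a minimal sketch of such a header; the trace system, event and field names are hypothetical, and the surrounding include/guard boilerplate follows the usual lttng-modules probe conventions rather than anything added by this patch.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM my_subsys

#if !defined(LTTNG_TRACE_MY_SUBSYS_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_MY_SUBSYS_H

#include <probes/lttng-tracepoint-event.h>

/* Hypothetical enumeration: single values, a range, and an auto-assigned label. */
LTTNG_TRACEPOINT_ENUM(my_state,
	TP_ENUM_VALUES(
		ctf_enum_value("IDLE", 0)
		ctf_enum_range("BUSY", 1, 7)
		ctf_enum_auto("UNKNOWN")
	)
)

LTTNG_TRACEPOINT_EVENT(my_subsys_state_change,
	TP_PROTO(int state, const char *name),
	TP_ARGS(state, name),
	TP_FIELDS(
		/* Recorded through the _ctf_enum stage macros defined in this file. */
		ctf_enum(my_state, int, state, state)
		/* With this patch, a NULL pointer is serialized as "(null)". */
		ctf_string(name, name)
	)
)

#endif /* LTTNG_TRACE_MY_SUBSYS_H */

/* This part must be outside protection */
#include <probes/define_trace.h>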