/* SPDX-License-Identifier: MIT
 *
 * lttng-filter.c
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng-filter.h>
static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load immediate star globbing pattern (literal string)
	 * matching.
	 */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary operator: apply to */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
};
const char *lttng_filter_print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}
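/*
 * Apply a field relocation: look up the named field in the event
 * descriptor, compute its offset in the interpreter's fixed-size field
 * layout by summing the sizes of the preceding fields, and specialize
 * the generic FILTER_OP_LOAD_FIELD_REF opcode according to the field
 * type. The computed offset is stored in the instruction's field_ref.
 */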
static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event by name */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct:	/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array:
		{
			const struct lttng_basic_type *elem_type =
				&field->type.u.array.elem_type;

			if (elem_type->atype != atype_integer || elem_type->u.basic.integer.encoding == lttng_encode_none)
				return -EINVAL;
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		}
		case atype_sequence:
		{
			const struct lttng_basic_type *elem_type =
				&field->type.u.sequence.elem_type;

			if (elem_type->atype != atype_integer || elem_type->u.basic.integer.encoding == lttng_encode_none)
				return -EINVAL;
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		}
		case atype_string:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_struct:	/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:	/* Unsupported. */
		case atype_array_bitfield:	/* Unsupported. */
		case atype_sequence_bitfield:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
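/*
 * Apply a context relocation: resolve the context field by name within
 * the static context table, then specialize the generic
 * FILTER_OP_GET_CONTEXT_REF opcode according to the context field
 * type. The 16-bit reference offset stores the context index rather
 * than a byte offset.
 */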
static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
			/* Sequence and array supported as string */
		case atype_array:
		{
			const struct lttng_basic_type *elem_type =
				&ctx_field->event_field.type.u.array.elem_type;

			if (elem_type->atype != atype_integer || elem_type->u.basic.integer.encoding == lttng_encode_none)
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		}
		case atype_sequence:
		{
			const struct lttng_basic_type *elem_type =
				&ctx_field->event_field.type.u.sequence.elem_type;

			if (elem_type->atype != atype_integer || elem_type->u.basic.integer.encoding == lttng_encode_none)
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		}
		case atype_string:
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_struct:	/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:	/* Unsupported. */
		case atype_array_bitfield:	/* Unsupported. */
		case atype_sequence_bitfield:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
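/*
 * Apply one relocation entry. As documented by the linking loop below,
 * each entry in the reloc table is a uint16_t offset into the
 * instruction stream followed by a NUL-terminated name, e.g. a
 * hypothetical entry:
 *
 *   | 0x0010 | "next_pid\0" |
 *
 * The offset must point at a load_op whose opcode selects which kind
 * of relocation to perform.
 */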
static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
			reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
			reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		break;
	default:
		printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}
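/*
 * Return 1 if this filter bytecode already has a runtime linked to the
 * event, 0 otherwise.
 */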
static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}
/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_filter_bytecode_node *filter_bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->p.event = event;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret)
			goto link_error;
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret)
		goto link_error;
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event, runtime);
	if (ret)
		goto link_error;
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}
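/*
 * Select the filter callback for a runtime: fall back to the
 * always-false filter whenever the enabler is disabled or linking
 * failed, otherwise use the bytecode interpreter.
 */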
void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}
/*
 * Link bytecode for all enablers referenced by an event.
 */
void lttng_enabler_event_link_bytecode(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event->desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order.
		 */
		list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum < bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}
/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_filter_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}
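/*
 * Free every bytecode node attached to an enabler. Safe list iteration
 * is required since each node is freed while walking the list.
 */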
void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}
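/*
 * Free the bytecode runtimes linked to an event, along with their data
 * buffers.
 */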
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}