1 /* SPDX-License-Identifier: MIT
3 * lttng-filter-interpreter.c
5 * LTTng modules filter interpreter.
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 #include <wrapper/compiler_attributes.h>
11 #include <wrapper/uaccess.h>
12 #include <wrapper/objtool.h>
13 #include <wrapper/types.h>
14 #include <linux/swab.h>
16 #include <lttng-filter.h>
17 #include <lttng-string-utils.h>
/*
 * The interpreter function uses a very large stack frame (computed-goto
 * dispatch table + evaluation stack); exempt it from objtool's stack
 * frame validation (see wrapper/objtool.h).
 */
LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
22 * get_char should be called with page fault handler disabled if it is expected
23 * to handle user-space read.
26 char get_char(const struct estack_entry
*reg
, size_t offset
)
28 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
33 /* Handle invalid access as end of string. */
34 if (unlikely(!lttng_access_ok(VERIFY_READ
,
35 reg
->u
.s
.user_str
+ offset
,
38 /* Handle fault (nonzero return value) as end of string. */
39 if (unlikely(__copy_from_user_inatomic(&c
,
40 reg
->u
.s
.user_str
+ offset
,
45 return reg
->u
.s
.str
[offset
];
51 * -2: unknown escape char.
55 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
60 *c
= get_char(reg
, *offset
);
76 char get_char_at_cb(size_t at
, void *data
)
78 return get_char(data
, at
);
82 int stack_star_glob_match(struct estack
*stack
, int top
, const char *cmp_type
)
84 bool has_user
= false;
86 struct estack_entry
*pattern_reg
;
87 struct estack_entry
*candidate_reg
;
89 /* Disable the page fault handler when reading from userspace. */
90 if (estack_bx(stack
, top
)->u
.s
.user
91 || estack_ax(stack
, top
)->u
.s
.user
) {
96 /* Find out which side is the pattern vs. the candidate. */
97 if (estack_ax(stack
, top
)->u
.s
.literal_type
== ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
) {
98 pattern_reg
= estack_ax(stack
, top
);
99 candidate_reg
= estack_bx(stack
, top
);
101 pattern_reg
= estack_bx(stack
, top
);
102 candidate_reg
= estack_ax(stack
, top
);
105 /* Perform the match operation. */
106 result
= !strutils_star_glob_match_char_cb(get_char_at_cb
,
107 pattern_reg
, get_char_at_cb
, candidate_reg
);
115 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
117 size_t offset_bx
= 0, offset_ax
= 0;
118 int diff
, has_user
= 0;
120 if (estack_bx(stack
, top
)->u
.s
.user
121 || estack_ax(stack
, top
)->u
.s
.user
) {
129 char char_bx
, char_ax
;
131 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
132 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
134 if (unlikely(char_bx
== '\0')) {
135 if (char_ax
== '\0') {
139 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
140 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
141 ret
= parse_char(estack_ax(stack
, top
),
142 &char_ax
, &offset_ax
);
152 if (unlikely(char_ax
== '\0')) {
153 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
154 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
155 ret
= parse_char(estack_bx(stack
, top
),
156 &char_bx
, &offset_bx
);
165 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
166 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
167 ret
= parse_char(estack_bx(stack
, top
),
168 &char_bx
, &offset_bx
);
172 } else if (ret
== -2) {
175 /* else compare both char */
177 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
178 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
179 ret
= parse_char(estack_ax(stack
, top
),
180 &char_ax
, &offset_ax
);
184 } else if (ret
== -2) {
201 diff
= char_bx
- char_ax
;
213 uint64_t lttng_filter_false(void *filter_data
,
214 struct lttng_probe_ctx
*lttng_probe_ctx
,
215 const char *filter_stack_data
)
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter.
 * NOTE(review): the switch variant starts at &bytecode->data[0] while this
 * variant starts at &bytecode->code[0] — confirm both members alias the
 * bytecode stream in struct bytecode_runtime.
 */

#define START_OP							\
	start_pc = &bytecode->code[0];					\
	pc = next_pc = start_pc;					\
	if (unlikely(pc - start_pc >= bytecode->len))			\
		goto end;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)							\
LABEL_##name

#define PO								\
		pc = next_pc;						\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
266 static int context_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
267 struct load_ptr
*ptr
,
271 struct lttng_ctx_field
*ctx_field
;
272 struct lttng_event_field
*field
;
273 union lttng_ctx_value v
;
275 ctx_field
= <tng_static_ctx
->fields
[idx
];
276 field
= &ctx_field
->event_field
;
277 ptr
->type
= LOAD_OBJECT
;
278 /* field is only used for types nested within variants. */
281 switch (field
->type
.atype
) {
283 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
284 if (field
->type
.u
.basic
.integer
.signedness
) {
285 ptr
->object_type
= OBJECT_TYPE_S64
;
287 ptr
->ptr
= &ptr
->u
.s64
;
289 ptr
->object_type
= OBJECT_TYPE_U64
;
290 ptr
->u
.u64
= v
.s64
; /* Cast. */
291 ptr
->ptr
= &ptr
->u
.u64
;
293 ptr
->rev_bo
= field
->type
.u
.basic
.integer
.reverse_byte_order
;
297 const struct lttng_integer_type
*itype
=
298 &field
->type
.u
.basic
.enumeration
.container_type
;
300 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
301 if (itype
->signedness
) {
302 ptr
->object_type
= OBJECT_TYPE_S64
;
304 ptr
->ptr
= &ptr
->u
.s64
;
306 ptr
->object_type
= OBJECT_TYPE_U64
;
307 ptr
->u
.u64
= v
.s64
; /* Cast. */
308 ptr
->ptr
= &ptr
->u
.u64
;
310 ptr
->rev_bo
= itype
->reverse_byte_order
;
314 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
315 printk(KERN_WARNING
"Array nesting only supports integer types.\n");
318 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
319 printk(KERN_WARNING
"Only string arrays are supported for contexts.\n");
322 ptr
->object_type
= OBJECT_TYPE_STRING
;
323 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
327 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
328 printk(KERN_WARNING
"Sequence nesting only supports integer types.\n");
331 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
332 printk(KERN_WARNING
"Only string sequences are supported for contexts.\n");
335 ptr
->object_type
= OBJECT_TYPE_STRING
;
336 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
339 case atype_array_bitfield
:
340 printk(KERN_WARNING
"Bitfield array type is not supported.\n");
342 case atype_sequence_bitfield
:
343 printk(KERN_WARNING
"Bitfield sequence type is not supported.\n");
346 ptr
->object_type
= OBJECT_TYPE_STRING
;
347 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
351 printk(KERN_WARNING
"Structure type cannot be loaded.\n");
354 printk(KERN_WARNING
"Unknown type: %d", (int) field
->type
.atype
);
360 static int dynamic_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
361 struct bytecode_runtime
*runtime
,
362 uint64_t index
, struct estack_entry
*stack_top
)
365 const struct filter_get_index_data
*gid
;
368 * Types nested within variants need to perform dynamic lookup
369 * based on the field descriptions. LTTng-UST does not implement
372 if (stack_top
->u
.ptr
.field
)
374 gid
= (const struct filter_get_index_data
*) &runtime
->data
[index
];
375 switch (stack_top
->u
.ptr
.type
) {
377 switch (stack_top
->u
.ptr
.object_type
) {
378 case OBJECT_TYPE_ARRAY
:
382 WARN_ON_ONCE(gid
->offset
>= gid
->array_len
);
383 /* Skip count (unsigned long) */
384 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
385 ptr
= ptr
+ gid
->offset
;
386 stack_top
->u
.ptr
.ptr
= ptr
;
387 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
388 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
389 /* field is only used for types nested within variants. */
390 stack_top
->u
.ptr
.field
= NULL
;
393 case OBJECT_TYPE_SEQUENCE
:
398 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
399 ptr_seq_len
= *(unsigned long *) stack_top
->u
.ptr
.ptr
;
400 if (gid
->offset
>= gid
->elem
.len
* ptr_seq_len
) {
404 ptr
= ptr
+ gid
->offset
;
405 stack_top
->u
.ptr
.ptr
= ptr
;
406 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
407 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
408 /* field is only used for types nested within variants. */
409 stack_top
->u
.ptr
.field
= NULL
;
412 case OBJECT_TYPE_STRUCT
:
413 printk(KERN_WARNING
"Nested structures are not supported yet.\n");
416 case OBJECT_TYPE_VARIANT
:
418 printk(KERN_WARNING
"Unexpected get index type %d",
419 (int) stack_top
->u
.ptr
.object_type
);
424 case LOAD_ROOT_CONTEXT
:
426 case LOAD_ROOT_APP_CONTEXT
:
428 ret
= context_get_index(lttng_probe_ctx
,
436 case LOAD_ROOT_PAYLOAD
:
437 stack_top
->u
.ptr
.ptr
+= gid
->offset
;
438 if (gid
->elem
.type
== OBJECT_TYPE_STRING
)
439 stack_top
->u
.ptr
.ptr
= *(const char * const *) stack_top
->u
.ptr
.ptr
;
440 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
441 stack_top
->u
.ptr
.type
= LOAD_OBJECT
;
442 /* field is only used for types nested within variants. */
443 stack_top
->u
.ptr
.field
= NULL
;
444 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
453 static int dynamic_load_field(struct estack_entry
*stack_top
)
457 switch (stack_top
->u
.ptr
.type
) {
460 case LOAD_ROOT_CONTEXT
:
461 case LOAD_ROOT_APP_CONTEXT
:
462 case LOAD_ROOT_PAYLOAD
:
464 dbg_printk("Filter warning: cannot load root, missing field name.\n");
468 switch (stack_top
->u
.ptr
.object_type
) {
470 dbg_printk("op load field s8\n");
471 stack_top
->u
.v
= *(int8_t *) stack_top
->u
.ptr
.ptr
;
473 case OBJECT_TYPE_S16
:
477 dbg_printk("op load field s16\n");
478 tmp
= *(int16_t *) stack_top
->u
.ptr
.ptr
;
479 if (stack_top
->u
.ptr
.rev_bo
)
481 stack_top
->u
.v
= tmp
;
484 case OBJECT_TYPE_S32
:
488 dbg_printk("op load field s32\n");
489 tmp
= *(int32_t *) stack_top
->u
.ptr
.ptr
;
490 if (stack_top
->u
.ptr
.rev_bo
)
492 stack_top
->u
.v
= tmp
;
495 case OBJECT_TYPE_S64
:
499 dbg_printk("op load field s64\n");
500 tmp
= *(int64_t *) stack_top
->u
.ptr
.ptr
;
501 if (stack_top
->u
.ptr
.rev_bo
)
503 stack_top
->u
.v
= tmp
;
507 dbg_printk("op load field u8\n");
508 stack_top
->u
.v
= *(uint8_t *) stack_top
->u
.ptr
.ptr
;
510 case OBJECT_TYPE_U16
:
514 dbg_printk("op load field s16\n");
515 tmp
= *(uint16_t *) stack_top
->u
.ptr
.ptr
;
516 if (stack_top
->u
.ptr
.rev_bo
)
518 stack_top
->u
.v
= tmp
;
521 case OBJECT_TYPE_U32
:
525 dbg_printk("op load field u32\n");
526 tmp
= *(uint32_t *) stack_top
->u
.ptr
.ptr
;
527 if (stack_top
->u
.ptr
.rev_bo
)
529 stack_top
->u
.v
= tmp
;
532 case OBJECT_TYPE_U64
:
536 dbg_printk("op load field u64\n");
537 tmp
= *(uint64_t *) stack_top
->u
.ptr
.ptr
;
538 if (stack_top
->u
.ptr
.rev_bo
)
540 stack_top
->u
.v
= tmp
;
543 case OBJECT_TYPE_STRING
:
547 dbg_printk("op load field string\n");
548 str
= (const char *) stack_top
->u
.ptr
.ptr
;
549 stack_top
->u
.s
.str
= str
;
550 if (unlikely(!stack_top
->u
.s
.str
)) {
551 dbg_printk("Filter warning: loading a NULL string.\n");
555 stack_top
->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
556 stack_top
->u
.s
.literal_type
=
557 ESTACK_STRING_LITERAL_TYPE_NONE
;
560 case OBJECT_TYPE_STRING_SEQUENCE
:
564 dbg_printk("op load field string sequence\n");
565 ptr
= stack_top
->u
.ptr
.ptr
;
566 stack_top
->u
.s
.seq_len
= *(unsigned long *) ptr
;
567 stack_top
->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
568 if (unlikely(!stack_top
->u
.s
.str
)) {
569 dbg_printk("Filter warning: loading a NULL sequence.\n");
573 stack_top
->u
.s
.literal_type
=
574 ESTACK_STRING_LITERAL_TYPE_NONE
;
577 case OBJECT_TYPE_DYNAMIC
:
579 * Dynamic types in context are looked up
580 * by context get index.
584 case OBJECT_TYPE_DOUBLE
:
587 case OBJECT_TYPE_SEQUENCE
:
588 case OBJECT_TYPE_ARRAY
:
589 case OBJECT_TYPE_STRUCT
:
590 case OBJECT_TYPE_VARIANT
:
591 printk(KERN_WARNING
"Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
#ifdef DEBUG

#define DBG_USER_STR_CUTOFF 32

/*
 * In debug mode, print user string (truncated, if necessary).
 */
static
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
	size_t pos = 0;
	char last_char;
	char user_str[DBG_USER_STR_CUTOFF];

	pagefault_disable();
	do {
		last_char = get_char(user_str_reg, pos);
		user_str[pos] = last_char;
		pos++;
	} while (last_char != '\0' && pos < sizeof(user_str));
	pagefault_enable();

	user_str[sizeof(user_str) - 1] = '\0';
	dbg_printk("load field ref user string: '%s%s'\n", user_str,
		last_char != '\0' ? "[...]" : "");
}
#else /* #ifdef DEBUG */
static
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
}
#endif /* #else #ifdef DEBUG */
635 * Return 0 (discard), or raise the 0x1 flag (log event).
636 * Currently, other flags are kept for future extensions and have no
639 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
640 struct lttng_probe_ctx
*lttng_probe_ctx
,
641 const char *filter_stack_data
)
643 struct bytecode_runtime
*bytecode
= filter_data
;
644 void *pc
, *next_pc
, *start_pc
;
647 struct estack _stack
;
648 struct estack
*stack
= &_stack
;
649 register int64_t ax
= 0, bx
= 0;
650 register int top
= FILTER_STACK_EMPTY
;
651 #ifndef INTERPRETER_USE_SWITCH
652 static void *dispatch
[NR_FILTER_OPS
] = {
653 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
655 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
658 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
659 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
660 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
661 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
662 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
663 [ FILTER_OP_BIT_RSHIFT
] = &&LABEL_FILTER_OP_BIT_RSHIFT
,
664 [ FILTER_OP_BIT_LSHIFT
] = &&LABEL_FILTER_OP_BIT_LSHIFT
,
665 [ FILTER_OP_BIT_AND
] = &&LABEL_FILTER_OP_BIT_AND
,
666 [ FILTER_OP_BIT_OR
] = &&LABEL_FILTER_OP_BIT_OR
,
667 [ FILTER_OP_BIT_XOR
] = &&LABEL_FILTER_OP_BIT_XOR
,
669 /* binary comparators */
670 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
671 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
672 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
673 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
674 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
675 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
677 /* string binary comparator */
678 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
679 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
680 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
681 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
682 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
683 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
685 /* globbing pattern binary comparator */
686 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING
,
687 [ FILTER_OP_NE_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING
,
689 /* s64 binary comparator */
690 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
691 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
692 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
693 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
694 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
695 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
697 /* double binary comparator */
698 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
699 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
700 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
701 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
702 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
703 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
705 /* Mixed S64-double binary comparators */
706 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
707 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
708 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
709 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
710 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
711 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
713 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
714 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
715 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
716 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
717 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
718 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
721 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
722 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
723 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
724 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
725 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
726 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
727 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
728 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
729 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
732 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
733 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
736 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
737 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
738 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
739 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
740 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
742 /* load from immediate operand */
743 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
744 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING
,
745 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
746 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
749 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
750 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
751 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
753 /* get context ref */
754 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
755 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
756 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
757 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
759 /* load userspace field ref */
760 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
761 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
763 /* Instructions for recursive traversal through composed types. */
764 [ FILTER_OP_GET_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT
,
765 [ FILTER_OP_GET_APP_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT
,
766 [ FILTER_OP_GET_PAYLOAD_ROOT
] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT
,
768 [ FILTER_OP_GET_SYMBOL
] = &&LABEL_FILTER_OP_GET_SYMBOL
,
769 [ FILTER_OP_GET_SYMBOL_FIELD
] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD
,
770 [ FILTER_OP_GET_INDEX_U16
] = &&LABEL_FILTER_OP_GET_INDEX_U16
,
771 [ FILTER_OP_GET_INDEX_U64
] = &&LABEL_FILTER_OP_GET_INDEX_U64
,
773 [ FILTER_OP_LOAD_FIELD
] = &&LABEL_FILTER_OP_LOAD_FIELD
,
774 [ FILTER_OP_LOAD_FIELD_S8
] = &&LABEL_FILTER_OP_LOAD_FIELD_S8
,
775 [ FILTER_OP_LOAD_FIELD_S16
] = &&LABEL_FILTER_OP_LOAD_FIELD_S16
,
776 [ FILTER_OP_LOAD_FIELD_S32
] = &&LABEL_FILTER_OP_LOAD_FIELD_S32
,
777 [ FILTER_OP_LOAD_FIELD_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_S64
,
778 [ FILTER_OP_LOAD_FIELD_U8
] = &&LABEL_FILTER_OP_LOAD_FIELD_U8
,
779 [ FILTER_OP_LOAD_FIELD_U16
] = &&LABEL_FILTER_OP_LOAD_FIELD_U16
,
780 [ FILTER_OP_LOAD_FIELD_U32
] = &&LABEL_FILTER_OP_LOAD_FIELD_U32
,
781 [ FILTER_OP_LOAD_FIELD_U64
] = &&LABEL_FILTER_OP_LOAD_FIELD_U64
,
782 [ FILTER_OP_LOAD_FIELD_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING
,
783 [ FILTER_OP_LOAD_FIELD_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE
,
784 [ FILTER_OP_LOAD_FIELD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE
,
786 [ FILTER_OP_UNARY_BIT_NOT
] = &&LABEL_FILTER_OP_UNARY_BIT_NOT
,
788 [ FILTER_OP_RETURN_S64
] = &&LABEL_FILTER_OP_RETURN_S64
,
790 #endif /* #ifndef INTERPRETER_USE_SWITCH */
794 OP(FILTER_OP_UNKNOWN
):
795 OP(FILTER_OP_LOAD_FIELD_REF
):
796 OP(FILTER_OP_GET_CONTEXT_REF
):
797 #ifdef INTERPRETER_USE_SWITCH
799 #endif /* INTERPRETER_USE_SWITCH */
800 printk(KERN_WARNING
"unknown bytecode op %u\n",
801 (unsigned int) *(filter_opcode_t
*) pc
);
805 OP(FILTER_OP_RETURN
):
806 OP(FILTER_OP_RETURN_S64
):
807 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
808 retval
= !!estack_ax_v
;
818 printk(KERN_WARNING
"unsupported bytecode op %u\n",
819 (unsigned int) *(filter_opcode_t
*) pc
);
829 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
830 (unsigned int) *(filter_opcode_t
*) pc
);
834 OP(FILTER_OP_EQ_STRING
):
838 res
= (stack_strcmp(stack
, top
, "==") == 0);
839 estack_pop(stack
, top
, ax
, bx
);
841 next_pc
+= sizeof(struct binary_op
);
844 OP(FILTER_OP_NE_STRING
):
848 res
= (stack_strcmp(stack
, top
, "!=") != 0);
849 estack_pop(stack
, top
, ax
, bx
);
851 next_pc
+= sizeof(struct binary_op
);
854 OP(FILTER_OP_GT_STRING
):
858 res
= (stack_strcmp(stack
, top
, ">") > 0);
859 estack_pop(stack
, top
, ax
, bx
);
861 next_pc
+= sizeof(struct binary_op
);
864 OP(FILTER_OP_LT_STRING
):
868 res
= (stack_strcmp(stack
, top
, "<") < 0);
869 estack_pop(stack
, top
, ax
, bx
);
871 next_pc
+= sizeof(struct binary_op
);
874 OP(FILTER_OP_GE_STRING
):
878 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
879 estack_pop(stack
, top
, ax
, bx
);
881 next_pc
+= sizeof(struct binary_op
);
884 OP(FILTER_OP_LE_STRING
):
888 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
889 estack_pop(stack
, top
, ax
, bx
);
891 next_pc
+= sizeof(struct binary_op
);
895 OP(FILTER_OP_EQ_STAR_GLOB_STRING
):
899 res
= (stack_star_glob_match(stack
, top
, "==") == 0);
900 estack_pop(stack
, top
, ax
, bx
);
902 next_pc
+= sizeof(struct binary_op
);
905 OP(FILTER_OP_NE_STAR_GLOB_STRING
):
909 res
= (stack_star_glob_match(stack
, top
, "!=") != 0);
910 estack_pop(stack
, top
, ax
, bx
);
912 next_pc
+= sizeof(struct binary_op
);
916 OP(FILTER_OP_EQ_S64
):
920 res
= (estack_bx_v
== estack_ax_v
);
921 estack_pop(stack
, top
, ax
, bx
);
923 next_pc
+= sizeof(struct binary_op
);
926 OP(FILTER_OP_NE_S64
):
930 res
= (estack_bx_v
!= estack_ax_v
);
931 estack_pop(stack
, top
, ax
, bx
);
933 next_pc
+= sizeof(struct binary_op
);
936 OP(FILTER_OP_GT_S64
):
940 res
= (estack_bx_v
> estack_ax_v
);
941 estack_pop(stack
, top
, ax
, bx
);
943 next_pc
+= sizeof(struct binary_op
);
946 OP(FILTER_OP_LT_S64
):
950 res
= (estack_bx_v
< estack_ax_v
);
951 estack_pop(stack
, top
, ax
, bx
);
953 next_pc
+= sizeof(struct binary_op
);
956 OP(FILTER_OP_GE_S64
):
960 res
= (estack_bx_v
>= estack_ax_v
);
961 estack_pop(stack
, top
, ax
, bx
);
963 next_pc
+= sizeof(struct binary_op
);
966 OP(FILTER_OP_LE_S64
):
970 res
= (estack_bx_v
<= estack_ax_v
);
971 estack_pop(stack
, top
, ax
, bx
);
973 next_pc
+= sizeof(struct binary_op
);
977 OP(FILTER_OP_EQ_DOUBLE
):
978 OP(FILTER_OP_NE_DOUBLE
):
979 OP(FILTER_OP_GT_DOUBLE
):
980 OP(FILTER_OP_LT_DOUBLE
):
981 OP(FILTER_OP_GE_DOUBLE
):
982 OP(FILTER_OP_LE_DOUBLE
):
988 /* Mixed S64-double binary comparators */
989 OP(FILTER_OP_EQ_DOUBLE_S64
):
990 OP(FILTER_OP_NE_DOUBLE_S64
):
991 OP(FILTER_OP_GT_DOUBLE_S64
):
992 OP(FILTER_OP_LT_DOUBLE_S64
):
993 OP(FILTER_OP_GE_DOUBLE_S64
):
994 OP(FILTER_OP_LE_DOUBLE_S64
):
995 OP(FILTER_OP_EQ_S64_DOUBLE
):
996 OP(FILTER_OP_NE_S64_DOUBLE
):
997 OP(FILTER_OP_GT_S64_DOUBLE
):
998 OP(FILTER_OP_LT_S64_DOUBLE
):
999 OP(FILTER_OP_GE_S64_DOUBLE
):
1000 OP(FILTER_OP_LE_S64_DOUBLE
):
1005 OP(FILTER_OP_BIT_RSHIFT
):
1009 /* Catch undefined behavior. */
1010 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1014 res
= ((uint64_t) estack_bx_v
>> (uint32_t) estack_ax_v
);
1015 estack_pop(stack
, top
, ax
, bx
);
1017 next_pc
+= sizeof(struct binary_op
);
1020 OP(FILTER_OP_BIT_LSHIFT
):
1024 /* Catch undefined behavior. */
1025 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1029 res
= ((uint64_t) estack_bx_v
<< (uint32_t) estack_ax_v
);
1030 estack_pop(stack
, top
, ax
, bx
);
1032 next_pc
+= sizeof(struct binary_op
);
1035 OP(FILTER_OP_BIT_AND
):
1039 res
= ((uint64_t) estack_bx_v
& (uint64_t) estack_ax_v
);
1040 estack_pop(stack
, top
, ax
, bx
);
1042 next_pc
+= sizeof(struct binary_op
);
1045 OP(FILTER_OP_BIT_OR
):
1049 res
= ((uint64_t) estack_bx_v
| (uint64_t) estack_ax_v
);
1050 estack_pop(stack
, top
, ax
, bx
);
1052 next_pc
+= sizeof(struct binary_op
);
1055 OP(FILTER_OP_BIT_XOR
):
1059 res
= ((uint64_t) estack_bx_v
^ (uint64_t) estack_ax_v
);
1060 estack_pop(stack
, top
, ax
, bx
);
1062 next_pc
+= sizeof(struct binary_op
);
1067 OP(FILTER_OP_UNARY_PLUS
):
1068 OP(FILTER_OP_UNARY_MINUS
):
1069 OP(FILTER_OP_UNARY_NOT
):
1070 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1071 (unsigned int) *(filter_opcode_t
*) pc
);
1076 OP(FILTER_OP_UNARY_BIT_NOT
):
1078 estack_ax_v
= ~(uint64_t) estack_ax_v
;
1079 next_pc
+= sizeof(struct unary_op
);
1083 OP(FILTER_OP_UNARY_PLUS_S64
):
1085 next_pc
+= sizeof(struct unary_op
);
1088 OP(FILTER_OP_UNARY_MINUS_S64
):
1090 estack_ax_v
= -estack_ax_v
;
1091 next_pc
+= sizeof(struct unary_op
);
1094 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
1095 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
1100 OP(FILTER_OP_UNARY_NOT_S64
):
1102 estack_ax_v
= !estack_ax_v
;
1103 next_pc
+= sizeof(struct unary_op
);
1106 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
1115 struct logical_op
*insn
= (struct logical_op
*) pc
;
1117 /* If AX is 0, skip and evaluate to 0 */
1118 if (unlikely(estack_ax_v
== 0)) {
1119 dbg_printk("Jumping to bytecode offset %u\n",
1120 (unsigned int) insn
->skip_offset
);
1121 next_pc
= start_pc
+ insn
->skip_offset
;
1123 /* Pop 1 when jump not taken */
1124 estack_pop(stack
, top
, ax
, bx
);
1125 next_pc
+= sizeof(struct logical_op
);
1131 struct logical_op
*insn
= (struct logical_op
*) pc
;
1133 /* If AX is nonzero, skip and evaluate to 1 */
1135 if (unlikely(estack_ax_v
!= 0)) {
1137 dbg_printk("Jumping to bytecode offset %u\n",
1138 (unsigned int) insn
->skip_offset
);
1139 next_pc
= start_pc
+ insn
->skip_offset
;
1141 /* Pop 1 when jump not taken */
1142 estack_pop(stack
, top
, ax
, bx
);
1143 next_pc
+= sizeof(struct logical_op
);
1149 /* load field ref */
1150 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
1152 struct load_op
*insn
= (struct load_op
*) pc
;
1153 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1155 dbg_printk("load field ref offset %u type string\n",
1157 estack_push(stack
, top
, ax
, bx
);
1158 estack_ax(stack
, top
)->u
.s
.str
=
1159 *(const char * const *) &filter_stack_data
[ref
->offset
];
1160 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1161 dbg_printk("Filter warning: loading a NULL string.\n");
1165 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1166 estack_ax(stack
, top
)->u
.s
.literal_type
=
1167 ESTACK_STRING_LITERAL_TYPE_NONE
;
1168 estack_ax(stack
, top
)->u
.s
.user
= 0;
1169 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1170 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1174 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
1176 struct load_op
*insn
= (struct load_op
*) pc
;
1177 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1179 dbg_printk("load field ref offset %u type sequence\n",
1181 estack_push(stack
, top
, ax
, bx
);
1182 estack_ax(stack
, top
)->u
.s
.seq_len
=
1183 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1184 estack_ax(stack
, top
)->u
.s
.str
=
1185 *(const char **) (&filter_stack_data
[ref
->offset
1186 + sizeof(unsigned long)]);
1187 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1188 dbg_printk("Filter warning: loading a NULL sequence.\n");
1192 estack_ax(stack
, top
)->u
.s
.literal_type
=
1193 ESTACK_STRING_LITERAL_TYPE_NONE
;
1194 estack_ax(stack
, top
)->u
.s
.user
= 0;
1195 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1199 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
1201 struct load_op
*insn
= (struct load_op
*) pc
;
1202 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1204 dbg_printk("load field ref offset %u type s64\n",
1206 estack_push(stack
, top
, ax
, bx
);
1208 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
1209 dbg_printk("ref load s64 %lld\n",
1210 (long long) estack_ax_v
);
1211 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1215 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
1221 /* load from immediate operand */
1222 OP(FILTER_OP_LOAD_STRING
):
1224 struct load_op
*insn
= (struct load_op
*) pc
;
1226 dbg_printk("load string %s\n", insn
->data
);
1227 estack_push(stack
, top
, ax
, bx
);
1228 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1229 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1230 estack_ax(stack
, top
)->u
.s
.literal_type
=
1231 ESTACK_STRING_LITERAL_TYPE_PLAIN
;
1232 estack_ax(stack
, top
)->u
.s
.user
= 0;
1233 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1237 OP(FILTER_OP_LOAD_STAR_GLOB_STRING
):
1239 struct load_op
*insn
= (struct load_op
*) pc
;
1241 dbg_printk("load globbing pattern %s\n", insn
->data
);
1242 estack_push(stack
, top
, ax
, bx
);
1243 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1244 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1245 estack_ax(stack
, top
)->u
.s
.literal_type
=
1246 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
;
1247 estack_ax(stack
, top
)->u
.s
.user
= 0;
1248 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1252 OP(FILTER_OP_LOAD_S64
):
1254 struct load_op
*insn
= (struct load_op
*) pc
;
1256 estack_push(stack
, top
, ax
, bx
);
1257 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
1258 dbg_printk("load s64 %lld\n",
1259 (long long) estack_ax_v
);
1260 next_pc
+= sizeof(struct load_op
)
1261 + sizeof(struct literal_numeric
);
1265 OP(FILTER_OP_LOAD_DOUBLE
):
1272 OP(FILTER_OP_CAST_TO_S64
):
1273 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1274 (unsigned int) *(filter_opcode_t
*) pc
);
1278 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
1284 OP(FILTER_OP_CAST_NOP
):
1286 next_pc
+= sizeof(struct cast_op
);
1290 /* get context ref */
1291 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
1293 struct load_op
*insn
= (struct load_op
*) pc
;
1294 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1295 struct lttng_ctx_field
*ctx_field
;
1296 union lttng_ctx_value v
;
1298 dbg_printk("get context ref offset %u type string\n",
1300 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1301 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1302 estack_push(stack
, top
, ax
, bx
);
1303 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
1304 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1305 dbg_printk("Filter warning: loading a NULL string.\n");
1309 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1310 estack_ax(stack
, top
)->u
.s
.literal_type
=
1311 ESTACK_STRING_LITERAL_TYPE_NONE
;
1312 estack_ax(stack
, top
)->u
.s
.user
= 0;
1313 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1314 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1318 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
1320 struct load_op
*insn
= (struct load_op
*) pc
;
1321 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1322 struct lttng_ctx_field
*ctx_field
;
1323 union lttng_ctx_value v
;
1325 dbg_printk("get context ref offset %u type s64\n",
1327 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1328 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1329 estack_push(stack
, top
, ax
, bx
);
1330 estack_ax_v
= v
.s64
;
1331 dbg_printk("ref get context s64 %lld\n",
1332 (long long) estack_ax_v
);
1333 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1337 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
1343 /* load userspace field ref */
1344 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
1346 struct load_op
*insn
= (struct load_op
*) pc
;
1347 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1349 dbg_printk("load field ref offset %u type user string\n",
1351 estack_push(stack
, top
, ax
, bx
);
1352 estack_ax(stack
, top
)->u
.s
.user_str
=
1353 *(const char * const *) &filter_stack_data
[ref
->offset
];
1354 if (unlikely(!estack_ax(stack
, top
)->u
.s
.user_str
)) {
1355 dbg_printk("Filter warning: loading a NULL string.\n");
1359 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1360 estack_ax(stack
, top
)->u
.s
.literal_type
=
1361 ESTACK_STRING_LITERAL_TYPE_NONE
;
1362 estack_ax(stack
, top
)->u
.s
.user
= 1;
1363 dbg_load_ref_user_str_printk(estack_ax(stack
, top
));
1364 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1368 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
1370 struct load_op
*insn
= (struct load_op
*) pc
;
1371 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1373 dbg_printk("load field ref offset %u type user sequence\n",
1375 estack_push(stack
, top
, ax
, bx
);
1376 estack_ax(stack
, top
)->u
.s
.seq_len
=
1377 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1378 estack_ax(stack
, top
)->u
.s
.user_str
=
1379 *(const char **) (&filter_stack_data
[ref
->offset
1380 + sizeof(unsigned long)]);
1381 if (unlikely(!estack_ax(stack
, top
)->u
.s
.user_str
)) {
1382 dbg_printk("Filter warning: loading a NULL sequence.\n");
1386 estack_ax(stack
, top
)->u
.s
.literal_type
=
1387 ESTACK_STRING_LITERAL_TYPE_NONE
;
1388 estack_ax(stack
, top
)->u
.s
.user
= 1;
1389 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1393 OP(FILTER_OP_GET_CONTEXT_ROOT
):
1395 dbg_printk("op get context root\n");
1396 estack_push(stack
, top
, ax
, bx
);
1397 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_CONTEXT
;
1398 /* "field" only needed for variants. */
1399 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1400 next_pc
+= sizeof(struct load_op
);
1404 OP(FILTER_OP_GET_APP_CONTEXT_ROOT
):
1410 OP(FILTER_OP_GET_PAYLOAD_ROOT
):
1412 dbg_printk("op get app payload root\n");
1413 estack_push(stack
, top
, ax
, bx
);
1414 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_PAYLOAD
;
1415 estack_ax(stack
, top
)->u
.ptr
.ptr
= filter_stack_data
;
1416 /* "field" only needed for variants. */
1417 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1418 next_pc
+= sizeof(struct load_op
);
1422 OP(FILTER_OP_GET_SYMBOL
):
1424 dbg_printk("op get symbol\n");
1425 switch (estack_ax(stack
, top
)->u
.ptr
.type
) {
1427 printk(KERN_WARNING
"Nested fields not implemented yet.\n");
1430 case LOAD_ROOT_CONTEXT
:
1431 case LOAD_ROOT_APP_CONTEXT
:
1432 case LOAD_ROOT_PAYLOAD
:
1434 * symbol lookup is performed by
1440 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1444 OP(FILTER_OP_GET_SYMBOL_FIELD
):
1447 * Used for first variant encountered in a
1448 * traversal. Variants are not implemented yet.
1454 OP(FILTER_OP_GET_INDEX_U16
):
1456 struct load_op
*insn
= (struct load_op
*) pc
;
1457 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1459 dbg_printk("op get index u16\n");
1460 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1463 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1464 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1468 OP(FILTER_OP_GET_INDEX_U64
):
1470 struct load_op
*insn
= (struct load_op
*) pc
;
1471 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1473 dbg_printk("op get index u64\n");
1474 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1477 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1478 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1482 OP(FILTER_OP_LOAD_FIELD
):
1484 dbg_printk("op load field\n");
1485 ret
= dynamic_load_field(estack_ax(stack
, top
));
1488 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1489 next_pc
+= sizeof(struct load_op
);
1493 OP(FILTER_OP_LOAD_FIELD_S8
):
1495 dbg_printk("op load field s8\n");
1497 estack_ax_v
= *(int8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1498 next_pc
+= sizeof(struct load_op
);
1501 OP(FILTER_OP_LOAD_FIELD_S16
):
1503 dbg_printk("op load field s16\n");
1505 estack_ax_v
= *(int16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1506 next_pc
+= sizeof(struct load_op
);
1509 OP(FILTER_OP_LOAD_FIELD_S32
):
1511 dbg_printk("op load field s32\n");
1513 estack_ax_v
= *(int32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1514 next_pc
+= sizeof(struct load_op
);
1517 OP(FILTER_OP_LOAD_FIELD_S64
):
1519 dbg_printk("op load field s64\n");
1521 estack_ax_v
= *(int64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1522 next_pc
+= sizeof(struct load_op
);
1525 OP(FILTER_OP_LOAD_FIELD_U8
):
1527 dbg_printk("op load field u8\n");
1529 estack_ax_v
= *(uint8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1530 next_pc
+= sizeof(struct load_op
);
1533 OP(FILTER_OP_LOAD_FIELD_U16
):
1535 dbg_printk("op load field u16\n");
1537 estack_ax_v
= *(uint16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1538 next_pc
+= sizeof(struct load_op
);
1541 OP(FILTER_OP_LOAD_FIELD_U32
):
1543 dbg_printk("op load field u32\n");
1545 estack_ax_v
= *(uint32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1546 next_pc
+= sizeof(struct load_op
);
1549 OP(FILTER_OP_LOAD_FIELD_U64
):
1551 dbg_printk("op load field u64\n");
1553 estack_ax_v
= *(uint64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1554 next_pc
+= sizeof(struct load_op
);
1557 OP(FILTER_OP_LOAD_FIELD_DOUBLE
):
1563 OP(FILTER_OP_LOAD_FIELD_STRING
):
1567 dbg_printk("op load field string\n");
1568 str
= (const char *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1569 estack_ax(stack
, top
)->u
.s
.str
= str
;
1570 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1571 dbg_printk("Filter warning: loading a NULL string.\n");
1575 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1576 estack_ax(stack
, top
)->u
.s
.literal_type
=
1577 ESTACK_STRING_LITERAL_TYPE_NONE
;
1578 next_pc
+= sizeof(struct load_op
);
1582 OP(FILTER_OP_LOAD_FIELD_SEQUENCE
):
1586 dbg_printk("op load field string sequence\n");
1587 ptr
= estack_ax(stack
, top
)->u
.ptr
.ptr
;
1588 estack_ax(stack
, top
)->u
.s
.seq_len
= *(unsigned long *) ptr
;
1589 estack_ax(stack
, top
)->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
1590 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1591 dbg_printk("Filter warning: loading a NULL sequence.\n");
1595 estack_ax(stack
, top
)->u
.s
.literal_type
=
1596 ESTACK_STRING_LITERAL_TYPE_NONE
;
1597 next_pc
+= sizeof(struct load_op
);
1603 /* return 0 (discard) on error */