1 /* SPDX-License-Identifier: MIT
3 * lttng-filter-interpreter.c
5 * LTTng modules filter interpreter.
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 #include <wrapper/uaccess.h>
11 #include <wrapper/objtool.h>
12 #include <wrapper/types.h>
13 #include <linux/swab.h>
15 #include <lttng-filter.h>
16 #include <lttng-string-utils.h>
18 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode
);
21 * get_char should be called with page fault handler disabled if it is expected
22 * to handle user-space read.
25 char get_char(const struct estack_entry
*reg
, size_t offset
)
27 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
32 /* Handle invalid access as end of string. */
33 if (unlikely(!lttng_access_ok(VERIFY_READ
,
34 reg
->u
.s
.user_str
+ offset
,
37 /* Handle fault (nonzero return value) as end of string. */
38 if (unlikely(__copy_from_user_inatomic(&c
,
39 reg
->u
.s
.user_str
+ offset
,
44 return reg
->u
.s
.str
[offset
];
50 * -2: unknown escape char.
54 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
59 *c
= get_char(reg
, *offset
);
75 char get_char_at_cb(size_t at
, void *data
)
77 return get_char(data
, at
);
81 int stack_star_glob_match(struct estack
*stack
, int top
, const char *cmp_type
)
83 bool has_user
= false;
85 struct estack_entry
*pattern_reg
;
86 struct estack_entry
*candidate_reg
;
88 /* Disable the page fault handler when reading from userspace. */
89 if (estack_bx(stack
, top
)->u
.s
.user
90 || estack_ax(stack
, top
)->u
.s
.user
) {
95 /* Find out which side is the pattern vs. the candidate. */
96 if (estack_ax(stack
, top
)->u
.s
.literal_type
== ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
) {
97 pattern_reg
= estack_ax(stack
, top
);
98 candidate_reg
= estack_bx(stack
, top
);
100 pattern_reg
= estack_bx(stack
, top
);
101 candidate_reg
= estack_ax(stack
, top
);
104 /* Perform the match operation. */
105 result
= !strutils_star_glob_match_char_cb(get_char_at_cb
,
106 pattern_reg
, get_char_at_cb
, candidate_reg
);
114 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
116 size_t offset_bx
= 0, offset_ax
= 0;
117 int diff
, has_user
= 0;
119 if (estack_bx(stack
, top
)->u
.s
.user
120 || estack_ax(stack
, top
)->u
.s
.user
) {
128 char char_bx
, char_ax
;
130 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
131 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
133 if (unlikely(char_bx
== '\0')) {
134 if (char_ax
== '\0') {
138 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
139 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
140 ret
= parse_char(estack_ax(stack
, top
),
141 &char_ax
, &offset_ax
);
151 if (unlikely(char_ax
== '\0')) {
152 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
153 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
154 ret
= parse_char(estack_bx(stack
, top
),
155 &char_bx
, &offset_bx
);
164 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
165 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
166 ret
= parse_char(estack_bx(stack
, top
),
167 &char_bx
, &offset_bx
);
171 } else if (ret
== -2) {
174 /* else compare both char */
176 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
177 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
178 ret
= parse_char(estack_ax(stack
, top
),
179 &char_ax
, &offset_ax
);
183 } else if (ret
== -2) {
200 diff
= char_bx
- char_ax
;
212 uint64_t lttng_filter_false(void *filter_data
,
213 struct lttng_probe_ctx
*lttng_probe_ctx
,
214 const char *filter_stack_data
)
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter.
 */

#define START_OP							\
	start_pc = &bytecode->code[0];					\
	pc = next_pc = start_pc;					\
	if (unlikely(pc - start_pc >= bytecode->len))			\
		goto end;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)							\
LABEL_##name

#define PO								\
		pc = next_pc;						\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
265 static int context_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
266 struct load_ptr
*ptr
,
270 struct lttng_ctx_field
*ctx_field
;
271 struct lttng_event_field
*field
;
272 union lttng_ctx_value v
;
274 ctx_field
= <tng_static_ctx
->fields
[idx
];
275 field
= &ctx_field
->event_field
;
276 ptr
->type
= LOAD_OBJECT
;
277 /* field is only used for types nested within variants. */
280 switch (field
->type
.atype
) {
282 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
283 if (field
->type
.u
.basic
.integer
.signedness
) {
284 ptr
->object_type
= OBJECT_TYPE_S64
;
286 ptr
->ptr
= &ptr
->u
.s64
;
288 ptr
->object_type
= OBJECT_TYPE_U64
;
289 ptr
->u
.u64
= v
.s64
; /* Cast. */
290 ptr
->ptr
= &ptr
->u
.u64
;
295 const struct lttng_integer_type
*itype
=
296 &field
->type
.u
.basic
.enumeration
.container_type
;
298 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
299 if (itype
->signedness
) {
300 ptr
->object_type
= OBJECT_TYPE_S64
;
302 ptr
->ptr
= &ptr
->u
.s64
;
304 ptr
->object_type
= OBJECT_TYPE_U64
;
305 ptr
->u
.u64
= v
.s64
; /* Cast. */
306 ptr
->ptr
= &ptr
->u
.u64
;
311 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
312 printk(KERN_WARNING
"Array nesting only supports integer types.\n");
315 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
316 printk(KERN_WARNING
"Only string arrays are supported for contexts.\n");
319 ptr
->object_type
= OBJECT_TYPE_STRING
;
320 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
324 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
325 printk(KERN_WARNING
"Sequence nesting only supports integer types.\n");
328 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
329 printk(KERN_WARNING
"Only string sequences are supported for contexts.\n");
332 ptr
->object_type
= OBJECT_TYPE_STRING
;
333 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
336 case atype_array_bitfield
:
337 printk(KERN_WARNING
"Bitfield array type is not supported.\n");
339 case atype_sequence_bitfield
:
340 printk(KERN_WARNING
"Bitfield sequence type is not supported.\n");
343 ptr
->object_type
= OBJECT_TYPE_STRING
;
344 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
348 printk(KERN_WARNING
"Structure type cannot be loaded.\n");
351 printk(KERN_WARNING
"Unknown type: %d", (int) field
->type
.atype
);
357 static int dynamic_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
358 struct bytecode_runtime
*runtime
,
359 uint64_t index
, struct estack_entry
*stack_top
)
362 const struct filter_get_index_data
*gid
;
365 * Types nested within variants need to perform dynamic lookup
366 * based on the field descriptions. LTTng-UST does not implement
369 if (stack_top
->u
.ptr
.field
)
371 gid
= (const struct filter_get_index_data
*) &runtime
->data
[index
];
372 switch (stack_top
->u
.ptr
.type
) {
374 switch (stack_top
->u
.ptr
.object_type
) {
375 case OBJECT_TYPE_ARRAY
:
379 WARN_ON_ONCE(gid
->offset
>= gid
->array_len
);
380 /* Skip count (unsigned long) */
381 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
382 ptr
= ptr
+ gid
->offset
;
383 stack_top
->u
.ptr
.ptr
= ptr
;
384 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
385 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
386 /* field is only used for types nested within variants. */
387 stack_top
->u
.ptr
.field
= NULL
;
390 case OBJECT_TYPE_SEQUENCE
:
395 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
396 ptr_seq_len
= *(unsigned long *) stack_top
->u
.ptr
.ptr
;
397 if (gid
->offset
>= gid
->elem
.len
* ptr_seq_len
) {
401 ptr
= ptr
+ gid
->offset
;
402 stack_top
->u
.ptr
.ptr
= ptr
;
403 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
404 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
405 /* field is only used for types nested within variants. */
406 stack_top
->u
.ptr
.field
= NULL
;
409 case OBJECT_TYPE_STRUCT
:
410 printk(KERN_WARNING
"Nested structures are not supported yet.\n");
413 case OBJECT_TYPE_VARIANT
:
415 printk(KERN_WARNING
"Unexpected get index type %d",
416 (int) stack_top
->u
.ptr
.object_type
);
421 case LOAD_ROOT_CONTEXT
:
422 case LOAD_ROOT_APP_CONTEXT
: /* Fall-through */
424 ret
= context_get_index(lttng_probe_ctx
,
432 case LOAD_ROOT_PAYLOAD
:
433 stack_top
->u
.ptr
.ptr
+= gid
->offset
;
434 if (gid
->elem
.type
== OBJECT_TYPE_STRING
)
435 stack_top
->u
.ptr
.ptr
= *(const char * const *) stack_top
->u
.ptr
.ptr
;
436 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
437 stack_top
->u
.ptr
.type
= LOAD_OBJECT
;
438 /* field is only used for types nested within variants. */
439 stack_top
->u
.ptr
.field
= NULL
;
448 static int dynamic_load_field(struct estack_entry
*stack_top
)
452 switch (stack_top
->u
.ptr
.type
) {
455 case LOAD_ROOT_CONTEXT
:
456 case LOAD_ROOT_APP_CONTEXT
:
457 case LOAD_ROOT_PAYLOAD
:
459 dbg_printk("Filter warning: cannot load root, missing field name.\n");
463 switch (stack_top
->u
.ptr
.object_type
) {
465 dbg_printk("op load field s8\n");
466 stack_top
->u
.v
= *(int8_t *) stack_top
->u
.ptr
.ptr
;
468 case OBJECT_TYPE_S16
:
472 dbg_printk("op load field s16\n");
473 tmp
= *(int16_t *) stack_top
->u
.ptr
.ptr
;
474 if (stack_top
->u
.ptr
.rev_bo
)
476 stack_top
->u
.v
= tmp
;
479 case OBJECT_TYPE_S32
:
483 dbg_printk("op load field s32\n");
484 tmp
= *(int32_t *) stack_top
->u
.ptr
.ptr
;
485 if (stack_top
->u
.ptr
.rev_bo
)
487 stack_top
->u
.v
= tmp
;
490 case OBJECT_TYPE_S64
:
494 dbg_printk("op load field s64\n");
495 tmp
= *(int64_t *) stack_top
->u
.ptr
.ptr
;
496 if (stack_top
->u
.ptr
.rev_bo
)
498 stack_top
->u
.v
= tmp
;
502 dbg_printk("op load field u8\n");
503 stack_top
->u
.v
= *(uint8_t *) stack_top
->u
.ptr
.ptr
;
505 case OBJECT_TYPE_U16
:
509 dbg_printk("op load field s16\n");
510 tmp
= *(uint16_t *) stack_top
->u
.ptr
.ptr
;
511 if (stack_top
->u
.ptr
.rev_bo
)
513 stack_top
->u
.v
= tmp
;
516 case OBJECT_TYPE_U32
:
520 dbg_printk("op load field u32\n");
521 tmp
= *(uint32_t *) stack_top
->u
.ptr
.ptr
;
522 if (stack_top
->u
.ptr
.rev_bo
)
524 stack_top
->u
.v
= tmp
;
527 case OBJECT_TYPE_U64
:
531 dbg_printk("op load field u64\n");
532 tmp
= *(uint64_t *) stack_top
->u
.ptr
.ptr
;
533 if (stack_top
->u
.ptr
.rev_bo
)
535 stack_top
->u
.v
= tmp
;
538 case OBJECT_TYPE_STRING
:
542 dbg_printk("op load field string\n");
543 str
= (const char *) stack_top
->u
.ptr
.ptr
;
544 stack_top
->u
.s
.str
= str
;
545 if (unlikely(!stack_top
->u
.s
.str
)) {
546 dbg_printk("Filter warning: loading a NULL string.\n");
550 stack_top
->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
551 stack_top
->u
.s
.literal_type
=
552 ESTACK_STRING_LITERAL_TYPE_NONE
;
555 case OBJECT_TYPE_STRING_SEQUENCE
:
559 dbg_printk("op load field string sequence\n");
560 ptr
= stack_top
->u
.ptr
.ptr
;
561 stack_top
->u
.s
.seq_len
= *(unsigned long *) ptr
;
562 stack_top
->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
563 if (unlikely(!stack_top
->u
.s
.str
)) {
564 dbg_printk("Filter warning: loading a NULL sequence.\n");
568 stack_top
->u
.s
.literal_type
=
569 ESTACK_STRING_LITERAL_TYPE_NONE
;
572 case OBJECT_TYPE_DYNAMIC
:
574 * Dynamic types in context are looked up
575 * by context get index.
579 case OBJECT_TYPE_DOUBLE
:
582 case OBJECT_TYPE_SEQUENCE
:
583 case OBJECT_TYPE_ARRAY
:
584 case OBJECT_TYPE_STRUCT
:
585 case OBJECT_TYPE_VARIANT
:
586 printk(KERN_WARNING
"Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
598 #define DBG_USER_STR_CUTOFF 32
601 * In debug mode, print user string (truncated, if necessary).
604 void dbg_load_ref_user_str_printk(const struct estack_entry
*user_str_reg
)
608 char user_str
[DBG_USER_STR_CUTOFF
];
612 last_char
= get_char(user_str_reg
, pos
);
613 user_str
[pos
] = last_char
;
615 } while (last_char
!= '\0' && pos
< sizeof(user_str
));
618 user_str
[sizeof(user_str
) - 1] = '\0';
619 dbg_printk("load field ref user string: '%s%s'\n", user_str
,
620 last_char
!= '\0' ? "[...]" : "");
624 void dbg_load_ref_user_str_printk(const struct estack_entry
*user_str_reg
)
630 * Return 0 (discard), or raise the 0x1 flag (log event).
631 * Currently, other flags are kept for future extensions and have no
634 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
635 struct lttng_probe_ctx
*lttng_probe_ctx
,
636 const char *filter_stack_data
)
638 struct bytecode_runtime
*bytecode
= filter_data
;
639 void *pc
, *next_pc
, *start_pc
;
642 struct estack _stack
;
643 struct estack
*stack
= &_stack
;
644 register int64_t ax
= 0, bx
= 0;
645 register int top
= FILTER_STACK_EMPTY
;
646 #ifndef INTERPRETER_USE_SWITCH
647 static void *dispatch
[NR_FILTER_OPS
] = {
648 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
650 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
653 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
654 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
655 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
656 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
657 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
658 [ FILTER_OP_BIT_RSHIFT
] = &&LABEL_FILTER_OP_BIT_RSHIFT
,
659 [ FILTER_OP_BIT_LSHIFT
] = &&LABEL_FILTER_OP_BIT_LSHIFT
,
660 [ FILTER_OP_BIT_AND
] = &&LABEL_FILTER_OP_BIT_AND
,
661 [ FILTER_OP_BIT_OR
] = &&LABEL_FILTER_OP_BIT_OR
,
662 [ FILTER_OP_BIT_XOR
] = &&LABEL_FILTER_OP_BIT_XOR
,
664 /* binary comparators */
665 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
666 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
667 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
668 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
669 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
670 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
672 /* string binary comparator */
673 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
674 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
675 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
676 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
677 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
678 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
680 /* globbing pattern binary comparator */
681 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING
,
682 [ FILTER_OP_NE_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING
,
684 /* s64 binary comparator */
685 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
686 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
687 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
688 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
689 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
690 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
692 /* double binary comparator */
693 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
694 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
695 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
696 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
697 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
698 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
700 /* Mixed S64-double binary comparators */
701 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
702 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
703 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
704 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
705 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
706 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
708 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
709 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
710 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
711 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
712 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
713 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
716 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
717 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
718 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
719 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
720 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
721 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
722 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
723 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
724 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
727 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
728 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
731 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
732 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
733 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
734 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
735 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
737 /* load from immediate operand */
738 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
739 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING
,
740 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
741 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
744 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
745 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
746 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
748 /* get context ref */
749 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
750 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
751 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
752 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
754 /* load userspace field ref */
755 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
756 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
758 /* Instructions for recursive traversal through composed types. */
759 [ FILTER_OP_GET_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT
,
760 [ FILTER_OP_GET_APP_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT
,
761 [ FILTER_OP_GET_PAYLOAD_ROOT
] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT
,
763 [ FILTER_OP_GET_SYMBOL
] = &&LABEL_FILTER_OP_GET_SYMBOL
,
764 [ FILTER_OP_GET_SYMBOL_FIELD
] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD
,
765 [ FILTER_OP_GET_INDEX_U16
] = &&LABEL_FILTER_OP_GET_INDEX_U16
,
766 [ FILTER_OP_GET_INDEX_U64
] = &&LABEL_FILTER_OP_GET_INDEX_U64
,
768 [ FILTER_OP_LOAD_FIELD
] = &&LABEL_FILTER_OP_LOAD_FIELD
,
769 [ FILTER_OP_LOAD_FIELD_S8
] = &&LABEL_FILTER_OP_LOAD_FIELD_S8
,
770 [ FILTER_OP_LOAD_FIELD_S16
] = &&LABEL_FILTER_OP_LOAD_FIELD_S16
,
771 [ FILTER_OP_LOAD_FIELD_S32
] = &&LABEL_FILTER_OP_LOAD_FIELD_S32
,
772 [ FILTER_OP_LOAD_FIELD_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_S64
,
773 [ FILTER_OP_LOAD_FIELD_U8
] = &&LABEL_FILTER_OP_LOAD_FIELD_U8
,
774 [ FILTER_OP_LOAD_FIELD_U16
] = &&LABEL_FILTER_OP_LOAD_FIELD_U16
,
775 [ FILTER_OP_LOAD_FIELD_U32
] = &&LABEL_FILTER_OP_LOAD_FIELD_U32
,
776 [ FILTER_OP_LOAD_FIELD_U64
] = &&LABEL_FILTER_OP_LOAD_FIELD_U64
,
777 [ FILTER_OP_LOAD_FIELD_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING
,
778 [ FILTER_OP_LOAD_FIELD_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE
,
779 [ FILTER_OP_LOAD_FIELD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE
,
781 [ FILTER_OP_UNARY_BIT_NOT
] = &&LABEL_FILTER_OP_UNARY_BIT_NOT
,
783 [ FILTER_OP_RETURN_S64
] = &&LABEL_FILTER_OP_RETURN_S64
,
785 #endif /* #ifndef INTERPRETER_USE_SWITCH */
789 OP(FILTER_OP_UNKNOWN
):
790 OP(FILTER_OP_LOAD_FIELD_REF
):
791 OP(FILTER_OP_GET_CONTEXT_REF
):
792 #ifdef INTERPRETER_USE_SWITCH
794 #endif /* INTERPRETER_USE_SWITCH */
795 printk(KERN_WARNING
"unknown bytecode op %u\n",
796 (unsigned int) *(filter_opcode_t
*) pc
);
800 OP(FILTER_OP_RETURN
):
801 OP(FILTER_OP_RETURN_S64
):
802 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
803 retval
= !!estack_ax_v
;
813 printk(KERN_WARNING
"unsupported bytecode op %u\n",
814 (unsigned int) *(filter_opcode_t
*) pc
);
824 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
825 (unsigned int) *(filter_opcode_t
*) pc
);
829 OP(FILTER_OP_EQ_STRING
):
833 res
= (stack_strcmp(stack
, top
, "==") == 0);
834 estack_pop(stack
, top
, ax
, bx
);
836 next_pc
+= sizeof(struct binary_op
);
839 OP(FILTER_OP_NE_STRING
):
843 res
= (stack_strcmp(stack
, top
, "!=") != 0);
844 estack_pop(stack
, top
, ax
, bx
);
846 next_pc
+= sizeof(struct binary_op
);
849 OP(FILTER_OP_GT_STRING
):
853 res
= (stack_strcmp(stack
, top
, ">") > 0);
854 estack_pop(stack
, top
, ax
, bx
);
856 next_pc
+= sizeof(struct binary_op
);
859 OP(FILTER_OP_LT_STRING
):
863 res
= (stack_strcmp(stack
, top
, "<") < 0);
864 estack_pop(stack
, top
, ax
, bx
);
866 next_pc
+= sizeof(struct binary_op
);
869 OP(FILTER_OP_GE_STRING
):
873 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
874 estack_pop(stack
, top
, ax
, bx
);
876 next_pc
+= sizeof(struct binary_op
);
879 OP(FILTER_OP_LE_STRING
):
883 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
884 estack_pop(stack
, top
, ax
, bx
);
886 next_pc
+= sizeof(struct binary_op
);
890 OP(FILTER_OP_EQ_STAR_GLOB_STRING
):
894 res
= (stack_star_glob_match(stack
, top
, "==") == 0);
895 estack_pop(stack
, top
, ax
, bx
);
897 next_pc
+= sizeof(struct binary_op
);
900 OP(FILTER_OP_NE_STAR_GLOB_STRING
):
904 res
= (stack_star_glob_match(stack
, top
, "!=") != 0);
905 estack_pop(stack
, top
, ax
, bx
);
907 next_pc
+= sizeof(struct binary_op
);
911 OP(FILTER_OP_EQ_S64
):
915 res
= (estack_bx_v
== estack_ax_v
);
916 estack_pop(stack
, top
, ax
, bx
);
918 next_pc
+= sizeof(struct binary_op
);
921 OP(FILTER_OP_NE_S64
):
925 res
= (estack_bx_v
!= estack_ax_v
);
926 estack_pop(stack
, top
, ax
, bx
);
928 next_pc
+= sizeof(struct binary_op
);
931 OP(FILTER_OP_GT_S64
):
935 res
= (estack_bx_v
> estack_ax_v
);
936 estack_pop(stack
, top
, ax
, bx
);
938 next_pc
+= sizeof(struct binary_op
);
941 OP(FILTER_OP_LT_S64
):
945 res
= (estack_bx_v
< estack_ax_v
);
946 estack_pop(stack
, top
, ax
, bx
);
948 next_pc
+= sizeof(struct binary_op
);
951 OP(FILTER_OP_GE_S64
):
955 res
= (estack_bx_v
>= estack_ax_v
);
956 estack_pop(stack
, top
, ax
, bx
);
958 next_pc
+= sizeof(struct binary_op
);
961 OP(FILTER_OP_LE_S64
):
965 res
= (estack_bx_v
<= estack_ax_v
);
966 estack_pop(stack
, top
, ax
, bx
);
968 next_pc
+= sizeof(struct binary_op
);
972 OP(FILTER_OP_EQ_DOUBLE
):
973 OP(FILTER_OP_NE_DOUBLE
):
974 OP(FILTER_OP_GT_DOUBLE
):
975 OP(FILTER_OP_LT_DOUBLE
):
976 OP(FILTER_OP_GE_DOUBLE
):
977 OP(FILTER_OP_LE_DOUBLE
):
983 /* Mixed S64-double binary comparators */
984 OP(FILTER_OP_EQ_DOUBLE_S64
):
985 OP(FILTER_OP_NE_DOUBLE_S64
):
986 OP(FILTER_OP_GT_DOUBLE_S64
):
987 OP(FILTER_OP_LT_DOUBLE_S64
):
988 OP(FILTER_OP_GE_DOUBLE_S64
):
989 OP(FILTER_OP_LE_DOUBLE_S64
):
990 OP(FILTER_OP_EQ_S64_DOUBLE
):
991 OP(FILTER_OP_NE_S64_DOUBLE
):
992 OP(FILTER_OP_GT_S64_DOUBLE
):
993 OP(FILTER_OP_LT_S64_DOUBLE
):
994 OP(FILTER_OP_GE_S64_DOUBLE
):
995 OP(FILTER_OP_LE_S64_DOUBLE
):
1000 OP(FILTER_OP_BIT_RSHIFT
):
1004 /* Catch undefined behavior. */
1005 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1009 res
= ((uint64_t) estack_bx_v
>> (uint32_t) estack_ax_v
);
1010 estack_pop(stack
, top
, ax
, bx
);
1012 next_pc
+= sizeof(struct binary_op
);
1015 OP(FILTER_OP_BIT_LSHIFT
):
1019 /* Catch undefined behavior. */
1020 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1024 res
= ((uint64_t) estack_bx_v
<< (uint32_t) estack_ax_v
);
1025 estack_pop(stack
, top
, ax
, bx
);
1027 next_pc
+= sizeof(struct binary_op
);
1030 OP(FILTER_OP_BIT_AND
):
1034 res
= ((uint64_t) estack_bx_v
& (uint64_t) estack_ax_v
);
1035 estack_pop(stack
, top
, ax
, bx
);
1037 next_pc
+= sizeof(struct binary_op
);
1040 OP(FILTER_OP_BIT_OR
):
1044 res
= ((uint64_t) estack_bx_v
| (uint64_t) estack_ax_v
);
1045 estack_pop(stack
, top
, ax
, bx
);
1047 next_pc
+= sizeof(struct binary_op
);
1050 OP(FILTER_OP_BIT_XOR
):
1054 res
= ((uint64_t) estack_bx_v
^ (uint64_t) estack_ax_v
);
1055 estack_pop(stack
, top
, ax
, bx
);
1057 next_pc
+= sizeof(struct binary_op
);
1062 OP(FILTER_OP_UNARY_PLUS
):
1063 OP(FILTER_OP_UNARY_MINUS
):
1064 OP(FILTER_OP_UNARY_NOT
):
1065 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1066 (unsigned int) *(filter_opcode_t
*) pc
);
1071 OP(FILTER_OP_UNARY_BIT_NOT
):
1073 estack_ax_v
= ~(uint64_t) estack_ax_v
;
1074 next_pc
+= sizeof(struct unary_op
);
1078 OP(FILTER_OP_UNARY_PLUS_S64
):
1080 next_pc
+= sizeof(struct unary_op
);
1083 OP(FILTER_OP_UNARY_MINUS_S64
):
1085 estack_ax_v
= -estack_ax_v
;
1086 next_pc
+= sizeof(struct unary_op
);
1089 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
1090 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
1095 OP(FILTER_OP_UNARY_NOT_S64
):
1097 estack_ax_v
= !estack_ax_v
;
1098 next_pc
+= sizeof(struct unary_op
);
1101 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
1110 struct logical_op
*insn
= (struct logical_op
*) pc
;
1112 /* If AX is 0, skip and evaluate to 0 */
1113 if (unlikely(estack_ax_v
== 0)) {
1114 dbg_printk("Jumping to bytecode offset %u\n",
1115 (unsigned int) insn
->skip_offset
);
1116 next_pc
= start_pc
+ insn
->skip_offset
;
1118 /* Pop 1 when jump not taken */
1119 estack_pop(stack
, top
, ax
, bx
);
1120 next_pc
+= sizeof(struct logical_op
);
1126 struct logical_op
*insn
= (struct logical_op
*) pc
;
1128 /* If AX is nonzero, skip and evaluate to 1 */
1130 if (unlikely(estack_ax_v
!= 0)) {
1132 dbg_printk("Jumping to bytecode offset %u\n",
1133 (unsigned int) insn
->skip_offset
);
1134 next_pc
= start_pc
+ insn
->skip_offset
;
1136 /* Pop 1 when jump not taken */
1137 estack_pop(stack
, top
, ax
, bx
);
1138 next_pc
+= sizeof(struct logical_op
);
1144 /* load field ref */
1145 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
1147 struct load_op
*insn
= (struct load_op
*) pc
;
1148 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1150 dbg_printk("load field ref offset %u type string\n",
1152 estack_push(stack
, top
, ax
, bx
);
1153 estack_ax(stack
, top
)->u
.s
.str
=
1154 *(const char * const *) &filter_stack_data
[ref
->offset
];
1155 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1156 dbg_printk("Filter warning: loading a NULL string.\n");
1160 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1161 estack_ax(stack
, top
)->u
.s
.literal_type
=
1162 ESTACK_STRING_LITERAL_TYPE_NONE
;
1163 estack_ax(stack
, top
)->u
.s
.user
= 0;
1164 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1165 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1169 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
1171 struct load_op
*insn
= (struct load_op
*) pc
;
1172 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1174 dbg_printk("load field ref offset %u type sequence\n",
1176 estack_push(stack
, top
, ax
, bx
);
1177 estack_ax(stack
, top
)->u
.s
.seq_len
=
1178 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1179 estack_ax(stack
, top
)->u
.s
.str
=
1180 *(const char **) (&filter_stack_data
[ref
->offset
1181 + sizeof(unsigned long)]);
1182 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1183 dbg_printk("Filter warning: loading a NULL sequence.\n");
1187 estack_ax(stack
, top
)->u
.s
.literal_type
=
1188 ESTACK_STRING_LITERAL_TYPE_NONE
;
1189 estack_ax(stack
, top
)->u
.s
.user
= 0;
1190 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1194 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
1196 struct load_op
*insn
= (struct load_op
*) pc
;
1197 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1199 dbg_printk("load field ref offset %u type s64\n",
1201 estack_push(stack
, top
, ax
, bx
);
1203 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
1204 dbg_printk("ref load s64 %lld\n",
1205 (long long) estack_ax_v
);
1206 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1210 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
1216 /* load from immediate operand */
1217 OP(FILTER_OP_LOAD_STRING
):
1219 struct load_op
*insn
= (struct load_op
*) pc
;
1221 dbg_printk("load string %s\n", insn
->data
);
1222 estack_push(stack
, top
, ax
, bx
);
1223 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1224 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1225 estack_ax(stack
, top
)->u
.s
.literal_type
=
1226 ESTACK_STRING_LITERAL_TYPE_PLAIN
;
1227 estack_ax(stack
, top
)->u
.s
.user
= 0;
1228 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1232 OP(FILTER_OP_LOAD_STAR_GLOB_STRING
):
1234 struct load_op
*insn
= (struct load_op
*) pc
;
1236 dbg_printk("load globbing pattern %s\n", insn
->data
);
1237 estack_push(stack
, top
, ax
, bx
);
1238 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1239 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1240 estack_ax(stack
, top
)->u
.s
.literal_type
=
1241 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
;
1242 estack_ax(stack
, top
)->u
.s
.user
= 0;
1243 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1247 OP(FILTER_OP_LOAD_S64
):
1249 struct load_op
*insn
= (struct load_op
*) pc
;
1251 estack_push(stack
, top
, ax
, bx
);
1252 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
1253 dbg_printk("load s64 %lld\n",
1254 (long long) estack_ax_v
);
1255 next_pc
+= sizeof(struct load_op
)
1256 + sizeof(struct literal_numeric
);
1260 OP(FILTER_OP_LOAD_DOUBLE
):
1267 OP(FILTER_OP_CAST_TO_S64
):
1268 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1269 (unsigned int) *(filter_opcode_t
*) pc
);
1273 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
1279 OP(FILTER_OP_CAST_NOP
):
1281 next_pc
+= sizeof(struct cast_op
);
1285 /* get context ref */
1286 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
1288 struct load_op
*insn
= (struct load_op
*) pc
;
1289 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1290 struct lttng_ctx_field
*ctx_field
;
1291 union lttng_ctx_value v
;
1293 dbg_printk("get context ref offset %u type string\n",
1295 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1296 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1297 estack_push(stack
, top
, ax
, bx
);
1298 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
1299 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1300 dbg_printk("Filter warning: loading a NULL string.\n");
1304 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1305 estack_ax(stack
, top
)->u
.s
.literal_type
=
1306 ESTACK_STRING_LITERAL_TYPE_NONE
;
1307 estack_ax(stack
, top
)->u
.s
.user
= 0;
1308 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1309 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1313 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
1315 struct load_op
*insn
= (struct load_op
*) pc
;
1316 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1317 struct lttng_ctx_field
*ctx_field
;
1318 union lttng_ctx_value v
;
1320 dbg_printk("get context ref offset %u type s64\n",
1322 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1323 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1324 estack_push(stack
, top
, ax
, bx
);
1325 estack_ax_v
= v
.s64
;
1326 dbg_printk("ref get context s64 %lld\n",
1327 (long long) estack_ax_v
);
1328 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1332 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
1338 /* load userspace field ref */
1339 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
1341 struct load_op
*insn
= (struct load_op
*) pc
;
1342 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1344 dbg_printk("load field ref offset %u type user string\n",
1346 estack_push(stack
, top
, ax
, bx
);
1347 estack_ax(stack
, top
)->u
.s
.user_str
=
1348 *(const char * const *) &filter_stack_data
[ref
->offset
];
1349 if (unlikely(!estack_ax(stack
, top
)->u
.s
.user_str
)) {
1350 dbg_printk("Filter warning: loading a NULL string.\n");
1354 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1355 estack_ax(stack
, top
)->u
.s
.literal_type
=
1356 ESTACK_STRING_LITERAL_TYPE_NONE
;
1357 estack_ax(stack
, top
)->u
.s
.user
= 1;
1358 dbg_load_ref_user_str_printk(estack_ax(stack
, top
));
1359 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1363 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
1365 struct load_op
*insn
= (struct load_op
*) pc
;
1366 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1368 dbg_printk("load field ref offset %u type user sequence\n",
1370 estack_push(stack
, top
, ax
, bx
);
1371 estack_ax(stack
, top
)->u
.s
.seq_len
=
1372 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1373 estack_ax(stack
, top
)->u
.s
.user_str
=
1374 *(const char **) (&filter_stack_data
[ref
->offset
1375 + sizeof(unsigned long)]);
1376 if (unlikely(!estack_ax(stack
, top
)->u
.s
.user_str
)) {
1377 dbg_printk("Filter warning: loading a NULL sequence.\n");
1381 estack_ax(stack
, top
)->u
.s
.literal_type
=
1382 ESTACK_STRING_LITERAL_TYPE_NONE
;
1383 estack_ax(stack
, top
)->u
.s
.user
= 1;
1384 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1388 OP(FILTER_OP_GET_CONTEXT_ROOT
):
1390 dbg_printk("op get context root\n");
1391 estack_push(stack
, top
, ax
, bx
);
1392 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_CONTEXT
;
1393 /* "field" only needed for variants. */
1394 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1395 next_pc
+= sizeof(struct load_op
);
1399 OP(FILTER_OP_GET_APP_CONTEXT_ROOT
):
1405 OP(FILTER_OP_GET_PAYLOAD_ROOT
):
1407 dbg_printk("op get app payload root\n");
1408 estack_push(stack
, top
, ax
, bx
);
1409 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_PAYLOAD
;
1410 estack_ax(stack
, top
)->u
.ptr
.ptr
= filter_stack_data
;
1411 /* "field" only needed for variants. */
1412 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1413 next_pc
+= sizeof(struct load_op
);
1417 OP(FILTER_OP_GET_SYMBOL
):
1419 dbg_printk("op get symbol\n");
1420 switch (estack_ax(stack
, top
)->u
.ptr
.type
) {
1422 printk(KERN_WARNING
"Nested fields not implemented yet.\n");
1425 case LOAD_ROOT_CONTEXT
:
1426 case LOAD_ROOT_APP_CONTEXT
:
1427 case LOAD_ROOT_PAYLOAD
:
1429 * symbol lookup is performed by
1435 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1439 OP(FILTER_OP_GET_SYMBOL_FIELD
):
1442 * Used for first variant encountered in a
1443 * traversal. Variants are not implemented yet.
1449 OP(FILTER_OP_GET_INDEX_U16
):
1451 struct load_op
*insn
= (struct load_op
*) pc
;
1452 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1454 dbg_printk("op get index u16\n");
1455 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1458 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1459 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1463 OP(FILTER_OP_GET_INDEX_U64
):
1465 struct load_op
*insn
= (struct load_op
*) pc
;
1466 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1468 dbg_printk("op get index u64\n");
1469 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1472 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1473 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1477 OP(FILTER_OP_LOAD_FIELD
):
1479 dbg_printk("op load field\n");
1480 ret
= dynamic_load_field(estack_ax(stack
, top
));
1483 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1484 next_pc
+= sizeof(struct load_op
);
1488 OP(FILTER_OP_LOAD_FIELD_S8
):
1490 dbg_printk("op load field s8\n");
1492 estack_ax_v
= *(int8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1493 next_pc
+= sizeof(struct load_op
);
1496 OP(FILTER_OP_LOAD_FIELD_S16
):
1498 dbg_printk("op load field s16\n");
1500 estack_ax_v
= *(int16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1501 next_pc
+= sizeof(struct load_op
);
1504 OP(FILTER_OP_LOAD_FIELD_S32
):
1506 dbg_printk("op load field s32\n");
1508 estack_ax_v
= *(int32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1509 next_pc
+= sizeof(struct load_op
);
1512 OP(FILTER_OP_LOAD_FIELD_S64
):
1514 dbg_printk("op load field s64\n");
1516 estack_ax_v
= *(int64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1517 next_pc
+= sizeof(struct load_op
);
1520 OP(FILTER_OP_LOAD_FIELD_U8
):
1522 dbg_printk("op load field u8\n");
1524 estack_ax_v
= *(uint8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1525 next_pc
+= sizeof(struct load_op
);
1528 OP(FILTER_OP_LOAD_FIELD_U16
):
1530 dbg_printk("op load field u16\n");
1532 estack_ax_v
= *(uint16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1533 next_pc
+= sizeof(struct load_op
);
1536 OP(FILTER_OP_LOAD_FIELD_U32
):
1538 dbg_printk("op load field u32\n");
1540 estack_ax_v
= *(uint32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1541 next_pc
+= sizeof(struct load_op
);
1544 OP(FILTER_OP_LOAD_FIELD_U64
):
1546 dbg_printk("op load field u64\n");
1548 estack_ax_v
= *(uint64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1549 next_pc
+= sizeof(struct load_op
);
1552 OP(FILTER_OP_LOAD_FIELD_DOUBLE
):
1558 OP(FILTER_OP_LOAD_FIELD_STRING
):
1562 dbg_printk("op load field string\n");
1563 str
= (const char *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1564 estack_ax(stack
, top
)->u
.s
.str
= str
;
1565 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1566 dbg_printk("Filter warning: loading a NULL string.\n");
1570 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1571 estack_ax(stack
, top
)->u
.s
.literal_type
=
1572 ESTACK_STRING_LITERAL_TYPE_NONE
;
1573 next_pc
+= sizeof(struct load_op
);
1577 OP(FILTER_OP_LOAD_FIELD_SEQUENCE
):
1581 dbg_printk("op load field string sequence\n");
1582 ptr
= estack_ax(stack
, top
)->u
.ptr
.ptr
;
1583 estack_ax(stack
, top
)->u
.s
.seq_len
= *(unsigned long *) ptr
;
1584 estack_ax(stack
, top
)->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
1585 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1586 dbg_printk("Filter warning: loading a NULL sequence.\n");
1590 estack_ax(stack
, top
)->u
.s
.literal_type
=
1591 ESTACK_STRING_LITERAL_TYPE_NONE
;
1592 next_pc
+= sizeof(struct load_op
);
1598 /* return 0 (discard) on error */