/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/uaccess.h>
28 #include <wrapper/frame.h>
29 #include <wrapper/types.h>
30 #include <linux/swab.h>
32 #include <lttng-filter.h>
33 #include <lttng-string-utils.h>
35 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode
);
38 * get_char should be called with page fault handler disabled if it is expected
39 * to handle user-space read.
42 char get_char(struct estack_entry
*reg
, size_t offset
)
44 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
49 /* Handle invalid access as end of string. */
50 if (unlikely(!access_ok(VERIFY_READ
,
51 reg
->u
.s
.user_str
+ offset
,
54 /* Handle fault (nonzero return value) as end of string. */
55 if (unlikely(__copy_from_user_inatomic(&c
,
56 reg
->u
.s
.user_str
+ offset
,
61 return reg
->u
.s
.str
[offset
];
67 * -2: unknown escape char.
71 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
76 *c
= get_char(reg
, *offset
);
92 char get_char_at_cb(size_t at
, void *data
)
94 return get_char(data
, at
);
98 int stack_star_glob_match(struct estack
*stack
, int top
, const char *cmp_type
)
100 bool has_user
= false;
103 struct estack_entry
*pattern_reg
;
104 struct estack_entry
*candidate_reg
;
106 if (estack_bx(stack
, top
)->u
.s
.user
107 || estack_ax(stack
, top
)->u
.s
.user
) {
114 /* Find out which side is the pattern vs. the candidate. */
115 if (estack_ax(stack
, top
)->u
.s
.literal_type
== ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
) {
116 pattern_reg
= estack_ax(stack
, top
);
117 candidate_reg
= estack_bx(stack
, top
);
119 pattern_reg
= estack_bx(stack
, top
);
120 candidate_reg
= estack_ax(stack
, top
);
123 /* Perform the match operation. */
124 result
= !strutils_star_glob_match_char_cb(get_char_at_cb
,
125 pattern_reg
, get_char_at_cb
, candidate_reg
);
135 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
137 size_t offset_bx
= 0, offset_ax
= 0;
138 int diff
, has_user
= 0;
141 if (estack_bx(stack
, top
)->u
.s
.user
142 || estack_ax(stack
, top
)->u
.s
.user
) {
152 char char_bx
, char_ax
;
154 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
155 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
157 if (unlikely(char_bx
== '\0')) {
158 if (char_ax
== '\0') {
162 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
163 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
164 ret
= parse_char(estack_ax(stack
, top
),
165 &char_ax
, &offset_ax
);
175 if (unlikely(char_ax
== '\0')) {
176 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
177 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
178 ret
= parse_char(estack_bx(stack
, top
),
179 &char_bx
, &offset_bx
);
188 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
189 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
190 ret
= parse_char(estack_bx(stack
, top
),
191 &char_bx
, &offset_bx
);
195 } else if (ret
== -2) {
198 /* else compare both char */
200 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
201 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
202 ret
= parse_char(estack_ax(stack
, top
),
203 &char_ax
, &offset_ax
);
207 } else if (ret
== -2) {
224 diff
= char_bx
- char_ax
;
237 uint64_t lttng_filter_false(void *filter_data
,
238 struct lttng_probe_ctx
*lttng_probe_ctx
,
239 const char *filter_stack_data
)
244 #ifdef INTERPRETER_USE_SWITCH
247 * Fallback for compilers that do not support taking address of labels.
251 start_pc = &bytecode->data[0]; \
252 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
254 dbg_printk("Executing op %s (%u)\n", \
255 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
256 (unsigned int) *(filter_opcode_t *) pc); \
257 switch (*(filter_opcode_t *) pc) {
259 #define OP(name) case name
269 * Dispatch-table based interpreter.
273 start_pc = &bytecode->code[0]; \
274 pc = next_pc = start_pc; \
275 if (unlikely(pc - start_pc >= bytecode->len)) \
277 goto *dispatch[*(filter_opcode_t *) pc];
284 goto *dispatch[*(filter_opcode_t *) pc];
290 static int context_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
291 struct load_ptr
*ptr
,
295 struct lttng_ctx_field
*ctx_field
;
296 struct lttng_event_field
*field
;
297 union lttng_ctx_value v
;
299 ctx_field
= <tng_static_ctx
->fields
[idx
];
300 field
= &ctx_field
->event_field
;
301 ptr
->type
= LOAD_OBJECT
;
302 /* field is only used for types nested within variants. */
305 switch (field
->type
.atype
) {
307 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
308 if (field
->type
.u
.basic
.integer
.signedness
) {
309 ptr
->object_type
= OBJECT_TYPE_S64
;
311 ptr
->ptr
= &ptr
->u
.s64
;
313 ptr
->object_type
= OBJECT_TYPE_U64
;
314 ptr
->u
.u64
= v
.s64
; /* Cast. */
315 ptr
->ptr
= &ptr
->u
.u64
;
320 const struct lttng_integer_type
*itype
=
321 &field
->type
.u
.basic
.enumeration
.container_type
;
323 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
324 if (itype
->signedness
) {
325 ptr
->object_type
= OBJECT_TYPE_S64
;
327 ptr
->ptr
= &ptr
->u
.s64
;
329 ptr
->object_type
= OBJECT_TYPE_U64
;
330 ptr
->u
.u64
= v
.s64
; /* Cast. */
331 ptr
->ptr
= &ptr
->u
.u64
;
336 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
337 printk(KERN_WARNING
"Array nesting only supports integer types.\n");
340 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
341 printk(KERN_WARNING
"Only string arrays are supported for contexts.\n");
344 ptr
->object_type
= OBJECT_TYPE_STRING
;
345 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
349 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
350 printk(KERN_WARNING
"Sequence nesting only supports integer types.\n");
353 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
354 printk(KERN_WARNING
"Only string sequences are supported for contexts.\n");
357 ptr
->object_type
= OBJECT_TYPE_STRING
;
358 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
361 case atype_array_bitfield
:
362 printk(KERN_WARNING
"Bitfield array type is not supported.\n");
364 case atype_sequence_bitfield
:
365 printk(KERN_WARNING
"Bitfield sequence type is not supported.\n");
368 ptr
->object_type
= OBJECT_TYPE_STRING
;
369 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
373 printk(KERN_WARNING
"Structure type cannot be loaded.\n");
376 printk(KERN_WARNING
"Unknown type: %d", (int) field
->type
.atype
);
382 static int dynamic_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
383 struct bytecode_runtime
*runtime
,
384 uint64_t index
, struct estack_entry
*stack_top
)
387 const struct filter_get_index_data
*gid
;
390 * Types nested within variants need to perform dynamic lookup
391 * based on the field descriptions. LTTng-UST does not implement
394 if (stack_top
->u
.ptr
.field
)
396 gid
= (const struct filter_get_index_data
*) &runtime
->data
[index
];
397 switch (stack_top
->u
.ptr
.type
) {
399 switch (stack_top
->u
.ptr
.object_type
) {
400 case OBJECT_TYPE_ARRAY
:
404 WARN_ON_ONCE(gid
->offset
>= gid
->array_len
);
405 /* Skip count (unsigned long) */
406 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
407 ptr
= ptr
+ gid
->offset
;
408 stack_top
->u
.ptr
.ptr
= ptr
;
409 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
410 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
411 /* field is only used for types nested within variants. */
412 stack_top
->u
.ptr
.field
= NULL
;
415 case OBJECT_TYPE_SEQUENCE
:
420 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
421 ptr_seq_len
= *(unsigned long *) stack_top
->u
.ptr
.ptr
;
422 if (gid
->offset
>= gid
->elem
.len
* ptr_seq_len
) {
426 ptr
= ptr
+ gid
->offset
;
427 stack_top
->u
.ptr
.ptr
= ptr
;
428 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
429 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
430 /* field is only used for types nested within variants. */
431 stack_top
->u
.ptr
.field
= NULL
;
434 case OBJECT_TYPE_STRUCT
:
435 printk(KERN_WARNING
"Nested structures are not supported yet.\n");
438 case OBJECT_TYPE_VARIANT
:
440 printk(KERN_WARNING
"Unexpected get index type %d",
441 (int) stack_top
->u
.ptr
.object_type
);
446 case LOAD_ROOT_CONTEXT
:
447 case LOAD_ROOT_APP_CONTEXT
: /* Fall-through */
449 ret
= context_get_index(lttng_probe_ctx
,
457 case LOAD_ROOT_PAYLOAD
:
458 stack_top
->u
.ptr
.ptr
+= gid
->offset
;
459 if (gid
->elem
.type
== OBJECT_TYPE_STRING
)
460 stack_top
->u
.ptr
.ptr
= *(const char * const *) stack_top
->u
.ptr
.ptr
;
461 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
462 stack_top
->u
.ptr
.type
= LOAD_OBJECT
;
463 /* field is only used for types nested within variants. */
464 stack_top
->u
.ptr
.field
= NULL
;
473 static int dynamic_load_field(struct estack_entry
*stack_top
)
477 switch (stack_top
->u
.ptr
.type
) {
480 case LOAD_ROOT_CONTEXT
:
481 case LOAD_ROOT_APP_CONTEXT
:
482 case LOAD_ROOT_PAYLOAD
:
484 dbg_printk("Filter warning: cannot load root, missing field name.\n");
488 switch (stack_top
->u
.ptr
.object_type
) {
490 dbg_printk("op load field s8\n");
491 stack_top
->u
.v
= *(int8_t *) stack_top
->u
.ptr
.ptr
;
493 case OBJECT_TYPE_S16
:
497 dbg_printk("op load field s16\n");
498 tmp
= *(int16_t *) stack_top
->u
.ptr
.ptr
;
499 if (stack_top
->u
.ptr
.rev_bo
)
501 stack_top
->u
.v
= tmp
;
504 case OBJECT_TYPE_S32
:
508 dbg_printk("op load field s32\n");
509 tmp
= *(int32_t *) stack_top
->u
.ptr
.ptr
;
510 if (stack_top
->u
.ptr
.rev_bo
)
512 stack_top
->u
.v
= tmp
;
515 case OBJECT_TYPE_S64
:
519 dbg_printk("op load field s64\n");
520 tmp
= *(int64_t *) stack_top
->u
.ptr
.ptr
;
521 if (stack_top
->u
.ptr
.rev_bo
)
523 stack_top
->u
.v
= tmp
;
527 dbg_printk("op load field u8\n");
528 stack_top
->u
.v
= *(uint8_t *) stack_top
->u
.ptr
.ptr
;
530 case OBJECT_TYPE_U16
:
534 dbg_printk("op load field s16\n");
535 tmp
= *(uint16_t *) stack_top
->u
.ptr
.ptr
;
536 if (stack_top
->u
.ptr
.rev_bo
)
538 stack_top
->u
.v
= tmp
;
541 case OBJECT_TYPE_U32
:
545 dbg_printk("op load field u32\n");
546 tmp
= *(uint32_t *) stack_top
->u
.ptr
.ptr
;
547 if (stack_top
->u
.ptr
.rev_bo
)
549 stack_top
->u
.v
= tmp
;
552 case OBJECT_TYPE_U64
:
556 dbg_printk("op load field u64\n");
557 tmp
= *(uint64_t *) stack_top
->u
.ptr
.ptr
;
558 if (stack_top
->u
.ptr
.rev_bo
)
560 stack_top
->u
.v
= tmp
;
563 case OBJECT_TYPE_STRING
:
567 dbg_printk("op load field string\n");
568 str
= (const char *) stack_top
->u
.ptr
.ptr
;
569 stack_top
->u
.s
.str
= str
;
570 if (unlikely(!stack_top
->u
.s
.str
)) {
571 dbg_printk("Filter warning: loading a NULL string.\n");
575 stack_top
->u
.s
.seq_len
= SIZE_MAX
;
576 stack_top
->u
.s
.literal_type
=
577 ESTACK_STRING_LITERAL_TYPE_NONE
;
580 case OBJECT_TYPE_STRING_SEQUENCE
:
584 dbg_printk("op load field string sequence\n");
585 ptr
= stack_top
->u
.ptr
.ptr
;
586 stack_top
->u
.s
.seq_len
= *(unsigned long *) ptr
;
587 stack_top
->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
588 if (unlikely(!stack_top
->u
.s
.str
)) {
589 dbg_printk("Filter warning: loading a NULL sequence.\n");
593 stack_top
->u
.s
.literal_type
=
594 ESTACK_STRING_LITERAL_TYPE_NONE
;
597 case OBJECT_TYPE_DYNAMIC
:
599 * Dynamic types in context are looked up
600 * by context get index.
604 case OBJECT_TYPE_DOUBLE
:
607 case OBJECT_TYPE_SEQUENCE
:
608 case OBJECT_TYPE_ARRAY
:
609 case OBJECT_TYPE_STRUCT
:
610 case OBJECT_TYPE_VARIANT
:
611 printk(KERN_WARNING
"Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
622 * Return 0 (discard), or raise the 0x1 flag (log event).
623 * Currently, other flags are kept for future extensions and have no
626 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
627 struct lttng_probe_ctx
*lttng_probe_ctx
,
628 const char *filter_stack_data
)
630 struct bytecode_runtime
*bytecode
= filter_data
;
631 void *pc
, *next_pc
, *start_pc
;
634 struct estack _stack
;
635 struct estack
*stack
= &_stack
;
636 register int64_t ax
= 0, bx
= 0;
637 register int top
= FILTER_STACK_EMPTY
;
638 #ifndef INTERPRETER_USE_SWITCH
639 static void *dispatch
[NR_FILTER_OPS
] = {
640 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
642 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
645 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
646 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
647 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
648 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
649 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
650 [ FILTER_OP_BIT_RSHIFT
] = &&LABEL_FILTER_OP_BIT_RSHIFT
,
651 [ FILTER_OP_BIT_LSHIFT
] = &&LABEL_FILTER_OP_BIT_LSHIFT
,
652 [ FILTER_OP_BIT_AND
] = &&LABEL_FILTER_OP_BIT_AND
,
653 [ FILTER_OP_BIT_OR
] = &&LABEL_FILTER_OP_BIT_OR
,
654 [ FILTER_OP_BIT_XOR
] = &&LABEL_FILTER_OP_BIT_XOR
,
656 /* binary comparators */
657 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
658 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
659 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
660 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
661 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
662 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
664 /* string binary comparator */
665 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
666 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
667 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
668 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
669 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
670 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
672 /* globbing pattern binary comparator */
673 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING
,
674 [ FILTER_OP_NE_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING
,
676 /* s64 binary comparator */
677 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
678 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
679 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
680 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
681 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
682 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
684 /* double binary comparator */
685 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
686 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
687 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
688 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
689 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
690 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
692 /* Mixed S64-double binary comparators */
693 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
694 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
695 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
696 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
697 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
698 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
700 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
701 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
702 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
703 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
704 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
705 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
708 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
709 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
710 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
711 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
712 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
713 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
714 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
715 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
716 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
719 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
720 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
723 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
724 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
725 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
726 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
727 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
729 /* load from immediate operand */
730 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
731 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING
,
732 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
733 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
736 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
737 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
738 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
740 /* get context ref */
741 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
742 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
743 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
744 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
746 /* load userspace field ref */
747 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
748 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
750 /* Instructions for recursive traversal through composed types. */
751 [ FILTER_OP_GET_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT
,
752 [ FILTER_OP_GET_APP_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT
,
753 [ FILTER_OP_GET_PAYLOAD_ROOT
] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT
,
755 [ FILTER_OP_GET_SYMBOL
] = &&LABEL_FILTER_OP_GET_SYMBOL
,
756 [ FILTER_OP_GET_SYMBOL_FIELD
] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD
,
757 [ FILTER_OP_GET_INDEX_U16
] = &&LABEL_FILTER_OP_GET_INDEX_U16
,
758 [ FILTER_OP_GET_INDEX_U64
] = &&LABEL_FILTER_OP_GET_INDEX_U64
,
760 [ FILTER_OP_LOAD_FIELD
] = &&LABEL_FILTER_OP_LOAD_FIELD
,
761 [ FILTER_OP_LOAD_FIELD_S8
] = &&LABEL_FILTER_OP_LOAD_FIELD_S8
,
762 [ FILTER_OP_LOAD_FIELD_S16
] = &&LABEL_FILTER_OP_LOAD_FIELD_S16
,
763 [ FILTER_OP_LOAD_FIELD_S32
] = &&LABEL_FILTER_OP_LOAD_FIELD_S32
,
764 [ FILTER_OP_LOAD_FIELD_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_S64
,
765 [ FILTER_OP_LOAD_FIELD_U8
] = &&LABEL_FILTER_OP_LOAD_FIELD_U8
,
766 [ FILTER_OP_LOAD_FIELD_U16
] = &&LABEL_FILTER_OP_LOAD_FIELD_U16
,
767 [ FILTER_OP_LOAD_FIELD_U32
] = &&LABEL_FILTER_OP_LOAD_FIELD_U32
,
768 [ FILTER_OP_LOAD_FIELD_U64
] = &&LABEL_FILTER_OP_LOAD_FIELD_U64
,
769 [ FILTER_OP_LOAD_FIELD_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING
,
770 [ FILTER_OP_LOAD_FIELD_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE
,
771 [ FILTER_OP_LOAD_FIELD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE
,
773 [ FILTER_OP_UNARY_BIT_NOT
] = &&LABEL_FILTER_OP_UNARY_BIT_NOT
,
775 #endif /* #ifndef INTERPRETER_USE_SWITCH */
779 OP(FILTER_OP_UNKNOWN
):
780 OP(FILTER_OP_LOAD_FIELD_REF
):
781 OP(FILTER_OP_GET_CONTEXT_REF
):
782 #ifdef INTERPRETER_USE_SWITCH
784 #endif /* INTERPRETER_USE_SWITCH */
785 printk(KERN_WARNING
"unknown bytecode op %u\n",
786 (unsigned int) *(filter_opcode_t
*) pc
);
790 OP(FILTER_OP_RETURN
):
791 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
792 retval
= !!estack_ax_v
;
802 printk(KERN_WARNING
"unsupported bytecode op %u\n",
803 (unsigned int) *(filter_opcode_t
*) pc
);
813 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
814 (unsigned int) *(filter_opcode_t
*) pc
);
818 OP(FILTER_OP_EQ_STRING
):
822 res
= (stack_strcmp(stack
, top
, "==") == 0);
823 estack_pop(stack
, top
, ax
, bx
);
825 next_pc
+= sizeof(struct binary_op
);
828 OP(FILTER_OP_NE_STRING
):
832 res
= (stack_strcmp(stack
, top
, "!=") != 0);
833 estack_pop(stack
, top
, ax
, bx
);
835 next_pc
+= sizeof(struct binary_op
);
838 OP(FILTER_OP_GT_STRING
):
842 res
= (stack_strcmp(stack
, top
, ">") > 0);
843 estack_pop(stack
, top
, ax
, bx
);
845 next_pc
+= sizeof(struct binary_op
);
848 OP(FILTER_OP_LT_STRING
):
852 res
= (stack_strcmp(stack
, top
, "<") < 0);
853 estack_pop(stack
, top
, ax
, bx
);
855 next_pc
+= sizeof(struct binary_op
);
858 OP(FILTER_OP_GE_STRING
):
862 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
863 estack_pop(stack
, top
, ax
, bx
);
865 next_pc
+= sizeof(struct binary_op
);
868 OP(FILTER_OP_LE_STRING
):
872 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
873 estack_pop(stack
, top
, ax
, bx
);
875 next_pc
+= sizeof(struct binary_op
);
879 OP(FILTER_OP_EQ_STAR_GLOB_STRING
):
883 res
= (stack_star_glob_match(stack
, top
, "==") == 0);
884 estack_pop(stack
, top
, ax
, bx
);
886 next_pc
+= sizeof(struct binary_op
);
889 OP(FILTER_OP_NE_STAR_GLOB_STRING
):
893 res
= (stack_star_glob_match(stack
, top
, "!=") != 0);
894 estack_pop(stack
, top
, ax
, bx
);
896 next_pc
+= sizeof(struct binary_op
);
900 OP(FILTER_OP_EQ_S64
):
904 res
= (estack_bx_v
== estack_ax_v
);
905 estack_pop(stack
, top
, ax
, bx
);
907 next_pc
+= sizeof(struct binary_op
);
910 OP(FILTER_OP_NE_S64
):
914 res
= (estack_bx_v
!= estack_ax_v
);
915 estack_pop(stack
, top
, ax
, bx
);
917 next_pc
+= sizeof(struct binary_op
);
920 OP(FILTER_OP_GT_S64
):
924 res
= (estack_bx_v
> estack_ax_v
);
925 estack_pop(stack
, top
, ax
, bx
);
927 next_pc
+= sizeof(struct binary_op
);
930 OP(FILTER_OP_LT_S64
):
934 res
= (estack_bx_v
< estack_ax_v
);
935 estack_pop(stack
, top
, ax
, bx
);
937 next_pc
+= sizeof(struct binary_op
);
940 OP(FILTER_OP_GE_S64
):
944 res
= (estack_bx_v
>= estack_ax_v
);
945 estack_pop(stack
, top
, ax
, bx
);
947 next_pc
+= sizeof(struct binary_op
);
950 OP(FILTER_OP_LE_S64
):
954 res
= (estack_bx_v
<= estack_ax_v
);
955 estack_pop(stack
, top
, ax
, bx
);
957 next_pc
+= sizeof(struct binary_op
);
961 OP(FILTER_OP_EQ_DOUBLE
):
962 OP(FILTER_OP_NE_DOUBLE
):
963 OP(FILTER_OP_GT_DOUBLE
):
964 OP(FILTER_OP_LT_DOUBLE
):
965 OP(FILTER_OP_GE_DOUBLE
):
966 OP(FILTER_OP_LE_DOUBLE
):
972 /* Mixed S64-double binary comparators */
973 OP(FILTER_OP_EQ_DOUBLE_S64
):
974 OP(FILTER_OP_NE_DOUBLE_S64
):
975 OP(FILTER_OP_GT_DOUBLE_S64
):
976 OP(FILTER_OP_LT_DOUBLE_S64
):
977 OP(FILTER_OP_GE_DOUBLE_S64
):
978 OP(FILTER_OP_LE_DOUBLE_S64
):
979 OP(FILTER_OP_EQ_S64_DOUBLE
):
980 OP(FILTER_OP_NE_S64_DOUBLE
):
981 OP(FILTER_OP_GT_S64_DOUBLE
):
982 OP(FILTER_OP_LT_S64_DOUBLE
):
983 OP(FILTER_OP_GE_S64_DOUBLE
):
984 OP(FILTER_OP_LE_S64_DOUBLE
):
989 OP(FILTER_OP_BIT_RSHIFT
):
993 /* Catch undefined behavior. */
994 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
998 res
= (estack_bx_v
>> estack_ax_v
);
999 estack_pop(stack
, top
, ax
, bx
);
1001 next_pc
+= sizeof(struct binary_op
);
1004 OP(FILTER_OP_BIT_LSHIFT
):
1008 /* Catch undefined behavior. */
1009 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1013 res
= (estack_bx_v
<< estack_ax_v
);
1014 estack_pop(stack
, top
, ax
, bx
);
1016 next_pc
+= sizeof(struct binary_op
);
1019 OP(FILTER_OP_BIT_AND
):
1023 res
= (estack_bx_v
& estack_ax_v
);
1024 estack_pop(stack
, top
, ax
, bx
);
1026 next_pc
+= sizeof(struct binary_op
);
1029 OP(FILTER_OP_BIT_OR
):
1033 res
= (estack_bx_v
| estack_ax_v
);
1034 estack_pop(stack
, top
, ax
, bx
);
1036 next_pc
+= sizeof(struct binary_op
);
1039 OP(FILTER_OP_BIT_XOR
):
1043 res
= (estack_bx_v
^ estack_ax_v
);
1044 estack_pop(stack
, top
, ax
, bx
);
1046 next_pc
+= sizeof(struct binary_op
);
1051 OP(FILTER_OP_UNARY_PLUS
):
1052 OP(FILTER_OP_UNARY_MINUS
):
1053 OP(FILTER_OP_UNARY_NOT
):
1054 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1055 (unsigned int) *(filter_opcode_t
*) pc
);
1060 OP(FILTER_OP_UNARY_BIT_NOT
):
1062 estack_ax_v
= ~estack_ax_v
;
1063 next_pc
+= sizeof(struct unary_op
);
1067 OP(FILTER_OP_UNARY_PLUS_S64
):
1069 next_pc
+= sizeof(struct unary_op
);
1072 OP(FILTER_OP_UNARY_MINUS_S64
):
1074 estack_ax_v
= -estack_ax_v
;
1075 next_pc
+= sizeof(struct unary_op
);
1078 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
1079 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
1084 OP(FILTER_OP_UNARY_NOT_S64
):
1086 estack_ax_v
= !estack_ax_v
;
1087 next_pc
+= sizeof(struct unary_op
);
1090 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
1099 struct logical_op
*insn
= (struct logical_op
*) pc
;
1101 /* If AX is 0, skip and evaluate to 0 */
1102 if (unlikely(estack_ax_v
== 0)) {
1103 dbg_printk("Jumping to bytecode offset %u\n",
1104 (unsigned int) insn
->skip_offset
);
1105 next_pc
= start_pc
+ insn
->skip_offset
;
1107 /* Pop 1 when jump not taken */
1108 estack_pop(stack
, top
, ax
, bx
);
1109 next_pc
+= sizeof(struct logical_op
);
1115 struct logical_op
*insn
= (struct logical_op
*) pc
;
1117 /* If AX is nonzero, skip and evaluate to 1 */
1119 if (unlikely(estack_ax_v
!= 0)) {
1121 dbg_printk("Jumping to bytecode offset %u\n",
1122 (unsigned int) insn
->skip_offset
);
1123 next_pc
= start_pc
+ insn
->skip_offset
;
1125 /* Pop 1 when jump not taken */
1126 estack_pop(stack
, top
, ax
, bx
);
1127 next_pc
+= sizeof(struct logical_op
);
1133 /* load field ref */
1134 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
1136 struct load_op
*insn
= (struct load_op
*) pc
;
1137 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1139 dbg_printk("load field ref offset %u type string\n",
1141 estack_push(stack
, top
, ax
, bx
);
1142 estack_ax(stack
, top
)->u
.s
.str
=
1143 *(const char * const *) &filter_stack_data
[ref
->offset
];
1144 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1145 dbg_printk("Filter warning: loading a NULL string.\n");
1149 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1150 estack_ax(stack
, top
)->u
.s
.literal_type
=
1151 ESTACK_STRING_LITERAL_TYPE_NONE
;
1152 estack_ax(stack
, top
)->u
.s
.user
= 0;
1153 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1154 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1158 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
1160 struct load_op
*insn
= (struct load_op
*) pc
;
1161 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1163 dbg_printk("load field ref offset %u type sequence\n",
1165 estack_push(stack
, top
, ax
, bx
);
1166 estack_ax(stack
, top
)->u
.s
.seq_len
=
1167 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1168 estack_ax(stack
, top
)->u
.s
.str
=
1169 *(const char **) (&filter_stack_data
[ref
->offset
1170 + sizeof(unsigned long)]);
1171 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1172 dbg_printk("Filter warning: loading a NULL sequence.\n");
1176 estack_ax(stack
, top
)->u
.s
.literal_type
=
1177 ESTACK_STRING_LITERAL_TYPE_NONE
;
1178 estack_ax(stack
, top
)->u
.s
.user
= 0;
1179 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1183 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
1185 struct load_op
*insn
= (struct load_op
*) pc
;
1186 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1188 dbg_printk("load field ref offset %u type s64\n",
1190 estack_push(stack
, top
, ax
, bx
);
1192 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
1193 dbg_printk("ref load s64 %lld\n",
1194 (long long) estack_ax_v
);
1195 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1199 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
1205 /* load from immediate operand */
1206 OP(FILTER_OP_LOAD_STRING
):
1208 struct load_op
*insn
= (struct load_op
*) pc
;
1210 dbg_printk("load string %s\n", insn
->data
);
1211 estack_push(stack
, top
, ax
, bx
);
1212 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1213 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1214 estack_ax(stack
, top
)->u
.s
.literal_type
=
1215 ESTACK_STRING_LITERAL_TYPE_PLAIN
;
1216 estack_ax(stack
, top
)->u
.s
.user
= 0;
1217 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1221 OP(FILTER_OP_LOAD_STAR_GLOB_STRING
):
1223 struct load_op
*insn
= (struct load_op
*) pc
;
1225 dbg_printk("load globbing pattern %s\n", insn
->data
);
1226 estack_push(stack
, top
, ax
, bx
);
1227 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1228 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1229 estack_ax(stack
, top
)->u
.s
.literal_type
=
1230 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
;
1231 estack_ax(stack
, top
)->u
.s
.user
= 0;
1232 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1236 OP(FILTER_OP_LOAD_S64
):
1238 struct load_op
*insn
= (struct load_op
*) pc
;
1240 estack_push(stack
, top
, ax
, bx
);
1241 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
1242 dbg_printk("load s64 %lld\n",
1243 (long long) estack_ax_v
);
1244 next_pc
+= sizeof(struct load_op
)
1245 + sizeof(struct literal_numeric
);
1249 OP(FILTER_OP_LOAD_DOUBLE
):
1256 OP(FILTER_OP_CAST_TO_S64
):
1257 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1258 (unsigned int) *(filter_opcode_t
*) pc
);
1262 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
1268 OP(FILTER_OP_CAST_NOP
):
1270 next_pc
+= sizeof(struct cast_op
);
1274 /* get context ref */
1275 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
1277 struct load_op
*insn
= (struct load_op
*) pc
;
1278 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1279 struct lttng_ctx_field
*ctx_field
;
1280 union lttng_ctx_value v
;
1282 dbg_printk("get context ref offset %u type string\n",
1284 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1285 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1286 estack_push(stack
, top
, ax
, bx
);
1287 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
1288 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1289 dbg_printk("Filter warning: loading a NULL string.\n");
1293 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1294 estack_ax(stack
, top
)->u
.s
.literal_type
=
1295 ESTACK_STRING_LITERAL_TYPE_NONE
;
1296 estack_ax(stack
, top
)->u
.s
.user
= 0;
1297 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1298 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1302 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
1304 struct load_op
*insn
= (struct load_op
*) pc
;
1305 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1306 struct lttng_ctx_field
*ctx_field
;
1307 union lttng_ctx_value v
;
1309 dbg_printk("get context ref offset %u type s64\n",
1311 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1312 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1313 estack_push(stack
, top
, ax
, bx
);
1314 estack_ax_v
= v
.s64
;
1315 dbg_printk("ref get context s64 %lld\n",
1316 (long long) estack_ax_v
);
1317 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1321 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
1327 /* load userspace field ref */
1328 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
1330 struct load_op
*insn
= (struct load_op
*) pc
;
1331 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1333 dbg_printk("load field ref offset %u type user string\n",
1335 estack_push(stack
, top
, ax
, bx
);
1336 estack_ax(stack
, top
)->u
.s
.user_str
=
1337 *(const char * const *) &filter_stack_data
[ref
->offset
];
1338 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1339 dbg_printk("Filter warning: loading a NULL string.\n");
1343 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1344 estack_ax(stack
, top
)->u
.s
.literal_type
=
1345 ESTACK_STRING_LITERAL_TYPE_NONE
;
1346 estack_ax(stack
, top
)->u
.s
.user
= 1;
1347 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1348 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1352 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
1354 struct load_op
*insn
= (struct load_op
*) pc
;
1355 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1357 dbg_printk("load field ref offset %u type user sequence\n",
1359 estack_push(stack
, top
, ax
, bx
);
1360 estack_ax(stack
, top
)->u
.s
.seq_len
=
1361 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1362 estack_ax(stack
, top
)->u
.s
.user_str
=
1363 *(const char **) (&filter_stack_data
[ref
->offset
1364 + sizeof(unsigned long)]);
1365 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1366 dbg_printk("Filter warning: loading a NULL sequence.\n");
1370 estack_ax(stack
, top
)->u
.s
.literal_type
=
1371 ESTACK_STRING_LITERAL_TYPE_NONE
;
1372 estack_ax(stack
, top
)->u
.s
.user
= 1;
1373 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1377 OP(FILTER_OP_GET_CONTEXT_ROOT
):
1379 dbg_printk("op get context root\n");
1380 estack_push(stack
, top
, ax
, bx
);
1381 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_CONTEXT
;
1382 /* "field" only needed for variants. */
1383 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1384 next_pc
+= sizeof(struct load_op
);
1388 OP(FILTER_OP_GET_APP_CONTEXT_ROOT
):
1394 OP(FILTER_OP_GET_PAYLOAD_ROOT
):
1396 dbg_printk("op get app payload root\n");
1397 estack_push(stack
, top
, ax
, bx
);
1398 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_PAYLOAD
;
1399 estack_ax(stack
, top
)->u
.ptr
.ptr
= filter_stack_data
;
1400 /* "field" only needed for variants. */
1401 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1402 next_pc
+= sizeof(struct load_op
);
1406 OP(FILTER_OP_GET_SYMBOL
):
1408 dbg_printk("op get symbol\n");
1409 switch (estack_ax(stack
, top
)->u
.ptr
.type
) {
1411 printk(KERN_WARNING
"Nested fields not implemented yet.\n");
1414 case LOAD_ROOT_CONTEXT
:
1415 case LOAD_ROOT_APP_CONTEXT
:
1416 case LOAD_ROOT_PAYLOAD
:
1418 * symbol lookup is performed by
1424 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1428 OP(FILTER_OP_GET_SYMBOL_FIELD
):
1431 * Used for first variant encountered in a
1432 * traversal. Variants are not implemented yet.
1438 OP(FILTER_OP_GET_INDEX_U16
):
1440 struct load_op
*insn
= (struct load_op
*) pc
;
1441 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1443 dbg_printk("op get index u16\n");
1444 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1447 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1448 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1452 OP(FILTER_OP_GET_INDEX_U64
):
1454 struct load_op
*insn
= (struct load_op
*) pc
;
1455 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1457 dbg_printk("op get index u64\n");
1458 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1461 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1462 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1466 OP(FILTER_OP_LOAD_FIELD
):
1468 dbg_printk("op load field\n");
1469 ret
= dynamic_load_field(estack_ax(stack
, top
));
1472 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1473 next_pc
+= sizeof(struct load_op
);
1477 OP(FILTER_OP_LOAD_FIELD_S8
):
1479 dbg_printk("op load field s8\n");
1481 estack_ax_v
= *(int8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1482 next_pc
+= sizeof(struct load_op
);
1485 OP(FILTER_OP_LOAD_FIELD_S16
):
1487 dbg_printk("op load field s16\n");
1489 estack_ax_v
= *(int16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1490 next_pc
+= sizeof(struct load_op
);
1493 OP(FILTER_OP_LOAD_FIELD_S32
):
1495 dbg_printk("op load field s32\n");
1497 estack_ax_v
= *(int32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1498 next_pc
+= sizeof(struct load_op
);
1501 OP(FILTER_OP_LOAD_FIELD_S64
):
1503 dbg_printk("op load field s64\n");
1505 estack_ax_v
= *(int64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1506 next_pc
+= sizeof(struct load_op
);
1509 OP(FILTER_OP_LOAD_FIELD_U8
):
1511 dbg_printk("op load field u8\n");
1513 estack_ax_v
= *(uint8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1514 next_pc
+= sizeof(struct load_op
);
1517 OP(FILTER_OP_LOAD_FIELD_U16
):
1519 dbg_printk("op load field u16\n");
1521 estack_ax_v
= *(uint16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1522 next_pc
+= sizeof(struct load_op
);
1525 OP(FILTER_OP_LOAD_FIELD_U32
):
1527 dbg_printk("op load field u32\n");
1529 estack_ax_v
= *(uint32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1530 next_pc
+= sizeof(struct load_op
);
1533 OP(FILTER_OP_LOAD_FIELD_U64
):
1535 dbg_printk("op load field u64\n");
1537 estack_ax_v
= *(uint64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1538 next_pc
+= sizeof(struct load_op
);
1541 OP(FILTER_OP_LOAD_FIELD_DOUBLE
):
1547 OP(FILTER_OP_LOAD_FIELD_STRING
):
1551 dbg_printk("op load field string\n");
1552 str
= (const char *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1553 estack_ax(stack
, top
)->u
.s
.str
= str
;
1554 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1555 dbg_printk("Filter warning: loading a NULL string.\n");
1559 estack_ax(stack
, top
)->u
.s
.seq_len
= SIZE_MAX
;
1560 estack_ax(stack
, top
)->u
.s
.literal_type
=
1561 ESTACK_STRING_LITERAL_TYPE_NONE
;
1562 next_pc
+= sizeof(struct load_op
);
1566 OP(FILTER_OP_LOAD_FIELD_SEQUENCE
):
1570 dbg_printk("op load field string sequence\n");
1571 ptr
= estack_ax(stack
, top
)->u
.ptr
.ptr
;
1572 estack_ax(stack
, top
)->u
.s
.seq_len
= *(unsigned long *) ptr
;
1573 estack_ax(stack
, top
)->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
1574 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1575 dbg_printk("Filter warning: loading a NULL sequence.\n");
1579 estack_ax(stack
, top
)->u
.s
.literal_type
=
1580 ESTACK_STRING_LITERAL_TYPE_NONE
;
1581 next_pc
+= sizeof(struct load_op
);
1587 /* return 0 (discard) on error */