1 /* SPDX-License-Identifier: MIT
3 * lttng-filter-interpreter.c
5 * LTTng modules filter interpreter.
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 #include <wrapper/compiler_attributes.h>
11 #include <wrapper/uaccess.h>
12 #include <wrapper/objtool.h>
13 #include <wrapper/types.h>
14 #include <linux/swab.h>
16 #include <lttng-filter.h>
17 #include <lttng-string-utils.h>
18 #include <probes/lttng-probe-user.h>
20 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode
);
23 * get_char should be called with page fault handler disabled if it is expected
24 * to handle user-space read.
27 char get_char(const struct estack_entry
*reg
, size_t offset
)
29 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
34 /* Handle invalid access as end of string. */
35 if (unlikely(!lttng_access_ok(VERIFY_READ
,
36 reg
->u
.s
.user_str
+ offset
,
39 /* Handle fault (nonzero return value) as end of string. */
40 if (unlikely(__copy_from_user_inatomic(&c
,
41 reg
->u
.s
.user_str
+ offset
,
46 return reg
->u
.s
.str
[offset
];
52 * -2: unknown escape char.
56 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
61 *c
= get_char(reg
, *offset
);
77 char get_char_at_cb(size_t at
, void *data
)
79 return get_char(data
, at
);
83 int stack_star_glob_match(struct estack
*stack
, int top
, const char *cmp_type
)
85 bool has_user
= false;
87 struct estack_entry
*pattern_reg
;
88 struct estack_entry
*candidate_reg
;
90 /* Disable the page fault handler when reading from userspace. */
91 if (estack_bx(stack
, top
)->u
.s
.user
92 || estack_ax(stack
, top
)->u
.s
.user
) {
97 /* Find out which side is the pattern vs. the candidate. */
98 if (estack_ax(stack
, top
)->u
.s
.literal_type
== ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
) {
99 pattern_reg
= estack_ax(stack
, top
);
100 candidate_reg
= estack_bx(stack
, top
);
102 pattern_reg
= estack_bx(stack
, top
);
103 candidate_reg
= estack_ax(stack
, top
);
106 /* Perform the match operation. */
107 result
= !strutils_star_glob_match_char_cb(get_char_at_cb
,
108 pattern_reg
, get_char_at_cb
, candidate_reg
);
116 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
118 size_t offset_bx
= 0, offset_ax
= 0;
119 int diff
, has_user
= 0;
121 if (estack_bx(stack
, top
)->u
.s
.user
122 || estack_ax(stack
, top
)->u
.s
.user
) {
130 char char_bx
, char_ax
;
132 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
133 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
135 if (unlikely(char_bx
== '\0')) {
136 if (char_ax
== '\0') {
140 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
141 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
142 ret
= parse_char(estack_ax(stack
, top
),
143 &char_ax
, &offset_ax
);
153 if (unlikely(char_ax
== '\0')) {
154 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
155 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
156 ret
= parse_char(estack_bx(stack
, top
),
157 &char_bx
, &offset_bx
);
166 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
167 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
168 ret
= parse_char(estack_bx(stack
, top
),
169 &char_bx
, &offset_bx
);
173 } else if (ret
== -2) {
176 /* else compare both char */
178 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
179 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
180 ret
= parse_char(estack_ax(stack
, top
),
181 &char_ax
, &offset_ax
);
185 } else if (ret
== -2) {
202 diff
= char_bx
- char_ax
;
/*
 * Trivial filter callback that always discards the event. Installed as
 * the interpreter entry point when no bytecode is attached or when the
 * bytecode failed to link. All parameters are intentionally unused.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;	/* LTTNG_FILTER_DISCARD */
}
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 * The interpreter main loop is a for/switch over the opcode stream.
 *
 * NOTE(review): reconstructed from a mangled extraction; the switch
 * variant reads &bytecode->data[0] while the dispatch variant reads
 * &bytecode->code[0] — that asymmetry is present in the fragments,
 * confirm the member names against the bytecode_runtime definition.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter (computed goto).
 */

#define START_OP							\
	start_pc = &bytecode->code[0];					\
	pc = next_pc = start_pc;					\
	if (unlikely(pc - start_pc >= bytecode->len))			\
		goto end;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)							\
LABEL_##name

#define PO								\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
267 static int context_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
268 struct load_ptr
*ptr
,
272 struct lttng_ctx_field
*ctx_field
;
273 struct lttng_event_field
*field
;
274 union lttng_ctx_value v
;
276 ctx_field
= <tng_static_ctx
->fields
[idx
];
277 field
= &ctx_field
->event_field
;
278 ptr
->type
= LOAD_OBJECT
;
279 /* field is only used for types nested within variants. */
282 switch (field
->type
.atype
) {
284 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
285 if (field
->type
.u
.basic
.integer
.signedness
) {
286 ptr
->object_type
= OBJECT_TYPE_S64
;
288 ptr
->ptr
= &ptr
->u
.s64
;
290 ptr
->object_type
= OBJECT_TYPE_U64
;
291 ptr
->u
.u64
= v
.s64
; /* Cast. */
292 ptr
->ptr
= &ptr
->u
.u64
;
294 ptr
->rev_bo
= field
->type
.u
.basic
.integer
.reverse_byte_order
;
295 ptr
->user
= field
->type
.u
.basic
.integer
.user
;
299 const struct lttng_integer_type
*itype
=
300 &field
->type
.u
.basic
.enumeration
.container_type
;
302 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
303 if (itype
->signedness
) {
304 ptr
->object_type
= OBJECT_TYPE_S64
;
306 ptr
->ptr
= &ptr
->u
.s64
;
308 ptr
->object_type
= OBJECT_TYPE_U64
;
309 ptr
->u
.u64
= v
.s64
; /* Cast. */
310 ptr
->ptr
= &ptr
->u
.u64
;
312 ptr
->rev_bo
= itype
->reverse_byte_order
;
313 ptr
->user
= itype
->user
;
317 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
318 printk(KERN_WARNING
"Array nesting only supports integer types.\n");
321 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
322 printk(KERN_WARNING
"Only string arrays are supported for contexts.\n");
325 ptr
->object_type
= OBJECT_TYPE_STRING
;
326 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
328 ptr
->user
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.user
;
331 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
332 printk(KERN_WARNING
"Sequence nesting only supports integer types.\n");
335 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
336 printk(KERN_WARNING
"Only string sequences are supported for contexts.\n");
339 ptr
->object_type
= OBJECT_TYPE_STRING
;
340 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
342 ptr
->user
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.user
;
344 case atype_array_bitfield
:
345 printk(KERN_WARNING
"Bitfield array type is not supported.\n");
347 case atype_sequence_bitfield
:
348 printk(KERN_WARNING
"Bitfield sequence type is not supported.\n");
351 ptr
->object_type
= OBJECT_TYPE_STRING
;
352 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
354 ptr
->user
= field
->type
.u
.basic
.string
.user
;
357 printk(KERN_WARNING
"Structure type cannot be loaded.\n");
360 printk(KERN_WARNING
"Unknown type: %d", (int) field
->type
.atype
);
366 static int dynamic_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
367 struct bytecode_runtime
*runtime
,
368 uint64_t index
, struct estack_entry
*stack_top
)
371 const struct filter_get_index_data
*gid
;
374 * Types nested within variants need to perform dynamic lookup
375 * based on the field descriptions. LTTng-UST does not implement
378 if (stack_top
->u
.ptr
.field
)
380 gid
= (const struct filter_get_index_data
*) &runtime
->data
[index
];
381 switch (stack_top
->u
.ptr
.type
) {
383 switch (stack_top
->u
.ptr
.object_type
) {
384 case OBJECT_TYPE_ARRAY
:
388 WARN_ON_ONCE(gid
->offset
>= gid
->array_len
);
389 /* Skip count (unsigned long) */
390 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
391 ptr
= ptr
+ gid
->offset
;
392 stack_top
->u
.ptr
.ptr
= ptr
;
393 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
394 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
395 stack_top
->u
.ptr
.user
= gid
->elem
.user
;
396 /* field is only used for types nested within variants. */
397 stack_top
->u
.ptr
.field
= NULL
;
400 case OBJECT_TYPE_SEQUENCE
:
405 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
406 ptr_seq_len
= *(unsigned long *) stack_top
->u
.ptr
.ptr
;
407 if (gid
->offset
>= gid
->elem
.len
* ptr_seq_len
) {
411 ptr
= ptr
+ gid
->offset
;
412 stack_top
->u
.ptr
.ptr
= ptr
;
413 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
414 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
415 stack_top
->u
.ptr
.user
= gid
->elem
.user
;
416 /* field is only used for types nested within variants. */
417 stack_top
->u
.ptr
.field
= NULL
;
420 case OBJECT_TYPE_STRUCT
:
421 printk(KERN_WARNING
"Nested structures are not supported yet.\n");
424 case OBJECT_TYPE_VARIANT
:
426 printk(KERN_WARNING
"Unexpected get index type %d",
427 (int) stack_top
->u
.ptr
.object_type
);
432 case LOAD_ROOT_CONTEXT
:
434 case LOAD_ROOT_APP_CONTEXT
:
436 ret
= context_get_index(lttng_probe_ctx
,
444 case LOAD_ROOT_PAYLOAD
:
445 stack_top
->u
.ptr
.ptr
+= gid
->offset
;
446 if (gid
->elem
.type
== OBJECT_TYPE_STRING
)
447 stack_top
->u
.ptr
.ptr
= *(const char * const *) stack_top
->u
.ptr
.ptr
;
448 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
449 stack_top
->u
.ptr
.type
= LOAD_OBJECT
;
450 /* field is only used for types nested within variants. */
451 stack_top
->u
.ptr
.field
= NULL
;
452 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
453 stack_top
->u
.ptr
.user
= gid
->elem
.user
;
462 static int dynamic_load_field(struct estack_entry
*stack_top
)
466 switch (stack_top
->u
.ptr
.type
) {
469 case LOAD_ROOT_CONTEXT
:
470 case LOAD_ROOT_APP_CONTEXT
:
471 case LOAD_ROOT_PAYLOAD
:
473 dbg_printk("Filter warning: cannot load root, missing field name.\n");
477 switch (stack_top
->u
.ptr
.object_type
) {
479 dbg_printk("op load field s8\n");
480 if (stack_top
->u
.ptr
.user
) {
481 if (lttng_copy_from_user_check_nofault(&stack_top
->u
.v
, (int8_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(int8_t)))
484 stack_top
->u
.v
= *(int8_t *) stack_top
->u
.ptr
.ptr
;
487 case OBJECT_TYPE_S16
:
491 dbg_printk("op load field s16\n");
492 if (stack_top
->u
.ptr
.user
) {
493 if (lttng_copy_from_user_check_nofault(&tmp
, (int16_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(int16_t)))
496 tmp
= *(int16_t *) stack_top
->u
.ptr
.ptr
;
498 if (stack_top
->u
.ptr
.rev_bo
)
500 stack_top
->u
.v
= tmp
;
503 case OBJECT_TYPE_S32
:
507 dbg_printk("op load field s32\n");
508 if (stack_top
->u
.ptr
.user
) {
509 if (lttng_copy_from_user_check_nofault(&tmp
, (int32_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(int32_t)))
512 tmp
= *(int32_t *) stack_top
->u
.ptr
.ptr
;
514 if (stack_top
->u
.ptr
.rev_bo
)
516 stack_top
->u
.v
= tmp
;
519 case OBJECT_TYPE_S64
:
523 dbg_printk("op load field s64\n");
524 if (stack_top
->u
.ptr
.user
) {
525 if (lttng_copy_from_user_check_nofault(&tmp
, (int64_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(int64_t)))
528 tmp
= *(int64_t *) stack_top
->u
.ptr
.ptr
;
530 if (stack_top
->u
.ptr
.rev_bo
)
532 stack_top
->u
.v
= tmp
;
536 dbg_printk("op load field u8\n");
537 if (stack_top
->u
.ptr
.user
) {
538 if (lttng_copy_from_user_check_nofault(&stack_top
->u
.v
, (uint8_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(uint8_t)))
541 stack_top
->u
.v
= *(uint8_t *) stack_top
->u
.ptr
.ptr
;
544 case OBJECT_TYPE_U16
:
548 dbg_printk("op load field s16\n");
549 if (stack_top
->u
.ptr
.user
) {
550 if (lttng_copy_from_user_check_nofault(&tmp
, (uint16_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(uint16_t)))
553 tmp
= *(uint16_t *) stack_top
->u
.ptr
.ptr
;
555 if (stack_top
->u
.ptr
.rev_bo
)
557 stack_top
->u
.v
= tmp
;
560 case OBJECT_TYPE_U32
:
564 dbg_printk("op load field u32\n");
565 if (stack_top
->u
.ptr
.user
) {
566 if (lttng_copy_from_user_check_nofault(&tmp
, (uint32_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(uint32_t)))
569 tmp
= *(uint32_t *) stack_top
->u
.ptr
.ptr
;
571 if (stack_top
->u
.ptr
.rev_bo
)
573 stack_top
->u
.v
= tmp
;
576 case OBJECT_TYPE_U64
:
580 dbg_printk("op load field u64\n");
581 if (stack_top
->u
.ptr
.user
) {
582 if (lttng_copy_from_user_check_nofault(&tmp
, (uint64_t __user
*) stack_top
->u
.ptr
.ptr
, sizeof(uint64_t)))
585 tmp
= *(uint64_t *) stack_top
->u
.ptr
.ptr
;
587 if (stack_top
->u
.ptr
.rev_bo
)
589 stack_top
->u
.v
= tmp
;
592 case OBJECT_TYPE_STRING
:
594 dbg_printk("op load field string: user=%d\n", stack_top
->u
.ptr
.user
);
595 if (stack_top
->u
.ptr
.user
) {
596 const char __user
*user_str
= (const char __user
*) stack_top
->u
.ptr
.ptr
;
598 stack_top
->u
.s
.user_str
= user_str
;
599 if (unlikely(!stack_top
->u
.s
.user_str
)) {
600 dbg_printk("Bytecode warning: loading a NULL user string.\n");
604 stack_top
->u
.s
.user
= 1;
606 const char *str
= (const char *) stack_top
->u
.ptr
.ptr
;
608 stack_top
->u
.s
.str
= str
;
609 if (unlikely(!stack_top
->u
.s
.str
)) {
610 dbg_printk("Bytecode warning: loading a NULL string.\n");
614 stack_top
->u
.s
.user
= 0;
616 stack_top
->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
617 stack_top
->u
.s
.literal_type
= ESTACK_STRING_LITERAL_TYPE_NONE
;
620 case OBJECT_TYPE_STRING_SEQUENCE
:
624 dbg_printk("op load field string sequence\n");
625 ptr
= stack_top
->u
.ptr
.ptr
;
626 stack_top
->u
.s
.seq_len
= *(unsigned long *) ptr
;
627 if (stack_top
->u
.ptr
.user
) {
628 stack_top
->u
.s
.user_str
= *(const char __user
**) (ptr
+ sizeof(unsigned long));
629 if (unlikely(!stack_top
->u
.s
.user_str
)) {
630 dbg_printk("Bytecode warning: loading a NULL user sequence.\n");
634 stack_top
->u
.s
.user
= 1;
636 stack_top
->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
637 if (unlikely(!stack_top
->u
.s
.str
)) {
638 dbg_printk("Bytecode warning: loading a NULL sequence.\n");
642 stack_top
->u
.s
.user
= 0;
644 stack_top
->u
.s
.literal_type
= ESTACK_STRING_LITERAL_TYPE_NONE
;
647 case OBJECT_TYPE_DYNAMIC
:
649 * Dynamic types in context are looked up
650 * by context get index.
654 case OBJECT_TYPE_DOUBLE
:
657 case OBJECT_TYPE_SEQUENCE
:
658 case OBJECT_TYPE_ARRAY
:
659 case OBJECT_TYPE_STRUCT
:
660 case OBJECT_TYPE_VARIANT
:
661 printk(KERN_WARNING
"Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
#define DBG_USER_STR_CUTOFF 32

/*
 * In debug mode, print user string (truncated, if necessary).
 *
 * NOTE(review): the preprocessor condition selecting the debug vs.
 * no-op variant was missing from the mangled source; "#ifdef DEBUG" is
 * a reconstruction — confirm the exact guard against upstream.
 */
#ifdef DEBUG
static
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
	size_t pos = 0;
	char last_char;
	char user_str[DBG_USER_STR_CUTOFF];

	pagefault_disable();
	do {
		last_char = get_char(user_str_reg, pos);
		user_str[pos] = last_char;
		pos++;
	} while (last_char != '\0' && pos < sizeof(user_str));
	pagefault_enable();

	user_str[sizeof(user_str) - 1] = '\0';
	dbg_printk("load field ref user string: '%s%s'\n", user_str,
		last_char != '\0' ? "[...]" : "");
}
#else
static
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
}
#endif
705 * Return 0 (discard), or raise the 0x1 flag (log event).
706 * Currently, other flags are kept for future extensions and have no
709 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
710 struct lttng_probe_ctx
*lttng_probe_ctx
,
711 const char *filter_stack_data
)
713 struct bytecode_runtime
*bytecode
= filter_data
;
714 void *pc
, *next_pc
, *start_pc
;
717 struct estack _stack
;
718 struct estack
*stack
= &_stack
;
719 register int64_t ax
= 0, bx
= 0;
720 register int top
= FILTER_STACK_EMPTY
;
721 #ifndef INTERPRETER_USE_SWITCH
722 static void *dispatch
[NR_FILTER_OPS
] = {
723 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
725 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
728 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
729 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
730 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
731 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
732 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
733 [ FILTER_OP_BIT_RSHIFT
] = &&LABEL_FILTER_OP_BIT_RSHIFT
,
734 [ FILTER_OP_BIT_LSHIFT
] = &&LABEL_FILTER_OP_BIT_LSHIFT
,
735 [ FILTER_OP_BIT_AND
] = &&LABEL_FILTER_OP_BIT_AND
,
736 [ FILTER_OP_BIT_OR
] = &&LABEL_FILTER_OP_BIT_OR
,
737 [ FILTER_OP_BIT_XOR
] = &&LABEL_FILTER_OP_BIT_XOR
,
739 /* binary comparators */
740 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
741 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
742 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
743 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
744 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
745 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
747 /* string binary comparator */
748 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
749 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
750 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
751 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
752 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
753 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
755 /* globbing pattern binary comparator */
756 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING
,
757 [ FILTER_OP_NE_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING
,
759 /* s64 binary comparator */
760 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
761 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
762 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
763 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
764 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
765 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
767 /* double binary comparator */
768 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
769 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
770 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
771 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
772 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
773 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
775 /* Mixed S64-double binary comparators */
776 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
777 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
778 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
779 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
780 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
781 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
783 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
784 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
785 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
786 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
787 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
788 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
791 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
792 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
793 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
794 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
795 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
796 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
797 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
798 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
799 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
802 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
803 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
806 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
807 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
808 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
809 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
810 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
812 /* load from immediate operand */
813 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
814 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING
,
815 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
816 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
819 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
820 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
821 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
823 /* get context ref */
824 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
825 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
826 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
827 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
829 /* load userspace field ref */
830 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
831 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
833 /* Instructions for recursive traversal through composed types. */
834 [ FILTER_OP_GET_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT
,
835 [ FILTER_OP_GET_APP_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT
,
836 [ FILTER_OP_GET_PAYLOAD_ROOT
] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT
,
838 [ FILTER_OP_GET_SYMBOL
] = &&LABEL_FILTER_OP_GET_SYMBOL
,
839 [ FILTER_OP_GET_SYMBOL_FIELD
] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD
,
840 [ FILTER_OP_GET_INDEX_U16
] = &&LABEL_FILTER_OP_GET_INDEX_U16
,
841 [ FILTER_OP_GET_INDEX_U64
] = &&LABEL_FILTER_OP_GET_INDEX_U64
,
843 [ FILTER_OP_LOAD_FIELD
] = &&LABEL_FILTER_OP_LOAD_FIELD
,
844 [ FILTER_OP_LOAD_FIELD_S8
] = &&LABEL_FILTER_OP_LOAD_FIELD_S8
,
845 [ FILTER_OP_LOAD_FIELD_S16
] = &&LABEL_FILTER_OP_LOAD_FIELD_S16
,
846 [ FILTER_OP_LOAD_FIELD_S32
] = &&LABEL_FILTER_OP_LOAD_FIELD_S32
,
847 [ FILTER_OP_LOAD_FIELD_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_S64
,
848 [ FILTER_OP_LOAD_FIELD_U8
] = &&LABEL_FILTER_OP_LOAD_FIELD_U8
,
849 [ FILTER_OP_LOAD_FIELD_U16
] = &&LABEL_FILTER_OP_LOAD_FIELD_U16
,
850 [ FILTER_OP_LOAD_FIELD_U32
] = &&LABEL_FILTER_OP_LOAD_FIELD_U32
,
851 [ FILTER_OP_LOAD_FIELD_U64
] = &&LABEL_FILTER_OP_LOAD_FIELD_U64
,
852 [ FILTER_OP_LOAD_FIELD_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING
,
853 [ FILTER_OP_LOAD_FIELD_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE
,
854 [ FILTER_OP_LOAD_FIELD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE
,
856 [ FILTER_OP_UNARY_BIT_NOT
] = &&LABEL_FILTER_OP_UNARY_BIT_NOT
,
858 [ FILTER_OP_RETURN_S64
] = &&LABEL_FILTER_OP_RETURN_S64
,
860 #endif /* #ifndef INTERPRETER_USE_SWITCH */
864 OP(FILTER_OP_UNKNOWN
):
865 OP(FILTER_OP_LOAD_FIELD_REF
):
866 OP(FILTER_OP_GET_CONTEXT_REF
):
867 #ifdef INTERPRETER_USE_SWITCH
869 #endif /* INTERPRETER_USE_SWITCH */
870 printk(KERN_WARNING
"unknown bytecode op %u\n",
871 (unsigned int) *(filter_opcode_t
*) pc
);
875 OP(FILTER_OP_RETURN
):
876 OP(FILTER_OP_RETURN_S64
):
877 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
878 retval
= !!estack_ax_v
;
888 printk(KERN_WARNING
"unsupported bytecode op %u\n",
889 (unsigned int) *(filter_opcode_t
*) pc
);
899 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
900 (unsigned int) *(filter_opcode_t
*) pc
);
904 OP(FILTER_OP_EQ_STRING
):
908 res
= (stack_strcmp(stack
, top
, "==") == 0);
909 estack_pop(stack
, top
, ax
, bx
);
911 next_pc
+= sizeof(struct binary_op
);
914 OP(FILTER_OP_NE_STRING
):
918 res
= (stack_strcmp(stack
, top
, "!=") != 0);
919 estack_pop(stack
, top
, ax
, bx
);
921 next_pc
+= sizeof(struct binary_op
);
924 OP(FILTER_OP_GT_STRING
):
928 res
= (stack_strcmp(stack
, top
, ">") > 0);
929 estack_pop(stack
, top
, ax
, bx
);
931 next_pc
+= sizeof(struct binary_op
);
934 OP(FILTER_OP_LT_STRING
):
938 res
= (stack_strcmp(stack
, top
, "<") < 0);
939 estack_pop(stack
, top
, ax
, bx
);
941 next_pc
+= sizeof(struct binary_op
);
944 OP(FILTER_OP_GE_STRING
):
948 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
949 estack_pop(stack
, top
, ax
, bx
);
951 next_pc
+= sizeof(struct binary_op
);
954 OP(FILTER_OP_LE_STRING
):
958 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
959 estack_pop(stack
, top
, ax
, bx
);
961 next_pc
+= sizeof(struct binary_op
);
965 OP(FILTER_OP_EQ_STAR_GLOB_STRING
):
969 res
= (stack_star_glob_match(stack
, top
, "==") == 0);
970 estack_pop(stack
, top
, ax
, bx
);
972 next_pc
+= sizeof(struct binary_op
);
975 OP(FILTER_OP_NE_STAR_GLOB_STRING
):
979 res
= (stack_star_glob_match(stack
, top
, "!=") != 0);
980 estack_pop(stack
, top
, ax
, bx
);
982 next_pc
+= sizeof(struct binary_op
);
986 OP(FILTER_OP_EQ_S64
):
990 res
= (estack_bx_v
== estack_ax_v
);
991 estack_pop(stack
, top
, ax
, bx
);
993 next_pc
+= sizeof(struct binary_op
);
996 OP(FILTER_OP_NE_S64
):
1000 res
= (estack_bx_v
!= estack_ax_v
);
1001 estack_pop(stack
, top
, ax
, bx
);
1003 next_pc
+= sizeof(struct binary_op
);
1006 OP(FILTER_OP_GT_S64
):
1010 res
= (estack_bx_v
> estack_ax_v
);
1011 estack_pop(stack
, top
, ax
, bx
);
1013 next_pc
+= sizeof(struct binary_op
);
1016 OP(FILTER_OP_LT_S64
):
1020 res
= (estack_bx_v
< estack_ax_v
);
1021 estack_pop(stack
, top
, ax
, bx
);
1023 next_pc
+= sizeof(struct binary_op
);
1026 OP(FILTER_OP_GE_S64
):
1030 res
= (estack_bx_v
>= estack_ax_v
);
1031 estack_pop(stack
, top
, ax
, bx
);
1033 next_pc
+= sizeof(struct binary_op
);
1036 OP(FILTER_OP_LE_S64
):
1040 res
= (estack_bx_v
<= estack_ax_v
);
1041 estack_pop(stack
, top
, ax
, bx
);
1043 next_pc
+= sizeof(struct binary_op
);
1047 OP(FILTER_OP_EQ_DOUBLE
):
1048 OP(FILTER_OP_NE_DOUBLE
):
1049 OP(FILTER_OP_GT_DOUBLE
):
1050 OP(FILTER_OP_LT_DOUBLE
):
1051 OP(FILTER_OP_GE_DOUBLE
):
1052 OP(FILTER_OP_LE_DOUBLE
):
1058 /* Mixed S64-double binary comparators */
1059 OP(FILTER_OP_EQ_DOUBLE_S64
):
1060 OP(FILTER_OP_NE_DOUBLE_S64
):
1061 OP(FILTER_OP_GT_DOUBLE_S64
):
1062 OP(FILTER_OP_LT_DOUBLE_S64
):
1063 OP(FILTER_OP_GE_DOUBLE_S64
):
1064 OP(FILTER_OP_LE_DOUBLE_S64
):
1065 OP(FILTER_OP_EQ_S64_DOUBLE
):
1066 OP(FILTER_OP_NE_S64_DOUBLE
):
1067 OP(FILTER_OP_GT_S64_DOUBLE
):
1068 OP(FILTER_OP_LT_S64_DOUBLE
):
1069 OP(FILTER_OP_GE_S64_DOUBLE
):
1070 OP(FILTER_OP_LE_S64_DOUBLE
):
1075 OP(FILTER_OP_BIT_RSHIFT
):
1079 /* Catch undefined behavior. */
1080 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1084 res
= ((uint64_t) estack_bx_v
>> (uint32_t) estack_ax_v
);
1085 estack_pop(stack
, top
, ax
, bx
);
1087 next_pc
+= sizeof(struct binary_op
);
1090 OP(FILTER_OP_BIT_LSHIFT
):
1094 /* Catch undefined behavior. */
1095 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1099 res
= ((uint64_t) estack_bx_v
<< (uint32_t) estack_ax_v
);
1100 estack_pop(stack
, top
, ax
, bx
);
1102 next_pc
+= sizeof(struct binary_op
);
1105 OP(FILTER_OP_BIT_AND
):
1109 res
= ((uint64_t) estack_bx_v
& (uint64_t) estack_ax_v
);
1110 estack_pop(stack
, top
, ax
, bx
);
1112 next_pc
+= sizeof(struct binary_op
);
1115 OP(FILTER_OP_BIT_OR
):
1119 res
= ((uint64_t) estack_bx_v
| (uint64_t) estack_ax_v
);
1120 estack_pop(stack
, top
, ax
, bx
);
1122 next_pc
+= sizeof(struct binary_op
);
1125 OP(FILTER_OP_BIT_XOR
):
1129 res
= ((uint64_t) estack_bx_v
^ (uint64_t) estack_ax_v
);
1130 estack_pop(stack
, top
, ax
, bx
);
1132 next_pc
+= sizeof(struct binary_op
);
1137 OP(FILTER_OP_UNARY_PLUS
):
1138 OP(FILTER_OP_UNARY_MINUS
):
1139 OP(FILTER_OP_UNARY_NOT
):
1140 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1141 (unsigned int) *(filter_opcode_t
*) pc
);
1146 OP(FILTER_OP_UNARY_BIT_NOT
):
1148 estack_ax_v
= ~(uint64_t) estack_ax_v
;
1149 next_pc
+= sizeof(struct unary_op
);
1153 OP(FILTER_OP_UNARY_PLUS_S64
):
1155 next_pc
+= sizeof(struct unary_op
);
1158 OP(FILTER_OP_UNARY_MINUS_S64
):
1160 estack_ax_v
= -estack_ax_v
;
1161 next_pc
+= sizeof(struct unary_op
);
1164 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
1165 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
1170 OP(FILTER_OP_UNARY_NOT_S64
):
1172 estack_ax_v
= !estack_ax_v
;
1173 next_pc
+= sizeof(struct unary_op
);
1176 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
1185 struct logical_op
*insn
= (struct logical_op
*) pc
;
1187 /* If AX is 0, skip and evaluate to 0 */
1188 if (unlikely(estack_ax_v
== 0)) {
1189 dbg_printk("Jumping to bytecode offset %u\n",
1190 (unsigned int) insn
->skip_offset
);
1191 next_pc
= start_pc
+ insn
->skip_offset
;
1193 /* Pop 1 when jump not taken */
1194 estack_pop(stack
, top
, ax
, bx
);
1195 next_pc
+= sizeof(struct logical_op
);
1201 struct logical_op
*insn
= (struct logical_op
*) pc
;
1203 /* If AX is nonzero, skip and evaluate to 1 */
1205 if (unlikely(estack_ax_v
!= 0)) {
1207 dbg_printk("Jumping to bytecode offset %u\n",
1208 (unsigned int) insn
->skip_offset
);
1209 next_pc
= start_pc
+ insn
->skip_offset
;
1211 /* Pop 1 when jump not taken */
1212 estack_pop(stack
, top
, ax
, bx
);
1213 next_pc
+= sizeof(struct logical_op
);
1219 /* load field ref */
1220 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
1222 struct load_op
*insn
= (struct load_op
*) pc
;
1223 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1225 dbg_printk("load field ref offset %u type string\n",
1227 estack_push(stack
, top
, ax
, bx
);
1228 estack_ax(stack
, top
)->u
.s
.str
=
1229 *(const char * const *) &filter_stack_data
[ref
->offset
];
1230 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1231 dbg_printk("Filter warning: loading a NULL string.\n");
1235 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1236 estack_ax(stack
, top
)->u
.s
.literal_type
=
1237 ESTACK_STRING_LITERAL_TYPE_NONE
;
1238 estack_ax(stack
, top
)->u
.s
.user
= 0;
1239 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1240 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1244 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
1246 struct load_op
*insn
= (struct load_op
*) pc
;
1247 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1249 dbg_printk("load field ref offset %u type sequence\n",
1251 estack_push(stack
, top
, ax
, bx
);
1252 estack_ax(stack
, top
)->u
.s
.seq_len
=
1253 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1254 estack_ax(stack
, top
)->u
.s
.str
=
1255 *(const char **) (&filter_stack_data
[ref
->offset
1256 + sizeof(unsigned long)]);
1257 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1258 dbg_printk("Filter warning: loading a NULL sequence.\n");
1262 estack_ax(stack
, top
)->u
.s
.literal_type
=
1263 ESTACK_STRING_LITERAL_TYPE_NONE
;
1264 estack_ax(stack
, top
)->u
.s
.user
= 0;
1265 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1269 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
1271 struct load_op
*insn
= (struct load_op
*) pc
;
1272 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1274 dbg_printk("load field ref offset %u type s64\n",
1276 estack_push(stack
, top
, ax
, bx
);
1278 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
1279 dbg_printk("ref load s64 %lld\n",
1280 (long long) estack_ax_v
);
1281 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1285 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
1291 /* load from immediate operand */
1292 OP(FILTER_OP_LOAD_STRING
):
1294 struct load_op
*insn
= (struct load_op
*) pc
;
1296 dbg_printk("load string %s\n", insn
->data
);
1297 estack_push(stack
, top
, ax
, bx
);
1298 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1299 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1300 estack_ax(stack
, top
)->u
.s
.literal_type
=
1301 ESTACK_STRING_LITERAL_TYPE_PLAIN
;
1302 estack_ax(stack
, top
)->u
.s
.user
= 0;
1303 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1307 OP(FILTER_OP_LOAD_STAR_GLOB_STRING
):
1309 struct load_op
*insn
= (struct load_op
*) pc
;
1311 dbg_printk("load globbing pattern %s\n", insn
->data
);
1312 estack_push(stack
, top
, ax
, bx
);
1313 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1314 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1315 estack_ax(stack
, top
)->u
.s
.literal_type
=
1316 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
;
1317 estack_ax(stack
, top
)->u
.s
.user
= 0;
1318 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1322 OP(FILTER_OP_LOAD_S64
):
1324 struct load_op
*insn
= (struct load_op
*) pc
;
1326 estack_push(stack
, top
, ax
, bx
);
1327 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
1328 dbg_printk("load s64 %lld\n",
1329 (long long) estack_ax_v
);
1330 next_pc
+= sizeof(struct load_op
)
1331 + sizeof(struct literal_numeric
);
1335 OP(FILTER_OP_LOAD_DOUBLE
):
1342 OP(FILTER_OP_CAST_TO_S64
):
1343 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
1344 (unsigned int) *(filter_opcode_t
*) pc
);
1348 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
1354 OP(FILTER_OP_CAST_NOP
):
1356 next_pc
+= sizeof(struct cast_op
);
1360 /* get context ref */
1361 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
1363 struct load_op
*insn
= (struct load_op
*) pc
;
1364 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1365 struct lttng_ctx_field
*ctx_field
;
1366 union lttng_ctx_value v
;
1368 dbg_printk("get context ref offset %u type string\n",
1370 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1371 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1372 estack_push(stack
, top
, ax
, bx
);
1373 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
1374 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1375 dbg_printk("Filter warning: loading a NULL string.\n");
1379 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1380 estack_ax(stack
, top
)->u
.s
.literal_type
=
1381 ESTACK_STRING_LITERAL_TYPE_NONE
;
1382 estack_ax(stack
, top
)->u
.s
.user
= 0;
1383 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1384 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1388 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
1390 struct load_op
*insn
= (struct load_op
*) pc
;
1391 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1392 struct lttng_ctx_field
*ctx_field
;
1393 union lttng_ctx_value v
;
1395 dbg_printk("get context ref offset %u type s64\n",
1397 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1398 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1399 estack_push(stack
, top
, ax
, bx
);
1400 estack_ax_v
= v
.s64
;
1401 dbg_printk("ref get context s64 %lld\n",
1402 (long long) estack_ax_v
);
1403 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1407 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
1413 /* load userspace field ref */
1414 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
1416 struct load_op
*insn
= (struct load_op
*) pc
;
1417 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1419 dbg_printk("load field ref offset %u type user string\n",
1421 estack_push(stack
, top
, ax
, bx
);
1422 estack_ax(stack
, top
)->u
.s
.user_str
=
1423 *(const char * const *) &filter_stack_data
[ref
->offset
];
1424 if (unlikely(!estack_ax(stack
, top
)->u
.s
.user_str
)) {
1425 dbg_printk("Filter warning: loading a NULL string.\n");
1429 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1430 estack_ax(stack
, top
)->u
.s
.literal_type
=
1431 ESTACK_STRING_LITERAL_TYPE_NONE
;
1432 estack_ax(stack
, top
)->u
.s
.user
= 1;
1433 dbg_load_ref_user_str_printk(estack_ax(stack
, top
));
1434 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1438 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
1440 struct load_op
*insn
= (struct load_op
*) pc
;
1441 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1443 dbg_printk("load field ref offset %u type user sequence\n",
1445 estack_push(stack
, top
, ax
, bx
);
1446 estack_ax(stack
, top
)->u
.s
.seq_len
=
1447 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1448 estack_ax(stack
, top
)->u
.s
.user_str
=
1449 *(const char **) (&filter_stack_data
[ref
->offset
1450 + sizeof(unsigned long)]);
1451 if (unlikely(!estack_ax(stack
, top
)->u
.s
.user_str
)) {
1452 dbg_printk("Filter warning: loading a NULL sequence.\n");
1456 estack_ax(stack
, top
)->u
.s
.literal_type
=
1457 ESTACK_STRING_LITERAL_TYPE_NONE
;
1458 estack_ax(stack
, top
)->u
.s
.user
= 1;
1459 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1463 OP(FILTER_OP_GET_CONTEXT_ROOT
):
1465 dbg_printk("op get context root\n");
1466 estack_push(stack
, top
, ax
, bx
);
1467 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_CONTEXT
;
1468 /* "field" only needed for variants. */
1469 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1470 next_pc
+= sizeof(struct load_op
);
1474 OP(FILTER_OP_GET_APP_CONTEXT_ROOT
):
1480 OP(FILTER_OP_GET_PAYLOAD_ROOT
):
1482 dbg_printk("op get app payload root\n");
1483 estack_push(stack
, top
, ax
, bx
);
1484 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_PAYLOAD
;
1485 estack_ax(stack
, top
)->u
.ptr
.ptr
= filter_stack_data
;
1486 /* "field" only needed for variants. */
1487 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1488 next_pc
+= sizeof(struct load_op
);
1492 OP(FILTER_OP_GET_SYMBOL
):
1494 dbg_printk("op get symbol\n");
1495 switch (estack_ax(stack
, top
)->u
.ptr
.type
) {
1497 printk(KERN_WARNING
"Nested fields not implemented yet.\n");
1500 case LOAD_ROOT_CONTEXT
:
1501 case LOAD_ROOT_APP_CONTEXT
:
1502 case LOAD_ROOT_PAYLOAD
:
1504 * symbol lookup is performed by
1510 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1514 OP(FILTER_OP_GET_SYMBOL_FIELD
):
1517 * Used for first variant encountered in a
1518 * traversal. Variants are not implemented yet.
1524 OP(FILTER_OP_GET_INDEX_U16
):
1526 struct load_op
*insn
= (struct load_op
*) pc
;
1527 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1529 dbg_printk("op get index u16\n");
1530 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1533 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1534 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1538 OP(FILTER_OP_GET_INDEX_U64
):
1540 struct load_op
*insn
= (struct load_op
*) pc
;
1541 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1543 dbg_printk("op get index u64\n");
1544 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1547 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1548 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1552 OP(FILTER_OP_LOAD_FIELD
):
1554 dbg_printk("op load field\n");
1555 ret
= dynamic_load_field(estack_ax(stack
, top
));
1558 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1559 next_pc
+= sizeof(struct load_op
);
1563 OP(FILTER_OP_LOAD_FIELD_S8
):
1565 dbg_printk("op load field s8\n");
1567 estack_ax_v
= *(int8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1568 next_pc
+= sizeof(struct load_op
);
1571 OP(FILTER_OP_LOAD_FIELD_S16
):
1573 dbg_printk("op load field s16\n");
1575 estack_ax_v
= *(int16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1576 next_pc
+= sizeof(struct load_op
);
1579 OP(FILTER_OP_LOAD_FIELD_S32
):
1581 dbg_printk("op load field s32\n");
1583 estack_ax_v
= *(int32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1584 next_pc
+= sizeof(struct load_op
);
1587 OP(FILTER_OP_LOAD_FIELD_S64
):
1589 dbg_printk("op load field s64\n");
1591 estack_ax_v
= *(int64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1592 next_pc
+= sizeof(struct load_op
);
1595 OP(FILTER_OP_LOAD_FIELD_U8
):
1597 dbg_printk("op load field u8\n");
1599 estack_ax_v
= *(uint8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1600 next_pc
+= sizeof(struct load_op
);
1603 OP(FILTER_OP_LOAD_FIELD_U16
):
1605 dbg_printk("op load field u16\n");
1607 estack_ax_v
= *(uint16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1608 next_pc
+= sizeof(struct load_op
);
1611 OP(FILTER_OP_LOAD_FIELD_U32
):
1613 dbg_printk("op load field u32\n");
1615 estack_ax_v
= *(uint32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1616 next_pc
+= sizeof(struct load_op
);
1619 OP(FILTER_OP_LOAD_FIELD_U64
):
1621 dbg_printk("op load field u64\n");
1623 estack_ax_v
= *(uint64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1624 next_pc
+= sizeof(struct load_op
);
1627 OP(FILTER_OP_LOAD_FIELD_DOUBLE
):
1633 OP(FILTER_OP_LOAD_FIELD_STRING
):
1637 dbg_printk("op load field string\n");
1638 str
= (const char *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1639 estack_ax(stack
, top
)->u
.s
.str
= str
;
1640 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1641 dbg_printk("Filter warning: loading a NULL string.\n");
1645 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1646 estack_ax(stack
, top
)->u
.s
.literal_type
=
1647 ESTACK_STRING_LITERAL_TYPE_NONE
;
1648 estack_ax(stack
, top
)->u
.s
.user
= 0;
1649 next_pc
+= sizeof(struct load_op
);
1653 OP(FILTER_OP_LOAD_FIELD_SEQUENCE
):
1657 dbg_printk("op load field string sequence\n");
1658 ptr
= estack_ax(stack
, top
)->u
.ptr
.ptr
;
1659 estack_ax(stack
, top
)->u
.s
.seq_len
= *(unsigned long *) ptr
;
1660 estack_ax(stack
, top
)->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
1661 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1662 dbg_printk("Filter warning: loading a NULL sequence.\n");
1666 estack_ax(stack
, top
)->u
.s
.literal_type
=
1667 ESTACK_STRING_LITERAL_TYPE_NONE
;
1668 estack_ax(stack
, top
)->u
.s
.user
= 0;
1669 next_pc
+= sizeof(struct load_op
);
1675 /* return 0 (discard) on error */