1 /* SPDX-License-Identifier: MIT
3 * lttng-filter-specialize.c
5 * LTTng modules filter code specializer.
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 #include <linux/slab.h>
11 #include <lttng-filter.h>
12 #include "lib/align.h"
14 #include <wrapper/compiler_attributes.h>
/*
 * bytecode_reserve_data: reserve @len bytes, aligned on @align, within the
 * runtime bytecode data area. Grows runtime->data with krealloc() (new size
 * rounded up to the next power of two, at least doubling) and zeroes the
 * newly allocated tail. Returns the byte offset of the reserved region
 * within runtime->data (held in `ret`).
 * NOTE(review): extraction lost some interior lines of this function
 * (the error return after the FILTER_MAX_DATA_LEN check, the `newptr`
 * declaration and its NULL check, and the final `return ret;`) -- confirm
 * against the upstream lttng-modules source.
 */
16 static ssize_t
bytecode_reserve_data(struct bytecode_runtime
*runtime
,
17 size_t align
, size_t len
)
20 size_t padding
= offset_align(runtime
->data_len
, align
);
21 size_t new_len
= runtime
->data_len
+ padding
+ len
;
22 size_t new_alloc_len
= new_len
;
23 size_t old_alloc_len
= runtime
->data_alloc_len
;
/* Refuse to grow the data area beyond the hard limit. */
25 if (new_len
> FILTER_MAX_DATA_LEN
)
/* Grow the backing allocation when the request does not fit. */
28 if (new_alloc_len
> old_alloc_len
) {
32 max_t(size_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
33 newptr
= krealloc(runtime
->data
, new_alloc_len
, GFP_KERNEL
);
36 runtime
->data
= newptr
;
37 /* We zero directly the memory from start of allocation. */
38 memset(&runtime
->data
[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
39 runtime
->data_alloc_len
= new_alloc_len
;
/* Commit the alignment padding, record the aligned offset, then commit @len. */
41 runtime
->data_len
+= padding
;
42 ret
= runtime
->data_len
;
43 runtime
->data_len
+= len
;
/*
 * bytecode_push_data: append @len bytes from @p into the runtime data area,
 * aligned on @align. Space management is delegated to
 * bytecode_reserve_data(); on success the data is copied at the reserved
 * offset, which is the return value.
 * NOTE(review): the negative-offset error check between the reserve and the
 * memcpy was lost in extraction -- confirm against upstream.
 */
47 static ssize_t
bytecode_push_data(struct bytecode_runtime
*runtime
,
48 const void *p
, size_t align
, size_t len
)
52 offset
= bytecode_reserve_data(runtime
, align
, len
);
55 memcpy(&runtime
->data
[offset
], p
, len
);
/*
 * specialize_load_field: rewrite a generic "load field" instruction into a
 * type-specific opcode based on the object type recorded at the top of the
 * verifier's virtual stack, and update that stack entry's register type
 * (integer loads become REG_S64, string loads REG_STRING). For integer
 * types, the non-reversed-byte-order opcode is selected when rev_bo is
 * false (the reversed variant presumably lives on the dropped else branch).
 * NOTE(review): extraction lost the second parameter (presumably
 * `struct load_op *insn`), many case labels, `break` statements, and error
 * returns -- confirm against upstream.
 */
59 static int specialize_load_field(struct vstack_entry
*stack_top
,
64 switch (stack_top
->load
.type
) {
/* Root pointers cannot be loaded directly: a field name is required first. */
67 case LOAD_ROOT_CONTEXT
:
68 case LOAD_ROOT_APP_CONTEXT
:
69 case LOAD_ROOT_PAYLOAD
:
71 dbg_printk("Filter warning: cannot load root, missing field name.\n");
/* Dispatch on the concrete object type sitting on the virtual stack. */
75 switch (stack_top
->load
.object_type
) {
77 dbg_printk("op load field s8\n");
78 stack_top
->type
= REG_S64
;
79 if (!stack_top
->load
.rev_bo
)
80 insn
->op
= FILTER_OP_LOAD_FIELD_S8
;
83 dbg_printk("op load field s16\n");
84 stack_top
->type
= REG_S64
;
85 if (!stack_top
->load
.rev_bo
)
86 insn
->op
= FILTER_OP_LOAD_FIELD_S16
;
89 dbg_printk("op load field s32\n");
90 stack_top
->type
= REG_S64
;
91 if (!stack_top
->load
.rev_bo
)
92 insn
->op
= FILTER_OP_LOAD_FIELD_S32
;
95 dbg_printk("op load field s64\n");
96 stack_top
->type
= REG_S64
;
97 if (!stack_top
->load
.rev_bo
)
98 insn
->op
= FILTER_OP_LOAD_FIELD_S64
;
/* u8 needs no byte-order variant: single byte. */
101 dbg_printk("op load field u8\n");
102 stack_top
->type
= REG_S64
;
103 insn
->op
= FILTER_OP_LOAD_FIELD_U8
;
105 case OBJECT_TYPE_U16
:
106 dbg_printk("op load field u16\n");
107 stack_top
->type
= REG_S64
;
108 if (!stack_top
->load
.rev_bo
)
109 insn
->op
= FILTER_OP_LOAD_FIELD_U16
;
111 case OBJECT_TYPE_U32
:
112 dbg_printk("op load field u32\n");
113 stack_top
->type
= REG_S64
;
114 if (!stack_top
->load
.rev_bo
)
115 insn
->op
= FILTER_OP_LOAD_FIELD_U32
;
117 case OBJECT_TYPE_U64
:
118 dbg_printk("op load field u64\n");
119 stack_top
->type
= REG_S64
;
120 if (!stack_top
->load
.rev_bo
)
121 insn
->op
= FILTER_OP_LOAD_FIELD_U64
;
/* Floating point is not supported by the in-kernel filter. */
/* NOTE(review): the message below ends with a doubled "\n\n" -- likely a typo upstream. */
123 case OBJECT_TYPE_DOUBLE
:
124 printk(KERN_WARNING
"Double type unsupported\n\n");
127 case OBJECT_TYPE_STRING
:
128 dbg_printk("op load field string\n");
129 stack_top
->type
= REG_STRING
;
130 insn
->op
= FILTER_OP_LOAD_FIELD_STRING
;
132 case OBJECT_TYPE_STRING_SEQUENCE
:
133 dbg_printk("op load field string sequence\n");
134 stack_top
->type
= REG_STRING
;
135 insn
->op
= FILTER_OP_LOAD_FIELD_SEQUENCE
;
/* Compound/nested types cannot be loaded as a single scalar value. */
137 case OBJECT_TYPE_DYNAMIC
:
140 case OBJECT_TYPE_SEQUENCE
:
141 case OBJECT_TYPE_ARRAY
:
142 case OBJECT_TYPE_STRUCT
:
143 case OBJECT_TYPE_VARIANT
:
144 printk(KERN_WARNING
"Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
/*
 * specialize_get_index_object_type: map an integer element's
 * (@signedness, @elem_len) pair onto the matching OBJECT_TYPE_* constant,
 * written through @otype. @elem_len is a bit width (the S8/U8 .. S64/U64
 * pairs below imply 8/16/32/64).
 * NOTE(review): the switch/if lines selecting among these assignments, and
 * the return statements, were lost in extraction -- confirm against
 * upstream.
 */
154 static int specialize_get_index_object_type(enum object_type
*otype
,
155 int signedness
, uint32_t elem_len
)
160 *otype
= OBJECT_TYPE_S8
;
162 *otype
= OBJECT_TYPE_U8
;
166 *otype
= OBJECT_TYPE_S16
;
168 *otype
= OBJECT_TYPE_U16
;
172 *otype
= OBJECT_TYPE_S32
;
174 *otype
= OBJECT_TYPE_U32
;
178 *otype
= OBJECT_TYPE_S64
;
180 *otype
= OBJECT_TYPE_U64
;
/*
 * specialize_get_index: specialize a get-index instruction applied to the
 * object at the top of the virtual stack. For arrays and sequences of
 * integers, computes the element byte offset (and, for arrays, the total
 * array byte length) into a struct filter_get_index_data, pushes that
 * descriptor into the runtime data area, and patches the instruction's
 * inline index operand (u16 or u64 variant) with the resulting data
 * offset. Also propagates the element's reverse-byte-order flag into the
 * virtual stack state.
 * NOTE(review): case labels, `break`s, error returns, and the closing
 * braces were lost in extraction -- confirm against upstream.
 */
188 static int specialize_get_index(struct bytecode_runtime
*runtime
,
189 struct load_op
*insn
, uint64_t index
,
190 struct vstack_entry
*stack_top
,
194 struct filter_get_index_data gid
;
197 memset(&gid
, 0, sizeof(gid
));
198 switch (stack_top
->load
.type
) {
200 switch (stack_top
->load
.object_type
) {
/* Array: static length known, so the index can be bounds-checked here. */
201 case OBJECT_TYPE_ARRAY
:
203 const struct lttng_event_field
*field
;
204 uint32_t elem_len
, num_elems
;
207 field
= stack_top
->load
.field
;
208 elem_len
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.size
;
209 signedness
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.signedness
;
210 num_elems
= field
->type
.u
.array
.length
;
211 if (index
>= num_elems
) {
215 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
216 signedness
, elem_len
);
/* elem_len is in bits; convert to bytes for offsets/lengths. */
219 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
220 gid
.array_len
= num_elems
* (elem_len
/ CHAR_BIT
);
221 gid
.elem
.type
= stack_top
->load
.object_type
;
222 gid
.elem
.len
= elem_len
;
223 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
224 gid
.elem
.rev_bo
= true;
225 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
/* Sequence: length only known at runtime, so no static bounds check. */
228 case OBJECT_TYPE_SEQUENCE
:
230 const struct lttng_event_field
*field
;
234 field
= stack_top
->load
.field
;
235 elem_len
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.size
;
236 signedness
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.signedness
;
237 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
238 signedness
, elem_len
);
241 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
242 gid
.elem
.type
= stack_top
->load
.object_type
;
243 gid
.elem
.len
= elem_len
;
244 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
245 gid
.elem
.rev_bo
= true;
246 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
249 case OBJECT_TYPE_STRUCT
:
250 /* Only generated by the specialize phase. */
251 case OBJECT_TYPE_VARIANT
:
254 printk(KERN_WARNING
"Unexpected get index type %d",
255 (int) stack_top
->load
.object_type
);
/* Indexing directly on a root object is not supported. */
260 case LOAD_ROOT_CONTEXT
:
261 case LOAD_ROOT_APP_CONTEXT
:
262 case LOAD_ROOT_PAYLOAD
:
263 printk(KERN_WARNING
"Index lookup for root field not implemented yet.\n");
/* Store the descriptor in the data area and patch the instruction operand. */
267 data_offset
= bytecode_push_data(runtime
, &gid
,
,
268 __alignof__(gid
), sizeof(gid
));
269 if (data_offset
< 0) {
275 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
278 ((struct get_index_u64
*) insn
->data
)->index
= data_offset
;
/*
 * specialize_context_lookup_name: resolve the symbol referenced by a
 * get_symbol instruction into a context-field index. The symbol name is
 * stored in the bytecode's relocation table, located at
 * bc.data + bc.reloc_offset + the instruction's symbol offset; the lookup
 * itself is delegated to lttng_get_context_index() against the static
 * context table.
 */
291 static int specialize_context_lookup_name(struct bytecode_runtime
*bytecode
,
292 struct load_op
*insn
)
/* Offset of the symbol name within the relocation data. */
297 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
298 name
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ offset
;
299 return lttng_get_context_index(lttng_static_ctx
, name
);
/*
 * specialize_load_object: translate an event field's type description into
 * the vstack_load object type used by the specializer. Integers and enum
 * containers collapse to OBJECT_TYPE_S64/U64 depending on signedness
 * (byte order normalized, rev_bo = false); arrays/sequences of
 * non-encoded integers become OBJECT_TYPE_ARRAY/SEQUENCE, while encoded
 * (text) ones become OBJECT_TYPE_STRING_SEQUENCE; plain strings become
 * OBJECT_TYPE_STRING. Bitfield arrays/sequences, structures, and unknown
 * types are rejected with a warning.
 * NOTE(review): case labels, braces, and error returns were lost in
 * extraction; the @is_context parameter's use is not visible here --
 * confirm against upstream.
 */
302 static int specialize_load_object(const struct lttng_event_field
*field
,
303 struct vstack_load
*load
, bool is_context
)
305 load
->type
= LOAD_OBJECT
;
307 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
309 switch (field
->type
.atype
) {
311 if (field
->type
.u
.basic
.integer
.signedness
)
312 load
->object_type
= OBJECT_TYPE_S64
;
314 load
->object_type
= OBJECT_TYPE_U64
;
315 load
->rev_bo
= false;
/* Enumerations are classified by their integer container type. */
319 const struct lttng_integer_type
*itype
=
320 &field
->type
.u
.basic
.enumeration
.container_type
;
322 if (itype
->signedness
)
323 load
->object_type
= OBJECT_TYPE_S64
;
325 load
->object_type
= OBJECT_TYPE_U64
;
326 load
->rev_bo
= false;
/* Arrays: only integer element types are supported for nesting. */
330 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
331 printk(KERN_WARNING
"Array nesting only supports integer types.\n");
335 load
->object_type
= OBJECT_TYPE_STRING
;
/* Non-encoded integer elements => raw array; encoded => text sequence. */
337 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
338 load
->object_type
= OBJECT_TYPE_ARRAY
;
341 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
/* Sequences: same constraints as arrays. */
346 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
347 printk(KERN_WARNING
"Sequence nesting only supports integer types.\n");
351 load
->object_type
= OBJECT_TYPE_STRING
;
353 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
354 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
357 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
/* Unsupported and unknown types fall through to warnings below. */
361 case atype_array_bitfield
:
362 printk(KERN_WARNING
"Bitfield array type is not supported.\n");
364 case atype_sequence_bitfield
:
365 printk(KERN_WARNING
"Bitfield sequence type is not supported.\n");
368 load
->object_type
= OBJECT_TYPE_STRING
;
371 printk(KERN_WARNING
"Structure type cannot be loaded.\n");
374 printk(KERN_WARNING
"Unknown type: %d", (int) field
->type
.atype
);
/*
 * specialize_context_lookup: specialize a get_symbol instruction that
 * performs a context-field lookup. Resolves the symbol name to an index in
 * the static context table, derives the load object type via
 * specialize_load_object(), then rewrites the instruction into
 * FILTER_OP_GET_INDEX_U16 whose operand points at a filter_get_index_data
 * descriptor pushed into the runtime data area (gid.ctx_index presumably
 * set on a dropped line).
 * NOTE(review): "<tng_static_ctx" below appears to be extraction garbling
 * of "&lttng_static_ctx" -- confirm against upstream. Error-check lines
 * after the lookups were also lost.
 */
380 static int specialize_context_lookup(struct bytecode_runtime
*runtime
,
381 struct load_op
*insn
,
382 struct vstack_load
*load
)
385 struct lttng_ctx_field
*ctx_field
;
386 struct lttng_event_field
*field
;
387 struct filter_get_index_data gid
;
390 idx
= specialize_context_lookup_name(runtime
, insn
);
394 ctx_field
= <tng_static_ctx
->fields
[idx
];
395 field
= &ctx_field
->event_field
;
396 ret
= specialize_load_object(field
, load
, true);
399 /* Specialize each get_symbol into a get_index. */
400 insn
->op
= FILTER_OP_GET_INDEX_U16
;
401 memset(&gid
, 0, sizeof(gid
));
403 gid
.elem
.type
= load
->object_type
;
404 gid
.elem
.rev_bo
= load
->rev_bo
;
405 data_offset
= bytecode_push_data(runtime
, &gid
,
406 __alignof__(gid
), sizeof(gid
));
407 if (data_offset
< 0) {
/* Patch the instruction operand with the descriptor's data offset. */
410 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
/*
 * specialize_event_payload_lookup: specialize a get_symbol instruction
 * that looks up a field of the event payload. Resolves the symbol name
 * from the bytecode relocation table, walks the event descriptor's field
 * list by name while accumulating each preceding field's byte offset on
 * the interpreter stack frame (integers: sizeof(int64_t); sequences and
 * bitfield arrays/sequences: a length word plus a pointer; strings: a
 * pointer), then rewrites the instruction into FILTER_OP_GET_INDEX_U16
 * whose operand points at a filter_get_index_data descriptor pushed into
 * the runtime data area.
 * NOTE(review): case labels, the "found" handling, and error returns were
 * lost in extraction -- confirm against upstream.
 */
414 static int specialize_event_payload_lookup(struct lttng_event
*event
,
415 struct bytecode_runtime
*runtime
,
416 struct load_op
*insn
,
417 struct vstack_load
*load
)
421 const struct lttng_event_desc
*desc
= event
->desc
;
422 unsigned int i
, nr_fields
;
424 uint32_t field_offset
= 0;
425 const struct lttng_event_field
*field
;
427 struct filter_get_index_data gid
;
430 nr_fields
= desc
->nr_fields
;
/* Symbol name lives in the bytecode's relocation data. */
431 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
432 name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
/* Linear scan of the event's fields, accumulating stack-frame offsets. */
433 for (i
= 0; i
< nr_fields
; i
++) {
434 field
= &desc
->fields
[i
];
435 if (!strcmp(field
->name
, name
)) {
439 /* compute field offset on stack */
440 switch (field
->type
.atype
) {
443 field_offset
+= sizeof(int64_t);
447 case atype_array_bitfield
:
448 case atype_sequence_bitfield
:
449 field_offset
+= sizeof(unsigned long);
450 field_offset
+= sizeof(void *);
453 field_offset
+= sizeof(void *);
465 ret
= specialize_load_object(field
, load
, false);
469 /* Specialize each get_symbol into a get_index. */
470 insn
->op
= FILTER_OP_GET_INDEX_U16
;
471 memset(&gid
, 0, sizeof(gid
));
472 gid
.offset
= field_offset
;
473 gid
.elem
.type
= load
->object_type
;
474 gid
.elem
.rev_bo
= load
->rev_bo
;
475 data_offset
= bytecode_push_data(runtime
, &gid
,
476 __alignof__(gid
), sizeof(gid
));
477 if (data_offset
< 0) {
/* Patch the instruction operand with the descriptor's data offset. */
481 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
487 int lttng_filter_specialize_bytecode(struct lttng_event
*event
,
488 struct bytecode_runtime
*bytecode
)
490 void *pc
, *next_pc
, *start_pc
;
492 struct vstack _stack
;
493 struct vstack
*stack
= &_stack
;
497 start_pc
= &bytecode
->code
[0];
498 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
500 switch (*(filter_opcode_t
*) pc
) {
501 case FILTER_OP_UNKNOWN
:
503 printk(KERN_WARNING
"unknown bytecode op %u\n",
504 (unsigned int) *(filter_opcode_t
*) pc
);
508 case FILTER_OP_RETURN
:
509 case FILTER_OP_RETURN_S64
:
518 case FILTER_OP_MINUS
:
519 printk(KERN_WARNING
"unsupported bytecode op %u\n",
520 (unsigned int) *(filter_opcode_t
*) pc
);
526 struct binary_op
*insn
= (struct binary_op
*) pc
;
528 switch(vstack_ax(stack
)->type
) {
530 printk(KERN_WARNING
"unknown register type\n");
535 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
536 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
538 insn
->op
= FILTER_OP_EQ_STRING
;
540 case REG_STAR_GLOB_STRING
:
541 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
544 if (vstack_bx(stack
)->type
== REG_S64
)
545 insn
->op
= FILTER_OP_EQ_S64
;
547 insn
->op
= FILTER_OP_EQ_DOUBLE_S64
;
550 if (vstack_bx(stack
)->type
== REG_S64
)
551 insn
->op
= FILTER_OP_EQ_S64_DOUBLE
;
553 insn
->op
= FILTER_OP_EQ_DOUBLE
;
557 if (vstack_pop(stack
)) {
561 vstack_ax(stack
)->type
= REG_S64
;
562 next_pc
+= sizeof(struct binary_op
);
568 struct binary_op
*insn
= (struct binary_op
*) pc
;
570 switch(vstack_ax(stack
)->type
) {
572 printk(KERN_WARNING
"unknown register type\n");
577 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
578 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
580 insn
->op
= FILTER_OP_NE_STRING
;
582 case REG_STAR_GLOB_STRING
:
583 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
586 if (vstack_bx(stack
)->type
== REG_S64
)
587 insn
->op
= FILTER_OP_NE_S64
;
589 insn
->op
= FILTER_OP_NE_DOUBLE_S64
;
592 if (vstack_bx(stack
)->type
== REG_S64
)
593 insn
->op
= FILTER_OP_NE_S64_DOUBLE
;
595 insn
->op
= FILTER_OP_NE_DOUBLE
;
599 if (vstack_pop(stack
)) {
603 vstack_ax(stack
)->type
= REG_S64
;
604 next_pc
+= sizeof(struct binary_op
);
610 struct binary_op
*insn
= (struct binary_op
*) pc
;
612 switch(vstack_ax(stack
)->type
) {
614 printk(KERN_WARNING
"unknown register type\n");
618 case REG_STAR_GLOB_STRING
:
619 printk(KERN_WARNING
"invalid register type for > binary operator\n");
623 insn
->op
= FILTER_OP_GT_STRING
;
626 if (vstack_bx(stack
)->type
== REG_S64
)
627 insn
->op
= FILTER_OP_GT_S64
;
629 insn
->op
= FILTER_OP_GT_DOUBLE_S64
;
632 if (vstack_bx(stack
)->type
== REG_S64
)
633 insn
->op
= FILTER_OP_GT_S64_DOUBLE
;
635 insn
->op
= FILTER_OP_GT_DOUBLE
;
639 if (vstack_pop(stack
)) {
643 vstack_ax(stack
)->type
= REG_S64
;
644 next_pc
+= sizeof(struct binary_op
);
650 struct binary_op
*insn
= (struct binary_op
*) pc
;
652 switch(vstack_ax(stack
)->type
) {
654 printk(KERN_WARNING
"unknown register type\n");
658 case REG_STAR_GLOB_STRING
:
659 printk(KERN_WARNING
"invalid register type for < binary operator\n");
663 insn
->op
= FILTER_OP_LT_STRING
;
666 if (vstack_bx(stack
)->type
== REG_S64
)
667 insn
->op
= FILTER_OP_LT_S64
;
669 insn
->op
= FILTER_OP_LT_DOUBLE_S64
;
672 if (vstack_bx(stack
)->type
== REG_S64
)
673 insn
->op
= FILTER_OP_LT_S64_DOUBLE
;
675 insn
->op
= FILTER_OP_LT_DOUBLE
;
679 if (vstack_pop(stack
)) {
683 vstack_ax(stack
)->type
= REG_S64
;
684 next_pc
+= sizeof(struct binary_op
);
690 struct binary_op
*insn
= (struct binary_op
*) pc
;
692 switch(vstack_ax(stack
)->type
) {
694 printk(KERN_WARNING
"unknown register type\n");
698 case REG_STAR_GLOB_STRING
:
699 printk(KERN_WARNING
"invalid register type for >= binary operator\n");
703 insn
->op
= FILTER_OP_GE_STRING
;
706 if (vstack_bx(stack
)->type
== REG_S64
)
707 insn
->op
= FILTER_OP_GE_S64
;
709 insn
->op
= FILTER_OP_GE_DOUBLE_S64
;
712 if (vstack_bx(stack
)->type
== REG_S64
)
713 insn
->op
= FILTER_OP_GE_S64_DOUBLE
;
715 insn
->op
= FILTER_OP_GE_DOUBLE
;
719 if (vstack_pop(stack
)) {
723 vstack_ax(stack
)->type
= REG_S64
;
724 next_pc
+= sizeof(struct binary_op
);
729 struct binary_op
*insn
= (struct binary_op
*) pc
;
731 switch(vstack_ax(stack
)->type
) {
733 printk(KERN_WARNING
"unknown register type\n");
737 case REG_STAR_GLOB_STRING
:
738 printk(KERN_WARNING
"invalid register type for <= binary operator\n");
742 insn
->op
= FILTER_OP_LE_STRING
;
745 if (vstack_bx(stack
)->type
== REG_S64
)
746 insn
->op
= FILTER_OP_LE_S64
;
748 insn
->op
= FILTER_OP_LE_DOUBLE_S64
;
751 if (vstack_bx(stack
)->type
== REG_S64
)
752 insn
->op
= FILTER_OP_LE_S64_DOUBLE
;
754 insn
->op
= FILTER_OP_LE_DOUBLE
;
757 vstack_ax(stack
)->type
= REG_S64
;
758 next_pc
+= sizeof(struct binary_op
);
762 case FILTER_OP_EQ_STRING
:
763 case FILTER_OP_NE_STRING
:
764 case FILTER_OP_GT_STRING
:
765 case FILTER_OP_LT_STRING
:
766 case FILTER_OP_GE_STRING
:
767 case FILTER_OP_LE_STRING
:
768 case FILTER_OP_EQ_STAR_GLOB_STRING
:
769 case FILTER_OP_NE_STAR_GLOB_STRING
:
770 case FILTER_OP_EQ_S64
:
771 case FILTER_OP_NE_S64
:
772 case FILTER_OP_GT_S64
:
773 case FILTER_OP_LT_S64
:
774 case FILTER_OP_GE_S64
:
775 case FILTER_OP_LE_S64
:
776 case FILTER_OP_EQ_DOUBLE
:
777 case FILTER_OP_NE_DOUBLE
:
778 case FILTER_OP_GT_DOUBLE
:
779 case FILTER_OP_LT_DOUBLE
:
780 case FILTER_OP_GE_DOUBLE
:
781 case FILTER_OP_LE_DOUBLE
:
782 case FILTER_OP_EQ_DOUBLE_S64
:
783 case FILTER_OP_NE_DOUBLE_S64
:
784 case FILTER_OP_GT_DOUBLE_S64
:
785 case FILTER_OP_LT_DOUBLE_S64
:
786 case FILTER_OP_GE_DOUBLE_S64
:
787 case FILTER_OP_LE_DOUBLE_S64
:
788 case FILTER_OP_EQ_S64_DOUBLE
:
789 case FILTER_OP_NE_S64_DOUBLE
:
790 case FILTER_OP_GT_S64_DOUBLE
:
791 case FILTER_OP_LT_S64_DOUBLE
:
792 case FILTER_OP_GE_S64_DOUBLE
:
793 case FILTER_OP_LE_S64_DOUBLE
:
794 case FILTER_OP_BIT_RSHIFT
:
795 case FILTER_OP_BIT_LSHIFT
:
796 case FILTER_OP_BIT_AND
:
797 case FILTER_OP_BIT_OR
:
798 case FILTER_OP_BIT_XOR
:
801 if (vstack_pop(stack
)) {
805 vstack_ax(stack
)->type
= REG_S64
;
806 next_pc
+= sizeof(struct binary_op
);
811 case FILTER_OP_UNARY_PLUS
:
813 struct unary_op
*insn
= (struct unary_op
*) pc
;
815 switch(vstack_ax(stack
)->type
) {
817 printk(KERN_WARNING
"unknown register type\n");
822 insn
->op
= FILTER_OP_UNARY_PLUS_S64
;
825 insn
->op
= FILTER_OP_UNARY_PLUS_DOUBLE
;
829 next_pc
+= sizeof(struct unary_op
);
833 case FILTER_OP_UNARY_MINUS
:
835 struct unary_op
*insn
= (struct unary_op
*) pc
;
837 switch(vstack_ax(stack
)->type
) {
839 printk(KERN_WARNING
"unknown register type\n");
844 insn
->op
= FILTER_OP_UNARY_MINUS_S64
;
847 insn
->op
= FILTER_OP_UNARY_MINUS_DOUBLE
;
851 next_pc
+= sizeof(struct unary_op
);
855 case FILTER_OP_UNARY_NOT
:
857 struct unary_op
*insn
= (struct unary_op
*) pc
;
859 switch(vstack_ax(stack
)->type
) {
861 printk(KERN_WARNING
"unknown register type\n");
866 insn
->op
= FILTER_OP_UNARY_NOT_S64
;
869 insn
->op
= FILTER_OP_UNARY_NOT_DOUBLE
;
873 next_pc
+= sizeof(struct unary_op
);
877 case FILTER_OP_UNARY_BIT_NOT
:
880 next_pc
+= sizeof(struct unary_op
);
884 case FILTER_OP_UNARY_PLUS_S64
:
885 case FILTER_OP_UNARY_MINUS_S64
:
886 case FILTER_OP_UNARY_NOT_S64
:
887 case FILTER_OP_UNARY_PLUS_DOUBLE
:
888 case FILTER_OP_UNARY_MINUS_DOUBLE
:
889 case FILTER_OP_UNARY_NOT_DOUBLE
:
892 next_pc
+= sizeof(struct unary_op
);
900 /* Continue to next instruction */
901 /* Pop 1 when jump not taken */
902 if (vstack_pop(stack
)) {
906 next_pc
+= sizeof(struct logical_op
);
911 case FILTER_OP_LOAD_FIELD_REF
:
913 printk(KERN_WARNING
"Unknown field ref type\n");
917 /* get context ref */
918 case FILTER_OP_GET_CONTEXT_REF
:
920 printk(KERN_WARNING
"Unknown get context ref type\n");
924 case FILTER_OP_LOAD_FIELD_REF_STRING
:
925 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
926 case FILTER_OP_GET_CONTEXT_REF_STRING
:
927 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
928 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
930 if (vstack_push(stack
)) {
934 vstack_ax(stack
)->type
= REG_STRING
;
935 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
938 case FILTER_OP_LOAD_FIELD_REF_S64
:
939 case FILTER_OP_GET_CONTEXT_REF_S64
:
941 if (vstack_push(stack
)) {
945 vstack_ax(stack
)->type
= REG_S64
;
946 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
949 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
950 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
952 if (vstack_push(stack
)) {
956 vstack_ax(stack
)->type
= REG_DOUBLE
;
957 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
961 /* load from immediate operand */
962 case FILTER_OP_LOAD_STRING
:
964 struct load_op
*insn
= (struct load_op
*) pc
;
966 if (vstack_push(stack
)) {
970 vstack_ax(stack
)->type
= REG_STRING
;
971 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
975 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
977 struct load_op
*insn
= (struct load_op
*) pc
;
979 if (vstack_push(stack
)) {
983 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
984 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
988 case FILTER_OP_LOAD_S64
:
990 if (vstack_push(stack
)) {
994 vstack_ax(stack
)->type
= REG_S64
;
995 next_pc
+= sizeof(struct load_op
)
996 + sizeof(struct literal_numeric
);
1000 case FILTER_OP_LOAD_DOUBLE
:
1002 if (vstack_push(stack
)) {
1006 vstack_ax(stack
)->type
= REG_DOUBLE
;
1007 next_pc
+= sizeof(struct load_op
)
1008 + sizeof(struct literal_double
);
1013 case FILTER_OP_CAST_TO_S64
:
1015 struct cast_op
*insn
= (struct cast_op
*) pc
;
1017 switch (vstack_ax(stack
)->type
) {
1019 printk(KERN_WARNING
"unknown register type\n");
1024 case REG_STAR_GLOB_STRING
:
1025 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
1029 insn
->op
= FILTER_OP_CAST_NOP
;
1032 insn
->op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
1036 vstack_ax(stack
)->type
= REG_S64
;
1037 next_pc
+= sizeof(struct cast_op
);
1040 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1043 vstack_ax(stack
)->type
= REG_S64
;
1044 next_pc
+= sizeof(struct cast_op
);
1047 case FILTER_OP_CAST_NOP
:
1049 next_pc
+= sizeof(struct cast_op
);
1054 * Instructions for recursive traversal through composed types.
1056 case FILTER_OP_GET_CONTEXT_ROOT
:
1058 if (vstack_push(stack
)) {
1062 vstack_ax(stack
)->type
= REG_PTR
;
1063 vstack_ax(stack
)->load
.type
= LOAD_ROOT_CONTEXT
;
1064 next_pc
+= sizeof(struct load_op
);
1067 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1069 if (vstack_push(stack
)) {
1073 vstack_ax(stack
)->type
= REG_PTR
;
1074 vstack_ax(stack
)->load
.type
= LOAD_ROOT_APP_CONTEXT
;
1075 next_pc
+= sizeof(struct load_op
);
1078 case FILTER_OP_GET_PAYLOAD_ROOT
:
1080 if (vstack_push(stack
)) {
1084 vstack_ax(stack
)->type
= REG_PTR
;
1085 vstack_ax(stack
)->load
.type
= LOAD_ROOT_PAYLOAD
;
1086 next_pc
+= sizeof(struct load_op
);
1090 case FILTER_OP_LOAD_FIELD
:
1092 struct load_op
*insn
= (struct load_op
*) pc
;
1094 WARN_ON_ONCE(vstack_ax(stack
)->type
!= REG_PTR
);
1096 ret
= specialize_load_field(vstack_ax(stack
), insn
);
1100 next_pc
+= sizeof(struct load_op
);
1104 case FILTER_OP_LOAD_FIELD_S8
:
1105 case FILTER_OP_LOAD_FIELD_S16
:
1106 case FILTER_OP_LOAD_FIELD_S32
:
1107 case FILTER_OP_LOAD_FIELD_S64
:
1108 case FILTER_OP_LOAD_FIELD_U8
:
1109 case FILTER_OP_LOAD_FIELD_U16
:
1110 case FILTER_OP_LOAD_FIELD_U32
:
1111 case FILTER_OP_LOAD_FIELD_U64
:
1114 vstack_ax(stack
)->type
= REG_S64
;
1115 next_pc
+= sizeof(struct load_op
);
1119 case FILTER_OP_LOAD_FIELD_STRING
:
1120 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1123 vstack_ax(stack
)->type
= REG_STRING
;
1124 next_pc
+= sizeof(struct load_op
);
1128 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1131 vstack_ax(stack
)->type
= REG_DOUBLE
;
1132 next_pc
+= sizeof(struct load_op
);
1136 case FILTER_OP_GET_SYMBOL
:
1138 struct load_op
*insn
= (struct load_op
*) pc
;
1140 dbg_printk("op get symbol\n");
1141 switch (vstack_ax(stack
)->load
.type
) {
1143 printk(KERN_WARNING
"Nested fields not implemented yet.\n");
1146 case LOAD_ROOT_CONTEXT
:
1147 /* Lookup context field. */
1148 ret
= specialize_context_lookup(bytecode
, insn
,
1149 &vstack_ax(stack
)->load
);
1153 case LOAD_ROOT_APP_CONTEXT
:
1156 case LOAD_ROOT_PAYLOAD
:
1157 /* Lookup event payload field. */
1158 ret
= specialize_event_payload_lookup(event
,
1160 &vstack_ax(stack
)->load
);
1165 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1169 case FILTER_OP_GET_SYMBOL_FIELD
:
1171 /* Always generated by specialize phase. */
1176 case FILTER_OP_GET_INDEX_U16
:
1178 struct load_op
*insn
= (struct load_op
*) pc
;
1179 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1181 dbg_printk("op get index u16\n");
1183 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1184 vstack_ax(stack
), sizeof(*index
));
1187 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1191 case FILTER_OP_GET_INDEX_U64
:
1193 struct load_op
*insn
= (struct load_op
*) pc
;
1194 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1196 dbg_printk("op get index u64\n");
1198 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1199 vstack_ax(stack
), sizeof(*index
));
1202 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);