/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/slab.h>
#include <lttng-filter.h>
#include "lib/align.h"
#include <wrapper/compiler_attributes.h>
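
/*
 * The specializer runs once over freshly loaded filter bytecode and
 * rewrites generic opcodes into type-specialized variants, so the
 * interpreter does not need to re-discover operand types at each event.
 * The helpers below manage a per-runtime "data" area holding side data
 * (such as struct filter_get_index_data) referenced by specialized
 * instructions.
 */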
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len),
				old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* We zero directly the memory from start of allocation. */
		memset(&runtime->data[old_alloc_len], 0,
			new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}
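
/*
 * Copy @len bytes from @p into the runtime data area at the requested
 * alignment. Returns the offset of the copied data within the data area,
 * or a negative error value. Note that a failed reservation is reported
 * as -ENOMEM even when the underlying cause is the FILTER_MAX_DATA_LEN
 * limit rather than an allocation failure.
 */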
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}
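
/*
 * Specialize the generic FILTER_OP_LOAD_FIELD instruction according to
 * the object type tracked on the virtual stack. Integer fields of all
 * widths are loaded as s64 registers. When the field requires a
 * byte-order swap (rev_bo) or lives in user-space memory (user), the
 * generic opcode is kept so the interpreter handles those cases.
 */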
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		printk(KERN_WARNING "Double type unsupported\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
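
/*
 * Specialize a get_index operation on an array or sequence: compute the
 * element offset at specialization time and push a
 * struct filter_get_index_data descriptor into the runtime data area.
 * The instruction's index operand is redirected to that descriptor,
 * which the interpreter dereferences at runtime.
 */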
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			stack_top->load.rev_bo = gid.elem.rev_bo =
				field->type.u.array.elem_type.u.basic.integer.reverse_byte_order;
			stack_top->load.user = gid.elem.user =
				field->type.u.array.elem_type.u.basic.integer.user;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			stack_top->load.rev_bo = gid.elem.rev_bo =
				field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order;
			stack_top->load.user = gid.elem.user =
				field->type.u.sequence.elem_type.u.basic.integer.user;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}
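
/*
 * Resolve the context field name referenced by a get_symbol instruction
 * against the static context table, returning the context index or a
 * negative value if not found.
 */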
static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(lttng_static_ctx, name);
}
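
/*
 * Derive the vstack_load object type from an event field type. Integers
 * and enumerations are handled as s64; arrays and sequences are only
 * supported when their elements are integers, and integer elements with
 * a string encoding are treated as string sequences.
 */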
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST lays out all integer fields as s64 on the stack for the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = field->type.u.basic.integer.reverse_byte_order;
		load->user = field->type.u.basic.integer.user;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = itype->reverse_byte_order;
		load->user = itype->user;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
			load->user = field->type.u.array.elem_type.u.basic.integer.user;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
				load->user = field->type.u.array.elem_type.u.basic.integer.user;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
			load->user = field->type.u.sequence.elem_type.u.basic.integer.user;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
				load->user = field->type.u.sequence.elem_type.u.basic.integer.user;
			}
		}
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		load->user = field->type.u.basic.string.user;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
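
/*
 * Specialize a context-root get_symbol into a get_index_u16 referencing
 * the looked-up context field.
 */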
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.elem.user = load->user;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}
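
/*
 * Specialize a payload-root get_symbol into a get_index_u16. The field
 * offset is computed from the interpreter's stack layout: integers and
 * enumerations occupy a 64-bit slot, strings a pointer, and arrays,
 * sequences and bitfields a length word plus a pointer.
 */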
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.elem.user = load->user;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
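
/*
 * Single forward pass over the bytecode, mirroring the interpreter's
 * stack with a virtual stack so each generic opcode can be rewritten
 * into its type-specialized variant. Bytecode is assumed to have been
 * validated by a prior pass. As an illustrative (hypothetical) example,
 * a predicate like "intfield == 42" arriving as GET_PAYLOAD_ROOT,
 * GET_SYMBOL, LOAD_FIELD, LOAD_S64, EQ would leave this pass as
 * GET_PAYLOAD_ROOT, GET_INDEX_U16, LOAD_FIELD_S64, LOAD_S64, EQ_S64.
 */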
int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
		case FILTER_OP_RETURN_S64:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;
		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
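
		/*
		 * The remaining comparison operators (!=, >, <, >=, <=) are
		 * specialized the same way as == above: the (bx, ax) register
		 * types select the typed opcode, except that globbing
		 * patterns are only valid for (in)equality comparisons.
		 */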
		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
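
		/*
		 * Operands of already-specialized comparison and bitwise
		 * operators need no rewriting; only the virtual stack effect
		 * (pop 2, push 1 s64) is modeled.
		 */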
		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}
		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}
		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}
		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}
		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}
		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}
		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			printk(KERN_WARNING "Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			printk(KERN_WARNING "Unknown get context ref type\n");
			ret = -EINVAL;
			goto end;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}
		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}
		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}
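
		/*
		 * The *_ROOT opcodes above push a typed pointer whose
		 * load.type records which root (context, app context or
		 * payload) is being traversed; the load_field, get_symbol
		 * and get_index opcodes below consume that tracking state.
		 */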
		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}
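
		/*
		 * get_symbol resolves a field name at specialization time
		 * and is rewritten into a get_index_u16, so the interpreter
		 * never performs string lookups.
		 */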
		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}
		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);