/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-validator.c
 *
 * LTTng modules filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
10 #include <linux/types.h>
11 #include <linux/jhash.h>
12 #include <linux/slab.h>
14 #include <lttng-filter.h>
16 #define MERGE_POINT_TABLE_BITS 7
17 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
19 /* merge point table node */
21 struct hlist_node node
;
23 /* Context at merge point */
25 unsigned long target_pc
;
29 struct hlist_head mp_head
[MERGE_POINT_TABLE_SIZE
];
33 int lttng_hash_match(struct mp_node
*mp_node
, unsigned long key_pc
)
35 if (mp_node
->target_pc
== key_pc
)
42 int merge_points_compare(const struct vstack
*stacka
,
43 const struct vstack
*stackb
)
47 if (stacka
->top
!= stackb
->top
)
49 len
= stacka
->top
+ 1;
50 WARN_ON_ONCE(len
< 0);
51 for (i
= 0; i
< len
; i
++) {
52 if (stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
59 int merge_point_add_check(struct mp_table
*mp_table
, unsigned long target_pc
,
60 const struct vstack
*stack
)
62 struct mp_node
*mp_node
;
63 unsigned long hash
= jhash_1word(target_pc
, 0);
64 struct hlist_head
*head
;
65 struct mp_node
*lookup_node
;
68 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
70 mp_node
= kzalloc(sizeof(struct mp_node
), GFP_KERNEL
);
73 mp_node
->target_pc
= target_pc
;
74 memcpy(&mp_node
->stack
, stack
, sizeof(mp_node
->stack
));
76 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
77 hlist_for_each_entry(lookup_node
, head
, node
) {
78 if (lttng_hash_match(lookup_node
, target_pc
)) {
84 /* Key already present */
85 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
88 if (merge_points_compare(stack
, &lookup_node
->stack
)) {
89 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
94 hlist_add_head(&mp_node
->node
, head
);
100 * Binary comparators use top of stack and top of stack -1.
103 int bin_op_compare_check(struct vstack
*stack
, const filter_opcode_t opcode
,
106 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
109 switch (vstack_ax(stack
)->type
) {
115 switch (vstack_bx(stack
)->type
) {
119 case REG_TYPE_UNKNOWN
:
123 case REG_STAR_GLOB_STRING
:
124 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
132 case REG_STAR_GLOB_STRING
:
133 switch (vstack_bx(stack
)->type
) {
137 case REG_TYPE_UNKNOWN
:
140 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
144 case REG_STAR_GLOB_STRING
:
150 switch (vstack_bx(stack
)->type
) {
154 case REG_TYPE_UNKNOWN
:
157 case REG_STAR_GLOB_STRING
:
163 case REG_TYPE_UNKNOWN
:
164 switch (vstack_bx(stack
)->type
) {
168 case REG_TYPE_UNKNOWN
:
170 case REG_STAR_GLOB_STRING
:
182 printk(KERN_WARNING
"empty stack for '%s' binary operator\n", str
);
186 printk(KERN_WARNING
"type mismatch for '%s' binary operator\n", str
);
190 printk(KERN_WARNING
"unknown type for '%s' binary operator\n", str
);
195 * Binary bitwise operators use top of stack and top of stack -1.
196 * Return 0 if typing is known to match, 1 if typing is dynamic
197 * (unknown), negative error value on error.
200 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
203 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
206 switch (vstack_ax(stack
)->type
) {
211 case REG_TYPE_UNKNOWN
:
212 switch (vstack_bx(stack
)->type
) {
216 case REG_TYPE_UNKNOWN
:
218 case REG_STAR_GLOB_STRING
:
224 switch (vstack_bx(stack
)->type
) {
228 case REG_TYPE_UNKNOWN
:
241 printk(KERN_WARNING
"empty stack for '%s' binary operator\n", str
);
245 printk(KERN_WARNING
"unknown type for '%s' binary operator\n", str
);
250 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
251 const struct get_symbol
*sym
)
253 const char *str
, *str_limit
;
256 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
259 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
260 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
261 len_limit
= str_limit
- str
;
262 if (strnlen(str
, len_limit
) == len_limit
)
268 * Validate bytecode range overflow within the validation pass.
269 * Called for each instruction encountered.
272 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
273 char *start_pc
, char *pc
)
277 switch (*(filter_opcode_t
*) pc
) {
278 case FILTER_OP_UNKNOWN
:
281 printk(KERN_WARNING
"unknown bytecode op %u\n",
282 (unsigned int) *(filter_opcode_t
*) pc
);
287 case FILTER_OP_RETURN
:
288 case FILTER_OP_RETURN_S64
:
290 if (unlikely(pc
+ sizeof(struct return_op
)
291 > start_pc
+ bytecode
->len
)) {
302 case FILTER_OP_MINUS
:
303 case FILTER_OP_EQ_DOUBLE
:
304 case FILTER_OP_NE_DOUBLE
:
305 case FILTER_OP_GT_DOUBLE
:
306 case FILTER_OP_LT_DOUBLE
:
307 case FILTER_OP_GE_DOUBLE
:
308 case FILTER_OP_LE_DOUBLE
:
310 case FILTER_OP_EQ_DOUBLE_S64
:
311 case FILTER_OP_NE_DOUBLE_S64
:
312 case FILTER_OP_GT_DOUBLE_S64
:
313 case FILTER_OP_LT_DOUBLE_S64
:
314 case FILTER_OP_GE_DOUBLE_S64
:
315 case FILTER_OP_LE_DOUBLE_S64
:
316 case FILTER_OP_EQ_S64_DOUBLE
:
317 case FILTER_OP_NE_S64_DOUBLE
:
318 case FILTER_OP_GT_S64_DOUBLE
:
319 case FILTER_OP_LT_S64_DOUBLE
:
320 case FILTER_OP_GE_S64_DOUBLE
:
321 case FILTER_OP_LE_S64_DOUBLE
:
322 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
323 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
324 case FILTER_OP_LOAD_DOUBLE
:
325 case FILTER_OP_CAST_DOUBLE_TO_S64
:
326 case FILTER_OP_UNARY_PLUS_DOUBLE
:
327 case FILTER_OP_UNARY_MINUS_DOUBLE
:
328 case FILTER_OP_UNARY_NOT_DOUBLE
:
330 printk(KERN_WARNING
"unsupported bytecode op %u\n",
331 (unsigned int) *(filter_opcode_t
*) pc
);
342 case FILTER_OP_EQ_STRING
:
343 case FILTER_OP_NE_STRING
:
344 case FILTER_OP_GT_STRING
:
345 case FILTER_OP_LT_STRING
:
346 case FILTER_OP_GE_STRING
:
347 case FILTER_OP_LE_STRING
:
348 case FILTER_OP_EQ_STAR_GLOB_STRING
:
349 case FILTER_OP_NE_STAR_GLOB_STRING
:
350 case FILTER_OP_EQ_S64
:
351 case FILTER_OP_NE_S64
:
352 case FILTER_OP_GT_S64
:
353 case FILTER_OP_LT_S64
:
354 case FILTER_OP_GE_S64
:
355 case FILTER_OP_LE_S64
:
356 case FILTER_OP_BIT_RSHIFT
:
357 case FILTER_OP_BIT_LSHIFT
:
358 case FILTER_OP_BIT_AND
:
359 case FILTER_OP_BIT_OR
:
360 case FILTER_OP_BIT_XOR
:
362 if (unlikely(pc
+ sizeof(struct binary_op
)
363 > start_pc
+ bytecode
->len
)) {
370 case FILTER_OP_UNARY_PLUS
:
371 case FILTER_OP_UNARY_MINUS
:
372 case FILTER_OP_UNARY_NOT
:
373 case FILTER_OP_UNARY_PLUS_S64
:
374 case FILTER_OP_UNARY_MINUS_S64
:
375 case FILTER_OP_UNARY_NOT_S64
:
376 case FILTER_OP_UNARY_BIT_NOT
:
378 if (unlikely(pc
+ sizeof(struct unary_op
)
379 > start_pc
+ bytecode
->len
)) {
389 if (unlikely(pc
+ sizeof(struct logical_op
)
390 > start_pc
+ bytecode
->len
)) {
397 case FILTER_OP_LOAD_FIELD_REF
:
399 printk(KERN_WARNING
"Unknown field ref type\n");
404 /* get context ref */
405 case FILTER_OP_GET_CONTEXT_REF
:
407 printk(KERN_WARNING
"Unknown field ref type\n");
411 case FILTER_OP_LOAD_FIELD_REF_STRING
:
412 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
413 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
414 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
415 case FILTER_OP_LOAD_FIELD_REF_S64
:
416 case FILTER_OP_GET_CONTEXT_REF_STRING
:
417 case FILTER_OP_GET_CONTEXT_REF_S64
:
419 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
420 > start_pc
+ bytecode
->len
)) {
426 /* load from immediate operand */
427 case FILTER_OP_LOAD_STRING
:
428 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
430 struct load_op
*insn
= (struct load_op
*) pc
;
431 uint32_t str_len
, maxlen
;
433 if (unlikely(pc
+ sizeof(struct load_op
)
434 > start_pc
+ bytecode
->len
)) {
439 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
440 str_len
= strnlen(insn
->data
, maxlen
);
441 if (unlikely(str_len
>= maxlen
)) {
442 /* Final '\0' not found within range */
448 case FILTER_OP_LOAD_S64
:
450 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
451 > start_pc
+ bytecode
->len
)) {
457 case FILTER_OP_CAST_TO_S64
:
458 case FILTER_OP_CAST_NOP
:
460 if (unlikely(pc
+ sizeof(struct cast_op
)
461 > start_pc
+ bytecode
->len
)) {
468 * Instructions for recursive traversal through composed types.
470 case FILTER_OP_GET_CONTEXT_ROOT
:
471 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
472 case FILTER_OP_GET_PAYLOAD_ROOT
:
473 case FILTER_OP_LOAD_FIELD
:
474 case FILTER_OP_LOAD_FIELD_S8
:
475 case FILTER_OP_LOAD_FIELD_S16
:
476 case FILTER_OP_LOAD_FIELD_S32
:
477 case FILTER_OP_LOAD_FIELD_S64
:
478 case FILTER_OP_LOAD_FIELD_U8
:
479 case FILTER_OP_LOAD_FIELD_U16
:
480 case FILTER_OP_LOAD_FIELD_U32
:
481 case FILTER_OP_LOAD_FIELD_U64
:
482 case FILTER_OP_LOAD_FIELD_STRING
:
483 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
484 case FILTER_OP_LOAD_FIELD_DOUBLE
:
485 if (unlikely(pc
+ sizeof(struct load_op
)
486 > start_pc
+ bytecode
->len
)) {
491 case FILTER_OP_GET_SYMBOL
:
493 struct load_op
*insn
= (struct load_op
*) pc
;
494 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
496 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
497 > start_pc
+ bytecode
->len
)) {
501 ret
= validate_get_symbol(bytecode
, sym
);
505 case FILTER_OP_GET_SYMBOL_FIELD
:
506 printk(KERN_WARNING
"Unexpected get symbol field\n");
510 case FILTER_OP_GET_INDEX_U16
:
511 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
512 > start_pc
+ bytecode
->len
)) {
517 case FILTER_OP_GET_INDEX_U64
:
518 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
519 > start_pc
+ bytecode
->len
)) {
529 unsigned long delete_all_nodes(struct mp_table
*mp_table
)
531 struct mp_node
*mp_node
;
532 struct hlist_node
*tmp
;
533 unsigned long nr_nodes
= 0;
536 for (i
= 0; i
< MERGE_POINT_TABLE_SIZE
; i
++) {
537 struct hlist_head
*head
;
539 head
= &mp_table
->mp_head
[i
];
540 hlist_for_each_entry_safe(mp_node
, tmp
, head
, node
) {
554 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
555 struct vstack
*stack
,
560 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
563 case FILTER_OP_UNKNOWN
:
566 printk(KERN_WARNING
"unknown bytecode op %u\n",
567 (unsigned int) *(filter_opcode_t
*) pc
);
572 case FILTER_OP_RETURN
:
573 case FILTER_OP_RETURN_S64
:
583 case FILTER_OP_MINUS
:
585 case FILTER_OP_EQ_DOUBLE
:
586 case FILTER_OP_NE_DOUBLE
:
587 case FILTER_OP_GT_DOUBLE
:
588 case FILTER_OP_LT_DOUBLE
:
589 case FILTER_OP_GE_DOUBLE
:
590 case FILTER_OP_LE_DOUBLE
:
591 case FILTER_OP_EQ_DOUBLE_S64
:
592 case FILTER_OP_NE_DOUBLE_S64
:
593 case FILTER_OP_GT_DOUBLE_S64
:
594 case FILTER_OP_LT_DOUBLE_S64
:
595 case FILTER_OP_GE_DOUBLE_S64
:
596 case FILTER_OP_LE_DOUBLE_S64
:
597 case FILTER_OP_EQ_S64_DOUBLE
:
598 case FILTER_OP_NE_S64_DOUBLE
:
599 case FILTER_OP_GT_S64_DOUBLE
:
600 case FILTER_OP_LT_S64_DOUBLE
:
601 case FILTER_OP_GE_S64_DOUBLE
:
602 case FILTER_OP_LE_S64_DOUBLE
:
603 case FILTER_OP_UNARY_PLUS_DOUBLE
:
604 case FILTER_OP_UNARY_MINUS_DOUBLE
:
605 case FILTER_OP_UNARY_NOT_DOUBLE
:
606 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
607 case FILTER_OP_LOAD_DOUBLE
:
608 case FILTER_OP_CAST_DOUBLE_TO_S64
:
609 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
611 printk(KERN_WARNING
"unsupported bytecode op %u\n",
612 (unsigned int) *(filter_opcode_t
*) pc
);
619 ret
= bin_op_compare_check(stack
, opcode
, "==");
626 ret
= bin_op_compare_check(stack
, opcode
, "!=");
633 ret
= bin_op_compare_check(stack
, opcode
, ">");
640 ret
= bin_op_compare_check(stack
, opcode
, "<");
647 ret
= bin_op_compare_check(stack
, opcode
, ">=");
654 ret
= bin_op_compare_check(stack
, opcode
, "<=");
660 case FILTER_OP_EQ_STRING
:
661 case FILTER_OP_NE_STRING
:
662 case FILTER_OP_GT_STRING
:
663 case FILTER_OP_LT_STRING
:
664 case FILTER_OP_GE_STRING
:
665 case FILTER_OP_LE_STRING
:
667 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
668 printk(KERN_WARNING
"Empty stack\n");
672 if (vstack_ax(stack
)->type
!= REG_STRING
673 || vstack_bx(stack
)->type
!= REG_STRING
) {
674 printk(KERN_WARNING
"Unexpected register type for string comparator\n");
682 case FILTER_OP_EQ_STAR_GLOB_STRING
:
683 case FILTER_OP_NE_STAR_GLOB_STRING
:
685 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
686 printk(KERN_WARNING
"Empty stack\n");
690 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
691 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
692 printk(KERN_WARNING
"Unexpected register type for globbing pattern comparator\n");
699 case FILTER_OP_EQ_S64
:
700 case FILTER_OP_NE_S64
:
701 case FILTER_OP_GT_S64
:
702 case FILTER_OP_LT_S64
:
703 case FILTER_OP_GE_S64
:
704 case FILTER_OP_LE_S64
:
706 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
707 printk(KERN_WARNING
"Empty stack\n");
711 if (vstack_ax(stack
)->type
!= REG_S64
712 || vstack_bx(stack
)->type
!= REG_S64
) {
713 printk(KERN_WARNING
"Unexpected register type for s64 comparator\n");
720 case FILTER_OP_BIT_RSHIFT
:
721 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
725 case FILTER_OP_BIT_LSHIFT
:
726 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
730 case FILTER_OP_BIT_AND
:
731 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
735 case FILTER_OP_BIT_OR
:
736 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
740 case FILTER_OP_BIT_XOR
:
741 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
747 case FILTER_OP_UNARY_PLUS
:
748 case FILTER_OP_UNARY_MINUS
:
749 case FILTER_OP_UNARY_NOT
:
751 if (!vstack_ax(stack
)) {
752 printk(KERN_WARNING
"Empty stack\n");
756 switch (vstack_ax(stack
)->type
) {
759 printk(KERN_WARNING
"unknown register type\n");
764 case REG_STAR_GLOB_STRING
:
765 printk(KERN_WARNING
"Unary op can only be applied to numeric or floating point registers\n");
769 case REG_TYPE_UNKNOWN
:
774 case FILTER_OP_UNARY_BIT_NOT
:
776 if (!vstack_ax(stack
)) {
777 printk(KERN_WARNING
"Empty stack\n");
781 switch (vstack_ax(stack
)->type
) {
783 printk(KERN_WARNING
"unknown register type\n");
788 case REG_STAR_GLOB_STRING
:
790 printk(KERN_WARNING
"Unary bitwise op can only be applied to numeric registers\n");
795 case REG_TYPE_UNKNOWN
:
801 case FILTER_OP_UNARY_PLUS_S64
:
802 case FILTER_OP_UNARY_MINUS_S64
:
803 case FILTER_OP_UNARY_NOT_S64
:
805 if (!vstack_ax(stack
)) {
806 printk(KERN_WARNING
"Empty stack\n");
810 if (vstack_ax(stack
)->type
!= REG_S64
) {
811 printk(KERN_WARNING
"Invalid register type\n");
822 struct logical_op
*insn
= (struct logical_op
*) pc
;
824 if (!vstack_ax(stack
)) {
825 printk(KERN_WARNING
"Empty stack\n");
829 if (vstack_ax(stack
)->type
!= REG_S64
) {
830 printk(KERN_WARNING
"Logical comparator expects S64 register\n");
835 dbg_printk("Validate jumping to bytecode offset %u\n",
836 (unsigned int) insn
->skip_offset
);
837 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
838 printk(KERN_WARNING
"Loops are not allowed in bytecode\n");
846 case FILTER_OP_LOAD_FIELD_REF
:
848 printk(KERN_WARNING
"Unknown field ref type\n");
852 case FILTER_OP_LOAD_FIELD_REF_STRING
:
853 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
854 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
855 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
857 struct load_op
*insn
= (struct load_op
*) pc
;
858 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
860 dbg_printk("Validate load field ref offset %u type string\n",
864 case FILTER_OP_LOAD_FIELD_REF_S64
:
866 struct load_op
*insn
= (struct load_op
*) pc
;
867 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
869 dbg_printk("Validate load field ref offset %u type s64\n",
874 /* load from immediate operand */
875 case FILTER_OP_LOAD_STRING
:
876 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
881 case FILTER_OP_LOAD_S64
:
886 case FILTER_OP_CAST_TO_S64
:
888 struct cast_op
*insn
= (struct cast_op
*) pc
;
890 if (!vstack_ax(stack
)) {
891 printk(KERN_WARNING
"Empty stack\n");
895 switch (vstack_ax(stack
)->type
) {
898 printk(KERN_WARNING
"unknown register type\n");
903 case REG_STAR_GLOB_STRING
:
904 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
910 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
911 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
912 printk(KERN_WARNING
"Cast expects double\n");
919 case FILTER_OP_CAST_NOP
:
924 /* get context ref */
925 case FILTER_OP_GET_CONTEXT_REF
:
927 printk(KERN_WARNING
"Unknown get context ref type\n");
931 case FILTER_OP_GET_CONTEXT_REF_STRING
:
933 struct load_op
*insn
= (struct load_op
*) pc
;
934 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
936 dbg_printk("Validate get context ref offset %u type string\n",
940 case FILTER_OP_GET_CONTEXT_REF_S64
:
942 struct load_op
*insn
= (struct load_op
*) pc
;
943 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
945 dbg_printk("Validate get context ref offset %u type s64\n",
951 * Instructions for recursive traversal through composed types.
953 case FILTER_OP_GET_CONTEXT_ROOT
:
955 dbg_printk("Validate get context root\n");
958 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
960 dbg_printk("Validate get app context root\n");
963 case FILTER_OP_GET_PAYLOAD_ROOT
:
965 dbg_printk("Validate get payload root\n");
968 case FILTER_OP_LOAD_FIELD
:
971 * We tolerate that field type is unknown at validation,
972 * because we are performing the load specialization in
973 * a phase after validation.
975 dbg_printk("Validate load field\n");
978 case FILTER_OP_LOAD_FIELD_S8
:
980 dbg_printk("Validate load field s8\n");
983 case FILTER_OP_LOAD_FIELD_S16
:
985 dbg_printk("Validate load field s16\n");
988 case FILTER_OP_LOAD_FIELD_S32
:
990 dbg_printk("Validate load field s32\n");
993 case FILTER_OP_LOAD_FIELD_S64
:
995 dbg_printk("Validate load field s64\n");
998 case FILTER_OP_LOAD_FIELD_U8
:
1000 dbg_printk("Validate load field u8\n");
1003 case FILTER_OP_LOAD_FIELD_U16
:
1005 dbg_printk("Validate load field u16\n");
1008 case FILTER_OP_LOAD_FIELD_U32
:
1010 dbg_printk("Validate load field u32\n");
1013 case FILTER_OP_LOAD_FIELD_U64
:
1015 dbg_printk("Validate load field u64\n");
1018 case FILTER_OP_LOAD_FIELD_STRING
:
1020 dbg_printk("Validate load field string\n");
1023 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1025 dbg_printk("Validate load field sequence\n");
1028 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1030 dbg_printk("Validate load field double\n");
1034 case FILTER_OP_GET_SYMBOL
:
1036 struct load_op
*insn
= (struct load_op
*) pc
;
1037 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1039 dbg_printk("Validate get symbol offset %u\n", sym
->offset
);
1043 case FILTER_OP_GET_SYMBOL_FIELD
:
1045 struct load_op
*insn
= (struct load_op
*) pc
;
1046 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1048 dbg_printk("Validate get symbol field offset %u\n", sym
->offset
);
1052 case FILTER_OP_GET_INDEX_U16
:
1054 struct load_op
*insn
= (struct load_op
*) pc
;
1055 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1057 dbg_printk("Validate get index u16 index %u\n", get_index
->index
);
1061 case FILTER_OP_GET_INDEX_U64
:
1063 struct load_op
*insn
= (struct load_op
*) pc
;
1064 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1066 dbg_printk("Validate get index u64 index %llu\n",
1067 (unsigned long long) get_index
->index
);
1081 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1082 struct mp_table
*mp_table
,
1083 struct vstack
*stack
,
1088 unsigned long target_pc
= pc
- start_pc
;
1090 struct hlist_head
*head
;
1091 struct mp_node
*mp_node
;
1093 /* Validate the context resulting from the previous instruction */
1094 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1098 /* Validate merge points */
1099 hash
= jhash_1word(target_pc
, 0);
1100 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
1101 hlist_for_each_entry(mp_node
, head
, node
) {
1102 if (lttng_hash_match(mp_node
, target_pc
)) {
1108 dbg_printk("Filter: validate merge point at offset %lu\n",
1110 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1111 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
1115 /* Once validated, we can remove the merge point */
1116 dbg_printk("Filter: remove merge point at offset %lu\n",
1118 hlist_del(&mp_node
->node
);
1125 * >0: going to next insn.
1126 * 0: success, stop iteration.
1130 int exec_insn(struct bytecode_runtime
*bytecode
,
1131 struct mp_table
*mp_table
,
1132 struct vstack
*stack
,
1137 char *next_pc
= *_next_pc
;
1139 switch (*(filter_opcode_t
*) pc
) {
1140 case FILTER_OP_UNKNOWN
:
1143 printk(KERN_WARNING
"unknown bytecode op %u\n",
1144 (unsigned int) *(filter_opcode_t
*) pc
);
1149 case FILTER_OP_RETURN
:
1151 if (!vstack_ax(stack
)) {
1152 printk(KERN_WARNING
"Empty stack\n");
1156 switch (vstack_ax(stack
)->type
) {
1158 case REG_TYPE_UNKNOWN
:
1161 printk(KERN_WARNING
"Unexpected register type %d at end of bytecode\n",
1162 (int) vstack_ax(stack
)->type
);
1171 case FILTER_OP_RETURN_S64
:
1173 if (!vstack_ax(stack
)) {
1174 printk(KERN_WARNING
"Empty stack\n");
1178 switch (vstack_ax(stack
)->type
) {
1182 case REG_TYPE_UNKNOWN
:
1183 printk(KERN_WARNING
"Unexpected register type %d at end of bytecode\n",
1184 (int) vstack_ax(stack
)->type
);
1197 case FILTER_OP_PLUS
:
1198 case FILTER_OP_MINUS
:
1199 /* Floating point */
1200 case FILTER_OP_EQ_DOUBLE
:
1201 case FILTER_OP_NE_DOUBLE
:
1202 case FILTER_OP_GT_DOUBLE
:
1203 case FILTER_OP_LT_DOUBLE
:
1204 case FILTER_OP_GE_DOUBLE
:
1205 case FILTER_OP_LE_DOUBLE
:
1206 case FILTER_OP_EQ_DOUBLE_S64
:
1207 case FILTER_OP_NE_DOUBLE_S64
:
1208 case FILTER_OP_GT_DOUBLE_S64
:
1209 case FILTER_OP_LT_DOUBLE_S64
:
1210 case FILTER_OP_GE_DOUBLE_S64
:
1211 case FILTER_OP_LE_DOUBLE_S64
:
1212 case FILTER_OP_EQ_S64_DOUBLE
:
1213 case FILTER_OP_NE_S64_DOUBLE
:
1214 case FILTER_OP_GT_S64_DOUBLE
:
1215 case FILTER_OP_LT_S64_DOUBLE
:
1216 case FILTER_OP_GE_S64_DOUBLE
:
1217 case FILTER_OP_LE_S64_DOUBLE
:
1218 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1219 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1220 case FILTER_OP_UNARY_NOT_DOUBLE
:
1221 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1222 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1223 case FILTER_OP_LOAD_DOUBLE
:
1224 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1226 printk(KERN_WARNING
"unsupported bytecode op %u\n",
1227 (unsigned int) *(filter_opcode_t
*) pc
);
1238 case FILTER_OP_EQ_STRING
:
1239 case FILTER_OP_NE_STRING
:
1240 case FILTER_OP_GT_STRING
:
1241 case FILTER_OP_LT_STRING
:
1242 case FILTER_OP_GE_STRING
:
1243 case FILTER_OP_LE_STRING
:
1244 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1245 case FILTER_OP_NE_STAR_GLOB_STRING
:
1246 case FILTER_OP_EQ_S64
:
1247 case FILTER_OP_NE_S64
:
1248 case FILTER_OP_GT_S64
:
1249 case FILTER_OP_LT_S64
:
1250 case FILTER_OP_GE_S64
:
1251 case FILTER_OP_LE_S64
:
1252 case FILTER_OP_BIT_RSHIFT
:
1253 case FILTER_OP_BIT_LSHIFT
:
1254 case FILTER_OP_BIT_AND
:
1255 case FILTER_OP_BIT_OR
:
1256 case FILTER_OP_BIT_XOR
:
1259 if (vstack_pop(stack
)) {
1263 if (!vstack_ax(stack
)) {
1264 printk(KERN_WARNING
"Empty stack\n");
1268 switch (vstack_ax(stack
)->type
) {
1272 case REG_STAR_GLOB_STRING
:
1273 case REG_TYPE_UNKNOWN
:
1276 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1277 (int) vstack_ax(stack
)->type
);
1282 vstack_ax(stack
)->type
= REG_S64
;
1283 next_pc
+= sizeof(struct binary_op
);
1288 case FILTER_OP_UNARY_PLUS
:
1289 case FILTER_OP_UNARY_MINUS
:
1292 if (!vstack_ax(stack
)) {
1293 printk(KERN_WARNING
"Empty stack\n\n");
1297 switch (vstack_ax(stack
)->type
) {
1299 case REG_TYPE_UNKNOWN
:
1302 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1303 (int) vstack_ax(stack
)->type
);
1308 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1309 next_pc
+= sizeof(struct unary_op
);
1313 case FILTER_OP_UNARY_PLUS_S64
:
1314 case FILTER_OP_UNARY_MINUS_S64
:
1315 case FILTER_OP_UNARY_NOT_S64
:
1318 if (!vstack_ax(stack
)) {
1319 printk(KERN_WARNING
"Empty stack\n\n");
1323 switch (vstack_ax(stack
)->type
) {
1327 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1328 (int) vstack_ax(stack
)->type
);
1333 vstack_ax(stack
)->type
= REG_S64
;
1334 next_pc
+= sizeof(struct unary_op
);
1338 case FILTER_OP_UNARY_NOT
:
1341 if (!vstack_ax(stack
)) {
1342 printk(KERN_WARNING
"Empty stack\n\n");
1346 switch (vstack_ax(stack
)->type
) {
1348 case REG_TYPE_UNKNOWN
:
1351 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1352 (int) vstack_ax(stack
)->type
);
1357 vstack_ax(stack
)->type
= REG_S64
;
1358 next_pc
+= sizeof(struct unary_op
);
1362 case FILTER_OP_UNARY_BIT_NOT
:
1365 if (!vstack_ax(stack
)) {
1366 printk(KERN_WARNING
"Empty stack\n");
1370 switch (vstack_ax(stack
)->type
) {
1372 case REG_TYPE_UNKNOWN
:
1376 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1377 (int) vstack_ax(stack
)->type
);
1382 vstack_ax(stack
)->type
= REG_S64
;
1383 next_pc
+= sizeof(struct unary_op
);
1391 struct logical_op
*insn
= (struct logical_op
*) pc
;
1394 /* Add merge point to table */
1395 merge_ret
= merge_point_add_check(mp_table
,
1396 insn
->skip_offset
, stack
);
1402 if (!vstack_ax(stack
)) {
1403 printk(KERN_WARNING
"Empty stack\n\n");
1407 /* There is always a cast-to-s64 operation before a or/and op. */
1408 switch (vstack_ax(stack
)->type
) {
1412 printk(KERN_WARNING
"Incorrect register type %d for operation\n",
1413 (int) vstack_ax(stack
)->type
);
1418 /* Continue to next instruction */
1419 /* Pop 1 when jump not taken */
1420 if (vstack_pop(stack
)) {
1424 next_pc
+= sizeof(struct logical_op
);
1428 /* load field ref */
1429 case FILTER_OP_LOAD_FIELD_REF
:
1431 printk(KERN_WARNING
"Unknown field ref type\n");
1435 /* get context ref */
1436 case FILTER_OP_GET_CONTEXT_REF
:
1438 printk(KERN_WARNING
"Unknown get context ref type\n");
1442 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1443 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1444 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1445 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
1446 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
1448 if (vstack_push(stack
)) {
1452 vstack_ax(stack
)->type
= REG_STRING
;
1453 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1456 case FILTER_OP_LOAD_FIELD_REF_S64
:
1457 case FILTER_OP_GET_CONTEXT_REF_S64
:
1459 if (vstack_push(stack
)) {
1463 vstack_ax(stack
)->type
= REG_S64
;
1464 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1468 /* load from immediate operand */
1469 case FILTER_OP_LOAD_STRING
:
1471 struct load_op
*insn
= (struct load_op
*) pc
;
1473 if (vstack_push(stack
)) {
1477 vstack_ax(stack
)->type
= REG_STRING
;
1478 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1482 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1484 struct load_op
*insn
= (struct load_op
*) pc
;
1486 if (vstack_push(stack
)) {
1490 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1491 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1495 case FILTER_OP_LOAD_S64
:
1497 if (vstack_push(stack
)) {
1501 vstack_ax(stack
)->type
= REG_S64
;
1502 next_pc
+= sizeof(struct load_op
)
1503 + sizeof(struct literal_numeric
);
1507 case FILTER_OP_CAST_TO_S64
:
1510 if (!vstack_ax(stack
)) {
1511 printk(KERN_WARNING
"Empty stack\n");
1515 switch (vstack_ax(stack
)->type
) {
1518 case REG_TYPE_UNKNOWN
:
1521 printk(KERN_WARNING
"Incorrect register type %d for cast\n",
1522 (int) vstack_ax(stack
)->type
);
1526 vstack_ax(stack
)->type
= REG_S64
;
1527 next_pc
+= sizeof(struct cast_op
);
1530 case FILTER_OP_CAST_NOP
:
1532 next_pc
+= sizeof(struct cast_op
);
1537 * Instructions for recursive traversal through composed types.
1539 case FILTER_OP_GET_CONTEXT_ROOT
:
1540 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1541 case FILTER_OP_GET_PAYLOAD_ROOT
:
1543 if (vstack_push(stack
)) {
1547 vstack_ax(stack
)->type
= REG_PTR
;
1548 next_pc
+= sizeof(struct load_op
);
1552 case FILTER_OP_LOAD_FIELD
:
1555 if (!vstack_ax(stack
)) {
1556 printk(KERN_WARNING
"Empty stack\n\n");
1560 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1561 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1565 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1566 next_pc
+= sizeof(struct load_op
);
1570 case FILTER_OP_LOAD_FIELD_S8
:
1571 case FILTER_OP_LOAD_FIELD_S16
:
1572 case FILTER_OP_LOAD_FIELD_S32
:
1573 case FILTER_OP_LOAD_FIELD_S64
:
1574 case FILTER_OP_LOAD_FIELD_U8
:
1575 case FILTER_OP_LOAD_FIELD_U16
:
1576 case FILTER_OP_LOAD_FIELD_U32
:
1577 case FILTER_OP_LOAD_FIELD_U64
:
1580 if (!vstack_ax(stack
)) {
1581 printk(KERN_WARNING
"Empty stack\n\n");
1585 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1586 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1590 vstack_ax(stack
)->type
= REG_S64
;
1591 next_pc
+= sizeof(struct load_op
);
1595 case FILTER_OP_LOAD_FIELD_STRING
:
1596 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1599 if (!vstack_ax(stack
)) {
1600 printk(KERN_WARNING
"Empty stack\n\n");
1604 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1605 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1609 vstack_ax(stack
)->type
= REG_STRING
;
1610 next_pc
+= sizeof(struct load_op
);
1614 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1617 if (!vstack_ax(stack
)) {
1618 printk(KERN_WARNING
"Empty stack\n\n");
1622 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1623 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1627 vstack_ax(stack
)->type
= REG_DOUBLE
;
1628 next_pc
+= sizeof(struct load_op
);
1632 case FILTER_OP_GET_SYMBOL
:
1633 case FILTER_OP_GET_SYMBOL_FIELD
:
1636 if (!vstack_ax(stack
)) {
1637 printk(KERN_WARNING
"Empty stack\n\n");
1641 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1642 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1646 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1650 case FILTER_OP_GET_INDEX_U16
:
1653 if (!vstack_ax(stack
)) {
1654 printk(KERN_WARNING
"Empty stack\n\n");
1658 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1659 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1663 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1667 case FILTER_OP_GET_INDEX_U64
:
1670 if (!vstack_ax(stack
)) {
1671 printk(KERN_WARNING
"Empty stack\n\n");
1675 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1676 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1680 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1686 *_next_pc
= next_pc
;
1691 * Never called concurrently (hash seed is shared).
1693 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1695 struct mp_table
*mp_table
;
1696 char *pc
, *next_pc
, *start_pc
;
1698 struct vstack stack
;
1700 vstack_init(&stack
);
1702 mp_table
= kzalloc(sizeof(*mp_table
), GFP_KERNEL
);
1704 printk(KERN_WARNING
"Error allocating hash table for bytecode validation\n");
1707 start_pc
= &bytecode
->code
[0];
1708 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1710 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1713 printk(KERN_WARNING
"filter bytecode overflow\n");
1716 dbg_printk("Validating op %s (%u)\n",
1717 lttng_filter_print_op((unsigned int) *(filter_opcode_t
*) pc
),
1718 (unsigned int) *(filter_opcode_t
*) pc
);
1721 * For each instruction, validate the current context
1722 * (traversal of entire execution flow), and validate
1723 * all merge points targeting this instruction.
1725 ret
= validate_instruction_all_contexts(bytecode
, mp_table
,
1726 &stack
, start_pc
, pc
);
1729 ret
= exec_insn(bytecode
, mp_table
, &stack
, &next_pc
, pc
);
1734 if (delete_all_nodes(mp_table
)) {
1736 printk(KERN_WARNING
"Unexpected merge points\n");