/*
 * lttng-filter-validator.c
 *
 * LTTng modules filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
37 /* merge point table node */
39 struct hlist_node node
;
41 /* Context at merge point */
43 unsigned long target_pc
;
47 struct hlist_head mp_head
[MERGE_POINT_TABLE_SIZE
];
51 int lttng_hash_match(struct mp_node
*mp_node
, unsigned long key_pc
)
53 if (mp_node
->target_pc
== key_pc
)
60 int merge_points_compare(const struct vstack
*stacka
,
61 const struct vstack
*stackb
)
65 if (stacka
->top
!= stackb
->top
)
67 len
= stacka
->top
+ 1;
68 WARN_ON_ONCE(len
< 0);
69 for (i
= 0; i
< len
; i
++) {
70 if (stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
77 int merge_point_add_check(struct mp_table
*mp_table
, unsigned long target_pc
,
78 const struct vstack
*stack
)
80 struct mp_node
*mp_node
;
81 unsigned long hash
= jhash_1word(target_pc
, 0);
82 struct hlist_head
*head
;
83 struct mp_node
*lookup_node
;
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
88 mp_node
= kzalloc(sizeof(struct mp_node
), GFP_KERNEL
);
91 mp_node
->target_pc
= target_pc
;
92 memcpy(&mp_node
->stack
, stack
, sizeof(mp_node
->stack
));
94 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
95 lttng_hlist_for_each_entry(lookup_node
, head
, node
) {
96 if (lttng_hash_match(lookup_node
, target_pc
)) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
106 if (merge_points_compare(stack
, &lookup_node
->stack
)) {
107 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
112 hlist_add_head(&mp_node
->node
, head
);
118 * Binary comparators use top of stack and top of stack -1.
121 int bin_op_compare_check(struct vstack
*stack
, const filter_opcode_t opcode
,
124 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
127 switch (vstack_ax(stack
)->type
) {
133 switch (vstack_bx(stack
)->type
) {
137 case REG_TYPE_UNKNOWN
:
141 case REG_STAR_GLOB_STRING
:
142 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
150 case REG_STAR_GLOB_STRING
:
151 switch (vstack_bx(stack
)->type
) {
155 case REG_TYPE_UNKNOWN
:
158 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
162 case REG_STAR_GLOB_STRING
:
168 switch (vstack_bx(stack
)->type
) {
172 case REG_TYPE_UNKNOWN
:
175 case REG_STAR_GLOB_STRING
:
181 case REG_TYPE_UNKNOWN
:
182 switch (vstack_bx(stack
)->type
) {
186 case REG_TYPE_UNKNOWN
:
188 case REG_STAR_GLOB_STRING
:
200 printk(KERN_WARNING
"empty stack for '%s' binary operator\n", str
);
204 printk(KERN_WARNING
"type mismatch for '%s' binary operator\n", str
);
208 printk(KERN_WARNING
"unknown type for '%s' binary operator\n", str
);
213 * Binary bitwise operators use top of stack and top of stack -1.
214 * Return 0 if typing is known to match, 1 if typing is dynamic
215 * (unknown), negative error value on error.
218 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
221 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
224 switch (vstack_ax(stack
)->type
) {
229 case REG_TYPE_UNKNOWN
:
230 switch (vstack_bx(stack
)->type
) {
234 case REG_TYPE_UNKNOWN
:
236 case REG_STAR_GLOB_STRING
:
242 switch (vstack_bx(stack
)->type
) {
246 case REG_TYPE_UNKNOWN
:
259 printk(KERN_WARNING
"empty stack for '%s' binary operator\n", str
);
263 printk(KERN_WARNING
"unknown type for '%s' binary operator\n", str
);
268 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
269 const struct get_symbol
*sym
)
271 const char *str
, *str_limit
;
274 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
277 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
278 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
279 len_limit
= str_limit
- str
;
280 if (strnlen(str
, len_limit
) == len_limit
)
286 * Validate bytecode range overflow within the validation pass.
287 * Called for each instruction encountered.
290 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
291 char *start_pc
, char *pc
)
295 switch (*(filter_opcode_t
*) pc
) {
296 case FILTER_OP_UNKNOWN
:
299 printk(KERN_WARNING
"unknown bytecode op %u\n",
300 (unsigned int) *(filter_opcode_t
*) pc
);
305 case FILTER_OP_RETURN
:
307 if (unlikely(pc
+ sizeof(struct return_op
)
308 > start_pc
+ bytecode
->len
)) {
319 case FILTER_OP_MINUS
:
320 case FILTER_OP_RSHIFT
:
321 case FILTER_OP_LSHIFT
:
322 case FILTER_OP_EQ_DOUBLE
:
323 case FILTER_OP_NE_DOUBLE
:
324 case FILTER_OP_GT_DOUBLE
:
325 case FILTER_OP_LT_DOUBLE
:
326 case FILTER_OP_GE_DOUBLE
:
327 case FILTER_OP_LE_DOUBLE
:
329 case FILTER_OP_EQ_DOUBLE_S64
:
330 case FILTER_OP_NE_DOUBLE_S64
:
331 case FILTER_OP_GT_DOUBLE_S64
:
332 case FILTER_OP_LT_DOUBLE_S64
:
333 case FILTER_OP_GE_DOUBLE_S64
:
334 case FILTER_OP_LE_DOUBLE_S64
:
335 case FILTER_OP_EQ_S64_DOUBLE
:
336 case FILTER_OP_NE_S64_DOUBLE
:
337 case FILTER_OP_GT_S64_DOUBLE
:
338 case FILTER_OP_LT_S64_DOUBLE
:
339 case FILTER_OP_GE_S64_DOUBLE
:
340 case FILTER_OP_LE_S64_DOUBLE
:
341 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
342 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
343 case FILTER_OP_LOAD_DOUBLE
:
344 case FILTER_OP_CAST_DOUBLE_TO_S64
:
345 case FILTER_OP_UNARY_PLUS_DOUBLE
:
346 case FILTER_OP_UNARY_MINUS_DOUBLE
:
347 case FILTER_OP_UNARY_NOT_DOUBLE
:
349 printk(KERN_WARNING
"unsupported bytecode op %u\n",
350 (unsigned int) *(filter_opcode_t
*) pc
);
361 case FILTER_OP_EQ_STRING
:
362 case FILTER_OP_NE_STRING
:
363 case FILTER_OP_GT_STRING
:
364 case FILTER_OP_LT_STRING
:
365 case FILTER_OP_GE_STRING
:
366 case FILTER_OP_LE_STRING
:
367 case FILTER_OP_EQ_STAR_GLOB_STRING
:
368 case FILTER_OP_NE_STAR_GLOB_STRING
:
369 case FILTER_OP_EQ_S64
:
370 case FILTER_OP_NE_S64
:
371 case FILTER_OP_GT_S64
:
372 case FILTER_OP_LT_S64
:
373 case FILTER_OP_GE_S64
:
374 case FILTER_OP_LE_S64
:
375 case FILTER_OP_BIT_AND
:
376 case FILTER_OP_BIT_OR
:
377 case FILTER_OP_BIT_XOR
:
379 if (unlikely(pc
+ sizeof(struct binary_op
)
380 > start_pc
+ bytecode
->len
)) {
387 case FILTER_OP_UNARY_PLUS
:
388 case FILTER_OP_UNARY_MINUS
:
389 case FILTER_OP_UNARY_NOT
:
390 case FILTER_OP_UNARY_PLUS_S64
:
391 case FILTER_OP_UNARY_MINUS_S64
:
392 case FILTER_OP_UNARY_NOT_S64
:
394 if (unlikely(pc
+ sizeof(struct unary_op
)
395 > start_pc
+ bytecode
->len
)) {
405 if (unlikely(pc
+ sizeof(struct logical_op
)
406 > start_pc
+ bytecode
->len
)) {
413 case FILTER_OP_LOAD_FIELD_REF
:
415 printk(KERN_WARNING
"Unknown field ref type\n");
420 /* get context ref */
421 case FILTER_OP_GET_CONTEXT_REF
:
423 printk(KERN_WARNING
"Unknown field ref type\n");
427 case FILTER_OP_LOAD_FIELD_REF_STRING
:
428 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
429 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
430 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
431 case FILTER_OP_LOAD_FIELD_REF_S64
:
432 case FILTER_OP_GET_CONTEXT_REF_STRING
:
433 case FILTER_OP_GET_CONTEXT_REF_S64
:
435 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
436 > start_pc
+ bytecode
->len
)) {
442 /* load from immediate operand */
443 case FILTER_OP_LOAD_STRING
:
444 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
446 struct load_op
*insn
= (struct load_op
*) pc
;
447 uint32_t str_len
, maxlen
;
449 if (unlikely(pc
+ sizeof(struct load_op
)
450 > start_pc
+ bytecode
->len
)) {
455 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
456 str_len
= strnlen(insn
->data
, maxlen
);
457 if (unlikely(str_len
>= maxlen
)) {
458 /* Final '\0' not found within range */
464 case FILTER_OP_LOAD_S64
:
466 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
467 > start_pc
+ bytecode
->len
)) {
473 case FILTER_OP_CAST_TO_S64
:
474 case FILTER_OP_CAST_NOP
:
476 if (unlikely(pc
+ sizeof(struct cast_op
)
477 > start_pc
+ bytecode
->len
)) {
484 * Instructions for recursive traversal through composed types.
486 case FILTER_OP_GET_CONTEXT_ROOT
:
487 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
488 case FILTER_OP_GET_PAYLOAD_ROOT
:
489 case FILTER_OP_LOAD_FIELD
:
490 case FILTER_OP_LOAD_FIELD_S8
:
491 case FILTER_OP_LOAD_FIELD_S16
:
492 case FILTER_OP_LOAD_FIELD_S32
:
493 case FILTER_OP_LOAD_FIELD_S64
:
494 case FILTER_OP_LOAD_FIELD_U8
:
495 case FILTER_OP_LOAD_FIELD_U16
:
496 case FILTER_OP_LOAD_FIELD_U32
:
497 case FILTER_OP_LOAD_FIELD_U64
:
498 case FILTER_OP_LOAD_FIELD_STRING
:
499 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
500 case FILTER_OP_LOAD_FIELD_DOUBLE
:
501 if (unlikely(pc
+ sizeof(struct load_op
)
502 > start_pc
+ bytecode
->len
)) {
507 case FILTER_OP_GET_SYMBOL
:
509 struct load_op
*insn
= (struct load_op
*) pc
;
510 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
512 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
513 > start_pc
+ bytecode
->len
)) {
516 ret
= validate_get_symbol(bytecode
, sym
);
520 case FILTER_OP_GET_SYMBOL_FIELD
:
521 printk(KERN_WARNING
"Unexpected get symbol field\n");
525 case FILTER_OP_GET_INDEX_U16
:
526 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
527 > start_pc
+ bytecode
->len
)) {
532 case FILTER_OP_GET_INDEX_U64
:
533 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
534 > start_pc
+ bytecode
->len
)) {
544 unsigned long delete_all_nodes(struct mp_table
*mp_table
)
546 struct mp_node
*mp_node
;
547 struct hlist_node
*tmp
;
548 unsigned long nr_nodes
= 0;
551 for (i
= 0; i
< MERGE_POINT_TABLE_SIZE
; i
++) {
552 struct hlist_head
*head
;
554 head
= &mp_table
->mp_head
[i
];
555 lttng_hlist_for_each_entry_safe(mp_node
, tmp
, head
, node
) {
569 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
570 struct vstack
*stack
,
575 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
578 case FILTER_OP_UNKNOWN
:
581 printk(KERN_WARNING
"unknown bytecode op %u\n",
582 (unsigned int) *(filter_opcode_t
*) pc
);
587 case FILTER_OP_RETURN
:
597 case FILTER_OP_MINUS
:
598 case FILTER_OP_RSHIFT
:
599 case FILTER_OP_LSHIFT
:
601 case FILTER_OP_EQ_DOUBLE
:
602 case FILTER_OP_NE_DOUBLE
:
603 case FILTER_OP_GT_DOUBLE
:
604 case FILTER_OP_LT_DOUBLE
:
605 case FILTER_OP_GE_DOUBLE
:
606 case FILTER_OP_LE_DOUBLE
:
607 case FILTER_OP_EQ_DOUBLE_S64
:
608 case FILTER_OP_NE_DOUBLE_S64
:
609 case FILTER_OP_GT_DOUBLE_S64
:
610 case FILTER_OP_LT_DOUBLE_S64
:
611 case FILTER_OP_GE_DOUBLE_S64
:
612 case FILTER_OP_LE_DOUBLE_S64
:
613 case FILTER_OP_EQ_S64_DOUBLE
:
614 case FILTER_OP_NE_S64_DOUBLE
:
615 case FILTER_OP_GT_S64_DOUBLE
:
616 case FILTER_OP_LT_S64_DOUBLE
:
617 case FILTER_OP_GE_S64_DOUBLE
:
618 case FILTER_OP_LE_S64_DOUBLE
:
619 case FILTER_OP_UNARY_PLUS_DOUBLE
:
620 case FILTER_OP_UNARY_MINUS_DOUBLE
:
621 case FILTER_OP_UNARY_NOT_DOUBLE
:
622 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
623 case FILTER_OP_LOAD_DOUBLE
:
624 case FILTER_OP_CAST_DOUBLE_TO_S64
:
625 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
627 printk(KERN_WARNING
"unsupported bytecode op %u\n",
628 (unsigned int) *(filter_opcode_t
*) pc
);
635 ret
= bin_op_compare_check(stack
, opcode
, "==");
642 ret
= bin_op_compare_check(stack
, opcode
, "!=");
649 ret
= bin_op_compare_check(stack
, opcode
, ">");
656 ret
= bin_op_compare_check(stack
, opcode
, "<");
663 ret
= bin_op_compare_check(stack
, opcode
, ">=");
670 ret
= bin_op_compare_check(stack
, opcode
, "<=");
676 case FILTER_OP_EQ_STRING
:
677 case FILTER_OP_NE_STRING
:
678 case FILTER_OP_GT_STRING
:
679 case FILTER_OP_LT_STRING
:
680 case FILTER_OP_GE_STRING
:
681 case FILTER_OP_LE_STRING
:
683 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
684 printk(KERN_WARNING
"Empty stack\n");
688 if (vstack_ax(stack
)->type
!= REG_STRING
689 || vstack_bx(stack
)->type
!= REG_STRING
) {
690 printk(KERN_WARNING
"Unexpected register type for string comparator\n");
698 case FILTER_OP_EQ_STAR_GLOB_STRING
:
699 case FILTER_OP_NE_STAR_GLOB_STRING
:
701 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
702 printk(KERN_WARNING
"Empty stack\n");
706 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
707 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
708 printk(KERN_WARNING
"Unexpected register type for globbing pattern comparator\n");
715 case FILTER_OP_EQ_S64
:
716 case FILTER_OP_NE_S64
:
717 case FILTER_OP_GT_S64
:
718 case FILTER_OP_LT_S64
:
719 case FILTER_OP_GE_S64
:
720 case FILTER_OP_LE_S64
:
722 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
723 printk(KERN_WARNING
"Empty stack\n");
727 if (vstack_ax(stack
)->type
!= REG_S64
728 || vstack_bx(stack
)->type
!= REG_S64
) {
729 printk(KERN_WARNING
"Unexpected register type for s64 comparator\n");
736 case FILTER_OP_BIT_AND
:
737 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
741 case FILTER_OP_BIT_OR
:
742 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
746 case FILTER_OP_BIT_XOR
:
747 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
753 case FILTER_OP_UNARY_PLUS
:
754 case FILTER_OP_UNARY_MINUS
:
755 case FILTER_OP_UNARY_NOT
:
757 if (!vstack_ax(stack
)) {
758 printk(KERN_WARNING
"Empty stack\n");
762 switch (vstack_ax(stack
)->type
) {
765 printk(KERN_WARNING
"unknown register type\n");
770 case REG_STAR_GLOB_STRING
:
771 printk(KERN_WARNING
"Unary op can only be applied to numeric or floating point registers\n");
775 case REG_TYPE_UNKNOWN
:
781 case FILTER_OP_UNARY_PLUS_S64
:
782 case FILTER_OP_UNARY_MINUS_S64
:
783 case FILTER_OP_UNARY_NOT_S64
:
785 if (!vstack_ax(stack
)) {
786 printk(KERN_WARNING
"Empty stack\n");
790 if (vstack_ax(stack
)->type
!= REG_S64
) {
791 printk(KERN_WARNING
"Invalid register type\n");
802 struct logical_op
*insn
= (struct logical_op
*) pc
;
804 if (!vstack_ax(stack
)) {
805 printk(KERN_WARNING
"Empty stack\n");
809 if (vstack_ax(stack
)->type
!= REG_S64
) {
810 printk(KERN_WARNING
"Logical comparator expects S64 register\n");
815 dbg_printk("Validate jumping to bytecode offset %u\n",
816 (unsigned int) insn
->skip_offset
);
817 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
818 printk(KERN_WARNING
"Loops are not allowed in bytecode\n");
826 case FILTER_OP_LOAD_FIELD_REF
:
828 printk(KERN_WARNING
"Unknown field ref type\n");
832 case FILTER_OP_LOAD_FIELD_REF_STRING
:
833 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
834 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
835 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
837 struct load_op
*insn
= (struct load_op
*) pc
;
838 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
840 dbg_printk("Validate load field ref offset %u type string\n",
844 case FILTER_OP_LOAD_FIELD_REF_S64
:
846 struct load_op
*insn
= (struct load_op
*) pc
;
847 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
849 dbg_printk("Validate load field ref offset %u type s64\n",
854 /* load from immediate operand */
855 case FILTER_OP_LOAD_STRING
:
856 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
861 case FILTER_OP_LOAD_S64
:
866 case FILTER_OP_CAST_TO_S64
:
868 struct cast_op
*insn
= (struct cast_op
*) pc
;
870 if (!vstack_ax(stack
)) {
871 printk(KERN_WARNING
"Empty stack\n");
875 switch (vstack_ax(stack
)->type
) {
878 printk(KERN_WARNING
"unknown register type\n");
883 case REG_STAR_GLOB_STRING
:
884 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
890 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
891 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
892 printk(KERN_WARNING
"Cast expects double\n");
899 case FILTER_OP_CAST_NOP
:
904 /* get context ref */
905 case FILTER_OP_GET_CONTEXT_REF
:
907 printk(KERN_WARNING
"Unknown get context ref type\n");
911 case FILTER_OP_GET_CONTEXT_REF_STRING
:
913 struct load_op
*insn
= (struct load_op
*) pc
;
914 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
916 dbg_printk("Validate get context ref offset %u type string\n",
920 case FILTER_OP_GET_CONTEXT_REF_S64
:
922 struct load_op
*insn
= (struct load_op
*) pc
;
923 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
925 dbg_printk("Validate get context ref offset %u type s64\n",
931 * Instructions for recursive traversal through composed types.
933 case FILTER_OP_GET_CONTEXT_ROOT
:
935 dbg_printk("Validate get context root\n");
938 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
940 dbg_printk("Validate get app context root\n");
943 case FILTER_OP_GET_PAYLOAD_ROOT
:
945 dbg_printk("Validate get payload root\n");
948 case FILTER_OP_LOAD_FIELD
:
951 * We tolerate that field type is unknown at validation,
952 * because we are performing the load specialization in
953 * a phase after validation.
955 dbg_printk("Validate load field\n");
958 case FILTER_OP_LOAD_FIELD_S8
:
960 dbg_printk("Validate load field s8\n");
963 case FILTER_OP_LOAD_FIELD_S16
:
965 dbg_printk("Validate load field s16\n");
968 case FILTER_OP_LOAD_FIELD_S32
:
970 dbg_printk("Validate load field s32\n");
973 case FILTER_OP_LOAD_FIELD_S64
:
975 dbg_printk("Validate load field s64\n");
978 case FILTER_OP_LOAD_FIELD_U8
:
980 dbg_printk("Validate load field u8\n");
983 case FILTER_OP_LOAD_FIELD_U16
:
985 dbg_printk("Validate load field u16\n");
988 case FILTER_OP_LOAD_FIELD_U32
:
990 dbg_printk("Validate load field u32\n");
993 case FILTER_OP_LOAD_FIELD_U64
:
995 dbg_printk("Validate load field u64\n");
998 case FILTER_OP_LOAD_FIELD_STRING
:
1000 dbg_printk("Validate load field string\n");
1003 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1005 dbg_printk("Validate load field sequence\n");
1008 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1010 dbg_printk("Validate load field double\n");
1014 case FILTER_OP_GET_SYMBOL
:
1016 struct load_op
*insn
= (struct load_op
*) pc
;
1017 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1019 dbg_printk("Validate get symbol offset %u\n", sym
->offset
);
1023 case FILTER_OP_GET_SYMBOL_FIELD
:
1025 struct load_op
*insn
= (struct load_op
*) pc
;
1026 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1028 dbg_printk("Validate get symbol field offset %u\n", sym
->offset
);
1032 case FILTER_OP_GET_INDEX_U16
:
1034 struct load_op
*insn
= (struct load_op
*) pc
;
1035 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1037 dbg_printk("Validate get index u16 index %u\n", get_index
->index
);
1041 case FILTER_OP_GET_INDEX_U64
:
1043 struct load_op
*insn
= (struct load_op
*) pc
;
1044 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1046 dbg_printk("Validate get index u64 index %llu\n",
1047 (unsigned long long) get_index
->index
);
1061 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1062 struct mp_table
*mp_table
,
1063 struct vstack
*stack
,
1068 unsigned long target_pc
= pc
- start_pc
;
1070 struct hlist_head
*head
;
1071 struct mp_node
*mp_node
;
1073 /* Validate the context resulting from the previous instruction */
1074 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1078 /* Validate merge points */
1079 hash
= jhash_1word(target_pc
, 0);
1080 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
1081 lttng_hlist_for_each_entry(mp_node
, head
, node
) {
1082 if (lttng_hash_match(mp_node
, target_pc
)) {
1088 dbg_printk("Filter: validate merge point at offset %lu\n",
1090 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1091 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
1095 /* Once validated, we can remove the merge point */
1096 dbg_printk("Filter: remove merge point at offset %lu\n",
1098 hlist_del(&mp_node
->node
);
1105 * >0: going to next insn.
1106 * 0: success, stop iteration.
1110 int exec_insn(struct bytecode_runtime
*bytecode
,
1111 struct mp_table
*mp_table
,
1112 struct vstack
*stack
,
1117 char *next_pc
= *_next_pc
;
1119 switch (*(filter_opcode_t
*) pc
) {
1120 case FILTER_OP_UNKNOWN
:
1123 printk(KERN_WARNING
"unknown bytecode op %u\n",
1124 (unsigned int) *(filter_opcode_t
*) pc
);
1129 case FILTER_OP_RETURN
:
1131 if (!vstack_ax(stack
)) {
1132 printk(KERN_WARNING
"Empty stack\n");
1136 switch (vstack_ax(stack
)->type
) {
1138 case REG_TYPE_UNKNOWN
:
1141 printk(KERN_WARNING
"Unexpected register type %d at end of bytecode\n",
1142 (int) vstack_ax(stack
)->type
);
1155 case FILTER_OP_PLUS
:
1156 case FILTER_OP_MINUS
:
1157 case FILTER_OP_RSHIFT
:
1158 case FILTER_OP_LSHIFT
:
1159 /* Floating point */
1160 case FILTER_OP_EQ_DOUBLE
:
1161 case FILTER_OP_NE_DOUBLE
:
1162 case FILTER_OP_GT_DOUBLE
:
1163 case FILTER_OP_LT_DOUBLE
:
1164 case FILTER_OP_GE_DOUBLE
:
1165 case FILTER_OP_LE_DOUBLE
:
1166 case FILTER_OP_EQ_DOUBLE_S64
:
1167 case FILTER_OP_NE_DOUBLE_S64
:
1168 case FILTER_OP_GT_DOUBLE_S64
:
1169 case FILTER_OP_LT_DOUBLE_S64
:
1170 case FILTER_OP_GE_DOUBLE_S64
:
1171 case FILTER_OP_LE_DOUBLE_S64
:
1172 case FILTER_OP_EQ_S64_DOUBLE
:
1173 case FILTER_OP_NE_S64_DOUBLE
:
1174 case FILTER_OP_GT_S64_DOUBLE
:
1175 case FILTER_OP_LT_S64_DOUBLE
:
1176 case FILTER_OP_GE_S64_DOUBLE
:
1177 case FILTER_OP_LE_S64_DOUBLE
:
1178 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1179 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1180 case FILTER_OP_UNARY_NOT_DOUBLE
:
1181 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1182 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1183 case FILTER_OP_LOAD_DOUBLE
:
1184 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1186 printk(KERN_WARNING
"unsupported bytecode op %u\n",
1187 (unsigned int) *(filter_opcode_t
*) pc
);
1198 case FILTER_OP_EQ_STRING
:
1199 case FILTER_OP_NE_STRING
:
1200 case FILTER_OP_GT_STRING
:
1201 case FILTER_OP_LT_STRING
:
1202 case FILTER_OP_GE_STRING
:
1203 case FILTER_OP_LE_STRING
:
1204 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1205 case FILTER_OP_NE_STAR_GLOB_STRING
:
1206 case FILTER_OP_EQ_S64
:
1207 case FILTER_OP_NE_S64
:
1208 case FILTER_OP_GT_S64
:
1209 case FILTER_OP_LT_S64
:
1210 case FILTER_OP_GE_S64
:
1211 case FILTER_OP_LE_S64
:
1212 case FILTER_OP_BIT_AND
:
1213 case FILTER_OP_BIT_OR
:
1214 case FILTER_OP_BIT_XOR
:
1217 if (vstack_pop(stack
)) {
1221 if (!vstack_ax(stack
)) {
1222 printk(KERN_WARNING
"Empty stack\n");
1226 switch (vstack_ax(stack
)->type
) {
1230 case REG_STAR_GLOB_STRING
:
1231 case REG_TYPE_UNKNOWN
:
1234 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1235 (int) vstack_ax(stack
)->type
);
1240 vstack_ax(stack
)->type
= REG_S64
;
1241 next_pc
+= sizeof(struct binary_op
);
1246 case FILTER_OP_UNARY_PLUS
:
1247 case FILTER_OP_UNARY_MINUS
:
1250 if (!vstack_ax(stack
)) {
1251 printk(KERN_WARNING
"Empty stack\n\n");
1255 switch (vstack_ax(stack
)->type
) {
1257 case REG_TYPE_UNKNOWN
:
1260 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1261 (int) vstack_ax(stack
)->type
);
1266 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1267 next_pc
+= sizeof(struct unary_op
);
1271 case FILTER_OP_UNARY_PLUS_S64
:
1272 case FILTER_OP_UNARY_MINUS_S64
:
1273 case FILTER_OP_UNARY_NOT_S64
:
1276 if (!vstack_ax(stack
)) {
1277 printk(KERN_WARNING
"Empty stack\n\n");
1281 switch (vstack_ax(stack
)->type
) {
1285 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1286 (int) vstack_ax(stack
)->type
);
1291 vstack_ax(stack
)->type
= REG_S64
;
1292 next_pc
+= sizeof(struct unary_op
);
1296 case FILTER_OP_UNARY_NOT
:
1299 if (!vstack_ax(stack
)) {
1300 printk(KERN_WARNING
"Empty stack\n\n");
1304 switch (vstack_ax(stack
)->type
) {
1306 case REG_TYPE_UNKNOWN
:
1309 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1310 (int) vstack_ax(stack
)->type
);
1315 vstack_ax(stack
)->type
= REG_S64
;
1316 next_pc
+= sizeof(struct unary_op
);
1324 struct logical_op
*insn
= (struct logical_op
*) pc
;
1327 /* Add merge point to table */
1328 merge_ret
= merge_point_add_check(mp_table
,
1329 insn
->skip_offset
, stack
);
1335 if (!vstack_ax(stack
)) {
1336 printk(KERN_WARNING
"Empty stack\n\n");
1340 /* There is always a cast-to-s64 operation before a or/and op. */
1341 switch (vstack_ax(stack
)->type
) {
1345 printk(KERN_WARNING
"Incorrect register type %d for operation\n",
1346 (int) vstack_ax(stack
)->type
);
1351 /* Continue to next instruction */
1352 /* Pop 1 when jump not taken */
1353 if (vstack_pop(stack
)) {
1357 next_pc
+= sizeof(struct logical_op
);
1361 /* load field ref */
1362 case FILTER_OP_LOAD_FIELD_REF
:
1364 printk(KERN_WARNING
"Unknown field ref type\n");
1368 /* get context ref */
1369 case FILTER_OP_GET_CONTEXT_REF
:
1371 printk(KERN_WARNING
"Unknown get context ref type\n");
1375 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1376 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1377 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1378 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
1379 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
1381 if (vstack_push(stack
)) {
1385 vstack_ax(stack
)->type
= REG_STRING
;
1386 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1389 case FILTER_OP_LOAD_FIELD_REF_S64
:
1390 case FILTER_OP_GET_CONTEXT_REF_S64
:
1392 if (vstack_push(stack
)) {
1396 vstack_ax(stack
)->type
= REG_S64
;
1397 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1401 /* load from immediate operand */
1402 case FILTER_OP_LOAD_STRING
:
1404 struct load_op
*insn
= (struct load_op
*) pc
;
1406 if (vstack_push(stack
)) {
1410 vstack_ax(stack
)->type
= REG_STRING
;
1411 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1415 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1417 struct load_op
*insn
= (struct load_op
*) pc
;
1419 if (vstack_push(stack
)) {
1423 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1424 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1428 case FILTER_OP_LOAD_S64
:
1430 if (vstack_push(stack
)) {
1434 vstack_ax(stack
)->type
= REG_S64
;
1435 next_pc
+= sizeof(struct load_op
)
1436 + sizeof(struct literal_numeric
);
1440 case FILTER_OP_CAST_TO_S64
:
1443 if (!vstack_ax(stack
)) {
1444 printk(KERN_WARNING
"Empty stack\n");
1448 switch (vstack_ax(stack
)->type
) {
1451 case REG_TYPE_UNKNOWN
:
1454 printk(KERN_WARNING
"Incorrect register type %d for cast\n",
1455 (int) vstack_ax(stack
)->type
);
1459 vstack_ax(stack
)->type
= REG_S64
;
1460 next_pc
+= sizeof(struct cast_op
);
1463 case FILTER_OP_CAST_NOP
:
1465 next_pc
+= sizeof(struct cast_op
);
1470 * Instructions for recursive traversal through composed types.
1472 case FILTER_OP_GET_CONTEXT_ROOT
:
1473 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1474 case FILTER_OP_GET_PAYLOAD_ROOT
:
1476 if (vstack_push(stack
)) {
1480 vstack_ax(stack
)->type
= REG_PTR
;
1481 next_pc
+= sizeof(struct load_op
);
1485 case FILTER_OP_LOAD_FIELD
:
1488 if (!vstack_ax(stack
)) {
1489 printk(KERN_WARNING
"Empty stack\n\n");
1493 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1494 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1498 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1499 next_pc
+= sizeof(struct load_op
);
1503 case FILTER_OP_LOAD_FIELD_S8
:
1504 case FILTER_OP_LOAD_FIELD_S16
:
1505 case FILTER_OP_LOAD_FIELD_S32
:
1506 case FILTER_OP_LOAD_FIELD_S64
:
1507 case FILTER_OP_LOAD_FIELD_U8
:
1508 case FILTER_OP_LOAD_FIELD_U16
:
1509 case FILTER_OP_LOAD_FIELD_U32
:
1510 case FILTER_OP_LOAD_FIELD_U64
:
1513 if (!vstack_ax(stack
)) {
1514 printk(KERN_WARNING
"Empty stack\n\n");
1518 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1519 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1523 vstack_ax(stack
)->type
= REG_S64
;
1524 next_pc
+= sizeof(struct load_op
);
1528 case FILTER_OP_LOAD_FIELD_STRING
:
1529 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1532 if (!vstack_ax(stack
)) {
1533 printk(KERN_WARNING
"Empty stack\n\n");
1537 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1538 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1542 vstack_ax(stack
)->type
= REG_STRING
;
1543 next_pc
+= sizeof(struct load_op
);
1547 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1550 if (!vstack_ax(stack
)) {
1551 printk(KERN_WARNING
"Empty stack\n\n");
1555 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1556 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1560 vstack_ax(stack
)->type
= REG_DOUBLE
;
1561 next_pc
+= sizeof(struct load_op
);
1565 case FILTER_OP_GET_SYMBOL
:
1566 case FILTER_OP_GET_SYMBOL_FIELD
:
1569 if (!vstack_ax(stack
)) {
1570 printk(KERN_WARNING
"Empty stack\n\n");
1574 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1575 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1579 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1583 case FILTER_OP_GET_INDEX_U16
:
1586 if (!vstack_ax(stack
)) {
1587 printk(KERN_WARNING
"Empty stack\n\n");
1591 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1592 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1596 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1600 case FILTER_OP_GET_INDEX_U64
:
1603 if (!vstack_ax(stack
)) {
1604 printk(KERN_WARNING
"Empty stack\n\n");
1608 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1609 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1613 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1619 *_next_pc
= next_pc
;
1624 * Never called concurrently (hash seed is shared).
1626 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1628 struct mp_table
*mp_table
;
1629 char *pc
, *next_pc
, *start_pc
;
1631 struct vstack stack
;
1633 vstack_init(&stack
);
1635 mp_table
= kzalloc(sizeof(*mp_table
), GFP_KERNEL
);
1637 printk(KERN_WARNING
"Error allocating hash table for bytecode validation\n");
1640 start_pc
= &bytecode
->code
[0];
1641 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1643 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1646 printk(KERN_WARNING
"filter bytecode overflow\n");
1649 dbg_printk("Validating op %s (%u)\n",
1650 lttng_filter_print_op((unsigned int) *(filter_opcode_t
*) pc
),
1651 (unsigned int) *(filter_opcode_t
*) pc
);
1654 * For each instruction, validate the current context
1655 * (traversal of entire execution flow), and validate
1656 * all merge points targeting this instruction.
1658 ret
= validate_instruction_all_contexts(bytecode
, mp_table
,
1659 &stack
, start_pc
, pc
);
1662 ret
= exec_insn(bytecode
, mp_table
, &stack
, &next_pc
, pc
);
1667 if (delete_all_nodes(mp_table
)) {
1669 printk(KERN_WARNING
"Unexpected merge points\n");