/*
 * lttng-filter-validator.c
 *
 * LTTng modules filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
37 /* merge point table node */
39 struct hlist_node node
;
41 /* Context at merge point */
43 unsigned long target_pc
;
47 struct hlist_head mp_head
[MERGE_POINT_TABLE_SIZE
];
51 int lttng_hash_match(struct mp_node
*mp_node
, unsigned long key_pc
)
53 if (mp_node
->target_pc
== key_pc
)
60 int merge_points_compare(const struct vstack
*stacka
,
61 const struct vstack
*stackb
)
65 if (stacka
->top
!= stackb
->top
)
67 len
= stacka
->top
+ 1;
68 WARN_ON_ONCE(len
< 0);
69 for (i
= 0; i
< len
; i
++) {
70 if (stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
77 int merge_point_add_check(struct mp_table
*mp_table
, unsigned long target_pc
,
78 const struct vstack
*stack
)
80 struct mp_node
*mp_node
;
81 unsigned long hash
= jhash_1word(target_pc
, 0);
82 struct hlist_head
*head
;
83 struct mp_node
*lookup_node
;
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
88 mp_node
= kzalloc(sizeof(struct mp_node
), GFP_KERNEL
);
91 mp_node
->target_pc
= target_pc
;
92 memcpy(&mp_node
->stack
, stack
, sizeof(mp_node
->stack
));
94 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
95 lttng_hlist_for_each_entry(lookup_node
, head
, node
) {
96 if (lttng_hash_match(lookup_node
, target_pc
)) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
106 if (merge_points_compare(stack
, &lookup_node
->stack
)) {
107 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
112 hlist_add_head(&mp_node
->node
, head
);
118 * Binary comparators use top of stack and top of stack -1.
121 int bin_op_compare_check(struct vstack
*stack
, const filter_opcode_t opcode
,
124 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
127 switch (vstack_ax(stack
)->type
) {
133 switch (vstack_bx(stack
)->type
) {
140 case REG_STAR_GLOB_STRING
:
141 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
149 case REG_STAR_GLOB_STRING
:
150 switch (vstack_bx(stack
)->type
) {
156 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
160 case REG_STAR_GLOB_STRING
:
166 switch (vstack_bx(stack
)->type
) {
172 case REG_STAR_GLOB_STRING
:
186 printk(KERN_WARNING
"type mismatch for '%s' binary operator\n", str
);
191 * Validate bytecode range overflow within the validation pass.
192 * Called for each instruction encountered.
195 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
196 char *start_pc
, char *pc
)
200 switch (*(filter_opcode_t
*) pc
) {
201 case FILTER_OP_UNKNOWN
:
204 printk(KERN_WARNING
"unknown bytecode op %u\n",
205 (unsigned int) *(filter_opcode_t
*) pc
);
210 case FILTER_OP_RETURN
:
212 if (unlikely(pc
+ sizeof(struct return_op
)
213 > start_pc
+ bytecode
->len
)) {
224 case FILTER_OP_MINUS
:
225 case FILTER_OP_RSHIFT
:
226 case FILTER_OP_LSHIFT
:
227 case FILTER_OP_BIN_AND
:
228 case FILTER_OP_BIN_OR
:
229 case FILTER_OP_BIN_XOR
:
230 case FILTER_OP_EQ_DOUBLE
:
231 case FILTER_OP_NE_DOUBLE
:
232 case FILTER_OP_GT_DOUBLE
:
233 case FILTER_OP_LT_DOUBLE
:
234 case FILTER_OP_GE_DOUBLE
:
235 case FILTER_OP_LE_DOUBLE
:
237 case FILTER_OP_EQ_DOUBLE_S64
:
238 case FILTER_OP_NE_DOUBLE_S64
:
239 case FILTER_OP_GT_DOUBLE_S64
:
240 case FILTER_OP_LT_DOUBLE_S64
:
241 case FILTER_OP_GE_DOUBLE_S64
:
242 case FILTER_OP_LE_DOUBLE_S64
:
243 case FILTER_OP_EQ_S64_DOUBLE
:
244 case FILTER_OP_NE_S64_DOUBLE
:
245 case FILTER_OP_GT_S64_DOUBLE
:
246 case FILTER_OP_LT_S64_DOUBLE
:
247 case FILTER_OP_GE_S64_DOUBLE
:
248 case FILTER_OP_LE_S64_DOUBLE
:
249 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
250 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
251 case FILTER_OP_LOAD_DOUBLE
:
252 case FILTER_OP_CAST_DOUBLE_TO_S64
:
253 case FILTER_OP_UNARY_PLUS_DOUBLE
:
254 case FILTER_OP_UNARY_MINUS_DOUBLE
:
255 case FILTER_OP_UNARY_NOT_DOUBLE
:
257 printk(KERN_WARNING
"unsupported bytecode op %u\n",
258 (unsigned int) *(filter_opcode_t
*) pc
);
269 case FILTER_OP_EQ_STRING
:
270 case FILTER_OP_NE_STRING
:
271 case FILTER_OP_GT_STRING
:
272 case FILTER_OP_LT_STRING
:
273 case FILTER_OP_GE_STRING
:
274 case FILTER_OP_LE_STRING
:
275 case FILTER_OP_EQ_STAR_GLOB_STRING
:
276 case FILTER_OP_NE_STAR_GLOB_STRING
:
277 case FILTER_OP_EQ_S64
:
278 case FILTER_OP_NE_S64
:
279 case FILTER_OP_GT_S64
:
280 case FILTER_OP_LT_S64
:
281 case FILTER_OP_GE_S64
:
282 case FILTER_OP_LE_S64
:
284 if (unlikely(pc
+ sizeof(struct binary_op
)
285 > start_pc
+ bytecode
->len
)) {
292 case FILTER_OP_UNARY_PLUS
:
293 case FILTER_OP_UNARY_MINUS
:
294 case FILTER_OP_UNARY_NOT
:
295 case FILTER_OP_UNARY_PLUS_S64
:
296 case FILTER_OP_UNARY_MINUS_S64
:
297 case FILTER_OP_UNARY_NOT_S64
:
299 if (unlikely(pc
+ sizeof(struct unary_op
)
300 > start_pc
+ bytecode
->len
)) {
310 if (unlikely(pc
+ sizeof(struct logical_op
)
311 > start_pc
+ bytecode
->len
)) {
318 case FILTER_OP_LOAD_FIELD_REF
:
320 printk(KERN_WARNING
"Unknown field ref type\n");
324 /* get context ref */
325 case FILTER_OP_GET_CONTEXT_REF
:
327 printk(KERN_WARNING
"Unknown field ref type\n");
331 case FILTER_OP_LOAD_FIELD_REF_STRING
:
332 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
333 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
334 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
335 case FILTER_OP_LOAD_FIELD_REF_S64
:
336 case FILTER_OP_GET_CONTEXT_REF_STRING
:
337 case FILTER_OP_GET_CONTEXT_REF_S64
:
339 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
340 > start_pc
+ bytecode
->len
)) {
346 /* load from immediate operand */
347 case FILTER_OP_LOAD_STRING
:
348 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
350 struct load_op
*insn
= (struct load_op
*) pc
;
351 uint32_t str_len
, maxlen
;
353 if (unlikely(pc
+ sizeof(struct load_op
)
354 > start_pc
+ bytecode
->len
)) {
359 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
360 str_len
= strnlen(insn
->data
, maxlen
);
361 if (unlikely(str_len
>= maxlen
)) {
362 /* Final '\0' not found within range */
368 case FILTER_OP_LOAD_S64
:
370 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
371 > start_pc
+ bytecode
->len
)) {
377 case FILTER_OP_CAST_TO_S64
:
378 case FILTER_OP_CAST_NOP
:
380 if (unlikely(pc
+ sizeof(struct cast_op
)
381 > start_pc
+ bytecode
->len
)) {
393 unsigned long delete_all_nodes(struct mp_table
*mp_table
)
395 struct mp_node
*mp_node
;
396 struct hlist_node
*tmp
;
397 unsigned long nr_nodes
= 0;
400 for (i
= 0; i
< MERGE_POINT_TABLE_SIZE
; i
++) {
401 struct hlist_head
*head
;
403 head
= &mp_table
->mp_head
[i
];
404 lttng_hlist_for_each_entry_safe(mp_node
, tmp
, head
, node
) {
418 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
419 struct vstack
*stack
,
424 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
427 case FILTER_OP_UNKNOWN
:
430 printk(KERN_WARNING
"unknown bytecode op %u\n",
431 (unsigned int) *(filter_opcode_t
*) pc
);
436 case FILTER_OP_RETURN
:
446 case FILTER_OP_MINUS
:
447 case FILTER_OP_RSHIFT
:
448 case FILTER_OP_LSHIFT
:
449 case FILTER_OP_BIN_AND
:
450 case FILTER_OP_BIN_OR
:
451 case FILTER_OP_BIN_XOR
:
453 case FILTER_OP_EQ_DOUBLE
:
454 case FILTER_OP_NE_DOUBLE
:
455 case FILTER_OP_GT_DOUBLE
:
456 case FILTER_OP_LT_DOUBLE
:
457 case FILTER_OP_GE_DOUBLE
:
458 case FILTER_OP_LE_DOUBLE
:
459 case FILTER_OP_EQ_DOUBLE_S64
:
460 case FILTER_OP_NE_DOUBLE_S64
:
461 case FILTER_OP_GT_DOUBLE_S64
:
462 case FILTER_OP_LT_DOUBLE_S64
:
463 case FILTER_OP_GE_DOUBLE_S64
:
464 case FILTER_OP_LE_DOUBLE_S64
:
465 case FILTER_OP_EQ_S64_DOUBLE
:
466 case FILTER_OP_NE_S64_DOUBLE
:
467 case FILTER_OP_GT_S64_DOUBLE
:
468 case FILTER_OP_LT_S64_DOUBLE
:
469 case FILTER_OP_GE_S64_DOUBLE
:
470 case FILTER_OP_LE_S64_DOUBLE
:
471 case FILTER_OP_UNARY_PLUS_DOUBLE
:
472 case FILTER_OP_UNARY_MINUS_DOUBLE
:
473 case FILTER_OP_UNARY_NOT_DOUBLE
:
474 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
475 case FILTER_OP_LOAD_DOUBLE
:
476 case FILTER_OP_CAST_DOUBLE_TO_S64
:
477 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
479 printk(KERN_WARNING
"unsupported bytecode op %u\n",
480 (unsigned int) *(filter_opcode_t
*) pc
);
487 ret
= bin_op_compare_check(stack
, opcode
, "==");
494 ret
= bin_op_compare_check(stack
, opcode
, "!=");
501 ret
= bin_op_compare_check(stack
, opcode
, ">");
508 ret
= bin_op_compare_check(stack
, opcode
, "<");
515 ret
= bin_op_compare_check(stack
, opcode
, ">=");
522 ret
= bin_op_compare_check(stack
, opcode
, "<=");
528 case FILTER_OP_EQ_STRING
:
529 case FILTER_OP_NE_STRING
:
530 case FILTER_OP_GT_STRING
:
531 case FILTER_OP_LT_STRING
:
532 case FILTER_OP_GE_STRING
:
533 case FILTER_OP_LE_STRING
:
535 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
536 printk(KERN_WARNING
"Empty stack\n");
540 if (vstack_ax(stack
)->type
!= REG_STRING
541 || vstack_bx(stack
)->type
!= REG_STRING
) {
542 printk(KERN_WARNING
"Unexpected register type for string comparator\n");
550 case FILTER_OP_EQ_STAR_GLOB_STRING
:
551 case FILTER_OP_NE_STAR_GLOB_STRING
:
553 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
554 printk(KERN_WARNING
"Empty stack\n");
558 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
559 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
560 printk(KERN_WARNING
"Unexpected register type for globbing pattern comparator\n");
567 case FILTER_OP_EQ_S64
:
568 case FILTER_OP_NE_S64
:
569 case FILTER_OP_GT_S64
:
570 case FILTER_OP_LT_S64
:
571 case FILTER_OP_GE_S64
:
572 case FILTER_OP_LE_S64
:
574 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
575 printk(KERN_WARNING
"Empty stack\n");
579 if (vstack_ax(stack
)->type
!= REG_S64
580 || vstack_bx(stack
)->type
!= REG_S64
) {
581 printk(KERN_WARNING
"Unexpected register type for s64 comparator\n");
589 case FILTER_OP_UNARY_PLUS
:
590 case FILTER_OP_UNARY_MINUS
:
591 case FILTER_OP_UNARY_NOT
:
593 if (!vstack_ax(stack
)) {
594 printk(KERN_WARNING
"Empty stack\n");
598 switch (vstack_ax(stack
)->type
) {
601 printk(KERN_WARNING
"unknown register type\n");
606 case REG_STAR_GLOB_STRING
:
607 printk(KERN_WARNING
"Unary op can only be applied to numeric or floating point registers\n");
616 case FILTER_OP_UNARY_PLUS_S64
:
617 case FILTER_OP_UNARY_MINUS_S64
:
618 case FILTER_OP_UNARY_NOT_S64
:
620 if (!vstack_ax(stack
)) {
621 printk(KERN_WARNING
"Empty stack\n");
625 if (vstack_ax(stack
)->type
!= REG_S64
) {
626 printk(KERN_WARNING
"Invalid register type\n");
637 struct logical_op
*insn
= (struct logical_op
*) pc
;
639 if (!vstack_ax(stack
)) {
640 printk(KERN_WARNING
"Empty stack\n");
644 if (vstack_ax(stack
)->type
!= REG_S64
) {
645 printk(KERN_WARNING
"Logical comparator expects S64 register\n");
650 dbg_printk("Validate jumping to bytecode offset %u\n",
651 (unsigned int) insn
->skip_offset
);
652 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
653 printk(KERN_WARNING
"Loops are not allowed in bytecode\n");
661 case FILTER_OP_LOAD_FIELD_REF
:
663 printk(KERN_WARNING
"Unknown field ref type\n");
667 case FILTER_OP_LOAD_FIELD_REF_STRING
:
668 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
669 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
670 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
672 struct load_op
*insn
= (struct load_op
*) pc
;
673 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
675 dbg_printk("Validate load field ref offset %u type string\n",
679 case FILTER_OP_LOAD_FIELD_REF_S64
:
681 struct load_op
*insn
= (struct load_op
*) pc
;
682 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
684 dbg_printk("Validate load field ref offset %u type s64\n",
689 /* load from immediate operand */
690 case FILTER_OP_LOAD_STRING
:
691 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
696 case FILTER_OP_LOAD_S64
:
701 case FILTER_OP_CAST_TO_S64
:
703 struct cast_op
*insn
= (struct cast_op
*) pc
;
705 if (!vstack_ax(stack
)) {
706 printk(KERN_WARNING
"Empty stack\n");
710 switch (vstack_ax(stack
)->type
) {
713 printk(KERN_WARNING
"unknown register type\n");
718 case REG_STAR_GLOB_STRING
:
719 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
725 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
726 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
727 printk(KERN_WARNING
"Cast expects double\n");
734 case FILTER_OP_CAST_NOP
:
739 /* get context ref */
740 case FILTER_OP_GET_CONTEXT_REF
:
742 printk(KERN_WARNING
"Unknown get context ref type\n");
746 case FILTER_OP_GET_CONTEXT_REF_STRING
:
748 struct load_op
*insn
= (struct load_op
*) pc
;
749 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
751 dbg_printk("Validate get context ref offset %u type string\n",
755 case FILTER_OP_GET_CONTEXT_REF_S64
:
757 struct load_op
*insn
= (struct load_op
*) pc
;
758 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
760 dbg_printk("Validate get context ref offset %u type s64\n",
776 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
777 struct mp_table
*mp_table
,
778 struct vstack
*stack
,
783 unsigned long target_pc
= pc
- start_pc
;
785 struct hlist_head
*head
;
786 struct mp_node
*mp_node
;
788 /* Validate the context resulting from the previous instruction */
789 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
793 /* Validate merge points */
794 hash
= jhash_1word(target_pc
, 0);
795 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
796 lttng_hlist_for_each_entry(mp_node
, head
, node
) {
797 if (lttng_hash_match(mp_node
, target_pc
)) {
803 dbg_printk("Filter: validate merge point at offset %lu\n",
805 if (merge_points_compare(stack
, &mp_node
->stack
)) {
806 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
810 /* Once validated, we can remove the merge point */
811 dbg_printk("Filter: remove merge point at offset %lu\n",
813 hlist_del(&mp_node
->node
);
820 * >0: going to next insn.
821 * 0: success, stop iteration.
825 int exec_insn(struct bytecode_runtime
*bytecode
,
826 struct mp_table
*mp_table
,
827 struct vstack
*stack
,
832 char *next_pc
= *_next_pc
;
834 switch (*(filter_opcode_t
*) pc
) {
835 case FILTER_OP_UNKNOWN
:
838 printk(KERN_WARNING
"unknown bytecode op %u\n",
839 (unsigned int) *(filter_opcode_t
*) pc
);
844 case FILTER_OP_RETURN
:
846 if (!vstack_ax(stack
)) {
847 printk(KERN_WARNING
"Empty stack\n");
860 case FILTER_OP_MINUS
:
861 case FILTER_OP_RSHIFT
:
862 case FILTER_OP_LSHIFT
:
863 case FILTER_OP_BIN_AND
:
864 case FILTER_OP_BIN_OR
:
865 case FILTER_OP_BIN_XOR
:
867 case FILTER_OP_EQ_DOUBLE
:
868 case FILTER_OP_NE_DOUBLE
:
869 case FILTER_OP_GT_DOUBLE
:
870 case FILTER_OP_LT_DOUBLE
:
871 case FILTER_OP_GE_DOUBLE
:
872 case FILTER_OP_LE_DOUBLE
:
873 case FILTER_OP_EQ_DOUBLE_S64
:
874 case FILTER_OP_NE_DOUBLE_S64
:
875 case FILTER_OP_GT_DOUBLE_S64
:
876 case FILTER_OP_LT_DOUBLE_S64
:
877 case FILTER_OP_GE_DOUBLE_S64
:
878 case FILTER_OP_LE_DOUBLE_S64
:
879 case FILTER_OP_EQ_S64_DOUBLE
:
880 case FILTER_OP_NE_S64_DOUBLE
:
881 case FILTER_OP_GT_S64_DOUBLE
:
882 case FILTER_OP_LT_S64_DOUBLE
:
883 case FILTER_OP_GE_S64_DOUBLE
:
884 case FILTER_OP_LE_S64_DOUBLE
:
885 case FILTER_OP_UNARY_PLUS_DOUBLE
:
886 case FILTER_OP_UNARY_MINUS_DOUBLE
:
887 case FILTER_OP_UNARY_NOT_DOUBLE
:
888 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
889 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
890 case FILTER_OP_LOAD_DOUBLE
:
891 case FILTER_OP_CAST_DOUBLE_TO_S64
:
893 printk(KERN_WARNING
"unsupported bytecode op %u\n",
894 (unsigned int) *(filter_opcode_t
*) pc
);
905 case FILTER_OP_EQ_STRING
:
906 case FILTER_OP_NE_STRING
:
907 case FILTER_OP_GT_STRING
:
908 case FILTER_OP_LT_STRING
:
909 case FILTER_OP_GE_STRING
:
910 case FILTER_OP_LE_STRING
:
911 case FILTER_OP_EQ_STAR_GLOB_STRING
:
912 case FILTER_OP_NE_STAR_GLOB_STRING
:
913 case FILTER_OP_EQ_S64
:
914 case FILTER_OP_NE_S64
:
915 case FILTER_OP_GT_S64
:
916 case FILTER_OP_LT_S64
:
917 case FILTER_OP_GE_S64
:
918 case FILTER_OP_LE_S64
:
921 if (vstack_pop(stack
)) {
925 if (!vstack_ax(stack
)) {
926 printk(KERN_WARNING
"Empty stack\n");
930 vstack_ax(stack
)->type
= REG_S64
;
931 next_pc
+= sizeof(struct binary_op
);
936 case FILTER_OP_UNARY_PLUS
:
937 case FILTER_OP_UNARY_MINUS
:
938 case FILTER_OP_UNARY_NOT
:
939 case FILTER_OP_UNARY_PLUS_S64
:
940 case FILTER_OP_UNARY_MINUS_S64
:
941 case FILTER_OP_UNARY_NOT_S64
:
944 if (!vstack_ax(stack
)) {
945 printk(KERN_WARNING
"Empty stack\n");
949 vstack_ax(stack
)->type
= REG_S64
;
950 next_pc
+= sizeof(struct unary_op
);
958 struct logical_op
*insn
= (struct logical_op
*) pc
;
961 /* Add merge point to table */
962 merge_ret
= merge_point_add_check(mp_table
,
963 insn
->skip_offset
, stack
);
968 /* Continue to next instruction */
969 /* Pop 1 when jump not taken */
970 if (vstack_pop(stack
)) {
974 next_pc
+= sizeof(struct logical_op
);
979 case FILTER_OP_LOAD_FIELD_REF
:
981 printk(KERN_WARNING
"Unknown field ref type\n");
985 /* get context ref */
986 case FILTER_OP_GET_CONTEXT_REF
:
988 printk(KERN_WARNING
"Unknown get context ref type\n");
992 case FILTER_OP_LOAD_FIELD_REF_STRING
:
993 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
994 case FILTER_OP_GET_CONTEXT_REF_STRING
:
995 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
996 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
998 if (vstack_push(stack
)) {
1002 vstack_ax(stack
)->type
= REG_STRING
;
1003 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1006 case FILTER_OP_LOAD_FIELD_REF_S64
:
1007 case FILTER_OP_GET_CONTEXT_REF_S64
:
1009 if (vstack_push(stack
)) {
1013 vstack_ax(stack
)->type
= REG_S64
;
1014 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1018 /* load from immediate operand */
1019 case FILTER_OP_LOAD_STRING
:
1021 struct load_op
*insn
= (struct load_op
*) pc
;
1023 if (vstack_push(stack
)) {
1027 vstack_ax(stack
)->type
= REG_STRING
;
1028 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1032 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1034 struct load_op
*insn
= (struct load_op
*) pc
;
1036 if (vstack_push(stack
)) {
1040 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1041 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1045 case FILTER_OP_LOAD_S64
:
1047 if (vstack_push(stack
)) {
1051 vstack_ax(stack
)->type
= REG_S64
;
1052 next_pc
+= sizeof(struct load_op
)
1053 + sizeof(struct literal_numeric
);
1057 case FILTER_OP_CAST_TO_S64
:
1060 if (!vstack_ax(stack
)) {
1061 printk(KERN_WARNING
"Empty stack\n");
1065 vstack_ax(stack
)->type
= REG_S64
;
1066 next_pc
+= sizeof(struct cast_op
);
1069 case FILTER_OP_CAST_NOP
:
1071 next_pc
+= sizeof(struct cast_op
);
1077 *_next_pc
= next_pc
;
1082 * Never called concurrently (hash seed is shared).
1084 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1086 struct mp_table
*mp_table
;
1087 char *pc
, *next_pc
, *start_pc
;
1089 struct vstack stack
;
1091 vstack_init(&stack
);
1093 mp_table
= kzalloc(sizeof(*mp_table
), GFP_KERNEL
);
1095 printk(KERN_WARNING
"Error allocating hash table for bytecode validation\n");
1098 start_pc
= &bytecode
->data
[0];
1099 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1101 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1104 printk(KERN_WARNING
"filter bytecode overflow\n");
1107 dbg_printk("Validating op %s (%u)\n",
1108 lttng_filter_print_op((unsigned int) *(filter_opcode_t
*) pc
),
1109 (unsigned int) *(filter_opcode_t
*) pc
);
1112 * For each instruction, validate the current context
1113 * (traversal of entire execution flow), and validate
1114 * all merge points targeting this instruction.
1116 ret
= validate_instruction_all_contexts(bytecode
, mp_table
,
1117 &stack
, start_pc
, pc
);
1120 ret
= exec_insn(bytecode
, mp_table
, &stack
, &next_pc
, pc
);
1125 if (delete_all_nodes(mp_table
)) {
1127 printk(KERN_WARNING
"Unexpected merge points\n");