/*
 * lttng-filter-validator.c
 *
 * LTTng modules filter bytecode validator.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include <linux/types.h>
24 #include <linux/jhash.h>
25 #include <linux/slab.h>
27 #include <wrapper/list.h>
28 #include <lttng-filter.h>
30 #define MERGE_POINT_TABLE_BITS 7
31 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
33 /* merge point table node */
35 struct hlist_node node
;
37 /* Context at merge point */
39 unsigned long target_pc
;
43 struct hlist_head mp_head
[MERGE_POINT_TABLE_SIZE
];
47 int lttng_hash_match(struct mp_node
*mp_node
, unsigned long key_pc
)
49 if (mp_node
->target_pc
== key_pc
)
56 int merge_points_compare(const struct vstack
*stacka
,
57 const struct vstack
*stackb
)
61 if (stacka
->top
!= stackb
->top
)
63 len
= stacka
->top
+ 1;
64 WARN_ON_ONCE(len
< 0);
65 for (i
= 0; i
< len
; i
++) {
66 if (stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
73 int merge_point_add_check(struct mp_table
*mp_table
, unsigned long target_pc
,
74 const struct vstack
*stack
)
76 struct mp_node
*mp_node
;
77 unsigned long hash
= jhash_1word(target_pc
, 0);
78 struct hlist_head
*head
;
79 struct mp_node
*lookup_node
;
82 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
84 mp_node
= kzalloc(sizeof(struct mp_node
), GFP_KERNEL
);
87 mp_node
->target_pc
= target_pc
;
88 memcpy(&mp_node
->stack
, stack
, sizeof(mp_node
->stack
));
90 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
91 lttng_hlist_for_each_entry(lookup_node
, head
, node
) {
92 if (lttng_hash_match(lookup_node
, target_pc
)) {
98 /* Key already present */
99 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
102 if (merge_points_compare(stack
, &lookup_node
->stack
)) {
103 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
108 hlist_add_head(&mp_node
->node
, head
);
114 * Binary comparators use top of stack and top of stack -1.
117 int bin_op_compare_check(struct vstack
*stack
, const char *str
)
119 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
122 switch (vstack_ax(stack
)->type
) {
128 switch (vstack_bx(stack
)->type
) {
140 switch (vstack_bx(stack
)->type
) {
159 printk(KERN_WARNING
"type mismatch for '%s' binary operator\n", str
);
164 * Validate bytecode range overflow within the validation pass.
165 * Called for each instruction encountered.
168 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
169 void *start_pc
, void *pc
)
173 switch (*(filter_opcode_t
*) pc
) {
174 case FILTER_OP_UNKNOWN
:
177 printk(KERN_WARNING
"unknown bytecode op %u\n",
178 (unsigned int) *(filter_opcode_t
*) pc
);
183 case FILTER_OP_RETURN
:
185 if (unlikely(pc
+ sizeof(struct return_op
)
186 > start_pc
+ bytecode
->len
)) {
197 case FILTER_OP_MINUS
:
198 case FILTER_OP_RSHIFT
:
199 case FILTER_OP_LSHIFT
:
200 case FILTER_OP_BIN_AND
:
201 case FILTER_OP_BIN_OR
:
202 case FILTER_OP_BIN_XOR
:
203 case FILTER_OP_EQ_DOUBLE
:
204 case FILTER_OP_NE_DOUBLE
:
205 case FILTER_OP_GT_DOUBLE
:
206 case FILTER_OP_LT_DOUBLE
:
207 case FILTER_OP_GE_DOUBLE
:
208 case FILTER_OP_LE_DOUBLE
:
210 case FILTER_OP_EQ_DOUBLE_S64
:
211 case FILTER_OP_NE_DOUBLE_S64
:
212 case FILTER_OP_GT_DOUBLE_S64
:
213 case FILTER_OP_LT_DOUBLE_S64
:
214 case FILTER_OP_GE_DOUBLE_S64
:
215 case FILTER_OP_LE_DOUBLE_S64
:
216 case FILTER_OP_EQ_S64_DOUBLE
:
217 case FILTER_OP_NE_S64_DOUBLE
:
218 case FILTER_OP_GT_S64_DOUBLE
:
219 case FILTER_OP_LT_S64_DOUBLE
:
220 case FILTER_OP_GE_S64_DOUBLE
:
221 case FILTER_OP_LE_S64_DOUBLE
:
222 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
223 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
224 case FILTER_OP_LOAD_DOUBLE
:
225 case FILTER_OP_CAST_DOUBLE_TO_S64
:
226 case FILTER_OP_UNARY_PLUS_DOUBLE
:
227 case FILTER_OP_UNARY_MINUS_DOUBLE
:
228 case FILTER_OP_UNARY_NOT_DOUBLE
:
230 printk(KERN_WARNING
"unsupported bytecode op %u\n",
231 (unsigned int) *(filter_opcode_t
*) pc
);
242 case FILTER_OP_EQ_STRING
:
243 case FILTER_OP_NE_STRING
:
244 case FILTER_OP_GT_STRING
:
245 case FILTER_OP_LT_STRING
:
246 case FILTER_OP_GE_STRING
:
247 case FILTER_OP_LE_STRING
:
248 case FILTER_OP_EQ_S64
:
249 case FILTER_OP_NE_S64
:
250 case FILTER_OP_GT_S64
:
251 case FILTER_OP_LT_S64
:
252 case FILTER_OP_GE_S64
:
253 case FILTER_OP_LE_S64
:
255 if (unlikely(pc
+ sizeof(struct binary_op
)
256 > start_pc
+ bytecode
->len
)) {
263 case FILTER_OP_UNARY_PLUS
:
264 case FILTER_OP_UNARY_MINUS
:
265 case FILTER_OP_UNARY_NOT
:
266 case FILTER_OP_UNARY_PLUS_S64
:
267 case FILTER_OP_UNARY_MINUS_S64
:
268 case FILTER_OP_UNARY_NOT_S64
:
270 if (unlikely(pc
+ sizeof(struct unary_op
)
271 > start_pc
+ bytecode
->len
)) {
281 if (unlikely(pc
+ sizeof(struct logical_op
)
282 > start_pc
+ bytecode
->len
)) {
289 case FILTER_OP_LOAD_FIELD_REF
:
291 printk(KERN_WARNING
"Unknown field ref type\n");
295 /* get context ref */
296 case FILTER_OP_GET_CONTEXT_REF
:
298 printk(KERN_WARNING
"Unknown field ref type\n");
302 case FILTER_OP_LOAD_FIELD_REF_STRING
:
303 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
304 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
305 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
306 case FILTER_OP_LOAD_FIELD_REF_S64
:
307 case FILTER_OP_GET_CONTEXT_REF_STRING
:
308 case FILTER_OP_GET_CONTEXT_REF_S64
:
310 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
311 > start_pc
+ bytecode
->len
)) {
317 /* load from immediate operand */
318 case FILTER_OP_LOAD_STRING
:
320 struct load_op
*insn
= (struct load_op
*) pc
;
321 uint32_t str_len
, maxlen
;
323 if (unlikely(pc
+ sizeof(struct load_op
)
324 > start_pc
+ bytecode
->len
)) {
329 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
330 str_len
= strnlen(insn
->data
, maxlen
);
331 if (unlikely(str_len
>= maxlen
)) {
332 /* Final '\0' not found within range */
338 case FILTER_OP_LOAD_S64
:
340 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
341 > start_pc
+ bytecode
->len
)) {
347 case FILTER_OP_CAST_TO_S64
:
348 case FILTER_OP_CAST_NOP
:
350 if (unlikely(pc
+ sizeof(struct cast_op
)
351 > start_pc
+ bytecode
->len
)) {
363 unsigned long delete_all_nodes(struct mp_table
*mp_table
)
365 struct mp_node
*mp_node
;
366 struct hlist_node
*tmp
;
367 unsigned long nr_nodes
= 0;
370 for (i
= 0; i
< MERGE_POINT_TABLE_SIZE
; i
++) {
371 struct hlist_head
*head
;
373 head
= &mp_table
->mp_head
[i
];
374 lttng_hlist_for_each_entry_safe(mp_node
, tmp
, head
, node
) {
388 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
389 struct vstack
*stack
,
395 switch (*(filter_opcode_t
*) pc
) {
396 case FILTER_OP_UNKNOWN
:
399 printk(KERN_WARNING
"unknown bytecode op %u\n",
400 (unsigned int) *(filter_opcode_t
*) pc
);
405 case FILTER_OP_RETURN
:
415 case FILTER_OP_MINUS
:
416 case FILTER_OP_RSHIFT
:
417 case FILTER_OP_LSHIFT
:
418 case FILTER_OP_BIN_AND
:
419 case FILTER_OP_BIN_OR
:
420 case FILTER_OP_BIN_XOR
:
422 case FILTER_OP_EQ_DOUBLE
:
423 case FILTER_OP_NE_DOUBLE
:
424 case FILTER_OP_GT_DOUBLE
:
425 case FILTER_OP_LT_DOUBLE
:
426 case FILTER_OP_GE_DOUBLE
:
427 case FILTER_OP_LE_DOUBLE
:
428 case FILTER_OP_EQ_DOUBLE_S64
:
429 case FILTER_OP_NE_DOUBLE_S64
:
430 case FILTER_OP_GT_DOUBLE_S64
:
431 case FILTER_OP_LT_DOUBLE_S64
:
432 case FILTER_OP_GE_DOUBLE_S64
:
433 case FILTER_OP_LE_DOUBLE_S64
:
434 case FILTER_OP_EQ_S64_DOUBLE
:
435 case FILTER_OP_NE_S64_DOUBLE
:
436 case FILTER_OP_GT_S64_DOUBLE
:
437 case FILTER_OP_LT_S64_DOUBLE
:
438 case FILTER_OP_GE_S64_DOUBLE
:
439 case FILTER_OP_LE_S64_DOUBLE
:
440 case FILTER_OP_UNARY_PLUS_DOUBLE
:
441 case FILTER_OP_UNARY_MINUS_DOUBLE
:
442 case FILTER_OP_UNARY_NOT_DOUBLE
:
443 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
444 case FILTER_OP_LOAD_DOUBLE
:
445 case FILTER_OP_CAST_DOUBLE_TO_S64
:
446 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
448 printk(KERN_WARNING
"unsupported bytecode op %u\n",
449 (unsigned int) *(filter_opcode_t
*) pc
);
456 ret
= bin_op_compare_check(stack
, "==");
463 ret
= bin_op_compare_check(stack
, "!=");
470 ret
= bin_op_compare_check(stack
, ">");
477 ret
= bin_op_compare_check(stack
, "<");
484 ret
= bin_op_compare_check(stack
, ">=");
491 ret
= bin_op_compare_check(stack
, "<=");
497 case FILTER_OP_EQ_STRING
:
498 case FILTER_OP_NE_STRING
:
499 case FILTER_OP_GT_STRING
:
500 case FILTER_OP_LT_STRING
:
501 case FILTER_OP_GE_STRING
:
502 case FILTER_OP_LE_STRING
:
504 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
505 printk(KERN_WARNING
"Empty stack\n");
509 if (vstack_ax(stack
)->type
!= REG_STRING
510 || vstack_bx(stack
)->type
!= REG_STRING
) {
511 printk(KERN_WARNING
"Unexpected register type for string comparator\n");
518 case FILTER_OP_EQ_S64
:
519 case FILTER_OP_NE_S64
:
520 case FILTER_OP_GT_S64
:
521 case FILTER_OP_LT_S64
:
522 case FILTER_OP_GE_S64
:
523 case FILTER_OP_LE_S64
:
525 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
526 printk(KERN_WARNING
"Empty stack\n");
530 if (vstack_ax(stack
)->type
!= REG_S64
531 || vstack_bx(stack
)->type
!= REG_S64
) {
532 printk(KERN_WARNING
"Unexpected register type for s64 comparator\n");
540 case FILTER_OP_UNARY_PLUS
:
541 case FILTER_OP_UNARY_MINUS
:
542 case FILTER_OP_UNARY_NOT
:
544 if (!vstack_ax(stack
)) {
545 printk(KERN_WARNING
"Empty stack\n");
549 switch (vstack_ax(stack
)->type
) {
552 printk(KERN_WARNING
"unknown register type\n");
557 printk(KERN_WARNING
"Unary op can only be applied to numeric or floating point registers\n");
566 case FILTER_OP_UNARY_PLUS_S64
:
567 case FILTER_OP_UNARY_MINUS_S64
:
568 case FILTER_OP_UNARY_NOT_S64
:
570 if (!vstack_ax(stack
)) {
571 printk(KERN_WARNING
"Empty stack\n");
575 if (vstack_ax(stack
)->type
!= REG_S64
) {
576 printk(KERN_WARNING
"Invalid register type\n");
587 struct logical_op
*insn
= (struct logical_op
*) pc
;
589 if (!vstack_ax(stack
)) {
590 printk(KERN_WARNING
"Empty stack\n");
594 if (vstack_ax(stack
)->type
!= REG_S64
) {
595 printk(KERN_WARNING
"Logical comparator expects S64 register\n");
600 dbg_printk("Validate jumping to bytecode offset %u\n",
601 (unsigned int) insn
->skip_offset
);
602 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
603 printk(KERN_WARNING
"Loops are not allowed in bytecode\n");
611 case FILTER_OP_LOAD_FIELD_REF
:
613 printk(KERN_WARNING
"Unknown field ref type\n");
617 case FILTER_OP_LOAD_FIELD_REF_STRING
:
618 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
619 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
620 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
622 struct load_op
*insn
= (struct load_op
*) pc
;
623 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
625 dbg_printk("Validate load field ref offset %u type string\n",
629 case FILTER_OP_LOAD_FIELD_REF_S64
:
631 struct load_op
*insn
= (struct load_op
*) pc
;
632 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
634 dbg_printk("Validate load field ref offset %u type s64\n",
639 /* load from immediate operand */
640 case FILTER_OP_LOAD_STRING
:
645 case FILTER_OP_LOAD_S64
:
650 case FILTER_OP_CAST_TO_S64
:
652 struct cast_op
*insn
= (struct cast_op
*) pc
;
654 if (!vstack_ax(stack
)) {
655 printk(KERN_WARNING
"Empty stack\n");
659 switch (vstack_ax(stack
)->type
) {
662 printk(KERN_WARNING
"unknown register type\n");
667 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
673 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
674 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
675 printk(KERN_WARNING
"Cast expects double\n");
682 case FILTER_OP_CAST_NOP
:
687 /* get context ref */
688 case FILTER_OP_GET_CONTEXT_REF
:
690 printk(KERN_WARNING
"Unknown get context ref type\n");
694 case FILTER_OP_GET_CONTEXT_REF_STRING
:
696 struct load_op
*insn
= (struct load_op
*) pc
;
697 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
699 dbg_printk("Validate get context ref offset %u type string\n",
703 case FILTER_OP_GET_CONTEXT_REF_S64
:
705 struct load_op
*insn
= (struct load_op
*) pc
;
706 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
708 dbg_printk("Validate get context ref offset %u type s64\n",
724 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
725 struct mp_table
*mp_table
,
726 struct vstack
*stack
,
731 unsigned long target_pc
= pc
- start_pc
;
733 struct hlist_head
*head
;
734 struct mp_node
*mp_node
;
736 /* Validate the context resulting from the previous instruction */
737 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
741 /* Validate merge points */
742 hash
= jhash_1word(target_pc
, 0);
743 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
744 lttng_hlist_for_each_entry(mp_node
, head
, node
) {
745 if (lttng_hash_match(mp_node
, target_pc
)) {
751 dbg_printk("Filter: validate merge point at offset %lu\n",
753 if (merge_points_compare(stack
, &mp_node
->stack
)) {
754 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
758 /* Once validated, we can remove the merge point */
759 dbg_printk("Filter: remove merge point at offset %lu\n",
761 hlist_del(&mp_node
->node
);
768 * >0: going to next insn.
769 * 0: success, stop iteration.
773 int exec_insn(struct bytecode_runtime
*bytecode
,
774 struct mp_table
*mp_table
,
775 struct vstack
*stack
,
780 void *next_pc
= *_next_pc
;
782 switch (*(filter_opcode_t
*) pc
) {
783 case FILTER_OP_UNKNOWN
:
786 printk(KERN_WARNING
"unknown bytecode op %u\n",
787 (unsigned int) *(filter_opcode_t
*) pc
);
792 case FILTER_OP_RETURN
:
794 if (!vstack_ax(stack
)) {
795 printk(KERN_WARNING
"Empty stack\n");
808 case FILTER_OP_MINUS
:
809 case FILTER_OP_RSHIFT
:
810 case FILTER_OP_LSHIFT
:
811 case FILTER_OP_BIN_AND
:
812 case FILTER_OP_BIN_OR
:
813 case FILTER_OP_BIN_XOR
:
815 case FILTER_OP_EQ_DOUBLE
:
816 case FILTER_OP_NE_DOUBLE
:
817 case FILTER_OP_GT_DOUBLE
:
818 case FILTER_OP_LT_DOUBLE
:
819 case FILTER_OP_GE_DOUBLE
:
820 case FILTER_OP_LE_DOUBLE
:
821 case FILTER_OP_EQ_DOUBLE_S64
:
822 case FILTER_OP_NE_DOUBLE_S64
:
823 case FILTER_OP_GT_DOUBLE_S64
:
824 case FILTER_OP_LT_DOUBLE_S64
:
825 case FILTER_OP_GE_DOUBLE_S64
:
826 case FILTER_OP_LE_DOUBLE_S64
:
827 case FILTER_OP_EQ_S64_DOUBLE
:
828 case FILTER_OP_NE_S64_DOUBLE
:
829 case FILTER_OP_GT_S64_DOUBLE
:
830 case FILTER_OP_LT_S64_DOUBLE
:
831 case FILTER_OP_GE_S64_DOUBLE
:
832 case FILTER_OP_LE_S64_DOUBLE
:
833 case FILTER_OP_UNARY_PLUS_DOUBLE
:
834 case FILTER_OP_UNARY_MINUS_DOUBLE
:
835 case FILTER_OP_UNARY_NOT_DOUBLE
:
836 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
837 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
838 case FILTER_OP_LOAD_DOUBLE
:
839 case FILTER_OP_CAST_DOUBLE_TO_S64
:
841 printk(KERN_WARNING
"unsupported bytecode op %u\n",
842 (unsigned int) *(filter_opcode_t
*) pc
);
853 case FILTER_OP_EQ_STRING
:
854 case FILTER_OP_NE_STRING
:
855 case FILTER_OP_GT_STRING
:
856 case FILTER_OP_LT_STRING
:
857 case FILTER_OP_GE_STRING
:
858 case FILTER_OP_LE_STRING
:
859 case FILTER_OP_EQ_S64
:
860 case FILTER_OP_NE_S64
:
861 case FILTER_OP_GT_S64
:
862 case FILTER_OP_LT_S64
:
863 case FILTER_OP_GE_S64
:
864 case FILTER_OP_LE_S64
:
867 if (vstack_pop(stack
)) {
871 if (!vstack_ax(stack
)) {
872 printk(KERN_WARNING
"Empty stack\n");
876 vstack_ax(stack
)->type
= REG_S64
;
877 next_pc
+= sizeof(struct binary_op
);
882 case FILTER_OP_UNARY_PLUS
:
883 case FILTER_OP_UNARY_MINUS
:
884 case FILTER_OP_UNARY_NOT
:
885 case FILTER_OP_UNARY_PLUS_S64
:
886 case FILTER_OP_UNARY_MINUS_S64
:
887 case FILTER_OP_UNARY_NOT_S64
:
890 if (!vstack_ax(stack
)) {
891 printk(KERN_WARNING
"Empty stack\n");
895 vstack_ax(stack
)->type
= REG_S64
;
896 next_pc
+= sizeof(struct unary_op
);
904 struct logical_op
*insn
= (struct logical_op
*) pc
;
907 /* Add merge point to table */
908 merge_ret
= merge_point_add_check(mp_table
,
909 insn
->skip_offset
, stack
);
914 /* Continue to next instruction */
915 /* Pop 1 when jump not taken */
916 if (vstack_pop(stack
)) {
920 next_pc
+= sizeof(struct logical_op
);
925 case FILTER_OP_LOAD_FIELD_REF
:
927 printk(KERN_WARNING
"Unknown field ref type\n");
931 /* get context ref */
932 case FILTER_OP_GET_CONTEXT_REF
:
934 printk(KERN_WARNING
"Unknown get context ref type\n");
938 case FILTER_OP_LOAD_FIELD_REF_STRING
:
939 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
940 case FILTER_OP_GET_CONTEXT_REF_STRING
:
941 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
942 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
944 if (vstack_push(stack
)) {
948 vstack_ax(stack
)->type
= REG_STRING
;
949 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
952 case FILTER_OP_LOAD_FIELD_REF_S64
:
953 case FILTER_OP_GET_CONTEXT_REF_S64
:
955 if (vstack_push(stack
)) {
959 vstack_ax(stack
)->type
= REG_S64
;
960 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
964 /* load from immediate operand */
965 case FILTER_OP_LOAD_STRING
:
967 struct load_op
*insn
= (struct load_op
*) pc
;
969 if (vstack_push(stack
)) {
973 vstack_ax(stack
)->type
= REG_STRING
;
974 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
978 case FILTER_OP_LOAD_S64
:
980 if (vstack_push(stack
)) {
984 vstack_ax(stack
)->type
= REG_S64
;
985 next_pc
+= sizeof(struct load_op
)
986 + sizeof(struct literal_numeric
);
990 case FILTER_OP_CAST_TO_S64
:
993 if (!vstack_ax(stack
)) {
994 printk(KERN_WARNING
"Empty stack\n");
998 vstack_ax(stack
)->type
= REG_S64
;
999 next_pc
+= sizeof(struct cast_op
);
1002 case FILTER_OP_CAST_NOP
:
1004 next_pc
+= sizeof(struct cast_op
);
1010 *_next_pc
= next_pc
;
1015 * Never called concurrently (hash seed is shared).
1017 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1019 struct mp_table
*mp_table
;
1020 void *pc
, *next_pc
, *start_pc
;
1022 struct vstack stack
;
1024 vstack_init(&stack
);
1026 mp_table
= kzalloc(sizeof(*mp_table
), GFP_KERNEL
);
1028 printk(KERN_WARNING
"Error allocating hash table for bytecode validation\n");
1031 start_pc
= &bytecode
->data
[0];
1032 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1034 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1037 printk(KERN_WARNING
"filter bytecode overflow\n");
1040 dbg_printk("Validating op %s (%u)\n",
1041 lttng_filter_print_op((unsigned int) *(filter_opcode_t
*) pc
),
1042 (unsigned int) *(filter_opcode_t
*) pc
);
1045 * For each instruction, validate the current context
1046 * (traversal of entire execution flow), and validate
1047 * all merge points targeting this instruction.
1049 ret
= validate_instruction_all_contexts(bytecode
, mp_table
,
1050 &stack
, start_pc
, pc
);
1053 ret
= exec_insn(bytecode
, mp_table
, &stack
, &next_pc
, pc
);
1058 if (delete_all_nodes(mp_table
)) {
1060 printk(KERN_WARNING
"Unexpected merge points\n");