/*
 * lttng-filter-validator.c
 *
 * LTTng UST filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <urcu/rculfhash.h>
35 #include "lttng-filter.h"
36 #include "lttng-hash-helper.h"
37 #include "string-utils.h"
38 #include "ust-events-internal.h"
/*
 * Number of merge points for hash table size. Hash table initialized to
 * that size, and we do not resize, because we do not want to trigger
 * RCU worker thread execution: fall-back on linear traversal if number
 * of merge points exceeds this value.
 */
46 #define DEFAULT_NR_MERGE_POINTS 128
47 #define MIN_NR_BUCKETS 128
48 #define MAX_NR_BUCKETS 128
50 /* merge point table node */
52 struct cds_lfht_node node
;
54 /* Context at merge point */
56 unsigned long target_pc
;
59 static unsigned long lttng_hash_seed
;
60 static unsigned int lttng_hash_seed_ready
;
63 int lttng_hash_match(struct cds_lfht_node
*node
, const void *key
)
65 struct lfht_mp_node
*mp_node
=
66 caa_container_of(node
, struct lfht_mp_node
, node
);
67 unsigned long key_pc
= (unsigned long) key
;
69 if (mp_node
->target_pc
== key_pc
)
76 int merge_points_compare(const struct vstack
*stacka
,
77 const struct vstack
*stackb
)
81 if (stacka
->top
!= stackb
->top
)
83 len
= stacka
->top
+ 1;
85 for (i
= 0; i
< len
; i
++) {
86 if (stacka
->e
[i
].type
!= REG_UNKNOWN
87 && stackb
->e
[i
].type
!= REG_UNKNOWN
88 && stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
95 int merge_point_add_check(struct cds_lfht
*ht
, unsigned long target_pc
,
96 const struct vstack
*stack
)
98 struct lfht_mp_node
*node
;
99 unsigned long hash
= lttng_hash_mix((const char *) target_pc
,
102 struct cds_lfht_node
*ret
;
104 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
106 node
= zmalloc(sizeof(struct lfht_mp_node
));
109 node
->target_pc
= target_pc
;
110 memcpy(&node
->stack
, stack
, sizeof(node
->stack
));
111 ret
= cds_lfht_add_unique(ht
, hash
, lttng_hash_match
,
112 (const char *) target_pc
, &node
->node
);
113 if (ret
!= &node
->node
) {
114 struct lfht_mp_node
*ret_mp
=
115 caa_container_of(ret
, struct lfht_mp_node
, node
);
117 /* Key already present */
118 dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
121 if (merge_points_compare(stack
, &ret_mp
->stack
)) {
122 ERR("Merge points differ for offset %lu\n",
131 * Binary comparators use top of stack and top of stack -1.
132 * Return 0 if typing is known to match, 1 if typing is dynamic
133 * (unknown), negative error value on error.
136 int bin_op_compare_check(struct vstack
*stack
, filter_opcode_t opcode
,
139 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
142 switch (vstack_ax(stack
)->type
) {
149 switch (vstack_bx(stack
)->type
) {
157 case REG_STAR_GLOB_STRING
:
158 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
168 case REG_STAR_GLOB_STRING
:
169 switch (vstack_bx(stack
)->type
) {
176 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
180 case REG_STAR_GLOB_STRING
:
190 switch (vstack_bx(stack
)->type
) {
197 case REG_STAR_GLOB_STRING
:
212 ERR("type mismatch for '%s' binary operator\n", str
);
216 ERR("empty stack for '%s' binary operator\n", str
);
220 ERR("unknown type for '%s' binary operator\n", str
);
225 * Binary bitwise operators use top of stack and top of stack -1.
226 * Return 0 if typing is known to match, 1 if typing is dynamic
227 * (unknown), negative error value on error.
230 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
233 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
236 switch (vstack_ax(stack
)->type
) {
244 switch (vstack_bx(stack
)->type
) {
262 ERR("empty stack for '%s' binary operator\n", str
);
266 ERR("unknown type for '%s' binary operator\n", str
);
271 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
272 const struct get_symbol
*sym
)
274 const char *str
, *str_limit
;
277 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
280 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
281 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
282 len_limit
= str_limit
- str
;
283 if (strnlen(str
, len_limit
) == len_limit
)
289 * Validate bytecode range overflow within the validation pass.
290 * Called for each instruction encountered.
293 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
294 char *start_pc
, char *pc
)
298 switch (*(filter_opcode_t
*) pc
) {
299 case FILTER_OP_UNKNOWN
:
302 ERR("unknown bytecode op %u\n",
303 (unsigned int) *(filter_opcode_t
*) pc
);
308 case FILTER_OP_RETURN
:
309 case FILTER_OP_RETURN_S64
:
311 if (unlikely(pc
+ sizeof(struct return_op
)
312 > start_pc
+ bytecode
->len
)) {
323 case FILTER_OP_MINUS
:
325 ERR("unsupported bytecode op %u\n",
326 (unsigned int) *(filter_opcode_t
*) pc
);
337 case FILTER_OP_EQ_STRING
:
338 case FILTER_OP_NE_STRING
:
339 case FILTER_OP_GT_STRING
:
340 case FILTER_OP_LT_STRING
:
341 case FILTER_OP_GE_STRING
:
342 case FILTER_OP_LE_STRING
:
343 case FILTER_OP_EQ_STAR_GLOB_STRING
:
344 case FILTER_OP_NE_STAR_GLOB_STRING
:
345 case FILTER_OP_EQ_S64
:
346 case FILTER_OP_NE_S64
:
347 case FILTER_OP_GT_S64
:
348 case FILTER_OP_LT_S64
:
349 case FILTER_OP_GE_S64
:
350 case FILTER_OP_LE_S64
:
351 case FILTER_OP_EQ_DOUBLE
:
352 case FILTER_OP_NE_DOUBLE
:
353 case FILTER_OP_GT_DOUBLE
:
354 case FILTER_OP_LT_DOUBLE
:
355 case FILTER_OP_GE_DOUBLE
:
356 case FILTER_OP_LE_DOUBLE
:
357 case FILTER_OP_EQ_DOUBLE_S64
:
358 case FILTER_OP_NE_DOUBLE_S64
:
359 case FILTER_OP_GT_DOUBLE_S64
:
360 case FILTER_OP_LT_DOUBLE_S64
:
361 case FILTER_OP_GE_DOUBLE_S64
:
362 case FILTER_OP_LE_DOUBLE_S64
:
363 case FILTER_OP_EQ_S64_DOUBLE
:
364 case FILTER_OP_NE_S64_DOUBLE
:
365 case FILTER_OP_GT_S64_DOUBLE
:
366 case FILTER_OP_LT_S64_DOUBLE
:
367 case FILTER_OP_GE_S64_DOUBLE
:
368 case FILTER_OP_LE_S64_DOUBLE
:
369 case FILTER_OP_BIT_RSHIFT
:
370 case FILTER_OP_BIT_LSHIFT
:
371 case FILTER_OP_BIT_AND
:
372 case FILTER_OP_BIT_OR
:
373 case FILTER_OP_BIT_XOR
:
375 if (unlikely(pc
+ sizeof(struct binary_op
)
376 > start_pc
+ bytecode
->len
)) {
383 case FILTER_OP_UNARY_PLUS
:
384 case FILTER_OP_UNARY_MINUS
:
385 case FILTER_OP_UNARY_NOT
:
386 case FILTER_OP_UNARY_PLUS_S64
:
387 case FILTER_OP_UNARY_MINUS_S64
:
388 case FILTER_OP_UNARY_NOT_S64
:
389 case FILTER_OP_UNARY_PLUS_DOUBLE
:
390 case FILTER_OP_UNARY_MINUS_DOUBLE
:
391 case FILTER_OP_UNARY_NOT_DOUBLE
:
392 case FILTER_OP_UNARY_BIT_NOT
:
394 if (unlikely(pc
+ sizeof(struct unary_op
)
395 > start_pc
+ bytecode
->len
)) {
405 if (unlikely(pc
+ sizeof(struct logical_op
)
406 > start_pc
+ bytecode
->len
)) {
413 case FILTER_OP_LOAD_FIELD_REF
:
415 ERR("Unknown field ref type\n");
420 /* get context ref */
421 case FILTER_OP_GET_CONTEXT_REF
:
422 case FILTER_OP_LOAD_FIELD_REF_STRING
:
423 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
424 case FILTER_OP_LOAD_FIELD_REF_S64
:
425 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
426 case FILTER_OP_GET_CONTEXT_REF_STRING
:
427 case FILTER_OP_GET_CONTEXT_REF_S64
:
428 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
430 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
431 > start_pc
+ bytecode
->len
)) {
437 /* load from immediate operand */
438 case FILTER_OP_LOAD_STRING
:
439 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
441 struct load_op
*insn
= (struct load_op
*) pc
;
442 uint32_t str_len
, maxlen
;
444 if (unlikely(pc
+ sizeof(struct load_op
)
445 > start_pc
+ bytecode
->len
)) {
450 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
451 str_len
= strnlen(insn
->data
, maxlen
);
452 if (unlikely(str_len
>= maxlen
)) {
453 /* Final '\0' not found within range */
459 case FILTER_OP_LOAD_S64
:
461 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
462 > start_pc
+ bytecode
->len
)) {
468 case FILTER_OP_LOAD_DOUBLE
:
470 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_double
)
471 > start_pc
+ bytecode
->len
)) {
477 case FILTER_OP_CAST_TO_S64
:
478 case FILTER_OP_CAST_DOUBLE_TO_S64
:
479 case FILTER_OP_CAST_NOP
:
481 if (unlikely(pc
+ sizeof(struct cast_op
)
482 > start_pc
+ bytecode
->len
)) {
489 * Instructions for recursive traversal through composed types.
491 case FILTER_OP_GET_CONTEXT_ROOT
:
492 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
493 case FILTER_OP_GET_PAYLOAD_ROOT
:
494 case FILTER_OP_LOAD_FIELD
:
495 case FILTER_OP_LOAD_FIELD_S8
:
496 case FILTER_OP_LOAD_FIELD_S16
:
497 case FILTER_OP_LOAD_FIELD_S32
:
498 case FILTER_OP_LOAD_FIELD_S64
:
499 case FILTER_OP_LOAD_FIELD_U8
:
500 case FILTER_OP_LOAD_FIELD_U16
:
501 case FILTER_OP_LOAD_FIELD_U32
:
502 case FILTER_OP_LOAD_FIELD_U64
:
503 case FILTER_OP_LOAD_FIELD_STRING
:
504 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
505 case FILTER_OP_LOAD_FIELD_DOUBLE
:
506 if (unlikely(pc
+ sizeof(struct load_op
)
507 > start_pc
+ bytecode
->len
)) {
512 case FILTER_OP_GET_SYMBOL
:
514 struct load_op
*insn
= (struct load_op
*) pc
;
515 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
517 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
518 > start_pc
+ bytecode
->len
)) {
522 ret
= validate_get_symbol(bytecode
, sym
);
526 case FILTER_OP_GET_SYMBOL_FIELD
:
527 ERR("Unexpected get symbol field");
531 case FILTER_OP_GET_INDEX_U16
:
532 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
533 > start_pc
+ bytecode
->len
)) {
538 case FILTER_OP_GET_INDEX_U64
:
539 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
540 > start_pc
+ bytecode
->len
)) {
550 unsigned long delete_all_nodes(struct cds_lfht
*ht
)
552 struct cds_lfht_iter iter
;
553 struct lfht_mp_node
*node
;
554 unsigned long nr_nodes
= 0;
556 cds_lfht_for_each_entry(ht
, &iter
, node
, node
) {
559 ret
= cds_lfht_del(ht
, cds_lfht_iter_get_node(&iter
));
561 /* note: this hash table is never used concurrently */
574 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
575 struct vstack
*stack
,
580 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
583 case FILTER_OP_UNKNOWN
:
586 ERR("unknown bytecode op %u\n",
587 (unsigned int) *(filter_opcode_t
*) pc
);
592 case FILTER_OP_RETURN
:
593 case FILTER_OP_RETURN_S64
:
603 case FILTER_OP_MINUS
:
605 ERR("unsupported bytecode op %u\n",
606 (unsigned int) opcode
);
613 ret
= bin_op_compare_check(stack
, opcode
, "==");
620 ret
= bin_op_compare_check(stack
, opcode
, "!=");
627 ret
= bin_op_compare_check(stack
, opcode
, ">");
634 ret
= bin_op_compare_check(stack
, opcode
, "<");
641 ret
= bin_op_compare_check(stack
, opcode
, ">=");
648 ret
= bin_op_compare_check(stack
, opcode
, "<=");
654 case FILTER_OP_EQ_STRING
:
655 case FILTER_OP_NE_STRING
:
656 case FILTER_OP_GT_STRING
:
657 case FILTER_OP_LT_STRING
:
658 case FILTER_OP_GE_STRING
:
659 case FILTER_OP_LE_STRING
:
661 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
662 ERR("Empty stack\n");
666 if (vstack_ax(stack
)->type
!= REG_STRING
667 || vstack_bx(stack
)->type
!= REG_STRING
) {
668 ERR("Unexpected register type for string comparator\n");
675 case FILTER_OP_EQ_STAR_GLOB_STRING
:
676 case FILTER_OP_NE_STAR_GLOB_STRING
:
678 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
679 ERR("Empty stack\n");
683 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
684 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
685 ERR("Unexpected register type for globbing pattern comparator\n");
692 case FILTER_OP_EQ_S64
:
693 case FILTER_OP_NE_S64
:
694 case FILTER_OP_GT_S64
:
695 case FILTER_OP_LT_S64
:
696 case FILTER_OP_GE_S64
:
697 case FILTER_OP_LE_S64
:
699 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
700 ERR("Empty stack\n");
704 switch (vstack_ax(stack
)->type
) {
709 ERR("Unexpected register type for s64 comparator\n");
713 switch (vstack_bx(stack
)->type
) {
718 ERR("Unexpected register type for s64 comparator\n");
725 case FILTER_OP_EQ_DOUBLE
:
726 case FILTER_OP_NE_DOUBLE
:
727 case FILTER_OP_GT_DOUBLE
:
728 case FILTER_OP_LT_DOUBLE
:
729 case FILTER_OP_GE_DOUBLE
:
730 case FILTER_OP_LE_DOUBLE
:
732 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
733 ERR("Empty stack\n");
737 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
738 ERR("Double operator should have two double registers\n");
745 case FILTER_OP_EQ_DOUBLE_S64
:
746 case FILTER_OP_NE_DOUBLE_S64
:
747 case FILTER_OP_GT_DOUBLE_S64
:
748 case FILTER_OP_LT_DOUBLE_S64
:
749 case FILTER_OP_GE_DOUBLE_S64
:
750 case FILTER_OP_LE_DOUBLE_S64
:
752 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
753 ERR("Empty stack\n");
757 switch (vstack_ax(stack
)->type
) {
762 ERR("Double-S64 operator has unexpected register types\n");
766 switch (vstack_bx(stack
)->type
) {
770 ERR("Double-S64 operator has unexpected register types\n");
777 case FILTER_OP_EQ_S64_DOUBLE
:
778 case FILTER_OP_NE_S64_DOUBLE
:
779 case FILTER_OP_GT_S64_DOUBLE
:
780 case FILTER_OP_LT_S64_DOUBLE
:
781 case FILTER_OP_GE_S64_DOUBLE
:
782 case FILTER_OP_LE_S64_DOUBLE
:
784 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
785 ERR("Empty stack\n");
789 switch (vstack_ax(stack
)->type
) {
793 ERR("S64-Double operator has unexpected register types\n");
797 switch (vstack_bx(stack
)->type
) {
802 ERR("S64-Double operator has unexpected register types\n");
809 case FILTER_OP_BIT_RSHIFT
:
810 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
814 case FILTER_OP_BIT_LSHIFT
:
815 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
819 case FILTER_OP_BIT_AND
:
820 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
824 case FILTER_OP_BIT_OR
:
825 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
829 case FILTER_OP_BIT_XOR
:
830 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
836 case FILTER_OP_UNARY_PLUS
:
837 case FILTER_OP_UNARY_MINUS
:
838 case FILTER_OP_UNARY_NOT
:
840 if (!vstack_ax(stack
)) {
841 ERR("Empty stack\n");
845 switch (vstack_ax(stack
)->type
) {
847 ERR("unknown register type\n");
852 case REG_STAR_GLOB_STRING
:
853 ERR("Unary op can only be applied to numeric or floating point registers\n");
867 case FILTER_OP_UNARY_BIT_NOT
:
869 if (!vstack_ax(stack
)) {
870 ERR("Empty stack\n");
874 switch (vstack_ax(stack
)->type
) {
876 ERR("unknown register type\n");
881 case REG_STAR_GLOB_STRING
:
883 ERR("Unary bitwise op can only be applied to numeric registers\n");
896 case FILTER_OP_UNARY_PLUS_S64
:
897 case FILTER_OP_UNARY_MINUS_S64
:
898 case FILTER_OP_UNARY_NOT_S64
:
900 if (!vstack_ax(stack
)) {
901 ERR("Empty stack\n");
905 if (vstack_ax(stack
)->type
!= REG_S64
&&
906 vstack_ax(stack
)->type
!= REG_U64
) {
907 ERR("Invalid register type\n");
914 case FILTER_OP_UNARY_PLUS_DOUBLE
:
915 case FILTER_OP_UNARY_MINUS_DOUBLE
:
916 case FILTER_OP_UNARY_NOT_DOUBLE
:
918 if (!vstack_ax(stack
)) {
919 ERR("Empty stack\n");
923 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
924 ERR("Invalid register type\n");
935 struct logical_op
*insn
= (struct logical_op
*) pc
;
937 if (!vstack_ax(stack
)) {
938 ERR("Empty stack\n");
942 if (vstack_ax(stack
)->type
!= REG_S64
943 && vstack_ax(stack
)->type
!= REG_U64
944 && vstack_ax(stack
)->type
!= REG_UNKNOWN
) {
945 ERR("Logical comparator expects S64, U64 or dynamic register\n");
950 dbg_printf("Validate jumping to bytecode offset %u\n",
951 (unsigned int) insn
->skip_offset
);
952 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
953 ERR("Loops are not allowed in bytecode\n");
961 case FILTER_OP_LOAD_FIELD_REF
:
963 ERR("Unknown field ref type\n");
967 case FILTER_OP_LOAD_FIELD_REF_STRING
:
968 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
970 struct load_op
*insn
= (struct load_op
*) pc
;
971 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
973 dbg_printf("Validate load field ref offset %u type string\n",
977 case FILTER_OP_LOAD_FIELD_REF_S64
:
979 struct load_op
*insn
= (struct load_op
*) pc
;
980 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
982 dbg_printf("Validate load field ref offset %u type s64\n",
986 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
988 struct load_op
*insn
= (struct load_op
*) pc
;
989 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
991 dbg_printf("Validate load field ref offset %u type double\n",
996 /* load from immediate operand */
997 case FILTER_OP_LOAD_STRING
:
998 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1003 case FILTER_OP_LOAD_S64
:
1008 case FILTER_OP_LOAD_DOUBLE
:
1013 case FILTER_OP_CAST_TO_S64
:
1014 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1016 struct cast_op
*insn
= (struct cast_op
*) pc
;
1018 if (!vstack_ax(stack
)) {
1019 ERR("Empty stack\n");
1023 switch (vstack_ax(stack
)->type
) {
1025 ERR("unknown register type\n");
1030 case REG_STAR_GLOB_STRING
:
1031 ERR("Cast op can only be applied to numeric or floating point registers\n");
1043 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
1044 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
1045 ERR("Cast expects double\n");
1052 case FILTER_OP_CAST_NOP
:
1057 /* get context ref */
1058 case FILTER_OP_GET_CONTEXT_REF
:
1060 struct load_op
*insn
= (struct load_op
*) pc
;
1061 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1063 dbg_printf("Validate get context ref offset %u type dynamic\n",
1067 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1069 struct load_op
*insn
= (struct load_op
*) pc
;
1070 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1072 dbg_printf("Validate get context ref offset %u type string\n",
1076 case FILTER_OP_GET_CONTEXT_REF_S64
:
1078 struct load_op
*insn
= (struct load_op
*) pc
;
1079 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1081 dbg_printf("Validate get context ref offset %u type s64\n",
1085 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1087 struct load_op
*insn
= (struct load_op
*) pc
;
1088 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1090 dbg_printf("Validate get context ref offset %u type double\n",
1096 * Instructions for recursive traversal through composed types.
1098 case FILTER_OP_GET_CONTEXT_ROOT
:
1100 dbg_printf("Validate get context root\n");
1103 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1105 dbg_printf("Validate get app context root\n");
1108 case FILTER_OP_GET_PAYLOAD_ROOT
:
1110 dbg_printf("Validate get payload root\n");
1113 case FILTER_OP_LOAD_FIELD
:
1116 * We tolerate that field type is unknown at validation,
1117 * because we are performing the load specialization in
1118 * a phase after validation.
1120 dbg_printf("Validate load field\n");
1123 case FILTER_OP_LOAD_FIELD_S8
:
1125 dbg_printf("Validate load field s8\n");
1128 case FILTER_OP_LOAD_FIELD_S16
:
1130 dbg_printf("Validate load field s16\n");
1133 case FILTER_OP_LOAD_FIELD_S32
:
1135 dbg_printf("Validate load field s32\n");
1138 case FILTER_OP_LOAD_FIELD_S64
:
1140 dbg_printf("Validate load field s64\n");
1143 case FILTER_OP_LOAD_FIELD_U8
:
1145 dbg_printf("Validate load field u8\n");
1148 case FILTER_OP_LOAD_FIELD_U16
:
1150 dbg_printf("Validate load field u16\n");
1153 case FILTER_OP_LOAD_FIELD_U32
:
1155 dbg_printf("Validate load field u32\n");
1158 case FILTER_OP_LOAD_FIELD_U64
:
1160 dbg_printf("Validate load field u64\n");
1163 case FILTER_OP_LOAD_FIELD_STRING
:
1165 dbg_printf("Validate load field string\n");
1168 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1170 dbg_printf("Validate load field sequence\n");
1173 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1175 dbg_printf("Validate load field double\n");
1179 case FILTER_OP_GET_SYMBOL
:
1181 struct load_op
*insn
= (struct load_op
*) pc
;
1182 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1184 dbg_printf("Validate get symbol offset %u\n", sym
->offset
);
1188 case FILTER_OP_GET_SYMBOL_FIELD
:
1190 struct load_op
*insn
= (struct load_op
*) pc
;
1191 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1193 dbg_printf("Validate get symbol field offset %u\n", sym
->offset
);
1197 case FILTER_OP_GET_INDEX_U16
:
1199 struct load_op
*insn
= (struct load_op
*) pc
;
1200 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1202 dbg_printf("Validate get index u16 index %u\n", get_index
->index
);
1206 case FILTER_OP_GET_INDEX_U64
:
1208 struct load_op
*insn
= (struct load_op
*) pc
;
1209 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1211 dbg_printf("Validate get index u64 index %" PRIu64
"\n", get_index
->index
);
1225 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1226 struct cds_lfht
*merge_points
,
1227 struct vstack
*stack
,
1232 unsigned long target_pc
= pc
- start_pc
;
1233 struct cds_lfht_iter iter
;
1234 struct cds_lfht_node
*node
;
1235 struct lfht_mp_node
*mp_node
;
1238 /* Validate the context resulting from the previous instruction */
1239 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1243 /* Validate merge points */
1244 hash
= lttng_hash_mix((const char *) target_pc
, sizeof(target_pc
),
1246 cds_lfht_lookup(merge_points
, hash
, lttng_hash_match
,
1247 (const char *) target_pc
, &iter
);
1248 node
= cds_lfht_iter_get_node(&iter
);
1250 mp_node
= caa_container_of(node
, struct lfht_mp_node
, node
);
1252 dbg_printf("Filter: validate merge point at offset %lu\n",
1254 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1255 ERR("Merge points differ for offset %lu\n",
1259 /* Once validated, we can remove the merge point */
1260 dbg_printf("Filter: remove merge point at offset %lu\n",
1262 ret
= cds_lfht_del(merge_points
, node
);
1270 * >0: going to next insn.
1271 * 0: success, stop iteration.
1275 int exec_insn(struct bytecode_runtime
*bytecode
,
1276 struct cds_lfht
*merge_points
,
1277 struct vstack
*stack
,
1282 char *next_pc
= *_next_pc
;
1284 switch (*(filter_opcode_t
*) pc
) {
1285 case FILTER_OP_UNKNOWN
:
1288 ERR("unknown bytecode op %u\n",
1289 (unsigned int) *(filter_opcode_t
*) pc
);
1294 case FILTER_OP_RETURN
:
1296 if (!vstack_ax(stack
)) {
1297 ERR("Empty stack\n");
1301 switch (vstack_ax(stack
)->type
) {
1307 ERR("Unexpected register type %d at end of bytecode\n",
1308 (int) vstack_ax(stack
)->type
);
1316 case FILTER_OP_RETURN_S64
:
1318 if (!vstack_ax(stack
)) {
1319 ERR("Empty stack\n");
1323 switch (vstack_ax(stack
)->type
) {
1329 ERR("Unexpected register type %d at end of bytecode\n",
1330 (int) vstack_ax(stack
)->type
);
1343 case FILTER_OP_PLUS
:
1344 case FILTER_OP_MINUS
:
1346 ERR("unsupported bytecode op %u\n",
1347 (unsigned int) *(filter_opcode_t
*) pc
);
1358 case FILTER_OP_EQ_STRING
:
1359 case FILTER_OP_NE_STRING
:
1360 case FILTER_OP_GT_STRING
:
1361 case FILTER_OP_LT_STRING
:
1362 case FILTER_OP_GE_STRING
:
1363 case FILTER_OP_LE_STRING
:
1364 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1365 case FILTER_OP_NE_STAR_GLOB_STRING
:
1366 case FILTER_OP_EQ_S64
:
1367 case FILTER_OP_NE_S64
:
1368 case FILTER_OP_GT_S64
:
1369 case FILTER_OP_LT_S64
:
1370 case FILTER_OP_GE_S64
:
1371 case FILTER_OP_LE_S64
:
1372 case FILTER_OP_EQ_DOUBLE
:
1373 case FILTER_OP_NE_DOUBLE
:
1374 case FILTER_OP_GT_DOUBLE
:
1375 case FILTER_OP_LT_DOUBLE
:
1376 case FILTER_OP_GE_DOUBLE
:
1377 case FILTER_OP_LE_DOUBLE
:
1378 case FILTER_OP_EQ_DOUBLE_S64
:
1379 case FILTER_OP_NE_DOUBLE_S64
:
1380 case FILTER_OP_GT_DOUBLE_S64
:
1381 case FILTER_OP_LT_DOUBLE_S64
:
1382 case FILTER_OP_GE_DOUBLE_S64
:
1383 case FILTER_OP_LE_DOUBLE_S64
:
1384 case FILTER_OP_EQ_S64_DOUBLE
:
1385 case FILTER_OP_NE_S64_DOUBLE
:
1386 case FILTER_OP_GT_S64_DOUBLE
:
1387 case FILTER_OP_LT_S64_DOUBLE
:
1388 case FILTER_OP_GE_S64_DOUBLE
:
1389 case FILTER_OP_LE_S64_DOUBLE
:
1392 if (vstack_pop(stack
)) {
1396 if (!vstack_ax(stack
)) {
1397 ERR("Empty stack\n");
1401 switch (vstack_ax(stack
)->type
) {
1406 case REG_STAR_GLOB_STRING
:
1410 ERR("Unexpected register type %d for operation\n",
1411 (int) vstack_ax(stack
)->type
);
1416 vstack_ax(stack
)->type
= REG_S64
;
1417 next_pc
+= sizeof(struct binary_op
);
1421 case FILTER_OP_BIT_RSHIFT
:
1422 case FILTER_OP_BIT_LSHIFT
:
1423 case FILTER_OP_BIT_AND
:
1424 case FILTER_OP_BIT_OR
:
1425 case FILTER_OP_BIT_XOR
:
1428 if (vstack_pop(stack
)) {
1432 if (!vstack_ax(stack
)) {
1433 ERR("Empty stack\n");
1437 switch (vstack_ax(stack
)->type
) {
1442 case REG_STAR_GLOB_STRING
:
1446 ERR("Unexpected register type %d for operation\n",
1447 (int) vstack_ax(stack
)->type
);
1452 vstack_ax(stack
)->type
= REG_U64
;
1453 next_pc
+= sizeof(struct binary_op
);
1458 case FILTER_OP_UNARY_PLUS
:
1459 case FILTER_OP_UNARY_MINUS
:
1462 if (!vstack_ax(stack
)) {
1463 ERR("Empty stack\n");
1467 switch (vstack_ax(stack
)->type
) {
1474 ERR("Unexpected register type %d for operation\n",
1475 (int) vstack_ax(stack
)->type
);
1479 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1480 next_pc
+= sizeof(struct unary_op
);
1484 case FILTER_OP_UNARY_PLUS_S64
:
1485 case FILTER_OP_UNARY_MINUS_S64
:
1486 case FILTER_OP_UNARY_NOT_S64
:
1489 if (!vstack_ax(stack
)) {
1490 ERR("Empty stack\n");
1494 switch (vstack_ax(stack
)->type
) {
1499 ERR("Unexpected register type %d for operation\n",
1500 (int) vstack_ax(stack
)->type
);
1505 next_pc
+= sizeof(struct unary_op
);
1509 case FILTER_OP_UNARY_NOT
:
1512 if (!vstack_ax(stack
)) {
1513 ERR("Empty stack\n");
1517 switch (vstack_ax(stack
)->type
) {
1524 ERR("Unexpected register type %d for operation\n",
1525 (int) vstack_ax(stack
)->type
);
1530 next_pc
+= sizeof(struct unary_op
);
1534 case FILTER_OP_UNARY_BIT_NOT
:
1537 if (!vstack_ax(stack
)) {
1538 ERR("Empty stack\n");
1542 switch (vstack_ax(stack
)->type
) {
1549 ERR("Unexpected register type %d for operation\n",
1550 (int) vstack_ax(stack
)->type
);
1555 vstack_ax(stack
)->type
= REG_U64
;
1556 next_pc
+= sizeof(struct unary_op
);
1560 case FILTER_OP_UNARY_NOT_DOUBLE
:
1563 if (!vstack_ax(stack
)) {
1564 ERR("Empty stack\n");
1568 switch (vstack_ax(stack
)->type
) {
1572 ERR("Incorrect register type %d for operation\n",
1573 (int) vstack_ax(stack
)->type
);
1578 vstack_ax(stack
)->type
= REG_S64
;
1579 next_pc
+= sizeof(struct unary_op
);
1583 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1584 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1587 if (!vstack_ax(stack
)) {
1588 ERR("Empty stack\n");
1592 switch (vstack_ax(stack
)->type
) {
1596 ERR("Incorrect register type %d for operation\n",
1597 (int) vstack_ax(stack
)->type
);
1602 vstack_ax(stack
)->type
= REG_DOUBLE
;
1603 next_pc
+= sizeof(struct unary_op
);
1611 struct logical_op
*insn
= (struct logical_op
*) pc
;
1614 /* Add merge point to table */
1615 merge_ret
= merge_point_add_check(merge_points
,
1616 insn
->skip_offset
, stack
);
1622 if (!vstack_ax(stack
)) {
1623 ERR("Empty stack\n");
1627 /* There is always a cast-to-s64 operation before a or/and op. */
1628 switch (vstack_ax(stack
)->type
) {
1633 ERR("Incorrect register type %d for operation\n",
1634 (int) vstack_ax(stack
)->type
);
1639 /* Continue to next instruction */
1640 /* Pop 1 when jump not taken */
1641 if (vstack_pop(stack
)) {
1645 next_pc
+= sizeof(struct logical_op
);
1649 /* load field ref */
1650 case FILTER_OP_LOAD_FIELD_REF
:
1652 ERR("Unknown field ref type\n");
1656 /* get context ref */
1657 case FILTER_OP_GET_CONTEXT_REF
:
1659 if (vstack_push(stack
)) {
1663 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1664 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1667 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1668 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1669 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1671 if (vstack_push(stack
)) {
1675 vstack_ax(stack
)->type
= REG_STRING
;
1676 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1679 case FILTER_OP_LOAD_FIELD_REF_S64
:
1680 case FILTER_OP_GET_CONTEXT_REF_S64
:
1682 if (vstack_push(stack
)) {
1686 vstack_ax(stack
)->type
= REG_S64
;
1687 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1690 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1691 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1693 if (vstack_push(stack
)) {
1697 vstack_ax(stack
)->type
= REG_DOUBLE
;
1698 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1702 /* load from immediate operand */
1703 case FILTER_OP_LOAD_STRING
:
1705 struct load_op
*insn
= (struct load_op
*) pc
;
1707 if (vstack_push(stack
)) {
1711 vstack_ax(stack
)->type
= REG_STRING
;
1712 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1716 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1718 struct load_op
*insn
= (struct load_op
*) pc
;
1720 if (vstack_push(stack
)) {
1724 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1725 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1729 case FILTER_OP_LOAD_S64
:
1731 if (vstack_push(stack
)) {
1735 vstack_ax(stack
)->type
= REG_S64
;
1736 next_pc
+= sizeof(struct load_op
)
1737 + sizeof(struct literal_numeric
);
1741 case FILTER_OP_LOAD_DOUBLE
:
1743 if (vstack_push(stack
)) {
1747 vstack_ax(stack
)->type
= REG_DOUBLE
;
1748 next_pc
+= sizeof(struct load_op
)
1749 + sizeof(struct literal_double
);
1753 case FILTER_OP_CAST_TO_S64
:
1754 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1757 if (!vstack_ax(stack
)) {
1758 ERR("Empty stack\n");
1762 switch (vstack_ax(stack
)->type
) {
1769 ERR("Incorrect register type %d for cast\n",
1770 (int) vstack_ax(stack
)->type
);
1774 vstack_ax(stack
)->type
= REG_S64
;
1775 next_pc
+= sizeof(struct cast_op
);
1778 case FILTER_OP_CAST_NOP
:
1780 next_pc
+= sizeof(struct cast_op
);
1785 * Instructions for recursive traversal through composed types.
1787 case FILTER_OP_GET_CONTEXT_ROOT
:
1788 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1789 case FILTER_OP_GET_PAYLOAD_ROOT
:
1791 if (vstack_push(stack
)) {
1795 vstack_ax(stack
)->type
= REG_PTR
;
1796 next_pc
+= sizeof(struct load_op
);
1800 case FILTER_OP_LOAD_FIELD
:
1803 if (!vstack_ax(stack
)) {
1804 ERR("Empty stack\n");
1808 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1809 ERR("Expecting pointer on top of stack\n");
1813 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1814 next_pc
+= sizeof(struct load_op
);
1818 case FILTER_OP_LOAD_FIELD_S8
:
1819 case FILTER_OP_LOAD_FIELD_S16
:
1820 case FILTER_OP_LOAD_FIELD_S32
:
1821 case FILTER_OP_LOAD_FIELD_S64
:
1824 if (!vstack_ax(stack
)) {
1825 ERR("Empty stack\n");
1829 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1830 ERR("Expecting pointer on top of stack\n");
1834 vstack_ax(stack
)->type
= REG_S64
;
1835 next_pc
+= sizeof(struct load_op
);
1839 case FILTER_OP_LOAD_FIELD_U8
:
1840 case FILTER_OP_LOAD_FIELD_U16
:
1841 case FILTER_OP_LOAD_FIELD_U32
:
1842 case FILTER_OP_LOAD_FIELD_U64
:
1845 if (!vstack_ax(stack
)) {
1846 ERR("Empty stack\n");
1850 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1851 ERR("Expecting pointer on top of stack\n");
1855 vstack_ax(stack
)->type
= REG_U64
;
1856 next_pc
+= sizeof(struct load_op
);
1860 case FILTER_OP_LOAD_FIELD_STRING
:
1861 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1864 if (!vstack_ax(stack
)) {
1865 ERR("Empty stack\n");
1869 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1870 ERR("Expecting pointer on top of stack\n");
1874 vstack_ax(stack
)->type
= REG_STRING
;
1875 next_pc
+= sizeof(struct load_op
);
1879 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1882 if (!vstack_ax(stack
)) {
1883 ERR("Empty stack\n");
1887 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1888 ERR("Expecting pointer on top of stack\n");
1892 vstack_ax(stack
)->type
= REG_DOUBLE
;
1893 next_pc
+= sizeof(struct load_op
);
1897 case FILTER_OP_GET_SYMBOL
:
1898 case FILTER_OP_GET_SYMBOL_FIELD
:
1901 if (!vstack_ax(stack
)) {
1902 ERR("Empty stack\n");
1906 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1907 ERR("Expecting pointer on top of stack\n");
1911 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1915 case FILTER_OP_GET_INDEX_U16
:
1918 if (!vstack_ax(stack
)) {
1919 ERR("Empty stack\n");
1923 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1924 ERR("Expecting pointer on top of stack\n");
1928 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1932 case FILTER_OP_GET_INDEX_U64
:
1935 if (!vstack_ax(stack
)) {
1936 ERR("Empty stack\n");
1940 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1941 ERR("Expecting pointer on top of stack\n");
1945 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1951 *_next_pc
= next_pc
;
1956 * Never called concurrently (hash seed is shared).
1958 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1960 struct cds_lfht
*merge_points
;
1961 char *pc
, *next_pc
, *start_pc
;
1963 struct vstack stack
;
1965 vstack_init(&stack
);
1967 if (!lttng_hash_seed_ready
) {
1968 lttng_hash_seed
= time(NULL
);
1969 lttng_hash_seed_ready
= 1;
1972 * Note: merge_points hash table used by single thread, and
1973 * never concurrently resized. Therefore, we can use it without
1974 * holding RCU read-side lock and free nodes without using
1977 merge_points
= cds_lfht_new(DEFAULT_NR_MERGE_POINTS
,
1978 MIN_NR_BUCKETS
, MAX_NR_BUCKETS
,
1980 if (!merge_points
) {
1981 ERR("Error allocating hash table for bytecode validation\n");
1984 start_pc
= &bytecode
->code
[0];
1985 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1987 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1990 ERR("filter bytecode overflow\n");
1993 dbg_printf("Validating op %s (%u)\n",
1994 print_op((unsigned int) *(filter_opcode_t
*) pc
),
1995 (unsigned int) *(filter_opcode_t
*) pc
);
1998 * For each instruction, validate the current context
1999 * (traversal of entire execution flow), and validate
2000 * all merge points targeting this instruction.
2002 ret
= validate_instruction_all_contexts(bytecode
, merge_points
,
2003 &stack
, start_pc
, pc
);
2006 ret
= exec_insn(bytecode
, merge_points
, &stack
, &next_pc
, pc
);
2011 if (delete_all_nodes(merge_points
)) {
2013 ERR("Unexpected merge points\n");
2017 if (cds_lfht_destroy(merge_points
, NULL
)) {
2018 ERR("Error destroying hash table\n");