/*
 * lttng-filter-validator.c
 *
 * LTTng UST filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <time.h>

#include <urcu/rculfhash.h>

#include "lttng-filter.h"
#include "lttng-hash-helper.h"
#include "string-utils.h"
/*
 * Number of merge points for hash table size. Hash table initialized to
 * that size, and we do not resize, because we do not want to trigger
 * RCU worker thread execution: fall-back on linear traversal if number
 * of merge points exceeds this value.
 */
/* Fixed-size hash table: min == max == default, so it is never resized. */
#define DEFAULT_NR_MERGE_POINTS		128
#define MIN_NR_BUCKETS			128
#define MAX_NR_BUCKETS			128
47 /* merge point table node */
49 struct cds_lfht_node node
;
51 /* Context at merge point */
53 unsigned long target_pc
;
/*
 * Hash seed for the merge-point table, initialized lazily on first
 * validation (see lttng_filter_validate_bytecode). Never accessed
 * concurrently, per the file's "never called concurrently" contract.
 */
static unsigned long lttng_hash_seed;
static unsigned int lttng_hash_seed_ready;
60 int lttng_hash_match(struct cds_lfht_node
*node
, const void *key
)
62 struct lfht_mp_node
*mp_node
=
63 caa_container_of(node
, struct lfht_mp_node
, node
);
64 unsigned long key_pc
= (unsigned long) key
;
66 if (mp_node
->target_pc
== key_pc
)
73 int merge_points_compare(const struct vstack
*stacka
,
74 const struct vstack
*stackb
)
78 if (stacka
->top
!= stackb
->top
)
80 len
= stacka
->top
+ 1;
82 for (i
= 0; i
< len
; i
++) {
83 if (stacka
->e
[i
].type
!= REG_UNKNOWN
84 && stackb
->e
[i
].type
!= REG_UNKNOWN
85 && stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
92 int merge_point_add_check(struct cds_lfht
*ht
, unsigned long target_pc
,
93 const struct vstack
*stack
)
95 struct lfht_mp_node
*node
;
96 unsigned long hash
= lttng_hash_mix((const char *) target_pc
,
99 struct cds_lfht_node
*ret
;
101 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
103 node
= zmalloc(sizeof(struct lfht_mp_node
));
106 node
->target_pc
= target_pc
;
107 memcpy(&node
->stack
, stack
, sizeof(node
->stack
));
108 ret
= cds_lfht_add_unique(ht
, hash
, lttng_hash_match
,
109 (const char *) target_pc
, &node
->node
);
110 if (ret
!= &node
->node
) {
111 struct lfht_mp_node
*ret_mp
=
112 caa_container_of(ret
, struct lfht_mp_node
, node
);
114 /* Key already present */
115 dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
118 if (merge_points_compare(stack
, &ret_mp
->stack
)) {
119 ERR("Merge points differ for offset %lu\n",
128 * Binary comparators use top of stack and top of stack -1.
129 * Return 0 if typing is known to match, 1 if typing is dynamic
130 * (unknown), negative error value on error.
133 int bin_op_compare_check(struct vstack
*stack
, filter_opcode_t opcode
,
136 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
139 switch (vstack_ax(stack
)->type
) {
146 switch (vstack_bx(stack
)->type
) {
154 case REG_STAR_GLOB_STRING
:
155 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
164 case REG_STAR_GLOB_STRING
:
165 switch (vstack_bx(stack
)->type
) {
172 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
176 case REG_STAR_GLOB_STRING
:
184 switch (vstack_bx(stack
)->type
) {
191 case REG_STAR_GLOB_STRING
:
205 ERR("type mismatch for '%s' binary operator\n", str
);
209 ERR("empty stack for '%s' binary operator\n", str
);
213 ERR("unknown type for '%s' binary operator\n", str
);
218 * Binary bitwise operators use top of stack and top of stack -1.
219 * Return 0 if typing is known to match, 1 if typing is dynamic
220 * (unknown), negative error value on error.
223 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
226 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
229 switch (vstack_ax(stack
)->type
) {
236 switch (vstack_bx(stack
)->type
) {
253 ERR("empty stack for '%s' binary operator\n", str
);
257 ERR("unknown type for '%s' binary operator\n", str
);
262 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
263 const struct get_symbol
*sym
)
265 const char *str
, *str_limit
;
268 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
271 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
272 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
273 len_limit
= str_limit
- str
;
274 if (strnlen(str
, len_limit
) == len_limit
)
280 * Validate bytecode range overflow within the validation pass.
281 * Called for each instruction encountered.
284 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
285 char *start_pc
, char *pc
)
289 switch (*(filter_opcode_t
*) pc
) {
290 case FILTER_OP_UNKNOWN
:
293 ERR("unknown bytecode op %u\n",
294 (unsigned int) *(filter_opcode_t
*) pc
);
299 case FILTER_OP_RETURN
:
300 case FILTER_OP_RETURN_S64
:
302 if (unlikely(pc
+ sizeof(struct return_op
)
303 > start_pc
+ bytecode
->len
)) {
314 case FILTER_OP_MINUS
:
316 ERR("unsupported bytecode op %u\n",
317 (unsigned int) *(filter_opcode_t
*) pc
);
328 case FILTER_OP_EQ_STRING
:
329 case FILTER_OP_NE_STRING
:
330 case FILTER_OP_GT_STRING
:
331 case FILTER_OP_LT_STRING
:
332 case FILTER_OP_GE_STRING
:
333 case FILTER_OP_LE_STRING
:
334 case FILTER_OP_EQ_STAR_GLOB_STRING
:
335 case FILTER_OP_NE_STAR_GLOB_STRING
:
336 case FILTER_OP_EQ_S64
:
337 case FILTER_OP_NE_S64
:
338 case FILTER_OP_GT_S64
:
339 case FILTER_OP_LT_S64
:
340 case FILTER_OP_GE_S64
:
341 case FILTER_OP_LE_S64
:
342 case FILTER_OP_EQ_DOUBLE
:
343 case FILTER_OP_NE_DOUBLE
:
344 case FILTER_OP_GT_DOUBLE
:
345 case FILTER_OP_LT_DOUBLE
:
346 case FILTER_OP_GE_DOUBLE
:
347 case FILTER_OP_LE_DOUBLE
:
348 case FILTER_OP_EQ_DOUBLE_S64
:
349 case FILTER_OP_NE_DOUBLE_S64
:
350 case FILTER_OP_GT_DOUBLE_S64
:
351 case FILTER_OP_LT_DOUBLE_S64
:
352 case FILTER_OP_GE_DOUBLE_S64
:
353 case FILTER_OP_LE_DOUBLE_S64
:
354 case FILTER_OP_EQ_S64_DOUBLE
:
355 case FILTER_OP_NE_S64_DOUBLE
:
356 case FILTER_OP_GT_S64_DOUBLE
:
357 case FILTER_OP_LT_S64_DOUBLE
:
358 case FILTER_OP_GE_S64_DOUBLE
:
359 case FILTER_OP_LE_S64_DOUBLE
:
360 case FILTER_OP_BIT_RSHIFT
:
361 case FILTER_OP_BIT_LSHIFT
:
362 case FILTER_OP_BIT_AND
:
363 case FILTER_OP_BIT_OR
:
364 case FILTER_OP_BIT_XOR
:
366 if (unlikely(pc
+ sizeof(struct binary_op
)
367 > start_pc
+ bytecode
->len
)) {
374 case FILTER_OP_UNARY_PLUS
:
375 case FILTER_OP_UNARY_MINUS
:
376 case FILTER_OP_UNARY_NOT
:
377 case FILTER_OP_UNARY_PLUS_S64
:
378 case FILTER_OP_UNARY_MINUS_S64
:
379 case FILTER_OP_UNARY_NOT_S64
:
380 case FILTER_OP_UNARY_PLUS_DOUBLE
:
381 case FILTER_OP_UNARY_MINUS_DOUBLE
:
382 case FILTER_OP_UNARY_NOT_DOUBLE
:
383 case FILTER_OP_UNARY_BIT_NOT
:
385 if (unlikely(pc
+ sizeof(struct unary_op
)
386 > start_pc
+ bytecode
->len
)) {
396 if (unlikely(pc
+ sizeof(struct logical_op
)
397 > start_pc
+ bytecode
->len
)) {
404 case FILTER_OP_LOAD_FIELD_REF
:
406 ERR("Unknown field ref type\n");
411 /* get context ref */
412 case FILTER_OP_GET_CONTEXT_REF
:
413 case FILTER_OP_LOAD_FIELD_REF_STRING
:
414 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
415 case FILTER_OP_LOAD_FIELD_REF_S64
:
416 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
417 case FILTER_OP_GET_CONTEXT_REF_STRING
:
418 case FILTER_OP_GET_CONTEXT_REF_S64
:
419 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
421 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
422 > start_pc
+ bytecode
->len
)) {
428 /* load from immediate operand */
429 case FILTER_OP_LOAD_STRING
:
430 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
432 struct load_op
*insn
= (struct load_op
*) pc
;
433 uint32_t str_len
, maxlen
;
435 if (unlikely(pc
+ sizeof(struct load_op
)
436 > start_pc
+ bytecode
->len
)) {
441 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
442 str_len
= strnlen(insn
->data
, maxlen
);
443 if (unlikely(str_len
>= maxlen
)) {
444 /* Final '\0' not found within range */
450 case FILTER_OP_LOAD_S64
:
452 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
453 > start_pc
+ bytecode
->len
)) {
459 case FILTER_OP_LOAD_DOUBLE
:
461 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_double
)
462 > start_pc
+ bytecode
->len
)) {
468 case FILTER_OP_CAST_TO_S64
:
469 case FILTER_OP_CAST_DOUBLE_TO_S64
:
470 case FILTER_OP_CAST_NOP
:
472 if (unlikely(pc
+ sizeof(struct cast_op
)
473 > start_pc
+ bytecode
->len
)) {
480 * Instructions for recursive traversal through composed types.
482 case FILTER_OP_GET_CONTEXT_ROOT
:
483 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
484 case FILTER_OP_GET_PAYLOAD_ROOT
:
485 case FILTER_OP_LOAD_FIELD
:
486 case FILTER_OP_LOAD_FIELD_S8
:
487 case FILTER_OP_LOAD_FIELD_S16
:
488 case FILTER_OP_LOAD_FIELD_S32
:
489 case FILTER_OP_LOAD_FIELD_S64
:
490 case FILTER_OP_LOAD_FIELD_U8
:
491 case FILTER_OP_LOAD_FIELD_U16
:
492 case FILTER_OP_LOAD_FIELD_U32
:
493 case FILTER_OP_LOAD_FIELD_U64
:
494 case FILTER_OP_LOAD_FIELD_STRING
:
495 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
496 case FILTER_OP_LOAD_FIELD_DOUBLE
:
497 if (unlikely(pc
+ sizeof(struct load_op
)
498 > start_pc
+ bytecode
->len
)) {
503 case FILTER_OP_GET_SYMBOL
:
505 struct load_op
*insn
= (struct load_op
*) pc
;
506 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
508 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
509 > start_pc
+ bytecode
->len
)) {
513 ret
= validate_get_symbol(bytecode
, sym
);
517 case FILTER_OP_GET_SYMBOL_FIELD
:
518 ERR("Unexpected get symbol field");
522 case FILTER_OP_GET_INDEX_U16
:
523 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
524 > start_pc
+ bytecode
->len
)) {
529 case FILTER_OP_GET_INDEX_U64
:
530 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
531 > start_pc
+ bytecode
->len
)) {
541 unsigned long delete_all_nodes(struct cds_lfht
*ht
)
543 struct cds_lfht_iter iter
;
544 struct lfht_mp_node
*node
;
545 unsigned long nr_nodes
= 0;
547 cds_lfht_for_each_entry(ht
, &iter
, node
, node
) {
550 ret
= cds_lfht_del(ht
, cds_lfht_iter_get_node(&iter
));
552 /* note: this hash table is never used concurrently */
565 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
566 struct vstack
*stack
,
571 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
574 case FILTER_OP_UNKNOWN
:
577 ERR("unknown bytecode op %u\n",
578 (unsigned int) *(filter_opcode_t
*) pc
);
583 case FILTER_OP_RETURN
:
584 case FILTER_OP_RETURN_S64
:
594 case FILTER_OP_MINUS
:
596 ERR("unsupported bytecode op %u\n",
597 (unsigned int) opcode
);
604 ret
= bin_op_compare_check(stack
, opcode
, "==");
611 ret
= bin_op_compare_check(stack
, opcode
, "!=");
618 ret
= bin_op_compare_check(stack
, opcode
, ">");
625 ret
= bin_op_compare_check(stack
, opcode
, "<");
632 ret
= bin_op_compare_check(stack
, opcode
, ">=");
639 ret
= bin_op_compare_check(stack
, opcode
, "<=");
645 case FILTER_OP_EQ_STRING
:
646 case FILTER_OP_NE_STRING
:
647 case FILTER_OP_GT_STRING
:
648 case FILTER_OP_LT_STRING
:
649 case FILTER_OP_GE_STRING
:
650 case FILTER_OP_LE_STRING
:
652 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
653 ERR("Empty stack\n");
657 if (vstack_ax(stack
)->type
!= REG_STRING
658 || vstack_bx(stack
)->type
!= REG_STRING
) {
659 ERR("Unexpected register type for string comparator\n");
666 case FILTER_OP_EQ_STAR_GLOB_STRING
:
667 case FILTER_OP_NE_STAR_GLOB_STRING
:
669 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
670 ERR("Empty stack\n");
674 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
675 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
676 ERR("Unexpected register type for globbing pattern comparator\n");
683 case FILTER_OP_EQ_S64
:
684 case FILTER_OP_NE_S64
:
685 case FILTER_OP_GT_S64
:
686 case FILTER_OP_LT_S64
:
687 case FILTER_OP_GE_S64
:
688 case FILTER_OP_LE_S64
:
690 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
691 ERR("Empty stack\n");
695 if (vstack_ax(stack
)->type
!= REG_S64
696 || vstack_bx(stack
)->type
!= REG_S64
) {
697 ERR("Unexpected register type for s64 comparator\n");
704 case FILTER_OP_EQ_DOUBLE
:
705 case FILTER_OP_NE_DOUBLE
:
706 case FILTER_OP_GT_DOUBLE
:
707 case FILTER_OP_LT_DOUBLE
:
708 case FILTER_OP_GE_DOUBLE
:
709 case FILTER_OP_LE_DOUBLE
:
711 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
712 ERR("Empty stack\n");
716 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
717 ERR("Double operator should have two double registers\n");
724 case FILTER_OP_EQ_DOUBLE_S64
:
725 case FILTER_OP_NE_DOUBLE_S64
:
726 case FILTER_OP_GT_DOUBLE_S64
:
727 case FILTER_OP_LT_DOUBLE_S64
:
728 case FILTER_OP_GE_DOUBLE_S64
:
729 case FILTER_OP_LE_DOUBLE_S64
:
731 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
732 ERR("Empty stack\n");
736 if (vstack_ax(stack
)->type
!= REG_S64
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
737 ERR("Double-S64 operator has unexpected register types\n");
744 case FILTER_OP_EQ_S64_DOUBLE
:
745 case FILTER_OP_NE_S64_DOUBLE
:
746 case FILTER_OP_GT_S64_DOUBLE
:
747 case FILTER_OP_LT_S64_DOUBLE
:
748 case FILTER_OP_GE_S64_DOUBLE
:
749 case FILTER_OP_LE_S64_DOUBLE
:
751 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
752 ERR("Empty stack\n");
756 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_S64
) {
757 ERR("S64-Double operator has unexpected register types\n");
764 case FILTER_OP_BIT_RSHIFT
:
765 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
769 case FILTER_OP_BIT_LSHIFT
:
770 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
774 case FILTER_OP_BIT_AND
:
775 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
779 case FILTER_OP_BIT_OR
:
780 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
784 case FILTER_OP_BIT_XOR
:
785 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
791 case FILTER_OP_UNARY_PLUS
:
792 case FILTER_OP_UNARY_MINUS
:
793 case FILTER_OP_UNARY_NOT
:
795 if (!vstack_ax(stack
)) {
796 ERR("Empty stack\n");
800 switch (vstack_ax(stack
)->type
) {
802 ERR("unknown register type\n");
807 case REG_STAR_GLOB_STRING
:
808 ERR("Unary op can only be applied to numeric or floating point registers\n");
820 case FILTER_OP_UNARY_BIT_NOT
:
822 if (!vstack_ax(stack
)) {
823 ERR("Empty stack\n");
827 switch (vstack_ax(stack
)->type
) {
829 ERR("unknown register type\n");
834 case REG_STAR_GLOB_STRING
:
836 ERR("Unary bitwise op can only be applied to numeric registers\n");
847 case FILTER_OP_UNARY_PLUS_S64
:
848 case FILTER_OP_UNARY_MINUS_S64
:
849 case FILTER_OP_UNARY_NOT_S64
:
851 if (!vstack_ax(stack
)) {
852 ERR("Empty stack\n");
856 if (vstack_ax(stack
)->type
!= REG_S64
) {
857 ERR("Invalid register type\n");
864 case FILTER_OP_UNARY_PLUS_DOUBLE
:
865 case FILTER_OP_UNARY_MINUS_DOUBLE
:
866 case FILTER_OP_UNARY_NOT_DOUBLE
:
868 if (!vstack_ax(stack
)) {
869 ERR("Empty stack\n");
873 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
874 ERR("Invalid register type\n");
885 struct logical_op
*insn
= (struct logical_op
*) pc
;
887 if (!vstack_ax(stack
)) {
888 ERR("Empty stack\n");
892 if (vstack_ax(stack
)->type
!= REG_S64
893 && vstack_ax(stack
)->type
!= REG_UNKNOWN
) {
894 ERR("Logical comparator expects S64 or dynamic register\n");
899 dbg_printf("Validate jumping to bytecode offset %u\n",
900 (unsigned int) insn
->skip_offset
);
901 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
902 ERR("Loops are not allowed in bytecode\n");
910 case FILTER_OP_LOAD_FIELD_REF
:
912 ERR("Unknown field ref type\n");
916 case FILTER_OP_LOAD_FIELD_REF_STRING
:
917 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
919 struct load_op
*insn
= (struct load_op
*) pc
;
920 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
922 dbg_printf("Validate load field ref offset %u type string\n",
926 case FILTER_OP_LOAD_FIELD_REF_S64
:
928 struct load_op
*insn
= (struct load_op
*) pc
;
929 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
931 dbg_printf("Validate load field ref offset %u type s64\n",
935 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
937 struct load_op
*insn
= (struct load_op
*) pc
;
938 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
940 dbg_printf("Validate load field ref offset %u type double\n",
945 /* load from immediate operand */
946 case FILTER_OP_LOAD_STRING
:
947 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
952 case FILTER_OP_LOAD_S64
:
957 case FILTER_OP_LOAD_DOUBLE
:
962 case FILTER_OP_CAST_TO_S64
:
963 case FILTER_OP_CAST_DOUBLE_TO_S64
:
965 struct cast_op
*insn
= (struct cast_op
*) pc
;
967 if (!vstack_ax(stack
)) {
968 ERR("Empty stack\n");
972 switch (vstack_ax(stack
)->type
) {
974 ERR("unknown register type\n");
979 case REG_STAR_GLOB_STRING
:
980 ERR("Cast op can only be applied to numeric or floating point registers\n");
990 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
991 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
992 ERR("Cast expects double\n");
999 case FILTER_OP_CAST_NOP
:
1004 /* get context ref */
1005 case FILTER_OP_GET_CONTEXT_REF
:
1007 struct load_op
*insn
= (struct load_op
*) pc
;
1008 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1010 dbg_printf("Validate get context ref offset %u type dynamic\n",
1014 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1016 struct load_op
*insn
= (struct load_op
*) pc
;
1017 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1019 dbg_printf("Validate get context ref offset %u type string\n",
1023 case FILTER_OP_GET_CONTEXT_REF_S64
:
1025 struct load_op
*insn
= (struct load_op
*) pc
;
1026 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1028 dbg_printf("Validate get context ref offset %u type s64\n",
1032 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1034 struct load_op
*insn
= (struct load_op
*) pc
;
1035 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1037 dbg_printf("Validate get context ref offset %u type double\n",
1043 * Instructions for recursive traversal through composed types.
1045 case FILTER_OP_GET_CONTEXT_ROOT
:
1047 dbg_printf("Validate get context root\n");
1050 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1052 dbg_printf("Validate get app context root\n");
1055 case FILTER_OP_GET_PAYLOAD_ROOT
:
1057 dbg_printf("Validate get payload root\n");
1060 case FILTER_OP_LOAD_FIELD
:
1063 * We tolerate that field type is unknown at validation,
1064 * because we are performing the load specialization in
1065 * a phase after validation.
1067 dbg_printf("Validate load field\n");
1070 case FILTER_OP_LOAD_FIELD_S8
:
1072 dbg_printf("Validate load field s8\n");
1075 case FILTER_OP_LOAD_FIELD_S16
:
1077 dbg_printf("Validate load field s16\n");
1080 case FILTER_OP_LOAD_FIELD_S32
:
1082 dbg_printf("Validate load field s32\n");
1085 case FILTER_OP_LOAD_FIELD_S64
:
1087 dbg_printf("Validate load field s64\n");
1090 case FILTER_OP_LOAD_FIELD_U8
:
1092 dbg_printf("Validate load field u8\n");
1095 case FILTER_OP_LOAD_FIELD_U16
:
1097 dbg_printf("Validate load field u16\n");
1100 case FILTER_OP_LOAD_FIELD_U32
:
1102 dbg_printf("Validate load field u32\n");
1105 case FILTER_OP_LOAD_FIELD_U64
:
1107 dbg_printf("Validate load field u64\n");
1110 case FILTER_OP_LOAD_FIELD_STRING
:
1112 dbg_printf("Validate load field string\n");
1115 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1117 dbg_printf("Validate load field sequence\n");
1120 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1122 dbg_printf("Validate load field double\n");
1126 case FILTER_OP_GET_SYMBOL
:
1128 struct load_op
*insn
= (struct load_op
*) pc
;
1129 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1131 dbg_printf("Validate get symbol offset %u\n", sym
->offset
);
1135 case FILTER_OP_GET_SYMBOL_FIELD
:
1137 struct load_op
*insn
= (struct load_op
*) pc
;
1138 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1140 dbg_printf("Validate get symbol field offset %u\n", sym
->offset
);
1144 case FILTER_OP_GET_INDEX_U16
:
1146 struct load_op
*insn
= (struct load_op
*) pc
;
1147 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1149 dbg_printf("Validate get index u16 index %u\n", get_index
->index
);
1153 case FILTER_OP_GET_INDEX_U64
:
1155 struct load_op
*insn
= (struct load_op
*) pc
;
1156 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1158 dbg_printf("Validate get index u64 index %" PRIu64
"\n", get_index
->index
);
1172 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1173 struct cds_lfht
*merge_points
,
1174 struct vstack
*stack
,
1179 unsigned long target_pc
= pc
- start_pc
;
1180 struct cds_lfht_iter iter
;
1181 struct cds_lfht_node
*node
;
1182 struct lfht_mp_node
*mp_node
;
1185 /* Validate the context resulting from the previous instruction */
1186 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1190 /* Validate merge points */
1191 hash
= lttng_hash_mix((const char *) target_pc
, sizeof(target_pc
),
1193 cds_lfht_lookup(merge_points
, hash
, lttng_hash_match
,
1194 (const char *) target_pc
, &iter
);
1195 node
= cds_lfht_iter_get_node(&iter
);
1197 mp_node
= caa_container_of(node
, struct lfht_mp_node
, node
);
1199 dbg_printf("Filter: validate merge point at offset %lu\n",
1201 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1202 ERR("Merge points differ for offset %lu\n",
1206 /* Once validated, we can remove the merge point */
1207 dbg_printf("Filter: remove merge point at offset %lu\n",
1209 ret
= cds_lfht_del(merge_points
, node
);
1217 * >0: going to next insn.
1218 * 0: success, stop iteration.
1222 int exec_insn(struct bytecode_runtime
*bytecode
,
1223 struct cds_lfht
*merge_points
,
1224 struct vstack
*stack
,
1229 char *next_pc
= *_next_pc
;
1231 switch (*(filter_opcode_t
*) pc
) {
1232 case FILTER_OP_UNKNOWN
:
1235 ERR("unknown bytecode op %u\n",
1236 (unsigned int) *(filter_opcode_t
*) pc
);
1241 case FILTER_OP_RETURN
:
1243 if (!vstack_ax(stack
)) {
1244 ERR("Empty stack\n");
1248 switch (vstack_ax(stack
)->type
) {
1253 ERR("Unexpected register type %d at end of bytecode\n",
1254 (int) vstack_ax(stack
)->type
);
1262 case FILTER_OP_RETURN_S64
:
1264 if (!vstack_ax(stack
)) {
1265 ERR("Empty stack\n");
1269 switch (vstack_ax(stack
)->type
) {
1274 ERR("Unexpected register type %d at end of bytecode\n",
1275 (int) vstack_ax(stack
)->type
);
1288 case FILTER_OP_PLUS
:
1289 case FILTER_OP_MINUS
:
1291 ERR("unsupported bytecode op %u\n",
1292 (unsigned int) *(filter_opcode_t
*) pc
);
1303 case FILTER_OP_EQ_STRING
:
1304 case FILTER_OP_NE_STRING
:
1305 case FILTER_OP_GT_STRING
:
1306 case FILTER_OP_LT_STRING
:
1307 case FILTER_OP_GE_STRING
:
1308 case FILTER_OP_LE_STRING
:
1309 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1310 case FILTER_OP_NE_STAR_GLOB_STRING
:
1311 case FILTER_OP_EQ_S64
:
1312 case FILTER_OP_NE_S64
:
1313 case FILTER_OP_GT_S64
:
1314 case FILTER_OP_LT_S64
:
1315 case FILTER_OP_GE_S64
:
1316 case FILTER_OP_LE_S64
:
1317 case FILTER_OP_EQ_DOUBLE
:
1318 case FILTER_OP_NE_DOUBLE
:
1319 case FILTER_OP_GT_DOUBLE
:
1320 case FILTER_OP_LT_DOUBLE
:
1321 case FILTER_OP_GE_DOUBLE
:
1322 case FILTER_OP_LE_DOUBLE
:
1323 case FILTER_OP_EQ_DOUBLE_S64
:
1324 case FILTER_OP_NE_DOUBLE_S64
:
1325 case FILTER_OP_GT_DOUBLE_S64
:
1326 case FILTER_OP_LT_DOUBLE_S64
:
1327 case FILTER_OP_GE_DOUBLE_S64
:
1328 case FILTER_OP_LE_DOUBLE_S64
:
1329 case FILTER_OP_EQ_S64_DOUBLE
:
1330 case FILTER_OP_NE_S64_DOUBLE
:
1331 case FILTER_OP_GT_S64_DOUBLE
:
1332 case FILTER_OP_LT_S64_DOUBLE
:
1333 case FILTER_OP_GE_S64_DOUBLE
:
1334 case FILTER_OP_LE_S64_DOUBLE
:
1335 case FILTER_OP_BIT_RSHIFT
:
1336 case FILTER_OP_BIT_LSHIFT
:
1337 case FILTER_OP_BIT_AND
:
1338 case FILTER_OP_BIT_OR
:
1339 case FILTER_OP_BIT_XOR
:
1342 if (vstack_pop(stack
)) {
1346 if (!vstack_ax(stack
)) {
1347 ERR("Empty stack\n");
1351 switch (vstack_ax(stack
)->type
) {
1355 case REG_STAR_GLOB_STRING
:
1359 ERR("Unexpected register type %d for operation\n",
1360 (int) vstack_ax(stack
)->type
);
1365 vstack_ax(stack
)->type
= REG_S64
;
1366 next_pc
+= sizeof(struct binary_op
);
1371 case FILTER_OP_UNARY_PLUS
:
1372 case FILTER_OP_UNARY_MINUS
:
1375 if (!vstack_ax(stack
)) {
1376 ERR("Empty stack\n");
1380 switch (vstack_ax(stack
)->type
) {
1386 ERR("Unexpected register type %d for operation\n",
1387 (int) vstack_ax(stack
)->type
);
1391 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1392 next_pc
+= sizeof(struct unary_op
);
1396 case FILTER_OP_UNARY_PLUS_S64
:
1397 case FILTER_OP_UNARY_MINUS_S64
:
1398 case FILTER_OP_UNARY_NOT_S64
:
1401 if (!vstack_ax(stack
)) {
1402 ERR("Empty stack\n");
1406 switch (vstack_ax(stack
)->type
) {
1410 ERR("Unexpected register type %d for operation\n",
1411 (int) vstack_ax(stack
)->type
);
1416 vstack_ax(stack
)->type
= REG_S64
;
1417 next_pc
+= sizeof(struct unary_op
);
1421 case FILTER_OP_UNARY_NOT
:
1424 if (!vstack_ax(stack
)) {
1425 ERR("Empty stack\n");
1429 switch (vstack_ax(stack
)->type
) {
1435 ERR("Unexpected register type %d for operation\n",
1436 (int) vstack_ax(stack
)->type
);
1441 vstack_ax(stack
)->type
= REG_S64
;
1442 next_pc
+= sizeof(struct unary_op
);
1446 case FILTER_OP_UNARY_BIT_NOT
:
1449 if (!vstack_ax(stack
)) {
1450 ERR("Empty stack\n");
1454 switch (vstack_ax(stack
)->type
) {
1460 ERR("Unexpected register type %d for operation\n",
1461 (int) vstack_ax(stack
)->type
);
1466 vstack_ax(stack
)->type
= REG_S64
;
1467 next_pc
+= sizeof(struct unary_op
);
1471 case FILTER_OP_UNARY_NOT_DOUBLE
:
1474 if (!vstack_ax(stack
)) {
1475 ERR("Empty stack\n");
1479 switch (vstack_ax(stack
)->type
) {
1483 ERR("Incorrect register type %d for operation\n",
1484 (int) vstack_ax(stack
)->type
);
1489 vstack_ax(stack
)->type
= REG_S64
;
1490 next_pc
+= sizeof(struct unary_op
);
1494 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1495 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1498 if (!vstack_ax(stack
)) {
1499 ERR("Empty stack\n");
1503 switch (vstack_ax(stack
)->type
) {
1507 ERR("Incorrect register type %d for operation\n",
1508 (int) vstack_ax(stack
)->type
);
1513 vstack_ax(stack
)->type
= REG_DOUBLE
;
1514 next_pc
+= sizeof(struct unary_op
);
1522 struct logical_op
*insn
= (struct logical_op
*) pc
;
1525 /* Add merge point to table */
1526 merge_ret
= merge_point_add_check(merge_points
,
1527 insn
->skip_offset
, stack
);
1533 if (!vstack_ax(stack
)) {
1534 ERR("Empty stack\n");
1538 /* There is always a cast-to-s64 operation before a or/and op. */
1539 switch (vstack_ax(stack
)->type
) {
1543 ERR("Incorrect register type %d for operation\n",
1544 (int) vstack_ax(stack
)->type
);
1549 /* Continue to next instruction */
1550 /* Pop 1 when jump not taken */
1551 if (vstack_pop(stack
)) {
1555 next_pc
+= sizeof(struct logical_op
);
1559 /* load field ref */
1560 case FILTER_OP_LOAD_FIELD_REF
:
1562 ERR("Unknown field ref type\n");
1566 /* get context ref */
1567 case FILTER_OP_GET_CONTEXT_REF
:
1569 if (vstack_push(stack
)) {
1573 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1574 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1577 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1578 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1579 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1581 if (vstack_push(stack
)) {
1585 vstack_ax(stack
)->type
= REG_STRING
;
1586 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1589 case FILTER_OP_LOAD_FIELD_REF_S64
:
1590 case FILTER_OP_GET_CONTEXT_REF_S64
:
1592 if (vstack_push(stack
)) {
1596 vstack_ax(stack
)->type
= REG_S64
;
1597 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1600 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1601 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1603 if (vstack_push(stack
)) {
1607 vstack_ax(stack
)->type
= REG_DOUBLE
;
1608 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1612 /* load from immediate operand */
1613 case FILTER_OP_LOAD_STRING
:
1615 struct load_op
*insn
= (struct load_op
*) pc
;
1617 if (vstack_push(stack
)) {
1621 vstack_ax(stack
)->type
= REG_STRING
;
1622 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1626 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1628 struct load_op
*insn
= (struct load_op
*) pc
;
1630 if (vstack_push(stack
)) {
1634 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1635 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1639 case FILTER_OP_LOAD_S64
:
1641 if (vstack_push(stack
)) {
1645 vstack_ax(stack
)->type
= REG_S64
;
1646 next_pc
+= sizeof(struct load_op
)
1647 + sizeof(struct literal_numeric
);
1651 case FILTER_OP_LOAD_DOUBLE
:
1653 if (vstack_push(stack
)) {
1657 vstack_ax(stack
)->type
= REG_DOUBLE
;
1658 next_pc
+= sizeof(struct load_op
)
1659 + sizeof(struct literal_double
);
1663 case FILTER_OP_CAST_TO_S64
:
1664 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1667 if (!vstack_ax(stack
)) {
1668 ERR("Empty stack\n");
1672 switch (vstack_ax(stack
)->type
) {
1678 ERR("Incorrect register type %d for cast\n",
1679 (int) vstack_ax(stack
)->type
);
1683 vstack_ax(stack
)->type
= REG_S64
;
1684 next_pc
+= sizeof(struct cast_op
);
1687 case FILTER_OP_CAST_NOP
:
1689 next_pc
+= sizeof(struct cast_op
);
1694 * Instructions for recursive traversal through composed types.
1696 case FILTER_OP_GET_CONTEXT_ROOT
:
1697 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1698 case FILTER_OP_GET_PAYLOAD_ROOT
:
1700 if (vstack_push(stack
)) {
1704 vstack_ax(stack
)->type
= REG_PTR
;
1705 next_pc
+= sizeof(struct load_op
);
1709 case FILTER_OP_LOAD_FIELD
:
1712 if (!vstack_ax(stack
)) {
1713 ERR("Empty stack\n");
1717 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1718 ERR("Expecting pointer on top of stack\n");
1722 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1723 next_pc
+= sizeof(struct load_op
);
1727 case FILTER_OP_LOAD_FIELD_S8
:
1728 case FILTER_OP_LOAD_FIELD_S16
:
1729 case FILTER_OP_LOAD_FIELD_S32
:
1730 case FILTER_OP_LOAD_FIELD_S64
:
1731 case FILTER_OP_LOAD_FIELD_U8
:
1732 case FILTER_OP_LOAD_FIELD_U16
:
1733 case FILTER_OP_LOAD_FIELD_U32
:
1734 case FILTER_OP_LOAD_FIELD_U64
:
1737 if (!vstack_ax(stack
)) {
1738 ERR("Empty stack\n");
1742 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1743 ERR("Expecting pointer on top of stack\n");
1747 vstack_ax(stack
)->type
= REG_S64
;
1748 next_pc
+= sizeof(struct load_op
);
1752 case FILTER_OP_LOAD_FIELD_STRING
:
1753 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1756 if (!vstack_ax(stack
)) {
1757 ERR("Empty stack\n");
1761 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1762 ERR("Expecting pointer on top of stack\n");
1766 vstack_ax(stack
)->type
= REG_STRING
;
1767 next_pc
+= sizeof(struct load_op
);
1771 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1774 if (!vstack_ax(stack
)) {
1775 ERR("Empty stack\n");
1779 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1780 ERR("Expecting pointer on top of stack\n");
1784 vstack_ax(stack
)->type
= REG_DOUBLE
;
1785 next_pc
+= sizeof(struct load_op
);
1789 case FILTER_OP_GET_SYMBOL
:
1790 case FILTER_OP_GET_SYMBOL_FIELD
:
1793 if (!vstack_ax(stack
)) {
1794 ERR("Empty stack\n");
1798 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1799 ERR("Expecting pointer on top of stack\n");
1803 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1807 case FILTER_OP_GET_INDEX_U16
:
1810 if (!vstack_ax(stack
)) {
1811 ERR("Empty stack\n");
1815 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1816 ERR("Expecting pointer on top of stack\n");
1820 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1824 case FILTER_OP_GET_INDEX_U64
:
1827 if (!vstack_ax(stack
)) {
1828 ERR("Empty stack\n");
1832 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1833 ERR("Expecting pointer on top of stack\n");
1837 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1843 *_next_pc
= next_pc
;
1848 * Never called concurrently (hash seed is shared).
1850 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1852 struct cds_lfht
*merge_points
;
1853 char *pc
, *next_pc
, *start_pc
;
1855 struct vstack stack
;
1857 vstack_init(&stack
);
1859 if (!lttng_hash_seed_ready
) {
1860 lttng_hash_seed
= time(NULL
);
1861 lttng_hash_seed_ready
= 1;
1864 * Note: merge_points hash table used by single thread, and
1865 * never concurrently resized. Therefore, we can use it without
1866 * holding RCU read-side lock and free nodes without using
1869 merge_points
= cds_lfht_new(DEFAULT_NR_MERGE_POINTS
,
1870 MIN_NR_BUCKETS
, MAX_NR_BUCKETS
,
1872 if (!merge_points
) {
1873 ERR("Error allocating hash table for bytecode validation\n");
1876 start_pc
= &bytecode
->code
[0];
1877 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1879 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1882 ERR("filter bytecode overflow\n");
1885 dbg_printf("Validating op %s (%u)\n",
1886 print_op((unsigned int) *(filter_opcode_t
*) pc
),
1887 (unsigned int) *(filter_opcode_t
*) pc
);
1890 * For each instruction, validate the current context
1891 * (traversal of entire execution flow), and validate
1892 * all merge points targeting this instruction.
1894 ret
= validate_instruction_all_contexts(bytecode
, merge_points
,
1895 &stack
, start_pc
, pc
);
1898 ret
= exec_insn(bytecode
, merge_points
, &stack
, &next_pc
, pc
);
1903 if (delete_all_nodes(merge_points
)) {
1905 ERR("Unexpected merge points\n");
1909 if (cds_lfht_destroy(merge_points
, NULL
)) {
1910 ERR("Error destroying hash table\n");