/*
 * lttng-filter-validator.c
 *
 * LTTng UST filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>
#include <assert.h>
#include <stdlib.h>

#include <urcu/rculfhash.h>

#include "lttng-filter.h"
#include "lttng-hash-helper.h"
#include "string-utils.h"
/*
 * Number of merge points for hash table size. Hash table initialized to
 * that size, and we do not resize, because we do not want to trigger
 * RCU worker thread execution: fall-back on linear traversal if number
 * of merge points exceeds this value.
 */
#define DEFAULT_NR_MERGE_POINTS		128
#define MIN_NR_BUCKETS			128
#define MAX_NR_BUCKETS			128
46 /* merge point table node */
48 struct cds_lfht_node node
;
50 /* Context at merge point */
52 unsigned long target_pc
;
55 static unsigned long lttng_hash_seed
;
56 static unsigned int lttng_hash_seed_ready
;
59 int lttng_hash_match(struct cds_lfht_node
*node
, const void *key
)
61 struct lfht_mp_node
*mp_node
=
62 caa_container_of(node
, struct lfht_mp_node
, node
);
63 unsigned long key_pc
= (unsigned long) key
;
65 if (mp_node
->target_pc
== key_pc
)
72 int merge_points_compare(const struct vstack
*stacka
,
73 const struct vstack
*stackb
)
77 if (stacka
->top
!= stackb
->top
)
79 len
= stacka
->top
+ 1;
81 for (i
= 0; i
< len
; i
++) {
82 if (stacka
->e
[i
].type
!= REG_UNKNOWN
83 && stackb
->e
[i
].type
!= REG_UNKNOWN
84 && stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
91 int merge_point_add_check(struct cds_lfht
*ht
, unsigned long target_pc
,
92 const struct vstack
*stack
)
94 struct lfht_mp_node
*node
;
95 unsigned long hash
= lttng_hash_mix((const char *) target_pc
,
98 struct cds_lfht_node
*ret
;
100 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
102 node
= zmalloc(sizeof(struct lfht_mp_node
));
105 node
->target_pc
= target_pc
;
106 memcpy(&node
->stack
, stack
, sizeof(node
->stack
));
107 ret
= cds_lfht_add_unique(ht
, hash
, lttng_hash_match
,
108 (const char *) target_pc
, &node
->node
);
109 if (ret
!= &node
->node
) {
110 struct lfht_mp_node
*ret_mp
=
111 caa_container_of(ret
, struct lfht_mp_node
, node
);
113 /* Key already present */
114 dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
117 if (merge_points_compare(stack
, &ret_mp
->stack
)) {
118 ERR("Merge points differ for offset %lu\n",
127 * Binary comparators use top of stack and top of stack -1.
128 * Return 0 if typing is known to match, 1 if typing is dynamic
129 * (unknown), negative error value on error.
132 int bin_op_compare_check(struct vstack
*stack
, filter_opcode_t opcode
,
135 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
138 switch (vstack_ax(stack
)->type
) {
145 switch (vstack_bx(stack
)->type
) {
153 case REG_STAR_GLOB_STRING
:
154 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
163 case REG_STAR_GLOB_STRING
:
164 switch (vstack_bx(stack
)->type
) {
171 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
175 case REG_STAR_GLOB_STRING
:
183 switch (vstack_bx(stack
)->type
) {
190 case REG_STAR_GLOB_STRING
:
204 ERR("type mismatch for '%s' binary operator\n", str
);
208 ERR("empty stack for '%s' binary operator\n", str
);
212 ERR("unknown type for '%s' binary operator\n", str
);
217 * Binary bitwise operators use top of stack and top of stack -1.
218 * Return 0 if typing is known to match, 1 if typing is dynamic
219 * (unknown), negative error value on error.
222 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
225 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
228 switch (vstack_ax(stack
)->type
) {
235 switch (vstack_bx(stack
)->type
) {
252 ERR("empty stack for '%s' binary operator\n", str
);
256 ERR("unknown type for '%s' binary operator\n", str
);
261 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
262 const struct get_symbol
*sym
)
264 const char *str
, *str_limit
;
267 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
270 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
271 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
272 len_limit
= str_limit
- str
;
273 if (strnlen(str
, len_limit
) == len_limit
)
279 * Validate bytecode range overflow within the validation pass.
280 * Called for each instruction encountered.
283 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
284 char *start_pc
, char *pc
)
288 switch (*(filter_opcode_t
*) pc
) {
289 case FILTER_OP_UNKNOWN
:
292 ERR("unknown bytecode op %u\n",
293 (unsigned int) *(filter_opcode_t
*) pc
);
298 case FILTER_OP_RETURN
:
299 case FILTER_OP_RETURN_S64
:
301 if (unlikely(pc
+ sizeof(struct return_op
)
302 > start_pc
+ bytecode
->len
)) {
313 case FILTER_OP_MINUS
:
315 ERR("unsupported bytecode op %u\n",
316 (unsigned int) *(filter_opcode_t
*) pc
);
327 case FILTER_OP_EQ_STRING
:
328 case FILTER_OP_NE_STRING
:
329 case FILTER_OP_GT_STRING
:
330 case FILTER_OP_LT_STRING
:
331 case FILTER_OP_GE_STRING
:
332 case FILTER_OP_LE_STRING
:
333 case FILTER_OP_EQ_STAR_GLOB_STRING
:
334 case FILTER_OP_NE_STAR_GLOB_STRING
:
335 case FILTER_OP_EQ_S64
:
336 case FILTER_OP_NE_S64
:
337 case FILTER_OP_GT_S64
:
338 case FILTER_OP_LT_S64
:
339 case FILTER_OP_GE_S64
:
340 case FILTER_OP_LE_S64
:
341 case FILTER_OP_EQ_DOUBLE
:
342 case FILTER_OP_NE_DOUBLE
:
343 case FILTER_OP_GT_DOUBLE
:
344 case FILTER_OP_LT_DOUBLE
:
345 case FILTER_OP_GE_DOUBLE
:
346 case FILTER_OP_LE_DOUBLE
:
347 case FILTER_OP_EQ_DOUBLE_S64
:
348 case FILTER_OP_NE_DOUBLE_S64
:
349 case FILTER_OP_GT_DOUBLE_S64
:
350 case FILTER_OP_LT_DOUBLE_S64
:
351 case FILTER_OP_GE_DOUBLE_S64
:
352 case FILTER_OP_LE_DOUBLE_S64
:
353 case FILTER_OP_EQ_S64_DOUBLE
:
354 case FILTER_OP_NE_S64_DOUBLE
:
355 case FILTER_OP_GT_S64_DOUBLE
:
356 case FILTER_OP_LT_S64_DOUBLE
:
357 case FILTER_OP_GE_S64_DOUBLE
:
358 case FILTER_OP_LE_S64_DOUBLE
:
359 case FILTER_OP_BIT_RSHIFT
:
360 case FILTER_OP_BIT_LSHIFT
:
361 case FILTER_OP_BIT_AND
:
362 case FILTER_OP_BIT_OR
:
363 case FILTER_OP_BIT_XOR
:
365 if (unlikely(pc
+ sizeof(struct binary_op
)
366 > start_pc
+ bytecode
->len
)) {
373 case FILTER_OP_UNARY_PLUS
:
374 case FILTER_OP_UNARY_MINUS
:
375 case FILTER_OP_UNARY_NOT
:
376 case FILTER_OP_UNARY_PLUS_S64
:
377 case FILTER_OP_UNARY_MINUS_S64
:
378 case FILTER_OP_UNARY_NOT_S64
:
379 case FILTER_OP_UNARY_PLUS_DOUBLE
:
380 case FILTER_OP_UNARY_MINUS_DOUBLE
:
381 case FILTER_OP_UNARY_NOT_DOUBLE
:
382 case FILTER_OP_UNARY_BIT_NOT
:
384 if (unlikely(pc
+ sizeof(struct unary_op
)
385 > start_pc
+ bytecode
->len
)) {
395 if (unlikely(pc
+ sizeof(struct logical_op
)
396 > start_pc
+ bytecode
->len
)) {
402 /* load field and get context ref */
403 case FILTER_OP_LOAD_FIELD_REF
:
404 case FILTER_OP_GET_CONTEXT_REF
:
405 case FILTER_OP_LOAD_FIELD_REF_STRING
:
406 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
407 case FILTER_OP_LOAD_FIELD_REF_S64
:
408 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
409 case FILTER_OP_GET_CONTEXT_REF_STRING
:
410 case FILTER_OP_GET_CONTEXT_REF_S64
:
411 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
413 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
414 > start_pc
+ bytecode
->len
)) {
420 /* load from immediate operand */
421 case FILTER_OP_LOAD_STRING
:
422 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
424 struct load_op
*insn
= (struct load_op
*) pc
;
425 uint32_t str_len
, maxlen
;
427 if (unlikely(pc
+ sizeof(struct load_op
)
428 > start_pc
+ bytecode
->len
)) {
433 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
434 str_len
= strnlen(insn
->data
, maxlen
);
435 if (unlikely(str_len
>= maxlen
)) {
436 /* Final '\0' not found within range */
442 case FILTER_OP_LOAD_S64
:
444 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
445 > start_pc
+ bytecode
->len
)) {
451 case FILTER_OP_LOAD_DOUBLE
:
453 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_double
)
454 > start_pc
+ bytecode
->len
)) {
460 case FILTER_OP_CAST_TO_S64
:
461 case FILTER_OP_CAST_DOUBLE_TO_S64
:
462 case FILTER_OP_CAST_NOP
:
464 if (unlikely(pc
+ sizeof(struct cast_op
)
465 > start_pc
+ bytecode
->len
)) {
472 * Instructions for recursive traversal through composed types.
474 case FILTER_OP_GET_CONTEXT_ROOT
:
475 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
476 case FILTER_OP_GET_PAYLOAD_ROOT
:
477 case FILTER_OP_LOAD_FIELD
:
478 case FILTER_OP_LOAD_FIELD_S8
:
479 case FILTER_OP_LOAD_FIELD_S16
:
480 case FILTER_OP_LOAD_FIELD_S32
:
481 case FILTER_OP_LOAD_FIELD_S64
:
482 case FILTER_OP_LOAD_FIELD_U8
:
483 case FILTER_OP_LOAD_FIELD_U16
:
484 case FILTER_OP_LOAD_FIELD_U32
:
485 case FILTER_OP_LOAD_FIELD_U64
:
486 case FILTER_OP_LOAD_FIELD_STRING
:
487 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
488 case FILTER_OP_LOAD_FIELD_DOUBLE
:
489 if (unlikely(pc
+ sizeof(struct load_op
)
490 > start_pc
+ bytecode
->len
)) {
495 case FILTER_OP_GET_SYMBOL
:
497 struct load_op
*insn
= (struct load_op
*) pc
;
498 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
500 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
501 > start_pc
+ bytecode
->len
)) {
505 ret
= validate_get_symbol(bytecode
, sym
);
509 case FILTER_OP_GET_SYMBOL_FIELD
:
510 ERR("Unexpected get symbol field");
514 case FILTER_OP_GET_INDEX_U16
:
515 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
516 > start_pc
+ bytecode
->len
)) {
521 case FILTER_OP_GET_INDEX_U64
:
522 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
523 > start_pc
+ bytecode
->len
)) {
533 unsigned long delete_all_nodes(struct cds_lfht
*ht
)
535 struct cds_lfht_iter iter
;
536 struct lfht_mp_node
*node
;
537 unsigned long nr_nodes
= 0;
539 cds_lfht_for_each_entry(ht
, &iter
, node
, node
) {
542 ret
= cds_lfht_del(ht
, cds_lfht_iter_get_node(&iter
));
544 /* note: this hash table is never used concurrently */
557 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
558 struct vstack
*stack
,
563 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
566 case FILTER_OP_UNKNOWN
:
569 ERR("unknown bytecode op %u\n",
570 (unsigned int) *(filter_opcode_t
*) pc
);
575 case FILTER_OP_RETURN
:
576 case FILTER_OP_RETURN_S64
:
586 case FILTER_OP_MINUS
:
588 ERR("unsupported bytecode op %u\n",
589 (unsigned int) opcode
);
596 ret
= bin_op_compare_check(stack
, opcode
, "==");
603 ret
= bin_op_compare_check(stack
, opcode
, "!=");
610 ret
= bin_op_compare_check(stack
, opcode
, ">");
617 ret
= bin_op_compare_check(stack
, opcode
, "<");
624 ret
= bin_op_compare_check(stack
, opcode
, ">=");
631 ret
= bin_op_compare_check(stack
, opcode
, "<=");
637 case FILTER_OP_EQ_STRING
:
638 case FILTER_OP_NE_STRING
:
639 case FILTER_OP_GT_STRING
:
640 case FILTER_OP_LT_STRING
:
641 case FILTER_OP_GE_STRING
:
642 case FILTER_OP_LE_STRING
:
644 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
645 ERR("Empty stack\n");
649 if (vstack_ax(stack
)->type
!= REG_STRING
650 || vstack_bx(stack
)->type
!= REG_STRING
) {
651 ERR("Unexpected register type for string comparator\n");
658 case FILTER_OP_EQ_STAR_GLOB_STRING
:
659 case FILTER_OP_NE_STAR_GLOB_STRING
:
661 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
662 ERR("Empty stack\n");
666 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
667 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
668 ERR("Unexpected register type for globbing pattern comparator\n");
675 case FILTER_OP_EQ_S64
:
676 case FILTER_OP_NE_S64
:
677 case FILTER_OP_GT_S64
:
678 case FILTER_OP_LT_S64
:
679 case FILTER_OP_GE_S64
:
680 case FILTER_OP_LE_S64
:
682 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
683 ERR("Empty stack\n");
687 if (vstack_ax(stack
)->type
!= REG_S64
688 || vstack_bx(stack
)->type
!= REG_S64
) {
689 ERR("Unexpected register type for s64 comparator\n");
696 case FILTER_OP_EQ_DOUBLE
:
697 case FILTER_OP_NE_DOUBLE
:
698 case FILTER_OP_GT_DOUBLE
:
699 case FILTER_OP_LT_DOUBLE
:
700 case FILTER_OP_GE_DOUBLE
:
701 case FILTER_OP_LE_DOUBLE
:
703 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
704 ERR("Empty stack\n");
708 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
709 ERR("Double operator should have two double registers\n");
716 case FILTER_OP_EQ_DOUBLE_S64
:
717 case FILTER_OP_NE_DOUBLE_S64
:
718 case FILTER_OP_GT_DOUBLE_S64
:
719 case FILTER_OP_LT_DOUBLE_S64
:
720 case FILTER_OP_GE_DOUBLE_S64
:
721 case FILTER_OP_LE_DOUBLE_S64
:
723 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
724 ERR("Empty stack\n");
728 if (vstack_ax(stack
)->type
!= REG_S64
&& vstack_bx(stack
)->type
!= REG_DOUBLE
) {
729 ERR("Double-S64 operator has unexpected register types\n");
736 case FILTER_OP_EQ_S64_DOUBLE
:
737 case FILTER_OP_NE_S64_DOUBLE
:
738 case FILTER_OP_GT_S64_DOUBLE
:
739 case FILTER_OP_LT_S64_DOUBLE
:
740 case FILTER_OP_GE_S64_DOUBLE
:
741 case FILTER_OP_LE_S64_DOUBLE
:
743 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
744 ERR("Empty stack\n");
748 if (vstack_ax(stack
)->type
!= REG_DOUBLE
&& vstack_bx(stack
)->type
!= REG_S64
) {
749 ERR("S64-Double operator has unexpected register types\n");
756 case FILTER_OP_BIT_RSHIFT
:
757 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
761 case FILTER_OP_BIT_LSHIFT
:
762 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
766 case FILTER_OP_BIT_AND
:
767 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
771 case FILTER_OP_BIT_OR
:
772 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
776 case FILTER_OP_BIT_XOR
:
777 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
783 case FILTER_OP_UNARY_PLUS
:
784 case FILTER_OP_UNARY_MINUS
:
785 case FILTER_OP_UNARY_NOT
:
787 if (!vstack_ax(stack
)) {
788 ERR("Empty stack\n");
792 switch (vstack_ax(stack
)->type
) {
794 ERR("unknown register type\n");
799 case REG_STAR_GLOB_STRING
:
800 ERR("Unary op can only be applied to numeric or floating point registers\n");
812 case FILTER_OP_UNARY_BIT_NOT
:
814 if (!vstack_ax(stack
)) {
815 ERR("Empty stack\n");
819 switch (vstack_ax(stack
)->type
) {
821 ERR("unknown register type\n");
826 case REG_STAR_GLOB_STRING
:
828 ERR("Unary bitwise op can only be applied to numeric registers\n");
839 case FILTER_OP_UNARY_PLUS_S64
:
840 case FILTER_OP_UNARY_MINUS_S64
:
841 case FILTER_OP_UNARY_NOT_S64
:
843 if (!vstack_ax(stack
)) {
844 ERR("Empty stack\n");
848 if (vstack_ax(stack
)->type
!= REG_S64
) {
849 ERR("Invalid register type\n");
856 case FILTER_OP_UNARY_PLUS_DOUBLE
:
857 case FILTER_OP_UNARY_MINUS_DOUBLE
:
858 case FILTER_OP_UNARY_NOT_DOUBLE
:
860 if (!vstack_ax(stack
)) {
861 ERR("Empty stack\n");
865 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
866 ERR("Invalid register type\n");
877 struct logical_op
*insn
= (struct logical_op
*) pc
;
879 if (!vstack_ax(stack
)) {
880 ERR("Empty stack\n");
884 if (vstack_ax(stack
)->type
!= REG_S64
885 && vstack_ax(stack
)->type
!= REG_UNKNOWN
) {
886 ERR("Logical comparator expects S64 or dynamic register\n");
891 dbg_printf("Validate jumping to bytecode offset %u\n",
892 (unsigned int) insn
->skip_offset
);
893 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
894 ERR("Loops are not allowed in bytecode\n");
902 case FILTER_OP_LOAD_FIELD_REF
:
904 ERR("Unknown field ref type\n");
908 case FILTER_OP_LOAD_FIELD_REF_STRING
:
909 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
911 struct load_op
*insn
= (struct load_op
*) pc
;
912 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
914 dbg_printf("Validate load field ref offset %u type string\n",
918 case FILTER_OP_LOAD_FIELD_REF_S64
:
920 struct load_op
*insn
= (struct load_op
*) pc
;
921 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
923 dbg_printf("Validate load field ref offset %u type s64\n",
927 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
929 struct load_op
*insn
= (struct load_op
*) pc
;
930 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
932 dbg_printf("Validate load field ref offset %u type double\n",
937 /* load from immediate operand */
938 case FILTER_OP_LOAD_STRING
:
939 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
944 case FILTER_OP_LOAD_S64
:
949 case FILTER_OP_LOAD_DOUBLE
:
954 case FILTER_OP_CAST_TO_S64
:
955 case FILTER_OP_CAST_DOUBLE_TO_S64
:
957 struct cast_op
*insn
= (struct cast_op
*) pc
;
959 if (!vstack_ax(stack
)) {
960 ERR("Empty stack\n");
964 switch (vstack_ax(stack
)->type
) {
966 ERR("unknown register type\n");
971 case REG_STAR_GLOB_STRING
:
972 ERR("Cast op can only be applied to numeric or floating point registers\n");
982 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
983 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
984 ERR("Cast expects double\n");
991 case FILTER_OP_CAST_NOP
:
996 /* get context ref */
997 case FILTER_OP_GET_CONTEXT_REF
:
999 struct load_op
*insn
= (struct load_op
*) pc
;
1000 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1002 dbg_printf("Validate get context ref offset %u type dynamic\n",
1006 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1008 struct load_op
*insn
= (struct load_op
*) pc
;
1009 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1011 dbg_printf("Validate get context ref offset %u type string\n",
1015 case FILTER_OP_GET_CONTEXT_REF_S64
:
1017 struct load_op
*insn
= (struct load_op
*) pc
;
1018 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1020 dbg_printf("Validate get context ref offset %u type s64\n",
1024 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1026 struct load_op
*insn
= (struct load_op
*) pc
;
1027 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1029 dbg_printf("Validate get context ref offset %u type double\n",
1035 * Instructions for recursive traversal through composed types.
1037 case FILTER_OP_GET_CONTEXT_ROOT
:
1039 dbg_printf("Validate get context root\n");
1042 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1044 dbg_printf("Validate get app context root\n");
1047 case FILTER_OP_GET_PAYLOAD_ROOT
:
1049 dbg_printf("Validate get payload root\n");
1052 case FILTER_OP_LOAD_FIELD
:
1055 * We tolerate that field type is unknown at validation,
1056 * because we are performing the load specialization in
1057 * a phase after validation.
1059 dbg_printf("Validate load field\n");
1064 * Disallow already specialized bytecode op load field instructions to
1065 * ensure that the received bytecode does not read a memory area larger
1066 * than the memory targeted by the instrumentation.
1068 case FILTER_OP_LOAD_FIELD_S8
:
1069 case FILTER_OP_LOAD_FIELD_S16
:
1070 case FILTER_OP_LOAD_FIELD_S32
:
1071 case FILTER_OP_LOAD_FIELD_S64
:
1072 case FILTER_OP_LOAD_FIELD_U8
:
1073 case FILTER_OP_LOAD_FIELD_U16
:
1074 case FILTER_OP_LOAD_FIELD_U32
:
1075 case FILTER_OP_LOAD_FIELD_U64
:
1076 case FILTER_OP_LOAD_FIELD_STRING
:
1077 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1078 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1080 dbg_printf("Validate load field, reject specialized load instruction (%d)\n",
1086 case FILTER_OP_GET_SYMBOL
:
1088 struct load_op
*insn
= (struct load_op
*) pc
;
1089 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1091 dbg_printf("Validate get symbol offset %u\n", sym
->offset
);
1095 case FILTER_OP_GET_SYMBOL_FIELD
:
1097 struct load_op
*insn
= (struct load_op
*) pc
;
1098 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1100 dbg_printf("Validate get symbol field offset %u\n", sym
->offset
);
1104 case FILTER_OP_GET_INDEX_U16
:
1106 struct load_op
*insn
= (struct load_op
*) pc
;
1107 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1109 dbg_printf("Validate get index u16 index %u\n", get_index
->index
);
1113 case FILTER_OP_GET_INDEX_U64
:
1115 struct load_op
*insn
= (struct load_op
*) pc
;
1116 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1118 dbg_printf("Validate get index u64 index %" PRIu64
"\n", get_index
->index
);
1132 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1133 struct cds_lfht
*merge_points
,
1134 struct vstack
*stack
,
1139 unsigned long target_pc
= pc
- start_pc
;
1140 struct cds_lfht_iter iter
;
1141 struct cds_lfht_node
*node
;
1142 struct lfht_mp_node
*mp_node
;
1145 /* Validate the context resulting from the previous instruction */
1146 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1150 /* Validate merge points */
1151 hash
= lttng_hash_mix((const char *) target_pc
, sizeof(target_pc
),
1153 cds_lfht_lookup(merge_points
, hash
, lttng_hash_match
,
1154 (const char *) target_pc
, &iter
);
1155 node
= cds_lfht_iter_get_node(&iter
);
1157 mp_node
= caa_container_of(node
, struct lfht_mp_node
, node
);
1159 dbg_printf("Filter: validate merge point at offset %lu\n",
1161 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1162 ERR("Merge points differ for offset %lu\n",
1166 /* Once validated, we can remove the merge point */
1167 dbg_printf("Filter: remove merge point at offset %lu\n",
1169 ret
= cds_lfht_del(merge_points
, node
);
1176 * Validate load instructions: specialized instructions not accepted as input.
1179 * >0: going to next insn.
1180 * 0: success, stop iteration.
1184 int validate_load(char **_next_pc
,
1188 char *next_pc
= *_next_pc
;
1190 switch (*(filter_opcode_t
*) pc
) {
1191 case FILTER_OP_UNKNOWN
:
1194 ERR("Unknown bytecode op %u\n",
1195 (unsigned int) *(filter_opcode_t
*) pc
);
1200 case FILTER_OP_RETURN
:
1202 next_pc
+= sizeof(struct return_op
);
1206 case FILTER_OP_RETURN_S64
:
1208 next_pc
+= sizeof(struct return_op
);
1216 case FILTER_OP_PLUS
:
1217 case FILTER_OP_MINUS
:
1219 ERR("Unsupported bytecode op %u\n",
1220 (unsigned int) *(filter_opcode_t
*) pc
);
1231 case FILTER_OP_EQ_STRING
:
1232 case FILTER_OP_NE_STRING
:
1233 case FILTER_OP_GT_STRING
:
1234 case FILTER_OP_LT_STRING
:
1235 case FILTER_OP_GE_STRING
:
1236 case FILTER_OP_LE_STRING
:
1237 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1238 case FILTER_OP_NE_STAR_GLOB_STRING
:
1239 case FILTER_OP_EQ_S64
:
1240 case FILTER_OP_NE_S64
:
1241 case FILTER_OP_GT_S64
:
1242 case FILTER_OP_LT_S64
:
1243 case FILTER_OP_GE_S64
:
1244 case FILTER_OP_LE_S64
:
1245 case FILTER_OP_EQ_DOUBLE
:
1246 case FILTER_OP_NE_DOUBLE
:
1247 case FILTER_OP_GT_DOUBLE
:
1248 case FILTER_OP_LT_DOUBLE
:
1249 case FILTER_OP_GE_DOUBLE
:
1250 case FILTER_OP_LE_DOUBLE
:
1251 case FILTER_OP_EQ_DOUBLE_S64
:
1252 case FILTER_OP_NE_DOUBLE_S64
:
1253 case FILTER_OP_GT_DOUBLE_S64
:
1254 case FILTER_OP_LT_DOUBLE_S64
:
1255 case FILTER_OP_GE_DOUBLE_S64
:
1256 case FILTER_OP_LE_DOUBLE_S64
:
1257 case FILTER_OP_EQ_S64_DOUBLE
:
1258 case FILTER_OP_NE_S64_DOUBLE
:
1259 case FILTER_OP_GT_S64_DOUBLE
:
1260 case FILTER_OP_LT_S64_DOUBLE
:
1261 case FILTER_OP_GE_S64_DOUBLE
:
1262 case FILTER_OP_LE_S64_DOUBLE
:
1263 case FILTER_OP_BIT_RSHIFT
:
1264 case FILTER_OP_BIT_LSHIFT
:
1265 case FILTER_OP_BIT_AND
:
1266 case FILTER_OP_BIT_OR
:
1267 case FILTER_OP_BIT_XOR
:
1269 next_pc
+= sizeof(struct binary_op
);
1274 case FILTER_OP_UNARY_PLUS
:
1275 case FILTER_OP_UNARY_MINUS
:
1276 case FILTER_OP_UNARY_PLUS_S64
:
1277 case FILTER_OP_UNARY_MINUS_S64
:
1278 case FILTER_OP_UNARY_NOT_S64
:
1279 case FILTER_OP_UNARY_NOT
:
1280 case FILTER_OP_UNARY_BIT_NOT
:
1281 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1282 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1283 case FILTER_OP_UNARY_NOT_DOUBLE
:
1285 next_pc
+= sizeof(struct unary_op
);
1293 next_pc
+= sizeof(struct logical_op
);
1297 /* load field ref */
1298 case FILTER_OP_LOAD_FIELD_REF
:
1299 /* get context ref */
1300 case FILTER_OP_GET_CONTEXT_REF
:
1302 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1305 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1306 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1307 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1308 case FILTER_OP_LOAD_FIELD_REF_S64
:
1309 case FILTER_OP_GET_CONTEXT_REF_S64
:
1310 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1311 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1314 * Reject specialized load field ref instructions.
1320 /* load from immediate operand */
1321 case FILTER_OP_LOAD_STRING
:
1322 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1324 struct load_op
*insn
= (struct load_op
*) pc
;
1326 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1330 case FILTER_OP_LOAD_S64
:
1332 next_pc
+= sizeof(struct load_op
) + sizeof(struct literal_numeric
);
1335 case FILTER_OP_LOAD_DOUBLE
:
1337 next_pc
+= sizeof(struct load_op
) + sizeof(struct literal_double
);
1341 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1342 case FILTER_OP_CAST_TO_S64
:
1343 case FILTER_OP_CAST_NOP
:
1345 next_pc
+= sizeof(struct cast_op
);
1350 * Instructions for recursive traversal through composed types.
1352 case FILTER_OP_GET_CONTEXT_ROOT
:
1353 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1354 case FILTER_OP_GET_PAYLOAD_ROOT
:
1355 case FILTER_OP_LOAD_FIELD
:
1357 next_pc
+= sizeof(struct load_op
);
1361 case FILTER_OP_LOAD_FIELD_S8
:
1362 case FILTER_OP_LOAD_FIELD_S16
:
1363 case FILTER_OP_LOAD_FIELD_S32
:
1364 case FILTER_OP_LOAD_FIELD_S64
:
1365 case FILTER_OP_LOAD_FIELD_U8
:
1366 case FILTER_OP_LOAD_FIELD_U16
:
1367 case FILTER_OP_LOAD_FIELD_U32
:
1368 case FILTER_OP_LOAD_FIELD_U64
:
1369 case FILTER_OP_LOAD_FIELD_STRING
:
1370 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1371 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1374 * Reject specialized load field instructions.
1380 case FILTER_OP_GET_SYMBOL
:
1381 case FILTER_OP_GET_SYMBOL_FIELD
:
1383 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1387 case FILTER_OP_GET_INDEX_U16
:
1389 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1393 case FILTER_OP_GET_INDEX_U64
:
1395 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1401 *_next_pc
= next_pc
;
1407 * >0: going to next insn.
1408 * 0: success, stop iteration.
1412 int exec_insn(struct bytecode_runtime
*bytecode
,
1413 struct cds_lfht
*merge_points
,
1414 struct vstack
*stack
,
1419 char *next_pc
= *_next_pc
;
1421 switch (*(filter_opcode_t
*) pc
) {
1422 case FILTER_OP_UNKNOWN
:
1425 ERR("unknown bytecode op %u\n",
1426 (unsigned int) *(filter_opcode_t
*) pc
);
1431 case FILTER_OP_RETURN
:
1433 if (!vstack_ax(stack
)) {
1434 ERR("Empty stack\n");
1438 switch (vstack_ax(stack
)->type
) {
1443 ERR("Unexpected register type %d at end of bytecode\n",
1444 (int) vstack_ax(stack
)->type
);
1452 case FILTER_OP_RETURN_S64
:
1454 if (!vstack_ax(stack
)) {
1455 ERR("Empty stack\n");
1459 switch (vstack_ax(stack
)->type
) {
1464 ERR("Unexpected register type %d at end of bytecode\n",
1465 (int) vstack_ax(stack
)->type
);
1478 case FILTER_OP_PLUS
:
1479 case FILTER_OP_MINUS
:
1481 ERR("unsupported bytecode op %u\n",
1482 (unsigned int) *(filter_opcode_t
*) pc
);
1493 case FILTER_OP_EQ_STRING
:
1494 case FILTER_OP_NE_STRING
:
1495 case FILTER_OP_GT_STRING
:
1496 case FILTER_OP_LT_STRING
:
1497 case FILTER_OP_GE_STRING
:
1498 case FILTER_OP_LE_STRING
:
1499 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1500 case FILTER_OP_NE_STAR_GLOB_STRING
:
1501 case FILTER_OP_EQ_S64
:
1502 case FILTER_OP_NE_S64
:
1503 case FILTER_OP_GT_S64
:
1504 case FILTER_OP_LT_S64
:
1505 case FILTER_OP_GE_S64
:
1506 case FILTER_OP_LE_S64
:
1507 case FILTER_OP_EQ_DOUBLE
:
1508 case FILTER_OP_NE_DOUBLE
:
1509 case FILTER_OP_GT_DOUBLE
:
1510 case FILTER_OP_LT_DOUBLE
:
1511 case FILTER_OP_GE_DOUBLE
:
1512 case FILTER_OP_LE_DOUBLE
:
1513 case FILTER_OP_EQ_DOUBLE_S64
:
1514 case FILTER_OP_NE_DOUBLE_S64
:
1515 case FILTER_OP_GT_DOUBLE_S64
:
1516 case FILTER_OP_LT_DOUBLE_S64
:
1517 case FILTER_OP_GE_DOUBLE_S64
:
1518 case FILTER_OP_LE_DOUBLE_S64
:
1519 case FILTER_OP_EQ_S64_DOUBLE
:
1520 case FILTER_OP_NE_S64_DOUBLE
:
1521 case FILTER_OP_GT_S64_DOUBLE
:
1522 case FILTER_OP_LT_S64_DOUBLE
:
1523 case FILTER_OP_GE_S64_DOUBLE
:
1524 case FILTER_OP_LE_S64_DOUBLE
:
1525 case FILTER_OP_BIT_RSHIFT
:
1526 case FILTER_OP_BIT_LSHIFT
:
1527 case FILTER_OP_BIT_AND
:
1528 case FILTER_OP_BIT_OR
:
1529 case FILTER_OP_BIT_XOR
:
1532 if (vstack_pop(stack
)) {
1536 if (!vstack_ax(stack
)) {
1537 ERR("Empty stack\n");
1541 switch (vstack_ax(stack
)->type
) {
1545 case REG_STAR_GLOB_STRING
:
1549 ERR("Unexpected register type %d for operation\n",
1550 (int) vstack_ax(stack
)->type
);
1555 vstack_ax(stack
)->type
= REG_S64
;
1556 next_pc
+= sizeof(struct binary_op
);
1561 case FILTER_OP_UNARY_PLUS
:
1562 case FILTER_OP_UNARY_MINUS
:
1565 if (!vstack_ax(stack
)) {
1566 ERR("Empty stack\n");
1570 switch (vstack_ax(stack
)->type
) {
1576 ERR("Unexpected register type %d for operation\n",
1577 (int) vstack_ax(stack
)->type
);
1581 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1582 next_pc
+= sizeof(struct unary_op
);
1586 case FILTER_OP_UNARY_PLUS_S64
:
1587 case FILTER_OP_UNARY_MINUS_S64
:
1588 case FILTER_OP_UNARY_NOT_S64
:
1591 if (!vstack_ax(stack
)) {
1592 ERR("Empty stack\n");
1596 switch (vstack_ax(stack
)->type
) {
1600 ERR("Unexpected register type %d for operation\n",
1601 (int) vstack_ax(stack
)->type
);
1606 vstack_ax(stack
)->type
= REG_S64
;
1607 next_pc
+= sizeof(struct unary_op
);
1611 case FILTER_OP_UNARY_NOT
:
1614 if (!vstack_ax(stack
)) {
1615 ERR("Empty stack\n");
1619 switch (vstack_ax(stack
)->type
) {
1625 ERR("Unexpected register type %d for operation\n",
1626 (int) vstack_ax(stack
)->type
);
1631 vstack_ax(stack
)->type
= REG_S64
;
1632 next_pc
+= sizeof(struct unary_op
);
1636 case FILTER_OP_UNARY_BIT_NOT
:
1639 if (!vstack_ax(stack
)) {
1640 ERR("Empty stack\n");
1644 switch (vstack_ax(stack
)->type
) {
1650 ERR("Unexpected register type %d for operation\n",
1651 (int) vstack_ax(stack
)->type
);
1656 vstack_ax(stack
)->type
= REG_S64
;
1657 next_pc
+= sizeof(struct unary_op
);
1661 case FILTER_OP_UNARY_NOT_DOUBLE
:
1664 if (!vstack_ax(stack
)) {
1665 ERR("Empty stack\n");
1669 switch (vstack_ax(stack
)->type
) {
1673 ERR("Incorrect register type %d for operation\n",
1674 (int) vstack_ax(stack
)->type
);
1679 vstack_ax(stack
)->type
= REG_S64
;
1680 next_pc
+= sizeof(struct unary_op
);
1684 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1685 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1688 if (!vstack_ax(stack
)) {
1689 ERR("Empty stack\n");
1693 switch (vstack_ax(stack
)->type
) {
1697 ERR("Incorrect register type %d for operation\n",
1698 (int) vstack_ax(stack
)->type
);
1703 vstack_ax(stack
)->type
= REG_DOUBLE
;
1704 next_pc
+= sizeof(struct unary_op
);
1712 struct logical_op
*insn
= (struct logical_op
*) pc
;
1715 /* Add merge point to table */
1716 merge_ret
= merge_point_add_check(merge_points
,
1717 insn
->skip_offset
, stack
);
1723 if (!vstack_ax(stack
)) {
1724 ERR("Empty stack\n");
1728 /* There is always a cast-to-s64 operation before a or/and op. */
1729 switch (vstack_ax(stack
)->type
) {
1733 ERR("Incorrect register type %d for operation\n",
1734 (int) vstack_ax(stack
)->type
);
1739 /* Continue to next instruction */
1740 /* Pop 1 when jump not taken */
1741 if (vstack_pop(stack
)) {
1745 next_pc
+= sizeof(struct logical_op
);
1749 /* load field ref */
1750 case FILTER_OP_LOAD_FIELD_REF
:
1752 ERR("Unknown field ref type\n");
1756 /* get context ref */
1757 case FILTER_OP_GET_CONTEXT_REF
:
1759 if (vstack_push(stack
)) {
1763 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1764 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1767 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1768 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1769 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1771 if (vstack_push(stack
)) {
1775 vstack_ax(stack
)->type
= REG_STRING
;
1776 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1779 case FILTER_OP_LOAD_FIELD_REF_S64
:
1780 case FILTER_OP_GET_CONTEXT_REF_S64
:
1782 if (vstack_push(stack
)) {
1786 vstack_ax(stack
)->type
= REG_S64
;
1787 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1790 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1791 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1793 if (vstack_push(stack
)) {
1797 vstack_ax(stack
)->type
= REG_DOUBLE
;
1798 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1802 /* load from immediate operand */
1803 case FILTER_OP_LOAD_STRING
:
1805 struct load_op
*insn
= (struct load_op
*) pc
;
1807 if (vstack_push(stack
)) {
1811 vstack_ax(stack
)->type
= REG_STRING
;
1812 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1816 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1818 struct load_op
*insn
= (struct load_op
*) pc
;
1820 if (vstack_push(stack
)) {
1824 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1825 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1829 case FILTER_OP_LOAD_S64
:
1831 if (vstack_push(stack
)) {
1835 vstack_ax(stack
)->type
= REG_S64
;
1836 next_pc
+= sizeof(struct load_op
)
1837 + sizeof(struct literal_numeric
);
1841 case FILTER_OP_LOAD_DOUBLE
:
1843 if (vstack_push(stack
)) {
1847 vstack_ax(stack
)->type
= REG_DOUBLE
;
1848 next_pc
+= sizeof(struct load_op
)
1849 + sizeof(struct literal_double
);
1853 case FILTER_OP_CAST_TO_S64
:
1854 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1857 if (!vstack_ax(stack
)) {
1858 ERR("Empty stack\n");
1862 switch (vstack_ax(stack
)->type
) {
1868 ERR("Incorrect register type %d for cast\n",
1869 (int) vstack_ax(stack
)->type
);
1873 vstack_ax(stack
)->type
= REG_S64
;
1874 next_pc
+= sizeof(struct cast_op
);
1877 case FILTER_OP_CAST_NOP
:
1879 next_pc
+= sizeof(struct cast_op
);
1884 * Instructions for recursive traversal through composed types.
1886 case FILTER_OP_GET_CONTEXT_ROOT
:
1887 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1888 case FILTER_OP_GET_PAYLOAD_ROOT
:
1890 if (vstack_push(stack
)) {
1894 vstack_ax(stack
)->type
= REG_PTR
;
1895 next_pc
+= sizeof(struct load_op
);
1899 case FILTER_OP_LOAD_FIELD
:
1902 if (!vstack_ax(stack
)) {
1903 ERR("Empty stack\n");
1907 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1908 ERR("Expecting pointer on top of stack\n");
1912 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1913 next_pc
+= sizeof(struct load_op
);
1917 case FILTER_OP_LOAD_FIELD_S8
:
1918 case FILTER_OP_LOAD_FIELD_S16
:
1919 case FILTER_OP_LOAD_FIELD_S32
:
1920 case FILTER_OP_LOAD_FIELD_S64
:
1921 case FILTER_OP_LOAD_FIELD_U8
:
1922 case FILTER_OP_LOAD_FIELD_U16
:
1923 case FILTER_OP_LOAD_FIELD_U32
:
1924 case FILTER_OP_LOAD_FIELD_U64
:
1927 if (!vstack_ax(stack
)) {
1928 ERR("Empty stack\n");
1932 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1933 ERR("Expecting pointer on top of stack\n");
1937 vstack_ax(stack
)->type
= REG_S64
;
1938 next_pc
+= sizeof(struct load_op
);
1942 case FILTER_OP_LOAD_FIELD_STRING
:
1943 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1946 if (!vstack_ax(stack
)) {
1947 ERR("Empty stack\n");
1951 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1952 ERR("Expecting pointer on top of stack\n");
1956 vstack_ax(stack
)->type
= REG_STRING
;
1957 next_pc
+= sizeof(struct load_op
);
1961 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1964 if (!vstack_ax(stack
)) {
1965 ERR("Empty stack\n");
1969 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1970 ERR("Expecting pointer on top of stack\n");
1974 vstack_ax(stack
)->type
= REG_DOUBLE
;
1975 next_pc
+= sizeof(struct load_op
);
1979 case FILTER_OP_GET_SYMBOL
:
1980 case FILTER_OP_GET_SYMBOL_FIELD
:
1983 if (!vstack_ax(stack
)) {
1984 ERR("Empty stack\n");
1988 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1989 ERR("Expecting pointer on top of stack\n");
1993 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1997 case FILTER_OP_GET_INDEX_U16
:
2000 if (!vstack_ax(stack
)) {
2001 ERR("Empty stack\n");
2005 if (vstack_ax(stack
)->type
!= REG_PTR
) {
2006 ERR("Expecting pointer on top of stack\n");
2010 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
2014 case FILTER_OP_GET_INDEX_U64
:
2017 if (!vstack_ax(stack
)) {
2018 ERR("Empty stack\n");
2022 if (vstack_ax(stack
)->type
!= REG_PTR
) {
2023 ERR("Expecting pointer on top of stack\n");
2027 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
2033 *_next_pc
= next_pc
;
/*
 * First validation pass over the filter bytecode: walk every instruction
 * from &bytecode->code[0] up to bytecode->len, checking each program
 * counter against the bytecode bounds (bytecode_validate_overflow) and
 * validating load-class instructions (validate_load), which advances
 * next_pc past the current instruction.
 *
 * NOTE(review): this chunk appears to be a lossy extraction — original
 * source lines (opening brace, `int ret` declaration, error-path
 * `goto`/`return` statements, loop increment `pc = next_pc`, and the
 * closing brace) are missing between the numbered fragments below.
 * Code text is kept byte-identical; confirm against upstream
 * lttng-ust before relying on the control flow shown here.
 */
2090 int lttng_filter_validate_bytecode_load(struct bytecode_runtime
*bytecode
)
/* Cursors into the bytecode stream: current insn, next insn, base. */
2039 char *pc
, *next_pc
, *start_pc
;
2042 start_pc
= &bytecode
->code
[0];
/* Linear scan; presumably `pc = next_pc` is the loop step — TODO confirm. */
2043 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
/* Reject a pc that would read past the end of the bytecode buffer. */
2045 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
2048 ERR("filter bytecode overflow\n");
/* Trace each opcode by name and numeric value (debug builds only). */
2051 dbg_printf("Validating loads: op %s (%u)\n",
2052 print_op((unsigned int) *(filter_opcode_t
*) pc
),
2053 (unsigned int) *(filter_opcode_t
*) pc
);
/* Validate the load instruction at pc; updates next_pc on success. */
2055 ret
= validate_load(&next_pc
, pc
);
2064 * Never called concurrently (hash seed is shared).
2066 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
2068 struct cds_lfht
*merge_points
;
2069 char *pc
, *next_pc
, *start_pc
;
2071 struct vstack stack
;
2073 vstack_init(&stack
);
2075 if (!lttng_hash_seed_ready
) {
2076 lttng_hash_seed
= time(NULL
);
2077 lttng_hash_seed_ready
= 1;
2080 * Note: merge_points hash table used by single thread, and
2081 * never concurrently resized. Therefore, we can use it without
2082 * holding RCU read-side lock and free nodes without using
2085 merge_points
= cds_lfht_new(DEFAULT_NR_MERGE_POINTS
,
2086 MIN_NR_BUCKETS
, MAX_NR_BUCKETS
,
2088 if (!merge_points
) {
2089 ERR("Error allocating hash table for bytecode validation\n");
2092 start_pc
= &bytecode
->code
[0];
2093 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
2095 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
2098 ERR("filter bytecode overflow\n");
2101 dbg_printf("Validating op %s (%u)\n",
2102 print_op((unsigned int) *(filter_opcode_t
*) pc
),
2103 (unsigned int) *(filter_opcode_t
*) pc
);
2106 * For each instruction, validate the current context
2107 * (traversal of entire execution flow), and validate
2108 * all merge points targeting this instruction.
2110 ret
= validate_instruction_all_contexts(bytecode
, merge_points
,
2111 &stack
, start_pc
, pc
);
2114 ret
= exec_insn(bytecode
, merge_points
, &stack
, &next_pc
, pc
);
2119 if (delete_all_nodes(merge_points
)) {
2121 ERR("Unexpected merge points\n");
2125 if (cds_lfht_destroy(merge_points
, NULL
)) {
2126 ERR("Error destroying hash table\n");