Version 2.7.7
[lttng-modules.git] / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <linux/types.h>
24 #include <linux/jhash.h>
25 #include <linux/slab.h>
26
27 #include "wrapper/list.h"
28 #include "lttng-filter.h"
29
#define MERGE_POINT_TABLE_BITS 7
#define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)

/*
 * merge point table node: one entry per recorded forward-jump target,
 * keyed by the target bytecode offset (target_pc).
 */
struct mp_node {
	struct hlist_node node;

	/* Context at merge point */
	struct vstack stack;		/* expected virtual stack state at target_pc */
	unsigned long target_pc;	/* bytecode offset this merge point targets */
};

/* Fixed-size hash table of merge points, indexed by hashed target_pc. */
struct mp_table {
	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
};
45
46 static
47 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
48 {
49 if (mp_node->target_pc == key_pc)
50 return 1;
51 else
52 return 0;
53 }
54
55 static
56 int merge_points_compare(const struct vstack *stacka,
57 const struct vstack *stackb)
58 {
59 int i, len;
60
61 if (stacka->top != stackb->top)
62 return 1;
63 len = stacka->top + 1;
64 WARN_ON_ONCE(len < 0);
65 for (i = 0; i < len; i++) {
66 if (stacka->e[i].type != stackb->e[i].type)
67 return 1;
68 }
69 return 0;
70 }
71
72 static
73 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
74 const struct vstack *stack)
75 {
76 struct mp_node *mp_node;
77 unsigned long hash = jhash_1word(target_pc, 0);
78 struct hlist_head *head;
79 struct mp_node *lookup_node;
80 int found = 0;
81
82 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
83 target_pc, hash);
84 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
85 if (!mp_node)
86 return -ENOMEM;
87 mp_node->target_pc = target_pc;
88 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
89
90 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
91 lttng_hlist_for_each_entry(lookup_node, head, node) {
92 if (lttng_hash_match(lookup_node, target_pc)) {
93 found = 1;
94 break;
95 }
96 }
97 if (found) {
98 /* Key already present */
99 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
100 target_pc, hash);
101 kfree(mp_node);
102 if (merge_points_compare(stack, &lookup_node->stack)) {
103 printk(KERN_WARNING "Merge points differ for offset %lu\n",
104 target_pc);
105 return -EINVAL;
106 }
107 } else {
108 hlist_add_head(&mp_node->node, head);
109 }
110 return 0;
111 }
112
113 /*
114 * Binary comparators use top of stack and top of stack -1.
115 */
116 static
117 int bin_op_compare_check(struct vstack *stack, const char *str)
118 {
119 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
120 goto error_unknown;
121
122 switch (vstack_ax(stack)->type) {
123 default:
124 case REG_DOUBLE:
125 goto error_unknown;
126
127 case REG_STRING:
128 switch (vstack_bx(stack)->type) {
129 default:
130 case REG_DOUBLE:
131 goto error_unknown;
132
133 case REG_STRING:
134 break;
135 case REG_S64:
136 goto error_mismatch;
137 }
138 break;
139 case REG_S64:
140 switch (vstack_bx(stack)->type) {
141 default:
142 case REG_DOUBLE:
143 goto error_unknown;
144
145 case REG_STRING:
146 goto error_mismatch;
147
148 case REG_S64:
149 break;
150 }
151 break;
152 }
153 return 0;
154
155 error_unknown:
156 return -EINVAL;
157
158 error_mismatch:
159 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
160 return -EINVAL;
161 }
162
/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered.
 *
 * Checks that the instruction at "pc", including its fixed-size operands
 * (and, for LOAD_STRING, its NUL-terminated immediate string), lies
 * entirely within [start_pc, start_pc + bytecode->len).
 *
 * Returns 0 on success, -ERANGE on overflow, -EINVAL on unknown or
 * unsupported opcode.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		void *start_pc, void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		/*
		 * Generic arithmetic and all double-typed operations are
		 * rejected by this validator.
		 */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	/* Supported comparators: all share the binary_op layout. */
	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Untyped field ref should have been specialized earlier. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Untyped context ref should have been specialized earlier. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	/* Typed refs: load_op header followed by a field_ref operand. */
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		/*
		 * The immediate string must be NUL-terminated within the
		 * remaining bytecode; otherwise later strlen() use in
		 * exec_insn() would read out of bounds.
		 */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	}

	return ret;
}
361
362 static
363 unsigned long delete_all_nodes(struct mp_table *mp_table)
364 {
365 struct mp_node *mp_node;
366 struct hlist_node *tmp;
367 unsigned long nr_nodes = 0;
368 int i;
369
370 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
371 struct hlist_head *head;
372
373 head = &mp_table->mp_head[i];
374 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
375 kfree(mp_node);
376 nr_nodes++;
377 }
378 }
379 return nr_nodes;
380 }
381
/*
 * Validate that the current virtual stack context satisfies the
 * preconditions of the instruction at "pc". The stack itself is not
 * modified here (that is exec_insn()'s job).
 *
 * Return value:
 *	0: success
 *	<0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		void *start_pc,
		void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		/* No context precondition for RETURN. */
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		/*
		 * Generic arithmetic and double-typed operations are
		 * rejected by this validator.
		 */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/*
	 * Generic comparators: both operands must be of the same known
	 * type; delegated to bin_op_compare_check().
	 */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, "==");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, "!=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, ">");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, "<");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, ">=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, "<=");
		if (ret)
			goto end;
		break;
	}

	/* Specialized string comparators: both operands must be strings. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized s64 comparators: both operands must be s64. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		/* Generic unary ops require a numeric top-of-stack. */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		break;
	}

	/* Specialized s64 unary ops require an s64 top-of-stack. */
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/*
		 * Only strictly forward jumps are allowed, which
		 * guarantees validation (and execution) terminates.
		 */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Untyped field ref should have been specialized earlier. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		/* No context precondition (pushes; checked in exec_insn). */
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		/* No context precondition (pushes; checked in exec_insn). */
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): this branch looks unreachable — this case
		 * handles FILTER_OP_CAST_TO_S64 (so insn->op should not be
		 * CAST_DOUBLE_TO_S64), and REG_DOUBLE was already rejected
		 * by the switch above. Confirm before removing.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		/* No-op cast: no precondition. */
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Untyped context ref should have been specialized earlier. */
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	}
end:
	return ret;
}
717
718 /*
719 * Return value:
720 * 0: success
721 * <0: error
722 */
723 static
724 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
725 struct mp_table *mp_table,
726 struct vstack *stack,
727 void *start_pc,
728 void *pc)
729 {
730 int ret, found = 0;
731 unsigned long target_pc = pc - start_pc;
732 unsigned long hash;
733 struct hlist_head *head;
734 struct mp_node *mp_node;
735
736 /* Validate the context resulting from the previous instruction */
737 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
738 if (ret)
739 return ret;
740
741 /* Validate merge points */
742 hash = jhash_1word(target_pc, 0);
743 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
744 lttng_hlist_for_each_entry(mp_node, head, node) {
745 if (lttng_hash_match(mp_node, target_pc)) {
746 found = 1;
747 break;
748 }
749 }
750 if (found) {
751 dbg_printk("Filter: validate merge point at offset %lu\n",
752 target_pc);
753 if (merge_points_compare(stack, &mp_node->stack)) {
754 printk(KERN_WARNING "Merge points differ for offset %lu\n",
755 target_pc);
756 return -EINVAL;
757 }
758 /* Once validated, we can remove the merge point */
759 dbg_printk("Filter: remove merge point at offset %lu\n",
760 target_pc);
761 hlist_del(&mp_node->node);
762 }
763 return 0;
764 }
765
/*
 * Abstract execution of one instruction: update the virtual stack and
 * compute the address of the next instruction (*_next_pc).
 *
 * Return value:
 *	>0: going to next insn.
 *	0: success, stop iteration.
 *	<0: error
 */
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct mp_table *mp_table,
		struct vstack *stack,
		void **_next_pc,
		void *pc)
{
	int ret = 1;
	void *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		/* RETURN needs a result on the stack; stop iteration. */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		ret = 0;
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		/*
		 * Generic arithmetic and double-typed operations are
		 * rejected by this validator.
		 */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Comparators: consume two operands, produce one s64 result. */
	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		/* Pop 2, push 1 */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/*
		 * Add merge point to table: the jump-taken path at
		 * skip_offset must later be validated against the stack
		 * context as it is here (before the pop below).
		 */
		merge_ret = merge_point_add_check(mp_table,
					insn->skip_offset, stack);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		/* Pop 1 when jump not taken */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Untyped field ref should have been specialized earlier. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Untyped context ref should have been specialized earlier. */
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* String-typed refs: push one REG_STRING. */
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	/* s64-typed refs: push one REG_S64. */
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		/*
		 * strlen() is safe here: bytecode_validate_overflow()
		 * already checked the NUL terminator lies within range.
		 */
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		/* No stack effect. */
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}
1013
1014 /*
1015 * Never called concurrently (hash seed is shared).
1016 */
1017 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1018 {
1019 struct mp_table *mp_table;
1020 void *pc, *next_pc, *start_pc;
1021 int ret = -EINVAL;
1022 struct vstack stack;
1023
1024 vstack_init(&stack);
1025
1026 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1027 if (!mp_table) {
1028 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1029 return -ENOMEM;
1030 }
1031 start_pc = &bytecode->data[0];
1032 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1033 pc = next_pc) {
1034 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1035 if (ret != 0) {
1036 if (ret == -ERANGE)
1037 printk(KERN_WARNING "filter bytecode overflow\n");
1038 goto end;
1039 }
1040 dbg_printk("Validating op %s (%u)\n",
1041 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1042 (unsigned int) *(filter_opcode_t *) pc);
1043
1044 /*
1045 * For each instruction, validate the current context
1046 * (traversal of entire execution flow), and validate
1047 * all merge points targeting this instruction.
1048 */
1049 ret = validate_instruction_all_contexts(bytecode, mp_table,
1050 &stack, start_pc, pc);
1051 if (ret)
1052 goto end;
1053 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1054 if (ret <= 0)
1055 goto end;
1056 }
1057 end:
1058 if (delete_all_nodes(mp_table)) {
1059 if (!ret) {
1060 printk(KERN_WARNING "Unexpected merge points\n");
1061 ret = -EINVAL;
1062 }
1063 }
1064 kfree(mp_table);
1065 return ret;
1066 }
This page took 0.050797 seconds and 4 git commands to generate.