Implement filtering infrastructure
[lttng-modules.git] / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <linux/list.h>
24 #include <linux/jhash.h>
25 #include <linux/slab.h>
26
27 #include "lttng-filter.h"
28
29 #define MERGE_POINT_TABLE_BITS 7
30 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
31
32 /* merge point table node */
/*
 * Merge point table node: records the virtual stack state expected at a
 * forward-branch target (merge point), so that every execution path
 * reaching that target can be checked for a consistent stack layout.
 */
struct mp_node {
	struct hlist_node node;		/* chaining in mp_table bucket */

	/* Context at merge point */
	struct vstack stack;		/* expected virtual stack at target_pc */
	unsigned long target_pc;	/* bytecode offset of merge point (hash key) */
};
40
/*
 * Hash table of pending merge points, bucketed by jhash of the target
 * program counter offset.
 */
struct mp_table {
	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
};
44
45 static
46 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
47 {
48 if (mp_node->target_pc == key_pc)
49 return 1;
50 else
51 return 0;
52 }
53
54 static
55 int merge_points_compare(const struct vstack *stacka,
56 const struct vstack *stackb)
57 {
58 int i, len;
59
60 if (stacka->top != stackb->top)
61 return 1;
62 len = stacka->top + 1;
63 WARN_ON_ONCE(len < 0);
64 for (i = 0; i < len; i++) {
65 if (stacka->e[i].type != stackb->e[i].type)
66 return 1;
67 }
68 return 0;
69 }
70
71 static
72 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
73 const struct vstack *stack)
74 {
75 struct mp_node *mp_node;
76 unsigned long hash = jhash_1word(target_pc, 0);
77 struct hlist_head *head;
78 struct mp_node *lookup_node;
79 int found = 0;
80
81 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
82 target_pc, hash);
83 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
84 if (!mp_node)
85 return -ENOMEM;
86 mp_node->target_pc = target_pc;
87 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
88
89 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
90 hlist_for_each_entry(lookup_node, head, node) {
91 if (lttng_hash_match(lookup_node, target_pc)) {
92 found = 1;
93 break;
94 }
95 }
96 if (found) {
97 /* Key already present */
98 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
99 target_pc, hash);
100 kfree(mp_node);
101 if (merge_points_compare(stack, &lookup_node->stack)) {
102 printk(KERN_WARNING "Merge points differ for offset %lu\n",
103 target_pc);
104 return -EINVAL;
105 }
106 }
107 hlist_add_head(&mp_node->node, head);
108 return 0;
109 }
110
111 /*
112 * Binary comparators use top of stack and top of stack -1.
113 */
114 static
115 int bin_op_compare_check(struct vstack *stack, const char *str)
116 {
117 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
118 goto error_unknown;
119
120 switch (vstack_ax(stack)->type) {
121 default:
122 case REG_DOUBLE:
123 goto error_unknown;
124
125 case REG_STRING:
126 switch (vstack_bx(stack)->type) {
127 default:
128 case REG_DOUBLE:
129 goto error_unknown;
130
131 case REG_STRING:
132 break;
133 case REG_S64:
134 goto error_mismatch;
135 }
136 break;
137 case REG_S64:
138 switch (vstack_bx(stack)->type) {
139 default:
140 case REG_DOUBLE:
141 goto error_unknown;
142
143 case REG_STRING:
144 goto error_mismatch;
145
146 case REG_S64:
147 break;
148 }
149 break;
150 }
151 return 0;
152
153 error_unknown:
154 return -EINVAL;
155
156 error_mismatch:
157 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
158 return -EINVAL;
159 }
160
/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered.
 *
 * Checks that the full encoding of the instruction at pc (opcode plus
 * its operand payload) fits within [start_pc, start_pc + bytecode->len),
 * so the interpreter never reads past the end of the bytecode buffer.
 *
 * Returns 0 when the instruction fits, -ERANGE when it would overflow
 * the buffer, -EINVAL for unknown or unsupported (floating point,
 * arithmetic) opcodes.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		void *start_pc, void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		/* Arithmetic and floating point are not supported in-kernel. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		/*
		 * The immediate string must be NUL-terminated within the
		 * remaining bytecode; exec_insn later relies on strlen()
		 * over this data.
		 */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	}

	return ret;
}
357
358 static
359 unsigned long delete_all_nodes(struct mp_table *mp_table)
360 {
361 struct mp_node *mp_node;
362 struct hlist_node *tmp;
363 unsigned long nr_nodes = 0;
364 int i;
365
366 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
367 struct hlist_head *head;
368
369 head = &mp_table->mp_head[i];
370 hlist_for_each_entry_safe(mp_node, tmp, head, node) {
371 kfree(mp_node);
372 nr_nodes++;
373 }
374 }
375 return nr_nodes;
376 }
377
/*
 * Validate that the virtual stack context left by the previous
 * instruction is compatible with the instruction at pc (operand count
 * and register types). Does not modify the stack; stack effects are
 * applied later by exec_insn().
 *
 * Return value:
 * 0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		void *start_pc,
		void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		/* Arithmetic and floating point are not supported in-kernel. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Generic comparators: operand types checked per-operator. */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, "==");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, "!=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, ">");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, "<");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, ">=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, "<=");
		if (ret)
			goto end;
		break;
	}

	/* Specialized string comparators: both operands must be strings. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized numeric comparators: both operands must be s64. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Only forward jumps allowed: guarantees termination. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): this branch appears unreachable: REG_DOUBLE
		 * was rejected by the switch above, so the double check can
		 * never succeed here — looks like defensive coding; confirm
		 * against the specializer.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	}
end:
	return ret;
}
711
712 /*
713 * Return value:
714 * 0: success
715 * <0: error
716 */
717 static
718 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
719 struct mp_table *mp_table,
720 struct vstack *stack,
721 void *start_pc,
722 void *pc)
723 {
724 int ret, found = 0;
725 unsigned long target_pc = pc - start_pc;
726 unsigned long hash;
727 struct hlist_head *head;
728 struct mp_node *mp_node;
729
730 /* Validate the context resulting from the previous instruction */
731 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
732 if (ret)
733 return ret;
734
735 /* Validate merge points */
736 hash = jhash_1word(target_pc, 0);
737 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
738 hlist_for_each_entry(mp_node, head, node) {
739 if (lttng_hash_match(mp_node, target_pc)) {
740 found = 1;
741 break;
742 }
743 }
744 if (found) {
745 dbg_printk("Filter: validate merge point at offset %lu\n",
746 target_pc);
747 if (merge_points_compare(stack, &mp_node->stack)) {
748 printk(KERN_WARNING "Merge points differ for offset %lu\n",
749 target_pc);
750 return -EINVAL;
751 }
752 /* Once validated, we can remove the merge point */
753 dbg_printk("Filter: remove merge point at offset %lu\n",
754 target_pc);
755 hlist_del(&mp_node->node);
756 }
757 return 0;
758 }
759
/*
 * Symbolically execute the instruction at pc: apply its stack effect to
 * the virtual stack (push/pop register types), register merge points
 * for conditional jumps, and compute the address of the next
 * instruction into *_next_pc.
 *
 * Return value:
 * >0: going to next insn.
 * 0: success, stop iteration.
 * <0: error
 */
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct mp_table *mp_table,
		struct vstack *stack,
		void **_next_pc,
		void *pc)
{
	int ret = 1;
	void *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		/* Return consumes the final value: stack must be non-empty. */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		ret = 0;
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		/* Arithmetic and floating point are not supported in-kernel. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Comparators: consume two operands, produce one s64 result. */
	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		/* Pop 2, push 1 */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/*
		 * Add merge point to table: the jump-taken path keeps the
		 * current stack, to be re-checked when validation reaches
		 * the jump target.
		 */
		merge_ret = merge_point_add_check(mp_table,
					insn->skip_offset, stack);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		/* Pop 1 when jump not taken */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		/* Push 1 string register. */
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		/* Push 1 s64 register. */
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		/*
		 * Immediate string operand: NUL termination within the
		 * buffer was checked by bytecode_validate_overflow().
		 */
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}
1005
1006 /*
1007 * Never called concurrently (hash seed is shared).
1008 */
1009 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1010 {
1011 struct mp_table *mp_table;
1012 void *pc, *next_pc, *start_pc;
1013 int ret = -EINVAL;
1014 struct vstack stack;
1015
1016 vstack_init(&stack);
1017
1018 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1019 if (!mp_table) {
1020 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1021 return -ENOMEM;
1022 }
1023 start_pc = &bytecode->data[0];
1024 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1025 pc = next_pc) {
1026 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1027 if (ret != 0) {
1028 if (ret == -ERANGE)
1029 printk(KERN_WARNING "filter bytecode overflow\n");
1030 goto end;
1031 }
1032 dbg_printk("Validating op %s (%u)\n",
1033 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1034 (unsigned int) *(filter_opcode_t *) pc);
1035
1036 /*
1037 * For each instruction, validate the current context
1038 * (traversal of entire execution flow), and validate
1039 * all merge points targeting this instruction.
1040 */
1041 ret = validate_instruction_all_contexts(bytecode, mp_table,
1042 &stack, start_pc, pc);
1043 if (ret)
1044 goto end;
1045 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1046 if (ret <= 0)
1047 goto end;
1048 }
1049 end:
1050 if (delete_all_nodes(mp_table)) {
1051 if (!ret) {
1052 printk(KERN_WARNING "Unexpected merge points\n");
1053 ret = -EINVAL;
1054 }
1055 }
1056 kfree(mp_table);
1057 return ret;
1058 }
This page took 0.049516 seconds and 5 git commands to generate.