Fix: add missing types.h include for older kernels
[lttng-modules.git] / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <linux/types.h>
24 #include <linux/jhash.h>
25 #include <linux/slab.h>
26
27 #include "wrapper/list.h"
28 #include "lttng-filter.h"
29
30 #define MERGE_POINT_TABLE_BITS 7
31 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
32
33 /* merge point table node */
34 struct mp_node {
35 struct hlist_node node;
36
37 /* Context at merge point */
38 struct vstack stack;
39 unsigned long target_pc;
40 };
41
42 struct mp_table {
43 struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
44 };
45
46 static
47 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
48 {
49 if (mp_node->target_pc == key_pc)
50 return 1;
51 else
52 return 0;
53 }
54
55 static
56 int merge_points_compare(const struct vstack *stacka,
57 const struct vstack *stackb)
58 {
59 int i, len;
60
61 if (stacka->top != stackb->top)
62 return 1;
63 len = stacka->top + 1;
64 WARN_ON_ONCE(len < 0);
65 for (i = 0; i < len; i++) {
66 if (stacka->e[i].type != stackb->e[i].type)
67 return 1;
68 }
69 return 0;
70 }
71
72 static
73 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
74 const struct vstack *stack)
75 {
76 struct mp_node *mp_node;
77 unsigned long hash = jhash_1word(target_pc, 0);
78 struct hlist_head *head;
79 struct mp_node *lookup_node;
80 int found = 0;
81
82 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
83 target_pc, hash);
84 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
85 if (!mp_node)
86 return -ENOMEM;
87 mp_node->target_pc = target_pc;
88 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
89
90 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
91 lttng_hlist_for_each_entry(lookup_node, head, node) {
92 if (lttng_hash_match(lookup_node, target_pc)) {
93 found = 1;
94 break;
95 }
96 }
97 if (found) {
98 /* Key already present */
99 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
100 target_pc, hash);
101 kfree(mp_node);
102 if (merge_points_compare(stack, &lookup_node->stack)) {
103 printk(KERN_WARNING "Merge points differ for offset %lu\n",
104 target_pc);
105 return -EINVAL;
106 }
107 }
108 hlist_add_head(&mp_node->node, head);
109 return 0;
110 }
111
112 /*
113 * Binary comparators use top of stack and top of stack -1.
114 */
115 static
116 int bin_op_compare_check(struct vstack *stack, const char *str)
117 {
118 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
119 goto error_unknown;
120
121 switch (vstack_ax(stack)->type) {
122 default:
123 case REG_DOUBLE:
124 goto error_unknown;
125
126 case REG_STRING:
127 switch (vstack_bx(stack)->type) {
128 default:
129 case REG_DOUBLE:
130 goto error_unknown;
131
132 case REG_STRING:
133 break;
134 case REG_S64:
135 goto error_mismatch;
136 }
137 break;
138 case REG_S64:
139 switch (vstack_bx(stack)->type) {
140 default:
141 case REG_DOUBLE:
142 goto error_unknown;
143
144 case REG_STRING:
145 goto error_mismatch;
146
147 case REG_S64:
148 break;
149 }
150 break;
151 }
152 return 0;
153
154 error_unknown:
155 return -EINVAL;
156
157 error_mismatch:
158 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
159 return -EINVAL;
160 }
161
162 /*
163 * Validate bytecode range overflow within the validation pass.
164 * Called for each instruction encountered.
165 */
/*
 * Check that the instruction at "pc", together with all its fixed-size
 * operands, lies entirely within the bytecode buffer
 * [start_pc, start_pc + bytecode->len).
 *
 * Returns 0 on success, -EINVAL on unknown/unsupported opcode,
 * -ERANGE when the instruction would overflow the buffer.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		void *start_pc, void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		/* These opcodes are rejected outright by this validator. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Generic (untyped) context ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		/* The immediate string must be NUL-terminated within range. */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	}

	return ret;
}
360
361 static
362 unsigned long delete_all_nodes(struct mp_table *mp_table)
363 {
364 struct mp_node *mp_node;
365 struct hlist_node *tmp;
366 unsigned long nr_nodes = 0;
367 int i;
368
369 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
370 struct hlist_head *head;
371
372 head = &mp_table->mp_head[i];
373 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
374 kfree(mp_node);
375 nr_nodes++;
376 }
377 }
378 return nr_nodes;
379 }
380
381 /*
382 * Return value:
383 * 0: success
384 * <0: error
385 */
/*
 * Validate that the stack context produced by the previous instructions
 * is compatible with the operand types the instruction at "pc" expects.
 * Does not modify the stack; exec_insn() performs the symbolic update.
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		void *start_pc,
		void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		/* Rejected by this validator (see bytecode_validate_overflow). */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Generic comparators: delegate the operand type matrix check. */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, "==");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, "!=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, ">");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, "<");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, ">=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, "<=");
		if (ret)
			goto end;
		break;
	}

	/* Specialized string comparators require two string operands. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized s64 comparators require two numeric operands. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Only forward jumps allowed: guarantees termination. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): appears unreachable here since the switch on
		 * the opcode only enters this case for FILTER_OP_CAST_TO_S64;
		 * presumably kept as a defensive check — confirm upstream.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Generic (untyped) context ref must have been specialized. */
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	}
end:
	return ret;
}
716
717 /*
718 * Return value:
719 * 0: success
720 * <0: error
721 */
722 static
723 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
724 struct mp_table *mp_table,
725 struct vstack *stack,
726 void *start_pc,
727 void *pc)
728 {
729 int ret, found = 0;
730 unsigned long target_pc = pc - start_pc;
731 unsigned long hash;
732 struct hlist_head *head;
733 struct mp_node *mp_node;
734
735 /* Validate the context resulting from the previous instruction */
736 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
737 if (ret)
738 return ret;
739
740 /* Validate merge points */
741 hash = jhash_1word(target_pc, 0);
742 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
743 lttng_hlist_for_each_entry(mp_node, head, node) {
744 if (lttng_hash_match(mp_node, target_pc)) {
745 found = 1;
746 break;
747 }
748 }
749 if (found) {
750 dbg_printk("Filter: validate merge point at offset %lu\n",
751 target_pc);
752 if (merge_points_compare(stack, &mp_node->stack)) {
753 printk(KERN_WARNING "Merge points differ for offset %lu\n",
754 target_pc);
755 return -EINVAL;
756 }
757 /* Once validated, we can remove the merge point */
758 dbg_printk("Filter: remove merge point at offset %lu\n",
759 target_pc);
760 hlist_del(&mp_node->node);
761 }
762 return 0;
763 }
764
765 /*
766 * Return value:
767 * >0: going to next insn.
768 * 0: success, stop iteration.
769 * <0: error
770 */
/*
 * Symbolically execute the instruction at "pc": update the type stack
 * the way the interpreter would, and compute *_next_pc. For logical
 * and/or, also register the branch target as a merge point.
 */
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct mp_table *mp_table,
		struct vstack *stack,
		void **_next_pc,
		void *pc)
{
	int ret = 1;	/* default: continue to next instruction */
	void *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		/* RETURN needs a result value on the stack. */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		/* 0 tells the caller to stop iterating: end of program. */
		ret = 0;
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		/* Rejected by this validator (see bytecode_validate_overflow). */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		/* Pop 2, push 1 */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		/* Comparators produce a numeric (boolean) result. */
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/* Add merge point to table */
		merge_ret = merge_point_add_check(mp_table,
				insn->skip_offset, stack);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		/* Pop 1 when jump not taken */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Generic (untyped) context ref must have been specialized. */
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		/* Push a string register. */
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		/* Push a numeric register. */
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		/* Instruction size includes the inline string and its NUL. */
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}
1012
1013 /*
1014 * Never called concurrently (hash seed is shared).
1015 */
1016 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1017 {
1018 struct mp_table *mp_table;
1019 void *pc, *next_pc, *start_pc;
1020 int ret = -EINVAL;
1021 struct vstack stack;
1022
1023 vstack_init(&stack);
1024
1025 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1026 if (!mp_table) {
1027 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1028 return -ENOMEM;
1029 }
1030 start_pc = &bytecode->data[0];
1031 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1032 pc = next_pc) {
1033 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1034 if (ret != 0) {
1035 if (ret == -ERANGE)
1036 printk(KERN_WARNING "filter bytecode overflow\n");
1037 goto end;
1038 }
1039 dbg_printk("Validating op %s (%u)\n",
1040 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1041 (unsigned int) *(filter_opcode_t *) pc);
1042
1043 /*
1044 * For each instruction, validate the current context
1045 * (traversal of entire execution flow), and validate
1046 * all merge points targeting this instruction.
1047 */
1048 ret = validate_instruction_all_contexts(bytecode, mp_table,
1049 &stack, start_pc, pc);
1050 if (ret)
1051 goto end;
1052 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1053 if (ret <= 0)
1054 goto end;
1055 }
1056 end:
1057 if (delete_all_nodes(mp_table)) {
1058 if (!ret) {
1059 printk(KERN_WARNING "Unexpected merge points\n");
1060 ret = -EINVAL;
1061 }
1062 }
1063 kfree(mp_table);
1064 return ret;
1065 }
This page took 0.063118 seconds and 5 git commands to generate.