Migrate tracepoint instrumentation to TP_FIELDS
[lttng-modules.git] / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <linux/list.h>
24 #include <linux/jhash.h>
25 #include <linux/slab.h>
26
27 #include "lttng-filter.h"
28
29 #define MERGE_POINT_TABLE_BITS 7
30 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
31
/*
 * Merge point table node: records the virtual stack state expected at a
 * given bytecode offset (a branch target), so that every execution path
 * reaching that offset can be checked for a type-consistent stack.
 */
struct mp_node {
	struct hlist_node node;		/* chaining into mp_table bucket */

	/* Context at merge point */
	struct vstack stack;
	unsigned long target_pc;	/* bytecode offset of the merge point */
};
40
/* Hash table of pending merge points, keyed by target bytecode offset. */
struct mp_table {
	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
};
44
45 static
46 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
47 {
48 if (mp_node->target_pc == key_pc)
49 return 1;
50 else
51 return 0;
52 }
53
54 static
55 int merge_points_compare(const struct vstack *stacka,
56 const struct vstack *stackb)
57 {
58 int i, len;
59
60 if (stacka->top != stackb->top)
61 return 1;
62 len = stacka->top + 1;
63 WARN_ON_ONCE(len < 0);
64 for (i = 0; i < len; i++) {
65 if (stacka->e[i].type != stackb->e[i].type)
66 return 1;
67 }
68 return 0;
69 }
70
71 static
72 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
73 const struct vstack *stack)
74 {
75 struct mp_node *mp_node;
76 unsigned long hash = jhash_1word(target_pc, 0);
77 struct hlist_head *head;
78 struct mp_node *lookup_node;
79 int found = 0;
80
81 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
82 target_pc, hash);
83 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
84 if (!mp_node)
85 return -ENOMEM;
86 mp_node->target_pc = target_pc;
87 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
88
89 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
90 hlist_for_each_entry(lookup_node, head, node) {
91 if (lttng_hash_match(lookup_node, target_pc)) {
92 found = 1;
93 break;
94 }
95 }
96 if (found) {
97 /* Key already present */
98 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
99 target_pc, hash);
100 kfree(mp_node);
101 if (merge_points_compare(stack, &lookup_node->stack)) {
102 printk(KERN_WARNING "Merge points differ for offset %lu\n",
103 target_pc);
104 return -EINVAL;
105 }
106 }
107 hlist_add_head(&mp_node->node, head);
108 return 0;
109 }
110
111 /*
112 * Binary comparators use top of stack and top of stack -1.
113 */
114 static
115 int bin_op_compare_check(struct vstack *stack, const char *str)
116 {
117 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
118 goto error_unknown;
119
120 switch (vstack_ax(stack)->type) {
121 default:
122 case REG_DOUBLE:
123 goto error_unknown;
124
125 case REG_STRING:
126 switch (vstack_bx(stack)->type) {
127 default:
128 case REG_DOUBLE:
129 goto error_unknown;
130
131 case REG_STRING:
132 break;
133 case REG_S64:
134 goto error_mismatch;
135 }
136 break;
137 case REG_S64:
138 switch (vstack_bx(stack)->type) {
139 default:
140 case REG_DOUBLE:
141 goto error_unknown;
142
143 case REG_STRING:
144 goto error_mismatch;
145
146 case REG_S64:
147 break;
148 }
149 break;
150 }
151 return 0;
152
153 error_unknown:
154 return -EINVAL;
155
156 error_mismatch:
157 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
158 return -EINVAL;
159 }
160
/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered: checks that the complete
 * encoding of the opcode at @pc (opcode plus its operands) fits within
 * the bytecode buffer [start_pc, start_pc + bytecode->len).
 *
 * Return 0 on success, -ERANGE when an instruction would overflow the
 * buffer, -EINVAL for unknown or unsupported (floating point) opcodes.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		void *start_pc, void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/*
	 * Arithmetic/shift/bitwise binary ops and all double (floating
	 * point) variants are rejected: not supported by the kernel
	 * filter interpreter.
	 */
	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	/* Supported comparators: generic, string and s64 variants. */
	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref: the untyped variant must have been specialized. */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	/* get context ref: the untyped variant must have been specialized. */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		/* The inline string must be NUL-terminated within the buffer. */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	}

	return ret;
}
359
360 static
361 unsigned long delete_all_nodes(struct mp_table *mp_table)
362 {
363 struct mp_node *mp_node;
364 struct hlist_node *tmp;
365 unsigned long nr_nodes = 0;
366 int i;
367
368 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
369 struct hlist_head *head;
370
371 head = &mp_table->mp_head[i];
372 hlist_for_each_entry_safe(mp_node, tmp, head, node) {
373 kfree(mp_node);
374 nr_nodes++;
375 }
376 }
377 return nr_nodes;
378 }
379
/*
 * Validate that the virtual stack context produced by the previous
 * instructions is compatible with the opcode at @pc (operand types,
 * stack depth, and no backward jumps for logical operators).
 *
 * Return value:
 * 0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		void *start_pc,
		void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		goto end;
	}

	/*
	 * Arithmetic/shift/bitwise binary ops and all double (floating
	 * point) variants are rejected as unsupported.
	 */
	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/*
	 * Generic comparators: operand types are checked dynamically
	 * (string vs string, or s64 vs s64).
	 */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, "==");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, "!=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, ">");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, "<");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, ">=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, "<=");
		if (ret)
			goto end;
		break;
	}

	/* Specialized string comparators: both operands must be strings. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized s64 comparators: both operands must be s64. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary: only numeric (s64) top of stack is acceptable. */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical: requires s64 condition and a strictly forward jump. */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Backward (or self) jumps would allow loops: reject. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref: untyped variant must have been specialized. */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand: always valid in context. */
	case FILTER_OP_LOAD_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): within this case the opcode is
		 * FILTER_OP_CAST_TO_S64, and the switch above only falls
		 * through for REG_S64 — this branch looks unreachable;
		 * confirm before removing.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref: untyped variant must have been specialized. */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	}
end:
	return ret;
}
715
716 /*
717 * Return value:
718 * 0: success
719 * <0: error
720 */
721 static
722 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
723 struct mp_table *mp_table,
724 struct vstack *stack,
725 void *start_pc,
726 void *pc)
727 {
728 int ret, found = 0;
729 unsigned long target_pc = pc - start_pc;
730 unsigned long hash;
731 struct hlist_head *head;
732 struct mp_node *mp_node;
733
734 /* Validate the context resulting from the previous instruction */
735 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
736 if (ret)
737 return ret;
738
739 /* Validate merge points */
740 hash = jhash_1word(target_pc, 0);
741 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
742 hlist_for_each_entry(mp_node, head, node) {
743 if (lttng_hash_match(mp_node, target_pc)) {
744 found = 1;
745 break;
746 }
747 }
748 if (found) {
749 dbg_printk("Filter: validate merge point at offset %lu\n",
750 target_pc);
751 if (merge_points_compare(stack, &mp_node->stack)) {
752 printk(KERN_WARNING "Merge points differ for offset %lu\n",
753 target_pc);
754 return -EINVAL;
755 }
756 /* Once validated, we can remove the merge point */
757 dbg_printk("Filter: remove merge point at offset %lu\n",
758 target_pc);
759 hlist_del(&mp_node->node);
760 }
761 return 0;
762 }
763
/*
 * Symbolically execute the instruction at @pc: update the virtual stack
 * (push/pop entries and set their register types) and compute the next
 * program counter into *_next_pc. Logical operators also register their
 * jump target as a merge point in @mp_table.
 *
 * Return value:
 * >0: going to next insn.
 * 0: success, stop iteration.
 * <0: error
 */
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct mp_table *mp_table,
		struct vstack *stack,
		void **_next_pc,
		void *pc)
{
	int ret = 1;
	void *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		/* RETURN consumes the final value: stack must be non-empty. */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		ret = 0;
		goto end;
	}

	/*
	 * Arithmetic/shift/bitwise binary ops and all double (floating
	 * point) variants are rejected as unsupported.
	 */
	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Comparators: pop two operands, push an s64 boolean result. */
	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		/* Pop 2, push 1 */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary: operate in place on the top of stack, result is s64. */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/*
		 * Add merge point to table: the current stack state is
		 * what the jump-taken path will see at skip_offset.
		 */
		merge_ret = merge_point_add_check(mp_table,
				insn->skip_offset, stack);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		/* Pop 1 when jump not taken */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load field ref: untyped variant must have been specialized. */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* get context ref: untyped variant must have been specialized. */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* String-typed field/context refs push one REG_STRING entry. */
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	/* s64-typed field/context refs push one REG_S64 entry. */
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		/* Immediate string operand is inline, including its '\0'. */
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}
1011
/*
 * Validate a filter bytecode program before it is armed: for each
 * instruction, check buffer-overflow bounds, validate the stack context
 * (including all merge points targeting the instruction), then
 * symbolically execute it to derive the next context.
 *
 * Return 0 when the bytecode is valid, negative error value otherwise.
 * NOTE(review): bytecode that runs past the end without a RETURN leaves
 * ret at exec_insn's "continue" value (1), a positive return — confirm
 * callers treat any non-zero value as failure.
 *
 * Never called concurrently (hash seed is shared).
 */
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
{
	struct mp_table *mp_table;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack stack;

	vstack_init(&stack);

	mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
	if (!mp_table) {
		printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
		return -ENOMEM;
	}
	start_pc = &bytecode->data[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		/* Bounds first: later passes may read the full operand. */
		ret = bytecode_validate_overflow(bytecode, start_pc, pc);
		if (ret != 0) {
			if (ret == -ERANGE)
				printk(KERN_WARNING "filter bytecode overflow\n");
			goto end;
		}
		dbg_printk("Validating op %s (%u)\n",
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
			(unsigned int) *(filter_opcode_t *) pc);

		/*
		 * For each instruction, validate the current context
		 * (traversal of entire execution flow), and validate
		 * all merge points targeting this instruction.
		 */
		ret = validate_instruction_all_contexts(bytecode, mp_table,
				&stack, start_pc, pc);
		if (ret)
			goto end;
		/* Symbolic execution: updates stack and next_pc. */
		ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
		if (ret <= 0)
			goto end;
	}
end:
	/* Leftover merge points mean unvalidated paths: reject. */
	if (delete_all_nodes(mp_table)) {
		if (!ret) {
			printk(KERN_WARNING "Unexpected merge points\n");
			ret = -EINVAL;
		}
	}
	kfree(mp_table);
	return ret;
}
This page took 0.049667 seconds and 5 git commands to generate.