/*
 * Source: lttng-modules.git / lttng-filter-validator.c
 * (captured at commit "Filter code relicensing to MIT license")
 */
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
30
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
33
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
36
37 /* merge point table node */
struct mp_node {
	/* Chaining into one bucket of struct mp_table. */
	struct hlist_node node;

	/* Context at merge point */
	struct vstack stack;
	/* Bytecode offset (relative to start of bytecode) of the merge point. */
	unsigned long target_pc;
};
45
/* Hash table of merge points, keyed by jhash of the target bytecode offset. */
struct mp_table {
	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
};
49
50 static
51 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
52 {
53 if (mp_node->target_pc == key_pc)
54 return 1;
55 else
56 return 0;
57 }
58
59 static
60 int merge_points_compare(const struct vstack *stacka,
61 const struct vstack *stackb)
62 {
63 int i, len;
64
65 if (stacka->top != stackb->top)
66 return 1;
67 len = stacka->top + 1;
68 WARN_ON_ONCE(len < 0);
69 for (i = 0; i < len; i++) {
70 if (stacka->e[i].type != stackb->e[i].type)
71 return 1;
72 }
73 return 0;
74 }
75
76 static
77 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
78 const struct vstack *stack)
79 {
80 struct mp_node *mp_node;
81 unsigned long hash = jhash_1word(target_pc, 0);
82 struct hlist_head *head;
83 struct mp_node *lookup_node;
84 int found = 0;
85
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
87 target_pc, hash);
88 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
89 if (!mp_node)
90 return -ENOMEM;
91 mp_node->target_pc = target_pc;
92 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
93
94 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
95 lttng_hlist_for_each_entry(lookup_node, head, node) {
96 if (lttng_hash_match(lookup_node, target_pc)) {
97 found = 1;
98 break;
99 }
100 }
101 if (found) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
104 target_pc, hash);
105 kfree(mp_node);
106 if (merge_points_compare(stack, &lookup_node->stack)) {
107 printk(KERN_WARNING "Merge points differ for offset %lu\n",
108 target_pc);
109 return -EINVAL;
110 }
111 } else {
112 hlist_add_head(&mp_node->node, head);
113 }
114 return 0;
115 }
116
117 /*
118 * Binary comparators use top of stack and top of stack -1.
119 */
120 static
121 int bin_op_compare_check(struct vstack *stack, const char *str)
122 {
123 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
124 goto error_unknown;
125
126 switch (vstack_ax(stack)->type) {
127 default:
128 case REG_DOUBLE:
129 goto error_unknown;
130
131 case REG_STRING:
132 switch (vstack_bx(stack)->type) {
133 default:
134 case REG_DOUBLE:
135 goto error_unknown;
136
137 case REG_STRING:
138 break;
139 case REG_S64:
140 goto error_mismatch;
141 }
142 break;
143 case REG_S64:
144 switch (vstack_bx(stack)->type) {
145 default:
146 case REG_DOUBLE:
147 goto error_unknown;
148
149 case REG_STRING:
150 goto error_mismatch;
151
152 case REG_S64:
153 break;
154 }
155 break;
156 }
157 return 0;
158
159 error_unknown:
160 return -EINVAL;
161
162 error_mismatch:
163 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
164 return -EINVAL;
165 }
166
/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered.
 *
 * Checks that the opcode at @pc, including all of its operands, lies
 * entirely within [start_pc, start_pc + bytecode->len).
 *
 * Returns 0 when in range, -ERANGE when the instruction would read past
 * the end of the bytecode, -EINVAL for unknown or unsupported opcodes.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		char *start_pc, char *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		/* Double and unspecialized arithmetic ops are rejected here. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Generic (untyped) context ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		/*
		 * The immediate string operand is variable-length: make sure
		 * its NUL terminator is found before the end of the buffer.
		 */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	}

	return ret;
}
365
366 static
367 unsigned long delete_all_nodes(struct mp_table *mp_table)
368 {
369 struct mp_node *mp_node;
370 struct hlist_node *tmp;
371 unsigned long nr_nodes = 0;
372 int i;
373
374 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
375 struct hlist_head *head;
376
377 head = &mp_table->mp_head[i];
378 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
379 kfree(mp_node);
380 nr_nodes++;
381 }
382 }
383 return nr_nodes;
384 }
385
/*
 * Validate that the stack context left by the previous instruction is
 * compatible with the operand types expected by the instruction at @pc.
 *
 * Return value:
 * 0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		char *start_pc,
		char *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		/* No operand constraints to check for return. */
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		/* Double and unspecialized arithmetic ops are rejected. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Unspecialized comparators: both operand types must agree. */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, "==");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, "!=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, ">");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, "<");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, ">=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, "<=");
		if (ret)
			goto end;
		break;
	}

	/* Specialized string comparators: both operands must be strings. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized s64 comparators: both operands must be s64. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Skip target must be strictly forward: forbids loops. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): the switch above only lets REG_S64 through,
		 * so this REG_DOUBLE check appears unreachable as written.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Generic (untyped) context ref must have been specialized. */
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	}
end:
	return ret;
}
721
722 /*
723 * Return value:
724 * 0: success
725 * <0: error
726 */
727 static
728 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
729 struct mp_table *mp_table,
730 struct vstack *stack,
731 char *start_pc,
732 char *pc)
733 {
734 int ret, found = 0;
735 unsigned long target_pc = pc - start_pc;
736 unsigned long hash;
737 struct hlist_head *head;
738 struct mp_node *mp_node;
739
740 /* Validate the context resulting from the previous instruction */
741 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
742 if (ret)
743 return ret;
744
745 /* Validate merge points */
746 hash = jhash_1word(target_pc, 0);
747 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
748 lttng_hlist_for_each_entry(mp_node, head, node) {
749 if (lttng_hash_match(mp_node, target_pc)) {
750 found = 1;
751 break;
752 }
753 }
754 if (found) {
755 dbg_printk("Filter: validate merge point at offset %lu\n",
756 target_pc);
757 if (merge_points_compare(stack, &mp_node->stack)) {
758 printk(KERN_WARNING "Merge points differ for offset %lu\n",
759 target_pc);
760 return -EINVAL;
761 }
762 /* Once validated, we can remove the merge point */
763 dbg_printk("Filter: remove merge point at offset %lu\n",
764 target_pc);
765 hlist_del(&mp_node->node);
766 }
767 return 0;
768 }
769
/*
 * Symbolically execute the instruction at @pc: apply its stack effect to
 * @stack and advance *_next_pc past the instruction and its operands.
 * Logical and/or instructions additionally record a merge point at their
 * skip target so both execution paths can later be cross-checked.
 *
 * Return value:
 * >0: going to next insn.
 * 0: success, stop iteration.
 * <0: error
 */
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct mp_table *mp_table,
		struct vstack *stack,
		char **_next_pc,
		char *pc)
{
	int ret = 1;
	char *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		/* Return consumes the top of stack; it must not be empty. */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		/* End of bytecode reached: stop iteration. */
		ret = 0;
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		/* Double and unspecialized arithmetic ops are rejected. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		/* Pop 2, push 1 */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		/* Comparison result is always an s64. */
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/* Add merge point to table */
		merge_ret = merge_point_add_check(mp_table,
					insn->skip_offset, stack);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		/* Pop 1 when jump not taken */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		/* Generic (untyped) context ref must have been specialized. */
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		/* Push 1 string register. */
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		/* Push 1 s64 register. */
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		/*
		 * Variable-length immediate: advance past the NUL-terminated
		 * string (its bounds were checked by
		 * bytecode_validate_overflow()).
		 */
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		/* No stack effect. */
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}
1017
1018 /*
1019 * Never called concurrently (hash seed is shared).
1020 */
1021 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1022 {
1023 struct mp_table *mp_table;
1024 char *pc, *next_pc, *start_pc;
1025 int ret = -EINVAL;
1026 struct vstack stack;
1027
1028 vstack_init(&stack);
1029
1030 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1031 if (!mp_table) {
1032 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1033 return -ENOMEM;
1034 }
1035 start_pc = &bytecode->data[0];
1036 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1037 pc = next_pc) {
1038 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1039 if (ret != 0) {
1040 if (ret == -ERANGE)
1041 printk(KERN_WARNING "filter bytecode overflow\n");
1042 goto end;
1043 }
1044 dbg_printk("Validating op %s (%u)\n",
1045 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1046 (unsigned int) *(filter_opcode_t *) pc);
1047
1048 /*
1049 * For each instruction, validate the current context
1050 * (traversal of entire execution flow), and validate
1051 * all merge points targeting this instruction.
1052 */
1053 ret = validate_instruction_all_contexts(bytecode, mp_table,
1054 &stack, start_pc, pc);
1055 if (ret)
1056 goto end;
1057 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1058 if (ret <= 0)
1059 goto end;
1060 }
1061 end:
1062 if (delete_all_nodes(mp_table)) {
1063 if (!ret) {
1064 printk(KERN_WARNING "Unexpected merge points\n");
1065 ret = -EINVAL;
1066 }
1067 }
1068 kfree(mp_table);
1069 return ret;
1070 }
/* (gitweb page-generation footer removed) */