/* Fix: timer_expire_entry changed in 4.19.312 */
/* [lttng-modules.git] / lttng-filter-validator.c */
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-validator.c
4 *
5 * LTTng modules filter bytecode validator.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/types.h>
11 #include <linux/jhash.h>
12 #include <linux/slab.h>
13
14 #include <wrapper/list.h>
15 #include <lttng-filter.h>
16
/* Merge point hash table: 2^7 = 128 buckets. */
#define MERGE_POINT_TABLE_BITS		7
#define MERGE_POINT_TABLE_SIZE		(1U << MERGE_POINT_TABLE_BITS)

/*
 * Merge point table node: records the virtual stack typing state
 * expected at a given bytecode offset (target_pc) where control-flow
 * paths merge. Nodes are chained in a hash bucket keyed on target_pc.
 */
struct mp_node {
	struct hlist_node node;

	/* Context at merge point */
	struct vstack stack;
	unsigned long target_pc;
};

/* Hash table mapping bytecode offsets to their merge point context. */
struct mp_table {
	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
};
32
33 static
34 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
35 {
36 if (mp_node->target_pc == key_pc)
37 return 1;
38 else
39 return 0;
40 }
41
42 static
43 int merge_points_compare(const struct vstack *stacka,
44 const struct vstack *stackb)
45 {
46 int i, len;
47
48 if (stacka->top != stackb->top)
49 return 1;
50 len = stacka->top + 1;
51 WARN_ON_ONCE(len < 0);
52 for (i = 0; i < len; i++) {
53 if (stacka->e[i].type != stackb->e[i].type)
54 return 1;
55 }
56 return 0;
57 }
58
59 static
60 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
61 const struct vstack *stack)
62 {
63 struct mp_node *mp_node;
64 unsigned long hash = jhash_1word(target_pc, 0);
65 struct hlist_head *head;
66 struct mp_node *lookup_node;
67 int found = 0;
68
69 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
70 target_pc, hash);
71 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
72 if (!mp_node)
73 return -ENOMEM;
74 mp_node->target_pc = target_pc;
75 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
76
77 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
78 lttng_hlist_for_each_entry(lookup_node, head, node) {
79 if (lttng_hash_match(lookup_node, target_pc)) {
80 found = 1;
81 break;
82 }
83 }
84 if (found) {
85 /* Key already present */
86 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
87 target_pc, hash);
88 kfree(mp_node);
89 if (merge_points_compare(stack, &lookup_node->stack)) {
90 printk(KERN_WARNING "Merge points differ for offset %lu\n",
91 target_pc);
92 return -EINVAL;
93 }
94 } else {
95 hlist_add_head(&mp_node->node, head);
96 }
97 return 0;
98 }
99
/*
 * Binary comparators use top of stack (ax) and top of stack -1 (bx).
 * Return 0 if operand typing is known to be compatible, 1 if typing is
 * dynamic (unknown at validation time), negative error value on
 * incompatible or unknown operand types.
 */
static
int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
		const char *str)
{
	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
		goto error_empty;

	switch (vstack_ax(stack)->type) {
	default:
	case REG_DOUBLE:
		goto error_type;

	case REG_STRING:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
			break;
		case REG_STAR_GLOB_STRING:
			/* Glob patterns only support (in)equality. */
			if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
				goto error_mismatch;
			}
			break;
		case REG_S64:
			goto error_mismatch;
		}
		break;
	case REG_STAR_GLOB_STRING:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
			/* Glob patterns only support (in)equality. */
			if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
				goto error_mismatch;
			}
			break;
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			/* glob vs glob and glob vs integer are rejected. */
			goto error_mismatch;
		}
		break;
	case REG_S64:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			goto error_mismatch;
		case REG_S64:
			break;
		}
		break;
	case REG_TYPE_UNKNOWN:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			goto unknown;
		}
		break;
	}
	return 0;

unknown:
	return 1;

error_empty:
	printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
	return -EINVAL;

error_mismatch:
	printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
	return -EINVAL;

error_type:
	printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
	return -EINVAL;
}
194
/*
 * Binary bitwise operators use top of stack (ax) and top of stack -1 (bx).
 * Return 0 if typing is known to match, 1 if typing is dynamic
 * (unknown), negative error value on error.
 */
static
int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
		const char *str)
{
	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
		goto error_empty;

	switch (vstack_ax(stack)->type) {
	default:
	case REG_DOUBLE:
		goto error_type;

	case REG_TYPE_UNKNOWN:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			goto unknown;
		}
		break;
	case REG_S64:
		/* Only S64 op S64 (or unknown) is accepted for bitwise ops. */
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_S64:
			break;
		}
		break;
	}
	return 0;

unknown:
	return 1;

error_empty:
	printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
	return -EINVAL;

error_type:
	printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
	return -EINVAL;
}
249
250 static
251 int validate_get_symbol(struct bytecode_runtime *bytecode,
252 const struct get_symbol *sym)
253 {
254 const char *str, *str_limit;
255 size_t len_limit;
256
257 if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
258 return -EINVAL;
259
260 str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
261 str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
262 len_limit = str_limit - str;
263 if (strnlen(str, len_limit) == len_limit)
264 return -EINVAL;
265 return 0;
266 }
267
/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered: checks that the instruction
 * and its immediate operands fit entirely within the bytecode buffer.
 * Returns 0 on success, -ERANGE on overflow, -EINVAL on unknown or
 * unsupported opcode.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		char *start_pc, char *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	case FILTER_OP_RETURN_S64:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		/* Floating point is not supported in kernel bytecode. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_STAR_GLOB_STRING:
	case FILTER_OP_NE_STAR_GLOB_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_BIT_RSHIFT:
	case FILTER_OP_BIT_LSHIFT:
	case FILTER_OP_BIT_AND:
	case FILTER_OP_BIT_OR:
	case FILTER_OP_BIT_XOR:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	case FILTER_OP_UNARY_BIT_NOT:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	case FILTER_OP_LOAD_STAR_GLOB_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		/* The string immediate must be NUL-terminated in range. */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	case FILTER_OP_GET_CONTEXT_ROOT:
	case FILTER_OP_GET_APP_CONTEXT_ROOT:
	case FILTER_OP_GET_PAYLOAD_ROOT:
	case FILTER_OP_LOAD_FIELD:
	case FILTER_OP_LOAD_FIELD_S8:
	case FILTER_OP_LOAD_FIELD_S16:
	case FILTER_OP_LOAD_FIELD_S32:
	case FILTER_OP_LOAD_FIELD_S64:
	case FILTER_OP_LOAD_FIELD_U8:
	case FILTER_OP_LOAD_FIELD_U16:
	case FILTER_OP_LOAD_FIELD_U32:
	case FILTER_OP_LOAD_FIELD_U64:
	case FILTER_OP_LOAD_FIELD_STRING:
	case FILTER_OP_LOAD_FIELD_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_DOUBLE:
		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;

	case FILTER_OP_GET_SYMBOL:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}
		/* Also validate the symbol string it points into. */
		ret = validate_get_symbol(bytecode, sym);
		break;
	}

	case FILTER_OP_GET_SYMBOL_FIELD:
		printk(KERN_WARNING "Unexpected get symbol field\n");
		ret = -EINVAL;
		break;

	case FILTER_OP_GET_INDEX_U16:
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;

	case FILTER_OP_GET_INDEX_U64:
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	return ret;
}
528
529 static
530 unsigned long delete_all_nodes(struct mp_table *mp_table)
531 {
532 struct mp_node *mp_node;
533 struct hlist_node *tmp;
534 unsigned long nr_nodes = 0;
535 int i;
536
537 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
538 struct hlist_head *head;
539
540 head = &mp_table->mp_head[i];
541 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
542 kfree(mp_node);
543 nr_nodes++;
544 }
545 }
546 return nr_nodes;
547 }
548
/*
 * Validate that the current virtual stack typing context is acceptable
 * as input for the instruction at pc. Does not modify the stack.
 *
 * Return value:
 * >=0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		char *start_pc,
		char *pc)
{
	int ret = 0;
	const filter_opcode_t opcode = *(filter_opcode_t *) pc;

	switch (opcode) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	case FILTER_OP_RETURN_S64:
	{
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		/* Floating point is not supported in kernel bytecode. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, opcode, "==");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, opcode, "!=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, opcode, ">");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, opcode, "<");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, opcode, ">=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, opcode, "<=");
		if (ret < 0)
			goto end;
		break;
	}

	/* Specialized string comparators require string operands. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* At least one operand must be a glob pattern. */
	case FILTER_OP_EQ_STAR_GLOB_STRING:
	case FILTER_OP_NE_STAR_GLOB_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
				&& vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
			printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized s64 comparators require both operands S64. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_BIT_RSHIFT:
		ret = bin_op_bitwise_check(stack, opcode, ">>");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_LSHIFT:
		ret = bin_op_bitwise_check(stack, opcode, "<<");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_AND:
		ret = bin_op_bitwise_check(stack, opcode, "&");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_OR:
		ret = bin_op_bitwise_check(stack, opcode, "|");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_XOR:
		ret = bin_op_bitwise_check(stack, opcode, "^");
		if (ret < 0)
			goto end;
		break;

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
		case REG_TYPE_UNKNOWN:
			break;
		}
		break;
	}
	case FILTER_OP_UNARY_BIT_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_DOUBLE:
			printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_TYPE_UNKNOWN:
			break;
		}
		break;
	}

	/* Specialized unary ops require an S64 operand. */
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Only forward jumps are allowed: reject loops. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand: no context constraints. */
	case FILTER_OP_LOAD_STRING:
	case FILTER_OP_LOAD_STAR_GLOB_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): within this case insn->op can only be
		 * FILTER_OP_CAST_TO_S64, and REG_DOUBLE was rejected above,
		 * so this branch appears unreachable — kept as defensive
		 * check; confirm against upstream before removing.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/*
	 * Instructions for recursive traversal through composed types.
	 * These have no stack-typing preconditions at this pass.
	 */
	case FILTER_OP_GET_CONTEXT_ROOT:
	{
		dbg_printk("Validate get context root\n");
		break;
	}
	case FILTER_OP_GET_APP_CONTEXT_ROOT:
	{
		dbg_printk("Validate get app context root\n");
		break;
	}
	case FILTER_OP_GET_PAYLOAD_ROOT:
	{
		dbg_printk("Validate get payload root\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD:
	{
		/*
		 * We tolerate that field type is unknown at validation,
		 * because we are performing the load specialization in
		 * a phase after validation.
		 */
		dbg_printk("Validate load field\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S8:
	{
		dbg_printk("Validate load field s8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S16:
	{
		dbg_printk("Validate load field s16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S32:
	{
		dbg_printk("Validate load field s32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S64:
	{
		dbg_printk("Validate load field s64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U8:
	{
		dbg_printk("Validate load field u8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U16:
	{
		dbg_printk("Validate load field u16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U32:
	{
		dbg_printk("Validate load field u32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U64:
	{
		dbg_printk("Validate load field u64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_STRING:
	{
		dbg_printk("Validate load field string\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_SEQUENCE:
	{
		dbg_printk("Validate load field sequence\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_DOUBLE:
	{
		dbg_printk("Validate load field double\n");
		break;
	}

	case FILTER_OP_GET_SYMBOL:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_SYMBOL_FIELD:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol field offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_INDEX_U16:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;

		dbg_printk("Validate get index u16 index %u\n", get_index->index);
		break;
	}

	case FILTER_OP_GET_INDEX_U64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;

		dbg_printk("Validate get index u64 index %llu\n",
			(unsigned long long) get_index->index);
		break;
	}
	}
end:
	return ret;
}
1075
1076 /*
1077 * Return value:
1078 * 0: success
1079 * <0: error
1080 */
1081 static
1082 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
1083 struct mp_table *mp_table,
1084 struct vstack *stack,
1085 char *start_pc,
1086 char *pc)
1087 {
1088 int ret, found = 0;
1089 unsigned long target_pc = pc - start_pc;
1090 unsigned long hash;
1091 struct hlist_head *head;
1092 struct mp_node *mp_node;
1093
1094 /* Validate the context resulting from the previous instruction */
1095 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
1096 if (ret < 0)
1097 return ret;
1098
1099 /* Validate merge points */
1100 hash = jhash_1word(target_pc, 0);
1101 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
1102 lttng_hlist_for_each_entry(mp_node, head, node) {
1103 if (lttng_hash_match(mp_node, target_pc)) {
1104 found = 1;
1105 break;
1106 }
1107 }
1108 if (found) {
1109 dbg_printk("Filter: validate merge point at offset %lu\n",
1110 target_pc);
1111 if (merge_points_compare(stack, &mp_node->stack)) {
1112 printk(KERN_WARNING "Merge points differ for offset %lu\n",
1113 target_pc);
1114 return -EINVAL;
1115 }
1116 /* Once validated, we can remove the merge point */
1117 dbg_printk("Filter: remove merge point at offset %lu\n",
1118 target_pc);
1119 hlist_del(&mp_node->node);
1120 }
1121 return 0;
1122 }
1123
1124 /*
1125 * Return value:
1126 * >0: going to next insn.
1127 * 0: success, stop iteration.
1128 * <0: error
1129 */
1130 static
1131 int exec_insn(struct bytecode_runtime *bytecode,
1132 struct mp_table *mp_table,
1133 struct vstack *stack,
1134 char **_next_pc,
1135 char *pc)
1136 {
1137 int ret = 1;
1138 char *next_pc = *_next_pc;
1139
1140 switch (*(filter_opcode_t *) pc) {
1141 case FILTER_OP_UNKNOWN:
1142 default:
1143 {
1144 printk(KERN_WARNING "unknown bytecode op %u\n",
1145 (unsigned int) *(filter_opcode_t *) pc);
1146 ret = -EINVAL;
1147 goto end;
1148 }
1149
1150 case FILTER_OP_RETURN:
1151 {
1152 if (!vstack_ax(stack)) {
1153 printk(KERN_WARNING "Empty stack\n");
1154 ret = -EINVAL;
1155 goto end;
1156 }
1157 switch (vstack_ax(stack)->type) {
1158 case REG_S64:
1159 case REG_TYPE_UNKNOWN:
1160 break;
1161 default:
1162 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1163 (int) vstack_ax(stack)->type);
1164 ret = -EINVAL;
1165 goto end;
1166 }
1167
1168 ret = 0;
1169 goto end;
1170 }
1171
1172 case FILTER_OP_RETURN_S64:
1173 {
1174 if (!vstack_ax(stack)) {
1175 printk(KERN_WARNING "Empty stack\n");
1176 ret = -EINVAL;
1177 goto end;
1178 }
1179 switch (vstack_ax(stack)->type) {
1180 case REG_S64:
1181 break;
1182 default:
1183 case REG_TYPE_UNKNOWN:
1184 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1185 (int) vstack_ax(stack)->type);
1186 ret = -EINVAL;
1187 goto end;
1188 }
1189
1190 ret = 0;
1191 goto end;
1192 }
1193
1194 /* binary */
1195 case FILTER_OP_MUL:
1196 case FILTER_OP_DIV:
1197 case FILTER_OP_MOD:
1198 case FILTER_OP_PLUS:
1199 case FILTER_OP_MINUS:
1200 /* Floating point */
1201 case FILTER_OP_EQ_DOUBLE:
1202 case FILTER_OP_NE_DOUBLE:
1203 case FILTER_OP_GT_DOUBLE:
1204 case FILTER_OP_LT_DOUBLE:
1205 case FILTER_OP_GE_DOUBLE:
1206 case FILTER_OP_LE_DOUBLE:
1207 case FILTER_OP_EQ_DOUBLE_S64:
1208 case FILTER_OP_NE_DOUBLE_S64:
1209 case FILTER_OP_GT_DOUBLE_S64:
1210 case FILTER_OP_LT_DOUBLE_S64:
1211 case FILTER_OP_GE_DOUBLE_S64:
1212 case FILTER_OP_LE_DOUBLE_S64:
1213 case FILTER_OP_EQ_S64_DOUBLE:
1214 case FILTER_OP_NE_S64_DOUBLE:
1215 case FILTER_OP_GT_S64_DOUBLE:
1216 case FILTER_OP_LT_S64_DOUBLE:
1217 case FILTER_OP_GE_S64_DOUBLE:
1218 case FILTER_OP_LE_S64_DOUBLE:
1219 case FILTER_OP_UNARY_PLUS_DOUBLE:
1220 case FILTER_OP_UNARY_MINUS_DOUBLE:
1221 case FILTER_OP_UNARY_NOT_DOUBLE:
1222 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1223 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1224 case FILTER_OP_LOAD_DOUBLE:
1225 case FILTER_OP_CAST_DOUBLE_TO_S64:
1226 {
1227 printk(KERN_WARNING "unsupported bytecode op %u\n",
1228 (unsigned int) *(filter_opcode_t *) pc);
1229 ret = -EINVAL;
1230 goto end;
1231 }
1232
1233 case FILTER_OP_EQ:
1234 case FILTER_OP_NE:
1235 case FILTER_OP_GT:
1236 case FILTER_OP_LT:
1237 case FILTER_OP_GE:
1238 case FILTER_OP_LE:
1239 case FILTER_OP_EQ_STRING:
1240 case FILTER_OP_NE_STRING:
1241 case FILTER_OP_GT_STRING:
1242 case FILTER_OP_LT_STRING:
1243 case FILTER_OP_GE_STRING:
1244 case FILTER_OP_LE_STRING:
1245 case FILTER_OP_EQ_STAR_GLOB_STRING:
1246 case FILTER_OP_NE_STAR_GLOB_STRING:
1247 case FILTER_OP_EQ_S64:
1248 case FILTER_OP_NE_S64:
1249 case FILTER_OP_GT_S64:
1250 case FILTER_OP_LT_S64:
1251 case FILTER_OP_GE_S64:
1252 case FILTER_OP_LE_S64:
1253 case FILTER_OP_BIT_RSHIFT:
1254 case FILTER_OP_BIT_LSHIFT:
1255 case FILTER_OP_BIT_AND:
1256 case FILTER_OP_BIT_OR:
1257 case FILTER_OP_BIT_XOR:
1258 {
1259 /* Pop 2, push 1 */
1260 if (vstack_pop(stack)) {
1261 ret = -EINVAL;
1262 goto end;
1263 }
1264 if (!vstack_ax(stack)) {
1265 printk(KERN_WARNING "Empty stack\n");
1266 ret = -EINVAL;
1267 goto end;
1268 }
1269 switch (vstack_ax(stack)->type) {
1270 case REG_S64:
1271 case REG_DOUBLE:
1272 case REG_STRING:
1273 case REG_STAR_GLOB_STRING:
1274 case REG_TYPE_UNKNOWN:
1275 break;
1276 default:
1277 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1278 (int) vstack_ax(stack)->type);
1279 ret = -EINVAL;
1280 goto end;
1281 }
1282
1283 vstack_ax(stack)->type = REG_S64;
1284 next_pc += sizeof(struct binary_op);
1285 break;
1286 }
1287
1288 /* unary */
1289 case FILTER_OP_UNARY_PLUS:
1290 case FILTER_OP_UNARY_MINUS:
1291 {
1292 /* Pop 1, push 1 */
1293 if (!vstack_ax(stack)) {
1294 printk(KERN_WARNING "Empty stack\n\n");
1295 ret = -EINVAL;
1296 goto end;
1297 }
1298 switch (vstack_ax(stack)->type) {
1299 case REG_S64:
1300 case REG_TYPE_UNKNOWN:
1301 break;
1302 default:
1303 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1304 (int) vstack_ax(stack)->type);
1305 ret = -EINVAL;
1306 goto end;
1307 }
1308
1309 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1310 next_pc += sizeof(struct unary_op);
1311 break;
1312 }
1313
1314 case FILTER_OP_UNARY_PLUS_S64:
1315 case FILTER_OP_UNARY_MINUS_S64:
1316 case FILTER_OP_UNARY_NOT_S64:
1317 {
1318 /* Pop 1, push 1 */
1319 if (!vstack_ax(stack)) {
1320 printk(KERN_WARNING "Empty stack\n\n");
1321 ret = -EINVAL;
1322 goto end;
1323 }
1324 switch (vstack_ax(stack)->type) {
1325 case REG_S64:
1326 break;
1327 default:
1328 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1329 (int) vstack_ax(stack)->type);
1330 ret = -EINVAL;
1331 goto end;
1332 }
1333
1334 vstack_ax(stack)->type = REG_S64;
1335 next_pc += sizeof(struct unary_op);
1336 break;
1337 }
1338
1339 case FILTER_OP_UNARY_NOT:
1340 {
1341 /* Pop 1, push 1 */
1342 if (!vstack_ax(stack)) {
1343 printk(KERN_WARNING "Empty stack\n\n");
1344 ret = -EINVAL;
1345 goto end;
1346 }
1347 switch (vstack_ax(stack)->type) {
1348 case REG_S64:
1349 case REG_TYPE_UNKNOWN:
1350 break;
1351 default:
1352 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1353 (int) vstack_ax(stack)->type);
1354 ret = -EINVAL;
1355 goto end;
1356 }
1357
1358 vstack_ax(stack)->type = REG_S64;
1359 next_pc += sizeof(struct unary_op);
1360 break;
1361 }
1362
1363 case FILTER_OP_UNARY_BIT_NOT:
1364 {
1365 /* Pop 1, push 1 */
1366 if (!vstack_ax(stack)) {
1367 printk(KERN_WARNING "Empty stack\n");
1368 ret = -EINVAL;
1369 goto end;
1370 }
1371 switch (vstack_ax(stack)->type) {
1372 case REG_S64:
1373 case REG_TYPE_UNKNOWN:
1374 break;
1375 case REG_DOUBLE:
1376 default:
1377 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1378 (int) vstack_ax(stack)->type);
1379 ret = -EINVAL;
1380 goto end;
1381 }
1382
1383 vstack_ax(stack)->type = REG_S64;
1384 next_pc += sizeof(struct unary_op);
1385 break;
1386 }
1387
1388 /* logical */
1389 case FILTER_OP_AND:
1390 case FILTER_OP_OR:
1391 {
1392 struct logical_op *insn = (struct logical_op *) pc;
1393 int merge_ret;
1394
1395 /* Add merge point to table */
1396 merge_ret = merge_point_add_check(mp_table,
1397 insn->skip_offset, stack);
1398 if (merge_ret) {
1399 ret = merge_ret;
1400 goto end;
1401 }
1402
1403 if (!vstack_ax(stack)) {
1404 printk(KERN_WARNING "Empty stack\n\n");
1405 ret = -EINVAL;
1406 goto end;
1407 }
1408 /* There is always a cast-to-s64 operation before a or/and op. */
1409 switch (vstack_ax(stack)->type) {
1410 case REG_S64:
1411 break;
1412 default:
1413 printk(KERN_WARNING "Incorrect register type %d for operation\n",
1414 (int) vstack_ax(stack)->type);
1415 ret = -EINVAL;
1416 goto end;
1417 }
1418
1419 /* Continue to next instruction */
1420 /* Pop 1 when jump not taken */
1421 if (vstack_pop(stack)) {
1422 ret = -EINVAL;
1423 goto end;
1424 }
1425 next_pc += sizeof(struct logical_op);
1426 break;
1427 }
1428
1429 /* load field ref */
1430 case FILTER_OP_LOAD_FIELD_REF:
1431 {
1432 printk(KERN_WARNING "Unknown field ref type\n");
1433 ret = -EINVAL;
1434 goto end;
1435 }
1436 /* get context ref */
1437 case FILTER_OP_GET_CONTEXT_REF:
1438 {
1439 printk(KERN_WARNING "Unknown get context ref type\n");
1440 ret = -EINVAL;
1441 goto end;
1442 }
1443 case FILTER_OP_LOAD_FIELD_REF_STRING:
1444 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1445 case FILTER_OP_GET_CONTEXT_REF_STRING:
1446 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
1447 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
1448 {
1449 if (vstack_push(stack)) {
1450 ret = -EINVAL;
1451 goto end;
1452 }
1453 vstack_ax(stack)->type = REG_STRING;
1454 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1455 break;
1456 }
1457 case FILTER_OP_LOAD_FIELD_REF_S64:
1458 case FILTER_OP_GET_CONTEXT_REF_S64:
1459 {
1460 if (vstack_push(stack)) {
1461 ret = -EINVAL;
1462 goto end;
1463 }
1464 vstack_ax(stack)->type = REG_S64;
1465 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1466 break;
1467 }
1468
1469 /* load from immediate operand */
1470 case FILTER_OP_LOAD_STRING:
1471 {
1472 struct load_op *insn = (struct load_op *) pc;
1473
1474 if (vstack_push(stack)) {
1475 ret = -EINVAL;
1476 goto end;
1477 }
1478 vstack_ax(stack)->type = REG_STRING;
1479 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1480 break;
1481 }
1482
1483 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1484 {
1485 struct load_op *insn = (struct load_op *) pc;
1486
1487 if (vstack_push(stack)) {
1488 ret = -EINVAL;
1489 goto end;
1490 }
1491 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1492 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1493 break;
1494 }
1495
1496 case FILTER_OP_LOAD_S64:
1497 {
1498 if (vstack_push(stack)) {
1499 ret = -EINVAL;
1500 goto end;
1501 }
1502 vstack_ax(stack)->type = REG_S64;
1503 next_pc += sizeof(struct load_op)
1504 + sizeof(struct literal_numeric);
1505 break;
1506 }
1507
1508 case FILTER_OP_CAST_TO_S64:
1509 {
1510 /* Pop 1, push 1 */
1511 if (!vstack_ax(stack)) {
1512 printk(KERN_WARNING "Empty stack\n");
1513 ret = -EINVAL;
1514 goto end;
1515 }
1516 switch (vstack_ax(stack)->type) {
1517 case REG_S64:
1518 case REG_DOUBLE:
1519 case REG_TYPE_UNKNOWN:
1520 break;
1521 default:
1522 printk(KERN_WARNING "Incorrect register type %d for cast\n",
1523 (int) vstack_ax(stack)->type);
1524 ret = -EINVAL;
1525 goto end;
1526 }
1527 vstack_ax(stack)->type = REG_S64;
1528 next_pc += sizeof(struct cast_op);
1529 break;
1530 }
1531 case FILTER_OP_CAST_NOP:
1532 {
1533 next_pc += sizeof(struct cast_op);
1534 break;
1535 }
1536
1537 /*
1538 * Instructions for recursive traversal through composed types.
1539 */
1540 case FILTER_OP_GET_CONTEXT_ROOT:
1541 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1542 case FILTER_OP_GET_PAYLOAD_ROOT:
1543 {
1544 if (vstack_push(stack)) {
1545 ret = -EINVAL;
1546 goto end;
1547 }
1548 vstack_ax(stack)->type = REG_PTR;
1549 next_pc += sizeof(struct load_op);
1550 break;
1551 }
1552
1553 case FILTER_OP_LOAD_FIELD:
1554 {
1555 /* Pop 1, push 1 */
1556 if (!vstack_ax(stack)) {
1557 printk(KERN_WARNING "Empty stack\n\n");
1558 ret = -EINVAL;
1559 goto end;
1560 }
1561 if (vstack_ax(stack)->type != REG_PTR) {
1562 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1563 ret = -EINVAL;
1564 goto end;
1565 }
1566 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1567 next_pc += sizeof(struct load_op);
1568 break;
1569 }
1570
1571 case FILTER_OP_LOAD_FIELD_S8:
1572 case FILTER_OP_LOAD_FIELD_S16:
1573 case FILTER_OP_LOAD_FIELD_S32:
1574 case FILTER_OP_LOAD_FIELD_S64:
1575 case FILTER_OP_LOAD_FIELD_U8:
1576 case FILTER_OP_LOAD_FIELD_U16:
1577 case FILTER_OP_LOAD_FIELD_U32:
1578 case FILTER_OP_LOAD_FIELD_U64:
1579 {
1580 /* Pop 1, push 1 */
1581 if (!vstack_ax(stack)) {
1582 printk(KERN_WARNING "Empty stack\n\n");
1583 ret = -EINVAL;
1584 goto end;
1585 }
1586 if (vstack_ax(stack)->type != REG_PTR) {
1587 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1588 ret = -EINVAL;
1589 goto end;
1590 }
1591 vstack_ax(stack)->type = REG_S64;
1592 next_pc += sizeof(struct load_op);
1593 break;
1594 }
1595
1596 case FILTER_OP_LOAD_FIELD_STRING:
1597 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1598 {
1599 /* Pop 1, push 1 */
1600 if (!vstack_ax(stack)) {
1601 printk(KERN_WARNING "Empty stack\n\n");
1602 ret = -EINVAL;
1603 goto end;
1604 }
1605 if (vstack_ax(stack)->type != REG_PTR) {
1606 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1607 ret = -EINVAL;
1608 goto end;
1609 }
1610 vstack_ax(stack)->type = REG_STRING;
1611 next_pc += sizeof(struct load_op);
1612 break;
1613 }
1614
1615 case FILTER_OP_LOAD_FIELD_DOUBLE:
1616 {
1617 /* Pop 1, push 1 */
1618 if (!vstack_ax(stack)) {
1619 printk(KERN_WARNING "Empty stack\n\n");
1620 ret = -EINVAL;
1621 goto end;
1622 }
1623 if (vstack_ax(stack)->type != REG_PTR) {
1624 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1625 ret = -EINVAL;
1626 goto end;
1627 }
1628 vstack_ax(stack)->type = REG_DOUBLE;
1629 next_pc += sizeof(struct load_op);
1630 break;
1631 }
1632
1633 case FILTER_OP_GET_SYMBOL:
1634 case FILTER_OP_GET_SYMBOL_FIELD:
1635 {
1636 /* Pop 1, push 1 */
1637 if (!vstack_ax(stack)) {
1638 printk(KERN_WARNING "Empty stack\n\n");
1639 ret = -EINVAL;
1640 goto end;
1641 }
1642 if (vstack_ax(stack)->type != REG_PTR) {
1643 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1644 ret = -EINVAL;
1645 goto end;
1646 }
1647 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1648 break;
1649 }
1650
1651 case FILTER_OP_GET_INDEX_U16:
1652 {
1653 /* Pop 1, push 1 */
1654 if (!vstack_ax(stack)) {
1655 printk(KERN_WARNING "Empty stack\n\n");
1656 ret = -EINVAL;
1657 goto end;
1658 }
1659 if (vstack_ax(stack)->type != REG_PTR) {
1660 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1661 ret = -EINVAL;
1662 goto end;
1663 }
1664 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1665 break;
1666 }
1667
1668 case FILTER_OP_GET_INDEX_U64:
1669 {
1670 /* Pop 1, push 1 */
1671 if (!vstack_ax(stack)) {
1672 printk(KERN_WARNING "Empty stack\n\n");
1673 ret = -EINVAL;
1674 goto end;
1675 }
1676 if (vstack_ax(stack)->type != REG_PTR) {
1677 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1678 ret = -EINVAL;
1679 goto end;
1680 }
1681 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1682 break;
1683 }
1684
1685 }
1686 end:
1687 *_next_pc = next_pc;
1688 return ret;
1689 }
1690
1691 /*
1692 * Never called concurrently (hash seed is shared).
1693 */
1694 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1695 {
1696 struct mp_table *mp_table;
1697 char *pc, *next_pc, *start_pc;
1698 int ret = -EINVAL;
1699 struct vstack stack;
1700
1701 vstack_init(&stack);
1702
1703 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1704 if (!mp_table) {
1705 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1706 return -ENOMEM;
1707 }
1708 start_pc = &bytecode->code[0];
1709 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1710 pc = next_pc) {
1711 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1712 if (ret != 0) {
1713 if (ret == -ERANGE)
1714 printk(KERN_WARNING "filter bytecode overflow\n");
1715 goto end;
1716 }
1717 dbg_printk("Validating op %s (%u)\n",
1718 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1719 (unsigned int) *(filter_opcode_t *) pc);
1720
1721 /*
1722 * For each instruction, validate the current context
1723 * (traversal of entire execution flow), and validate
1724 * all merge points targeting this instruction.
1725 */
1726 ret = validate_instruction_all_contexts(bytecode, mp_table,
1727 &stack, start_pc, pc);
1728 if (ret)
1729 goto end;
1730 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1731 if (ret <= 0)
1732 goto end;
1733 }
1734 end:
1735 if (delete_all_nodes(mp_table)) {
1736 if (!ret) {
1737 printk(KERN_WARNING "Unexpected merge points\n");
1738 ret = -EINVAL;
1739 }
1740 }
1741 kfree(mp_table);
1742 return ret;
1743 }