7780f64ad3fe1e7fa8f846ae327c1220dd882a14
[lttng-modules.git] / lttng-filter-specialize.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-specialize.c
4 *
5 * LTTng modules filter code specializer.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/slab.h>
11 #include <lttng-filter.h>
12 #include "lib/align.h"
13
14 #include <wrapper/compiler_attributes.h>
15
/*
 * Reserve @len bytes, aligned on @align, within the runtime data area,
 * growing the backing allocation as needed.
 *
 * Returns the offset of the reserved (aligned) space within
 * runtime->data, -EINVAL if the new length would exceed
 * FILTER_MAX_DATA_LEN, or -ENOMEM on allocation failure.
 */
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		/*
		 * Grow geometrically (next power of two, at least double
		 * the old size) to amortize reallocation cost.
		 */
		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory beyond the old allocation. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	/* Consume the alignment padding, then hand out the aligned offset. */
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}
46
47 static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
48 const void *p, size_t align, size_t len)
49 {
50 ssize_t offset;
51
52 offset = bytecode_reserve_data(runtime, align, len);
53 if (offset < 0)
54 return -ENOMEM;
55 memcpy(&runtime->data[offset], p, len);
56 return offset;
57 }
58
/*
 * Specialize a FILTER_OP_LOAD_FIELD instruction based on the type of
 * the object currently on top of the virtual stack: rewrite the opcode
 * into its type-specific variant and record the resulting register
 * type in @stack_top.
 *
 * Returns 0 on success, -EINVAL when the object cannot be loaded
 * (root pointer without field name, double, dynamic, or nested types).
 */
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	/*
	 * NOTE(review): for reverse byte order fields the opcode is left as
	 * the generic FILTER_OP_LOAD_FIELD — presumably the interpreter's
	 * generic path performs the byte swap; confirm against
	 * lttng-filter-interpreter.c.
	 */
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		/* Single byte: byte order is irrelevant, always specialize. */
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		/* No floating point support in the kernel interpreter. */
		printk(KERN_WARNING "Double type unsupported\n\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}
153
154 static int specialize_get_index_object_type(enum object_type *otype,
155 int signedness, uint32_t elem_len)
156 {
157 switch (elem_len) {
158 case 8:
159 if (signedness)
160 *otype = OBJECT_TYPE_S8;
161 else
162 *otype = OBJECT_TYPE_U8;
163 break;
164 case 16:
165 if (signedness)
166 *otype = OBJECT_TYPE_S16;
167 else
168 *otype = OBJECT_TYPE_U16;
169 break;
170 case 32:
171 if (signedness)
172 *otype = OBJECT_TYPE_S32;
173 else
174 *otype = OBJECT_TYPE_U32;
175 break;
176 case 64:
177 if (signedness)
178 *otype = OBJECT_TYPE_S64;
179 else
180 *otype = OBJECT_TYPE_U64;
181 break;
182 default:
183 return -EINVAL;
184 }
185 return 0;
186 }
187
/*
 * Specialize a get_index operation applied to the object on top of the
 * virtual stack: validate the access, compute the element byte offset,
 * append a struct filter_get_index_data descriptor to the runtime data
 * area, and overwrite the instruction's inline index field (u16 or
 * u64, selected by @idx_len) with the descriptor's offset.
 *
 * Returns 0 on success, -EINVAL on out-of-bound index, unsupported
 * element type, or data area overflow.
 */
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			/* Array length is static: bound-check at specialization time. */
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/*
			 * Sequence length is only known at runtime: no static
			 * bound check, and gid.array_len stays 0.
			 */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:
			lttng_fallthrough;
		default:
			printk(KERN_WARNING "Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	/* Persist the descriptor; its offset becomes the new inline index. */
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
290
291 static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
292 struct load_op *insn)
293 {
294 uint16_t offset;
295 const char *name;
296
297 offset = ((struct get_symbol *) insn->data)->offset;
298 name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
299 return lttng_get_context_index(lttng_static_ctx, name);
300 }
301
/*
 * Fill @load with the object type corresponding to @field's event
 * field type. @is_context selects the context-field mapping, where
 * arrays and sequences are always treated as strings.
 *
 * Returns 0 on success, -EINVAL for unsupported or unloadable types.
 */
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * Integer fields are all laid out as s64 on the interpreter stack
	 * for the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		/* Enumerations are loaded as their integer container type. */
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			/*
			 * Non-encoded integer arrays are indexable objects;
			 * encoded (text) arrays behave as strings.
			 */
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			/* Same encoding split as arrays, see above. */
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
379
/*
 * Specialize a get_symbol lookup under the context root: resolve the
 * context field by name, then rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 whose index points at a
 * filter_get_index_data descriptor carrying the context index.
 *
 * Returns 0 on success, -ENOENT when the context field does not
 * exist, -EINVAL on unsupported field type or data area overflow,
 * or another negative error from specialize_load_object().
 */
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	/* Context fields use the is_context mapping (arrays -> strings). */
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}
413
/*
 * Specialize a get_symbol lookup under the payload root: find the
 * named field in the event descriptor, compute its byte offset within
 * the interpreter's stack frame layout, and rewrite the instruction
 * into a FILTER_OP_GET_INDEX_U16 referencing a filter_get_index_data
 * descriptor carrying that offset.
 *
 * Returns 0 on success, -EINVAL when the field is unknown, of an
 * unsupported type, or when the data area overflows.
 */
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/*
		 * compute field offset on stack: each preceding field
		 * contributes a fixed-size slot depending on its type.
		 */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			/* Length word followed by a data pointer. */
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
486
487 int lttng_filter_specialize_bytecode(struct lttng_event *event,
488 struct bytecode_runtime *bytecode)
489 {
490 void *pc, *next_pc, *start_pc;
491 int ret = -EINVAL;
492 struct vstack _stack;
493 struct vstack *stack = &_stack;
494
495 vstack_init(stack);
496
497 start_pc = &bytecode->code[0];
498 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
499 pc = next_pc) {
500 switch (*(filter_opcode_t *) pc) {
501 case FILTER_OP_UNKNOWN:
502 default:
503 printk(KERN_WARNING "unknown bytecode op %u\n",
504 (unsigned int) *(filter_opcode_t *) pc);
505 ret = -EINVAL;
506 goto end;
507
508 case FILTER_OP_RETURN:
509 case FILTER_OP_RETURN_S64:
510 ret = 0;
511 goto end;
512
513 /* binary */
514 case FILTER_OP_MUL:
515 case FILTER_OP_DIV:
516 case FILTER_OP_MOD:
517 case FILTER_OP_PLUS:
518 case FILTER_OP_MINUS:
519 printk(KERN_WARNING "unsupported bytecode op %u\n",
520 (unsigned int) *(filter_opcode_t *) pc);
521 ret = -EINVAL;
522 goto end;
523
524 case FILTER_OP_EQ:
525 {
526 struct binary_op *insn = (struct binary_op *) pc;
527
528 switch(vstack_ax(stack)->type) {
529 default:
530 printk(KERN_WARNING "unknown register type\n");
531 ret = -EINVAL;
532 goto end;
533
534 case REG_STRING:
535 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
536 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
537 else
538 insn->op = FILTER_OP_EQ_STRING;
539 break;
540 case REG_STAR_GLOB_STRING:
541 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
542 break;
543 case REG_S64:
544 if (vstack_bx(stack)->type == REG_S64)
545 insn->op = FILTER_OP_EQ_S64;
546 else
547 insn->op = FILTER_OP_EQ_DOUBLE_S64;
548 break;
549 case REG_DOUBLE:
550 if (vstack_bx(stack)->type == REG_S64)
551 insn->op = FILTER_OP_EQ_S64_DOUBLE;
552 else
553 insn->op = FILTER_OP_EQ_DOUBLE;
554 break;
555 }
556 /* Pop 2, push 1 */
557 if (vstack_pop(stack)) {
558 ret = -EINVAL;
559 goto end;
560 }
561 vstack_ax(stack)->type = REG_S64;
562 next_pc += sizeof(struct binary_op);
563 break;
564 }
565
566 case FILTER_OP_NE:
567 {
568 struct binary_op *insn = (struct binary_op *) pc;
569
570 switch(vstack_ax(stack)->type) {
571 default:
572 printk(KERN_WARNING "unknown register type\n");
573 ret = -EINVAL;
574 goto end;
575
576 case REG_STRING:
577 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
578 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
579 else
580 insn->op = FILTER_OP_NE_STRING;
581 break;
582 case REG_STAR_GLOB_STRING:
583 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
584 break;
585 case REG_S64:
586 if (vstack_bx(stack)->type == REG_S64)
587 insn->op = FILTER_OP_NE_S64;
588 else
589 insn->op = FILTER_OP_NE_DOUBLE_S64;
590 break;
591 case REG_DOUBLE:
592 if (vstack_bx(stack)->type == REG_S64)
593 insn->op = FILTER_OP_NE_S64_DOUBLE;
594 else
595 insn->op = FILTER_OP_NE_DOUBLE;
596 break;
597 }
598 /* Pop 2, push 1 */
599 if (vstack_pop(stack)) {
600 ret = -EINVAL;
601 goto end;
602 }
603 vstack_ax(stack)->type = REG_S64;
604 next_pc += sizeof(struct binary_op);
605 break;
606 }
607
608 case FILTER_OP_GT:
609 {
610 struct binary_op *insn = (struct binary_op *) pc;
611
612 switch(vstack_ax(stack)->type) {
613 default:
614 printk(KERN_WARNING "unknown register type\n");
615 ret = -EINVAL;
616 goto end;
617
618 case REG_STAR_GLOB_STRING:
619 printk(KERN_WARNING "invalid register type for > binary operator\n");
620 ret = -EINVAL;
621 goto end;
622 case REG_STRING:
623 insn->op = FILTER_OP_GT_STRING;
624 break;
625 case REG_S64:
626 if (vstack_bx(stack)->type == REG_S64)
627 insn->op = FILTER_OP_GT_S64;
628 else
629 insn->op = FILTER_OP_GT_DOUBLE_S64;
630 break;
631 case REG_DOUBLE:
632 if (vstack_bx(stack)->type == REG_S64)
633 insn->op = FILTER_OP_GT_S64_DOUBLE;
634 else
635 insn->op = FILTER_OP_GT_DOUBLE;
636 break;
637 }
638 /* Pop 2, push 1 */
639 if (vstack_pop(stack)) {
640 ret = -EINVAL;
641 goto end;
642 }
643 vstack_ax(stack)->type = REG_S64;
644 next_pc += sizeof(struct binary_op);
645 break;
646 }
647
648 case FILTER_OP_LT:
649 {
650 struct binary_op *insn = (struct binary_op *) pc;
651
652 switch(vstack_ax(stack)->type) {
653 default:
654 printk(KERN_WARNING "unknown register type\n");
655 ret = -EINVAL;
656 goto end;
657
658 case REG_STAR_GLOB_STRING:
659 printk(KERN_WARNING "invalid register type for < binary operator\n");
660 ret = -EINVAL;
661 goto end;
662 case REG_STRING:
663 insn->op = FILTER_OP_LT_STRING;
664 break;
665 case REG_S64:
666 if (vstack_bx(stack)->type == REG_S64)
667 insn->op = FILTER_OP_LT_S64;
668 else
669 insn->op = FILTER_OP_LT_DOUBLE_S64;
670 break;
671 case REG_DOUBLE:
672 if (vstack_bx(stack)->type == REG_S64)
673 insn->op = FILTER_OP_LT_S64_DOUBLE;
674 else
675 insn->op = FILTER_OP_LT_DOUBLE;
676 break;
677 }
678 /* Pop 2, push 1 */
679 if (vstack_pop(stack)) {
680 ret = -EINVAL;
681 goto end;
682 }
683 vstack_ax(stack)->type = REG_S64;
684 next_pc += sizeof(struct binary_op);
685 break;
686 }
687
688 case FILTER_OP_GE:
689 {
690 struct binary_op *insn = (struct binary_op *) pc;
691
692 switch(vstack_ax(stack)->type) {
693 default:
694 printk(KERN_WARNING "unknown register type\n");
695 ret = -EINVAL;
696 goto end;
697
698 case REG_STAR_GLOB_STRING:
699 printk(KERN_WARNING "invalid register type for >= binary operator\n");
700 ret = -EINVAL;
701 goto end;
702 case REG_STRING:
703 insn->op = FILTER_OP_GE_STRING;
704 break;
705 case REG_S64:
706 if (vstack_bx(stack)->type == REG_S64)
707 insn->op = FILTER_OP_GE_S64;
708 else
709 insn->op = FILTER_OP_GE_DOUBLE_S64;
710 break;
711 case REG_DOUBLE:
712 if (vstack_bx(stack)->type == REG_S64)
713 insn->op = FILTER_OP_GE_S64_DOUBLE;
714 else
715 insn->op = FILTER_OP_GE_DOUBLE;
716 break;
717 }
718 /* Pop 2, push 1 */
719 if (vstack_pop(stack)) {
720 ret = -EINVAL;
721 goto end;
722 }
723 vstack_ax(stack)->type = REG_S64;
724 next_pc += sizeof(struct binary_op);
725 break;
726 }
727 case FILTER_OP_LE:
728 {
729 struct binary_op *insn = (struct binary_op *) pc;
730
731 switch(vstack_ax(stack)->type) {
732 default:
733 printk(KERN_WARNING "unknown register type\n");
734 ret = -EINVAL;
735 goto end;
736
737 case REG_STAR_GLOB_STRING:
738 printk(KERN_WARNING "invalid register type for <= binary operator\n");
739 ret = -EINVAL;
740 goto end;
741 case REG_STRING:
742 insn->op = FILTER_OP_LE_STRING;
743 break;
744 case REG_S64:
745 if (vstack_bx(stack)->type == REG_S64)
746 insn->op = FILTER_OP_LE_S64;
747 else
748 insn->op = FILTER_OP_LE_DOUBLE_S64;
749 break;
750 case REG_DOUBLE:
751 if (vstack_bx(stack)->type == REG_S64)
752 insn->op = FILTER_OP_LE_S64_DOUBLE;
753 else
754 insn->op = FILTER_OP_LE_DOUBLE;
755 break;
756 }
757 vstack_ax(stack)->type = REG_S64;
758 next_pc += sizeof(struct binary_op);
759 break;
760 }
761
762 case FILTER_OP_EQ_STRING:
763 case FILTER_OP_NE_STRING:
764 case FILTER_OP_GT_STRING:
765 case FILTER_OP_LT_STRING:
766 case FILTER_OP_GE_STRING:
767 case FILTER_OP_LE_STRING:
768 case FILTER_OP_EQ_STAR_GLOB_STRING:
769 case FILTER_OP_NE_STAR_GLOB_STRING:
770 case FILTER_OP_EQ_S64:
771 case FILTER_OP_NE_S64:
772 case FILTER_OP_GT_S64:
773 case FILTER_OP_LT_S64:
774 case FILTER_OP_GE_S64:
775 case FILTER_OP_LE_S64:
776 case FILTER_OP_EQ_DOUBLE:
777 case FILTER_OP_NE_DOUBLE:
778 case FILTER_OP_GT_DOUBLE:
779 case FILTER_OP_LT_DOUBLE:
780 case FILTER_OP_GE_DOUBLE:
781 case FILTER_OP_LE_DOUBLE:
782 case FILTER_OP_EQ_DOUBLE_S64:
783 case FILTER_OP_NE_DOUBLE_S64:
784 case FILTER_OP_GT_DOUBLE_S64:
785 case FILTER_OP_LT_DOUBLE_S64:
786 case FILTER_OP_GE_DOUBLE_S64:
787 case FILTER_OP_LE_DOUBLE_S64:
788 case FILTER_OP_EQ_S64_DOUBLE:
789 case FILTER_OP_NE_S64_DOUBLE:
790 case FILTER_OP_GT_S64_DOUBLE:
791 case FILTER_OP_LT_S64_DOUBLE:
792 case FILTER_OP_GE_S64_DOUBLE:
793 case FILTER_OP_LE_S64_DOUBLE:
794 case FILTER_OP_BIT_RSHIFT:
795 case FILTER_OP_BIT_LSHIFT:
796 case FILTER_OP_BIT_AND:
797 case FILTER_OP_BIT_OR:
798 case FILTER_OP_BIT_XOR:
799 {
800 /* Pop 2, push 1 */
801 if (vstack_pop(stack)) {
802 ret = -EINVAL;
803 goto end;
804 }
805 vstack_ax(stack)->type = REG_S64;
806 next_pc += sizeof(struct binary_op);
807 break;
808 }
809
810 /* unary */
811 case FILTER_OP_UNARY_PLUS:
812 {
813 struct unary_op *insn = (struct unary_op *) pc;
814
815 switch(vstack_ax(stack)->type) {
816 default:
817 printk(KERN_WARNING "unknown register type\n");
818 ret = -EINVAL;
819 goto end;
820
821 case REG_S64:
822 insn->op = FILTER_OP_UNARY_PLUS_S64;
823 break;
824 case REG_DOUBLE:
825 insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
826 break;
827 }
828 /* Pop 1, push 1 */
829 next_pc += sizeof(struct unary_op);
830 break;
831 }
832
833 case FILTER_OP_UNARY_MINUS:
834 {
835 struct unary_op *insn = (struct unary_op *) pc;
836
837 switch(vstack_ax(stack)->type) {
838 default:
839 printk(KERN_WARNING "unknown register type\n");
840 ret = -EINVAL;
841 goto end;
842
843 case REG_S64:
844 insn->op = FILTER_OP_UNARY_MINUS_S64;
845 break;
846 case REG_DOUBLE:
847 insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
848 break;
849 }
850 /* Pop 1, push 1 */
851 next_pc += sizeof(struct unary_op);
852 break;
853 }
854
855 case FILTER_OP_UNARY_NOT:
856 {
857 struct unary_op *insn = (struct unary_op *) pc;
858
859 switch(vstack_ax(stack)->type) {
860 default:
861 printk(KERN_WARNING "unknown register type\n");
862 ret = -EINVAL;
863 goto end;
864
865 case REG_S64:
866 insn->op = FILTER_OP_UNARY_NOT_S64;
867 break;
868 case REG_DOUBLE:
869 insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
870 break;
871 }
872 /* Pop 1, push 1 */
873 next_pc += sizeof(struct unary_op);
874 break;
875 }
876
877 case FILTER_OP_UNARY_BIT_NOT:
878 {
879 /* Pop 1, push 1 */
880 next_pc += sizeof(struct unary_op);
881 break;
882 }
883
884 case FILTER_OP_UNARY_PLUS_S64:
885 case FILTER_OP_UNARY_MINUS_S64:
886 case FILTER_OP_UNARY_NOT_S64:
887 case FILTER_OP_UNARY_PLUS_DOUBLE:
888 case FILTER_OP_UNARY_MINUS_DOUBLE:
889 case FILTER_OP_UNARY_NOT_DOUBLE:
890 {
891 /* Pop 1, push 1 */
892 next_pc += sizeof(struct unary_op);
893 break;
894 }
895
896 /* logical */
897 case FILTER_OP_AND:
898 case FILTER_OP_OR:
899 {
900 /* Continue to next instruction */
901 /* Pop 1 when jump not taken */
902 if (vstack_pop(stack)) {
903 ret = -EINVAL;
904 goto end;
905 }
906 next_pc += sizeof(struct logical_op);
907 break;
908 }
909
910 /* load field ref */
911 case FILTER_OP_LOAD_FIELD_REF:
912 {
913 printk(KERN_WARNING "Unknown field ref type\n");
914 ret = -EINVAL;
915 goto end;
916 }
917 /* get context ref */
918 case FILTER_OP_GET_CONTEXT_REF:
919 {
920 printk(KERN_WARNING "Unknown get context ref type\n");
921 ret = -EINVAL;
922 goto end;
923 }
924 case FILTER_OP_LOAD_FIELD_REF_STRING:
925 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
926 case FILTER_OP_GET_CONTEXT_REF_STRING:
927 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
928 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
929 {
930 if (vstack_push(stack)) {
931 ret = -EINVAL;
932 goto end;
933 }
934 vstack_ax(stack)->type = REG_STRING;
935 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
936 break;
937 }
938 case FILTER_OP_LOAD_FIELD_REF_S64:
939 case FILTER_OP_GET_CONTEXT_REF_S64:
940 {
941 if (vstack_push(stack)) {
942 ret = -EINVAL;
943 goto end;
944 }
945 vstack_ax(stack)->type = REG_S64;
946 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
947 break;
948 }
949 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
950 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
951 {
952 if (vstack_push(stack)) {
953 ret = -EINVAL;
954 goto end;
955 }
956 vstack_ax(stack)->type = REG_DOUBLE;
957 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
958 break;
959 }
960
961 /* load from immediate operand */
962 case FILTER_OP_LOAD_STRING:
963 {
964 struct load_op *insn = (struct load_op *) pc;
965
966 if (vstack_push(stack)) {
967 ret = -EINVAL;
968 goto end;
969 }
970 vstack_ax(stack)->type = REG_STRING;
971 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
972 break;
973 }
974
975 case FILTER_OP_LOAD_STAR_GLOB_STRING:
976 {
977 struct load_op *insn = (struct load_op *) pc;
978
979 if (vstack_push(stack)) {
980 ret = -EINVAL;
981 goto end;
982 }
983 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
984 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
985 break;
986 }
987
988 case FILTER_OP_LOAD_S64:
989 {
990 if (vstack_push(stack)) {
991 ret = -EINVAL;
992 goto end;
993 }
994 vstack_ax(stack)->type = REG_S64;
995 next_pc += sizeof(struct load_op)
996 + sizeof(struct literal_numeric);
997 break;
998 }
999
1000 case FILTER_OP_LOAD_DOUBLE:
1001 {
1002 if (vstack_push(stack)) {
1003 ret = -EINVAL;
1004 goto end;
1005 }
1006 vstack_ax(stack)->type = REG_DOUBLE;
1007 next_pc += sizeof(struct load_op)
1008 + sizeof(struct literal_double);
1009 break;
1010 }
1011
1012 /* cast */
1013 case FILTER_OP_CAST_TO_S64:
1014 {
1015 struct cast_op *insn = (struct cast_op *) pc;
1016
1017 switch (vstack_ax(stack)->type) {
1018 default:
1019 printk(KERN_WARNING "unknown register type\n");
1020 ret = -EINVAL;
1021 goto end;
1022
1023 case REG_STRING:
1024 case REG_STAR_GLOB_STRING:
1025 printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
1026 ret = -EINVAL;
1027 goto end;
1028 case REG_S64:
1029 insn->op = FILTER_OP_CAST_NOP;
1030 break;
1031 case REG_DOUBLE:
1032 insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
1033 break;
1034 }
1035 /* Pop 1, push 1 */
1036 vstack_ax(stack)->type = REG_S64;
1037 next_pc += sizeof(struct cast_op);
1038 break;
1039 }
1040 case FILTER_OP_CAST_DOUBLE_TO_S64:
1041 {
1042 /* Pop 1, push 1 */
1043 vstack_ax(stack)->type = REG_S64;
1044 next_pc += sizeof(struct cast_op);
1045 break;
1046 }
1047 case FILTER_OP_CAST_NOP:
1048 {
1049 next_pc += sizeof(struct cast_op);
1050 break;
1051 }
1052
1053 /*
1054 * Instructions for recursive traversal through composed types.
1055 */
1056 case FILTER_OP_GET_CONTEXT_ROOT:
1057 {
1058 if (vstack_push(stack)) {
1059 ret = -EINVAL;
1060 goto end;
1061 }
1062 vstack_ax(stack)->type = REG_PTR;
1063 vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
1064 next_pc += sizeof(struct load_op);
1065 break;
1066 }
1067 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1068 {
1069 if (vstack_push(stack)) {
1070 ret = -EINVAL;
1071 goto end;
1072 }
1073 vstack_ax(stack)->type = REG_PTR;
1074 vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
1075 next_pc += sizeof(struct load_op);
1076 break;
1077 }
1078 case FILTER_OP_GET_PAYLOAD_ROOT:
1079 {
1080 if (vstack_push(stack)) {
1081 ret = -EINVAL;
1082 goto end;
1083 }
1084 vstack_ax(stack)->type = REG_PTR;
1085 vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
1086 next_pc += sizeof(struct load_op);
1087 break;
1088 }
1089
1090 case FILTER_OP_LOAD_FIELD:
1091 {
1092 struct load_op *insn = (struct load_op *) pc;
1093
1094 WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
1095 /* Pop 1, push 1 */
1096 ret = specialize_load_field(vstack_ax(stack), insn);
1097 if (ret)
1098 goto end;
1099
1100 next_pc += sizeof(struct load_op);
1101 break;
1102 }
1103
1104 case FILTER_OP_LOAD_FIELD_S8:
1105 case FILTER_OP_LOAD_FIELD_S16:
1106 case FILTER_OP_LOAD_FIELD_S32:
1107 case FILTER_OP_LOAD_FIELD_S64:
1108 case FILTER_OP_LOAD_FIELD_U8:
1109 case FILTER_OP_LOAD_FIELD_U16:
1110 case FILTER_OP_LOAD_FIELD_U32:
1111 case FILTER_OP_LOAD_FIELD_U64:
1112 {
1113 /* Pop 1, push 1 */
1114 vstack_ax(stack)->type = REG_S64;
1115 next_pc += sizeof(struct load_op);
1116 break;
1117 }
1118
1119 case FILTER_OP_LOAD_FIELD_STRING:
1120 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1121 {
1122 /* Pop 1, push 1 */
1123 vstack_ax(stack)->type = REG_STRING;
1124 next_pc += sizeof(struct load_op);
1125 break;
1126 }
1127
1128 case FILTER_OP_LOAD_FIELD_DOUBLE:
1129 {
1130 /* Pop 1, push 1 */
1131 vstack_ax(stack)->type = REG_DOUBLE;
1132 next_pc += sizeof(struct load_op);
1133 break;
1134 }
1135
1136 case FILTER_OP_GET_SYMBOL:
1137 {
1138 struct load_op *insn = (struct load_op *) pc;
1139
1140 dbg_printk("op get symbol\n");
1141 switch (vstack_ax(stack)->load.type) {
1142 case LOAD_OBJECT:
1143 printk(KERN_WARNING "Nested fields not implemented yet.\n");
1144 ret = -EINVAL;
1145 goto end;
1146 case LOAD_ROOT_CONTEXT:
1147 /* Lookup context field. */
1148 ret = specialize_context_lookup(bytecode, insn,
1149 &vstack_ax(stack)->load);
1150 if (ret)
1151 goto end;
1152 break;
1153 case LOAD_ROOT_APP_CONTEXT:
1154 ret = -EINVAL;
1155 goto end;
1156 case LOAD_ROOT_PAYLOAD:
1157 /* Lookup event payload field. */
1158 ret = specialize_event_payload_lookup(event,
1159 bytecode, insn,
1160 &vstack_ax(stack)->load);
1161 if (ret)
1162 goto end;
1163 break;
1164 }
1165 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1166 break;
1167 }
1168
1169 case FILTER_OP_GET_SYMBOL_FIELD:
1170 {
1171 /* Always generated by specialize phase. */
1172 ret = -EINVAL;
1173 goto end;
1174 }
1175
1176 case FILTER_OP_GET_INDEX_U16:
1177 {
1178 struct load_op *insn = (struct load_op *) pc;
1179 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1180
1181 dbg_printk("op get index u16\n");
1182 /* Pop 1, push 1 */
1183 ret = specialize_get_index(bytecode, insn, index->index,
1184 vstack_ax(stack), sizeof(*index));
1185 if (ret)
1186 goto end;
1187 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1188 break;
1189 }
1190
1191 case FILTER_OP_GET_INDEX_U64:
1192 {
1193 struct load_op *insn = (struct load_op *) pc;
1194 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1195
1196 dbg_printk("op get index u64\n");
1197 /* Pop 1, push 1 */
1198 ret = specialize_get_index(bytecode, insn, index->index,
1199 vstack_ax(stack), sizeof(*index));
1200 if (ret)
1201 goto end;
1202 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1203 break;
1204 }
1205
1206 }
1207 }
1208 end:
1209 return ret;
1210 }
This page took 0.05803 seconds and 3 git commands to generate.