/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/slab.h>
#include <lttng-filter.h>
#include <lttng/align.h>

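/*
 * The specializer stores per-instruction lookup descriptors
 * (struct filter_get_index_data) in a per-runtime "data" section. The
 * helpers below grow that section and append aligned data to it.
 */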
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory past the old allocation. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

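/*
 * Append @len bytes from @p to the data section at @align alignment.
 * Returns the offset of the copied data within the section, or a
 * negative error code.
 */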
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

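/*
 * Narrow a generic FILTER_OP_LOAD_FIELD into its type-specific variant
 * based on the object type found at the top of the virtual stack.
 */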
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		printk(KERN_WARNING "Double type unsupported\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, structs and variants cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

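/*
 * Map an element bit width and signedness to the matching integer
 * object type.
 */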
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

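/*
 * Specialize an array/sequence index access: compute the element
 * offset and byte order, push a filter_get_index_data descriptor to
 * the data section, and make the instruction reference it.
 */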
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
				ret = -EINVAL;
				goto end;
			}
			integer_type = &field->type.u.array_nestable.elem_type->u.integer;
			num_elems = field->type.u.array_nestable.length;
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
				ret = -EINVAL;
				goto end;
			}
			integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

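/*
 * Resolve the context field name referenced by a get_symbol
 * instruction into its index in the static context.
 */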
static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(lttng_static_ctx, name);
}

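/*
 * Select the object type with which a field will be loaded, based on
 * its event field type description.
 */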
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST lays out all integer fields as s64 on the stack for the
	 * filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum_nestable:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.enum_nestable.container_type->u.integer;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array_nestable:
		if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence_nestable:
		if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_struct_nestable:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	case atype_variant_nestable:
		printk(KERN_WARNING "Variant type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

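/*
 * Specialize a get_symbol on the context root into a
 * FILTER_OP_GET_INDEX_U16 referencing the context field.
 */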
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

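/*
 * Specialize a get_symbol on the payload root: find the named field
 * in the event description, compute its offset on the interpreter
 * stack, and emit a FILTER_OP_GET_INDEX_U16 referencing it.
 */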
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (field->nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array_nestable:
		case atype_sequence_nestable:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

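/*
 * Single pass over the bytecode: track register types on a virtual
 * stack and rewrite generic opcodes into their type-specialized
 * variants.
 */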
int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
		case FILTER_OP_RETURN_S64:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

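		/*
		 * For binary comparisons, the stack top (ax) holds the
		 * right operand and bx the left operand; the specialized
		 * opcode is selected from this pair of register types.
		 */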
		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

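		/*
		 * Already-specialized comparison and bitwise ops only need
		 * their stack effect applied: pop 2, push 1.
		 */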
		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			printk(KERN_WARNING "Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			printk(KERN_WARNING "Unknown get context ref type\n");
			ret = -EINVAL;
			goto end;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}