/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/slab.h>
#include <lttng-filter.h>
#include "lib/align.h"

#include <wrapper/compiler_attributes.h>

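/*
 * Reserve @len bytes, aligned on @align, in the runtime data area used
 * to store specialized instruction operands. The area grows by
 * power-of-two reallocations and is bounded by FILTER_MAX_DATA_LEN.
 * Returns the offset of the reserved space, or a negative error code.
 */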
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory past the old allocation. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

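/*
 * Append @len bytes from @p to the runtime data area, aligned on
 * @align. Returns the offset at which the data was stored, or a
 * negative error code.
 */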
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

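/*
 * Replace a generic FILTER_OP_LOAD_FIELD instruction with the typed
 * variant matching the object currently on top of the virtual stack,
 * and update the stack entry with the register type the load will
 * produce at execution time.
 */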
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		printk(KERN_WARNING "Double type unsupported\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, structs and variants cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

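/*
 * Map an integer element described by its signedness and bit width
 * (@elem_len) to the corresponding typed object type. Only 8, 16, 32
 * and 64-bit elements are supported.
 */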
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

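/*
 * Specialize a get-index operation on the object at the top of the
 * virtual stack: compute the element offset and type, push a
 * struct filter_get_index_data record into the runtime data area, and
 * patch the instruction operand with the record's offset.
 */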
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:
			lttng_fallthrough;
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

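/*
 * Resolve the context field name referenced by a get_symbol operand
 * into its index within the static context array. Returns the index,
 * or a negative value if the name is unknown.
 */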
static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(lttng_static_ctx, name);
}

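/*
 * Fill @load with the object type matching @field's declared type, so
 * that later load and get-index operations can be specialized.
 * Context fields (@is_context) expose their array/sequence content as
 * strings.
 */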
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * The filter interpreter lays out all integer fields as s64 on
	 * the stack.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

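/*
 * Specialize a get_symbol lookup on the context root: resolve the
 * field name to a context index, then rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 whose operand points at a
 * filter_get_index_data record carrying that index.
 */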
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

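/*
 * Specialize a get_symbol lookup on the event payload root: walk the
 * event fields to find the named field and its offset in the
 * interpreter stack frame, then rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 referencing that offset.
 */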
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

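/*
 * Single-pass specializer: walk the validated bytecode while tracking
 * operand types on a virtual stack, and rewrite generic operations
 * into their typed variants. As an illustrative example, the generic
 * FILTER_OP_EQ emitted for a filter such as 'cpu_id == 0' becomes
 * FILTER_OP_EQ_S64 once both operands are known to be REG_S64.
 * Returns 0 on success, -EINVAL on error.
 */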
int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
		case FILTER_OP_RETURN_S64:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

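		/*
		 * Comparison operators are specialized according to the
		 * register types of both operands on the virtual stack
		 * (ax: top of stack, bx: second from top).
		 */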
		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			printk(KERN_WARNING "Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			printk(KERN_WARNING "Unknown get context ref type\n");
			ret = -EINVAL;
			goto end;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}