Filter: index arrays and sequences, implement bitwise binary operators
/*
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <lttng-filter.h>
#include "lib/align.h"

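/*
 * Reserve "len" bytes of space aligned on "align" in the runtime data
 * area, growing the backing allocation (next power of two, at least
 * doubling) as needed. Newly allocated memory is zeroed. Returns the
 * offset of the reserved space within the data area, or a negative
 * error code (-EINVAL if FILTER_MAX_DATA_LEN would be exceeded,
 * -ENOMEM on allocation failure).
 */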
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory, past the end of the old allocation. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

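/*
 * Append "len" bytes from "p" to the runtime data area, aligned on
 * "align". Returns the offset of the copied data within the data area,
 * or a negative error code if space cannot be reserved.
 */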
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

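/*
 * Specialize a generic FILTER_OP_LOAD_FIELD instruction into its typed
 * variant based on the object type at the top of the virtual stack,
 * and update the stack top register type accordingly.
 */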
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		printk(KERN_WARNING "Double type unsupported\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, structs and variants cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

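/*
 * Map an element bit-length and signedness to the matching integer
 * object type (e.g. 16-bit signed maps to OBJECT_TYPE_S16).
 */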
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

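/*
 * Specialize a get_index instruction operating on an array or sequence
 * object: compute the element offset and type, push the resulting
 * struct filter_get_index_data into the runtime data area, and patch
 * the instruction's index operand with the data offset. Array accesses
 * are bound-checked here at specialization time; sequence lengths are
 * only known at runtime.
 */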
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

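/*
 * Resolve the context field name referenced by a get_symbol instruction
 * and return its index within the static context, or a negative error
 * code if the name is not found.
 */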
static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(lttng_static_ctx, name);
}

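/*
 * Set the vstack load object type matching an event field type.
 * Context fields expose arrays and sequences as strings; payload
 * arrays and sequences of non-encoded integers keep their aggregate
 * type so they can be indexed.
 */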
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * The filter lays out all integer fields as s64 on the stack.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

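/*
 * Specialize a get_symbol lookup on the context root: resolve the
 * context field, then rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 referencing a filter_get_index_data record
 * holding the context index.
 */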
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

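/*
 * Specialize a get_symbol lookup on the payload root: find the named
 * field in the event description while accumulating the stack-layout
 * offset of the fields preceding it, then rewrite the instruction into
 * a FILTER_OP_GET_INDEX_U16 referencing that offset.
 */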
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* Compute the field offset on the stack. */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

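/*
 * Single pass over the bytecode: track operand types on a virtual
 * stack and rewrite each generic opcode into its type-specialized
 * variant, so the interpreter does not have to re-check operand types
 * at runtime.
 */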
int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
		case FILTER_OP_RSHIFT:
		case FILTER_OP_LSHIFT:
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			printk(KERN_WARNING "Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			printk(KERN_WARNING "Unknown get context ref type\n");
			ret = -EINVAL;
			goto end;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}
		}
	}
end:
	return ret;
}