/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/slab.h>
#include <lttng-filter.h>
#include "lib/align.h"

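/*
 * Reserve "len" bytes, aligned on "align", in the runtime data area,
 * growing the backing buffer with a power-of-two krealloc() when
 * needed. Returns the offset of the reserved region within
 * runtime->data, or a negative error. For example, with
 * data_len == 6, align == 4 and len == 8, two bytes of padding are
 * inserted and offset 8 is returned.
 */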
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory beyond the old allocation. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

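/*
 * Copy "len" bytes from "p" into the runtime data area, aligned on
 * "align". Returns the offset at which the data was stored, or a
 * negative error.
 */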
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

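/*
 * Rewrite a generic FILTER_OP_LOAD_FIELD instruction into its typed
 * variant, based on the object type on top of the virtual stack.
 * Integer fields of every width are exposed to the interpreter as
 * REG_S64; strings and string sequences as REG_STRING.
 */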
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		printk(KERN_WARNING "Double type unsupported\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, structs and variants cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

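/*
 * Map an element bit-width and signedness to the matching
 * OBJECT_TYPE_{S,U}{8,16,32,64} integer object type.
 */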
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

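/*
 * Specialize a get-index operation on an array or sequence object:
 * validate the index, compute the element byte offset, and append a
 * struct filter_get_index_data record to the runtime data area.
 * "idx_len" selects the u16 or u64 index instruction layout. For
 * example, index 2 of a 32-bit element array yields gid.offset == 8.
 */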
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

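/*
 * Resolve the context field name referenced by a get_symbol operand
 * into its index within the static context array; a negative return
 * means the name was not found.
 */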
static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(lttng_static_ctx, name);
}

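/*
 * Fill a vstack_load descriptor from an event field type. Context
 * fields expose array/sequence payloads as strings; nested compound
 * types are rejected.
 */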
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST lays out all integer fields as s64 on the stack for
	 * the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

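/*
 * Specialize a get_symbol lookup on the context root: resolve the
 * field name to a context index and rewrite the instruction into
 * FILTER_OP_GET_INDEX_U16 backed by a filter_get_index_data record.
 */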
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

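/*
 * Specialize a get_symbol lookup on the payload root: walk the event
 * descriptor to locate the named field and its offset within the
 * interpreter stack frame, then rewrite the instruction into
 * FILTER_OP_GET_INDEX_U16.
 */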
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

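/*
 * Single pass over the bytecode: operand types are tracked on a
 * virtual stack and each generic opcode is rewritten into its typed
 * variant, so no type checks remain at event evaluation time. For
 * example, a FILTER_OP_EQ whose two operands were inferred as
 * REG_S64 becomes FILTER_OP_EQ_S64.
 */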
int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
		case FILTER_OP_RETURN_S64:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			printk(KERN_WARNING "Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			printk(KERN_WARNING "Unknown get context ref type\n");
			ret = -EINVAL;
			goto end;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}