Filter: implement bitwise lshift, rshift, not
lttng-ust.git: liblttng-ust/lttng-filter-specialize.c
/*
 * lttng-filter-specialize.c
 *
 * LTTng UST filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define _LGPL_SOURCE
#include "lttng-filter.h"
#include <lttng/align.h>

/*
 * Find last (most significant) set bit. Returns the 1-based bit
 * position, or 0 if no bit is set.
 */
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}

/* Smallest order such that (1U << order) >= count, i.e. ceil(log2(count)). */
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
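/*
 * Illustrative examples (compiled out, function name is hypothetical):
 * expected values for the two helpers above. lttng_fls() returns the
 * 1-based position of the most significant set bit (0 when no bit is
 * set), and get_count_order() rounds count up to the next power-of-two
 * exponent, which bytecode_reserve_data() uses to size reallocations.
 */
#if 0	/* Example only. */
#include <assert.h>

static void fls_order_examples(void)
{
	assert(lttng_fls(0) == 0);
	assert(lttng_fls(1) == 1);		/* 0b1 */
	assert(lttng_fls(8) == 4);		/* 0b1000 */
	assert(lttng_fls(9) == 4);		/* 0b1001 */

	assert(get_count_order(1) == 0);	/* 1U << 0 >= 1 */
	assert(get_count_order(8) == 3);	/* exact power of two */
	assert(get_count_order(9) == 4);	/* rounded up to 16 */
}
#endif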

static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated region. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return offset;	/* Propagate -EINVAL or -ENOMEM. */
	memcpy(&runtime->data[offset], p, len);
	return offset;
}
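/*
 * Worked example (compiled out, function name is hypothetical), assuming
 * a runtime whose data_len is currently 5: pushing an 8-byte value with
 * 8-byte alignment computes offset_align(5, 8) == 3 bytes of padding,
 * returns offset 8, and leaves data_len == 16. When the new length
 * exceeds data_alloc_len, the buffer grows to max(next power of two,
 * twice the old size) and the fresh region is zeroed.
 */
#if 0	/* Example only. */
static void push_data_example(struct bytecode_runtime *runtime)
{
	uint64_t value = 42;
	ssize_t offset;

	offset = bytecode_push_data(runtime, &value,
			__alignof__(value), sizeof(value));
	if (offset < 0)
		return;	/* -EINVAL (over FILTER_MAX_DATA_LEN) or -ENOMEM. */
	/* runtime->data[offset] now holds a copy of value. */
}
#endif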

static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_S64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		dbg_printf("op load field double\n");
		stack_top->type = REG_DOUBLE;
		insn->op = FILTER_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
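/*
 * Worked mapping for the helper above: signedness == 1 with
 * elem_len == 32 selects OBJECT_TYPE_S32; signedness == 0 with
 * elem_len == 64 selects OBJECT_TYPE_U64. Any element size other than
 * 8, 16, 32 or 64 bits is rejected with -EINVAL.
 */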

static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
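/*
 * Offset arithmetic, with example values: indexing element 3 of an
 * array of 16-bit integers yields gid.offset == 3 * (16 / CHAR_BIT)
 * == 6 bytes, and a 10-element array gives gid.array_len == 20 bytes,
 * which lets the interpreter bound-check the access at run time.
 */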

static int specialize_context_lookup_name(struct lttng_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST lays out all integer fields as s64 on the stack for
	 * the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case atype_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		return -EINVAL;
	case atype_struct:
		ERR("Structure type cannot be loaded.");
		return -EINVAL;
	default:
		ERR("Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
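/*
 * Type-mapping summary for the switch above:
 *
 *   atype_integer / atype_enum              -> OBJECT_TYPE_S64 or OBJECT_TYPE_U64
 *   atype_array / atype_sequence as context -> OBJECT_TYPE_STRING
 *   atype_array / atype_sequence, integer
 *     elements without text encoding        -> OBJECT_TYPE_ARRAY / OBJECT_TYPE_SEQUENCE
 *   atype_array / atype_sequence, text
 *     encoding                              -> OBJECT_TYPE_STRING_SEQUENCE
 *   atype_string                            -> OBJECT_TYPE_STRING
 *   atype_float                             -> OBJECT_TYPE_DOUBLE
 *   atype_dynamic / atype_struct            -> rejected with -EINVAL
 */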

static int specialize_context_lookup(struct lttng_session *session,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(session->ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &session->ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

static int specialize_app_context_lookup(struct lttng_session *session,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(session->ctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				&session->ctx);
		if (ret)
			goto end;	/* goto end so that name is freed on error. */
		idx = lttng_get_context_index(session->ctx,
				name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = &session->ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* Compute the field offset on the interpreter stack. */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
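/*
 * Stack-layout example with a hypothetical event (illustration only):
 * for payload fields { int32 a; string msg; float ratio; }, looking up
 * "ratio" skips a (+sizeof(int64_t), since integers occupy 8 bytes on
 * the interpreter stack) and msg (+sizeof(void *)), so field_offset ==
 * 16 on LP64 when the matching field is found.
 */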

int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_session *session = bytecode->p.session;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1: <= consumes two operands like the other comparisons. */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(session,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(session,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}
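/*
 * End-to-end sketch with hypothetical bytecode (illustration only): for
 * a filter such as "intfield == 42", a generic sequence like
 *
 *   FILTER_OP_LOAD_FIELD_REF_S64 (intfield)
 *   FILTER_OP_LOAD_S64 (42)
 *   FILTER_OP_EQ
 *   FILTER_OP_RETURN
 *
 * leaves two REG_S64 entries on the virtual stack when FILTER_OP_EQ is
 * reached, so the pass above rewrites that opcode in place to
 * FILTER_OP_EQ_S64, sparing the interpreter a dynamic type check on
 * every event.
 */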