Filter: index array, sequences, implement bitwise binary operators
liblttng-ust/lttng-filter-specialize.c (lttng-ust.git)
/*
 * lttng-filter-specialize.c
 *
 * LTTng UST filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define _LGPL_SOURCE
#include "lttng-filter.h"
#include <lttng/align.h>

static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}
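
/*
 * Illustrative sketch: lttng_fls() mirrors the kernel's fls(), returning
 * the 1-based position of the most significant set bit, or 0 when no bit
 * is set. A minimal self-test, assuming a standalone build with assert()
 * available (not compiled here):
 */
#if 0	/* illustration only */
#include <assert.h>

static void test_lttng_fls(void)
{
	assert(lttng_fls(0) == 0);	/* no bit set */
	assert(lttng_fls(1) == 1);	/* bit 0 -> position 1 */
	assert(lttng_fls(8) == 4);	/* bit 3 -> position 4 */
	assert(lttng_fls(-1) == 32);	/* cast to unsigned: all 32 bits set */
}
#endif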

static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
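
/*
 * Illustrative sketch: get_count_order() computes ceil(log2(count)),
 * i.e. the smallest order such that (1U << order) >= count. Worked
 * examples:
 *
 *   get_count_order(1) == 0   (1 << 0 == 1)
 *   get_count_order(4) == 2   (exact power of two: no round-up)
 *   get_count_order(5) == 3   (5 & 4 != 0, so round up to 1 << 3 == 8)
 */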

static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}
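
/*
 * Illustrative sketch: the growth policy above rounds the requested size
 * up to the next power of two, or doubles the current allocation,
 * whichever is larger, giving amortized O(1) appends. Worked example,
 * assuming an empty runtime (data_len == 0, data_alloc_len == 0):
 *
 *   reserve(align=8, len=12): padding == 0, new_len == 12,
 *       new_alloc_len == max(1 << get_count_order(12), 0) == 16,
 *       returned offset == 0, data_len becomes 12.
 *   reserve(align=8, len=4):  padding == 4 (12 -> 16), new_len == 20,
 *       new_alloc_len == max(1 << get_count_order(20), 16 << 1) == 32,
 *       returned offset == 16, data_len becomes 20.
 */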

static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return offset;	/* Propagate -EINVAL or -ENOMEM. */
	memcpy(&runtime->data[offset], p, len);
	return offset;
}
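
/*
 * Illustrative sketch: the typical use of bytecode_push_data() is to
 * append an out-of-line descriptor and patch its offset into the
 * instruction stream, as done by specialize_context_lookup() below.
 * Condensed pattern (assumes gid, idx, insn and runtime as in that
 * function; not compiled here):
 */
#if 0	/* illustration only */
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0)
		return -EINVAL;
	/* The instruction then refers to the pushed data by offset. */
	((struct get_index_u16 *) insn->data)->index = data_offset;
#endif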

static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_S64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = FILTER_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
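
/*
 * Illustrative sketch: the mapping is simply
 * (element bit-width, signedness) -> object type, e.g.:
 *
 *   elem_len == 32, signedness == 1  ->  OBJECT_TYPE_S32
 *   elem_len == 64, signedness == 0  ->  OBJECT_TYPE_U64
 *
 * Any width other than 8, 16, 32 or 64 bits is rejected with -EINVAL.
 */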

static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
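
/*
 * Illustrative sketch (hypothetical field): for a fixed array of
 * 4 x uint32_t indexed at position 3, the code above yields
 *
 *   gid.offset    == 3 * (32 / CHAR_BIT) == 12 bytes
 *   gid.array_len == 4 * (32 / CHAR_BIT) == 16 bytes
 *   gid.elem.type == OBJECT_TYPE_U32, gid.elem.len == 32
 *
 * so the interpreter can fetch the element directly without
 * re-inspecting the field description. Sequences get no array_len:
 * their length is only known at trace time, so the bounds check for
 * arrays happens here, while the out-of-bound index for sequences
 * must be handled by the interpreter.
 */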

static int specialize_context_lookup_name(struct lttng_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST lays out all integer fields as s64 on the stack for the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case atype_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		return -EINVAL;
	case atype_struct:
		ERR("Structure type cannot be loaded.");
		return -EINVAL;
	default:
		ERR("Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
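
/*
 * Illustrative sketch (hypothetical fields): examples of the mapping
 * performed above for payload fields (is_context == false):
 *
 *   int32_t field                      -> OBJECT_TYPE_S64 (widened)
 *   uint8_t array, encoding none      -> OBJECT_TYPE_ARRAY
 *   char array, UTF8/ASCII encoding   -> OBJECT_TYPE_STRING_SEQUENCE
 *   float or double field             -> OBJECT_TYPE_DOUBLE
 *
 * Context fields (is_context == true) flatten arrays and sequences
 * to OBJECT_TYPE_STRING.
 */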

static int specialize_context_lookup(struct lttng_session *session,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(session->ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &session->ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

static int specialize_app_context_lookup(struct lttng_session *session,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(session->ctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				&session->ctx);
		if (ret)
			goto end;	/* Don't leak "name" on error. */
		idx = lttng_get_context_index(session->ctx,
				name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = &session->ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* Compute the field offset on the stack. */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
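
/*
 * Illustrative sketch (hypothetical event): the offsets summed above
 * follow the stack layout of the payload fields as seen by the filter.
 * On an LP64 target, for an event with fields
 * (int32_t a, char *msg, float ratio), looking up "ratio" walks:
 *
 *   a     (atype_integer) -> +sizeof(int64_t) == +8  -> offset  8
 *   msg   (atype_string)  -> +sizeof(void *)  == +8  -> offset 16
 *
 * so gid.offset == 16 when "ratio" is found.
 */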

int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_session *session = bytecode->p.session;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
		case FILTER_OP_RSHIFT:
		case FILTER_OP_LSHIFT:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
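
		/*
		 * Illustrative sketch (hypothetical filter): for the
		 * expression "count == 7" where "count" is an integer
		 * payload field, both operands are REG_S64 at
		 * specialization time, so the generic FILTER_OP_EQ above
		 * is rewritten to FILTER_OP_EQ_S64 and the interpreter
		 * never re-checks operand types. When either operand is
		 * REG_UNKNOWN (dynamic typing), the generic opcode is
		 * kept and type dispatch happens at runtime instead.
		 */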

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(session,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(session,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}