2 * lttng-filter-specialize.c
4 * LTTng UST filter code specializer.
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#include <stddef.h>
#include <stdint.h>
#include <limits.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
#include <errno.h>
#include <assert.h>

#include <lttng/align.h>

#include "lttng-filter.h"
#include "ust-events-internal.h"
/*
 * lttng_fls: find last (most significant) set bit, 1-based.
 *
 * Returns the 1-based position of the highest set bit of @val
 * (interpreted as unsigned), or 0 when no bit is set.  Binary-search
 * style: each test halves the remaining candidate range.
 */
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

/*
 * get_count_order: return ceil(log2(count)), i.e. the order of the
 * smallest power of two >= @count.  Used to round allocation sizes up
 * to a power of two.
 *
 * For count == 0 this returns -1 (fls(0) - 1); callers are expected to
 * pass a non-zero count.
 */
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	/* Non-power-of-two: round up to the next order. */
	if (count & (count - 1))
		order++;
	return order;
}
74 static ssize_t
bytecode_reserve_data(struct bytecode_runtime
*runtime
,
75 size_t align
, size_t len
)
78 size_t padding
= lttng_ust_offset_align(runtime
->data_len
, align
);
79 size_t new_len
= runtime
->data_len
+ padding
+ len
;
80 size_t new_alloc_len
= new_len
;
81 size_t old_alloc_len
= runtime
->data_alloc_len
;
83 if (new_len
> FILTER_MAX_DATA_LEN
)
86 if (new_alloc_len
> old_alloc_len
) {
90 max_t(size_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
91 newptr
= realloc(runtime
->data
, new_alloc_len
);
94 runtime
->data
= newptr
;
95 /* We zero directly the memory from start of allocation. */
96 memset(&runtime
->data
[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
97 runtime
->data_alloc_len
= new_alloc_len
;
99 runtime
->data_len
+= padding
;
100 ret
= runtime
->data_len
;
101 runtime
->data_len
+= len
;
105 static ssize_t
bytecode_push_data(struct bytecode_runtime
*runtime
,
106 const void *p
, size_t align
, size_t len
)
110 offset
= bytecode_reserve_data(runtime
, align
, len
);
113 memcpy(&runtime
->data
[offset
], p
, len
);
117 static int specialize_load_field(struct vstack_entry
*stack_top
,
118 struct load_op
*insn
)
122 switch (stack_top
->load
.type
) {
125 case LOAD_ROOT_CONTEXT
:
126 case LOAD_ROOT_APP_CONTEXT
:
127 case LOAD_ROOT_PAYLOAD
:
129 dbg_printf("Filter warning: cannot load root, missing field name.\n");
133 switch (stack_top
->load
.object_type
) {
135 dbg_printf("op load field s8\n");
136 stack_top
->type
= REG_S64
;
137 if (!stack_top
->load
.rev_bo
)
138 insn
->op
= FILTER_OP_LOAD_FIELD_S8
;
140 case OBJECT_TYPE_S16
:
141 dbg_printf("op load field s16\n");
142 stack_top
->type
= REG_S64
;
143 if (!stack_top
->load
.rev_bo
)
144 insn
->op
= FILTER_OP_LOAD_FIELD_S16
;
146 case OBJECT_TYPE_S32
:
147 dbg_printf("op load field s32\n");
148 stack_top
->type
= REG_S64
;
149 if (!stack_top
->load
.rev_bo
)
150 insn
->op
= FILTER_OP_LOAD_FIELD_S32
;
152 case OBJECT_TYPE_S64
:
153 dbg_printf("op load field s64\n");
154 stack_top
->type
= REG_S64
;
155 if (!stack_top
->load
.rev_bo
)
156 insn
->op
= FILTER_OP_LOAD_FIELD_S64
;
159 dbg_printf("op load field u8\n");
160 stack_top
->type
= REG_U64
;
161 insn
->op
= FILTER_OP_LOAD_FIELD_U8
;
163 case OBJECT_TYPE_U16
:
164 dbg_printf("op load field u16\n");
165 stack_top
->type
= REG_U64
;
166 if (!stack_top
->load
.rev_bo
)
167 insn
->op
= FILTER_OP_LOAD_FIELD_U16
;
169 case OBJECT_TYPE_U32
:
170 dbg_printf("op load field u32\n");
171 stack_top
->type
= REG_U64
;
172 if (!stack_top
->load
.rev_bo
)
173 insn
->op
= FILTER_OP_LOAD_FIELD_U32
;
175 case OBJECT_TYPE_U64
:
176 dbg_printf("op load field u64\n");
177 stack_top
->type
= REG_U64
;
178 if (!stack_top
->load
.rev_bo
)
179 insn
->op
= FILTER_OP_LOAD_FIELD_U64
;
181 case OBJECT_TYPE_DOUBLE
:
182 stack_top
->type
= REG_DOUBLE
;
183 insn
->op
= FILTER_OP_LOAD_FIELD_DOUBLE
;
185 case OBJECT_TYPE_STRING
:
186 dbg_printf("op load field string\n");
187 stack_top
->type
= REG_STRING
;
188 insn
->op
= FILTER_OP_LOAD_FIELD_STRING
;
190 case OBJECT_TYPE_STRING_SEQUENCE
:
191 dbg_printf("op load field string sequence\n");
192 stack_top
->type
= REG_STRING
;
193 insn
->op
= FILTER_OP_LOAD_FIELD_SEQUENCE
;
195 case OBJECT_TYPE_DYNAMIC
:
196 dbg_printf("op load field dynamic\n");
197 stack_top
->type
= REG_UNKNOWN
;
198 /* Don't specialize load op. */
200 case OBJECT_TYPE_SEQUENCE
:
201 case OBJECT_TYPE_ARRAY
:
202 case OBJECT_TYPE_STRUCT
:
203 case OBJECT_TYPE_VARIANT
:
204 ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
214 static int specialize_get_index_object_type(enum object_type
*otype
,
215 int signedness
, uint32_t elem_len
)
220 *otype
= OBJECT_TYPE_S8
;
222 *otype
= OBJECT_TYPE_U8
;
226 *otype
= OBJECT_TYPE_S16
;
228 *otype
= OBJECT_TYPE_U16
;
232 *otype
= OBJECT_TYPE_S32
;
234 *otype
= OBJECT_TYPE_U32
;
238 *otype
= OBJECT_TYPE_S64
;
240 *otype
= OBJECT_TYPE_U64
;
248 static int specialize_get_index(struct bytecode_runtime
*runtime
,
249 struct load_op
*insn
, uint64_t index
,
250 struct vstack_entry
*stack_top
,
254 struct filter_get_index_data gid
;
257 memset(&gid
, 0, sizeof(gid
));
258 switch (stack_top
->load
.type
) {
260 switch (stack_top
->load
.object_type
) {
261 case OBJECT_TYPE_ARRAY
:
263 const struct lttng_integer_type
*integer_type
;
264 const struct lttng_event_field
*field
;
265 uint32_t elem_len
, num_elems
;
268 field
= stack_top
->load
.field
;
269 switch (field
->type
.atype
) {
271 integer_type
= &field
->type
.u
.legacy
.array
.elem_type
.u
.basic
.integer
;
272 num_elems
= field
->type
.u
.legacy
.array
.length
;
274 case atype_array_nestable
:
275 if (field
->type
.u
.array_nestable
.elem_type
->atype
!= atype_integer
) {
279 integer_type
= &field
->type
.u
.array_nestable
.elem_type
->u
.integer
;
280 num_elems
= field
->type
.u
.array_nestable
.length
;
286 elem_len
= integer_type
->size
;
287 signedness
= integer_type
->signedness
;
288 if (index
>= num_elems
) {
292 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
293 signedness
, elem_len
);
296 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
297 gid
.array_len
= num_elems
* (elem_len
/ CHAR_BIT
);
298 gid
.elem
.type
= stack_top
->load
.object_type
;
299 gid
.elem
.len
= elem_len
;
300 if (integer_type
->reverse_byte_order
)
301 gid
.elem
.rev_bo
= true;
302 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
305 case OBJECT_TYPE_SEQUENCE
:
307 const struct lttng_integer_type
*integer_type
;
308 const struct lttng_event_field
*field
;
312 field
= stack_top
->load
.field
;
313 switch (field
->type
.atype
) {
315 integer_type
= &field
->type
.u
.legacy
.sequence
.elem_type
.u
.basic
.integer
;
317 case atype_sequence_nestable
:
318 if (field
->type
.u
.sequence_nestable
.elem_type
->atype
!= atype_integer
) {
322 integer_type
= &field
->type
.u
.sequence_nestable
.elem_type
->u
.integer
;
328 elem_len
= integer_type
->size
;
329 signedness
= integer_type
->signedness
;
330 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
331 signedness
, elem_len
);
334 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
335 gid
.elem
.type
= stack_top
->load
.object_type
;
336 gid
.elem
.len
= elem_len
;
337 if (integer_type
->reverse_byte_order
)
338 gid
.elem
.rev_bo
= true;
339 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
342 case OBJECT_TYPE_STRUCT
:
343 /* Only generated by the specialize phase. */
344 case OBJECT_TYPE_VARIANT
: /* Fall-through */
346 ERR("Unexpected get index type %d",
347 (int) stack_top
->load
.object_type
);
352 case LOAD_ROOT_CONTEXT
:
353 case LOAD_ROOT_APP_CONTEXT
:
354 case LOAD_ROOT_PAYLOAD
:
355 ERR("Index lookup for root field not implemented yet.");
359 data_offset
= bytecode_push_data(runtime
, &gid
,
360 __alignof__(gid
), sizeof(gid
));
361 if (data_offset
< 0) {
367 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
370 ((struct get_index_u64
*) insn
->data
)->index
= data_offset
;
383 static int specialize_context_lookup_name(struct lttng_ctx
*ctx
,
384 struct bytecode_runtime
*bytecode
,
385 struct load_op
*insn
)
390 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
391 name
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ offset
;
392 return lttng_get_context_index(ctx
, name
);
395 static int specialize_load_object(const struct lttng_event_field
*field
,
396 struct vstack_load
*load
, bool is_context
)
398 load
->type
= LOAD_OBJECT
;
400 switch (field
->type
.atype
) {
402 if (field
->type
.u
.integer
.signedness
)
403 load
->object_type
= OBJECT_TYPE_S64
;
405 load
->object_type
= OBJECT_TYPE_U64
;
406 load
->rev_bo
= false;
409 case atype_enum_nestable
:
411 const struct lttng_integer_type
*itype
;
413 if (field
->type
.atype
== atype_enum
) {
414 itype
= &field
->type
.u
.legacy
.basic
.enumeration
.container_type
;
416 itype
= &field
->type
.u
.enum_nestable
.container_type
->u
.integer
;
418 if (itype
->signedness
)
419 load
->object_type
= OBJECT_TYPE_S64
;
421 load
->object_type
= OBJECT_TYPE_U64
;
422 load
->rev_bo
= false;
426 if (field
->type
.u
.legacy
.array
.elem_type
.atype
!= atype_integer
) {
427 ERR("Array nesting only supports integer types.");
431 load
->object_type
= OBJECT_TYPE_STRING
;
433 if (field
->type
.u
.legacy
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
434 load
->object_type
= OBJECT_TYPE_ARRAY
;
437 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
441 case atype_array_nestable
:
442 if (field
->type
.u
.array_nestable
.elem_type
->atype
!= atype_integer
) {
443 ERR("Array nesting only supports integer types.");
447 load
->object_type
= OBJECT_TYPE_STRING
;
449 if (field
->type
.u
.array_nestable
.elem_type
->u
.integer
.encoding
== lttng_encode_none
) {
450 load
->object_type
= OBJECT_TYPE_ARRAY
;
453 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
458 if (field
->type
.u
.legacy
.sequence
.elem_type
.atype
!= atype_integer
) {
459 ERR("Sequence nesting only supports integer types.");
463 load
->object_type
= OBJECT_TYPE_STRING
;
465 if (field
->type
.u
.legacy
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
466 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
469 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
473 case atype_sequence_nestable
:
474 if (field
->type
.u
.sequence_nestable
.elem_type
->atype
!= atype_integer
) {
475 ERR("Sequence nesting only supports integer types.");
479 load
->object_type
= OBJECT_TYPE_STRING
;
481 if (field
->type
.u
.sequence_nestable
.elem_type
->u
.integer
.encoding
== lttng_encode_none
) {
482 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
485 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
491 load
->object_type
= OBJECT_TYPE_STRING
;
494 load
->object_type
= OBJECT_TYPE_DOUBLE
;
497 load
->object_type
= OBJECT_TYPE_DYNAMIC
;
500 ERR("Structure type cannot be loaded.");
503 ERR("Unknown type: %d", (int) field
->type
.atype
);
509 static int specialize_context_lookup(struct lttng_ctx
*ctx
,
510 struct bytecode_runtime
*runtime
,
511 struct load_op
*insn
,
512 struct vstack_load
*load
)
515 struct lttng_ctx_field
*ctx_field
;
516 struct lttng_event_field
*field
;
517 struct filter_get_index_data gid
;
520 idx
= specialize_context_lookup_name(ctx
, runtime
, insn
);
524 ctx_field
= &ctx
->fields
[idx
];
525 field
= &ctx_field
->event_field
;
526 ret
= specialize_load_object(field
, load
, true);
529 /* Specialize each get_symbol into a get_index. */
530 insn
->op
= FILTER_OP_GET_INDEX_U16
;
531 memset(&gid
, 0, sizeof(gid
));
533 gid
.elem
.type
= load
->object_type
;
534 gid
.elem
.rev_bo
= load
->rev_bo
;
536 data_offset
= bytecode_push_data(runtime
, &gid
,
537 __alignof__(gid
), sizeof(gid
));
538 if (data_offset
< 0) {
541 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
545 static int specialize_app_context_lookup(struct lttng_ctx
**pctx
,
546 struct bytecode_runtime
*runtime
,
547 struct load_op
*insn
,
548 struct vstack_load
*load
)
551 const char *orig_name
;
554 struct lttng_ctx_field
*ctx_field
;
555 struct lttng_event_field
*field
;
556 struct filter_get_index_data gid
;
559 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
560 orig_name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
561 name
= zmalloc(strlen(orig_name
) + strlen("$app.") + 1);
566 strcpy(name
, "$app.");
567 strcat(name
, orig_name
);
568 idx
= lttng_get_context_index(*pctx
, name
);
570 assert(lttng_context_is_app(name
));
571 ret
= lttng_ust_add_app_context_to_ctx_rcu(name
,
575 idx
= lttng_get_context_index(*pctx
, name
);
579 ctx_field
= &(*pctx
)->fields
[idx
];
580 field
= &ctx_field
->event_field
;
581 ret
= specialize_load_object(field
, load
, true);
584 /* Specialize each get_symbol into a get_index. */
585 insn
->op
= FILTER_OP_GET_INDEX_U16
;
586 memset(&gid
, 0, sizeof(gid
));
588 gid
.elem
.type
= load
->object_type
;
589 gid
.elem
.rev_bo
= load
->rev_bo
;
591 data_offset
= bytecode_push_data(runtime
, &gid
,
592 __alignof__(gid
), sizeof(gid
));
593 if (data_offset
< 0) {
597 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
604 static int specialize_payload_lookup(const struct lttng_event_desc
*event_desc
,
605 struct bytecode_runtime
*runtime
,
606 struct load_op
*insn
,
607 struct vstack_load
*load
)
611 unsigned int i
, nr_fields
;
613 uint32_t field_offset
= 0;
614 const struct lttng_event_field
*field
;
616 struct filter_get_index_data gid
;
619 nr_fields
= event_desc
->nr_fields
;
620 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
621 name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
622 for (i
= 0; i
< nr_fields
; i
++) {
623 field
= &event_desc
->fields
[i
];
624 if (field
->u
.ext
.nofilter
) {
627 if (!strcmp(field
->name
, name
)) {
631 /* compute field offset on stack */
632 switch (field
->type
.atype
) {
635 case atype_enum_nestable
:
636 field_offset
+= sizeof(int64_t);
639 case atype_array_nestable
:
641 case atype_sequence_nestable
:
642 field_offset
+= sizeof(unsigned long);
643 field_offset
+= sizeof(void *);
646 field_offset
+= sizeof(void *);
649 field_offset
+= sizeof(double);
661 ret
= specialize_load_object(field
, load
, false);
665 /* Specialize each get_symbol into a get_index. */
666 insn
->op
= FILTER_OP_GET_INDEX_U16
;
667 memset(&gid
, 0, sizeof(gid
));
668 gid
.offset
= field_offset
;
669 gid
.elem
.type
= load
->object_type
;
670 gid
.elem
.rev_bo
= load
->rev_bo
;
672 data_offset
= bytecode_push_data(runtime
, &gid
,
673 __alignof__(gid
), sizeof(gid
));
674 if (data_offset
< 0) {
678 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
684 int lttng_filter_specialize_bytecode(const struct lttng_event_desc
*event_desc
,
685 struct bytecode_runtime
*bytecode
)
687 void *pc
, *next_pc
, *start_pc
;
689 struct vstack _stack
;
690 struct vstack
*stack
= &_stack
;
691 struct lttng_ctx
**pctx
= bytecode
->p
.pctx
;
695 start_pc
= &bytecode
->code
[0];
696 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
698 switch (*(filter_opcode_t
*) pc
) {
699 case FILTER_OP_UNKNOWN
:
701 ERR("unknown bytecode op %u\n",
702 (unsigned int) *(filter_opcode_t
*) pc
);
706 case FILTER_OP_RETURN
:
707 if (vstack_ax(stack
)->type
== REG_S64
||
708 vstack_ax(stack
)->type
== REG_U64
)
709 *(filter_opcode_t
*) pc
= FILTER_OP_RETURN_S64
;
713 case FILTER_OP_RETURN_S64
:
714 if (vstack_ax(stack
)->type
!= REG_S64
&&
715 vstack_ax(stack
)->type
!= REG_U64
) {
716 ERR("Unexpected register type\n");
728 case FILTER_OP_MINUS
:
729 ERR("unsupported bytecode op %u\n",
730 (unsigned int) *(filter_opcode_t
*) pc
);
736 struct binary_op
*insn
= (struct binary_op
*) pc
;
738 switch(vstack_ax(stack
)->type
) {
740 ERR("unknown register type\n");
745 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
747 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
748 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
750 insn
->op
= FILTER_OP_EQ_STRING
;
752 case REG_STAR_GLOB_STRING
:
753 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
755 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
759 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
761 if (vstack_bx(stack
)->type
== REG_S64
||
762 vstack_bx(stack
)->type
== REG_U64
)
763 insn
->op
= FILTER_OP_EQ_S64
;
765 insn
->op
= FILTER_OP_EQ_DOUBLE_S64
;
768 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
770 if (vstack_bx(stack
)->type
== REG_S64
||
771 vstack_bx(stack
)->type
== REG_U64
)
772 insn
->op
= FILTER_OP_EQ_S64_DOUBLE
;
774 insn
->op
= FILTER_OP_EQ_DOUBLE
;
777 break; /* Dynamic typing. */
780 if (vstack_pop(stack
)) {
784 vstack_ax(stack
)->type
= REG_S64
;
785 next_pc
+= sizeof(struct binary_op
);
791 struct binary_op
*insn
= (struct binary_op
*) pc
;
793 switch(vstack_ax(stack
)->type
) {
795 ERR("unknown register type\n");
800 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
802 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
803 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
805 insn
->op
= FILTER_OP_NE_STRING
;
807 case REG_STAR_GLOB_STRING
:
808 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
810 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
814 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
816 if (vstack_bx(stack
)->type
== REG_S64
||
817 vstack_bx(stack
)->type
== REG_U64
)
818 insn
->op
= FILTER_OP_NE_S64
;
820 insn
->op
= FILTER_OP_NE_DOUBLE_S64
;
823 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
825 if (vstack_bx(stack
)->type
== REG_S64
||
826 vstack_bx(stack
)->type
== REG_U64
)
827 insn
->op
= FILTER_OP_NE_S64_DOUBLE
;
829 insn
->op
= FILTER_OP_NE_DOUBLE
;
832 break; /* Dynamic typing. */
835 if (vstack_pop(stack
)) {
839 vstack_ax(stack
)->type
= REG_S64
;
840 next_pc
+= sizeof(struct binary_op
);
846 struct binary_op
*insn
= (struct binary_op
*) pc
;
848 switch(vstack_ax(stack
)->type
) {
850 ERR("unknown register type\n");
854 case REG_STAR_GLOB_STRING
:
855 ERR("invalid register type for > binary operator\n");
859 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
861 insn
->op
= FILTER_OP_GT_STRING
;
865 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
867 if (vstack_bx(stack
)->type
== REG_S64
||
868 vstack_bx(stack
)->type
== REG_U64
)
869 insn
->op
= FILTER_OP_GT_S64
;
871 insn
->op
= FILTER_OP_GT_DOUBLE_S64
;
874 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
876 if (vstack_bx(stack
)->type
== REG_S64
||
877 vstack_bx(stack
)->type
== REG_U64
)
878 insn
->op
= FILTER_OP_GT_S64_DOUBLE
;
880 insn
->op
= FILTER_OP_GT_DOUBLE
;
883 break; /* Dynamic typing. */
886 if (vstack_pop(stack
)) {
890 vstack_ax(stack
)->type
= REG_S64
;
891 next_pc
+= sizeof(struct binary_op
);
897 struct binary_op
*insn
= (struct binary_op
*) pc
;
899 switch(vstack_ax(stack
)->type
) {
901 ERR("unknown register type\n");
905 case REG_STAR_GLOB_STRING
:
906 ERR("invalid register type for < binary operator\n");
910 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
912 insn
->op
= FILTER_OP_LT_STRING
;
916 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
918 if (vstack_bx(stack
)->type
== REG_S64
||
919 vstack_bx(stack
)->type
== REG_U64
)
920 insn
->op
= FILTER_OP_LT_S64
;
922 insn
->op
= FILTER_OP_LT_DOUBLE_S64
;
925 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
927 if (vstack_bx(stack
)->type
== REG_S64
||
928 vstack_bx(stack
)->type
== REG_U64
)
929 insn
->op
= FILTER_OP_LT_S64_DOUBLE
;
931 insn
->op
= FILTER_OP_LT_DOUBLE
;
934 break; /* Dynamic typing. */
937 if (vstack_pop(stack
)) {
941 vstack_ax(stack
)->type
= REG_S64
;
942 next_pc
+= sizeof(struct binary_op
);
948 struct binary_op
*insn
= (struct binary_op
*) pc
;
950 switch(vstack_ax(stack
)->type
) {
952 ERR("unknown register type\n");
956 case REG_STAR_GLOB_STRING
:
957 ERR("invalid register type for >= binary operator\n");
961 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
963 insn
->op
= FILTER_OP_GE_STRING
;
967 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
969 if (vstack_bx(stack
)->type
== REG_S64
||
970 vstack_bx(stack
)->type
== REG_U64
)
971 insn
->op
= FILTER_OP_GE_S64
;
973 insn
->op
= FILTER_OP_GE_DOUBLE_S64
;
976 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
978 if (vstack_bx(stack
)->type
== REG_S64
||
979 vstack_bx(stack
)->type
== REG_U64
)
980 insn
->op
= FILTER_OP_GE_S64_DOUBLE
;
982 insn
->op
= FILTER_OP_GE_DOUBLE
;
985 break; /* Dynamic typing. */
988 if (vstack_pop(stack
)) {
992 vstack_ax(stack
)->type
= REG_U64
;
993 next_pc
+= sizeof(struct binary_op
);
998 struct binary_op
*insn
= (struct binary_op
*) pc
;
1000 switch(vstack_ax(stack
)->type
) {
1002 ERR("unknown register type\n");
1006 case REG_STAR_GLOB_STRING
:
1007 ERR("invalid register type for <= binary operator\n");
1011 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
1013 insn
->op
= FILTER_OP_LE_STRING
;
1017 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
1019 if (vstack_bx(stack
)->type
== REG_S64
||
1020 vstack_bx(stack
)->type
== REG_U64
)
1021 insn
->op
= FILTER_OP_LE_S64
;
1023 insn
->op
= FILTER_OP_LE_DOUBLE_S64
;
1026 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
1028 if (vstack_bx(stack
)->type
== REG_S64
||
1029 vstack_bx(stack
)->type
== REG_U64
)
1030 insn
->op
= FILTER_OP_LE_S64_DOUBLE
;
1032 insn
->op
= FILTER_OP_LE_DOUBLE
;
1035 break; /* Dynamic typing. */
1037 vstack_ax(stack
)->type
= REG_S64
;
1038 next_pc
+= sizeof(struct binary_op
);
1042 case FILTER_OP_EQ_STRING
:
1043 case FILTER_OP_NE_STRING
:
1044 case FILTER_OP_GT_STRING
:
1045 case FILTER_OP_LT_STRING
:
1046 case FILTER_OP_GE_STRING
:
1047 case FILTER_OP_LE_STRING
:
1048 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1049 case FILTER_OP_NE_STAR_GLOB_STRING
:
1050 case FILTER_OP_EQ_S64
:
1051 case FILTER_OP_NE_S64
:
1052 case FILTER_OP_GT_S64
:
1053 case FILTER_OP_LT_S64
:
1054 case FILTER_OP_GE_S64
:
1055 case FILTER_OP_LE_S64
:
1056 case FILTER_OP_EQ_DOUBLE
:
1057 case FILTER_OP_NE_DOUBLE
:
1058 case FILTER_OP_GT_DOUBLE
:
1059 case FILTER_OP_LT_DOUBLE
:
1060 case FILTER_OP_GE_DOUBLE
:
1061 case FILTER_OP_LE_DOUBLE
:
1062 case FILTER_OP_EQ_DOUBLE_S64
:
1063 case FILTER_OP_NE_DOUBLE_S64
:
1064 case FILTER_OP_GT_DOUBLE_S64
:
1065 case FILTER_OP_LT_DOUBLE_S64
:
1066 case FILTER_OP_GE_DOUBLE_S64
:
1067 case FILTER_OP_LE_DOUBLE_S64
:
1068 case FILTER_OP_EQ_S64_DOUBLE
:
1069 case FILTER_OP_NE_S64_DOUBLE
:
1070 case FILTER_OP_GT_S64_DOUBLE
:
1071 case FILTER_OP_LT_S64_DOUBLE
:
1072 case FILTER_OP_GE_S64_DOUBLE
:
1073 case FILTER_OP_LE_S64_DOUBLE
:
1076 if (vstack_pop(stack
)) {
1080 vstack_ax(stack
)->type
= REG_S64
;
1081 next_pc
+= sizeof(struct binary_op
);
1085 case FILTER_OP_BIT_RSHIFT
:
1086 case FILTER_OP_BIT_LSHIFT
:
1087 case FILTER_OP_BIT_AND
:
1088 case FILTER_OP_BIT_OR
:
1089 case FILTER_OP_BIT_XOR
:
1092 if (vstack_pop(stack
)) {
1096 vstack_ax(stack
)->type
= REG_S64
;
1097 next_pc
+= sizeof(struct binary_op
);
1102 case FILTER_OP_UNARY_PLUS
:
1104 struct unary_op
*insn
= (struct unary_op
*) pc
;
1106 switch(vstack_ax(stack
)->type
) {
1108 ERR("unknown register type\n");
1114 insn
->op
= FILTER_OP_UNARY_PLUS_S64
;
1117 insn
->op
= FILTER_OP_UNARY_PLUS_DOUBLE
;
1119 case REG_UNKNOWN
: /* Dynamic typing. */
1123 next_pc
+= sizeof(struct unary_op
);
1127 case FILTER_OP_UNARY_MINUS
:
1129 struct unary_op
*insn
= (struct unary_op
*) pc
;
1131 switch(vstack_ax(stack
)->type
) {
1133 ERR("unknown register type\n");
1139 insn
->op
= FILTER_OP_UNARY_MINUS_S64
;
1142 insn
->op
= FILTER_OP_UNARY_MINUS_DOUBLE
;
1144 case REG_UNKNOWN
: /* Dynamic typing. */
1148 next_pc
+= sizeof(struct unary_op
);
1152 case FILTER_OP_UNARY_NOT
:
1154 struct unary_op
*insn
= (struct unary_op
*) pc
;
1156 switch(vstack_ax(stack
)->type
) {
1158 ERR("unknown register type\n");
1164 insn
->op
= FILTER_OP_UNARY_NOT_S64
;
1167 insn
->op
= FILTER_OP_UNARY_NOT_DOUBLE
;
1169 case REG_UNKNOWN
: /* Dynamic typing. */
1173 next_pc
+= sizeof(struct unary_op
);
1177 case FILTER_OP_UNARY_BIT_NOT
:
1180 next_pc
+= sizeof(struct unary_op
);
1184 case FILTER_OP_UNARY_PLUS_S64
:
1185 case FILTER_OP_UNARY_MINUS_S64
:
1186 case FILTER_OP_UNARY_NOT_S64
:
1187 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1188 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1189 case FILTER_OP_UNARY_NOT_DOUBLE
:
1192 next_pc
+= sizeof(struct unary_op
);
1200 /* Continue to next instruction */
1201 /* Pop 1 when jump not taken */
1202 if (vstack_pop(stack
)) {
1206 next_pc
+= sizeof(struct logical_op
);
1210 /* load field ref */
1211 case FILTER_OP_LOAD_FIELD_REF
:
1213 ERR("Unknown field ref type\n");
1217 /* get context ref */
1218 case FILTER_OP_GET_CONTEXT_REF
:
1220 if (vstack_push(stack
)) {
1224 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1225 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1228 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1229 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1230 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1232 if (vstack_push(stack
)) {
1236 vstack_ax(stack
)->type
= REG_STRING
;
1237 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1240 case FILTER_OP_LOAD_FIELD_REF_S64
:
1241 case FILTER_OP_GET_CONTEXT_REF_S64
:
1243 if (vstack_push(stack
)) {
1247 vstack_ax(stack
)->type
= REG_S64
;
1248 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1251 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1252 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1254 if (vstack_push(stack
)) {
1258 vstack_ax(stack
)->type
= REG_DOUBLE
;
1259 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1263 /* load from immediate operand */
1264 case FILTER_OP_LOAD_STRING
:
1266 struct load_op
*insn
= (struct load_op
*) pc
;
1268 if (vstack_push(stack
)) {
1272 vstack_ax(stack
)->type
= REG_STRING
;
1273 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1277 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1279 struct load_op
*insn
= (struct load_op
*) pc
;
1281 if (vstack_push(stack
)) {
1285 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1286 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1290 case FILTER_OP_LOAD_S64
:
1292 if (vstack_push(stack
)) {
1296 vstack_ax(stack
)->type
= REG_S64
;
1297 next_pc
+= sizeof(struct load_op
)
1298 + sizeof(struct literal_numeric
);
1302 case FILTER_OP_LOAD_DOUBLE
:
1304 if (vstack_push(stack
)) {
1308 vstack_ax(stack
)->type
= REG_DOUBLE
;
1309 next_pc
+= sizeof(struct load_op
)
1310 + sizeof(struct literal_double
);
1315 case FILTER_OP_CAST_TO_S64
:
1317 struct cast_op
*insn
= (struct cast_op
*) pc
;
1319 switch (vstack_ax(stack
)->type
) {
1321 ERR("unknown register type\n");
1326 case REG_STAR_GLOB_STRING
:
1327 ERR("Cast op can only be applied to numeric or floating point registers\n");
1331 insn
->op
= FILTER_OP_CAST_NOP
;
1334 insn
->op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
1341 vstack_ax(stack
)->type
= REG_S64
;
1342 next_pc
+= sizeof(struct cast_op
);
1345 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1348 vstack_ax(stack
)->type
= REG_S64
;
1349 next_pc
+= sizeof(struct cast_op
);
1352 case FILTER_OP_CAST_NOP
:
1354 next_pc
+= sizeof(struct cast_op
);
1359 * Instructions for recursive traversal through composed types.
1361 case FILTER_OP_GET_CONTEXT_ROOT
:
1363 if (vstack_push(stack
)) {
1367 vstack_ax(stack
)->type
= REG_PTR
;
1368 vstack_ax(stack
)->load
.type
= LOAD_ROOT_CONTEXT
;
1369 next_pc
+= sizeof(struct load_op
);
1372 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1374 if (vstack_push(stack
)) {
1378 vstack_ax(stack
)->type
= REG_PTR
;
1379 vstack_ax(stack
)->load
.type
= LOAD_ROOT_APP_CONTEXT
;
1380 next_pc
+= sizeof(struct load_op
);
1383 case FILTER_OP_GET_PAYLOAD_ROOT
:
1385 if (vstack_push(stack
)) {
1389 vstack_ax(stack
)->type
= REG_PTR
;
1390 vstack_ax(stack
)->load
.type
= LOAD_ROOT_PAYLOAD
;
1391 next_pc
+= sizeof(struct load_op
);
1395 case FILTER_OP_LOAD_FIELD
:
1397 struct load_op
*insn
= (struct load_op
*) pc
;
1399 assert(vstack_ax(stack
)->type
== REG_PTR
);
1401 ret
= specialize_load_field(vstack_ax(stack
), insn
);
1405 next_pc
+= sizeof(struct load_op
);
1409 case FILTER_OP_LOAD_FIELD_S8
:
1410 case FILTER_OP_LOAD_FIELD_S16
:
1411 case FILTER_OP_LOAD_FIELD_S32
:
1412 case FILTER_OP_LOAD_FIELD_S64
:
1415 vstack_ax(stack
)->type
= REG_S64
;
1416 next_pc
+= sizeof(struct load_op
);
1420 case FILTER_OP_LOAD_FIELD_U8
:
1421 case FILTER_OP_LOAD_FIELD_U16
:
1422 case FILTER_OP_LOAD_FIELD_U32
:
1423 case FILTER_OP_LOAD_FIELD_U64
:
1426 vstack_ax(stack
)->type
= REG_U64
;
1427 next_pc
+= sizeof(struct load_op
);
1431 case FILTER_OP_LOAD_FIELD_STRING
:
1432 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1435 vstack_ax(stack
)->type
= REG_STRING
;
1436 next_pc
+= sizeof(struct load_op
);
1440 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1443 vstack_ax(stack
)->type
= REG_DOUBLE
;
1444 next_pc
+= sizeof(struct load_op
);
1448 case FILTER_OP_GET_SYMBOL
:
1450 struct load_op
*insn
= (struct load_op
*) pc
;
1452 dbg_printf("op get symbol\n");
1453 switch (vstack_ax(stack
)->load
.type
) {
1455 ERR("Nested fields not implemented yet.");
1458 case LOAD_ROOT_CONTEXT
:
1459 /* Lookup context field. */
1460 ret
= specialize_context_lookup(*pctx
,
1462 &vstack_ax(stack
)->load
);
1466 case LOAD_ROOT_APP_CONTEXT
:
1467 /* Lookup app context field. */
1468 ret
= specialize_app_context_lookup(pctx
,
1470 &vstack_ax(stack
)->load
);
1474 case LOAD_ROOT_PAYLOAD
:
1475 /* Lookup event payload field. */
1476 ret
= specialize_payload_lookup(event_desc
,
1478 &vstack_ax(stack
)->load
);
1483 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1487 case FILTER_OP_GET_SYMBOL_FIELD
:
1489 /* Always generated by specialize phase. */
1494 case FILTER_OP_GET_INDEX_U16
:
1496 struct load_op
*insn
= (struct load_op
*) pc
;
1497 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1499 dbg_printf("op get index u16\n");
1501 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1502 vstack_ax(stack
), sizeof(*index
));
1505 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1509 case FILTER_OP_GET_INDEX_U64
:
1511 struct load_op
*insn
= (struct load_op
*) pc
;
1512 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1514 dbg_printf("op get index u64\n");
1516 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1517 vstack_ax(stack
), sizeof(*index
));
1520 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);