/*
 * lttng-filter-specialize.c
 *
 * LTTng UST filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
30 #include "lttng-filter.h"
31 #include <lttng/align.h>
/*
 * Find last (most significant) bit set, 1-based.
 * Returns 0 when no bit is set (val == 0), 32 when the top bit is set.
 * Binary-search implementation: each step shifts the value left and
 * decrements the candidate position when the inspected half is empty.
 */
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}
/*
 * Return the order (ceil(log2(count))) of @count, i.e. the smallest n
 * such that (1U << n) >= count. Relies on lttng_fls() for the highest
 * set bit and rounds up when count is not a power of two.
 */
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;	/* not a power of two: round up */
	return order;
}
72 static ssize_t
bytecode_reserve_data(struct bytecode_runtime
*runtime
,
73 size_t align
, size_t len
)
76 size_t padding
= offset_align(runtime
->data_len
, align
);
77 size_t new_len
= runtime
->data_len
+ padding
+ len
;
78 size_t new_alloc_len
= new_len
;
79 size_t old_alloc_len
= runtime
->data_alloc_len
;
81 if (new_len
> FILTER_MAX_DATA_LEN
)
84 if (new_alloc_len
> old_alloc_len
) {
88 max_t(size_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
89 newptr
= realloc(runtime
->data
, new_alloc_len
);
92 runtime
->data
= newptr
;
93 /* We zero directly the memory from start of allocation. */
94 memset(&runtime
->data
[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
95 runtime
->data_alloc_len
= new_alloc_len
;
97 runtime
->data_len
+= padding
;
98 ret
= runtime
->data_len
;
99 runtime
->data_len
+= len
;
103 static ssize_t
bytecode_push_data(struct bytecode_runtime
*runtime
,
104 const void *p
, size_t align
, size_t len
)
108 offset
= bytecode_reserve_data(runtime
, align
, len
);
111 memcpy(&runtime
->data
[offset
], p
, len
);
115 static int specialize_load_field(struct vstack_entry
*stack_top
,
116 struct load_op
*insn
)
120 switch (stack_top
->load
.type
) {
123 case LOAD_ROOT_CONTEXT
:
124 case LOAD_ROOT_APP_CONTEXT
:
125 case LOAD_ROOT_PAYLOAD
:
127 dbg_printf("Filter warning: cannot load root, missing field name.\n");
131 switch (stack_top
->load
.object_type
) {
133 dbg_printf("op load field s8\n");
134 stack_top
->type
= REG_S64
;
135 if (!stack_top
->load
.rev_bo
)
136 insn
->op
= FILTER_OP_LOAD_FIELD_S8
;
138 case OBJECT_TYPE_S16
:
139 dbg_printf("op load field s16\n");
140 stack_top
->type
= REG_S64
;
141 if (!stack_top
->load
.rev_bo
)
142 insn
->op
= FILTER_OP_LOAD_FIELD_S16
;
144 case OBJECT_TYPE_S32
:
145 dbg_printf("op load field s32\n");
146 stack_top
->type
= REG_S64
;
147 if (!stack_top
->load
.rev_bo
)
148 insn
->op
= FILTER_OP_LOAD_FIELD_S32
;
150 case OBJECT_TYPE_S64
:
151 dbg_printf("op load field s64\n");
152 stack_top
->type
= REG_S64
;
153 if (!stack_top
->load
.rev_bo
)
154 insn
->op
= FILTER_OP_LOAD_FIELD_S64
;
157 dbg_printf("op load field u8\n");
158 stack_top
->type
= REG_S64
;
159 insn
->op
= FILTER_OP_LOAD_FIELD_U8
;
161 case OBJECT_TYPE_U16
:
162 dbg_printf("op load field u16\n");
163 stack_top
->type
= REG_S64
;
164 if (!stack_top
->load
.rev_bo
)
165 insn
->op
= FILTER_OP_LOAD_FIELD_U16
;
167 case OBJECT_TYPE_U32
:
168 dbg_printf("op load field u32\n");
169 stack_top
->type
= REG_S64
;
170 if (!stack_top
->load
.rev_bo
)
171 insn
->op
= FILTER_OP_LOAD_FIELD_U32
;
173 case OBJECT_TYPE_U64
:
174 dbg_printf("op load field u64\n");
175 stack_top
->type
= REG_S64
;
176 if (!stack_top
->load
.rev_bo
)
177 insn
->op
= FILTER_OP_LOAD_FIELD_U64
;
179 case OBJECT_TYPE_DOUBLE
:
180 stack_top
->type
= REG_DOUBLE
;
181 insn
->op
= FILTER_OP_LOAD_FIELD_DOUBLE
;
183 case OBJECT_TYPE_STRING
:
184 dbg_printf("op load field string\n");
185 stack_top
->type
= REG_STRING
;
186 insn
->op
= FILTER_OP_LOAD_FIELD_STRING
;
188 case OBJECT_TYPE_STRING_SEQUENCE
:
189 dbg_printf("op load field string sequence\n");
190 stack_top
->type
= REG_STRING
;
191 insn
->op
= FILTER_OP_LOAD_FIELD_SEQUENCE
;
193 case OBJECT_TYPE_DYNAMIC
:
194 dbg_printf("op load field dynamic\n");
195 stack_top
->type
= REG_UNKNOWN
;
196 /* Don't specialize load op. */
198 case OBJECT_TYPE_SEQUENCE
:
199 case OBJECT_TYPE_ARRAY
:
200 case OBJECT_TYPE_STRUCT
:
201 case OBJECT_TYPE_VARIANT
:
202 ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
212 static int specialize_get_index_object_type(enum object_type
*otype
,
213 int signedness
, uint32_t elem_len
)
218 *otype
= OBJECT_TYPE_S8
;
220 *otype
= OBJECT_TYPE_U8
;
224 *otype
= OBJECT_TYPE_S16
;
226 *otype
= OBJECT_TYPE_U16
;
230 *otype
= OBJECT_TYPE_S32
;
232 *otype
= OBJECT_TYPE_U32
;
236 *otype
= OBJECT_TYPE_S64
;
238 *otype
= OBJECT_TYPE_U64
;
246 static int specialize_get_index(struct bytecode_runtime
*runtime
,
247 struct load_op
*insn
, uint64_t index
,
248 struct vstack_entry
*stack_top
,
252 struct filter_get_index_data gid
;
255 memset(&gid
, 0, sizeof(gid
));
256 switch (stack_top
->load
.type
) {
258 switch (stack_top
->load
.object_type
) {
259 case OBJECT_TYPE_ARRAY
:
261 const struct lttng_event_field
*field
;
262 uint32_t elem_len
, num_elems
;
265 field
= stack_top
->load
.field
;
266 elem_len
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.size
;
267 signedness
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.signedness
;
268 num_elems
= field
->type
.u
.array
.length
;
269 if (index
>= num_elems
) {
273 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
274 signedness
, elem_len
);
277 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
278 gid
.array_len
= num_elems
* (elem_len
/ CHAR_BIT
);
279 gid
.elem
.type
= stack_top
->load
.object_type
;
280 gid
.elem
.len
= elem_len
;
281 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
282 gid
.elem
.rev_bo
= true;
283 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
286 case OBJECT_TYPE_SEQUENCE
:
288 const struct lttng_event_field
*field
;
292 field
= stack_top
->load
.field
;
293 elem_len
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.size
;
294 signedness
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.signedness
;
295 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
296 signedness
, elem_len
);
299 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
300 gid
.elem
.type
= stack_top
->load
.object_type
;
301 gid
.elem
.len
= elem_len
;
302 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
303 gid
.elem
.rev_bo
= true;
304 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
307 case OBJECT_TYPE_STRUCT
:
308 /* Only generated by the specialize phase. */
309 case OBJECT_TYPE_VARIANT
: /* Fall-through */
311 ERR("Unexpected get index type %d",
312 (int) stack_top
->load
.object_type
);
317 case LOAD_ROOT_CONTEXT
:
318 case LOAD_ROOT_APP_CONTEXT
:
319 case LOAD_ROOT_PAYLOAD
:
320 ERR("Index lookup for root field not implemented yet.");
324 data_offset
= bytecode_push_data(runtime
, &gid
,
325 __alignof__(gid
), sizeof(gid
));
326 if (data_offset
< 0) {
332 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
335 ((struct get_index_u64
*) insn
->data
)->index
= data_offset
;
348 static int specialize_context_lookup_name(struct lttng_ctx
*ctx
,
349 struct bytecode_runtime
*bytecode
,
350 struct load_op
*insn
)
355 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
356 name
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ offset
;
357 return lttng_get_context_index(ctx
, name
);
360 static int specialize_load_object(const struct lttng_event_field
*field
,
361 struct vstack_load
*load
, bool is_context
)
363 load
->type
= LOAD_OBJECT
;
365 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
367 switch (field
->type
.atype
) {
369 if (field
->type
.u
.basic
.integer
.signedness
)
370 load
->object_type
= OBJECT_TYPE_S64
;
372 load
->object_type
= OBJECT_TYPE_U64
;
373 load
->rev_bo
= false;
377 const struct lttng_integer_type
*itype
=
378 &field
->type
.u
.basic
.enumeration
.container_type
;
380 if (itype
->signedness
)
381 load
->object_type
= OBJECT_TYPE_S64
;
383 load
->object_type
= OBJECT_TYPE_U64
;
384 load
->rev_bo
= false;
388 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
389 ERR("Array nesting only supports integer types.");
393 load
->object_type
= OBJECT_TYPE_STRING
;
395 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
396 load
->object_type
= OBJECT_TYPE_ARRAY
;
399 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
404 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
405 ERR("Sequence nesting only supports integer types.");
409 load
->object_type
= OBJECT_TYPE_STRING
;
411 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
412 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
415 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
420 load
->object_type
= OBJECT_TYPE_STRING
;
423 load
->object_type
= OBJECT_TYPE_DOUBLE
;
426 load
->object_type
= OBJECT_TYPE_DYNAMIC
;
429 ERR("Structure type cannot be loaded.");
432 ERR("Unknown type: %d", (int) field
->type
.atype
);
438 static int specialize_context_lookup(struct lttng_session
*session
,
439 struct bytecode_runtime
*runtime
,
440 struct load_op
*insn
,
441 struct vstack_load
*load
)
444 struct lttng_ctx_field
*ctx_field
;
445 struct lttng_event_field
*field
;
446 struct filter_get_index_data gid
;
449 idx
= specialize_context_lookup_name(session
->ctx
, runtime
, insn
);
453 ctx_field
= &session
->ctx
->fields
[idx
];
454 field
= &ctx_field
->event_field
;
455 ret
= specialize_load_object(field
, load
, true);
458 /* Specialize each get_symbol into a get_index. */
459 insn
->op
= FILTER_OP_GET_INDEX_U16
;
460 memset(&gid
, 0, sizeof(gid
));
462 gid
.elem
.type
= load
->object_type
;
463 data_offset
= bytecode_push_data(runtime
, &gid
,
464 __alignof__(gid
), sizeof(gid
));
465 if (data_offset
< 0) {
468 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
472 static int specialize_app_context_lookup(struct lttng_session
*session
,
473 struct bytecode_runtime
*runtime
,
474 struct load_op
*insn
,
475 struct vstack_load
*load
)
478 const char *orig_name
;
481 struct lttng_ctx_field
*ctx_field
;
482 struct lttng_event_field
*field
;
483 struct filter_get_index_data gid
;
486 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
487 orig_name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
488 name
= zmalloc(strlen(orig_name
) + strlen("$app.") + 1);
493 strcpy(name
, "$app.");
494 strcat(name
, orig_name
);
495 idx
= lttng_get_context_index(session
->ctx
, name
);
497 assert(lttng_context_is_app(name
));
498 ret
= lttng_ust_add_app_context_to_ctx_rcu(name
,
502 idx
= lttng_get_context_index(session
->ctx
,
507 ctx_field
= &session
->ctx
->fields
[idx
];
508 field
= &ctx_field
->event_field
;
509 ret
= specialize_load_object(field
, load
, true);
512 /* Specialize each get_symbol into a get_index. */
513 insn
->op
= FILTER_OP_GET_INDEX_U16
;
514 memset(&gid
, 0, sizeof(gid
));
516 gid
.elem
.type
= load
->object_type
;
517 data_offset
= bytecode_push_data(runtime
, &gid
,
518 __alignof__(gid
), sizeof(gid
));
519 if (data_offset
< 0) {
523 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
530 static int specialize_event_payload_lookup(struct lttng_event
*event
,
531 struct bytecode_runtime
*runtime
,
532 struct load_op
*insn
,
533 struct vstack_load
*load
)
537 const struct lttng_event_desc
*desc
= event
->desc
;
538 unsigned int i
, nr_fields
;
540 uint32_t field_offset
= 0;
541 const struct lttng_event_field
*field
;
543 struct filter_get_index_data gid
;
546 nr_fields
= desc
->nr_fields
;
547 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
548 name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
549 for (i
= 0; i
< nr_fields
; i
++) {
550 field
= &desc
->fields
[i
];
551 if (!strcmp(field
->name
, name
)) {
555 /* compute field offset on stack */
556 switch (field
->type
.atype
) {
559 field_offset
+= sizeof(int64_t);
563 field_offset
+= sizeof(unsigned long);
564 field_offset
+= sizeof(void *);
567 field_offset
+= sizeof(void *);
570 field_offset
+= sizeof(double);
582 ret
= specialize_load_object(field
, load
, false);
586 /* Specialize each get_symbol into a get_index. */
587 insn
->op
= FILTER_OP_GET_INDEX_U16
;
588 memset(&gid
, 0, sizeof(gid
));
589 gid
.offset
= field_offset
;
590 gid
.elem
.type
= load
->object_type
;
591 data_offset
= bytecode_push_data(runtime
, &gid
,
592 __alignof__(gid
), sizeof(gid
));
593 if (data_offset
< 0) {
597 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
603 int lttng_filter_specialize_bytecode(struct lttng_event
*event
,
604 struct bytecode_runtime
*bytecode
)
606 void *pc
, *next_pc
, *start_pc
;
608 struct vstack _stack
;
609 struct vstack
*stack
= &_stack
;
610 struct lttng_session
*session
= bytecode
->p
.session
;
614 start_pc
= &bytecode
->code
[0];
615 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
617 switch (*(filter_opcode_t
*) pc
) {
618 case FILTER_OP_UNKNOWN
:
620 ERR("unknown bytecode op %u\n",
621 (unsigned int) *(filter_opcode_t
*) pc
);
625 case FILTER_OP_RETURN
:
626 if (vstack_ax(stack
)->type
== REG_S64
)
627 *(filter_opcode_t
*) pc
= FILTER_OP_RETURN_S64
;
631 case FILTER_OP_RETURN_S64
:
632 if (vstack_ax(stack
)->type
!= REG_S64
) {
633 ERR("Unexpected register type\n");
645 case FILTER_OP_MINUS
:
646 ERR("unsupported bytecode op %u\n",
647 (unsigned int) *(filter_opcode_t
*) pc
);
653 struct binary_op
*insn
= (struct binary_op
*) pc
;
655 switch(vstack_ax(stack
)->type
) {
657 ERR("unknown register type\n");
662 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
664 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
665 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
667 insn
->op
= FILTER_OP_EQ_STRING
;
669 case REG_STAR_GLOB_STRING
:
670 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
672 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
675 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
677 if (vstack_bx(stack
)->type
== REG_S64
)
678 insn
->op
= FILTER_OP_EQ_S64
;
680 insn
->op
= FILTER_OP_EQ_DOUBLE_S64
;
683 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
685 if (vstack_bx(stack
)->type
== REG_S64
)
686 insn
->op
= FILTER_OP_EQ_S64_DOUBLE
;
688 insn
->op
= FILTER_OP_EQ_DOUBLE
;
691 break; /* Dynamic typing. */
694 if (vstack_pop(stack
)) {
698 vstack_ax(stack
)->type
= REG_S64
;
699 next_pc
+= sizeof(struct binary_op
);
705 struct binary_op
*insn
= (struct binary_op
*) pc
;
707 switch(vstack_ax(stack
)->type
) {
709 ERR("unknown register type\n");
714 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
716 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
717 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
719 insn
->op
= FILTER_OP_NE_STRING
;
721 case REG_STAR_GLOB_STRING
:
722 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
724 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
727 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
729 if (vstack_bx(stack
)->type
== REG_S64
)
730 insn
->op
= FILTER_OP_NE_S64
;
732 insn
->op
= FILTER_OP_NE_DOUBLE_S64
;
735 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
737 if (vstack_bx(stack
)->type
== REG_S64
)
738 insn
->op
= FILTER_OP_NE_S64_DOUBLE
;
740 insn
->op
= FILTER_OP_NE_DOUBLE
;
743 break; /* Dynamic typing. */
746 if (vstack_pop(stack
)) {
750 vstack_ax(stack
)->type
= REG_S64
;
751 next_pc
+= sizeof(struct binary_op
);
757 struct binary_op
*insn
= (struct binary_op
*) pc
;
759 switch(vstack_ax(stack
)->type
) {
761 ERR("unknown register type\n");
765 case REG_STAR_GLOB_STRING
:
766 ERR("invalid register type for > binary operator\n");
770 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
772 insn
->op
= FILTER_OP_GT_STRING
;
775 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
777 if (vstack_bx(stack
)->type
== REG_S64
)
778 insn
->op
= FILTER_OP_GT_S64
;
780 insn
->op
= FILTER_OP_GT_DOUBLE_S64
;
783 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
785 if (vstack_bx(stack
)->type
== REG_S64
)
786 insn
->op
= FILTER_OP_GT_S64_DOUBLE
;
788 insn
->op
= FILTER_OP_GT_DOUBLE
;
791 break; /* Dynamic typing. */
794 if (vstack_pop(stack
)) {
798 vstack_ax(stack
)->type
= REG_S64
;
799 next_pc
+= sizeof(struct binary_op
);
805 struct binary_op
*insn
= (struct binary_op
*) pc
;
807 switch(vstack_ax(stack
)->type
) {
809 ERR("unknown register type\n");
813 case REG_STAR_GLOB_STRING
:
814 ERR("invalid register type for < binary operator\n");
818 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
820 insn
->op
= FILTER_OP_LT_STRING
;
823 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
825 if (vstack_bx(stack
)->type
== REG_S64
)
826 insn
->op
= FILTER_OP_LT_S64
;
828 insn
->op
= FILTER_OP_LT_DOUBLE_S64
;
831 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
833 if (vstack_bx(stack
)->type
== REG_S64
)
834 insn
->op
= FILTER_OP_LT_S64_DOUBLE
;
836 insn
->op
= FILTER_OP_LT_DOUBLE
;
839 break; /* Dynamic typing. */
842 if (vstack_pop(stack
)) {
846 vstack_ax(stack
)->type
= REG_S64
;
847 next_pc
+= sizeof(struct binary_op
);
853 struct binary_op
*insn
= (struct binary_op
*) pc
;
855 switch(vstack_ax(stack
)->type
) {
857 ERR("unknown register type\n");
861 case REG_STAR_GLOB_STRING
:
862 ERR("invalid register type for >= binary operator\n");
866 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
868 insn
->op
= FILTER_OP_GE_STRING
;
871 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
873 if (vstack_bx(stack
)->type
== REG_S64
)
874 insn
->op
= FILTER_OP_GE_S64
;
876 insn
->op
= FILTER_OP_GE_DOUBLE_S64
;
879 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
881 if (vstack_bx(stack
)->type
== REG_S64
)
882 insn
->op
= FILTER_OP_GE_S64_DOUBLE
;
884 insn
->op
= FILTER_OP_GE_DOUBLE
;
887 break; /* Dynamic typing. */
890 if (vstack_pop(stack
)) {
894 vstack_ax(stack
)->type
= REG_S64
;
895 next_pc
+= sizeof(struct binary_op
);
900 struct binary_op
*insn
= (struct binary_op
*) pc
;
902 switch(vstack_ax(stack
)->type
) {
904 ERR("unknown register type\n");
908 case REG_STAR_GLOB_STRING
:
909 ERR("invalid register type for <= binary operator\n");
913 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
915 insn
->op
= FILTER_OP_LE_STRING
;
918 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
920 if (vstack_bx(stack
)->type
== REG_S64
)
921 insn
->op
= FILTER_OP_LE_S64
;
923 insn
->op
= FILTER_OP_LE_DOUBLE_S64
;
926 if (vstack_bx(stack
)->type
== REG_UNKNOWN
)
928 if (vstack_bx(stack
)->type
== REG_S64
)
929 insn
->op
= FILTER_OP_LE_S64_DOUBLE
;
931 insn
->op
= FILTER_OP_LE_DOUBLE
;
934 break; /* Dynamic typing. */
936 vstack_ax(stack
)->type
= REG_S64
;
937 next_pc
+= sizeof(struct binary_op
);
941 case FILTER_OP_EQ_STRING
:
942 case FILTER_OP_NE_STRING
:
943 case FILTER_OP_GT_STRING
:
944 case FILTER_OP_LT_STRING
:
945 case FILTER_OP_GE_STRING
:
946 case FILTER_OP_LE_STRING
:
947 case FILTER_OP_EQ_STAR_GLOB_STRING
:
948 case FILTER_OP_NE_STAR_GLOB_STRING
:
949 case FILTER_OP_EQ_S64
:
950 case FILTER_OP_NE_S64
:
951 case FILTER_OP_GT_S64
:
952 case FILTER_OP_LT_S64
:
953 case FILTER_OP_GE_S64
:
954 case FILTER_OP_LE_S64
:
955 case FILTER_OP_EQ_DOUBLE
:
956 case FILTER_OP_NE_DOUBLE
:
957 case FILTER_OP_GT_DOUBLE
:
958 case FILTER_OP_LT_DOUBLE
:
959 case FILTER_OP_GE_DOUBLE
:
960 case FILTER_OP_LE_DOUBLE
:
961 case FILTER_OP_EQ_DOUBLE_S64
:
962 case FILTER_OP_NE_DOUBLE_S64
:
963 case FILTER_OP_GT_DOUBLE_S64
:
964 case FILTER_OP_LT_DOUBLE_S64
:
965 case FILTER_OP_GE_DOUBLE_S64
:
966 case FILTER_OP_LE_DOUBLE_S64
:
967 case FILTER_OP_EQ_S64_DOUBLE
:
968 case FILTER_OP_NE_S64_DOUBLE
:
969 case FILTER_OP_GT_S64_DOUBLE
:
970 case FILTER_OP_LT_S64_DOUBLE
:
971 case FILTER_OP_GE_S64_DOUBLE
:
972 case FILTER_OP_LE_S64_DOUBLE
:
973 case FILTER_OP_BIT_RSHIFT
:
974 case FILTER_OP_BIT_LSHIFT
:
975 case FILTER_OP_BIT_AND
:
976 case FILTER_OP_BIT_OR
:
977 case FILTER_OP_BIT_XOR
:
980 if (vstack_pop(stack
)) {
984 vstack_ax(stack
)->type
= REG_S64
;
985 next_pc
+= sizeof(struct binary_op
);
990 case FILTER_OP_UNARY_PLUS
:
992 struct unary_op
*insn
= (struct unary_op
*) pc
;
994 switch(vstack_ax(stack
)->type
) {
996 ERR("unknown register type\n");
1001 insn
->op
= FILTER_OP_UNARY_PLUS_S64
;
1004 insn
->op
= FILTER_OP_UNARY_PLUS_DOUBLE
;
1006 case REG_UNKNOWN
: /* Dynamic typing. */
1010 next_pc
+= sizeof(struct unary_op
);
1014 case FILTER_OP_UNARY_MINUS
:
1016 struct unary_op
*insn
= (struct unary_op
*) pc
;
1018 switch(vstack_ax(stack
)->type
) {
1020 ERR("unknown register type\n");
1025 insn
->op
= FILTER_OP_UNARY_MINUS_S64
;
1028 insn
->op
= FILTER_OP_UNARY_MINUS_DOUBLE
;
1030 case REG_UNKNOWN
: /* Dynamic typing. */
1034 next_pc
+= sizeof(struct unary_op
);
1038 case FILTER_OP_UNARY_NOT
:
1040 struct unary_op
*insn
= (struct unary_op
*) pc
;
1042 switch(vstack_ax(stack
)->type
) {
1044 ERR("unknown register type\n");
1049 insn
->op
= FILTER_OP_UNARY_NOT_S64
;
1052 insn
->op
= FILTER_OP_UNARY_NOT_DOUBLE
;
1054 case REG_UNKNOWN
: /* Dynamic typing. */
1058 next_pc
+= sizeof(struct unary_op
);
1062 case FILTER_OP_UNARY_BIT_NOT
:
1065 next_pc
+= sizeof(struct unary_op
);
1069 case FILTER_OP_UNARY_PLUS_S64
:
1070 case FILTER_OP_UNARY_MINUS_S64
:
1071 case FILTER_OP_UNARY_NOT_S64
:
1072 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1073 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1074 case FILTER_OP_UNARY_NOT_DOUBLE
:
1077 next_pc
+= sizeof(struct unary_op
);
1085 /* Continue to next instruction */
1086 /* Pop 1 when jump not taken */
1087 if (vstack_pop(stack
)) {
1091 next_pc
+= sizeof(struct logical_op
);
1095 /* load field ref */
1096 case FILTER_OP_LOAD_FIELD_REF
:
1098 ERR("Unknown field ref type\n");
1102 /* get context ref */
1103 case FILTER_OP_GET_CONTEXT_REF
:
1105 if (vstack_push(stack
)) {
1109 vstack_ax(stack
)->type
= REG_UNKNOWN
;
1110 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1113 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1114 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1115 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1117 if (vstack_push(stack
)) {
1121 vstack_ax(stack
)->type
= REG_STRING
;
1122 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1125 case FILTER_OP_LOAD_FIELD_REF_S64
:
1126 case FILTER_OP_GET_CONTEXT_REF_S64
:
1128 if (vstack_push(stack
)) {
1132 vstack_ax(stack
)->type
= REG_S64
;
1133 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1136 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1137 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1139 if (vstack_push(stack
)) {
1143 vstack_ax(stack
)->type
= REG_DOUBLE
;
1144 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1148 /* load from immediate operand */
1149 case FILTER_OP_LOAD_STRING
:
1151 struct load_op
*insn
= (struct load_op
*) pc
;
1153 if (vstack_push(stack
)) {
1157 vstack_ax(stack
)->type
= REG_STRING
;
1158 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1162 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1164 struct load_op
*insn
= (struct load_op
*) pc
;
1166 if (vstack_push(stack
)) {
1170 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1171 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1175 case FILTER_OP_LOAD_S64
:
1177 if (vstack_push(stack
)) {
1181 vstack_ax(stack
)->type
= REG_S64
;
1182 next_pc
+= sizeof(struct load_op
)
1183 + sizeof(struct literal_numeric
);
1187 case FILTER_OP_LOAD_DOUBLE
:
1189 if (vstack_push(stack
)) {
1193 vstack_ax(stack
)->type
= REG_DOUBLE
;
1194 next_pc
+= sizeof(struct load_op
)
1195 + sizeof(struct literal_double
);
1200 case FILTER_OP_CAST_TO_S64
:
1202 struct cast_op
*insn
= (struct cast_op
*) pc
;
1204 switch (vstack_ax(stack
)->type
) {
1206 ERR("unknown register type\n");
1211 case REG_STAR_GLOB_STRING
:
1212 ERR("Cast op can only be applied to numeric or floating point registers\n");
1216 insn
->op
= FILTER_OP_CAST_NOP
;
1219 insn
->op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
1225 vstack_ax(stack
)->type
= REG_S64
;
1226 next_pc
+= sizeof(struct cast_op
);
1229 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1232 vstack_ax(stack
)->type
= REG_S64
;
1233 next_pc
+= sizeof(struct cast_op
);
1236 case FILTER_OP_CAST_NOP
:
1238 next_pc
+= sizeof(struct cast_op
);
1243 * Instructions for recursive traversal through composed types.
1245 case FILTER_OP_GET_CONTEXT_ROOT
:
1247 if (vstack_push(stack
)) {
1251 vstack_ax(stack
)->type
= REG_PTR
;
1252 vstack_ax(stack
)->load
.type
= LOAD_ROOT_CONTEXT
;
1253 next_pc
+= sizeof(struct load_op
);
1256 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1258 if (vstack_push(stack
)) {
1262 vstack_ax(stack
)->type
= REG_PTR
;
1263 vstack_ax(stack
)->load
.type
= LOAD_ROOT_APP_CONTEXT
;
1264 next_pc
+= sizeof(struct load_op
);
1267 case FILTER_OP_GET_PAYLOAD_ROOT
:
1269 if (vstack_push(stack
)) {
1273 vstack_ax(stack
)->type
= REG_PTR
;
1274 vstack_ax(stack
)->load
.type
= LOAD_ROOT_PAYLOAD
;
1275 next_pc
+= sizeof(struct load_op
);
1279 case FILTER_OP_LOAD_FIELD
:
1281 struct load_op
*insn
= (struct load_op
*) pc
;
1283 assert(vstack_ax(stack
)->type
== REG_PTR
);
1285 ret
= specialize_load_field(vstack_ax(stack
), insn
);
1289 next_pc
+= sizeof(struct load_op
);
1293 case FILTER_OP_LOAD_FIELD_S8
:
1294 case FILTER_OP_LOAD_FIELD_S16
:
1295 case FILTER_OP_LOAD_FIELD_S32
:
1296 case FILTER_OP_LOAD_FIELD_S64
:
1297 case FILTER_OP_LOAD_FIELD_U8
:
1298 case FILTER_OP_LOAD_FIELD_U16
:
1299 case FILTER_OP_LOAD_FIELD_U32
:
1300 case FILTER_OP_LOAD_FIELD_U64
:
1303 vstack_ax(stack
)->type
= REG_S64
;
1304 next_pc
+= sizeof(struct load_op
);
1308 case FILTER_OP_LOAD_FIELD_STRING
:
1309 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1312 vstack_ax(stack
)->type
= REG_STRING
;
1313 next_pc
+= sizeof(struct load_op
);
1317 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1320 vstack_ax(stack
)->type
= REG_DOUBLE
;
1321 next_pc
+= sizeof(struct load_op
);
1325 case FILTER_OP_GET_SYMBOL
:
1327 struct load_op
*insn
= (struct load_op
*) pc
;
1329 dbg_printf("op get symbol\n");
1330 switch (vstack_ax(stack
)->load
.type
) {
1332 ERR("Nested fields not implemented yet.");
1335 case LOAD_ROOT_CONTEXT
:
1336 /* Lookup context field. */
1337 ret
= specialize_context_lookup(session
,
1339 &vstack_ax(stack
)->load
);
1343 case LOAD_ROOT_APP_CONTEXT
:
1344 /* Lookup app context field. */
1345 ret
= specialize_app_context_lookup(session
,
1347 &vstack_ax(stack
)->load
);
1351 case LOAD_ROOT_PAYLOAD
:
1352 /* Lookup event payload field. */
1353 ret
= specialize_event_payload_lookup(event
,
1355 &vstack_ax(stack
)->load
);
1360 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1364 case FILTER_OP_GET_SYMBOL_FIELD
:
1366 /* Always generated by specialize phase. */
1371 case FILTER_OP_GET_INDEX_U16
:
1373 struct load_op
*insn
= (struct load_op
*) pc
;
1374 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1376 dbg_printf("op get index u16\n");
1378 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1379 vstack_ax(stack
), sizeof(*index
));
1382 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1386 case FILTER_OP_GET_INDEX_U64
:
1388 struct load_op
*insn
= (struct load_op
*) pc
;
1389 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1391 dbg_printf("op get index u64\n");
1393 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1394 vstack_ax(stack
), sizeof(*index
));
1397 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);