/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"
31 #define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b)))
34 //#define INIT_ALLOC_SIZE PAGE_SIZE
35 #define INIT_ALLOC_SIZE 4
38 int recursive_visit_gen_bytecode(struct filter_parser_ctx
*ctx
,
42 int bytecode_init(struct lttng_filter_bytecode_alloc
**fb
)
44 *fb
= calloc(sizeof(struct lttng_filter_bytecode_alloc
) + INIT_ALLOC_SIZE
, 1);
48 (*fb
)->alloc_len
= INIT_ALLOC_SIZE
;
54 int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc
**fb
, uint32_t align
, uint32_t len
)
57 uint32_t padding
= offset_align((*fb
)->b
.len
, align
);
59 if ((*fb
)->b
.len
+ padding
+ len
> (*fb
)->alloc_len
) {
61 max_t(uint32_t, (*fb
)->b
.len
+ padding
+ len
,
62 (*fb
)->alloc_len
<< 1);
63 uint32_t old_len
= (*fb
)->alloc_len
;
67 *fb
= realloc(*fb
, sizeof(struct lttng_filter_bytecode_alloc
) + new_len
);
70 memset(&(*fb
)->b
.data
[old_len
], 0, new_len
- old_len
);
71 (*fb
)->alloc_len
= new_len
;
73 (*fb
)->b
.len
+= padding
;
80 int bytecode_push(struct lttng_filter_bytecode_alloc
**fb
, const void *data
,
81 uint32_t align
, uint32_t len
)
85 offset
= bytecode_reserve(fb
, align
, len
);
88 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
93 int bytecode_push_logical(struct lttng_filter_bytecode_alloc
**fb
,
94 struct logical_op
*data
,
95 uint32_t align
, uint32_t len
,
96 uint16_t *skip_offset
)
100 offset
= bytecode_reserve(fb
, align
, len
);
103 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
105 (void *) &((struct logical_op
*) &(*fb
)->b
.data
[offset
])->skip_offset
106 - (void *) &(*fb
)->b
.data
[0];
111 int bytecode_patch(struct lttng_filter_bytecode_alloc
**fb
,
116 if (offset
>= (*fb
)->b
.len
) {
119 memcpy(&(*fb
)->b
.data
[offset
], data
, len
);
124 int visit_node_root(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
127 struct return_op insn
;
130 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.root
.child
);
134 /* Generate end of bytecode instruction */
135 insn
.op
= FILTER_OP_RETURN
;
136 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
140 enum filter_register
reg_sel(struct ir_op
*node
)
142 switch (node
->side
) {
143 case IR_SIDE_UNKNOWN
:
145 fprintf(stderr
, "[error] Unknown node side in %s\n",
156 int visit_node_load(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
160 switch (node
->data_type
) {
161 case IR_DATA_UNKNOWN
:
163 fprintf(stderr
, "[error] Unknown data type in %s\n",
169 struct load_op
*insn
;
170 uint32_t insn_len
= sizeof(struct load_op
)
171 + strlen(node
->u
.load
.u
.string
) + 1;
173 insn
= calloc(insn_len
, 1);
176 insn
->op
= FILTER_OP_LOAD_STRING
;
177 insn
->reg
= reg_sel(node
);
178 if (insn
->reg
== REG_ERROR
)
180 strcpy(insn
->data
, node
->u
.load
.u
.string
);
181 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
185 case IR_DATA_NUMERIC
:
187 struct load_op
*insn
;
188 uint32_t insn_len
= sizeof(struct load_op
)
189 + sizeof(struct literal_numeric
);
191 insn
= calloc(insn_len
, 1);
194 insn
->op
= FILTER_OP_LOAD_S64
;
195 insn
->reg
= reg_sel(node
);
196 if (insn
->reg
== REG_ERROR
)
198 *(int64_t *) insn
->data
= node
->u
.load
.u
.num
;
199 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
205 struct load_op
*insn
;
206 uint32_t insn_len
= sizeof(struct load_op
)
207 + sizeof(struct literal_double
);
209 insn
= calloc(insn_len
, 1);
212 insn
->op
= FILTER_OP_LOAD_DOUBLE
;
213 insn
->reg
= reg_sel(node
);
214 if (insn
->reg
== REG_ERROR
)
216 *(double *) insn
->data
= node
->u
.load
.u
.flt
;
217 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
221 case IR_DATA_FIELD_REF
:
223 struct load_op
*insn
;
224 uint32_t insn_len
= sizeof(struct load_op
)
225 + sizeof(struct field_ref
);
226 struct field_ref ref_offset
;
227 uint16_t reloc_offset
;
229 insn
= calloc(insn_len
, 1);
232 insn
->op
= FILTER_OP_LOAD_FIELD_REF
;
233 insn
->reg
= reg_sel(node
);
234 ref_offset
.offset
= (uint16_t) -1U;
235 memcpy(insn
->data
, &ref_offset
, sizeof(ref_offset
));
236 if (insn
->reg
== REG_ERROR
)
238 /* reloc_offset points to struct load_op */
239 reloc_offset
= bytecode_get_len(&ctx
->bytecode
->b
);
240 ret
= bytecode_push(&ctx
->bytecode
, insn
, 1, insn_len
);
246 ret
= bytecode_push(&ctx
->bytecode_reloc
, &reloc_offset
,
247 1, sizeof(reloc_offset
));
252 ret
= bytecode_push(&ctx
->bytecode_reloc
, node
->u
.load
.u
.ref
,
253 1, strlen(node
->u
.load
.u
.ref
) + 1);
261 int visit_node_unary(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
264 struct unary_op insn
;
267 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.unary
.child
);
271 /* Generate end of bytecode instruction */
272 switch (node
->u
.unary
.type
) {
273 case AST_UNARY_UNKNOWN
:
275 fprintf(stderr
, "[error] Unknown unary node type in %s\n",
281 case AST_UNARY_MINUS
:
282 insn
.op
= FILTER_OP_UNARY_MINUS
;
283 insn
.reg
= reg_sel(node
);
284 if (insn
.reg
== REG_ERROR
)
286 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
288 insn
.op
= FILTER_OP_UNARY_NOT
;
289 insn
.reg
= reg_sel(node
);
290 if (insn
.reg
== REG_ERROR
)
292 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
297 * Binary comparator nesting is disallowed. This allows fitting into
301 int visit_node_binary(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
304 struct binary_op insn
;
307 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.left
);
310 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.right
);
314 switch (node
->u
.binary
.type
) {
317 fprintf(stderr
, "[error] Unknown unary node type in %s\n",
323 fprintf(stderr
, "[error] Unexpected logical node type in %s\n",
328 insn
.op
= FILTER_OP_MUL
;
331 insn
.op
= FILTER_OP_DIV
;
334 insn
.op
= FILTER_OP_MOD
;
337 insn
.op
= FILTER_OP_PLUS
;
340 insn
.op
= FILTER_OP_MINUS
;
343 insn
.op
= FILTER_OP_RSHIFT
;
346 insn
.op
= FILTER_OP_LSHIFT
;
349 insn
.op
= FILTER_OP_BIN_AND
;
352 insn
.op
= FILTER_OP_BIN_OR
;
355 insn
.op
= FILTER_OP_BIN_XOR
;
359 insn
.op
= FILTER_OP_EQ
;
362 insn
.op
= FILTER_OP_NE
;
365 insn
.op
= FILTER_OP_GT
;
368 insn
.op
= FILTER_OP_LT
;
371 insn
.op
= FILTER_OP_GE
;
374 insn
.op
= FILTER_OP_LE
;
377 return bytecode_push(&ctx
->bytecode
, &insn
, 1, sizeof(insn
));
381 * A logical op always return a s64 (1 or 0).
384 int visit_node_logical(struct filter_parser_ctx
*ctx
, struct ir_op
*node
)
387 struct logical_op insn
;
388 uint16_t skip_offset_loc
;
391 /* Visit left child */
392 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.left
);
395 /* Cast to s64 if float or field ref */
396 if (node
->u
.binary
.left
->data_type
== IR_DATA_FIELD_REF
397 || node
->u
.binary
.left
->data_type
== IR_DATA_FLOAT
) {
398 struct cast_op cast_insn
;
400 cast_insn
.op
= FILTER_OP_CAST_TO_S64
;
401 cast_insn
.reg
= REG_R0
;
402 ret
= bytecode_push(&ctx
->bytecode
, &cast_insn
,
403 1, sizeof(cast_insn
));
407 switch (node
->u
.logical
.type
) {
409 fprintf(stderr
, "[error] Unknown node type in %s\n",
414 insn
.op
= FILTER_OP_AND
;
417 insn
.op
= FILTER_OP_OR
;
420 insn
.skip_offset
= (uint16_t) -1UL; /* Temporary */
421 ret
= bytecode_push_logical(&ctx
->bytecode
, &insn
, 1, sizeof(insn
),
425 /* Visit right child */
426 ret
= recursive_visit_gen_bytecode(ctx
, node
->u
.binary
.right
);
429 /* Cast to s64 if float or field ref */
430 if (node
->u
.binary
.right
->data_type
== IR_DATA_FIELD_REF
431 || node
->u
.binary
.right
->data_type
== IR_DATA_FLOAT
) {
432 struct cast_op cast_insn
;
434 cast_insn
.op
= FILTER_OP_CAST_TO_S64
;
435 cast_insn
.reg
= REG_R0
;
436 ret
= bytecode_push(&ctx
->bytecode
, &cast_insn
,
437 1, sizeof(cast_insn
));
441 /* We now know where the logical op can skip. */
442 target_loc
= (uint16_t) bytecode_get_len(&ctx
->bytecode
->b
);
443 ret
= bytecode_patch(&ctx
->bytecode
,
444 &target_loc
, /* Offset to jump to */
445 skip_offset_loc
, /* Where to patch */
451 * Postorder traversal of the tree. We need the children result before
452 * we can evaluate the parent.
455 int recursive_visit_gen_bytecode(struct filter_parser_ctx
*ctx
,
461 fprintf(stderr
, "[error] Unknown node type in %s\n",
466 return visit_node_root(ctx
, node
);
468 return visit_node_load(ctx
, node
);
470 return visit_node_unary(ctx
, node
);
472 return visit_node_binary(ctx
, node
);
474 return visit_node_logical(ctx
, node
);
478 void filter_bytecode_free(struct filter_parser_ctx
*ctx
)
481 ctx
->bytecode
= NULL
;
482 free(ctx
->bytecode_reloc
);
483 ctx
->bytecode_reloc
= NULL
;
486 int filter_visitor_bytecode_generate(struct filter_parser_ctx
*ctx
)
490 ret
= bytecode_init(&ctx
->bytecode
);
493 ret
= bytecode_init(&ctx
->bytecode_reloc
);
496 ret
= recursive_visit_gen_bytecode(ctx
, ctx
->ir_root
);
500 /* Finally, append symbol table to bytecode */
501 ctx
->bytecode
->b
.reloc_table_offset
= bytecode_get_len(&ctx
->bytecode
->b
);
502 return bytecode_push(&ctx
->bytecode
, ctx
->bytecode_reloc
->b
.data
,
503 1, bytecode_get_len(&ctx
->bytecode_reloc
->b
));
506 filter_bytecode_free(ctx
);