Introduce LTTNG_UST_MAP_POPULATE_POLICY environment variable
[lttng-ust.git] / liblttng-ust / lttng-filter-specialize.c
1 /*
2 * lttng-filter-specialize.c
3 *
4 * LTTng UST filter code specializer.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #define _LGPL_SOURCE
28 #include "lttng-filter.h"
29 #include <lttng/align.h>
30
/*
 * Find the last (most significant) bit set in val, 1-based.
 * Returns 0 when no bit is set, 32 when the top bit is set.
 */
static int lttng_fls(int val)
{
	static const struct {
		unsigned int mask;
		int shift;
	} steps[] = {
		{ 0xFFFF0000U, 16 },
		{ 0xFF000000U, 8 },
		{ 0xF0000000U, 4 },
		{ 0xC0000000U, 2 },
	};
	unsigned int v = (unsigned int) val;
	int pos = 32;
	size_t i;

	if (!v)
		return 0;
	/* Binary search: shift the highest set bit up to the top nibble-by-nibble. */
	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		if (!(v & steps[i].mask)) {
			v <<= steps[i].shift;
			pos -= steps[i].shift;
		}
	}
	if (!(v & 0x80000000U))
		pos -= 1;
	return pos;
}
59
/*
 * Return the order of the smallest power of two greater than or equal
 * to count, i.e. ceil(log2(count)).
 */
static int get_count_order(unsigned int count)
{
	int order = lttng_fls(count) - 1;

	/* Not a power of two: round up to the next order. */
	return (count & (count - 1)) ? order + 1 : order;
}
69
70 static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
71 size_t align, size_t len)
72 {
73 ssize_t ret;
74 size_t padding = offset_align(runtime->data_len, align);
75 size_t new_len = runtime->data_len + padding + len;
76 size_t new_alloc_len = new_len;
77 size_t old_alloc_len = runtime->data_alloc_len;
78
79 if (new_len > FILTER_MAX_DATA_LEN)
80 return -EINVAL;
81
82 if (new_alloc_len > old_alloc_len) {
83 char *newptr;
84
85 new_alloc_len =
86 max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
87 newptr = realloc(runtime->data, new_alloc_len);
88 if (!newptr)
89 return -ENOMEM;
90 runtime->data = newptr;
91 /* We zero directly the memory from start of allocation. */
92 memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
93 runtime->data_alloc_len = new_alloc_len;
94 }
95 runtime->data_len += padding;
96 ret = runtime->data_len;
97 runtime->data_len += len;
98 return ret;
99 }
100
101 static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
102 const void *p, size_t align, size_t len)
103 {
104 ssize_t offset;
105
106 offset = bytecode_reserve_data(runtime, align, len);
107 if (offset < 0)
108 return -ENOMEM;
109 memcpy(&runtime->data[offset], p, len);
110 return offset;
111 }
112
/*
 * Specialize a generic load-field instruction according to the object
 * type currently on top of the virtual stack: replace the opcode with
 * the corresponding typed load opcode and set the stack-top register
 * type.  Returns 0 on success, -EINVAL for root loads (missing field
 * name) and for nested types that cannot be loaded.
 */
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	/* A field can only be loaded from an object reference, not a root. */
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		/* Keep the generic opcode when a byte swap is needed. */
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_S64;
		/* Single byte: no rev_bo check needed. */
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = FILTER_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}
209
210 static int specialize_get_index_object_type(enum object_type *otype,
211 int signedness, uint32_t elem_len)
212 {
213 switch (elem_len) {
214 case 8:
215 if (signedness)
216 *otype = OBJECT_TYPE_S8;
217 else
218 *otype = OBJECT_TYPE_U8;
219 break;
220 case 16:
221 if (signedness)
222 *otype = OBJECT_TYPE_S16;
223 else
224 *otype = OBJECT_TYPE_U16;
225 break;
226 case 32:
227 if (signedness)
228 *otype = OBJECT_TYPE_S32;
229 else
230 *otype = OBJECT_TYPE_U32;
231 break;
232 case 64:
233 if (signedness)
234 *otype = OBJECT_TYPE_S64;
235 else
236 *otype = OBJECT_TYPE_U64;
237 break;
238 default:
239 return -EINVAL;
240 }
241 return 0;
242 }
243
/*
 * Specialize an index lookup into an array or sequence object sitting
 * on top of the virtual stack: validate the element type, compute the
 * element's byte offset, push a filter_get_index_data descriptor into
 * the runtime data area, and record the descriptor's offset into the
 * instruction's u16/u64 index operand.
 * Returns 0 on success, negative error value on failure.
 */
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			/* Arrays have a static length: bound-check at specialization time. */
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/* elem_len is in bits; descriptor offsets are in bytes. */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/* No static bound check here: sequence length is not known
			 * at specialization time (no array_len recorded). */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	/* Store the descriptor in the runtime data area... */
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	/* ...and record its offset in the instruction operand. */
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
345
346 static int specialize_context_lookup_name(struct lttng_ctx *ctx,
347 struct bytecode_runtime *bytecode,
348 struct load_op *insn)
349 {
350 uint16_t offset;
351 const char *name;
352
353 offset = ((struct get_symbol *) insn->data)->offset;
354 name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
355 return lttng_get_context_index(ctx, name);
356 }
357
/*
 * Fill the vstack load information for a field about to be loaded,
 * mapping the field's abstract type (atype) to a filter object type.
 * is_context selects the context-field handling for arrays/sequences.
 * Returns 0 on success, -EINVAL for types that cannot be loaded.
 */
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		/* Enumerations are loaded through their integer container type. */
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			/* Unencoded integer arrays are indexable objects;
			 * encoded ones are treated as string sequences. */
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case atype_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	case atype_struct:
		ERR("Structure type cannot be loaded.");
		return -EINVAL;
	default:
		ERR("Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
435
/*
 * Specialize a get_symbol lookup on the session context into a
 * get_index instruction referencing the context field by index, and
 * push the matching filter_get_index_data descriptor into the runtime
 * data area.  Returns 0 on success, negative error value on failure.
 */
static int specialize_context_lookup(struct lttng_session *session,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(session->ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &session->ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}
469
470 static int specialize_app_context_lookup(struct lttng_session *session,
471 struct bytecode_runtime *runtime,
472 struct load_op *insn,
473 struct vstack_load *load)
474 {
475 uint16_t offset;
476 const char *orig_name;
477 char *name = NULL;
478 int idx, ret;
479 struct lttng_ctx_field *ctx_field;
480 struct lttng_event_field *field;
481 struct filter_get_index_data gid;
482 ssize_t data_offset;
483
484 offset = ((struct get_symbol *) insn->data)->offset;
485 orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
486 name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
487 if (!name) {
488 ret = -ENOMEM;
489 goto end;
490 }
491 strcpy(name, "$app.");
492 strcat(name, orig_name);
493 idx = lttng_get_context_index(session->ctx, name);
494 if (idx < 0) {
495 assert(lttng_context_is_app(name));
496 ret = lttng_ust_add_app_context_to_ctx_rcu(name,
497 &session->ctx);
498 if (ret)
499 return ret;
500 idx = lttng_get_context_index(session->ctx,
501 name);
502 if (idx < 0)
503 return -ENOENT;
504 }
505 ctx_field = &session->ctx->fields[idx];
506 field = &ctx_field->event_field;
507 ret = specialize_load_object(field, load, true);
508 if (ret)
509 goto end;
510 /* Specialize each get_symbol into a get_index. */
511 insn->op = FILTER_OP_GET_INDEX_U16;
512 memset(&gid, 0, sizeof(gid));
513 gid.ctx_index = idx;
514 gid.elem.type = load->object_type;
515 data_offset = bytecode_push_data(runtime, &gid,
516 __alignof__(gid), sizeof(gid));
517 if (data_offset < 0) {
518 ret = -EINVAL;
519 goto end;
520 }
521 ((struct get_index_u16 *) insn->data)->index = data_offset;
522 ret = 0;
523 end:
524 free(name);
525 return ret;
526 }
527
/*
 * Specialize a get_symbol lookup on the event payload into a
 * get_index instruction: walk the event descriptor's fields to find
 * the named field, accumulating each preceding field's size on the
 * interpreter stack to obtain its offset, then push a
 * filter_get_index_data descriptor into the runtime data area.
 * Returns 0 on success, -EINVAL when the field is not found or has an
 * unsupported type.
 */
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	/* The symbol name lives in the original bytecode's relocation table. */
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
			/* Length word followed by a data pointer. */
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
600
601 int lttng_filter_specialize_bytecode(struct lttng_event *event,
602 struct bytecode_runtime *bytecode)
603 {
604 void *pc, *next_pc, *start_pc;
605 int ret = -EINVAL;
606 struct vstack _stack;
607 struct vstack *stack = &_stack;
608 struct lttng_session *session = bytecode->p.session;
609
610 vstack_init(stack);
611
612 start_pc = &bytecode->code[0];
613 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
614 pc = next_pc) {
615 switch (*(filter_opcode_t *) pc) {
616 case FILTER_OP_UNKNOWN:
617 default:
618 ERR("unknown bytecode op %u\n",
619 (unsigned int) *(filter_opcode_t *) pc);
620 ret = -EINVAL;
621 goto end;
622
623 case FILTER_OP_RETURN:
624 if (vstack_ax(stack)->type == REG_S64)
625 *(filter_opcode_t *) pc = FILTER_OP_RETURN_S64;
626 ret = 0;
627 goto end;
628
629 case FILTER_OP_RETURN_S64:
630 if (vstack_ax(stack)->type != REG_S64) {
631 ERR("Unexpected register type\n");
632 ret = -EINVAL;
633 goto end;
634 }
635 ret = 0;
636 goto end;
637
638 /* binary */
639 case FILTER_OP_MUL:
640 case FILTER_OP_DIV:
641 case FILTER_OP_MOD:
642 case FILTER_OP_PLUS:
643 case FILTER_OP_MINUS:
644 ERR("unsupported bytecode op %u\n",
645 (unsigned int) *(filter_opcode_t *) pc);
646 ret = -EINVAL;
647 goto end;
648
649 case FILTER_OP_EQ:
650 {
651 struct binary_op *insn = (struct binary_op *) pc;
652
653 switch(vstack_ax(stack)->type) {
654 default:
655 ERR("unknown register type\n");
656 ret = -EINVAL;
657 goto end;
658
659 case REG_STRING:
660 if (vstack_bx(stack)->type == REG_UNKNOWN)
661 break;
662 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
663 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
664 else
665 insn->op = FILTER_OP_EQ_STRING;
666 break;
667 case REG_STAR_GLOB_STRING:
668 if (vstack_bx(stack)->type == REG_UNKNOWN)
669 break;
670 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
671 break;
672 case REG_S64:
673 if (vstack_bx(stack)->type == REG_UNKNOWN)
674 break;
675 if (vstack_bx(stack)->type == REG_S64)
676 insn->op = FILTER_OP_EQ_S64;
677 else
678 insn->op = FILTER_OP_EQ_DOUBLE_S64;
679 break;
680 case REG_DOUBLE:
681 if (vstack_bx(stack)->type == REG_UNKNOWN)
682 break;
683 if (vstack_bx(stack)->type == REG_S64)
684 insn->op = FILTER_OP_EQ_S64_DOUBLE;
685 else
686 insn->op = FILTER_OP_EQ_DOUBLE;
687 break;
688 case REG_UNKNOWN:
689 break; /* Dynamic typing. */
690 }
691 /* Pop 2, push 1 */
692 if (vstack_pop(stack)) {
693 ret = -EINVAL;
694 goto end;
695 }
696 vstack_ax(stack)->type = REG_S64;
697 next_pc += sizeof(struct binary_op);
698 break;
699 }
700
701 case FILTER_OP_NE:
702 {
703 struct binary_op *insn = (struct binary_op *) pc;
704
705 switch(vstack_ax(stack)->type) {
706 default:
707 ERR("unknown register type\n");
708 ret = -EINVAL;
709 goto end;
710
711 case REG_STRING:
712 if (vstack_bx(stack)->type == REG_UNKNOWN)
713 break;
714 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
715 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
716 else
717 insn->op = FILTER_OP_NE_STRING;
718 break;
719 case REG_STAR_GLOB_STRING:
720 if (vstack_bx(stack)->type == REG_UNKNOWN)
721 break;
722 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
723 break;
724 case REG_S64:
725 if (vstack_bx(stack)->type == REG_UNKNOWN)
726 break;
727 if (vstack_bx(stack)->type == REG_S64)
728 insn->op = FILTER_OP_NE_S64;
729 else
730 insn->op = FILTER_OP_NE_DOUBLE_S64;
731 break;
732 case REG_DOUBLE:
733 if (vstack_bx(stack)->type == REG_UNKNOWN)
734 break;
735 if (vstack_bx(stack)->type == REG_S64)
736 insn->op = FILTER_OP_NE_S64_DOUBLE;
737 else
738 insn->op = FILTER_OP_NE_DOUBLE;
739 break;
740 case REG_UNKNOWN:
741 break; /* Dynamic typing. */
742 }
743 /* Pop 2, push 1 */
744 if (vstack_pop(stack)) {
745 ret = -EINVAL;
746 goto end;
747 }
748 vstack_ax(stack)->type = REG_S64;
749 next_pc += sizeof(struct binary_op);
750 break;
751 }
752
753 case FILTER_OP_GT:
754 {
755 struct binary_op *insn = (struct binary_op *) pc;
756
757 switch(vstack_ax(stack)->type) {
758 default:
759 ERR("unknown register type\n");
760 ret = -EINVAL;
761 goto end;
762
763 case REG_STAR_GLOB_STRING:
764 ERR("invalid register type for > binary operator\n");
765 ret = -EINVAL;
766 goto end;
767 case REG_STRING:
768 if (vstack_bx(stack)->type == REG_UNKNOWN)
769 break;
770 insn->op = FILTER_OP_GT_STRING;
771 break;
772 case REG_S64:
773 if (vstack_bx(stack)->type == REG_UNKNOWN)
774 break;
775 if (vstack_bx(stack)->type == REG_S64)
776 insn->op = FILTER_OP_GT_S64;
777 else
778 insn->op = FILTER_OP_GT_DOUBLE_S64;
779 break;
780 case REG_DOUBLE:
781 if (vstack_bx(stack)->type == REG_UNKNOWN)
782 break;
783 if (vstack_bx(stack)->type == REG_S64)
784 insn->op = FILTER_OP_GT_S64_DOUBLE;
785 else
786 insn->op = FILTER_OP_GT_DOUBLE;
787 break;
788 case REG_UNKNOWN:
789 break; /* Dynamic typing. */
790 }
791 /* Pop 2, push 1 */
792 if (vstack_pop(stack)) {
793 ret = -EINVAL;
794 goto end;
795 }
796 vstack_ax(stack)->type = REG_S64;
797 next_pc += sizeof(struct binary_op);
798 break;
799 }
800
801 case FILTER_OP_LT:
802 {
803 struct binary_op *insn = (struct binary_op *) pc;
804
805 switch(vstack_ax(stack)->type) {
806 default:
807 ERR("unknown register type\n");
808 ret = -EINVAL;
809 goto end;
810
811 case REG_STAR_GLOB_STRING:
812 ERR("invalid register type for < binary operator\n");
813 ret = -EINVAL;
814 goto end;
815 case REG_STRING:
816 if (vstack_bx(stack)->type == REG_UNKNOWN)
817 break;
818 insn->op = FILTER_OP_LT_STRING;
819 break;
820 case REG_S64:
821 if (vstack_bx(stack)->type == REG_UNKNOWN)
822 break;
823 if (vstack_bx(stack)->type == REG_S64)
824 insn->op = FILTER_OP_LT_S64;
825 else
826 insn->op = FILTER_OP_LT_DOUBLE_S64;
827 break;
828 case REG_DOUBLE:
829 if (vstack_bx(stack)->type == REG_UNKNOWN)
830 break;
831 if (vstack_bx(stack)->type == REG_S64)
832 insn->op = FILTER_OP_LT_S64_DOUBLE;
833 else
834 insn->op = FILTER_OP_LT_DOUBLE;
835 break;
836 case REG_UNKNOWN:
837 break; /* Dynamic typing. */
838 }
839 /* Pop 2, push 1 */
840 if (vstack_pop(stack)) {
841 ret = -EINVAL;
842 goto end;
843 }
844 vstack_ax(stack)->type = REG_S64;
845 next_pc += sizeof(struct binary_op);
846 break;
847 }
848
849 case FILTER_OP_GE:
850 {
851 struct binary_op *insn = (struct binary_op *) pc;
852
853 switch(vstack_ax(stack)->type) {
854 default:
855 ERR("unknown register type\n");
856 ret = -EINVAL;
857 goto end;
858
859 case REG_STAR_GLOB_STRING:
860 ERR("invalid register type for >= binary operator\n");
861 ret = -EINVAL;
862 goto end;
863 case REG_STRING:
864 if (vstack_bx(stack)->type == REG_UNKNOWN)
865 break;
866 insn->op = FILTER_OP_GE_STRING;
867 break;
868 case REG_S64:
869 if (vstack_bx(stack)->type == REG_UNKNOWN)
870 break;
871 if (vstack_bx(stack)->type == REG_S64)
872 insn->op = FILTER_OP_GE_S64;
873 else
874 insn->op = FILTER_OP_GE_DOUBLE_S64;
875 break;
876 case REG_DOUBLE:
877 if (vstack_bx(stack)->type == REG_UNKNOWN)
878 break;
879 if (vstack_bx(stack)->type == REG_S64)
880 insn->op = FILTER_OP_GE_S64_DOUBLE;
881 else
882 insn->op = FILTER_OP_GE_DOUBLE;
883 break;
884 case REG_UNKNOWN:
885 break; /* Dynamic typing. */
886 }
887 /* Pop 2, push 1 */
888 if (vstack_pop(stack)) {
889 ret = -EINVAL;
890 goto end;
891 }
892 vstack_ax(stack)->type = REG_S64;
893 next_pc += sizeof(struct binary_op);
894 break;
895 }
896 case FILTER_OP_LE:
897 {
898 struct binary_op *insn = (struct binary_op *) pc;
899
900 switch(vstack_ax(stack)->type) {
901 default:
902 ERR("unknown register type\n");
903 ret = -EINVAL;
904 goto end;
905
906 case REG_STAR_GLOB_STRING:
907 ERR("invalid register type for <= binary operator\n");
908 ret = -EINVAL;
909 goto end;
910 case REG_STRING:
911 if (vstack_bx(stack)->type == REG_UNKNOWN)
912 break;
913 insn->op = FILTER_OP_LE_STRING;
914 break;
915 case REG_S64:
916 if (vstack_bx(stack)->type == REG_UNKNOWN)
917 break;
918 if (vstack_bx(stack)->type == REG_S64)
919 insn->op = FILTER_OP_LE_S64;
920 else
921 insn->op = FILTER_OP_LE_DOUBLE_S64;
922 break;
923 case REG_DOUBLE:
924 if (vstack_bx(stack)->type == REG_UNKNOWN)
925 break;
926 if (vstack_bx(stack)->type == REG_S64)
927 insn->op = FILTER_OP_LE_S64_DOUBLE;
928 else
929 insn->op = FILTER_OP_LE_DOUBLE;
930 break;
931 case REG_UNKNOWN:
932 break; /* Dynamic typing. */
933 }
934 vstack_ax(stack)->type = REG_S64;
935 next_pc += sizeof(struct binary_op);
936 break;
937 }
938
939 case FILTER_OP_EQ_STRING:
940 case FILTER_OP_NE_STRING:
941 case FILTER_OP_GT_STRING:
942 case FILTER_OP_LT_STRING:
943 case FILTER_OP_GE_STRING:
944 case FILTER_OP_LE_STRING:
945 case FILTER_OP_EQ_STAR_GLOB_STRING:
946 case FILTER_OP_NE_STAR_GLOB_STRING:
947 case FILTER_OP_EQ_S64:
948 case FILTER_OP_NE_S64:
949 case FILTER_OP_GT_S64:
950 case FILTER_OP_LT_S64:
951 case FILTER_OP_GE_S64:
952 case FILTER_OP_LE_S64:
953 case FILTER_OP_EQ_DOUBLE:
954 case FILTER_OP_NE_DOUBLE:
955 case FILTER_OP_GT_DOUBLE:
956 case FILTER_OP_LT_DOUBLE:
957 case FILTER_OP_GE_DOUBLE:
958 case FILTER_OP_LE_DOUBLE:
959 case FILTER_OP_EQ_DOUBLE_S64:
960 case FILTER_OP_NE_DOUBLE_S64:
961 case FILTER_OP_GT_DOUBLE_S64:
962 case FILTER_OP_LT_DOUBLE_S64:
963 case FILTER_OP_GE_DOUBLE_S64:
964 case FILTER_OP_LE_DOUBLE_S64:
965 case FILTER_OP_EQ_S64_DOUBLE:
966 case FILTER_OP_NE_S64_DOUBLE:
967 case FILTER_OP_GT_S64_DOUBLE:
968 case FILTER_OP_LT_S64_DOUBLE:
969 case FILTER_OP_GE_S64_DOUBLE:
970 case FILTER_OP_LE_S64_DOUBLE:
971 case FILTER_OP_BIT_RSHIFT:
972 case FILTER_OP_BIT_LSHIFT:
973 case FILTER_OP_BIT_AND:
974 case FILTER_OP_BIT_OR:
975 case FILTER_OP_BIT_XOR:
976 {
977 /* Pop 2, push 1 */
978 if (vstack_pop(stack)) {
979 ret = -EINVAL;
980 goto end;
981 }
982 vstack_ax(stack)->type = REG_S64;
983 next_pc += sizeof(struct binary_op);
984 break;
985 }
986
987 /* unary */
988 case FILTER_OP_UNARY_PLUS:
989 {
990 struct unary_op *insn = (struct unary_op *) pc;
991
992 switch(vstack_ax(stack)->type) {
993 default:
994 ERR("unknown register type\n");
995 ret = -EINVAL;
996 goto end;
997
998 case REG_S64:
999 insn->op = FILTER_OP_UNARY_PLUS_S64;
1000 break;
1001 case REG_DOUBLE:
1002 insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
1003 break;
1004 case REG_UNKNOWN: /* Dynamic typing. */
1005 break;
1006 }
1007 /* Pop 1, push 1 */
1008 next_pc += sizeof(struct unary_op);
1009 break;
1010 }
1011
1012 case FILTER_OP_UNARY_MINUS:
1013 {
1014 struct unary_op *insn = (struct unary_op *) pc;
1015
1016 switch(vstack_ax(stack)->type) {
1017 default:
1018 ERR("unknown register type\n");
1019 ret = -EINVAL;
1020 goto end;
1021
1022 case REG_S64:
1023 insn->op = FILTER_OP_UNARY_MINUS_S64;
1024 break;
1025 case REG_DOUBLE:
1026 insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
1027 break;
1028 case REG_UNKNOWN: /* Dynamic typing. */
1029 break;
1030 }
1031 /* Pop 1, push 1 */
1032 next_pc += sizeof(struct unary_op);
1033 break;
1034 }
1035
1036 case FILTER_OP_UNARY_NOT:
1037 {
1038 struct unary_op *insn = (struct unary_op *) pc;
1039
1040 switch(vstack_ax(stack)->type) {
1041 default:
1042 ERR("unknown register type\n");
1043 ret = -EINVAL;
1044 goto end;
1045
1046 case REG_S64:
1047 insn->op = FILTER_OP_UNARY_NOT_S64;
1048 break;
1049 case REG_DOUBLE:
1050 insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
1051 break;
1052 case REG_UNKNOWN: /* Dynamic typing. */
1053 break;
1054 }
1055 /* Pop 1, push 1 */
1056 next_pc += sizeof(struct unary_op);
1057 break;
1058 }
1059
1060 case FILTER_OP_UNARY_BIT_NOT:
1061 {
1062 /* Pop 1, push 1 */
1063 next_pc += sizeof(struct unary_op);
1064 break;
1065 }
1066
1067 case FILTER_OP_UNARY_PLUS_S64:
1068 case FILTER_OP_UNARY_MINUS_S64:
1069 case FILTER_OP_UNARY_NOT_S64:
1070 case FILTER_OP_UNARY_PLUS_DOUBLE:
1071 case FILTER_OP_UNARY_MINUS_DOUBLE:
1072 case FILTER_OP_UNARY_NOT_DOUBLE:
1073 {
1074 /* Pop 1, push 1 */
1075 next_pc += sizeof(struct unary_op);
1076 break;
1077 }
1078
1079 /* logical */
1080 case FILTER_OP_AND:
1081 case FILTER_OP_OR:
1082 {
1083 /* Continue to next instruction */
1084 /* Pop 1 when jump not taken */
1085 if (vstack_pop(stack)) {
1086 ret = -EINVAL;
1087 goto end;
1088 }
1089 next_pc += sizeof(struct logical_op);
1090 break;
1091 }
1092
1093 /* load field ref */
1094 case FILTER_OP_LOAD_FIELD_REF:
1095 {
1096 ERR("Unknown field ref type\n");
1097 ret = -EINVAL;
1098 goto end;
1099 }
1100 /* get context ref */
1101 case FILTER_OP_GET_CONTEXT_REF:
1102 {
1103 if (vstack_push(stack)) {
1104 ret = -EINVAL;
1105 goto end;
1106 }
1107 vstack_ax(stack)->type = REG_UNKNOWN;
1108 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1109 break;
1110 }
1111 case FILTER_OP_LOAD_FIELD_REF_STRING:
1112 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1113 case FILTER_OP_GET_CONTEXT_REF_STRING:
1114 {
1115 if (vstack_push(stack)) {
1116 ret = -EINVAL;
1117 goto end;
1118 }
1119 vstack_ax(stack)->type = REG_STRING;
1120 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1121 break;
1122 }
1123 case FILTER_OP_LOAD_FIELD_REF_S64:
1124 case FILTER_OP_GET_CONTEXT_REF_S64:
1125 {
1126 if (vstack_push(stack)) {
1127 ret = -EINVAL;
1128 goto end;
1129 }
1130 vstack_ax(stack)->type = REG_S64;
1131 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1132 break;
1133 }
1134 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1135 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1136 {
1137 if (vstack_push(stack)) {
1138 ret = -EINVAL;
1139 goto end;
1140 }
1141 vstack_ax(stack)->type = REG_DOUBLE;
1142 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1143 break;
1144 }
1145
1146 /* load from immediate operand */
1147 case FILTER_OP_LOAD_STRING:
1148 {
1149 struct load_op *insn = (struct load_op *) pc;
1150
1151 if (vstack_push(stack)) {
1152 ret = -EINVAL;
1153 goto end;
1154 }
1155 vstack_ax(stack)->type = REG_STRING;
1156 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1157 break;
1158 }
1159
1160 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1161 {
1162 struct load_op *insn = (struct load_op *) pc;
1163
1164 if (vstack_push(stack)) {
1165 ret = -EINVAL;
1166 goto end;
1167 }
1168 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1169 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1170 break;
1171 }
1172
1173 case FILTER_OP_LOAD_S64:
1174 {
1175 if (vstack_push(stack)) {
1176 ret = -EINVAL;
1177 goto end;
1178 }
1179 vstack_ax(stack)->type = REG_S64;
1180 next_pc += sizeof(struct load_op)
1181 + sizeof(struct literal_numeric);
1182 break;
1183 }
1184
1185 case FILTER_OP_LOAD_DOUBLE:
1186 {
1187 if (vstack_push(stack)) {
1188 ret = -EINVAL;
1189 goto end;
1190 }
1191 vstack_ax(stack)->type = REG_DOUBLE;
1192 next_pc += sizeof(struct load_op)
1193 + sizeof(struct literal_double);
1194 break;
1195 }
1196
1197 /* cast */
1198 case FILTER_OP_CAST_TO_S64:
1199 {
1200 struct cast_op *insn = (struct cast_op *) pc;
1201
1202 switch (vstack_ax(stack)->type) {
1203 default:
1204 ERR("unknown register type\n");
1205 ret = -EINVAL;
1206 goto end;
1207
1208 case REG_STRING:
1209 case REG_STAR_GLOB_STRING:
1210 ERR("Cast op can only be applied to numeric or floating point registers\n");
1211 ret = -EINVAL;
1212 goto end;
1213 case REG_S64:
1214 insn->op = FILTER_OP_CAST_NOP;
1215 break;
1216 case REG_DOUBLE:
1217 insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
1218 break;
1219 case REG_UNKNOWN:
1220 break;
1221 }
1222 /* Pop 1, push 1 */
1223 vstack_ax(stack)->type = REG_S64;
1224 next_pc += sizeof(struct cast_op);
1225 break;
1226 }
1227 case FILTER_OP_CAST_DOUBLE_TO_S64:
1228 {
1229 /* Pop 1, push 1 */
1230 vstack_ax(stack)->type = REG_S64;
1231 next_pc += sizeof(struct cast_op);
1232 break;
1233 }
1234 case FILTER_OP_CAST_NOP:
1235 {
1236 next_pc += sizeof(struct cast_op);
1237 break;
1238 }
1239
1240 /*
1241 * Instructions for recursive traversal through composed types.
1242 */
1243 case FILTER_OP_GET_CONTEXT_ROOT:
1244 {
1245 if (vstack_push(stack)) {
1246 ret = -EINVAL;
1247 goto end;
1248 }
1249 vstack_ax(stack)->type = REG_PTR;
1250 vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
1251 next_pc += sizeof(struct load_op);
1252 break;
1253 }
1254 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1255 {
1256 if (vstack_push(stack)) {
1257 ret = -EINVAL;
1258 goto end;
1259 }
1260 vstack_ax(stack)->type = REG_PTR;
1261 vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
1262 next_pc += sizeof(struct load_op);
1263 break;
1264 }
1265 case FILTER_OP_GET_PAYLOAD_ROOT:
1266 {
1267 if (vstack_push(stack)) {
1268 ret = -EINVAL;
1269 goto end;
1270 }
1271 vstack_ax(stack)->type = REG_PTR;
1272 vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
1273 next_pc += sizeof(struct load_op);
1274 break;
1275 }
1276
1277 case FILTER_OP_LOAD_FIELD:
1278 {
1279 struct load_op *insn = (struct load_op *) pc;
1280
1281 assert(vstack_ax(stack)->type == REG_PTR);
1282 /* Pop 1, push 1 */
1283 ret = specialize_load_field(vstack_ax(stack), insn);
1284 if (ret)
1285 goto end;
1286
1287 next_pc += sizeof(struct load_op);
1288 break;
1289 }
1290
1291 case FILTER_OP_LOAD_FIELD_S8:
1292 case FILTER_OP_LOAD_FIELD_S16:
1293 case FILTER_OP_LOAD_FIELD_S32:
1294 case FILTER_OP_LOAD_FIELD_S64:
1295 case FILTER_OP_LOAD_FIELD_U8:
1296 case FILTER_OP_LOAD_FIELD_U16:
1297 case FILTER_OP_LOAD_FIELD_U32:
1298 case FILTER_OP_LOAD_FIELD_U64:
1299 {
1300 /* Pop 1, push 1 */
1301 vstack_ax(stack)->type = REG_S64;
1302 next_pc += sizeof(struct load_op);
1303 break;
1304 }
1305
1306 case FILTER_OP_LOAD_FIELD_STRING:
1307 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1308 {
1309 /* Pop 1, push 1 */
1310 vstack_ax(stack)->type = REG_STRING;
1311 next_pc += sizeof(struct load_op);
1312 break;
1313 }
1314
1315 case FILTER_OP_LOAD_FIELD_DOUBLE:
1316 {
1317 /* Pop 1, push 1 */
1318 vstack_ax(stack)->type = REG_DOUBLE;
1319 next_pc += sizeof(struct load_op);
1320 break;
1321 }
1322
1323 case FILTER_OP_GET_SYMBOL:
1324 {
1325 struct load_op *insn = (struct load_op *) pc;
1326
1327 dbg_printf("op get symbol\n");
1328 switch (vstack_ax(stack)->load.type) {
1329 case LOAD_OBJECT:
1330 ERR("Nested fields not implemented yet.");
1331 ret = -EINVAL;
1332 goto end;
1333 case LOAD_ROOT_CONTEXT:
1334 /* Lookup context field. */
1335 ret = specialize_context_lookup(session,
1336 bytecode, insn,
1337 &vstack_ax(stack)->load);
1338 if (ret)
1339 goto end;
1340 break;
1341 case LOAD_ROOT_APP_CONTEXT:
1342 /* Lookup app context field. */
1343 ret = specialize_app_context_lookup(session,
1344 bytecode, insn,
1345 &vstack_ax(stack)->load);
1346 if (ret)
1347 goto end;
1348 break;
1349 case LOAD_ROOT_PAYLOAD:
1350 /* Lookup event payload field. */
1351 ret = specialize_event_payload_lookup(event,
1352 bytecode, insn,
1353 &vstack_ax(stack)->load);
1354 if (ret)
1355 goto end;
1356 break;
1357 }
1358 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1359 break;
1360 }
1361
1362 case FILTER_OP_GET_SYMBOL_FIELD:
1363 {
1364 /* Always generated by specialize phase. */
1365 ret = -EINVAL;
1366 goto end;
1367 }
1368
1369 case FILTER_OP_GET_INDEX_U16:
1370 {
1371 struct load_op *insn = (struct load_op *) pc;
1372 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1373
1374 dbg_printf("op get index u16\n");
1375 /* Pop 1, push 1 */
1376 ret = specialize_get_index(bytecode, insn, index->index,
1377 vstack_ax(stack), sizeof(*index));
1378 if (ret)
1379 goto end;
1380 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1381 break;
1382 }
1383
1384 case FILTER_OP_GET_INDEX_U64:
1385 {
1386 struct load_op *insn = (struct load_op *) pc;
1387 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1388
1389 dbg_printf("op get index u64\n");
1390 /* Pop 1, push 1 */
1391 ret = specialize_get_index(bytecode, insn, index->index,
1392 vstack_ax(stack), sizeof(*index));
1393 if (ret)
1394 goto end;
1395 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1396 break;
1397 }
1398
1399 }
1400 }
1401 end:
1402 return ret;
1403 }