liblttng-ust/lttng-bytecode-specialize.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST bytecode specializer.
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <limits.h>	/* CHAR_BIT */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <lttng/align.h>

#include "context-internal.h"
#include "lttng-bytecode.h"
#include "ust-events-internal.h"

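/*
 * Find last (most-significant) bit set, 1-based, like the kernel's fls():
 * e.g. lttng_fls(0x10) == 5, and lttng_fls(0) == 0.
 */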
static int lttng_fls(int val)
{
    int r = 32;
    unsigned int x = (unsigned int) val;

    if (!x)
        return 0;
    if (!(x & 0xFFFF0000U)) {
        x <<= 16;
        r -= 16;
    }
    if (!(x & 0xFF000000U)) {
        x <<= 8;
        r -= 8;
    }
    if (!(x & 0xF0000000U)) {
        x <<= 4;
        r -= 4;
    }
    if (!(x & 0xC0000000U)) {
        x <<= 2;
        r -= 2;
    }
    if (!(x & 0x80000000U)) {
        r -= 1;
    }
    return r;
}

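/*
 * Base-2 order of the smallest power of two greater than or equal to count:
 * e.g. get_count_order(5) == 3 since 1 << 3 == 8, and get_count_order(8) == 3.
 */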
static int get_count_order(unsigned int count)
{
    int order;

    order = lttng_fls(count) - 1;
    if (count & (count - 1))
        order++;
    return order;
}

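/*
 * Reserve "len" bytes, aligned on "align", in the runtime data area, growing
 * the zero-filled allocation by powers of two as needed. Returns the offset
 * of the reserved space within runtime->data, or a negative errno value on
 * error.
 */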
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
        size_t align, size_t len)
{
    ssize_t ret;
    size_t padding = lttng_ust_offset_align(runtime->data_len, align);
    size_t new_len = runtime->data_len + padding + len;
    size_t new_alloc_len = new_len;
    size_t old_alloc_len = runtime->data_alloc_len;

    if (new_len > BYTECODE_MAX_DATA_LEN)
        return -EINVAL;

    if (new_alloc_len > old_alloc_len) {
        char *newptr;

        new_alloc_len =
            max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
        newptr = realloc(runtime->data, new_alloc_len);
        if (!newptr)
            return -ENOMEM;
        runtime->data = newptr;
        /* Zero the newly allocated memory. */
        memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
        runtime->data_alloc_len = new_alloc_len;
    }
    runtime->data_len += padding;
    ret = runtime->data_len;
    runtime->data_len += len;
    return ret;
}

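/*
 * Copy "len" bytes from "p" into the runtime data area, aligned on "align".
 * Returns the offset of the copied data within runtime->data, or a negative
 * errno value on failure.
 */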
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
        const void *p, size_t align, size_t len)
{
    ssize_t offset;

    offset = bytecode_reserve_data(runtime, align, len);
    if (offset < 0)
        return -ENOMEM;
    memcpy(&runtime->data[offset], p, len);
    return offset;
}

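/*
 * Specialize a generic BYTECODE_OP_LOAD_FIELD instruction: based on the
 * object type found on top of the virtual stack, select a type-specific load
 * opcode and record the resulting register type.
 */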
static int specialize_load_field(struct vstack_entry *stack_top,
        struct load_op *insn)
{
    int ret;

    switch (stack_top->load.type) {
    case LOAD_OBJECT:
        break;
    case LOAD_ROOT_CONTEXT:
    case LOAD_ROOT_APP_CONTEXT:
    case LOAD_ROOT_PAYLOAD:
    default:
        dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
        ret = -EINVAL;
        goto end;
    }
    switch (stack_top->load.object_type) {
    case OBJECT_TYPE_S8:
        dbg_printf("op load field s8\n");
        stack_top->type = REG_S64;
        if (!stack_top->load.rev_bo)
            insn->op = BYTECODE_OP_LOAD_FIELD_S8;
        break;
    case OBJECT_TYPE_S16:
        dbg_printf("op load field s16\n");
        stack_top->type = REG_S64;
        if (!stack_top->load.rev_bo)
            insn->op = BYTECODE_OP_LOAD_FIELD_S16;
        break;
    case OBJECT_TYPE_S32:
        dbg_printf("op load field s32\n");
        stack_top->type = REG_S64;
        if (!stack_top->load.rev_bo)
            insn->op = BYTECODE_OP_LOAD_FIELD_S32;
        break;
    case OBJECT_TYPE_S64:
        dbg_printf("op load field s64\n");
        stack_top->type = REG_S64;
        if (!stack_top->load.rev_bo)
            insn->op = BYTECODE_OP_LOAD_FIELD_S64;
        break;
    case OBJECT_TYPE_SIGNED_ENUM:
        dbg_printf("op load field signed enumeration\n");
        stack_top->type = REG_PTR;
        break;
    case OBJECT_TYPE_U8:
        dbg_printf("op load field u8\n");
        stack_top->type = REG_U64;
        insn->op = BYTECODE_OP_LOAD_FIELD_U8;
        break;
    case OBJECT_TYPE_U16:
        dbg_printf("op load field u16\n");
        stack_top->type = REG_U64;
        if (!stack_top->load.rev_bo)
            insn->op = BYTECODE_OP_LOAD_FIELD_U16;
        break;
    case OBJECT_TYPE_U32:
        dbg_printf("op load field u32\n");
        stack_top->type = REG_U64;
        if (!stack_top->load.rev_bo)
            insn->op = BYTECODE_OP_LOAD_FIELD_U32;
        break;
    case OBJECT_TYPE_U64:
        dbg_printf("op load field u64\n");
        stack_top->type = REG_U64;
        if (!stack_top->load.rev_bo)
            insn->op = BYTECODE_OP_LOAD_FIELD_U64;
        break;
    case OBJECT_TYPE_UNSIGNED_ENUM:
        dbg_printf("op load field unsigned enumeration\n");
        stack_top->type = REG_PTR;
        break;
    case OBJECT_TYPE_DOUBLE:
        stack_top->type = REG_DOUBLE;
        insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
        break;
    case OBJECT_TYPE_STRING:
        dbg_printf("op load field string\n");
        stack_top->type = REG_STRING;
        insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
        break;
    case OBJECT_TYPE_STRING_SEQUENCE:
        dbg_printf("op load field string sequence\n");
        stack_top->type = REG_STRING;
        insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
        break;
    case OBJECT_TYPE_DYNAMIC:
        dbg_printf("op load field dynamic\n");
        stack_top->type = REG_UNKNOWN;
        /* Don't specialize load op. */
        break;
    case OBJECT_TYPE_SEQUENCE:
    case OBJECT_TYPE_ARRAY:
    case OBJECT_TYPE_STRUCT:
    case OBJECT_TYPE_VARIANT:
        ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
        ret = -EINVAL;
        goto end;
    }
    return 0;

end:
    return ret;
}

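/*
 * Map an integer element size (8, 16, 32 or 64 bits) and signedness to the
 * corresponding typed object, e.g. (signed, 32) maps to OBJECT_TYPE_S32.
 */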
static int specialize_get_index_object_type(enum object_type *otype,
        int signedness, uint32_t elem_len)
{
    switch (elem_len) {
    case 8:
        if (signedness)
            *otype = OBJECT_TYPE_S8;
        else
            *otype = OBJECT_TYPE_U8;
        break;
    case 16:
        if (signedness)
            *otype = OBJECT_TYPE_S16;
        else
            *otype = OBJECT_TYPE_U16;
        break;
    case 32:
        if (signedness)
            *otype = OBJECT_TYPE_S32;
        else
            *otype = OBJECT_TYPE_U32;
        break;
    case 64:
        if (signedness)
            *otype = OBJECT_TYPE_S64;
        else
            *otype = OBJECT_TYPE_U64;
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

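/*
 * Specialize a get-index operation on an array or sequence of integers:
 * validate the element type (and, for arrays, the index bound), compute the
 * byte offset of the indexed element, and push a bytecode_get_index_data
 * descriptor into the runtime data area, recording its offset in the
 * instruction's index field.
 */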
static int specialize_get_index(struct bytecode_runtime *runtime,
        struct load_op *insn, uint64_t index,
        struct vstack_entry *stack_top,
        int idx_len)
{
    int ret;
    struct bytecode_get_index_data gid;
    ssize_t data_offset;

    memset(&gid, 0, sizeof(gid));
    switch (stack_top->load.type) {
    case LOAD_OBJECT:
        switch (stack_top->load.object_type) {
        case OBJECT_TYPE_ARRAY:
        {
            const struct lttng_integer_type *integer_type;
            const struct lttng_event_field *field;
            uint32_t elem_len, num_elems;
            int signedness;

            field = stack_top->load.field;
            switch (field->type.atype) {
            case atype_array_nestable:
                if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
                    ret = -EINVAL;
                    goto end;
                }
                integer_type = &field->type.u.array_nestable.elem_type->u.integer;
                num_elems = field->type.u.array_nestable.length;
                break;
            default:
                ret = -EINVAL;
                goto end;
            }
            elem_len = integer_type->size;
            signedness = integer_type->signedness;
            if (index >= num_elems) {
                ret = -EINVAL;
                goto end;
            }
            ret = specialize_get_index_object_type(&stack_top->load.object_type,
                    signedness, elem_len);
            if (ret)
                goto end;
            gid.offset = index * (elem_len / CHAR_BIT);
            gid.array_len = num_elems * (elem_len / CHAR_BIT);
            gid.elem.type = stack_top->load.object_type;
            gid.elem.len = elem_len;
            if (integer_type->reverse_byte_order)
                gid.elem.rev_bo = true;
            stack_top->load.rev_bo = gid.elem.rev_bo;
            break;
        }
        case OBJECT_TYPE_SEQUENCE:
        {
            const struct lttng_integer_type *integer_type;
            const struct lttng_event_field *field;
            uint32_t elem_len;
            int signedness;

            field = stack_top->load.field;
            switch (field->type.atype) {
            case atype_sequence_nestable:
                if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
                    ret = -EINVAL;
                    goto end;
                }
                integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
                break;
            default:
                ret = -EINVAL;
                goto end;
            }
            elem_len = integer_type->size;
            signedness = integer_type->signedness;
            ret = specialize_get_index_object_type(&stack_top->load.object_type,
                    signedness, elem_len);
            if (ret)
                goto end;
            gid.offset = index * (elem_len / CHAR_BIT);
            gid.elem.type = stack_top->load.object_type;
            gid.elem.len = elem_len;
            if (integer_type->reverse_byte_order)
                gid.elem.rev_bo = true;
            stack_top->load.rev_bo = gid.elem.rev_bo;
            break;
        }
        case OBJECT_TYPE_STRUCT:
            /* Only generated by the specialize phase. */
        case OBJECT_TYPE_VARIANT:    /* Fall-through */
        default:
            ERR("Unexpected get index type %d",
                (int) stack_top->load.object_type);
            ret = -EINVAL;
            goto end;
        }
        break;
    case LOAD_ROOT_CONTEXT:
    case LOAD_ROOT_APP_CONTEXT:
    case LOAD_ROOT_PAYLOAD:
        ERR("Index lookup for root field not implemented yet.");
        ret = -EINVAL;
        goto end;
    }
    data_offset = bytecode_push_data(runtime, &gid,
            __alignof__(gid), sizeof(gid));
    if (data_offset < 0) {
        ret = -EINVAL;
        goto end;
    }
    switch (idx_len) {
    case 2:
        ((struct get_index_u16 *) insn->data)->index = data_offset;
        break;
    case 8:
        ((struct get_index_u64 *) insn->data)->index = data_offset;
        break;
    default:
        ret = -EINVAL;
        goto end;
    }

    return 0;

end:
    return ret;
}

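/*
 * Resolve the context field name referenced by a get_symbol instruction
 * (stored in the bytecode data section) to its index within the context, or
 * a negative value if the name is not found.
 */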
static int specialize_context_lookup_name(struct lttng_ctx *ctx,
        struct bytecode_runtime *bytecode,
        struct load_op *insn)
{
    uint16_t offset;
    const char *name;

    offset = ((struct get_symbol *) insn->data)->offset;
    name = bytecode->p.priv->bc->bc.data + bytecode->p.priv->bc->bc.reloc_offset + offset;
    return lttng_get_context_index(ctx, name);
}

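/*
 * Derive the vstack load object type from an event field type. Integer
 * arrays and sequences are exposed as strings when loaded from a context
 * field (is_context) or when they carry a text encoding, and as raw
 * array/sequence objects otherwise.
 */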
static int specialize_load_object(const struct lttng_event_field *field,
        struct vstack_load *load, bool is_context)
{
    load->type = LOAD_OBJECT;

    switch (field->type.atype) {
    case atype_integer:
        if (field->type.u.integer.signedness)
            load->object_type = OBJECT_TYPE_S64;
        else
            load->object_type = OBJECT_TYPE_U64;
        load->rev_bo = false;
        break;
    case atype_enum_nestable:
    {
        const struct lttng_integer_type *itype;

        itype = &field->type.u.enum_nestable.container_type->u.integer;
        if (itype->signedness)
            load->object_type = OBJECT_TYPE_SIGNED_ENUM;
        else
            load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
        load->rev_bo = false;
        break;
    }
    case atype_array_nestable:
        if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
            ERR("Array nesting only supports integer types.");
            return -EINVAL;
        }
        if (is_context) {
            load->object_type = OBJECT_TYPE_STRING;
        } else {
            if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
                load->object_type = OBJECT_TYPE_ARRAY;
                load->field = field;
            } else {
                load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
            }
        }
        break;
    case atype_sequence_nestable:
        if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
            ERR("Sequence nesting only supports integer types.");
            return -EINVAL;
        }
        if (is_context) {
            load->object_type = OBJECT_TYPE_STRING;
        } else {
            if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
                load->object_type = OBJECT_TYPE_SEQUENCE;
                load->field = field;
            } else {
                load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
            }
        }
        break;
    case atype_string:
        load->object_type = OBJECT_TYPE_STRING;
        break;
    case atype_float:
        load->object_type = OBJECT_TYPE_DOUBLE;
        break;
    case atype_dynamic:
        load->object_type = OBJECT_TYPE_DYNAMIC;
        break;
    default:
        ERR("Unknown type: %d", (int) field->type.atype);
        return -EINVAL;
    }
    return 0;
}

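/*
 * Specialize a get_symbol lookup on the context root into a
 * BYTECODE_OP_GET_INDEX_U16 instruction referencing the context field by
 * index.
 */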
static int specialize_context_lookup(struct lttng_ctx *ctx,
        struct bytecode_runtime *runtime,
        struct load_op *insn,
        struct vstack_load *load)
{
    int idx, ret;
    struct lttng_ctx_field *ctx_field;
    struct lttng_event_field *field;
    struct bytecode_get_index_data gid;
    ssize_t data_offset;

    idx = specialize_context_lookup_name(ctx, runtime, insn);
    if (idx < 0) {
        return -ENOENT;
    }
    ctx_field = &ctx->fields[idx];
    field = &ctx_field->event_field;
    ret = specialize_load_object(field, load, true);
    if (ret)
        return ret;
    /* Specialize each get_symbol into a get_index. */
    insn->op = BYTECODE_OP_GET_INDEX_U16;
    memset(&gid, 0, sizeof(gid));
    gid.ctx_index = idx;
    gid.elem.type = load->object_type;
    gid.elem.rev_bo = load->rev_bo;
    gid.field = field;
    data_offset = bytecode_push_data(runtime, &gid,
            __alignof__(gid), sizeof(gid));
    if (data_offset < 0) {
        return -EINVAL;
    }
    ((struct get_index_u16 *) insn->data)->index = data_offset;
    return 0;
}

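/*
 * Specialize a get_symbol lookup on the application context root: prefix
 * the symbol with "$app.", register the application context field on demand,
 * then emit a BYTECODE_OP_GET_INDEX_U16 instruction referencing it by index.
 */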
static int specialize_app_context_lookup(struct lttng_ctx **pctx,
        struct bytecode_runtime *runtime,
        struct load_op *insn,
        struct vstack_load *load)
{
    uint16_t offset;
    const char *orig_name;
    char *name = NULL;
    int idx, ret;
    struct lttng_ctx_field *ctx_field;
    struct lttng_event_field *field;
    struct bytecode_get_index_data gid;
    ssize_t data_offset;

    offset = ((struct get_symbol *) insn->data)->offset;
    orig_name = runtime->p.priv->bc->bc.data + runtime->p.priv->bc->bc.reloc_offset + offset;
    name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
    if (!name) {
        ret = -ENOMEM;
        goto end;
    }
    strcpy(name, "$app.");
    strcat(name, orig_name);
    idx = lttng_get_context_index(*pctx, name);
    if (idx < 0) {
        assert(lttng_context_is_app(name));
        ret = lttng_ust_add_app_context_to_ctx_rcu(name,
                pctx);
        if (ret)
            goto end;    /* Don't leak "name" on error. */
        idx = lttng_get_context_index(*pctx, name);
        if (idx < 0) {
            ret = -ENOENT;
            goto end;
        }
    }
    ctx_field = &(*pctx)->fields[idx];
    field = &ctx_field->event_field;
    ret = specialize_load_object(field, load, true);
    if (ret)
        goto end;
    /* Specialize each get_symbol into a get_index. */
    insn->op = BYTECODE_OP_GET_INDEX_U16;
    memset(&gid, 0, sizeof(gid));
    gid.ctx_index = idx;
    gid.elem.type = load->object_type;
    gid.elem.rev_bo = load->rev_bo;
    gid.field = field;
    data_offset = bytecode_push_data(runtime, &gid,
            __alignof__(gid), sizeof(gid));
    if (data_offset < 0) {
        ret = -EINVAL;
        goto end;
    }
    ((struct get_index_u16 *) insn->data)->index = data_offset;
    ret = 0;
end:
    free(name);
    return ret;
}

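/*
 * Specialize a get_symbol lookup on the event payload root: find the named
 * field in the event descriptor while accumulating the interpreter stack
 * offsets of the fields preceding it, then emit a BYTECODE_OP_GET_INDEX_U16
 * instruction referencing that offset.
 */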
static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
        struct bytecode_runtime *runtime,
        struct load_op *insn,
        struct vstack_load *load)
{
    const char *name;
    uint16_t offset;
    unsigned int i, nr_fields;
    bool found = false;
    uint32_t field_offset = 0;
    const struct lttng_event_field *field;
    int ret;
    struct bytecode_get_index_data gid;
    ssize_t data_offset;

    nr_fields = event_desc->nr_fields;
    offset = ((struct get_symbol *) insn->data)->offset;
    name = runtime->p.priv->bc->bc.data + runtime->p.priv->bc->bc.reloc_offset + offset;
    for (i = 0; i < nr_fields; i++) {
        field = &event_desc->fields[i];
        if (field->u.ext.nofilter) {
            continue;
        }
        if (!strcmp(field->name, name)) {
            found = true;
            break;
        }
        /* Compute the field offset on the interpreter stack. */
        switch (field->type.atype) {
        case atype_integer:
        case atype_enum_nestable:
            field_offset += sizeof(int64_t);
            break;
        case atype_array_nestable:
        case atype_sequence_nestable:
            field_offset += sizeof(unsigned long);
            field_offset += sizeof(void *);
            break;
        case atype_string:
            field_offset += sizeof(void *);
            break;
        case atype_float:
            field_offset += sizeof(double);
            break;
        default:
            ret = -EINVAL;
            goto end;
        }
    }
    if (!found) {
        ret = -EINVAL;
        goto end;
    }

    ret = specialize_load_object(field, load, false);
    if (ret)
        goto end;

    /* Specialize each get_symbol into a get_index. */
    insn->op = BYTECODE_OP_GET_INDEX_U16;
    memset(&gid, 0, sizeof(gid));
    gid.offset = field_offset;
    gid.elem.type = load->object_type;
    gid.elem.rev_bo = load->rev_bo;
    gid.field = field;
    data_offset = bytecode_push_data(runtime, &gid,
            __alignof__(gid), sizeof(gid));
    if (data_offset < 0) {
        ret = -EINVAL;
        goto end;
    }
    ((struct get_index_u16 *) insn->data)->index = data_offset;
    ret = 0;
end:
    return ret;
}

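/*
 * Single pass over the bytecode: track operand register types on a virtual
 * stack and rewrite generic, dynamically-typed opcodes into their
 * statically-typed variants whenever the operand types are known at
 * specialization time. Returns 0 on success, a negative errno value on
 * error.
 */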
int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
        struct bytecode_runtime *bytecode)
{
    void *pc, *next_pc, *start_pc;
    int ret = -EINVAL;
    struct vstack _stack;
    struct vstack *stack = &_stack;
    struct lttng_ctx **pctx = bytecode->p.priv->pctx;

    vstack_init(stack);

    start_pc = &bytecode->code[0];
    for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
            pc = next_pc) {
        switch (*(bytecode_opcode_t *) pc) {
        case BYTECODE_OP_UNKNOWN:
        default:
            ERR("unknown bytecode op %u\n",
                (unsigned int) *(bytecode_opcode_t *) pc);
            ret = -EINVAL;
            goto end;

        case BYTECODE_OP_RETURN:
            if (vstack_ax(stack)->type == REG_S64 ||
                    vstack_ax(stack)->type == REG_U64)
                *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
            ret = 0;
            goto end;

        case BYTECODE_OP_RETURN_S64:
            if (vstack_ax(stack)->type != REG_S64 &&
                    vstack_ax(stack)->type != REG_U64) {
                ERR("Unexpected register type\n");
                ret = -EINVAL;
                goto end;
            }
            ret = 0;
            goto end;

        /* binary */
        case BYTECODE_OP_MUL:
        case BYTECODE_OP_DIV:
        case BYTECODE_OP_MOD:
        case BYTECODE_OP_PLUS:
        case BYTECODE_OP_MINUS:
            ERR("unsupported bytecode op %u\n",
                (unsigned int) *(bytecode_opcode_t *) pc);
            ret = -EINVAL;
            goto end;

        case BYTECODE_OP_EQ:
        {
            struct binary_op *insn = (struct binary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
                    insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
                else
                    insn->op = BYTECODE_OP_EQ_STRING;
                break;
            case REG_STAR_GLOB_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
                break;
            case REG_S64:
            case REG_U64:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_EQ_S64;
                else
                    insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
                break;
            case REG_DOUBLE:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
                else
                    insn->op = BYTECODE_OP_EQ_DOUBLE;
                break;
            case REG_UNKNOWN:
                break;    /* Dynamic typing. */
            }
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        case BYTECODE_OP_NE:
        {
            struct binary_op *insn = (struct binary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
                    insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
                else
                    insn->op = BYTECODE_OP_NE_STRING;
                break;
            case REG_STAR_GLOB_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
                break;
            case REG_S64:
            case REG_U64:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_NE_S64;
                else
                    insn->op = BYTECODE_OP_NE_DOUBLE_S64;
                break;
            case REG_DOUBLE:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_NE_S64_DOUBLE;
                else
                    insn->op = BYTECODE_OP_NE_DOUBLE;
                break;
            case REG_UNKNOWN:
                break;    /* Dynamic typing. */
            }
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        case BYTECODE_OP_GT:
        {
            struct binary_op *insn = (struct binary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_STAR_GLOB_STRING:
                ERR("invalid register type for > binary operator\n");
                ret = -EINVAL;
                goto end;
            case REG_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                insn->op = BYTECODE_OP_GT_STRING;
                break;
            case REG_S64:
            case REG_U64:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_GT_S64;
                else
                    insn->op = BYTECODE_OP_GT_DOUBLE_S64;
                break;
            case REG_DOUBLE:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_GT_S64_DOUBLE;
                else
                    insn->op = BYTECODE_OP_GT_DOUBLE;
                break;
            case REG_UNKNOWN:
                break;    /* Dynamic typing. */
            }
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        case BYTECODE_OP_LT:
        {
            struct binary_op *insn = (struct binary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_STAR_GLOB_STRING:
                ERR("invalid register type for < binary operator\n");
                ret = -EINVAL;
                goto end;
            case REG_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                insn->op = BYTECODE_OP_LT_STRING;
                break;
            case REG_S64:
            case REG_U64:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_LT_S64;
                else
                    insn->op = BYTECODE_OP_LT_DOUBLE_S64;
                break;
            case REG_DOUBLE:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_LT_S64_DOUBLE;
                else
                    insn->op = BYTECODE_OP_LT_DOUBLE;
                break;
            case REG_UNKNOWN:
                break;    /* Dynamic typing. */
            }
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        case BYTECODE_OP_GE:
        {
            struct binary_op *insn = (struct binary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_STAR_GLOB_STRING:
                ERR("invalid register type for >= binary operator\n");
                ret = -EINVAL;
                goto end;
            case REG_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                insn->op = BYTECODE_OP_GE_STRING;
                break;
            case REG_S64:
            case REG_U64:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_GE_S64;
                else
                    insn->op = BYTECODE_OP_GE_DOUBLE_S64;
                break;
            case REG_DOUBLE:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_GE_S64_DOUBLE;
                else
                    insn->op = BYTECODE_OP_GE_DOUBLE;
                break;
            case REG_UNKNOWN:
                break;    /* Dynamic typing. */
            }
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        case BYTECODE_OP_LE:
        {
            struct binary_op *insn = (struct binary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_STAR_GLOB_STRING:
                ERR("invalid register type for <= binary operator\n");
                ret = -EINVAL;
                goto end;
            case REG_STRING:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                insn->op = BYTECODE_OP_LE_STRING;
                break;
            case REG_S64:
            case REG_U64:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_LE_S64;
                else
                    insn->op = BYTECODE_OP_LE_DOUBLE_S64;
                break;
            case REG_DOUBLE:
                if (vstack_bx(stack)->type == REG_UNKNOWN)
                    break;
                if (vstack_bx(stack)->type == REG_S64 ||
                        vstack_bx(stack)->type == REG_U64)
                    insn->op = BYTECODE_OP_LE_S64_DOUBLE;
                else
                    insn->op = BYTECODE_OP_LE_DOUBLE;
                break;
            case REG_UNKNOWN:
                break;    /* Dynamic typing. */
            }
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        case BYTECODE_OP_EQ_STRING:
        case BYTECODE_OP_NE_STRING:
        case BYTECODE_OP_GT_STRING:
        case BYTECODE_OP_LT_STRING:
        case BYTECODE_OP_GE_STRING:
        case BYTECODE_OP_LE_STRING:
        case BYTECODE_OP_EQ_STAR_GLOB_STRING:
        case BYTECODE_OP_NE_STAR_GLOB_STRING:
        case BYTECODE_OP_EQ_S64:
        case BYTECODE_OP_NE_S64:
        case BYTECODE_OP_GT_S64:
        case BYTECODE_OP_LT_S64:
        case BYTECODE_OP_GE_S64:
        case BYTECODE_OP_LE_S64:
        case BYTECODE_OP_EQ_DOUBLE:
        case BYTECODE_OP_NE_DOUBLE:
        case BYTECODE_OP_GT_DOUBLE:
        case BYTECODE_OP_LT_DOUBLE:
        case BYTECODE_OP_GE_DOUBLE:
        case BYTECODE_OP_LE_DOUBLE:
        case BYTECODE_OP_EQ_DOUBLE_S64:
        case BYTECODE_OP_NE_DOUBLE_S64:
        case BYTECODE_OP_GT_DOUBLE_S64:
        case BYTECODE_OP_LT_DOUBLE_S64:
        case BYTECODE_OP_GE_DOUBLE_S64:
        case BYTECODE_OP_LE_DOUBLE_S64:
        case BYTECODE_OP_EQ_S64_DOUBLE:
        case BYTECODE_OP_NE_S64_DOUBLE:
        case BYTECODE_OP_GT_S64_DOUBLE:
        case BYTECODE_OP_LT_S64_DOUBLE:
        case BYTECODE_OP_GE_S64_DOUBLE:
        case BYTECODE_OP_LE_S64_DOUBLE:
        {
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        case BYTECODE_OP_BIT_RSHIFT:
        case BYTECODE_OP_BIT_LSHIFT:
        case BYTECODE_OP_BIT_AND:
        case BYTECODE_OP_BIT_OR:
        case BYTECODE_OP_BIT_XOR:
        {
            /* Pop 2, push 1 */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct binary_op);
            break;
        }

        /* unary */
        case BYTECODE_OP_UNARY_PLUS:
        {
            struct unary_op *insn = (struct unary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_S64:
            case REG_U64:
                insn->op = BYTECODE_OP_UNARY_PLUS_S64;
                break;
            case REG_DOUBLE:
                insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
                break;
            case REG_UNKNOWN:    /* Dynamic typing. */
                break;
            }
            /* Pop 1, push 1 */
            next_pc += sizeof(struct unary_op);
            break;
        }

        case BYTECODE_OP_UNARY_MINUS:
        {
            struct unary_op *insn = (struct unary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_S64:
            case REG_U64:
                insn->op = BYTECODE_OP_UNARY_MINUS_S64;
                break;
            case REG_DOUBLE:
                insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
                break;
            case REG_UNKNOWN:    /* Dynamic typing. */
                break;
            }
            /* Pop 1, push 1 */
            next_pc += sizeof(struct unary_op);
            break;
        }

        case BYTECODE_OP_UNARY_NOT:
        {
            struct unary_op *insn = (struct unary_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_S64:
            case REG_U64:
                insn->op = BYTECODE_OP_UNARY_NOT_S64;
                break;
            case REG_DOUBLE:
                insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
                break;
            case REG_UNKNOWN:    /* Dynamic typing. */
                break;
            }
            /* Pop 1, push 1 */
            next_pc += sizeof(struct unary_op);
            break;
        }

        case BYTECODE_OP_UNARY_BIT_NOT:
        {
            /* Pop 1, push 1 */
            next_pc += sizeof(struct unary_op);
            break;
        }

        case BYTECODE_OP_UNARY_PLUS_S64:
        case BYTECODE_OP_UNARY_MINUS_S64:
        case BYTECODE_OP_UNARY_NOT_S64:
        case BYTECODE_OP_UNARY_PLUS_DOUBLE:
        case BYTECODE_OP_UNARY_MINUS_DOUBLE:
        case BYTECODE_OP_UNARY_NOT_DOUBLE:
        {
            /* Pop 1, push 1 */
            next_pc += sizeof(struct unary_op);
            break;
        }

        /* logical */
        case BYTECODE_OP_AND:
        case BYTECODE_OP_OR:
        {
            /* Continue to next instruction */
            /* Pop 1 when jump not taken */
            if (vstack_pop(stack)) {
                ret = -EINVAL;
                goto end;
            }
            next_pc += sizeof(struct logical_op);
            break;
        }

        /* load field ref */
        case BYTECODE_OP_LOAD_FIELD_REF:
        {
            ERR("Unknown field ref type\n");
            ret = -EINVAL;
            goto end;
        }
        /* get context ref */
        case BYTECODE_OP_GET_CONTEXT_REF:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_UNKNOWN;
            next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
            break;
        }
        case BYTECODE_OP_LOAD_FIELD_REF_STRING:
        case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
        case BYTECODE_OP_GET_CONTEXT_REF_STRING:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_STRING;
            next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
            break;
        }
        case BYTECODE_OP_LOAD_FIELD_REF_S64:
        case BYTECODE_OP_GET_CONTEXT_REF_S64:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
            break;
        }
        case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
        case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_DOUBLE;
            next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
            break;
        }

        /* load from immediate operand */
        case BYTECODE_OP_LOAD_STRING:
        {
            struct load_op *insn = (struct load_op *) pc;

            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_STRING;
            next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
            break;
        }

        case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
        {
            struct load_op *insn = (struct load_op *) pc;

            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
            next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
            break;
        }

        case BYTECODE_OP_LOAD_S64:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct load_op)
                    + sizeof(struct literal_numeric);
            break;
        }

        case BYTECODE_OP_LOAD_DOUBLE:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_DOUBLE;
            next_pc += sizeof(struct load_op)
                    + sizeof(struct literal_double);
            break;
        }

        /* cast */
        case BYTECODE_OP_CAST_TO_S64:
        {
            struct cast_op *insn = (struct cast_op *) pc;

            switch (vstack_ax(stack)->type) {
            default:
                ERR("unknown register type\n");
                ret = -EINVAL;
                goto end;

            case REG_STRING:
            case REG_STAR_GLOB_STRING:
                ERR("Cast op can only be applied to numeric or floating point registers\n");
                ret = -EINVAL;
                goto end;
            case REG_S64:
                insn->op = BYTECODE_OP_CAST_NOP;
                break;
            case REG_DOUBLE:
                insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
                break;
            case REG_UNKNOWN:
            case REG_U64:
                break;
            }
            /* Pop 1, push 1 */
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct cast_op);
            break;
        }
        case BYTECODE_OP_CAST_DOUBLE_TO_S64:
        {
            /* Pop 1, push 1 */
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct cast_op);
            break;
        }
        case BYTECODE_OP_CAST_NOP:
        {
            next_pc += sizeof(struct cast_op);
            break;
        }

        /*
         * Instructions for recursive traversal through composed types.
         */
        case BYTECODE_OP_GET_CONTEXT_ROOT:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_PTR;
            vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
            next_pc += sizeof(struct load_op);
            break;
        }
        case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_PTR;
            vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
            next_pc += sizeof(struct load_op);
            break;
        }
        case BYTECODE_OP_GET_PAYLOAD_ROOT:
        {
            if (vstack_push(stack)) {
                ret = -EINVAL;
                goto end;
            }
            vstack_ax(stack)->type = REG_PTR;
            vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
            next_pc += sizeof(struct load_op);
            break;
        }

        case BYTECODE_OP_LOAD_FIELD:
        {
            struct load_op *insn = (struct load_op *) pc;

            assert(vstack_ax(stack)->type == REG_PTR);
            /* Pop 1, push 1 */
            ret = specialize_load_field(vstack_ax(stack), insn);
            if (ret)
                goto end;

            next_pc += sizeof(struct load_op);
            break;
        }

        case BYTECODE_OP_LOAD_FIELD_S8:
        case BYTECODE_OP_LOAD_FIELD_S16:
        case BYTECODE_OP_LOAD_FIELD_S32:
        case BYTECODE_OP_LOAD_FIELD_S64:
        {
            /* Pop 1, push 1 */
            vstack_ax(stack)->type = REG_S64;
            next_pc += sizeof(struct load_op);
            break;
        }

        case BYTECODE_OP_LOAD_FIELD_U8:
        case BYTECODE_OP_LOAD_FIELD_U16:
        case BYTECODE_OP_LOAD_FIELD_U32:
        case BYTECODE_OP_LOAD_FIELD_U64:
        {
            /* Pop 1, push 1 */
            vstack_ax(stack)->type = REG_U64;
            next_pc += sizeof(struct load_op);
            break;
        }

        case BYTECODE_OP_LOAD_FIELD_STRING:
        case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
        {
            /* Pop 1, push 1 */
            vstack_ax(stack)->type = REG_STRING;
            next_pc += sizeof(struct load_op);
            break;
        }

        case BYTECODE_OP_LOAD_FIELD_DOUBLE:
        {
            /* Pop 1, push 1 */
            vstack_ax(stack)->type = REG_DOUBLE;
            next_pc += sizeof(struct load_op);
            break;
        }

        case BYTECODE_OP_GET_SYMBOL:
        {
            struct load_op *insn = (struct load_op *) pc;

            dbg_printf("op get symbol\n");
            switch (vstack_ax(stack)->load.type) {
            case LOAD_OBJECT:
                ERR("Nested fields not implemented yet.");
                ret = -EINVAL;
                goto end;
            case LOAD_ROOT_CONTEXT:
                /* Lookup context field. */
                ret = specialize_context_lookup(*pctx,
                        bytecode, insn,
                        &vstack_ax(stack)->load);
                if (ret)
                    goto end;
                break;
            case LOAD_ROOT_APP_CONTEXT:
                /* Lookup app context field. */
                ret = specialize_app_context_lookup(pctx,
                        bytecode, insn,
                        &vstack_ax(stack)->load);
                if (ret)
                    goto end;
                break;
            case LOAD_ROOT_PAYLOAD:
                /* Lookup event payload field. */
                ret = specialize_payload_lookup(event_desc,
                        bytecode, insn,
                        &vstack_ax(stack)->load);
                if (ret)
                    goto end;
                break;
            }
            next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
            break;
        }

        case BYTECODE_OP_GET_SYMBOL_FIELD:
        {
            /* Always generated by specialize phase. */
            ret = -EINVAL;
            goto end;
        }

        case BYTECODE_OP_GET_INDEX_U16:
        {
            struct load_op *insn = (struct load_op *) pc;
            struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

            dbg_printf("op get index u16\n");
            /* Pop 1, push 1 */
            ret = specialize_get_index(bytecode, insn, index->index,
                    vstack_ax(stack), sizeof(*index));
            if (ret)
                goto end;
            next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
            break;
        }

        case BYTECODE_OP_GET_INDEX_U64:
        {
            struct load_op *insn = (struct load_op *) pc;
            struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

            dbg_printf("op get index u64\n");
            /* Pop 1, push 1 */
            ret = specialize_get_index(bytecode, insn, index->index,
                    vstack_ax(stack), sizeof(*index));
            if (ret)
                goto end;
            next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
            break;
        }

        }
    }
end:
    return ret;
}