Refactoring and fix: bytecode ABI
[lttng-ust.git] / liblttng-ust / lttng-bytecode-specialize.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST bytecode specializer.
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <lttng/ust-align.h>

#include "context-internal.h"
#include "lttng-bytecode.h"
#include "ust-events-internal.h"
#include "ust-helper.h"

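/*
 * Find last (most significant) bit set, 1-based:
 * lttng_fls(0) == 0, lttng_fls(1) == 1, lttng_fls(0x80000000) == 32.
 */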
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}

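/*
 * Smallest order such that (1U << order) >= count, e.g.
 * get_count_order(4) == 2 and get_count_order(5) == 3. Used to round
 * runtime data allocations up to the next power of two.
 */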
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

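/*
 * Reserve `len` bytes at `align` alignment in the runtime data area,
 * growing the backing buffer (to the larger of the next power of two
 * and twice the previous size) when needed. Returns the byte offset of
 * the reservation, or a negative errno value.
 */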
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = lttng_ust_offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > BYTECODE_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated tail of the buffer. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

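/*
 * Copy `len` bytes at `p` into the runtime data area and return the
 * offset at which they were stored, or a negative errno value.
 */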
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

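/*
 * Specialize a generic BYTECODE_OP_LOAD_FIELD instruction into a typed
 * load based on the object type found on top of the virtual stack, and
 * record the resulting register type.
 */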
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_SIGNED_ENUM:
		dbg_printf("op load field signed enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_U64;
		insn->op = BYTECODE_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_UNSIGNED_ENUM:
		dbg_printf("op load field unsigned enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

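/*
 * Map an element (signedness, bit-width) pair to its integer object
 * type, e.g. (signed, 32) -> OBJECT_TYPE_S32. Widths other than
 * 8/16/32/64 are rejected.
 */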
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

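/*
 * Specialize an array/sequence index lookup: validate the element type,
 * push a struct bytecode_get_index_data record into the runtime data
 * area, and patch the instruction's index operand to point at it.
 */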
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			struct lttng_ust_type_integer *integer_type;
			struct lttng_ust_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			switch (field->type->type) {
			case lttng_ust_type_array:
				if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_array(field->type)->elem_type);
				num_elems = lttng_ust_get_type_array(field->type)->length;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			struct lttng_ust_type_integer *integer_type;
			struct lttng_ust_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			switch (field->type->type) {
			case lttng_ust_type_sequence:
				if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_sequence(field->type)->elem_type);
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

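/*
 * Resolve the context field name referenced by a get_symbol operand
 * into its index within the context array, or a negative value if not
 * found.
 */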
static int specialize_context_lookup_name(struct lttng_ust_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

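/*
 * Derive the vstack load object type from an event field's type
 * description. Context fields (is_context) expose their array/sequence
 * payload as strings.
 */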
static int specialize_load_object(struct lttng_ust_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type->type) {
	case lttng_ust_type_integer:
		if (lttng_ust_get_type_integer(field->type)->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case lttng_ust_type_enum:
	{
		struct lttng_ust_type_integer *itype;

		itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
		if (itype->signedness)
			load->object_type = OBJECT_TYPE_SIGNED_ENUM;
		else
			load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
		load->rev_bo = false;
		break;
	}
	case lttng_ust_type_array:
		if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case lttng_ust_type_sequence:
		if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case lttng_ust_type_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case lttng_ust_type_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case lttng_ust_type_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	default:
		ERR("Unknown type: %d", (int) field->type->type);
		return -EINVAL;
	}
	return 0;
}

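/*
 * Specialize a get_symbol lookup on the static context root: resolve
 * the field index, then rewrite the instruction into a
 * BYTECODE_OP_GET_INDEX_U16 pointing at a pushed get_index record.
 */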
static int specialize_context_lookup(struct lttng_ust_ctx *ctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ust_ctx_field *ctx_field;
	struct lttng_ust_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = ctx->fields[idx];
	field = ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

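/*
 * Specialize a get_symbol lookup on the application context root. The
 * looked-up name is prefixed with "$app."; if that context is not
 * registered yet, it is added through an RCU update of *pctx before
 * the instruction is rewritten into a get_index.
 */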
static int specialize_app_context_lookup(struct lttng_ust_ctx **pctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	struct lttng_ust_ctx_field *ctx_field;
	struct lttng_ust_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(*pctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				pctx);
		if (ret)
			goto end;
		idx = lttng_get_context_index(*pctx, name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = (*pctx)->fields[idx];
	field = ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

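/*
 * Specialize a get_symbol lookup on the event payload root: walk the
 * event fields to find the named one, accumulating its offset within
 * the interpreter's field layout along the way, then rewrite the
 * instruction into a get_index.
 */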
static int specialize_payload_lookup(struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	struct lttng_ust_event_field *field;
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = event_desc->fields[i];
		if (field->nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type->type) {
		case lttng_ust_type_integer:
		case lttng_ust_type_enum:
			field_offset += sizeof(int64_t);
			break;
		case lttng_ust_type_array:
		case lttng_ust_type_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_ust_type_string:
			field_offset += sizeof(void *);
			break;
		case lttng_ust_type_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

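/*
 * Specialization pass: a single forward walk over the bytecode,
 * tracking operand types on a virtual stack and rewriting generic
 * opcodes into type-specific ones so the interpreter can skip dynamic
 * type checks. REG_UNKNOWN operands are left generic (dynamic typing).
 */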
int lttng_bytecode_specialize(struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_ust_ctx **pctx = bytecode->p.pctx;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(bytecode_opcode_t *) pc) {
		case BYTECODE_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_RETURN:
			if (vstack_ax(stack)->type == REG_S64 ||
					vstack_ax(stack)->type == REG_U64)
				*(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
			ret = 0;
			goto end;

		case BYTECODE_OP_RETURN_S64:
			if (vstack_ax(stack)->type != REG_S64 &&
					vstack_ax(stack)->type != REG_U64) {
				ERR("Unexpected register type\n");
				ret = -EINVAL;
				goto end;
			}
			ret = 0;
			goto end;

		/* binary */
		case BYTECODE_OP_MUL:
		case BYTECODE_OP_DIV:
		case BYTECODE_OP_MOD:
		case BYTECODE_OP_PLUS:
		case BYTECODE_OP_MINUS:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case BYTECODE_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_EQ_STRING:
		case BYTECODE_OP_NE_STRING:
		case BYTECODE_OP_GT_STRING:
		case BYTECODE_OP_LT_STRING:
		case BYTECODE_OP_GE_STRING:
		case BYTECODE_OP_LE_STRING:
		case BYTECODE_OP_EQ_STAR_GLOB_STRING:
		case BYTECODE_OP_NE_STAR_GLOB_STRING:
		case BYTECODE_OP_EQ_S64:
		case BYTECODE_OP_NE_S64:
		case BYTECODE_OP_GT_S64:
		case BYTECODE_OP_LT_S64:
		case BYTECODE_OP_GE_S64:
		case BYTECODE_OP_LE_S64:
		case BYTECODE_OP_EQ_DOUBLE:
		case BYTECODE_OP_NE_DOUBLE:
		case BYTECODE_OP_GT_DOUBLE:
		case BYTECODE_OP_LT_DOUBLE:
		case BYTECODE_OP_GE_DOUBLE:
		case BYTECODE_OP_LE_DOUBLE:
		case BYTECODE_OP_EQ_DOUBLE_S64:
		case BYTECODE_OP_NE_DOUBLE_S64:
		case BYTECODE_OP_GT_DOUBLE_S64:
		case BYTECODE_OP_LT_DOUBLE_S64:
		case BYTECODE_OP_GE_DOUBLE_S64:
		case BYTECODE_OP_LE_DOUBLE_S64:
		case BYTECODE_OP_EQ_S64_DOUBLE:
		case BYTECODE_OP_NE_S64_DOUBLE:
		case BYTECODE_OP_GT_S64_DOUBLE:
		case BYTECODE_OP_LT_S64_DOUBLE:
		case BYTECODE_OP_GE_S64_DOUBLE:
		case BYTECODE_OP_LE_S64_DOUBLE:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_BIT_RSHIFT:
		case BYTECODE_OP_BIT_LSHIFT:
		case BYTECODE_OP_BIT_AND:
		case BYTECODE_OP_BIT_OR:
		case BYTECODE_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case BYTECODE_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_PLUS_S64:
		case BYTECODE_OP_UNARY_MINUS_S64:
		case BYTECODE_OP_UNARY_NOT_S64:
		case BYTECODE_OP_UNARY_PLUS_DOUBLE:
		case BYTECODE_OP_UNARY_MINUS_DOUBLE:
		case BYTECODE_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case BYTECODE_OP_AND:
		case BYTECODE_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case BYTECODE_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case BYTECODE_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_STRING:
		case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
		case BYTECODE_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_S64:
		case BYTECODE_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
		case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case BYTECODE_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case BYTECODE_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case BYTECODE_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = BYTECODE_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
			case REG_U64:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case BYTECODE_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_S8:
		case BYTECODE_OP_LOAD_FIELD_S16:
		case BYTECODE_OP_LOAD_FIELD_S32:
		case BYTECODE_OP_LOAD_FIELD_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_U8:
		case BYTECODE_OP_LOAD_FIELD_U16:
		case BYTECODE_OP_LOAD_FIELD_U32:
		case BYTECODE_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_STRING:
		case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(*pctx,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(pctx,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_payload_lookup(event_desc,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case BYTECODE_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case BYTECODE_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}
		}
	}
end:
	return ret;
}