Rename "tsc" to "timestamp"
[lttng-modules.git] / src / lttng-bytecode-specialize.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-bytecode-specialize.c
4 *
5 * LTTng modules bytecode code specializer.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/slab.h>
11 #include <wrapper/compiler_attributes.h>
12
13 #include <lttng/lttng-bytecode.h>
14 #include <lttng/align.h>
15 #include <lttng/events-internal.h>
16
17 static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
18 size_t align, size_t len)
19 {
20 ssize_t ret;
21 size_t padding = offset_align(runtime->data_len, align);
22 size_t new_len = runtime->data_len + padding + len;
23 size_t new_alloc_len = new_len;
24 size_t old_alloc_len = runtime->data_alloc_len;
25
26 if (new_len > INTERPRETER_MAX_DATA_LEN)
27 return -EINVAL;
28
29 if (new_alloc_len > old_alloc_len) {
30 char *newptr;
31
32 new_alloc_len =
33 max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
34 newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
35 if (!newptr)
36 return -ENOMEM;
37 runtime->data = newptr;
38 /* We zero directly the memory from start of allocation. */
39 memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
40 runtime->data_alloc_len = new_alloc_len;
41 }
42 runtime->data_len += padding;
43 ret = runtime->data_len;
44 runtime->data_len += len;
45 return ret;
46 }
47
48 static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
49 const void *p, size_t align, size_t len)
50 {
51 ssize_t offset;
52
53 offset = bytecode_reserve_data(runtime, align, len);
54 if (offset < 0)
55 return -ENOMEM;
56 memcpy(&runtime->data[offset], p, len);
57 return offset;
58 }
59
60 static int specialize_load_field(struct vstack_entry *stack_top,
61 struct load_op *insn)
62 {
63 int ret;
64
65 switch (stack_top->load.type) {
66 case LOAD_OBJECT:
67 break;
68 case LOAD_ROOT_CONTEXT:
69 case LOAD_ROOT_APP_CONTEXT:
70 case LOAD_ROOT_PAYLOAD:
71 default:
72 dbg_printk("Bytecode warning: cannot load root, missing field name.\n");
73 ret = -EINVAL;
74 goto end;
75 }
76 switch (stack_top->load.object_type) {
77 case OBJECT_TYPE_S8:
78 dbg_printk("op load field s8\n");
79 stack_top->type = REG_S64;
80 if (!stack_top->load.user)
81 insn->op = BYTECODE_OP_LOAD_FIELD_S8;
82 break;
83 case OBJECT_TYPE_S16:
84 dbg_printk("op load field s16\n");
85 stack_top->type = REG_S64;
86 if (!stack_top->load.rev_bo && !stack_top->load.user)
87 insn->op = BYTECODE_OP_LOAD_FIELD_S16;
88 break;
89 case OBJECT_TYPE_S32:
90 dbg_printk("op load field s32\n");
91 stack_top->type = REG_S64;
92 if (!stack_top->load.rev_bo && !stack_top->load.user)
93 insn->op = BYTECODE_OP_LOAD_FIELD_S32;
94 break;
95 case OBJECT_TYPE_S64:
96 dbg_printk("op load field s64\n");
97 stack_top->type = REG_S64;
98 if (!stack_top->load.rev_bo && !stack_top->load.user)
99 insn->op = BYTECODE_OP_LOAD_FIELD_S64;
100 break;
101 case OBJECT_TYPE_SIGNED_ENUM:
102 dbg_printk("op load field signed enumeration\n");
103 if (stack_top->load.user) {
104 printk(KERN_WARNING "LTTng: bytecode: user enum unsupported\n");
105 ret = -EINVAL;
106 goto end;
107 }
108 stack_top->type = REG_S64;
109 break;
110 case OBJECT_TYPE_U8:
111 dbg_printk("op load field u8\n");
112 stack_top->type = REG_S64;
113 if (!stack_top->load.user)
114 insn->op = BYTECODE_OP_LOAD_FIELD_U8;
115 break;
116 case OBJECT_TYPE_U16:
117 dbg_printk("op load field u16\n");
118 stack_top->type = REG_S64;
119 if (!stack_top->load.rev_bo && !stack_top->load.user)
120 insn->op = BYTECODE_OP_LOAD_FIELD_U16;
121 break;
122 case OBJECT_TYPE_U32:
123 dbg_printk("op load field u32\n");
124 stack_top->type = REG_S64;
125 if (!stack_top->load.rev_bo && !stack_top->load.user)
126 insn->op = BYTECODE_OP_LOAD_FIELD_U32;
127 break;
128 case OBJECT_TYPE_U64:
129 dbg_printk("op load field u64\n");
130 stack_top->type = REG_S64;
131 if (!stack_top->load.rev_bo && !stack_top->load.user)
132 insn->op = BYTECODE_OP_LOAD_FIELD_U64;
133 break;
134 case OBJECT_TYPE_UNSIGNED_ENUM:
135 dbg_printk("op load field unsigned enumeration\n");
136 if (stack_top->load.user) {
137 printk(KERN_WARNING "LTTng: bytecode: user enum unsupported\n");
138 ret = -EINVAL;
139 goto end;
140 }
141 stack_top->type = REG_U64;
142 break;
143 case OBJECT_TYPE_DOUBLE:
144 printk(KERN_WARNING "LTTng: bytecode: Double type unsupported\n\n");
145 ret = -EINVAL;
146 goto end;
147 case OBJECT_TYPE_STRING:
148 dbg_printk("op load field string\n");
149 stack_top->type = REG_STRING;
150 if (!stack_top->load.user)
151 insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
152 break;
153 case OBJECT_TYPE_STRING_SEQUENCE:
154 dbg_printk("op load field string sequence\n");
155 stack_top->type = REG_STRING;
156 if (!stack_top->load.user)
157 insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
158 break;
159 case OBJECT_TYPE_DYNAMIC:
160 ret = -EINVAL;
161 goto end;
162 case OBJECT_TYPE_SEQUENCE:
163 case OBJECT_TYPE_ARRAY:
164 case OBJECT_TYPE_STRUCT:
165 case OBJECT_TYPE_VARIANT:
166 printk(KERN_WARNING "LTTng: bytecode: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
167 ret = -EINVAL;
168 goto end;
169 }
170 return 0;
171
172 end:
173 return ret;
174 }
175
176 static int specialize_get_index_object_type(enum object_type *otype,
177 int signedness, uint32_t elem_len)
178 {
179 switch (elem_len) {
180 case 8:
181 if (signedness)
182 *otype = OBJECT_TYPE_S8;
183 else
184 *otype = OBJECT_TYPE_U8;
185 break;
186 case 16:
187 if (signedness)
188 *otype = OBJECT_TYPE_S16;
189 else
190 *otype = OBJECT_TYPE_U16;
191 break;
192 case 32:
193 if (signedness)
194 *otype = OBJECT_TYPE_S32;
195 else
196 *otype = OBJECT_TYPE_U32;
197 break;
198 case 64:
199 if (signedness)
200 *otype = OBJECT_TYPE_S64;
201 else
202 *otype = OBJECT_TYPE_U64;
203 break;
204 default:
205 return -EINVAL;
206 }
207 return 0;
208 }
209
/*
 * Specialize a get-index instruction: validate the indexed access against
 * the array/sequence element type on the virtual stack top, build a
 * bytecode_get_index_data descriptor, push it into the runtime data area,
 * and patch the instruction's inline index to reference that descriptor.
 *
 * @idx_len selects which inline index layout to patch (2 for get_index_u16,
 * 8 for get_index_u64). Returns 0 on success, negative error otherwise.
 */
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_kernel_event_field *field;
			const struct lttng_kernel_type_array *array_type;
			const struct lttng_kernel_type_integer *integer_type;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			array_type = lttng_kernel_get_type_array(field->type);
			/* Only bytewise-integer elements can be indexed. */
			if (!lttng_kernel_type_is_bytewise_integer(array_type->elem_type)) {
				ret = -EINVAL;
				goto end;
			}
			integer_type = lttng_kernel_get_type_integer(array_type->elem_type);
			num_elems = array_type->length;
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			/* Arrays have a static length: bounds-check at specialization time. */
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/* Byte offsets: elem_len is in bits. */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			stack_top->load.rev_bo = gid.elem.rev_bo = integer_type->reverse_byte_order;
			stack_top->load.user = gid.elem.user = integer_type->user;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_kernel_event_field *field;
			const struct lttng_kernel_type_sequence *sequence_type;
			const struct lttng_kernel_type_integer *integer_type;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			sequence_type = lttng_kernel_get_type_sequence(field->type);
			if (!lttng_kernel_type_is_bytewise_integer(sequence_type->elem_type)) {
				ret = -EINVAL;
				goto end;
			}
			integer_type = lttng_kernel_get_type_integer(sequence_type->elem_type);
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/*
			 * Sequence length is only known at runtime; no static
			 * bounds check and no array_len recorded here.
			 */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			stack_top->load.rev_bo = gid.elem.rev_bo = integer_type->reverse_byte_order;
			stack_top->load.user = gid.elem.user = integer_type->user;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:
			lttng_fallthrough;
		default:
			printk(KERN_WARNING "LTTng: bytecode: Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "LTTng: bytecode: Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	/* Store the descriptor and record its offset in the instruction. */
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
326
327 static int specialize_context_lookup_name(struct lttng_kernel_ctx *ctx,
328 struct bytecode_runtime *bytecode,
329 struct load_op *insn)
330 {
331 uint16_t offset;
332 const char *name;
333
334 offset = ((struct get_symbol *) insn->data)->offset;
335 name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
336 return lttng_kernel_get_context_index(ctx, name);
337 }
338
/*
 * Fill @load with the object type, byte order and user/kernel origin
 * matching @field's type, for later load specialization.
 *
 * @is_context: true when the field comes from a context lookup; context
 * arrays/sequences of text are always treated as strings there.
 * Returns 0 on success, -EINVAL for types that cannot be loaded.
 */
static int specialize_load_object(const struct lttng_kernel_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type->type) {
	case lttng_kernel_type_integer:
	{
		const struct lttng_kernel_type_integer *integer_type = lttng_kernel_get_type_integer(field->type);

		if (integer_type->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = integer_type->reverse_byte_order;
		load->user = integer_type->user;
		break;
	}
	case lttng_kernel_type_enum:
	{
		/* Enum semantics come from the container integer type. */
		const struct lttng_kernel_type_enum *enum_type = lttng_kernel_get_type_enum(field->type);
		const struct lttng_kernel_type_integer *integer_type = lttng_kernel_get_type_integer(enum_type->container_type);

		if (integer_type->signedness)
			load->object_type = OBJECT_TYPE_SIGNED_ENUM;
		else
			load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
		load->rev_bo = integer_type->reverse_byte_order;
		load->user = integer_type->user;
		break;
	}
	case lttng_kernel_type_array:
	{
		const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(field->type);
		const struct lttng_kernel_type_integer *integer_type;

		if (!lttng_kernel_type_is_bytewise_integer(array_type->elem_type)) {
			printk(KERN_WARNING "LTTng: bytecode: Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		integer_type = lttng_kernel_get_type_integer(array_type->elem_type);
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
			load->user = integer_type->user;
		} else {
			/* Encoding decides: raw integer array vs. text treated as string. */
			if (array_type->encoding == lttng_kernel_string_encoding_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
				load->user = integer_type->user;
			}
		}
		break;
	}
	case lttng_kernel_type_sequence:
	{
		const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(field->type);
		const struct lttng_kernel_type_integer *integer_type;

		if (!lttng_kernel_type_is_bytewise_integer(sequence_type->elem_type)) {
			printk(KERN_WARNING "LTTng: bytecode: Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		integer_type = lttng_kernel_get_type_integer(sequence_type->elem_type);
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
			load->user = integer_type->user;
		} else {
			if (sequence_type->encoding == lttng_kernel_string_encoding_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
				load->user = integer_type->user;
			}
		}
		break;
	}
	case lttng_kernel_type_string:
	{
		const struct lttng_kernel_type_string *string_type = lttng_kernel_get_type_string(field->type);

		load->object_type = OBJECT_TYPE_STRING;
		load->user = string_type->user;
		break;
	}
	case lttng_kernel_type_struct:
		printk(KERN_WARNING "LTTng: bytecode: Structure type cannot be loaded.\n");
		return -EINVAL;
	case lttng_kernel_type_variant:
		printk(KERN_WARNING "LTTng: bytecode: Variant type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "LTTng: bytecode: Unknown type: %d", (int) field->type->type);
		return -EINVAL;
	}
	return 0;
}
438
439 static int specialize_context_lookup(struct lttng_kernel_ctx *ctx,
440 struct bytecode_runtime *runtime,
441 struct load_op *insn,
442 struct vstack_load *load)
443 {
444 int idx, ret;
445 const struct lttng_kernel_ctx_field *ctx_field;
446 const struct lttng_kernel_event_field *field;
447 struct bytecode_get_index_data gid;
448 ssize_t data_offset;
449
450 idx = specialize_context_lookup_name(ctx, runtime, insn);
451 if (idx < 0) {
452 return -ENOENT;
453 }
454 ctx_field = &lttng_static_ctx->fields[idx];
455 field = ctx_field->event_field;
456 ret = specialize_load_object(field, load, true);
457 if (ret)
458 return ret;
459 /* Specialize each get_symbol into a get_index. */
460 insn->op = BYTECODE_OP_GET_INDEX_U16;
461 memset(&gid, 0, sizeof(gid));
462 gid.ctx_index = idx;
463 gid.elem.type = load->object_type;
464 gid.elem.rev_bo = load->rev_bo;
465 gid.elem.user = load->user;
466 gid.field = field;
467 data_offset = bytecode_push_data(runtime, &gid,
468 __alignof__(gid), sizeof(gid));
469 if (data_offset < 0) {
470 return -EINVAL;
471 }
472 ((struct get_index_u16 *) insn->data)->index = data_offset;
473 return 0;
474 }
475
/*
 * Specialize a get_symbol lookup into the event payload root: find the
 * named field in the event descriptor, compute its offset on the
 * interpreter's output stack, and rewrite the instruction as a
 * BYTECODE_OP_GET_INDEX_U16 whose descriptor lives in the runtime data
 * area. Returns 0 on success, -EINVAL when the field is unknown or of
 * an unsupported type.
 */
static int specialize_payload_lookup(const struct lttng_kernel_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_kernel_event_field *field;
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->tp_class->nr_fields;
	/* Symbol name lives in the bytecode's relocation table. */
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = event_desc->tp_class->fields[i];
		/* Fields hidden from filtering do not occupy stack slots. */
		if (field->nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type->type) {
		case lttng_kernel_type_integer:
		case lttng_kernel_type_enum:
			field_offset += sizeof(int64_t);
			break;
		case lttng_kernel_type_array:
		case lttng_kernel_type_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_kernel_type_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.elem.user = load->user;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
550
551 int lttng_bytecode_specialize(const struct lttng_kernel_event_desc *event_desc,
552 struct bytecode_runtime *bytecode)
553 {
554 void *pc, *next_pc, *start_pc;
555 int ret = -EINVAL;
556 struct vstack _stack;
557 struct vstack *stack = &_stack;
558 struct lttng_kernel_ctx *ctx = bytecode->p.ctx;
559
560 vstack_init(stack);
561
562 start_pc = &bytecode->code[0];
563 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
564 pc = next_pc) {
565 switch (*(bytecode_opcode_t *) pc) {
566 case BYTECODE_OP_UNKNOWN:
567 default:
568 printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
569 (unsigned int) *(bytecode_opcode_t *) pc);
570 ret = -EINVAL;
571 goto end;
572
573 case BYTECODE_OP_RETURN:
574 case BYTECODE_OP_RETURN_S64:
575 ret = 0;
576 goto end;
577
578 /* binary */
579 case BYTECODE_OP_MUL:
580 case BYTECODE_OP_DIV:
581 case BYTECODE_OP_MOD:
582 case BYTECODE_OP_PLUS:
583 case BYTECODE_OP_MINUS:
584 printk(KERN_WARNING "LTTng: bytecode: unknown bytecode op %u\n",
585 (unsigned int) *(bytecode_opcode_t *) pc);
586 ret = -EINVAL;
587 goto end;
588
589 case BYTECODE_OP_EQ:
590 {
591 struct binary_op *insn = (struct binary_op *) pc;
592
593 switch(vstack_ax(stack)->type) {
594 default:
595 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
596 ret = -EINVAL;
597 goto end;
598
599 case REG_STRING:
600 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
601 insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
602 else
603 insn->op = BYTECODE_OP_EQ_STRING;
604 break;
605 case REG_STAR_GLOB_STRING:
606 insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
607 break;
608 case REG_S64:
609 if (vstack_bx(stack)->type == REG_S64)
610 insn->op = BYTECODE_OP_EQ_S64;
611 else
612 insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
613 break;
614 case REG_DOUBLE:
615 if (vstack_bx(stack)->type == REG_S64)
616 insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
617 else
618 insn->op = BYTECODE_OP_EQ_DOUBLE;
619 break;
620 }
621 /* Pop 2, push 1 */
622 if (vstack_pop(stack)) {
623 ret = -EINVAL;
624 goto end;
625 }
626 vstack_ax(stack)->type = REG_S64;
627 next_pc += sizeof(struct binary_op);
628 break;
629 }
630
631 case BYTECODE_OP_NE:
632 {
633 struct binary_op *insn = (struct binary_op *) pc;
634
635 switch(vstack_ax(stack)->type) {
636 default:
637 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
638 ret = -EINVAL;
639 goto end;
640
641 case REG_STRING:
642 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
643 insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
644 else
645 insn->op = BYTECODE_OP_NE_STRING;
646 break;
647 case REG_STAR_GLOB_STRING:
648 insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
649 break;
650 case REG_S64:
651 if (vstack_bx(stack)->type == REG_S64)
652 insn->op = BYTECODE_OP_NE_S64;
653 else
654 insn->op = BYTECODE_OP_NE_DOUBLE_S64;
655 break;
656 case REG_DOUBLE:
657 if (vstack_bx(stack)->type == REG_S64)
658 insn->op = BYTECODE_OP_NE_S64_DOUBLE;
659 else
660 insn->op = BYTECODE_OP_NE_DOUBLE;
661 break;
662 }
663 /* Pop 2, push 1 */
664 if (vstack_pop(stack)) {
665 ret = -EINVAL;
666 goto end;
667 }
668 vstack_ax(stack)->type = REG_S64;
669 next_pc += sizeof(struct binary_op);
670 break;
671 }
672
673 case BYTECODE_OP_GT:
674 {
675 struct binary_op *insn = (struct binary_op *) pc;
676
677 switch(vstack_ax(stack)->type) {
678 default:
679 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
680 ret = -EINVAL;
681 goto end;
682
683 case REG_STAR_GLOB_STRING:
684 printk(KERN_WARNING "LTTng: bytecode: invalid register type for '>' binary operator\n");
685 ret = -EINVAL;
686 goto end;
687 case REG_STRING:
688 insn->op = BYTECODE_OP_GT_STRING;
689 break;
690 case REG_S64:
691 if (vstack_bx(stack)->type == REG_S64)
692 insn->op = BYTECODE_OP_GT_S64;
693 else
694 insn->op = BYTECODE_OP_GT_DOUBLE_S64;
695 break;
696 case REG_DOUBLE:
697 if (vstack_bx(stack)->type == REG_S64)
698 insn->op = BYTECODE_OP_GT_S64_DOUBLE;
699 else
700 insn->op = BYTECODE_OP_GT_DOUBLE;
701 break;
702 }
703 /* Pop 2, push 1 */
704 if (vstack_pop(stack)) {
705 ret = -EINVAL;
706 goto end;
707 }
708 vstack_ax(stack)->type = REG_S64;
709 next_pc += sizeof(struct binary_op);
710 break;
711 }
712
713 case BYTECODE_OP_LT:
714 {
715 struct binary_op *insn = (struct binary_op *) pc;
716
717 switch(vstack_ax(stack)->type) {
718 default:
719 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
720 ret = -EINVAL;
721 goto end;
722
723 case REG_STAR_GLOB_STRING:
724 printk(KERN_WARNING "LTTng: bytecode: invalid register type for '<' binary operator\n");
725 ret = -EINVAL;
726 goto end;
727 case REG_STRING:
728 insn->op = BYTECODE_OP_LT_STRING;
729 break;
730 case REG_S64:
731 if (vstack_bx(stack)->type == REG_S64)
732 insn->op = BYTECODE_OP_LT_S64;
733 else
734 insn->op = BYTECODE_OP_LT_DOUBLE_S64;
735 break;
736 case REG_DOUBLE:
737 if (vstack_bx(stack)->type == REG_S64)
738 insn->op = BYTECODE_OP_LT_S64_DOUBLE;
739 else
740 insn->op = BYTECODE_OP_LT_DOUBLE;
741 break;
742 }
743 /* Pop 2, push 1 */
744 if (vstack_pop(stack)) {
745 ret = -EINVAL;
746 goto end;
747 }
748 vstack_ax(stack)->type = REG_S64;
749 next_pc += sizeof(struct binary_op);
750 break;
751 }
752
753 case BYTECODE_OP_GE:
754 {
755 struct binary_op *insn = (struct binary_op *) pc;
756
757 switch(vstack_ax(stack)->type) {
758 default:
759 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
760 ret = -EINVAL;
761 goto end;
762
763 case REG_STAR_GLOB_STRING:
764 printk(KERN_WARNING "LTTng: bytecode: invalid register type for '>=' binary operator\n");
765 ret = -EINVAL;
766 goto end;
767 case REG_STRING:
768 insn->op = BYTECODE_OP_GE_STRING;
769 break;
770 case REG_S64:
771 if (vstack_bx(stack)->type == REG_S64)
772 insn->op = BYTECODE_OP_GE_S64;
773 else
774 insn->op = BYTECODE_OP_GE_DOUBLE_S64;
775 break;
776 case REG_DOUBLE:
777 if (vstack_bx(stack)->type == REG_S64)
778 insn->op = BYTECODE_OP_GE_S64_DOUBLE;
779 else
780 insn->op = BYTECODE_OP_GE_DOUBLE;
781 break;
782 }
783 /* Pop 2, push 1 */
784 if (vstack_pop(stack)) {
785 ret = -EINVAL;
786 goto end;
787 }
788 vstack_ax(stack)->type = REG_S64;
789 next_pc += sizeof(struct binary_op);
790 break;
791 }
792 case BYTECODE_OP_LE:
793 {
794 struct binary_op *insn = (struct binary_op *) pc;
795
796 switch(vstack_ax(stack)->type) {
797 default:
798 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
799 ret = -EINVAL;
800 goto end;
801
802 case REG_STAR_GLOB_STRING:
803 printk(KERN_WARNING "LTTng: bytecode: invalid register type for '<=' binary operator\n");
804 ret = -EINVAL;
805 goto end;
806 case REG_STRING:
807 insn->op = BYTECODE_OP_LE_STRING;
808 break;
809 case REG_S64:
810 if (vstack_bx(stack)->type == REG_S64)
811 insn->op = BYTECODE_OP_LE_S64;
812 else
813 insn->op = BYTECODE_OP_LE_DOUBLE_S64;
814 break;
815 case REG_DOUBLE:
816 if (vstack_bx(stack)->type == REG_S64)
817 insn->op = BYTECODE_OP_LE_S64_DOUBLE;
818 else
819 insn->op = BYTECODE_OP_LE_DOUBLE;
820 break;
821 }
822 vstack_ax(stack)->type = REG_S64;
823 next_pc += sizeof(struct binary_op);
824 break;
825 }
826
827 case BYTECODE_OP_EQ_STRING:
828 case BYTECODE_OP_NE_STRING:
829 case BYTECODE_OP_GT_STRING:
830 case BYTECODE_OP_LT_STRING:
831 case BYTECODE_OP_GE_STRING:
832 case BYTECODE_OP_LE_STRING:
833 case BYTECODE_OP_EQ_STAR_GLOB_STRING:
834 case BYTECODE_OP_NE_STAR_GLOB_STRING:
835 case BYTECODE_OP_EQ_S64:
836 case BYTECODE_OP_NE_S64:
837 case BYTECODE_OP_GT_S64:
838 case BYTECODE_OP_LT_S64:
839 case BYTECODE_OP_GE_S64:
840 case BYTECODE_OP_LE_S64:
841 case BYTECODE_OP_EQ_DOUBLE:
842 case BYTECODE_OP_NE_DOUBLE:
843 case BYTECODE_OP_GT_DOUBLE:
844 case BYTECODE_OP_LT_DOUBLE:
845 case BYTECODE_OP_GE_DOUBLE:
846 case BYTECODE_OP_LE_DOUBLE:
847 case BYTECODE_OP_EQ_DOUBLE_S64:
848 case BYTECODE_OP_NE_DOUBLE_S64:
849 case BYTECODE_OP_GT_DOUBLE_S64:
850 case BYTECODE_OP_LT_DOUBLE_S64:
851 case BYTECODE_OP_GE_DOUBLE_S64:
852 case BYTECODE_OP_LE_DOUBLE_S64:
853 case BYTECODE_OP_EQ_S64_DOUBLE:
854 case BYTECODE_OP_NE_S64_DOUBLE:
855 case BYTECODE_OP_GT_S64_DOUBLE:
856 case BYTECODE_OP_LT_S64_DOUBLE:
857 case BYTECODE_OP_GE_S64_DOUBLE:
858 case BYTECODE_OP_LE_S64_DOUBLE:
859 case BYTECODE_OP_BIT_RSHIFT:
860 case BYTECODE_OP_BIT_LSHIFT:
861 case BYTECODE_OP_BIT_AND:
862 case BYTECODE_OP_BIT_OR:
863 case BYTECODE_OP_BIT_XOR:
864 {
865 /* Pop 2, push 1 */
866 if (vstack_pop(stack)) {
867 ret = -EINVAL;
868 goto end;
869 }
870 vstack_ax(stack)->type = REG_S64;
871 next_pc += sizeof(struct binary_op);
872 break;
873 }
874
875 /* unary */
876 case BYTECODE_OP_UNARY_PLUS:
877 {
878 struct unary_op *insn = (struct unary_op *) pc;
879
880 switch(vstack_ax(stack)->type) {
881 default:
882 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
883 ret = -EINVAL;
884 goto end;
885
886 case REG_S64:
887 insn->op = BYTECODE_OP_UNARY_PLUS_S64;
888 break;
889 case REG_DOUBLE:
890 insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
891 break;
892 }
893 /* Pop 1, push 1 */
894 next_pc += sizeof(struct unary_op);
895 break;
896 }
897
898 case BYTECODE_OP_UNARY_MINUS:
899 {
900 struct unary_op *insn = (struct unary_op *) pc;
901
902 switch(vstack_ax(stack)->type) {
903 default:
904 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
905 ret = -EINVAL;
906 goto end;
907
908 case REG_S64:
909 insn->op = BYTECODE_OP_UNARY_MINUS_S64;
910 break;
911 case REG_DOUBLE:
912 insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
913 break;
914 }
915 /* Pop 1, push 1 */
916 next_pc += sizeof(struct unary_op);
917 break;
918 }
919
920 case BYTECODE_OP_UNARY_NOT:
921 {
922 struct unary_op *insn = (struct unary_op *) pc;
923
924 switch(vstack_ax(stack)->type) {
925 default:
926 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
927 ret = -EINVAL;
928 goto end;
929
930 case REG_S64:
931 insn->op = BYTECODE_OP_UNARY_NOT_S64;
932 break;
933 case REG_DOUBLE:
934 insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
935 break;
936 }
937 /* Pop 1, push 1 */
938 next_pc += sizeof(struct unary_op);
939 break;
940 }
941
942 case BYTECODE_OP_UNARY_BIT_NOT:
943 {
944 /* Pop 1, push 1 */
945 next_pc += sizeof(struct unary_op);
946 break;
947 }
948
949 case BYTECODE_OP_UNARY_PLUS_S64:
950 case BYTECODE_OP_UNARY_MINUS_S64:
951 case BYTECODE_OP_UNARY_NOT_S64:
952 case BYTECODE_OP_UNARY_PLUS_DOUBLE:
953 case BYTECODE_OP_UNARY_MINUS_DOUBLE:
954 case BYTECODE_OP_UNARY_NOT_DOUBLE:
955 {
956 /* Pop 1, push 1 */
957 next_pc += sizeof(struct unary_op);
958 break;
959 }
960
961 /* logical */
962 case BYTECODE_OP_AND:
963 case BYTECODE_OP_OR:
964 {
965 /* Continue to next instruction */
966 /* Pop 1 when jump not taken */
967 if (vstack_pop(stack)) {
968 ret = -EINVAL;
969 goto end;
970 }
971 next_pc += sizeof(struct logical_op);
972 break;
973 }
974
975 /* load field ref */
976 case BYTECODE_OP_LOAD_FIELD_REF:
977 {
978 printk(KERN_WARNING "LTTng: bytecode: Unknown field ref type\n");
979 ret = -EINVAL;
980 goto end;
981 }
982 /* get context ref */
983 case BYTECODE_OP_GET_CONTEXT_REF:
984 {
985 printk(KERN_WARNING "LTTng: bytecode: Unknown get context ref type\n");
986 ret = -EINVAL;
987 goto end;
988 }
989 case BYTECODE_OP_LOAD_FIELD_REF_STRING:
990 case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
991 case BYTECODE_OP_GET_CONTEXT_REF_STRING:
992 case BYTECODE_OP_LOAD_FIELD_REF_USER_STRING:
993 case BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE:
994 {
995 if (vstack_push(stack)) {
996 ret = -EINVAL;
997 goto end;
998 }
999 vstack_ax(stack)->type = REG_STRING;
1000 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1001 break;
1002 }
1003 case BYTECODE_OP_LOAD_FIELD_REF_S64:
1004 case BYTECODE_OP_GET_CONTEXT_REF_S64:
1005 {
1006 if (vstack_push(stack)) {
1007 ret = -EINVAL;
1008 goto end;
1009 }
1010 vstack_ax(stack)->type = REG_S64;
1011 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1012 break;
1013 }
1014 case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
1015 case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
1016 {
1017 if (vstack_push(stack)) {
1018 ret = -EINVAL;
1019 goto end;
1020 }
1021 vstack_ax(stack)->type = REG_DOUBLE;
1022 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1023 break;
1024 }
1025
1026 /* load from immediate operand */
1027 case BYTECODE_OP_LOAD_STRING:
1028 {
1029 struct load_op *insn = (struct load_op *) pc;
1030
1031 if (vstack_push(stack)) {
1032 ret = -EINVAL;
1033 goto end;
1034 }
1035 vstack_ax(stack)->type = REG_STRING;
1036 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1037 break;
1038 }
1039
1040 case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
1041 {
1042 struct load_op *insn = (struct load_op *) pc;
1043
1044 if (vstack_push(stack)) {
1045 ret = -EINVAL;
1046 goto end;
1047 }
1048 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1049 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1050 break;
1051 }
1052
1053 case BYTECODE_OP_LOAD_S64:
1054 {
1055 if (vstack_push(stack)) {
1056 ret = -EINVAL;
1057 goto end;
1058 }
1059 vstack_ax(stack)->type = REG_S64;
1060 next_pc += sizeof(struct load_op)
1061 + sizeof(struct literal_numeric);
1062 break;
1063 }
1064
1065 case BYTECODE_OP_LOAD_DOUBLE:
1066 {
1067 if (vstack_push(stack)) {
1068 ret = -EINVAL;
1069 goto end;
1070 }
1071 vstack_ax(stack)->type = REG_DOUBLE;
1072 next_pc += sizeof(struct load_op)
1073 + sizeof(struct literal_double);
1074 break;
1075 }
1076
1077 /* cast */
1078 case BYTECODE_OP_CAST_TO_S64:
1079 {
1080 struct cast_op *insn = (struct cast_op *) pc;
1081
1082 switch (vstack_ax(stack)->type) {
1083 default:
1084 printk(KERN_WARNING "LTTng: bytecode: unknown register type\n");
1085 ret = -EINVAL;
1086 goto end;
1087
1088 case REG_STRING:
1089 case REG_STAR_GLOB_STRING:
1090 printk(KERN_WARNING "LTTng: bytecode: Cast op can only be applied to numeric or floating point registers\n");
1091 ret = -EINVAL;
1092 goto end;
1093 case REG_S64:
1094 insn->op = BYTECODE_OP_CAST_NOP;
1095 break;
1096 case REG_DOUBLE:
1097 insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
1098 break;
1099 }
1100 /* Pop 1, push 1 */
1101 vstack_ax(stack)->type = REG_S64;
1102 next_pc += sizeof(struct cast_op);
1103 break;
1104 }
1105 case BYTECODE_OP_CAST_DOUBLE_TO_S64:
1106 {
1107 /* Pop 1, push 1 */
1108 vstack_ax(stack)->type = REG_S64;
1109 next_pc += sizeof(struct cast_op);
1110 break;
1111 }
1112 case BYTECODE_OP_CAST_NOP:
1113 {
1114 next_pc += sizeof(struct cast_op);
1115 break;
1116 }
1117
1118 /*
1119 * Instructions for recursive traversal through composed types.
1120 */
1121 case BYTECODE_OP_GET_CONTEXT_ROOT:
1122 {
1123 if (vstack_push(stack)) {
1124 ret = -EINVAL;
1125 goto end;
1126 }
1127 vstack_ax(stack)->type = REG_PTR;
1128 vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
1129 next_pc += sizeof(struct load_op);
1130 break;
1131 }
1132 case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
1133 {
1134 if (vstack_push(stack)) {
1135 ret = -EINVAL;
1136 goto end;
1137 }
1138 vstack_ax(stack)->type = REG_PTR;
1139 vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
1140 next_pc += sizeof(struct load_op);
1141 break;
1142 }
1143 case BYTECODE_OP_GET_PAYLOAD_ROOT:
1144 {
1145 if (vstack_push(stack)) {
1146 ret = -EINVAL;
1147 goto end;
1148 }
1149 vstack_ax(stack)->type = REG_PTR;
1150 vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
1151 next_pc += sizeof(struct load_op);
1152 break;
1153 }
1154
1155 case BYTECODE_OP_LOAD_FIELD:
1156 {
1157 struct load_op *insn = (struct load_op *) pc;
1158
1159 WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
1160 /* Pop 1, push 1 */
1161 ret = specialize_load_field(vstack_ax(stack), insn);
1162 if (ret)
1163 goto end;
1164
1165 next_pc += sizeof(struct load_op);
1166 break;
1167 }
1168
1169 case BYTECODE_OP_LOAD_FIELD_S8:
1170 case BYTECODE_OP_LOAD_FIELD_S16:
1171 case BYTECODE_OP_LOAD_FIELD_S32:
1172 case BYTECODE_OP_LOAD_FIELD_S64:
1173 case BYTECODE_OP_LOAD_FIELD_U8:
1174 case BYTECODE_OP_LOAD_FIELD_U16:
1175 case BYTECODE_OP_LOAD_FIELD_U32:
1176 case BYTECODE_OP_LOAD_FIELD_U64:
1177 {
1178 /* Pop 1, push 1 */
1179 vstack_ax(stack)->type = REG_S64;
1180 next_pc += sizeof(struct load_op);
1181 break;
1182 }
1183
1184 case BYTECODE_OP_LOAD_FIELD_STRING:
1185 case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
1186 {
1187 /* Pop 1, push 1 */
1188 vstack_ax(stack)->type = REG_STRING;
1189 next_pc += sizeof(struct load_op);
1190 break;
1191 }
1192
1193 case BYTECODE_OP_LOAD_FIELD_DOUBLE:
1194 {
1195 /* Pop 1, push 1 */
1196 vstack_ax(stack)->type = REG_DOUBLE;
1197 next_pc += sizeof(struct load_op);
1198 break;
1199 }
1200
1201 case BYTECODE_OP_GET_SYMBOL:
1202 {
1203 struct load_op *insn = (struct load_op *) pc;
1204
1205 dbg_printk("op get symbol\n");
1206 switch (vstack_ax(stack)->load.type) {
1207 case LOAD_OBJECT:
1208 printk(KERN_WARNING "LTTng: bytecode: Nested fields not implemented yet.\n");
1209 ret = -EINVAL;
1210 goto end;
1211 case LOAD_ROOT_CONTEXT:
1212 /* Lookup context field. */
1213 ret = specialize_context_lookup(ctx, bytecode, insn,
1214 &vstack_ax(stack)->load);
1215 if (ret)
1216 goto end;
1217 break;
1218 case LOAD_ROOT_APP_CONTEXT:
1219 ret = -EINVAL;
1220 goto end;
1221 case LOAD_ROOT_PAYLOAD:
1222 /* Lookup event payload field. */
1223 ret = specialize_payload_lookup(event_desc,
1224 bytecode, insn,
1225 &vstack_ax(stack)->load);
1226 if (ret)
1227 goto end;
1228 break;
1229 }
1230 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1231 break;
1232 }
1233
1234 case BYTECODE_OP_GET_SYMBOL_FIELD:
1235 {
1236 /* Always generated by specialize phase. */
1237 ret = -EINVAL;
1238 goto end;
1239 }
1240
1241 case BYTECODE_OP_GET_INDEX_U16:
1242 {
1243 struct load_op *insn = (struct load_op *) pc;
1244 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1245
1246 dbg_printk("op get index u16\n");
1247 /* Pop 1, push 1 */
1248 ret = specialize_get_index(bytecode, insn, index->index,
1249 vstack_ax(stack), sizeof(*index));
1250 if (ret)
1251 goto end;
1252 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1253 break;
1254 }
1255
1256 case BYTECODE_OP_GET_INDEX_U64:
1257 {
1258 struct load_op *insn = (struct load_op *) pc;
1259 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1260
1261 dbg_printk("op get index u64\n");
1262 /* Pop 1, push 1 */
1263 ret = specialize_get_index(bytecode, insn, index->index,
1264 vstack_ax(stack), sizeof(*index));
1265 if (ret)
1266 goto end;
1267 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1268 break;
1269 }
1270
1271 }
1272 }
1273 end:
1274 return ret;
1275 }
This page took 0.083614 seconds and 4 git commands to generate.