fix: tie compaction probe build to CONFIG_COMPACTION
[lttng-modules.git] / lttng-filter-interpreter.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-interpreter.c
4 *
5 * LTTng modules filter interpreter.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <wrapper/compiler_attributes.h>
11 #include <wrapper/uaccess.h>
12 #include <wrapper/objtool.h>
13 #include <wrapper/types.h>
14 #include <linux/swab.h>
15
16 #include <lttng-filter.h>
17 #include <lttng-string-utils.h>
18
19 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
20
21 /*
22 * get_char should be called with page fault handler disabled if it is expected
23 * to handle user-space read.
24 */
25 static
26 char get_char(const struct estack_entry *reg, size_t offset)
27 {
28 if (unlikely(offset >= reg->u.s.seq_len))
29 return '\0';
30 if (reg->u.s.user) {
31 char c;
32
33 /* Handle invalid access as end of string. */
34 if (unlikely(!lttng_access_ok(VERIFY_READ,
35 reg->u.s.user_str + offset,
36 sizeof(c))))
37 return '\0';
38 /* Handle fault (nonzero return value) as end of string. */
39 if (unlikely(__copy_from_user_inatomic(&c,
40 reg->u.s.user_str + offset,
41 sizeof(c))))
42 return '\0';
43 return c;
44 } else {
45 return reg->u.s.str[offset];
46 }
47 }
48
49 /*
50 * -1: wildcard found.
51 * -2: unknown escape char.
52 * 0: normal char.
53 */
54 static
55 int parse_char(struct estack_entry *reg, char *c, size_t *offset)
56 {
57 switch (*c) {
58 case '\\':
59 (*offset)++;
60 *c = get_char(reg, *offset);
61 switch (*c) {
62 case '\\':
63 case '*':
64 return 0;
65 default:
66 return -2;
67 }
68 case '*':
69 return -1;
70 default:
71 return 0;
72 }
73 }
74
75 static
76 char get_char_at_cb(size_t at, void *data)
77 {
78 return get_char(data, at);
79 }
80
81 static
82 int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
83 {
84 bool has_user = false;
85 int result;
86 struct estack_entry *pattern_reg;
87 struct estack_entry *candidate_reg;
88
89 /* Disable the page fault handler when reading from userspace. */
90 if (estack_bx(stack, top)->u.s.user
91 || estack_ax(stack, top)->u.s.user) {
92 has_user = true;
93 pagefault_disable();
94 }
95
96 /* Find out which side is the pattern vs. the candidate. */
97 if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
98 pattern_reg = estack_ax(stack, top);
99 candidate_reg = estack_bx(stack, top);
100 } else {
101 pattern_reg = estack_bx(stack, top);
102 candidate_reg = estack_ax(stack, top);
103 }
104
105 /* Perform the match operation. */
106 result = !strutils_star_glob_match_char_cb(get_char_at_cb,
107 pattern_reg, get_char_at_cb, candidate_reg);
108 if (has_user)
109 pagefault_enable();
110
111 return result;
112 }
113
/*
 * strcmp-like comparison of the two string registers on top of the
 * estack (bx vs ax): returns < 0, 0, or > 0 as bx compares below,
 * equal to, or above ax.
 *
 * Plain string literals get special treatment: a '*' wildcard makes the
 * remainder compare equal (diff = 0), and backslash escapes are decoded
 * via parse_char(). An unknown escape (-2) on one side only orders that
 * side above the other. cmp_type is unused; it documents the operator at
 * call sites.
 */
static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;

	/* Userspace string reads require the page fault handler disabled. */
	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;	/* bx char was an unknown escape (-2). */
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				/* Both strings ended together: equal. */
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal_type ==
						ESTACK_STRING_LITERAL_TYPE_PLAIN) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					/* Trailing '*' in ax matches the empty tail. */
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal_type ==
					ESTACK_STRING_LITERAL_TYPE_PLAIN) {
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				/* Trailing '*' in bx matches the empty tail. */
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				/* Wildcard in bx: remainder compares equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				/* Wildcard in ax: remainder compares equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				/* Unknown escape on ax only: ax orders higher. */
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				/* Unknown escape on bx only: bx orders higher. */
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user)
		pagefault_enable();

	return diff;
}
212
/*
 * Filter stub that unconditionally returns 0 (discard the event).
 * NOTE(review): presumably installed as the filter callback when
 * bytecode is absent or failed to link — confirm against the code that
 * assigns this function pointer.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}
219
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 *
 * Fix: start from &bytecode->code[0], matching the dispatch-table
 * variant below. The previous &bytecode->data[0] referenced the
 * runtime data area (see runtime->data[index] usage in
 * dynamic_get_index()) rather than the instruction stream.
 */

#define START_OP \
	start_pc = &bytecode->code[0]; \
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
			pc = next_pc) { \
		dbg_printk("Executing op %s (%u)\n", \
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc); \
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		} \
	}

#else

/*
 * Dispatch-table based interpreter, using the GNU C labels-as-values
 * (computed goto) extension: each opcode jumps directly to the label of
 * the next instruction's handler.
 */

#define START_OP \
	start_pc = &bytecode->code[0]; \
	pc = next_pc = start_pc; \
	if (unlikely(pc - start_pc >= bytecode->len)) \
		goto end; \
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name) \
LABEL_##name

#define PO \
	pc = next_pc; \
	goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
265
/*
 * Load static context field @idx into @ptr as a LOAD_OBJECT.
 *
 * Integer and enum contexts are fetched by value through the field's
 * get_value() callback and stored in ptr->u (with ptr->ptr aimed at the
 * embedded copy); string, string-array and string-sequence contexts are
 * loaded by reference (ptr->ptr points at the context-provided string).
 *
 * Returns 0 on success, -EINVAL for unsupported context field types.
 */
static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct load_ptr *ptr,
		uint32_t idx)
{

	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	union lttng_ctx_value v;

	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ptr->type = LOAD_OBJECT;
	/* field is only used for types nested within variants. */
	ptr->field = NULL;

	switch (field->type.atype) {
	case atype_integer:
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (field->type.u.basic.integer.signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64; /* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		break;
	case atype_enum:
	{
		/* Enums are loaded as their integer container type. */
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (itype->signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64; /* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		break;
	}
	case atype_array:
		/* Only text arrays (char arrays) are usable as strings. */
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_sequence:
		/* Only text sequences (char sequences) are usable as strings. */
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
357
/*
 * Apply a "get index" bytecode operation to the pointer object on
 * @stack_top, using the filter_get_index_data stored at @index in the
 * runtime data area.
 *
 * For array/sequence objects this advances the element pointer by the
 * precomputed byte offset; for context roots it resolves the context
 * field; for the payload root it offsets into the event payload.
 * Returns 0 on success, -EINVAL on out-of-bounds or unsupported types.
 */
static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct bytecode_runtime *runtime,
		uint64_t index, struct estack_entry *stack_top)
{
	int ret;
	const struct filter_get_index_data *gid;

	/*
	 * Types nested within variants need to perform dynamic lookup
	 * based on the field descriptions. LTTng-UST does not implement
	 * variants for now.
	 */
	if (stack_top->u.ptr.field)
		return -EINVAL;
	gid = (const struct filter_get_index_data *) &runtime->data[index];
	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		switch (stack_top->u.ptr.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const char *ptr;

			/* Offset was validated against the static array length at link time. */
			WARN_ON_ONCE(gid->offset >= gid->array_len);
			/* Skip count (unsigned long) */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const char *ptr;
			size_t ptr_seq_len;

			/* Sequence layout: element count (unsigned long), then data pointer. */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
			/* Runtime bound check: sequence length is only known now. */
			if (gid->offset >= gid->elem.len * ptr_seq_len) {
				ret = -EINVAL;
				goto end;
			}
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			printk(KERN_WARNING "Nested structures are not supported yet.\n");
			ret = -EINVAL;
			goto end;
		case OBJECT_TYPE_VARIANT:
		default:
			printk(KERN_WARNING "Unexpected get index type %d",
				(int) stack_top->u.ptr.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
		lttng_fallthrough;
	case LOAD_ROOT_APP_CONTEXT:
	{
		/* Resolve the context field by its linked index. */
		ret = context_get_index(lttng_probe_ctx,
				&stack_top->u.ptr,
				gid->ctx_index);
		if (ret) {
			goto end;
		}
		break;
	}
	case LOAD_ROOT_PAYLOAD:
		stack_top->u.ptr.ptr += gid->offset;
		/* Strings in the payload are stored by reference: dereference once. */
		if (gid->elem.type == OBJECT_TYPE_STRING)
			stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
		stack_top->u.ptr.object_type = gid->elem.type;
		stack_top->u.ptr.type = LOAD_OBJECT;
		/* field is only used for types nested within variants. */
		stack_top->u.ptr.field = NULL;
		break;
	}
	return 0;

end:
	return ret;
}
449
450 static int dynamic_load_field(struct estack_entry *stack_top)
451 {
452 int ret;
453
454 switch (stack_top->u.ptr.type) {
455 case LOAD_OBJECT:
456 break;
457 case LOAD_ROOT_CONTEXT:
458 case LOAD_ROOT_APP_CONTEXT:
459 case LOAD_ROOT_PAYLOAD:
460 default:
461 dbg_printk("Filter warning: cannot load root, missing field name.\n");
462 ret = -EINVAL;
463 goto end;
464 }
465 switch (stack_top->u.ptr.object_type) {
466 case OBJECT_TYPE_S8:
467 dbg_printk("op load field s8\n");
468 stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
469 break;
470 case OBJECT_TYPE_S16:
471 {
472 int16_t tmp;
473
474 dbg_printk("op load field s16\n");
475 tmp = *(int16_t *) stack_top->u.ptr.ptr;
476 if (stack_top->u.ptr.rev_bo)
477 __swab16s(&tmp);
478 stack_top->u.v = tmp;
479 break;
480 }
481 case OBJECT_TYPE_S32:
482 {
483 int32_t tmp;
484
485 dbg_printk("op load field s32\n");
486 tmp = *(int32_t *) stack_top->u.ptr.ptr;
487 if (stack_top->u.ptr.rev_bo)
488 __swab32s(&tmp);
489 stack_top->u.v = tmp;
490 break;
491 }
492 case OBJECT_TYPE_S64:
493 {
494 int64_t tmp;
495
496 dbg_printk("op load field s64\n");
497 tmp = *(int64_t *) stack_top->u.ptr.ptr;
498 if (stack_top->u.ptr.rev_bo)
499 __swab64s(&tmp);
500 stack_top->u.v = tmp;
501 break;
502 }
503 case OBJECT_TYPE_U8:
504 dbg_printk("op load field u8\n");
505 stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
506 break;
507 case OBJECT_TYPE_U16:
508 {
509 uint16_t tmp;
510
511 dbg_printk("op load field s16\n");
512 tmp = *(uint16_t *) stack_top->u.ptr.ptr;
513 if (stack_top->u.ptr.rev_bo)
514 __swab16s(&tmp);
515 stack_top->u.v = tmp;
516 break;
517 }
518 case OBJECT_TYPE_U32:
519 {
520 uint32_t tmp;
521
522 dbg_printk("op load field u32\n");
523 tmp = *(uint32_t *) stack_top->u.ptr.ptr;
524 if (stack_top->u.ptr.rev_bo)
525 __swab32s(&tmp);
526 stack_top->u.v = tmp;
527 break;
528 }
529 case OBJECT_TYPE_U64:
530 {
531 uint64_t tmp;
532
533 dbg_printk("op load field u64\n");
534 tmp = *(uint64_t *) stack_top->u.ptr.ptr;
535 if (stack_top->u.ptr.rev_bo)
536 __swab64s(&tmp);
537 stack_top->u.v = tmp;
538 break;
539 }
540 case OBJECT_TYPE_STRING:
541 {
542 const char *str;
543
544 dbg_printk("op load field string\n");
545 str = (const char *) stack_top->u.ptr.ptr;
546 stack_top->u.s.str = str;
547 if (unlikely(!stack_top->u.s.str)) {
548 dbg_printk("Filter warning: loading a NULL string.\n");
549 ret = -EINVAL;
550 goto end;
551 }
552 stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
553 stack_top->u.s.literal_type =
554 ESTACK_STRING_LITERAL_TYPE_NONE;
555 break;
556 }
557 case OBJECT_TYPE_STRING_SEQUENCE:
558 {
559 const char *ptr;
560
561 dbg_printk("op load field string sequence\n");
562 ptr = stack_top->u.ptr.ptr;
563 stack_top->u.s.seq_len = *(unsigned long *) ptr;
564 stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
565 if (unlikely(!stack_top->u.s.str)) {
566 dbg_printk("Filter warning: loading a NULL sequence.\n");
567 ret = -EINVAL;
568 goto end;
569 }
570 stack_top->u.s.literal_type =
571 ESTACK_STRING_LITERAL_TYPE_NONE;
572 break;
573 }
574 case OBJECT_TYPE_DYNAMIC:
575 /*
576 * Dynamic types in context are looked up
577 * by context get index.
578 */
579 ret = -EINVAL;
580 goto end;
581 case OBJECT_TYPE_DOUBLE:
582 ret = -EINVAL;
583 goto end;
584 case OBJECT_TYPE_SEQUENCE:
585 case OBJECT_TYPE_ARRAY:
586 case OBJECT_TYPE_STRUCT:
587 case OBJECT_TYPE_VARIANT:
588 printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
589 ret = -EINVAL;
590 goto end;
591 }
592 return 0;
593
594 end:
595 return ret;
596 }
597
#ifdef DEBUG

/* Maximum number of user-string bytes copied for debug printing. */
#define DBG_USER_STR_CUTOFF 32

/*
 * In debug mode, print user string (truncated, if necessary).
 */
static inline
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
	size_t pos = 0;
	char last_char;
	char user_str[DBG_USER_STR_CUTOFF];

	/* get_char() may read userspace: disable page faults around the copy. */
	pagefault_disable();
	do {
		last_char = get_char(user_str_reg, pos);
		user_str[pos] = last_char;
		pos++;
	} while (last_char != '\0' && pos < sizeof(user_str));
	pagefault_enable();

	/* Force termination; flag truncation when the copy hit the cutoff. */
	user_str[sizeof(user_str) - 1] = '\0';
	dbg_printk("load field ref user string: '%s%s'\n", user_str,
		last_char != '\0' ? "[...]" : "");
}
#else
/* Non-debug builds: no-op, compiled out entirely. */
static inline
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
}
#endif
630
631 /*
632 * Return 0 (discard), or raise the 0x1 flag (log event).
633 * Currently, other flags are kept for future extensions and have no
634 * effect.
635 */
636 uint64_t lttng_filter_interpret_bytecode(void *filter_data,
637 struct lttng_probe_ctx *lttng_probe_ctx,
638 const char *filter_stack_data)
639 {
640 struct bytecode_runtime *bytecode = filter_data;
641 void *pc, *next_pc, *start_pc;
642 int ret = -EINVAL;
643 uint64_t retval = 0;
644 struct estack _stack;
645 struct estack *stack = &_stack;
646 register int64_t ax = 0, bx = 0;
647 register int top = FILTER_STACK_EMPTY;
648 #ifndef INTERPRETER_USE_SWITCH
649 static void *dispatch[NR_FILTER_OPS] = {
650 [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
651
652 [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
653
654 /* binary */
655 [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
656 [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
657 [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
658 [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
659 [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
660 [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
661 [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
662 [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
663 [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
664 [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
665
666 /* binary comparators */
667 [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
668 [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
669 [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
670 [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
671 [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
672 [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
673
674 /* string binary comparator */
675 [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
676 [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
677 [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
678 [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
679 [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
680 [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
681
682 /* globbing pattern binary comparator */
683 [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
684 [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
685
686 /* s64 binary comparator */
687 [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
688 [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
689 [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
690 [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
691 [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
692 [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
693
694 /* double binary comparator */
695 [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
696 [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
697 [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
698 [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
699 [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
700 [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
701
702 /* Mixed S64-double binary comparators */
703 [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
704 [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
705 [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
706 [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
707 [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
708 [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
709
710 [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
711 [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
712 [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
713 [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
714 [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
715 [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
716
717 /* unary */
718 [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
719 [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
720 [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
721 [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
722 [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
723 [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
724 [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
725 [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
726 [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
727
728 /* logical */
729 [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
730 [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
731
732 /* load field ref */
733 [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
734 [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
735 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
736 [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
737 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
738
739 /* load from immediate operand */
740 [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
741 [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
742 [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
743 [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
744
745 /* cast */
746 [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
747 [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
748 [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
749
750 /* get context ref */
751 [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
752 [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
753 [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
754 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
755
756 /* load userspace field ref */
757 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
758 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
759
760 /* Instructions for recursive traversal through composed types. */
761 [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
762 [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
763 [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
764
765 [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
766 [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
767 [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
768 [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
769
770 [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
771 [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
772 [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
773 [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
774 [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
775 [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
776 [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
777 [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
778 [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
779 [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
780 [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
781 [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
782
783 [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
784
785 [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
786 };
787 #endif /* #ifndef INTERPRETER_USE_SWITCH */
788
789 START_OP
790
791 OP(FILTER_OP_UNKNOWN):
792 OP(FILTER_OP_LOAD_FIELD_REF):
793 OP(FILTER_OP_GET_CONTEXT_REF):
794 #ifdef INTERPRETER_USE_SWITCH
795 default:
796 #endif /* INTERPRETER_USE_SWITCH */
797 printk(KERN_WARNING "unknown bytecode op %u\n",
798 (unsigned int) *(filter_opcode_t *) pc);
799 ret = -EINVAL;
800 goto end;
801
802 OP(FILTER_OP_RETURN):
803 OP(FILTER_OP_RETURN_S64):
804 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
805 retval = !!estack_ax_v;
806 ret = 0;
807 goto end;
808
809 /* binary */
810 OP(FILTER_OP_MUL):
811 OP(FILTER_OP_DIV):
812 OP(FILTER_OP_MOD):
813 OP(FILTER_OP_PLUS):
814 OP(FILTER_OP_MINUS):
815 printk(KERN_WARNING "unsupported bytecode op %u\n",
816 (unsigned int) *(filter_opcode_t *) pc);
817 ret = -EINVAL;
818 goto end;
819
820 OP(FILTER_OP_EQ):
821 OP(FILTER_OP_NE):
822 OP(FILTER_OP_GT):
823 OP(FILTER_OP_LT):
824 OP(FILTER_OP_GE):
825 OP(FILTER_OP_LE):
826 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
827 (unsigned int) *(filter_opcode_t *) pc);
828 ret = -EINVAL;
829 goto end;
830
831 OP(FILTER_OP_EQ_STRING):
832 {
833 int res;
834
835 res = (stack_strcmp(stack, top, "==") == 0);
836 estack_pop(stack, top, ax, bx);
837 estack_ax_v = res;
838 next_pc += sizeof(struct binary_op);
839 PO;
840 }
841 OP(FILTER_OP_NE_STRING):
842 {
843 int res;
844
845 res = (stack_strcmp(stack, top, "!=") != 0);
846 estack_pop(stack, top, ax, bx);
847 estack_ax_v = res;
848 next_pc += sizeof(struct binary_op);
849 PO;
850 }
851 OP(FILTER_OP_GT_STRING):
852 {
853 int res;
854
855 res = (stack_strcmp(stack, top, ">") > 0);
856 estack_pop(stack, top, ax, bx);
857 estack_ax_v = res;
858 next_pc += sizeof(struct binary_op);
859 PO;
860 }
861 OP(FILTER_OP_LT_STRING):
862 {
863 int res;
864
865 res = (stack_strcmp(stack, top, "<") < 0);
866 estack_pop(stack, top, ax, bx);
867 estack_ax_v = res;
868 next_pc += sizeof(struct binary_op);
869 PO;
870 }
871 OP(FILTER_OP_GE_STRING):
872 {
873 int res;
874
875 res = (stack_strcmp(stack, top, ">=") >= 0);
876 estack_pop(stack, top, ax, bx);
877 estack_ax_v = res;
878 next_pc += sizeof(struct binary_op);
879 PO;
880 }
881 OP(FILTER_OP_LE_STRING):
882 {
883 int res;
884
885 res = (stack_strcmp(stack, top, "<=") <= 0);
886 estack_pop(stack, top, ax, bx);
887 estack_ax_v = res;
888 next_pc += sizeof(struct binary_op);
889 PO;
890 }
891
892 OP(FILTER_OP_EQ_STAR_GLOB_STRING):
893 {
894 int res;
895
896 res = (stack_star_glob_match(stack, top, "==") == 0);
897 estack_pop(stack, top, ax, bx);
898 estack_ax_v = res;
899 next_pc += sizeof(struct binary_op);
900 PO;
901 }
902 OP(FILTER_OP_NE_STAR_GLOB_STRING):
903 {
904 int res;
905
906 res = (stack_star_glob_match(stack, top, "!=") != 0);
907 estack_pop(stack, top, ax, bx);
908 estack_ax_v = res;
909 next_pc += sizeof(struct binary_op);
910 PO;
911 }
912
913 OP(FILTER_OP_EQ_S64):
914 {
915 int res;
916
917 res = (estack_bx_v == estack_ax_v);
918 estack_pop(stack, top, ax, bx);
919 estack_ax_v = res;
920 next_pc += sizeof(struct binary_op);
921 PO;
922 }
923 OP(FILTER_OP_NE_S64):
924 {
925 int res;
926
927 res = (estack_bx_v != estack_ax_v);
928 estack_pop(stack, top, ax, bx);
929 estack_ax_v = res;
930 next_pc += sizeof(struct binary_op);
931 PO;
932 }
933 OP(FILTER_OP_GT_S64):
934 {
935 int res;
936
937 res = (estack_bx_v > estack_ax_v);
938 estack_pop(stack, top, ax, bx);
939 estack_ax_v = res;
940 next_pc += sizeof(struct binary_op);
941 PO;
942 }
943 OP(FILTER_OP_LT_S64):
944 {
945 int res;
946
947 res = (estack_bx_v < estack_ax_v);
948 estack_pop(stack, top, ax, bx);
949 estack_ax_v = res;
950 next_pc += sizeof(struct binary_op);
951 PO;
952 }
953 OP(FILTER_OP_GE_S64):
954 {
955 int res;
956
957 res = (estack_bx_v >= estack_ax_v);
958 estack_pop(stack, top, ax, bx);
959 estack_ax_v = res;
960 next_pc += sizeof(struct binary_op);
961 PO;
962 }
963 OP(FILTER_OP_LE_S64):
964 {
965 int res;
966
967 res = (estack_bx_v <= estack_ax_v);
968 estack_pop(stack, top, ax, bx);
969 estack_ax_v = res;
970 next_pc += sizeof(struct binary_op);
971 PO;
972 }
973
974 OP(FILTER_OP_EQ_DOUBLE):
975 OP(FILTER_OP_NE_DOUBLE):
976 OP(FILTER_OP_GT_DOUBLE):
977 OP(FILTER_OP_LT_DOUBLE):
978 OP(FILTER_OP_GE_DOUBLE):
979 OP(FILTER_OP_LE_DOUBLE):
980 {
981 BUG_ON(1);
982 PO;
983 }
984
985 /* Mixed S64-double binary comparators */
986 OP(FILTER_OP_EQ_DOUBLE_S64):
987 OP(FILTER_OP_NE_DOUBLE_S64):
988 OP(FILTER_OP_GT_DOUBLE_S64):
989 OP(FILTER_OP_LT_DOUBLE_S64):
990 OP(FILTER_OP_GE_DOUBLE_S64):
991 OP(FILTER_OP_LE_DOUBLE_S64):
992 OP(FILTER_OP_EQ_S64_DOUBLE):
993 OP(FILTER_OP_NE_S64_DOUBLE):
994 OP(FILTER_OP_GT_S64_DOUBLE):
995 OP(FILTER_OP_LT_S64_DOUBLE):
996 OP(FILTER_OP_GE_S64_DOUBLE):
997 OP(FILTER_OP_LE_S64_DOUBLE):
998 {
999 BUG_ON(1);
1000 PO;
1001 }
1002 OP(FILTER_OP_BIT_RSHIFT):
1003 {
1004 int64_t res;
1005
1006 /* Catch undefined behavior. */
1007 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
1008 ret = -EINVAL;
1009 goto end;
1010 }
1011 res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
1012 estack_pop(stack, top, ax, bx);
1013 estack_ax_v = res;
1014 next_pc += sizeof(struct binary_op);
1015 PO;
1016 }
1017 OP(FILTER_OP_BIT_LSHIFT):
1018 {
1019 int64_t res;
1020
1021 /* Catch undefined behavior. */
1022 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
1023 ret = -EINVAL;
1024 goto end;
1025 }
1026 res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
1027 estack_pop(stack, top, ax, bx);
1028 estack_ax_v = res;
1029 next_pc += sizeof(struct binary_op);
1030 PO;
1031 }
1032 OP(FILTER_OP_BIT_AND):
1033 {
1034 int64_t res;
1035
1036 res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
1037 estack_pop(stack, top, ax, bx);
1038 estack_ax_v = res;
1039 next_pc += sizeof(struct binary_op);
1040 PO;
1041 }
1042 OP(FILTER_OP_BIT_OR):
1043 {
1044 int64_t res;
1045
1046 res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
1047 estack_pop(stack, top, ax, bx);
1048 estack_ax_v = res;
1049 next_pc += sizeof(struct binary_op);
1050 PO;
1051 }
1052 OP(FILTER_OP_BIT_XOR):
1053 {
1054 int64_t res;
1055
1056 res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
1057 estack_pop(stack, top, ax, bx);
1058 estack_ax_v = res;
1059 next_pc += sizeof(struct binary_op);
1060 PO;
1061 }
1062
1063 /* unary */
1064 OP(FILTER_OP_UNARY_PLUS):
1065 OP(FILTER_OP_UNARY_MINUS):
1066 OP(FILTER_OP_UNARY_NOT):
1067 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1068 (unsigned int) *(filter_opcode_t *) pc);
1069 ret = -EINVAL;
1070 goto end;
1071
1072
1073 OP(FILTER_OP_UNARY_BIT_NOT):
1074 {
1075 estack_ax_v = ~(uint64_t) estack_ax_v;
1076 next_pc += sizeof(struct unary_op);
1077 PO;
1078 }
1079
1080 OP(FILTER_OP_UNARY_PLUS_S64):
1081 {
1082 next_pc += sizeof(struct unary_op);
1083 PO;
1084 }
1085 OP(FILTER_OP_UNARY_MINUS_S64):
1086 {
1087 estack_ax_v = -estack_ax_v;
1088 next_pc += sizeof(struct unary_op);
1089 PO;
1090 }
1091 OP(FILTER_OP_UNARY_PLUS_DOUBLE):
1092 OP(FILTER_OP_UNARY_MINUS_DOUBLE):
1093 {
1094 BUG_ON(1);
1095 PO;
1096 }
1097 OP(FILTER_OP_UNARY_NOT_S64):
1098 {
1099 estack_ax_v = !estack_ax_v;
1100 next_pc += sizeof(struct unary_op);
1101 PO;
1102 }
1103 OP(FILTER_OP_UNARY_NOT_DOUBLE):
1104 {
1105 BUG_ON(1);
1106 PO;
1107 }
1108
	/* logical */
	OP(FILTER_OP_AND):
	{
		struct logical_op *insn = (struct logical_op *) pc;

		/*
		 * Short-circuit AND: if AX is 0, the whole expression
		 * evaluates to 0; keep AX as the result and jump over the
		 * right-hand side using the precomputed skip_offset.
		 */
		if (unlikely(estack_ax_v == 0)) {
			dbg_printk("Jumping to bytecode offset %u\n",
				(unsigned int) insn->skip_offset);
			next_pc = start_pc + insn->skip_offset;
		} else {
			/* Pop 1 when jump not taken */
			estack_pop(stack, top, ax, bx);
			next_pc += sizeof(struct logical_op);
		}
		PO;
	}
	OP(FILTER_OP_OR):
	{
		struct logical_op *insn = (struct logical_op *) pc;

		/*
		 * Short-circuit OR: if AX is nonzero, normalize the result
		 * to 1 and jump over the right-hand side.
		 */
		if (unlikely(estack_ax_v != 0)) {
			estack_ax_v = 1;
			dbg_printk("Jumping to bytecode offset %u\n",
				(unsigned int) insn->skip_offset);
			next_pc = start_pc + insn->skip_offset;
		} else {
			/* Pop 1 when jump not taken */
			estack_pop(stack, top, ax, bx);
			next_pc += sizeof(struct logical_op);
		}
		PO;
	}
1144
1145
	/* load field ref */
	OP(FILTER_OP_LOAD_FIELD_REF_STRING):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("load field ref offset %u type string\n",
			ref->offset);
		estack_push(stack, top, ax, bx);
		/* filter_stack_data[ref->offset] holds a pointer to the string. */
		estack_ax(stack, top)->u.s.str =
			*(const char * const *) &filter_stack_data[ref->offset];
		if (unlikely(!estack_ax(stack, top)->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL string.\n");
			ret = -EINVAL;
			goto end;
		}
		/* No explicit length: rely on the NUL terminator. */
		estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		estack_ax(stack, top)->u.s.user = 0;	/* kernel-space pointer */
		dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		PO;
	}

	OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("load field ref offset %u type sequence\n",
			ref->offset);
		estack_push(stack, top, ax, bx);
		/* Sequence layout in stack data: length word, then data pointer. */
		estack_ax(stack, top)->u.s.seq_len =
			*(unsigned long *) &filter_stack_data[ref->offset];
		estack_ax(stack, top)->u.s.str =
			*(const char **) (&filter_stack_data[ref->offset
				+ sizeof(unsigned long)]);
		if (unlikely(!estack_ax(stack, top)->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL sequence.\n");
			ret = -EINVAL;
			goto end;
		}
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		estack_ax(stack, top)->u.s.user = 0;	/* kernel-space pointer */
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		PO;
	}

	OP(FILTER_OP_LOAD_FIELD_REF_S64):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("load field ref offset %u type s64\n",
			ref->offset);
		estack_push(stack, top, ax, bx);
		/* The numeric value is stored inline in the stack data. */
		estack_ax_v =
			((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
		dbg_printk("ref load s64 %lld\n",
			(long long) estack_ax_v);
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		PO;
	}

	OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
	{
		/* Double ops must never reach the kernel interpreter. */
		BUG_ON(1);
		PO;
	}
1217
	/* load from immediate operand */
	OP(FILTER_OP_LOAD_STRING):
	{
		struct load_op *insn = (struct load_op *) pc;

		dbg_printk("load string %s\n", insn->data);
		estack_push(stack, top, ax, bx);
		/* The literal string is stored inline after the opcode. */
		estack_ax(stack, top)->u.s.str = insn->data;
		estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_PLAIN;
		estack_ax(stack, top)->u.s.user = 0;
		/* Skip over the inline literal, including its NUL terminator. */
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		PO;
	}

	OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
	{
		struct load_op *insn = (struct load_op *) pc;

		dbg_printk("load globbing pattern %s\n", insn->data);
		estack_push(stack, top, ax, bx);
		/* Same as LOAD_STRING, but flagged as a '*'-glob pattern. */
		estack_ax(stack, top)->u.s.str = insn->data;
		estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
		estack_ax(stack, top)->u.s.user = 0;
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		PO;
	}

	OP(FILTER_OP_LOAD_S64):
	{
		struct load_op *insn = (struct load_op *) pc;

		estack_push(stack, top, ax, bx);
		/* Immediate numeric literal stored inline after the opcode. */
		estack_ax_v = ((struct literal_numeric *) insn->data)->v;
		dbg_printk("load s64 %lld\n",
			(long long) estack_ax_v);
		next_pc += sizeof(struct load_op)
			+ sizeof(struct literal_numeric);
		PO;
	}

	OP(FILTER_OP_LOAD_DOUBLE):
	{
		/* Double ops must never reach the kernel interpreter. */
		BUG_ON(1);
		PO;
	}
1267
	/* cast */
	OP(FILTER_OP_CAST_TO_S64):
		/*
		 * Generic cast must have been specialized into a typed cast
		 * (or a nop); reaching it here is a bytecode error.
		 */
		printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;

	OP(FILTER_OP_CAST_DOUBLE_TO_S64):
	{
		/* Double ops must never reach the kernel interpreter. */
		BUG_ON(1);
		PO;
	}

	OP(FILTER_OP_CAST_NOP):
	{
		/* Cast is a no-op on the value; just skip the instruction. */
		next_pc += sizeof(struct cast_op);
		PO;
	}
1286
	/* get context ref */
	OP(FILTER_OP_GET_CONTEXT_REF_STRING):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;
		struct lttng_ctx_field *ctx_field;
		union lttng_ctx_value v;

		dbg_printk("get context ref offset %u type string\n",
			ref->offset);
		/* ref->offset indexes into the static context field table. */
		ctx_field = &lttng_static_ctx->fields[ref->offset];
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		estack_push(stack, top, ax, bx);
		estack_ax(stack, top)->u.s.str = v.str;
		if (unlikely(!estack_ax(stack, top)->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL string.\n");
			ret = -EINVAL;
			goto end;
		}
		estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		estack_ax(stack, top)->u.s.user = 0;	/* context strings are kernel-space */
		dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		PO;
	}

	OP(FILTER_OP_GET_CONTEXT_REF_S64):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;
		struct lttng_ctx_field *ctx_field;
		union lttng_ctx_value v;

		dbg_printk("get context ref offset %u type s64\n",
			ref->offset);
		ctx_field = &lttng_static_ctx->fields[ref->offset];
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		estack_push(stack, top, ax, bx);
		estack_ax_v = v.s64;
		dbg_printk("ref get context s64 %lld\n",
			(long long) estack_ax_v);
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		PO;
	}

	OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
	{
		/* Double ops must never reach the kernel interpreter. */
		BUG_ON(1);
		PO;
	}
1339
	/* load userspace field ref */
	OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("load field ref offset %u type user string\n",
			ref->offset);
		estack_push(stack, top, ax, bx);
		estack_ax(stack, top)->u.s.user_str =
			*(const char * const *) &filter_stack_data[ref->offset];
		if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
			dbg_printk("Filter warning: loading a NULL string.\n");
			ret = -EINVAL;
			goto end;
		}
		estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		/*
		 * Mark as a user-space pointer: subsequent character reads
		 * go through the fault-tolerant user-copy path (see
		 * get_char()) instead of a direct dereference.
		 */
		estack_ax(stack, top)->u.s.user = 1;
		dbg_load_ref_user_str_printk(estack_ax(stack, top));
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		PO;
	}

	OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("load field ref offset %u type user sequence\n",
			ref->offset);
		estack_push(stack, top, ax, bx);
		/* Sequence layout in stack data: length word, then user pointer. */
		estack_ax(stack, top)->u.s.seq_len =
			*(unsigned long *) &filter_stack_data[ref->offset];
		estack_ax(stack, top)->u.s.user_str =
			*(const char **) (&filter_stack_data[ref->offset
				+ sizeof(unsigned long)]);
		if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
			dbg_printk("Filter warning: loading a NULL sequence.\n");
			ret = -EINVAL;
			goto end;
		}
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		estack_ax(stack, top)->u.s.user = 1;	/* user-space pointer */
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		PO;
	}
1389
	OP(FILTER_OP_GET_CONTEXT_ROOT):
	{
		dbg_printk("op get context root\n");
		estack_push(stack, top, ax, bx);
		/* Start an object traversal rooted at the static context. */
		estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
		/* "field" only needed for variants. */
		estack_ax(stack, top)->u.ptr.field = NULL;
		next_pc += sizeof(struct load_op);
		PO;
	}

	OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
	{
		/* App-context root must never reach the kernel interpreter. */
		BUG_ON(1);
		PO;
	}

	OP(FILTER_OP_GET_PAYLOAD_ROOT):
	{
		dbg_printk("op get app payload root\n");
		estack_push(stack, top, ax, bx);
		/* Start an object traversal rooted at the event payload. */
		estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
		estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
		/* "field" only needed for variants. */
		estack_ax(stack, top)->u.ptr.field = NULL;
		next_pc += sizeof(struct load_op);
		PO;
	}
1418
	OP(FILTER_OP_GET_SYMBOL):
	{
		dbg_printk("op get symbol\n");
		switch (estack_ax(stack, top)->u.ptr.type) {
		case LOAD_OBJECT:
			printk(KERN_WARNING "Nested fields not implemented yet.\n");
			ret = -EINVAL;
			goto end;
		case LOAD_ROOT_CONTEXT:
		case LOAD_ROOT_APP_CONTEXT:
		case LOAD_ROOT_PAYLOAD:
			/*
			 * symbol lookup is performed by
			 * specialization.
			 */
			ret = -EINVAL;
			goto end;
		}
		/*
		 * All known pointer types bail out above; this advance is
		 * only reached for unexpected u.ptr.type values.
		 */
		next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
		PO;
	}

	OP(FILTER_OP_GET_SYMBOL_FIELD):
	{
		/*
		 * Used for first variant encountered in a
		 * traversal. Variants are not implemented yet.
		 */
		ret = -EINVAL;
		goto end;
	}
1450
	OP(FILTER_OP_GET_INDEX_U16):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

		dbg_printk("op get index u16\n");
		/* Resolve the indexed element in place on top of stack. */
		ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
		if (ret)
			goto end;
		/* Refresh the cached AX value from the updated stack slot. */
		estack_ax_v = estack_ax(stack, top)->u.v;
		next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
		PO;
	}

	OP(FILTER_OP_GET_INDEX_U64):
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

		dbg_printk("op get index u64\n");
		ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
		if (ret)
			goto end;
		estack_ax_v = estack_ax(stack, top)->u.v;
		next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
		PO;
	}
1478
	OP(FILTER_OP_LOAD_FIELD):
	{
		dbg_printk("op load field\n");
		/*
		 * Generic (type determined at runtime) field load from the
		 * object referenced by the top-of-stack pointer.
		 */
		ret = dynamic_load_field(estack_ax(stack, top));
		if (ret)
			goto end;
		/* Refresh the cached AX value from the updated stack slot. */
		estack_ax_v = estack_ax(stack, top)->u.v;
		next_pc += sizeof(struct load_op);
		PO;
	}
1489
	/*
	 * Typed field loads: dereference the object pointer cached in the
	 * top-of-stack slot and widen the value into the 64-bit
	 * accumulator via the usual C integer conversions.
	 */
	OP(FILTER_OP_LOAD_FIELD_S8):
	{
		dbg_printk("op load field s8\n");

		estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_S16):
	{
		dbg_printk("op load field s16\n");

		estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_S32):
	{
		dbg_printk("op load field s32\n");

		estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_S64):
	{
		dbg_printk("op load field s64\n");

		estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_U8):
	{
		dbg_printk("op load field u8\n");

		estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_U16):
	{
		dbg_printk("op load field u16\n");

		estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_U32):
	{
		dbg_printk("op load field u32\n");

		estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_U64):
	{
		dbg_printk("op load field u64\n");

		estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
		next_pc += sizeof(struct load_op);
		PO;
	}
	OP(FILTER_OP_LOAD_FIELD_DOUBLE):
	{
		/* Double field loads are not supported by the kernel interpreter. */
		ret = -EINVAL;
		goto end;
	}
1559
	OP(FILTER_OP_LOAD_FIELD_STRING):
	{
		const char *str;

		dbg_printk("op load field string\n");
		/* Top-of-stack pointer refers directly to the string bytes. */
		str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
		estack_ax(stack, top)->u.s.str = str;
		if (unlikely(!estack_ax(stack, top)->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL string.\n");
			ret = -EINVAL;
			goto end;
		}
		/* No explicit length: rely on the NUL terminator. */
		estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		next_pc += sizeof(struct load_op);
		PO;
	}

	OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
	{
		const char *ptr;

		dbg_printk("op load field string sequence\n");
		/* Sequence object layout: length word followed by data pointer. */
		ptr = estack_ax(stack, top)->u.ptr.ptr;
		estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
		estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
		if (unlikely(!estack_ax(stack, top)->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL sequence.\n");
			ret = -EINVAL;
			goto end;
		}
		estack_ax(stack, top)->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		next_pc += sizeof(struct load_op);
		PO;
	}
1597
	END_OP
end:
	/*
	 * return 0 (discard) on error: a filter that fails to evaluate
	 * must never let the event through.
	 */
	if (ret)
		return 0;
	return retval;
}
1605
/*
 * Undefine the interpreter dispatch macros so they do not leak past the
 * interpreter definition above.
 */
#undef START_OP
#undef OP
#undef PO
#undef END_OP