Document last supported kernel version for stable-2.11 branch
[lttng-modules.git] / lttng-filter-interpreter.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-interpreter.c
4 *
5 * LTTng modules filter interpreter.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <wrapper/uaccess.h>
11 #include <wrapper/objtool.h>
12 #include <wrapper/types.h>
13 #include <linux/swab.h>
14
15 #include <lttng-filter.h>
16 #include <lttng-string-utils.h>
17
18 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
19
20 /*
21 * get_char should be called with page fault handler disabled if it is expected
22 * to handle user-space read.
23 */
24 static
25 char get_char(const struct estack_entry *reg, size_t offset)
26 {
27 if (unlikely(offset >= reg->u.s.seq_len))
28 return '\0';
29 if (reg->u.s.user) {
30 char c;
31
32 /* Handle invalid access as end of string. */
33 if (unlikely(!lttng_access_ok(VERIFY_READ,
34 reg->u.s.user_str + offset,
35 sizeof(c))))
36 return '\0';
37 /* Handle fault (nonzero return value) as end of string. */
38 if (unlikely(__copy_from_user_inatomic(&c,
39 reg->u.s.user_str + offset,
40 sizeof(c))))
41 return '\0';
42 return c;
43 } else {
44 return reg->u.s.str[offset];
45 }
46 }
47
48 /*
49 * -1: wildcard found.
50 * -2: unknown escape char.
51 * 0: normal char.
52 */
53 static
54 int parse_char(struct estack_entry *reg, char *c, size_t *offset)
55 {
56 switch (*c) {
57 case '\\':
58 (*offset)++;
59 *c = get_char(reg, *offset);
60 switch (*c) {
61 case '\\':
62 case '*':
63 return 0;
64 default:
65 return -2;
66 }
67 case '*':
68 return -1;
69 default:
70 return 0;
71 }
72 }
73
74 static
75 char get_char_at_cb(size_t at, void *data)
76 {
77 return get_char(data, at);
78 }
79
80 static
81 int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
82 {
83 bool has_user = false;
84 int result;
85 struct estack_entry *pattern_reg;
86 struct estack_entry *candidate_reg;
87
88 /* Disable the page fault handler when reading from userspace. */
89 if (estack_bx(stack, top)->u.s.user
90 || estack_ax(stack, top)->u.s.user) {
91 has_user = true;
92 pagefault_disable();
93 }
94
95 /* Find out which side is the pattern vs. the candidate. */
96 if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
97 pattern_reg = estack_ax(stack, top);
98 candidate_reg = estack_bx(stack, top);
99 } else {
100 pattern_reg = estack_bx(stack, top);
101 candidate_reg = estack_ax(stack, top);
102 }
103
104 /* Perform the match operation. */
105 result = !strutils_star_glob_match_char_cb(get_char_at_cb,
106 pattern_reg, get_char_at_cb, candidate_reg);
107 if (has_user)
108 pagefault_enable();
109
110 return result;
111 }
112
/*
 * Compare the two string registers on top of the stack (bx vs ax),
 * returning a strcmp()-style result: <0, 0 or >0.
 *
 * Plain string literals are run through parse_char() so that escape
 * sequences are honored and a '*' wildcard short-circuits the
 * comparison as equal.  Disables the page fault handler around the
 * loop when either side is a user-space string, since get_char() may
 * perform inatomic user copies.
 */
static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		pagefault_disable();
	}

	for (;;) {
		int ret;
		/* Set when the bx side consumed an unknown escape sequence. */
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				/* Both strings ended together: equal. */
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal_type ==
						ESTACK_STRING_LITERAL_TYPE_PLAIN) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					/* Trailing wildcard matches the end. */
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				/* bx is a strict prefix of ax. */
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal_type ==
					ESTACK_STRING_LITERAL_TYPE_PLAIN) {
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				/* Trailing wildcard matches the end. */
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			/* ax is a strict prefix of bx. */
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				/* Wildcard on bx side: consider equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				/* Wildcard on ax side: consider equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				/*
				 * Unknown escape on ax only: ax sorts
				 * after bx.
				 */
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				/* Unknown escape on bx only. */
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user)
		pagefault_enable();

	return diff;
}
211
212 uint64_t lttng_filter_false(void *filter_data,
213 struct lttng_probe_ctx *lttng_probe_ctx,
214 const char *filter_stack_data)
215 {
216 return 0;
217 }
218
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

/*
 * Begin the interpreter loop: walk the bytecode and dispatch each
 * opcode through a switch statement.
 *
 * NOTE(review): this variant reads the program from bytecode->data[0]
 * while the dispatch-table variant below uses bytecode->code[0] —
 * confirm both refer to the same instruction buffer.
 */
#define START_OP \
	start_pc = &bytecode->data[0]; \
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
			pc = next_pc) { \
		dbg_printk("Executing op %s (%u)\n", \
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc); \
		switch (*(filter_opcode_t *) pc) {

/* One opcode handler: a switch case label. */
#define OP(name) case name

/* End of an opcode handler: fall back to the dispatch loop. */
#define PO break

/* Close the switch and the interpreter loop. */
#define END_OP } \
	}

#else

/*
 * Dispatch-table based interpreter.
 */

/*
 * Begin interpretation: jump to the label of the first opcode via the
 * computed-goto dispatch table (empty bytecode goes straight to end).
 */
#define START_OP \
	start_pc = &bytecode->code[0]; \
	pc = next_pc = start_pc; \
	if (unlikely(pc - start_pc >= bytecode->len)) \
		goto end; \
	goto *dispatch[*(filter_opcode_t *) pc];

/* One opcode handler: a goto label derived from the opcode name. */
#define OP(name) \
	LABEL_##name

/* End of an opcode handler: advance pc and jump to the next opcode. */
#define PO \
	pc = next_pc; \
	goto *dispatch[*(filter_opcode_t *) pc];

/* No closing construct needed for the computed-goto variant. */
#define END_OP

#endif
264
265 static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
266 struct load_ptr *ptr,
267 uint32_t idx)
268 {
269
270 struct lttng_ctx_field *ctx_field;
271 struct lttng_event_field *field;
272 union lttng_ctx_value v;
273
274 ctx_field = &lttng_static_ctx->fields[idx];
275 field = &ctx_field->event_field;
276 ptr->type = LOAD_OBJECT;
277 /* field is only used for types nested within variants. */
278 ptr->field = NULL;
279
280 switch (field->type.atype) {
281 case atype_integer:
282 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
283 if (field->type.u.basic.integer.signedness) {
284 ptr->object_type = OBJECT_TYPE_S64;
285 ptr->u.s64 = v.s64;
286 ptr->ptr = &ptr->u.s64;
287 } else {
288 ptr->object_type = OBJECT_TYPE_U64;
289 ptr->u.u64 = v.s64; /* Cast. */
290 ptr->ptr = &ptr->u.u64;
291 }
292 break;
293 case atype_enum:
294 {
295 const struct lttng_integer_type *itype =
296 &field->type.u.basic.enumeration.container_type;
297
298 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
299 if (itype->signedness) {
300 ptr->object_type = OBJECT_TYPE_S64;
301 ptr->u.s64 = v.s64;
302 ptr->ptr = &ptr->u.s64;
303 } else {
304 ptr->object_type = OBJECT_TYPE_U64;
305 ptr->u.u64 = v.s64; /* Cast. */
306 ptr->ptr = &ptr->u.u64;
307 }
308 break;
309 }
310 case atype_array:
311 if (field->type.u.array.elem_type.atype != atype_integer) {
312 printk(KERN_WARNING "Array nesting only supports integer types.\n");
313 return -EINVAL;
314 }
315 if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
316 printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
317 return -EINVAL;
318 }
319 ptr->object_type = OBJECT_TYPE_STRING;
320 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
321 ptr->ptr = v.str;
322 break;
323 case atype_sequence:
324 if (field->type.u.sequence.elem_type.atype != atype_integer) {
325 printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
326 return -EINVAL;
327 }
328 if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
329 printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
330 return -EINVAL;
331 }
332 ptr->object_type = OBJECT_TYPE_STRING;
333 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
334 ptr->ptr = v.str;
335 break;
336 case atype_array_bitfield:
337 printk(KERN_WARNING "Bitfield array type is not supported.\n");
338 return -EINVAL;
339 case atype_sequence_bitfield:
340 printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
341 return -EINVAL;
342 case atype_string:
343 ptr->object_type = OBJECT_TYPE_STRING;
344 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
345 ptr->ptr = v.str;
346 break;
347 case atype_struct:
348 printk(KERN_WARNING "Structure type cannot be loaded.\n");
349 return -EINVAL;
350 default:
351 printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
352 return -EINVAL;
353 }
354 return 0;
355 }
356
357 static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
358 struct bytecode_runtime *runtime,
359 uint64_t index, struct estack_entry *stack_top)
360 {
361 int ret;
362 const struct filter_get_index_data *gid;
363
364 /*
365 * Types nested within variants need to perform dynamic lookup
366 * based on the field descriptions. LTTng-UST does not implement
367 * variants for now.
368 */
369 if (stack_top->u.ptr.field)
370 return -EINVAL;
371 gid = (const struct filter_get_index_data *) &runtime->data[index];
372 switch (stack_top->u.ptr.type) {
373 case LOAD_OBJECT:
374 switch (stack_top->u.ptr.object_type) {
375 case OBJECT_TYPE_ARRAY:
376 {
377 const char *ptr;
378
379 WARN_ON_ONCE(gid->offset >= gid->array_len);
380 /* Skip count (unsigned long) */
381 ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
382 ptr = ptr + gid->offset;
383 stack_top->u.ptr.ptr = ptr;
384 stack_top->u.ptr.object_type = gid->elem.type;
385 stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
386 /* field is only used for types nested within variants. */
387 stack_top->u.ptr.field = NULL;
388 break;
389 }
390 case OBJECT_TYPE_SEQUENCE:
391 {
392 const char *ptr;
393 size_t ptr_seq_len;
394
395 ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
396 ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
397 if (gid->offset >= gid->elem.len * ptr_seq_len) {
398 ret = -EINVAL;
399 goto end;
400 }
401 ptr = ptr + gid->offset;
402 stack_top->u.ptr.ptr = ptr;
403 stack_top->u.ptr.object_type = gid->elem.type;
404 stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
405 /* field is only used for types nested within variants. */
406 stack_top->u.ptr.field = NULL;
407 break;
408 }
409 case OBJECT_TYPE_STRUCT:
410 printk(KERN_WARNING "Nested structures are not supported yet.\n");
411 ret = -EINVAL;
412 goto end;
413 case OBJECT_TYPE_VARIANT:
414 default:
415 printk(KERN_WARNING "Unexpected get index type %d",
416 (int) stack_top->u.ptr.object_type);
417 ret = -EINVAL;
418 goto end;
419 }
420 break;
421 case LOAD_ROOT_CONTEXT:
422 case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
423 {
424 ret = context_get_index(lttng_probe_ctx,
425 &stack_top->u.ptr,
426 gid->ctx_index);
427 if (ret) {
428 goto end;
429 }
430 break;
431 }
432 case LOAD_ROOT_PAYLOAD:
433 stack_top->u.ptr.ptr += gid->offset;
434 if (gid->elem.type == OBJECT_TYPE_STRING)
435 stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
436 stack_top->u.ptr.object_type = gid->elem.type;
437 stack_top->u.ptr.type = LOAD_OBJECT;
438 /* field is only used for types nested within variants. */
439 stack_top->u.ptr.field = NULL;
440 break;
441 }
442 return 0;
443
444 end:
445 return ret;
446 }
447
448 static int dynamic_load_field(struct estack_entry *stack_top)
449 {
450 int ret;
451
452 switch (stack_top->u.ptr.type) {
453 case LOAD_OBJECT:
454 break;
455 case LOAD_ROOT_CONTEXT:
456 case LOAD_ROOT_APP_CONTEXT:
457 case LOAD_ROOT_PAYLOAD:
458 default:
459 dbg_printk("Filter warning: cannot load root, missing field name.\n");
460 ret = -EINVAL;
461 goto end;
462 }
463 switch (stack_top->u.ptr.object_type) {
464 case OBJECT_TYPE_S8:
465 dbg_printk("op load field s8\n");
466 stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
467 break;
468 case OBJECT_TYPE_S16:
469 {
470 int16_t tmp;
471
472 dbg_printk("op load field s16\n");
473 tmp = *(int16_t *) stack_top->u.ptr.ptr;
474 if (stack_top->u.ptr.rev_bo)
475 __swab16s(&tmp);
476 stack_top->u.v = tmp;
477 break;
478 }
479 case OBJECT_TYPE_S32:
480 {
481 int32_t tmp;
482
483 dbg_printk("op load field s32\n");
484 tmp = *(int32_t *) stack_top->u.ptr.ptr;
485 if (stack_top->u.ptr.rev_bo)
486 __swab32s(&tmp);
487 stack_top->u.v = tmp;
488 break;
489 }
490 case OBJECT_TYPE_S64:
491 {
492 int64_t tmp;
493
494 dbg_printk("op load field s64\n");
495 tmp = *(int64_t *) stack_top->u.ptr.ptr;
496 if (stack_top->u.ptr.rev_bo)
497 __swab64s(&tmp);
498 stack_top->u.v = tmp;
499 break;
500 }
501 case OBJECT_TYPE_U8:
502 dbg_printk("op load field u8\n");
503 stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
504 break;
505 case OBJECT_TYPE_U16:
506 {
507 uint16_t tmp;
508
509 dbg_printk("op load field s16\n");
510 tmp = *(uint16_t *) stack_top->u.ptr.ptr;
511 if (stack_top->u.ptr.rev_bo)
512 __swab16s(&tmp);
513 stack_top->u.v = tmp;
514 break;
515 }
516 case OBJECT_TYPE_U32:
517 {
518 uint32_t tmp;
519
520 dbg_printk("op load field u32\n");
521 tmp = *(uint32_t *) stack_top->u.ptr.ptr;
522 if (stack_top->u.ptr.rev_bo)
523 __swab32s(&tmp);
524 stack_top->u.v = tmp;
525 break;
526 }
527 case OBJECT_TYPE_U64:
528 {
529 uint64_t tmp;
530
531 dbg_printk("op load field u64\n");
532 tmp = *(uint64_t *) stack_top->u.ptr.ptr;
533 if (stack_top->u.ptr.rev_bo)
534 __swab64s(&tmp);
535 stack_top->u.v = tmp;
536 break;
537 }
538 case OBJECT_TYPE_STRING:
539 {
540 const char *str;
541
542 dbg_printk("op load field string\n");
543 str = (const char *) stack_top->u.ptr.ptr;
544 stack_top->u.s.str = str;
545 if (unlikely(!stack_top->u.s.str)) {
546 dbg_printk("Filter warning: loading a NULL string.\n");
547 ret = -EINVAL;
548 goto end;
549 }
550 stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
551 stack_top->u.s.literal_type =
552 ESTACK_STRING_LITERAL_TYPE_NONE;
553 break;
554 }
555 case OBJECT_TYPE_STRING_SEQUENCE:
556 {
557 const char *ptr;
558
559 dbg_printk("op load field string sequence\n");
560 ptr = stack_top->u.ptr.ptr;
561 stack_top->u.s.seq_len = *(unsigned long *) ptr;
562 stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
563 if (unlikely(!stack_top->u.s.str)) {
564 dbg_printk("Filter warning: loading a NULL sequence.\n");
565 ret = -EINVAL;
566 goto end;
567 }
568 stack_top->u.s.literal_type =
569 ESTACK_STRING_LITERAL_TYPE_NONE;
570 break;
571 }
572 case OBJECT_TYPE_DYNAMIC:
573 /*
574 * Dynamic types in context are looked up
575 * by context get index.
576 */
577 ret = -EINVAL;
578 goto end;
579 case OBJECT_TYPE_DOUBLE:
580 ret = -EINVAL;
581 goto end;
582 case OBJECT_TYPE_SEQUENCE:
583 case OBJECT_TYPE_ARRAY:
584 case OBJECT_TYPE_STRUCT:
585 case OBJECT_TYPE_VARIANT:
586 printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
587 ret = -EINVAL;
588 goto end;
589 }
590 return 0;
591
592 end:
593 return ret;
594 }
595
#ifdef DEBUG

/* Maximum number of user-string bytes copied for debug printing. */
#define DBG_USER_STR_CUTOFF 32

/*
 * In debug mode, print user string (truncated, if necessary).
 */
static inline
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
	size_t pos = 0;
	char last_char;
	char user_str[DBG_USER_STR_CUTOFF];

	/* get_char() may copy from user space: disable page faults. */
	pagefault_disable();
	do {
		last_char = get_char(user_str_reg, pos);
		user_str[pos] = last_char;
		pos++;
	} while (last_char != '\0' && pos < sizeof(user_str));
	pagefault_enable();

	/* Force termination in case the string filled the whole buffer. */
	user_str[sizeof(user_str) - 1] = '\0';
	dbg_printk("load field ref user string: '%s%s'\n", user_str,
		last_char != '\0' ? "[...]" : "");
}
#else
/* No-op stub when debugging is disabled. */
static inline
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
}
#endif
628
629 /*
630 * Return 0 (discard), or raise the 0x1 flag (log event).
631 * Currently, other flags are kept for future extensions and have no
632 * effect.
633 */
634 uint64_t lttng_filter_interpret_bytecode(void *filter_data,
635 struct lttng_probe_ctx *lttng_probe_ctx,
636 const char *filter_stack_data)
637 {
638 struct bytecode_runtime *bytecode = filter_data;
639 void *pc, *next_pc, *start_pc;
640 int ret = -EINVAL;
641 uint64_t retval = 0;
642 struct estack _stack;
643 struct estack *stack = &_stack;
644 register int64_t ax = 0, bx = 0;
645 register int top = FILTER_STACK_EMPTY;
646 #ifndef INTERPRETER_USE_SWITCH
647 static void *dispatch[NR_FILTER_OPS] = {
648 [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
649
650 [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
651
652 /* binary */
653 [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
654 [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
655 [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
656 [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
657 [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
658 [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
659 [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
660 [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
661 [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
662 [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
663
664 /* binary comparators */
665 [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
666 [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
667 [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
668 [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
669 [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
670 [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
671
672 /* string binary comparator */
673 [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
674 [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
675 [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
676 [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
677 [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
678 [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
679
680 /* globbing pattern binary comparator */
681 [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
682 [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
683
684 /* s64 binary comparator */
685 [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
686 [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
687 [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
688 [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
689 [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
690 [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
691
692 /* double binary comparator */
693 [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
694 [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
695 [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
696 [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
697 [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
698 [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
699
700 /* Mixed S64-double binary comparators */
701 [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
702 [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
703 [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
704 [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
705 [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
706 [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
707
708 [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
709 [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
710 [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
711 [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
712 [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
713 [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
714
715 /* unary */
716 [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
717 [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
718 [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
719 [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
720 [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
721 [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
722 [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
723 [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
724 [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
725
726 /* logical */
727 [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
728 [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
729
730 /* load field ref */
731 [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
732 [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
733 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
734 [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
735 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
736
737 /* load from immediate operand */
738 [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
739 [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
740 [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
741 [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
742
743 /* cast */
744 [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
745 [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
746 [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
747
748 /* get context ref */
749 [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
750 [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
751 [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
752 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
753
754 /* load userspace field ref */
755 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
756 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
757
758 /* Instructions for recursive traversal through composed types. */
759 [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
760 [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
761 [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
762
763 [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
764 [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
765 [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
766 [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
767
768 [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
769 [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
770 [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
771 [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
772 [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
773 [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
774 [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
775 [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
776 [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
777 [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
778 [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
779 [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
780
781 [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
782
783 [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
784 };
785 #endif /* #ifndef INTERPRETER_USE_SWITCH */
786
787 START_OP
788
789 OP(FILTER_OP_UNKNOWN):
790 OP(FILTER_OP_LOAD_FIELD_REF):
791 OP(FILTER_OP_GET_CONTEXT_REF):
792 #ifdef INTERPRETER_USE_SWITCH
793 default:
794 #endif /* INTERPRETER_USE_SWITCH */
795 printk(KERN_WARNING "unknown bytecode op %u\n",
796 (unsigned int) *(filter_opcode_t *) pc);
797 ret = -EINVAL;
798 goto end;
799
800 OP(FILTER_OP_RETURN):
801 OP(FILTER_OP_RETURN_S64):
802 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
803 retval = !!estack_ax_v;
804 ret = 0;
805 goto end;
806
807 /* binary */
808 OP(FILTER_OP_MUL):
809 OP(FILTER_OP_DIV):
810 OP(FILTER_OP_MOD):
811 OP(FILTER_OP_PLUS):
812 OP(FILTER_OP_MINUS):
813 printk(KERN_WARNING "unsupported bytecode op %u\n",
814 (unsigned int) *(filter_opcode_t *) pc);
815 ret = -EINVAL;
816 goto end;
817
818 OP(FILTER_OP_EQ):
819 OP(FILTER_OP_NE):
820 OP(FILTER_OP_GT):
821 OP(FILTER_OP_LT):
822 OP(FILTER_OP_GE):
823 OP(FILTER_OP_LE):
824 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
825 (unsigned int) *(filter_opcode_t *) pc);
826 ret = -EINVAL;
827 goto end;
828
829 OP(FILTER_OP_EQ_STRING):
830 {
831 int res;
832
833 res = (stack_strcmp(stack, top, "==") == 0);
834 estack_pop(stack, top, ax, bx);
835 estack_ax_v = res;
836 next_pc += sizeof(struct binary_op);
837 PO;
838 }
839 OP(FILTER_OP_NE_STRING):
840 {
841 int res;
842
843 res = (stack_strcmp(stack, top, "!=") != 0);
844 estack_pop(stack, top, ax, bx);
845 estack_ax_v = res;
846 next_pc += sizeof(struct binary_op);
847 PO;
848 }
849 OP(FILTER_OP_GT_STRING):
850 {
851 int res;
852
853 res = (stack_strcmp(stack, top, ">") > 0);
854 estack_pop(stack, top, ax, bx);
855 estack_ax_v = res;
856 next_pc += sizeof(struct binary_op);
857 PO;
858 }
859 OP(FILTER_OP_LT_STRING):
860 {
861 int res;
862
863 res = (stack_strcmp(stack, top, "<") < 0);
864 estack_pop(stack, top, ax, bx);
865 estack_ax_v = res;
866 next_pc += sizeof(struct binary_op);
867 PO;
868 }
869 OP(FILTER_OP_GE_STRING):
870 {
871 int res;
872
873 res = (stack_strcmp(stack, top, ">=") >= 0);
874 estack_pop(stack, top, ax, bx);
875 estack_ax_v = res;
876 next_pc += sizeof(struct binary_op);
877 PO;
878 }
879 OP(FILTER_OP_LE_STRING):
880 {
881 int res;
882
883 res = (stack_strcmp(stack, top, "<=") <= 0);
884 estack_pop(stack, top, ax, bx);
885 estack_ax_v = res;
886 next_pc += sizeof(struct binary_op);
887 PO;
888 }
889
890 OP(FILTER_OP_EQ_STAR_GLOB_STRING):
891 {
892 int res;
893
894 res = (stack_star_glob_match(stack, top, "==") == 0);
895 estack_pop(stack, top, ax, bx);
896 estack_ax_v = res;
897 next_pc += sizeof(struct binary_op);
898 PO;
899 }
900 OP(FILTER_OP_NE_STAR_GLOB_STRING):
901 {
902 int res;
903
904 res = (stack_star_glob_match(stack, top, "!=") != 0);
905 estack_pop(stack, top, ax, bx);
906 estack_ax_v = res;
907 next_pc += sizeof(struct binary_op);
908 PO;
909 }
910
911 OP(FILTER_OP_EQ_S64):
912 {
913 int res;
914
915 res = (estack_bx_v == estack_ax_v);
916 estack_pop(stack, top, ax, bx);
917 estack_ax_v = res;
918 next_pc += sizeof(struct binary_op);
919 PO;
920 }
921 OP(FILTER_OP_NE_S64):
922 {
923 int res;
924
925 res = (estack_bx_v != estack_ax_v);
926 estack_pop(stack, top, ax, bx);
927 estack_ax_v = res;
928 next_pc += sizeof(struct binary_op);
929 PO;
930 }
931 OP(FILTER_OP_GT_S64):
932 {
933 int res;
934
935 res = (estack_bx_v > estack_ax_v);
936 estack_pop(stack, top, ax, bx);
937 estack_ax_v = res;
938 next_pc += sizeof(struct binary_op);
939 PO;
940 }
941 OP(FILTER_OP_LT_S64):
942 {
943 int res;
944
945 res = (estack_bx_v < estack_ax_v);
946 estack_pop(stack, top, ax, bx);
947 estack_ax_v = res;
948 next_pc += sizeof(struct binary_op);
949 PO;
950 }
951 OP(FILTER_OP_GE_S64):
952 {
953 int res;
954
955 res = (estack_bx_v >= estack_ax_v);
956 estack_pop(stack, top, ax, bx);
957 estack_ax_v = res;
958 next_pc += sizeof(struct binary_op);
959 PO;
960 }
961 OP(FILTER_OP_LE_S64):
962 {
963 int res;
964
965 res = (estack_bx_v <= estack_ax_v);
966 estack_pop(stack, top, ax, bx);
967 estack_ax_v = res;
968 next_pc += sizeof(struct binary_op);
969 PO;
970 }
971
972 OP(FILTER_OP_EQ_DOUBLE):
973 OP(FILTER_OP_NE_DOUBLE):
974 OP(FILTER_OP_GT_DOUBLE):
975 OP(FILTER_OP_LT_DOUBLE):
976 OP(FILTER_OP_GE_DOUBLE):
977 OP(FILTER_OP_LE_DOUBLE):
978 {
979 BUG_ON(1);
980 PO;
981 }
982
983 /* Mixed S64-double binary comparators */
984 OP(FILTER_OP_EQ_DOUBLE_S64):
985 OP(FILTER_OP_NE_DOUBLE_S64):
986 OP(FILTER_OP_GT_DOUBLE_S64):
987 OP(FILTER_OP_LT_DOUBLE_S64):
988 OP(FILTER_OP_GE_DOUBLE_S64):
989 OP(FILTER_OP_LE_DOUBLE_S64):
990 OP(FILTER_OP_EQ_S64_DOUBLE):
991 OP(FILTER_OP_NE_S64_DOUBLE):
992 OP(FILTER_OP_GT_S64_DOUBLE):
993 OP(FILTER_OP_LT_S64_DOUBLE):
994 OP(FILTER_OP_GE_S64_DOUBLE):
995 OP(FILTER_OP_LE_S64_DOUBLE):
996 {
997 BUG_ON(1);
998 PO;
999 }
1000 OP(FILTER_OP_BIT_RSHIFT):
1001 {
1002 int64_t res;
1003
1004 /* Catch undefined behavior. */
1005 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
1006 ret = -EINVAL;
1007 goto end;
1008 }
1009 res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
1010 estack_pop(stack, top, ax, bx);
1011 estack_ax_v = res;
1012 next_pc += sizeof(struct binary_op);
1013 PO;
1014 }
1015 OP(FILTER_OP_BIT_LSHIFT):
1016 {
1017 int64_t res;
1018
1019 /* Catch undefined behavior. */
1020 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
1021 ret = -EINVAL;
1022 goto end;
1023 }
1024 res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
1025 estack_pop(stack, top, ax, bx);
1026 estack_ax_v = res;
1027 next_pc += sizeof(struct binary_op);
1028 PO;
1029 }
1030 OP(FILTER_OP_BIT_AND):
1031 {
1032 int64_t res;
1033
1034 res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
1035 estack_pop(stack, top, ax, bx);
1036 estack_ax_v = res;
1037 next_pc += sizeof(struct binary_op);
1038 PO;
1039 }
1040 OP(FILTER_OP_BIT_OR):
1041 {
1042 int64_t res;
1043
1044 res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
1045 estack_pop(stack, top, ax, bx);
1046 estack_ax_v = res;
1047 next_pc += sizeof(struct binary_op);
1048 PO;
1049 }
1050 OP(FILTER_OP_BIT_XOR):
1051 {
1052 int64_t res;
1053
1054 res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
1055 estack_pop(stack, top, ax, bx);
1056 estack_ax_v = res;
1057 next_pc += sizeof(struct binary_op);
1058 PO;
1059 }
1060
1061 /* unary */
1062 OP(FILTER_OP_UNARY_PLUS):
1063 OP(FILTER_OP_UNARY_MINUS):
1064 OP(FILTER_OP_UNARY_NOT):
1065 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1066 (unsigned int) *(filter_opcode_t *) pc);
1067 ret = -EINVAL;
1068 goto end;
1069
1070
1071 OP(FILTER_OP_UNARY_BIT_NOT):
1072 {
1073 estack_ax_v = ~(uint64_t) estack_ax_v;
1074 next_pc += sizeof(struct unary_op);
1075 PO;
1076 }
1077
1078 OP(FILTER_OP_UNARY_PLUS_S64):
1079 {
1080 next_pc += sizeof(struct unary_op);
1081 PO;
1082 }
1083 OP(FILTER_OP_UNARY_MINUS_S64):
1084 {
1085 estack_ax_v = -estack_ax_v;
1086 next_pc += sizeof(struct unary_op);
1087 PO;
1088 }
1089 OP(FILTER_OP_UNARY_PLUS_DOUBLE):
1090 OP(FILTER_OP_UNARY_MINUS_DOUBLE):
1091 {
1092 BUG_ON(1);
1093 PO;
1094 }
1095 OP(FILTER_OP_UNARY_NOT_S64):
1096 {
1097 estack_ax_v = !estack_ax_v;
1098 next_pc += sizeof(struct unary_op);
1099 PO;
1100 }
1101 OP(FILTER_OP_UNARY_NOT_DOUBLE):
1102 {
1103 BUG_ON(1);
1104 PO;
1105 }
1106
1107 /* logical */
1108 OP(FILTER_OP_AND):
1109 {
1110 struct logical_op *insn = (struct logical_op *) pc;
1111
1112 /* If AX is 0, skip and evaluate to 0 */
1113 if (unlikely(estack_ax_v == 0)) {
1114 dbg_printk("Jumping to bytecode offset %u\n",
1115 (unsigned int) insn->skip_offset);
1116 next_pc = start_pc + insn->skip_offset;
1117 } else {
1118 /* Pop 1 when jump not taken */
1119 estack_pop(stack, top, ax, bx);
1120 next_pc += sizeof(struct logical_op);
1121 }
1122 PO;
1123 }
1124 OP(FILTER_OP_OR):
1125 {
1126 struct logical_op *insn = (struct logical_op *) pc;
1127
1128 /* If AX is nonzero, skip and evaluate to 1 */
1129
1130 if (unlikely(estack_ax_v != 0)) {
1131 estack_ax_v = 1;
1132 dbg_printk("Jumping to bytecode offset %u\n",
1133 (unsigned int) insn->skip_offset);
1134 next_pc = start_pc + insn->skip_offset;
1135 } else {
1136 /* Pop 1 when jump not taken */
1137 estack_pop(stack, top, ax, bx);
1138 next_pc += sizeof(struct logical_op);
1139 }
1140 PO;
1141 }
1142
1143
1144 /* load field ref */
	/*
	 * Field-ref loads read event payload data that the probe staged
	 * into filter_stack_data; ref->offset is a byte offset into that
	 * buffer. Each load pushes a new entry on the estack.
	 */
	/*
	 * String field: the buffer holds a pointer to a NUL-terminated
	 * kernel-space string. seq_len = LTTNG_SIZE_MAX means "no
	 * explicit length, stop at NUL"; user = 0 means readers (see
	 * get_char()) dereference it directly, no user-copy needed.
	 */
1145 OP(FILTER_OP_LOAD_FIELD_REF_STRING):
1146 {
1147 struct load_op *insn = (struct load_op *) pc;
1148 struct field_ref *ref = (struct field_ref *) insn->data;
1149
1150 dbg_printk("load field ref offset %u type string\n",
1151 ref->offset);
1152 estack_push(stack, top, ax, bx);
1153 estack_ax(stack, top)->u.s.str =
1154 *(const char * const *) &filter_stack_data[ref->offset];
1155 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1156 dbg_printk("Filter warning: loading a NULL string.\n");
1157 ret = -EINVAL;
1158 goto end;
1159 }
1160 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1161 estack_ax(stack, top)->u.s.literal_type =
1162 ESTACK_STRING_LITERAL_TYPE_NONE;
1163 estack_ax(stack, top)->u.s.user = 0;
1164 dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
1165 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1166 PO;
1167 }
1168
	/*
	 * Sequence field: the staged layout is an unsigned long length
	 * followed by a pointer to the characters, so the string need
	 * not be NUL-terminated (seq_len bounds every access).
	 */
1169 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
1170 {
1171 struct load_op *insn = (struct load_op *) pc;
1172 struct field_ref *ref = (struct field_ref *) insn->data;
1173
1174 dbg_printk("load field ref offset %u type sequence\n",
1175 ref->offset);
1176 estack_push(stack, top, ax, bx);
1177 estack_ax(stack, top)->u.s.seq_len =
1178 *(unsigned long *) &filter_stack_data[ref->offset];
1179 estack_ax(stack, top)->u.s.str =
1180 *(const char **) (&filter_stack_data[ref->offset
1181 + sizeof(unsigned long)]);
1182 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1183 dbg_printk("Filter warning: loading a NULL sequence.\n");
1184 ret = -EINVAL;
1185 goto end;
1186 }
1187 estack_ax(stack, top)->u.s.literal_type =
1188 ESTACK_STRING_LITERAL_TYPE_NONE;
1189 estack_ax(stack, top)->u.s.user = 0;
1190 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1191 PO;
1192 }
1193
	/* Numeric field: value is stored in-line in the staging buffer. */
1194 OP(FILTER_OP_LOAD_FIELD_REF_S64):
1195 {
1196 struct load_op *insn = (struct load_op *) pc;
1197 struct field_ref *ref = (struct field_ref *) insn->data;
1198
1199 dbg_printk("load field ref offset %u type s64\n",
1200 ref->offset);
1201 estack_push(stack, top, ax, bx);
1202 estack_ax_v =
1203 ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
1204 dbg_printk("ref load s64 %lld\n",
1205 (long long) estack_ax_v);
1206 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1207 PO;
1208 }
1209
	/* Floating point is never expected in kernel bytecode. */
1210 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
1211 {
1212 BUG_ON(1);
1213 PO;
1214 }
1215
1216 /* load from immediate operand */
	/*
	 * Immediate loads: the operand is embedded directly after the
	 * load_op in the bytecode stream (insn->data), so the pc
	 * advances past the opcode plus the in-line payload.
	 */
	/* Literal string operand; pc skips the NUL-terminated bytes. */
1217 OP(FILTER_OP_LOAD_STRING):
1218 {
1219 struct load_op *insn = (struct load_op *) pc;
1220
1221 dbg_printk("load string %s\n", insn->data);
1222 estack_push(stack, top, ax, bx);
1223 estack_ax(stack, top)->u.s.str = insn->data;
1224 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1225 estack_ax(stack, top)->u.s.literal_type =
1226 ESTACK_STRING_LITERAL_TYPE_PLAIN;
1227 estack_ax(stack, top)->u.s.user = 0;
1228 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1229 PO;
1230 }
1231
	/*
	 * Same as LOAD_STRING, but the literal_type marks it as a
	 * star-globbing pattern so comparisons use glob matching.
	 */
1232 OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
1233 {
1234 struct load_op *insn = (struct load_op *) pc;
1235
1236 dbg_printk("load globbing pattern %s\n", insn->data);
1237 estack_push(stack, top, ax, bx);
1238 estack_ax(stack, top)->u.s.str = insn->data;
1239 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1240 estack_ax(stack, top)->u.s.literal_type =
1241 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
1242 estack_ax(stack, top)->u.s.user = 0;
1243 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1244 PO;
1245 }
1246
	/* Literal signed 64-bit constant. */
1247 OP(FILTER_OP_LOAD_S64):
1248 {
1249 struct load_op *insn = (struct load_op *) pc;
1250
1251 estack_push(stack, top, ax, bx);
1252 estack_ax_v = ((struct literal_numeric *) insn->data)->v;
1253 dbg_printk("load s64 %lld\n",
1254 (long long) estack_ax_v);
1255 next_pc += sizeof(struct load_op)
1256 + sizeof(struct literal_numeric);
1257 PO;
1258 }
1259
	/* Floating point is never expected in kernel bytecode. */
1260 OP(FILTER_OP_LOAD_DOUBLE):
1261 {
1262 BUG_ON(1);
1263 PO;
1264 }
1265
1266 /* cast */
	/*
	 * Generic CAST_TO_S64 must be specialized into a typed variant
	 * (or elided) before interpretation; reject it here.
	 */
1267 OP(FILTER_OP_CAST_TO_S64):
1268 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1269 (unsigned int) *(filter_opcode_t *) pc);
1270 ret = -EINVAL;
1271 goto end;
1272
	/* Floating point is never expected in kernel bytecode. */
1273 OP(FILTER_OP_CAST_DOUBLE_TO_S64):
1274 {
1275 BUG_ON(1);
1276 PO;
1277 }
1278
	/* Cast that changes nothing at runtime: only advance the pc. */
1279 OP(FILTER_OP_CAST_NOP):
1280 {
1281 next_pc += sizeof(struct cast_op);
1282 PO;
1283 }
1284
1285 /* get context ref */
	/*
	 * Context refs fetch a value from the static context table:
	 * ref->offset indexes lttng_static_ctx->fields[], and the
	 * field's get_value() callback fills a union lttng_ctx_value.
	 */
1286 OP(FILTER_OP_GET_CONTEXT_REF_STRING):
1287 {
1288 struct load_op *insn = (struct load_op *) pc;
1289 struct field_ref *ref = (struct field_ref *) insn->data;
1290 struct lttng_ctx_field *ctx_field;
1291 union lttng_ctx_value v;
1292
1293 dbg_printk("get context ref offset %u type string\n",
1294 ref->offset);
1295 ctx_field = &lttng_static_ctx->fields[ref->offset];
1296 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1297 estack_push(stack, top, ax, bx);
1298 estack_ax(stack, top)->u.s.str = v.str;
1299 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1300 dbg_printk("Filter warning: loading a NULL string.\n");
1301 ret = -EINVAL;
1302 goto end;
1303 }
	/* Context strings are kernel memory, NUL-terminated. */
1304 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1305 estack_ax(stack, top)->u.s.literal_type =
1306 ESTACK_STRING_LITERAL_TYPE_NONE;
1307 estack_ax(stack, top)->u.s.user = 0;
1308 dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
1309 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1310 PO;
1311 }
1312
1313 OP(FILTER_OP_GET_CONTEXT_REF_S64):
1314 {
1315 struct load_op *insn = (struct load_op *) pc;
1316 struct field_ref *ref = (struct field_ref *) insn->data;
1317 struct lttng_ctx_field *ctx_field;
1318 union lttng_ctx_value v;
1319
1320 dbg_printk("get context ref offset %u type s64\n",
1321 ref->offset);
1322 ctx_field = &lttng_static_ctx->fields[ref->offset];
1323 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1324 estack_push(stack, top, ax, bx);
1325 estack_ax_v = v.s64;
1326 dbg_printk("ref get context s64 %lld\n",
1327 (long long) estack_ax_v);
1328 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1329 PO;
1330 }
1331
	/* Floating point is never expected in kernel bytecode. */
1332 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
1333 {
1334 BUG_ON(1);
1335 PO;
1336 }
1337
1338 /* load userspace field ref */
	/*
	 * Same layouts as the kernel field refs above, but the target
	 * characters live in user space: u.s.user = 1 makes downstream
	 * readers (see get_char()) use lttng_access_ok() +
	 * __copy_from_user_inatomic(), treating faults/invalid ranges
	 * as end of string.
	 */
1339 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
1340 {
1341 struct load_op *insn = (struct load_op *) pc;
1342 struct field_ref *ref = (struct field_ref *) insn->data;
1343
1344 dbg_printk("load field ref offset %u type user string\n",
1345 ref->offset);
1346 estack_push(stack, top, ax, bx);
1347 estack_ax(stack, top)->u.s.user_str =
1348 *(const char * const *) &filter_stack_data[ref->offset];
1349 if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
1350 dbg_printk("Filter warning: loading a NULL string.\n");
1351 ret = -EINVAL;
1352 goto end;
1353 }
1354 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1355 estack_ax(stack, top)->u.s.literal_type =
1356 ESTACK_STRING_LITERAL_TYPE_NONE;
1357 estack_ax(stack, top)->u.s.user = 1;
1358 dbg_load_ref_user_str_printk(estack_ax(stack, top));
1359 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1360 PO;
1361 }
1362
	/* User-space sequence: unsigned long length, then user pointer. */
1363 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
1364 {
1365 struct load_op *insn = (struct load_op *) pc;
1366 struct field_ref *ref = (struct field_ref *) insn->data;
1367
1368 dbg_printk("load field ref offset %u type user sequence\n",
1369 ref->offset);
1370 estack_push(stack, top, ax, bx);
1371 estack_ax(stack, top)->u.s.seq_len =
1372 *(unsigned long *) &filter_stack_data[ref->offset];
1373 estack_ax(stack, top)->u.s.user_str =
1374 *(const char **) (&filter_stack_data[ref->offset
1375 + sizeof(unsigned long)]);
1376 if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
1377 dbg_printk("Filter warning: loading a NULL sequence.\n");
1378 ret = -EINVAL;
1379 goto end;
1380 }
1381 estack_ax(stack, top)->u.s.literal_type =
1382 ESTACK_STRING_LITERAL_TYPE_NONE;
1383 estack_ax(stack, top)->u.s.user = 1;
1384 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1385 PO;
1386 }
1387
	/*
	 * Root loads push a pointer-typed estack entry that subsequent
	 * GET_SYMBOL / GET_INDEX / LOAD_FIELD ops will traverse.
	 * Note the context root does not set u.ptr.ptr: context field
	 * resolution is deferred to the later traversal ops.
	 */
1388 OP(FILTER_OP_GET_CONTEXT_ROOT):
1389 {
1390 dbg_printk("op get context root\n");
1391 estack_push(stack, top, ax, bx);
1392 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
1393 /* "field" only needed for variants. */
1394 estack_ax(stack, top)->u.ptr.field = NULL;
1395 next_pc += sizeof(struct load_op);
1396 PO;
1397 }
1398
	/* App contexts are a user-space (UST) concept: never here. */
1399 OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
1400 {
1401 BUG_ON(1);
1402 PO;
1403 }
1404
	/* Payload root points at the staged event payload buffer. */
1405 OP(FILTER_OP_GET_PAYLOAD_ROOT):
1406 {
1407 dbg_printk("op get app payload root\n");
1408 estack_push(stack, top, ax, bx);
1409 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
1410 estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
1411 /* "field" only needed for variants. */
1412 estack_ax(stack, top)->u.ptr.field = NULL;
1413 next_pc += sizeof(struct load_op);
1414 PO;
1415 }
1416
	/*
	 * Symbol lookup by name is resolved during specialization
	 * (rewritten into GET_INDEX ops); every case below therefore
	 * errors out, so the pc advance after the switch is not
	 * expected to be reached in practice.
	 */
1417 OP(FILTER_OP_GET_SYMBOL):
1418 {
1419 dbg_printk("op get symbol\n");
1420 switch (estack_ax(stack, top)->u.ptr.type) {
1421 case LOAD_OBJECT:
1422 printk(KERN_WARNING "Nested fields not implemented yet.\n");
1423 ret = -EINVAL;
1424 goto end;
1425 case LOAD_ROOT_CONTEXT:
1426 case LOAD_ROOT_APP_CONTEXT:
1427 case LOAD_ROOT_PAYLOAD:
1428 /*
1429 * symbol lookup is performed by
1430 * specialization.
1431 */
1432 ret = -EINVAL;
1433 goto end;
1434 }
1435 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1436 PO;
1437 }
1438
1439 OP(FILTER_OP_GET_SYMBOL_FIELD):
1440 {
1441 /*
1442 * Used for first variant encountered in a
1443 * traversal. Variants are not implemented yet.
1444 */
1445 ret = -EINVAL;
1446 goto end;
1447 }
1448
	/*
	 * Indexed traversal (specialized form of symbol lookup):
	 * dynamic_get_index() advances the top-of-stack pointer entry
	 * to the selected member/element; the numeric accumulator is
	 * refreshed from the (union) entry afterwards.
	 */
1449 OP(FILTER_OP_GET_INDEX_U16):
1450 {
1451 struct load_op *insn = (struct load_op *) pc;
1452 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1453
1454 dbg_printk("op get index u16\n");
1455 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1456 if (ret)
1457 goto end;
1458 estack_ax_v = estack_ax(stack, top)->u.v;
1459 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1460 PO;
1461 }
1462
	/* Same as GET_INDEX_U16 with a 64-bit index operand. */
1463 OP(FILTER_OP_GET_INDEX_U64):
1464 {
1465 struct load_op *insn = (struct load_op *) pc;
1466 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1467
1468 dbg_printk("op get index u64\n");
1469 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1470 if (ret)
1471 goto end;
1472 estack_ax_v = estack_ax(stack, top)->u.v;
1473 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1474 PO;
1475 }
1476
	/*
	 * Generic field load: the field's runtime type is discovered by
	 * dynamic_load_field(), which converts the pointer entry into a
	 * value entry.
	 */
1477 OP(FILTER_OP_LOAD_FIELD):
1478 {
1479 dbg_printk("op load field\n");
1480 ret = dynamic_load_field(estack_ax(stack, top));
1481 if (ret)
1482 goto end;
1483 estack_ax_v = estack_ax(stack, top)->u.v;
1484 next_pc += sizeof(struct load_op);
1485 PO;
1486 }
1487
	/*
	 * Typed field loads (specialized forms): dereference the
	 * top-of-stack object pointer at its statically-known width and
	 * widen into the s64 accumulator — the int*_t casts sign-extend,
	 * the uint*_t casts zero-extend.
	 */
1488 OP(FILTER_OP_LOAD_FIELD_S8):
1489 {
1490 dbg_printk("op load field s8\n");
1491
1492 estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
1493 next_pc += sizeof(struct load_op);
1494 PO;
1495 }
1496 OP(FILTER_OP_LOAD_FIELD_S16):
1497 {
1498 dbg_printk("op load field s16\n");
1499
1500 estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
1501 next_pc += sizeof(struct load_op);
1502 PO;
1503 }
1504 OP(FILTER_OP_LOAD_FIELD_S32):
1505 {
1506 dbg_printk("op load field s32\n");
1507
1508 estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
1509 next_pc += sizeof(struct load_op);
1510 PO;
1511 }
1512 OP(FILTER_OP_LOAD_FIELD_S64):
1513 {
1514 dbg_printk("op load field s64\n");
1515
1516 estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
1517 next_pc += sizeof(struct load_op);
1518 PO;
1519 }
1520 OP(FILTER_OP_LOAD_FIELD_U8):
1521 {
1522 dbg_printk("op load field u8\n");
1523
1524 estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
1525 next_pc += sizeof(struct load_op);
1526 PO;
1527 }
1528 OP(FILTER_OP_LOAD_FIELD_U16):
1529 {
1530 dbg_printk("op load field u16\n");
1531
1532 estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
1533 next_pc += sizeof(struct load_op);
1534 PO;
1535 }
1536 OP(FILTER_OP_LOAD_FIELD_U32):
1537 {
1538 dbg_printk("op load field u32\n");
1539
1540 estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
1541 next_pc += sizeof(struct load_op);
1542 PO;
1543 }
1544 OP(FILTER_OP_LOAD_FIELD_U64):
1545 {
1546 dbg_printk("op load field u64\n");
1547
1548 estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
1549 next_pc += sizeof(struct load_op);
1550 PO;
1551 }
	/*
	 * Rejected with -EINVAL rather than BUG_ON like the other
	 * double ops: this one can be reached via dynamic typing.
	 */
1552 OP(FILTER_OP_LOAD_FIELD_DOUBLE):
1553 {
1554 ret = -EINVAL;
1555 goto end;
1556 }
1557
	/*
	 * String field load through a traversed pointer entry.
	 * NOTE(review): unlike the field-ref string loads above, these
	 * two ops do not set u.s.user — presumably the entry's user
	 * flag is expected to be correct already; TODO confirm it
	 * cannot hold a stale value from a previous stack use.
	 */
1558 OP(FILTER_OP_LOAD_FIELD_STRING):
1559 {
1560 const char *str;
1561
1562 dbg_printk("op load field string\n");
1563 str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
1564 estack_ax(stack, top)->u.s.str = str;
1565 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1566 dbg_printk("Filter warning: loading a NULL string.\n");
1567 ret = -EINVAL;
1568 goto end;
1569 }
	/* NUL-terminated: no explicit bound. */
1570 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1571 estack_ax(stack, top)->u.s.literal_type =
1572 ESTACK_STRING_LITERAL_TYPE_NONE;
1573 next_pc += sizeof(struct load_op);
1574 PO;
1575 }
1576
	/* Sequence layout: unsigned long length, then char pointer. */
1577 OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
1578 {
1579 const char *ptr;
1580
1581 dbg_printk("op load field string sequence\n");
1582 ptr = estack_ax(stack, top)->u.ptr.ptr;
1583 estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
1584 estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
1585 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1586 dbg_printk("Filter warning: loading a NULL sequence.\n");
1587 ret = -EINVAL;
1588 goto end;
1589 }
1590 estack_ax(stack, top)->u.s.literal_type =
1591 ESTACK_STRING_LITERAL_TYPE_NONE;
1592 next_pc += sizeof(struct load_op);
1593 PO;
1594 }
1595
1596 END_OP
1597 end:
	/*
	 * Fail closed: any interpretation error discards the event
	 * (filter evaluates to 0) instead of propagating the error.
	 */
1598 /* return 0 (discard) on error */
1599 if (ret)
1600 return 0;
1601 return retval;
1602 }
1603
1604 #undef START_OP
1605 #undef OP
1606 #undef PO
1607 #undef END_OP