821c12d3ba9679e5ae605ae5189a639529b8c5fd
[lttng-modules.git] / lttng-filter-interpreter.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-interpreter.c
4 *
5 * LTTng modules filter interpreter.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <wrapper/compiler_attributes.h>
11 #include <wrapper/uaccess.h>
12 #include <wrapper/objtool.h>
13 #include <wrapper/types.h>
14 #include <linux/swab.h>
15
16 #include <lttng-filter.h>
17 #include <lttng-string-utils.h>
18
19 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
20
21 /*
22 * get_char should be called with page fault handler disabled if it is expected
23 * to handle user-space read.
24 */
25 static
26 char get_char(const struct estack_entry *reg, size_t offset)
27 {
28 if (unlikely(offset >= reg->u.s.seq_len))
29 return '\0';
30 if (reg->u.s.user) {
31 char c;
32
33 /* Handle invalid access as end of string. */
34 if (unlikely(!lttng_access_ok(VERIFY_READ,
35 reg->u.s.user_str + offset,
36 sizeof(c))))
37 return '\0';
38 /* Handle fault (nonzero return value) as end of string. */
39 if (unlikely(__copy_from_user_inatomic(&c,
40 reg->u.s.user_str + offset,
41 sizeof(c))))
42 return '\0';
43 return c;
44 } else {
45 return reg->u.s.str[offset];
46 }
47 }
48
49 /*
50 * -1: wildcard found.
51 * -2: unknown escape char.
52 * 0: normal char.
53 */
54 static
55 int parse_char(struct estack_entry *reg, char *c, size_t *offset)
56 {
57 switch (*c) {
58 case '\\':
59 (*offset)++;
60 *c = get_char(reg, *offset);
61 switch (*c) {
62 case '\\':
63 case '*':
64 return 0;
65 default:
66 return -2;
67 }
68 case '*':
69 return -1;
70 default:
71 return 0;
72 }
73 }
74
75 static
76 char get_char_at_cb(size_t at, void *data)
77 {
78 return get_char(data, at);
79 }
80
81 static
82 int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
83 {
84 bool has_user = false;
85 int result;
86 struct estack_entry *pattern_reg;
87 struct estack_entry *candidate_reg;
88
89 /* Disable the page fault handler when reading from userspace. */
90 if (estack_bx(stack, top)->u.s.user
91 || estack_ax(stack, top)->u.s.user) {
92 has_user = true;
93 pagefault_disable();
94 }
95
96 /* Find out which side is the pattern vs. the candidate. */
97 if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
98 pattern_reg = estack_ax(stack, top);
99 candidate_reg = estack_bx(stack, top);
100 } else {
101 pattern_reg = estack_bx(stack, top);
102 candidate_reg = estack_ax(stack, top);
103 }
104
105 /* Perform the match operation. */
106 result = !strutils_star_glob_match_char_cb(get_char_at_cb,
107 pattern_reg, get_char_at_cb, candidate_reg);
108 if (has_user)
109 pagefault_enable();
110
111 return result;
112 }
113
/*
 * strcmp-like comparison of the two top stack string registers (bx is
 * the left operand, ax the right). Registers whose literal_type is
 * ESTACK_STRING_LITERAL_TYPE_PLAIN get escape-sequence parsing and a
 * '*' wildcard that matches any suffix (including the empty one).
 * Returns < 0, 0 or > 0. cmp_type is the operator text, currently
 * unused here.
 */
static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;

	/* Userspace reads require the page fault handler to be disabled. */
	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;	/* left char came from an unknown escape */
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				/* Both strings ended together: equal. */
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal_type ==
						ESTACK_STRING_LITERAL_TYPE_PLAIN) {
					/* A trailing '*' wildcard matches the empty suffix. */
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				/* Left ended first: left sorts lower. */
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal_type ==
					ESTACK_STRING_LITERAL_TYPE_PLAIN) {
				/* A trailing '*' wildcard matches the empty suffix. */
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			/* Right ended first: left sorts higher. */
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				/* '*' wildcard matches the rest: equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				/* Unknown escape on the left; remember it. */
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				/* '*' wildcard matches the rest: equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				/* Unknown escape only on the right: left higher... */
				if (!escaped_r0) {
					diff = -1;
					break;
				}
				/* ...both escaped: fall through to char compare. */
			} else {
				/* Unknown escape only on the left. */
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			/* Unknown escape only on the left. */
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user)
		pagefault_enable();

	return diff;
}
212
/*
 * Trivial filter used when no bytecode applies: always discard the
 * event (returns 0). All parameters are ignored.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	/* LTTNG_FILTER_DISCARD. */
	return 0;
}
219
/*
 * The interpreter loop is expressed through four macros so the opcode
 * handler bodies below compile either as a big switch statement or as
 * a computed-goto dispatch table:
 *   START_OP - fetch the first opcode and enter the dispatch loop,
 *   OP(name) - introduce the handler for opcode 'name',
 *   PO       - "proceed": dispatch to the opcode at next_pc,
 *   END_OP   - close the dispatch construct.
 */
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

/*
 * NOTE(review): this variant indexes bytecode->data[0] while the
 * computed-goto variant below indexes bytecode->code[0] -- confirm
 * both members name the same instruction stream in
 * struct bytecode_runtime.
 */
#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter.
 */

#define START_OP							\
	start_pc = &bytecode->code[0];					\
	pc = next_pc = start_pc;					\
	if (unlikely(pc - start_pc >= bytecode->len))			\
		goto end;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)							\
LABEL_##name

#define PO								\
		pc = next_pc;						\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
265
/*
 * Load the context field at index 'idx' from the static context table
 * into the load pointer 'ptr'. Integer and enum containers are copied
 * by value into ptr->u (with ptr->ptr aimed at that copy); string-like
 * fields keep a pointer to the context-provided string. Returns 0 on
 * success, -EINVAL for unsupported field types.
 */
static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct load_ptr *ptr,
		uint32_t idx)
{

	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	union lttng_ctx_value v;

	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ptr->type = LOAD_OBJECT;
	/* field is only used for types nested within variants. */
	ptr->field = NULL;

	switch (field->type.atype) {
	case atype_integer:
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (field->type.u.basic.integer.signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64; /* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		ptr->rev_bo = field->type.u.basic.integer.reverse_byte_order;
		break;
	case atype_enum:
	{
		/* Enums are loaded through their integer container type. */
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (itype->signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64; /* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		ptr->rev_bo = itype->reverse_byte_order;
		break;
	}
	case atype_array:
		/* Only string-encoded integer arrays are usable as context. */
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_sequence:
		/* Only string-encoded integer sequences are usable as context. */
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
359
/*
 * Apply a "get index" instruction to the object pointer on top of the
 * stack: descend into an array/sequence element, resolve a context
 * field from a root, or offset into the event payload. The lookup data
 * (offsets, element type) comes from the instruction's
 * filter_get_index_data record at runtime->data[index]. Returns 0 on
 * success, negative error otherwise.
 */
static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct bytecode_runtime *runtime,
		uint64_t index, struct estack_entry *stack_top)
{
	int ret;
	const struct filter_get_index_data *gid;

	/*
	 * Types nested within variants need to perform dynamic lookup
	 * based on the field descriptions. LTTng-UST does not implement
	 * variants for now.
	 */
	if (stack_top->u.ptr.field)
		return -EINVAL;
	gid = (const struct filter_get_index_data *) &runtime->data[index];
	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		switch (stack_top->u.ptr.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const char *ptr;

			/* Array bounds were validated at specialization time. */
			WARN_ON_ONCE(gid->offset >= gid->array_len);
			/* Skip count (unsigned long) */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const char *ptr;
			size_t ptr_seq_len;

			/* Sequence layout: element count, then data pointer. */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
			/* Sequence length is dynamic: bounds-check at runtime. */
			if (gid->offset >= gid->elem.len * ptr_seq_len) {
				ret = -EINVAL;
				goto end;
			}
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			printk(KERN_WARNING "Nested structures are not supported yet.\n");
			ret = -EINVAL;
			goto end;
		case OBJECT_TYPE_VARIANT:
		default:
			printk(KERN_WARNING "Unexpected get index type %d",
				(int) stack_top->u.ptr.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
		lttng_fallthrough;
	case LOAD_ROOT_APP_CONTEXT:
	{
		/* Index names a field in the (app) context table. */
		ret = context_get_index(lttng_probe_ctx,
				&stack_top->u.ptr,
				gid->ctx_index);
		if (ret) {
			goto end;
		}
		break;
	}
	case LOAD_ROOT_PAYLOAD:
		/* Offset into the flattened event payload. */
		stack_top->u.ptr.ptr += gid->offset;
		/* Strings are stored in the payload as a pointer. */
		if (gid->elem.type == OBJECT_TYPE_STRING)
			stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
		stack_top->u.ptr.object_type = gid->elem.type;
		stack_top->u.ptr.type = LOAD_OBJECT;
		/* field is only used for types nested within variants. */
		stack_top->u.ptr.field = NULL;
		break;
	}
	return 0;

end:
	return ret;
}
451
452 static int dynamic_load_field(struct estack_entry *stack_top)
453 {
454 int ret;
455
456 switch (stack_top->u.ptr.type) {
457 case LOAD_OBJECT:
458 break;
459 case LOAD_ROOT_CONTEXT:
460 case LOAD_ROOT_APP_CONTEXT:
461 case LOAD_ROOT_PAYLOAD:
462 default:
463 dbg_printk("Filter warning: cannot load root, missing field name.\n");
464 ret = -EINVAL;
465 goto end;
466 }
467 switch (stack_top->u.ptr.object_type) {
468 case OBJECT_TYPE_S8:
469 dbg_printk("op load field s8\n");
470 stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
471 break;
472 case OBJECT_TYPE_S16:
473 {
474 int16_t tmp;
475
476 dbg_printk("op load field s16\n");
477 tmp = *(int16_t *) stack_top->u.ptr.ptr;
478 if (stack_top->u.ptr.rev_bo)
479 __swab16s(&tmp);
480 stack_top->u.v = tmp;
481 break;
482 }
483 case OBJECT_TYPE_S32:
484 {
485 int32_t tmp;
486
487 dbg_printk("op load field s32\n");
488 tmp = *(int32_t *) stack_top->u.ptr.ptr;
489 if (stack_top->u.ptr.rev_bo)
490 __swab32s(&tmp);
491 stack_top->u.v = tmp;
492 break;
493 }
494 case OBJECT_TYPE_S64:
495 {
496 int64_t tmp;
497
498 dbg_printk("op load field s64\n");
499 tmp = *(int64_t *) stack_top->u.ptr.ptr;
500 if (stack_top->u.ptr.rev_bo)
501 __swab64s(&tmp);
502 stack_top->u.v = tmp;
503 break;
504 }
505 case OBJECT_TYPE_U8:
506 dbg_printk("op load field u8\n");
507 stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
508 break;
509 case OBJECT_TYPE_U16:
510 {
511 uint16_t tmp;
512
513 dbg_printk("op load field s16\n");
514 tmp = *(uint16_t *) stack_top->u.ptr.ptr;
515 if (stack_top->u.ptr.rev_bo)
516 __swab16s(&tmp);
517 stack_top->u.v = tmp;
518 break;
519 }
520 case OBJECT_TYPE_U32:
521 {
522 uint32_t tmp;
523
524 dbg_printk("op load field u32\n");
525 tmp = *(uint32_t *) stack_top->u.ptr.ptr;
526 if (stack_top->u.ptr.rev_bo)
527 __swab32s(&tmp);
528 stack_top->u.v = tmp;
529 break;
530 }
531 case OBJECT_TYPE_U64:
532 {
533 uint64_t tmp;
534
535 dbg_printk("op load field u64\n");
536 tmp = *(uint64_t *) stack_top->u.ptr.ptr;
537 if (stack_top->u.ptr.rev_bo)
538 __swab64s(&tmp);
539 stack_top->u.v = tmp;
540 break;
541 }
542 case OBJECT_TYPE_STRING:
543 {
544 const char *str;
545
546 dbg_printk("op load field string\n");
547 str = (const char *) stack_top->u.ptr.ptr;
548 stack_top->u.s.str = str;
549 if (unlikely(!stack_top->u.s.str)) {
550 dbg_printk("Filter warning: loading a NULL string.\n");
551 ret = -EINVAL;
552 goto end;
553 }
554 stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
555 stack_top->u.s.literal_type =
556 ESTACK_STRING_LITERAL_TYPE_NONE;
557 break;
558 }
559 case OBJECT_TYPE_STRING_SEQUENCE:
560 {
561 const char *ptr;
562
563 dbg_printk("op load field string sequence\n");
564 ptr = stack_top->u.ptr.ptr;
565 stack_top->u.s.seq_len = *(unsigned long *) ptr;
566 stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
567 if (unlikely(!stack_top->u.s.str)) {
568 dbg_printk("Filter warning: loading a NULL sequence.\n");
569 ret = -EINVAL;
570 goto end;
571 }
572 stack_top->u.s.literal_type =
573 ESTACK_STRING_LITERAL_TYPE_NONE;
574 break;
575 }
576 case OBJECT_TYPE_DYNAMIC:
577 /*
578 * Dynamic types in context are looked up
579 * by context get index.
580 */
581 ret = -EINVAL;
582 goto end;
583 case OBJECT_TYPE_DOUBLE:
584 ret = -EINVAL;
585 goto end;
586 case OBJECT_TYPE_SEQUENCE:
587 case OBJECT_TYPE_ARRAY:
588 case OBJECT_TYPE_STRUCT:
589 case OBJECT_TYPE_VARIANT:
590 printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
591 ret = -EINVAL;
592 goto end;
593 }
594 return 0;
595
596 end:
597 return ret;
598 }
599
#ifdef DEBUG

/* Maximum number of user string bytes copied for the debug printout. */
#define DBG_USER_STR_CUTOFF 32

/*
 * In debug mode, print user string (truncated, if necessary).
 */
static inline
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
	size_t pos = 0;
	char last_char;
	char user_str[DBG_USER_STR_CUTOFF];

	/* get_char() may read userspace: disable page faults around it. */
	pagefault_disable();
	do {
		last_char = get_char(user_str_reg, pos);
		user_str[pos] = last_char;
		pos++;
	} while (last_char != '\0' && pos < sizeof(user_str));
	pagefault_enable();

	/* Force termination in case the copy filled the whole buffer. */
	user_str[sizeof(user_str) - 1] = '\0';
	/* Flag truncation with a "[...]" suffix. */
	dbg_printk("load field ref user string: '%s%s'\n", user_str,
		last_char != '\0' ? "[...]" : "");
}
#else
/* No-op stub when DEBUG is disabled. */
static inline
void dbg_load_ref_user_str_printk(const struct estack_entry *user_str_reg)
{
}
#endif
632
633 /*
634 * Return 0 (discard), or raise the 0x1 flag (log event).
635 * Currently, other flags are kept for future extensions and have no
636 * effect.
637 */
638 uint64_t lttng_filter_interpret_bytecode(void *filter_data,
639 struct lttng_probe_ctx *lttng_probe_ctx,
640 const char *filter_stack_data)
641 {
642 struct bytecode_runtime *bytecode = filter_data;
643 void *pc, *next_pc, *start_pc;
644 int ret = -EINVAL;
645 uint64_t retval = 0;
646 struct estack _stack;
647 struct estack *stack = &_stack;
648 register int64_t ax = 0, bx = 0;
649 register int top = FILTER_STACK_EMPTY;
650 #ifndef INTERPRETER_USE_SWITCH
651 static void *dispatch[NR_FILTER_OPS] = {
652 [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
653
654 [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
655
656 /* binary */
657 [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
658 [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
659 [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
660 [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
661 [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
662 [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
663 [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
664 [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
665 [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
666 [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
667
668 /* binary comparators */
669 [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
670 [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
671 [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
672 [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
673 [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
674 [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
675
676 /* string binary comparator */
677 [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
678 [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
679 [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
680 [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
681 [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
682 [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
683
684 /* globbing pattern binary comparator */
685 [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
686 [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
687
688 /* s64 binary comparator */
689 [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
690 [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
691 [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
692 [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
693 [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
694 [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
695
696 /* double binary comparator */
697 [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
698 [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
699 [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
700 [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
701 [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
702 [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
703
704 /* Mixed S64-double binary comparators */
705 [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
706 [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
707 [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
708 [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
709 [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
710 [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
711
712 [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
713 [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
714 [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
715 [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
716 [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
717 [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
718
719 /* unary */
720 [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
721 [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
722 [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
723 [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
724 [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
725 [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
726 [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
727 [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
728 [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
729
730 /* logical */
731 [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
732 [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
733
734 /* load field ref */
735 [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
736 [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
737 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
738 [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
739 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
740
741 /* load from immediate operand */
742 [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
743 [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
744 [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
745 [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
746
747 /* cast */
748 [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
749 [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
750 [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
751
752 /* get context ref */
753 [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
754 [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
755 [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
756 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
757
758 /* load userspace field ref */
759 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
760 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
761
762 /* Instructions for recursive traversal through composed types. */
763 [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
764 [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
765 [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
766
767 [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
768 [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
769 [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
770 [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
771
772 [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
773 [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
774 [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
775 [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
776 [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
777 [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
778 [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
779 [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
780 [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
781 [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
782 [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
783 [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
784
785 [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
786
787 [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
788 };
789 #endif /* #ifndef INTERPRETER_USE_SWITCH */
790
791 START_OP
792
793 OP(FILTER_OP_UNKNOWN):
794 OP(FILTER_OP_LOAD_FIELD_REF):
795 OP(FILTER_OP_GET_CONTEXT_REF):
796 #ifdef INTERPRETER_USE_SWITCH
797 default:
798 #endif /* INTERPRETER_USE_SWITCH */
799 printk(KERN_WARNING "unknown bytecode op %u\n",
800 (unsigned int) *(filter_opcode_t *) pc);
801 ret = -EINVAL;
802 goto end;
803
804 OP(FILTER_OP_RETURN):
805 OP(FILTER_OP_RETURN_S64):
806 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
807 retval = !!estack_ax_v;
808 ret = 0;
809 goto end;
810
811 /* binary */
812 OP(FILTER_OP_MUL):
813 OP(FILTER_OP_DIV):
814 OP(FILTER_OP_MOD):
815 OP(FILTER_OP_PLUS):
816 OP(FILTER_OP_MINUS):
817 printk(KERN_WARNING "unsupported bytecode op %u\n",
818 (unsigned int) *(filter_opcode_t *) pc);
819 ret = -EINVAL;
820 goto end;
821
822 OP(FILTER_OP_EQ):
823 OP(FILTER_OP_NE):
824 OP(FILTER_OP_GT):
825 OP(FILTER_OP_LT):
826 OP(FILTER_OP_GE):
827 OP(FILTER_OP_LE):
828 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
829 (unsigned int) *(filter_opcode_t *) pc);
830 ret = -EINVAL;
831 goto end;
832
833 OP(FILTER_OP_EQ_STRING):
834 {
835 int res;
836
837 res = (stack_strcmp(stack, top, "==") == 0);
838 estack_pop(stack, top, ax, bx);
839 estack_ax_v = res;
840 next_pc += sizeof(struct binary_op);
841 PO;
842 }
843 OP(FILTER_OP_NE_STRING):
844 {
845 int res;
846
847 res = (stack_strcmp(stack, top, "!=") != 0);
848 estack_pop(stack, top, ax, bx);
849 estack_ax_v = res;
850 next_pc += sizeof(struct binary_op);
851 PO;
852 }
853 OP(FILTER_OP_GT_STRING):
854 {
855 int res;
856
857 res = (stack_strcmp(stack, top, ">") > 0);
858 estack_pop(stack, top, ax, bx);
859 estack_ax_v = res;
860 next_pc += sizeof(struct binary_op);
861 PO;
862 }
863 OP(FILTER_OP_LT_STRING):
864 {
865 int res;
866
867 res = (stack_strcmp(stack, top, "<") < 0);
868 estack_pop(stack, top, ax, bx);
869 estack_ax_v = res;
870 next_pc += sizeof(struct binary_op);
871 PO;
872 }
873 OP(FILTER_OP_GE_STRING):
874 {
875 int res;
876
877 res = (stack_strcmp(stack, top, ">=") >= 0);
878 estack_pop(stack, top, ax, bx);
879 estack_ax_v = res;
880 next_pc += sizeof(struct binary_op);
881 PO;
882 }
883 OP(FILTER_OP_LE_STRING):
884 {
885 int res;
886
887 res = (stack_strcmp(stack, top, "<=") <= 0);
888 estack_pop(stack, top, ax, bx);
889 estack_ax_v = res;
890 next_pc += sizeof(struct binary_op);
891 PO;
892 }
893
894 OP(FILTER_OP_EQ_STAR_GLOB_STRING):
895 {
896 int res;
897
898 res = (stack_star_glob_match(stack, top, "==") == 0);
899 estack_pop(stack, top, ax, bx);
900 estack_ax_v = res;
901 next_pc += sizeof(struct binary_op);
902 PO;
903 }
904 OP(FILTER_OP_NE_STAR_GLOB_STRING):
905 {
906 int res;
907
908 res = (stack_star_glob_match(stack, top, "!=") != 0);
909 estack_pop(stack, top, ax, bx);
910 estack_ax_v = res;
911 next_pc += sizeof(struct binary_op);
912 PO;
913 }
914
915 OP(FILTER_OP_EQ_S64):
916 {
917 int res;
918
919 res = (estack_bx_v == estack_ax_v);
920 estack_pop(stack, top, ax, bx);
921 estack_ax_v = res;
922 next_pc += sizeof(struct binary_op);
923 PO;
924 }
925 OP(FILTER_OP_NE_S64):
926 {
927 int res;
928
929 res = (estack_bx_v != estack_ax_v);
930 estack_pop(stack, top, ax, bx);
931 estack_ax_v = res;
932 next_pc += sizeof(struct binary_op);
933 PO;
934 }
935 OP(FILTER_OP_GT_S64):
936 {
937 int res;
938
939 res = (estack_bx_v > estack_ax_v);
940 estack_pop(stack, top, ax, bx);
941 estack_ax_v = res;
942 next_pc += sizeof(struct binary_op);
943 PO;
944 }
945 OP(FILTER_OP_LT_S64):
946 {
947 int res;
948
949 res = (estack_bx_v < estack_ax_v);
950 estack_pop(stack, top, ax, bx);
951 estack_ax_v = res;
952 next_pc += sizeof(struct binary_op);
953 PO;
954 }
955 OP(FILTER_OP_GE_S64):
956 {
957 int res;
958
959 res = (estack_bx_v >= estack_ax_v);
960 estack_pop(stack, top, ax, bx);
961 estack_ax_v = res;
962 next_pc += sizeof(struct binary_op);
963 PO;
964 }
965 OP(FILTER_OP_LE_S64):
966 {
967 int res;
968
969 res = (estack_bx_v <= estack_ax_v);
970 estack_pop(stack, top, ax, bx);
971 estack_ax_v = res;
972 next_pc += sizeof(struct binary_op);
973 PO;
974 }
975
976 OP(FILTER_OP_EQ_DOUBLE):
977 OP(FILTER_OP_NE_DOUBLE):
978 OP(FILTER_OP_GT_DOUBLE):
979 OP(FILTER_OP_LT_DOUBLE):
980 OP(FILTER_OP_GE_DOUBLE):
981 OP(FILTER_OP_LE_DOUBLE):
982 {
983 BUG_ON(1);
984 PO;
985 }
986
987 /* Mixed S64-double binary comparators */
988 OP(FILTER_OP_EQ_DOUBLE_S64):
989 OP(FILTER_OP_NE_DOUBLE_S64):
990 OP(FILTER_OP_GT_DOUBLE_S64):
991 OP(FILTER_OP_LT_DOUBLE_S64):
992 OP(FILTER_OP_GE_DOUBLE_S64):
993 OP(FILTER_OP_LE_DOUBLE_S64):
994 OP(FILTER_OP_EQ_S64_DOUBLE):
995 OP(FILTER_OP_NE_S64_DOUBLE):
996 OP(FILTER_OP_GT_S64_DOUBLE):
997 OP(FILTER_OP_LT_S64_DOUBLE):
998 OP(FILTER_OP_GE_S64_DOUBLE):
999 OP(FILTER_OP_LE_S64_DOUBLE):
1000 {
1001 BUG_ON(1);
1002 PO;
1003 }
1004 OP(FILTER_OP_BIT_RSHIFT):
1005 {
1006 int64_t res;
1007
1008 /* Catch undefined behavior. */
1009 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
1010 ret = -EINVAL;
1011 goto end;
1012 }
1013 res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
1014 estack_pop(stack, top, ax, bx);
1015 estack_ax_v = res;
1016 next_pc += sizeof(struct binary_op);
1017 PO;
1018 }
1019 OP(FILTER_OP_BIT_LSHIFT):
1020 {
1021 int64_t res;
1022
1023 /* Catch undefined behavior. */
1024 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
1025 ret = -EINVAL;
1026 goto end;
1027 }
1028 res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
1029 estack_pop(stack, top, ax, bx);
1030 estack_ax_v = res;
1031 next_pc += sizeof(struct binary_op);
1032 PO;
1033 }
1034 OP(FILTER_OP_BIT_AND):
1035 {
1036 int64_t res;
1037
1038 res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
1039 estack_pop(stack, top, ax, bx);
1040 estack_ax_v = res;
1041 next_pc += sizeof(struct binary_op);
1042 PO;
1043 }
1044 OP(FILTER_OP_BIT_OR):
1045 {
1046 int64_t res;
1047
1048 res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
1049 estack_pop(stack, top, ax, bx);
1050 estack_ax_v = res;
1051 next_pc += sizeof(struct binary_op);
1052 PO;
1053 }
1054 OP(FILTER_OP_BIT_XOR):
1055 {
1056 int64_t res;
1057
1058 res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
1059 estack_pop(stack, top, ax, bx);
1060 estack_ax_v = res;
1061 next_pc += sizeof(struct binary_op);
1062 PO;
1063 }
1064
1065 /* unary */
1066 OP(FILTER_OP_UNARY_PLUS):
1067 OP(FILTER_OP_UNARY_MINUS):
1068 OP(FILTER_OP_UNARY_NOT):
1069 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1070 (unsigned int) *(filter_opcode_t *) pc);
1071 ret = -EINVAL;
1072 goto end;
1073
1074
1075 OP(FILTER_OP_UNARY_BIT_NOT):
1076 {
1077 estack_ax_v = ~(uint64_t) estack_ax_v;
1078 next_pc += sizeof(struct unary_op);
1079 PO;
1080 }
1081
1082 OP(FILTER_OP_UNARY_PLUS_S64):
1083 {
1084 next_pc += sizeof(struct unary_op);
1085 PO;
1086 }
1087 OP(FILTER_OP_UNARY_MINUS_S64):
1088 {
1089 estack_ax_v = -estack_ax_v;
1090 next_pc += sizeof(struct unary_op);
1091 PO;
1092 }
1093 OP(FILTER_OP_UNARY_PLUS_DOUBLE):
1094 OP(FILTER_OP_UNARY_MINUS_DOUBLE):
1095 {
1096 BUG_ON(1);
1097 PO;
1098 }
1099 OP(FILTER_OP_UNARY_NOT_S64):
1100 {
1101 estack_ax_v = !estack_ax_v;
1102 next_pc += sizeof(struct unary_op);
1103 PO;
1104 }
1105 OP(FILTER_OP_UNARY_NOT_DOUBLE):
1106 {
1107 BUG_ON(1);
1108 PO;
1109 }
1110
1111 /* logical */
1112 OP(FILTER_OP_AND):
1113 {
1114 struct logical_op *insn = (struct logical_op *) pc;
1115
1116 /* If AX is 0, skip and evaluate to 0 */
1117 if (unlikely(estack_ax_v == 0)) {
1118 dbg_printk("Jumping to bytecode offset %u\n",
1119 (unsigned int) insn->skip_offset);
1120 next_pc = start_pc + insn->skip_offset;
1121 } else {
1122 /* Pop 1 when jump not taken */
1123 estack_pop(stack, top, ax, bx);
1124 next_pc += sizeof(struct logical_op);
1125 }
1126 PO;
1127 }
1128 OP(FILTER_OP_OR):
1129 {
1130 struct logical_op *insn = (struct logical_op *) pc;
1131
1132 /* If AX is nonzero, skip and evaluate to 1 */
1133
1134 if (unlikely(estack_ax_v != 0)) {
1135 estack_ax_v = 1;
1136 dbg_printk("Jumping to bytecode offset %u\n",
1137 (unsigned int) insn->skip_offset);
1138 next_pc = start_pc + insn->skip_offset;
1139 } else {
1140 /* Pop 1 when jump not taken */
1141 estack_pop(stack, top, ax, bx);
1142 next_pc += sizeof(struct logical_op);
1143 }
1144 PO;
1145 }
1146
1147
1148 /* load field ref */
1149 OP(FILTER_OP_LOAD_FIELD_REF_STRING):
1150 {
1151 struct load_op *insn = (struct load_op *) pc;
1152 struct field_ref *ref = (struct field_ref *) insn->data;
1153
1154 dbg_printk("load field ref offset %u type string\n",
1155 ref->offset);
1156 estack_push(stack, top, ax, bx);
1157 estack_ax(stack, top)->u.s.str =
1158 *(const char * const *) &filter_stack_data[ref->offset];
1159 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1160 dbg_printk("Filter warning: loading a NULL string.\n");
1161 ret = -EINVAL;
1162 goto end;
1163 }
1164 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1165 estack_ax(stack, top)->u.s.literal_type =
1166 ESTACK_STRING_LITERAL_TYPE_NONE;
1167 estack_ax(stack, top)->u.s.user = 0;
1168 dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
1169 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1170 PO;
1171 }
1172
1173 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
1174 {
1175 struct load_op *insn = (struct load_op *) pc;
1176 struct field_ref *ref = (struct field_ref *) insn->data;
1177
1178 dbg_printk("load field ref offset %u type sequence\n",
1179 ref->offset);
1180 estack_push(stack, top, ax, bx);
1181 estack_ax(stack, top)->u.s.seq_len =
1182 *(unsigned long *) &filter_stack_data[ref->offset];
1183 estack_ax(stack, top)->u.s.str =
1184 *(const char **) (&filter_stack_data[ref->offset
1185 + sizeof(unsigned long)]);
1186 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1187 dbg_printk("Filter warning: loading a NULL sequence.\n");
1188 ret = -EINVAL;
1189 goto end;
1190 }
1191 estack_ax(stack, top)->u.s.literal_type =
1192 ESTACK_STRING_LITERAL_TYPE_NONE;
1193 estack_ax(stack, top)->u.s.user = 0;
1194 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1195 PO;
1196 }
1197
1198 OP(FILTER_OP_LOAD_FIELD_REF_S64):
1199 {
1200 struct load_op *insn = (struct load_op *) pc;
1201 struct field_ref *ref = (struct field_ref *) insn->data;
1202
1203 dbg_printk("load field ref offset %u type s64\n",
1204 ref->offset);
1205 estack_push(stack, top, ax, bx);
1206 estack_ax_v =
1207 ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
1208 dbg_printk("ref load s64 %lld\n",
1209 (long long) estack_ax_v);
1210 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1211 PO;
1212 }
1213
1214 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
1215 {
1216 BUG_ON(1);
1217 PO;
1218 }
1219
1220 /* load from immediate operand */
1221 OP(FILTER_OP_LOAD_STRING):
1222 {
1223 struct load_op *insn = (struct load_op *) pc;
1224
1225 dbg_printk("load string %s\n", insn->data);
1226 estack_push(stack, top, ax, bx);
1227 estack_ax(stack, top)->u.s.str = insn->data;
1228 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1229 estack_ax(stack, top)->u.s.literal_type =
1230 ESTACK_STRING_LITERAL_TYPE_PLAIN;
1231 estack_ax(stack, top)->u.s.user = 0;
1232 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1233 PO;
1234 }
1235
1236 OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
1237 {
1238 struct load_op *insn = (struct load_op *) pc;
1239
1240 dbg_printk("load globbing pattern %s\n", insn->data);
1241 estack_push(stack, top, ax, bx);
1242 estack_ax(stack, top)->u.s.str = insn->data;
1243 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1244 estack_ax(stack, top)->u.s.literal_type =
1245 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
1246 estack_ax(stack, top)->u.s.user = 0;
1247 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1248 PO;
1249 }
1250
1251 OP(FILTER_OP_LOAD_S64):
1252 {
1253 struct load_op *insn = (struct load_op *) pc;
1254
1255 estack_push(stack, top, ax, bx);
1256 estack_ax_v = ((struct literal_numeric *) insn->data)->v;
1257 dbg_printk("load s64 %lld\n",
1258 (long long) estack_ax_v);
1259 next_pc += sizeof(struct load_op)
1260 + sizeof(struct literal_numeric);
1261 PO;
1262 }
1263
1264 OP(FILTER_OP_LOAD_DOUBLE):
1265 {
1266 BUG_ON(1);
1267 PO;
1268 }
1269
1270 /* cast */
1271 OP(FILTER_OP_CAST_TO_S64):
1272 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1273 (unsigned int) *(filter_opcode_t *) pc);
1274 ret = -EINVAL;
1275 goto end;
1276
1277 OP(FILTER_OP_CAST_DOUBLE_TO_S64):
1278 {
1279 BUG_ON(1);
1280 PO;
1281 }
1282
1283 OP(FILTER_OP_CAST_NOP):
1284 {
1285 next_pc += sizeof(struct cast_op);
1286 PO;
1287 }
1288
1289 /* get context ref */
1290 OP(FILTER_OP_GET_CONTEXT_REF_STRING):
1291 {
1292 struct load_op *insn = (struct load_op *) pc;
1293 struct field_ref *ref = (struct field_ref *) insn->data;
1294 struct lttng_ctx_field *ctx_field;
1295 union lttng_ctx_value v;
1296
1297 dbg_printk("get context ref offset %u type string\n",
1298 ref->offset);
1299 ctx_field = &lttng_static_ctx->fields[ref->offset];
1300 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1301 estack_push(stack, top, ax, bx);
1302 estack_ax(stack, top)->u.s.str = v.str;
1303 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1304 dbg_printk("Filter warning: loading a NULL string.\n");
1305 ret = -EINVAL;
1306 goto end;
1307 }
1308 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1309 estack_ax(stack, top)->u.s.literal_type =
1310 ESTACK_STRING_LITERAL_TYPE_NONE;
1311 estack_ax(stack, top)->u.s.user = 0;
1312 dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
1313 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1314 PO;
1315 }
1316
1317 OP(FILTER_OP_GET_CONTEXT_REF_S64):
1318 {
1319 struct load_op *insn = (struct load_op *) pc;
1320 struct field_ref *ref = (struct field_ref *) insn->data;
1321 struct lttng_ctx_field *ctx_field;
1322 union lttng_ctx_value v;
1323
1324 dbg_printk("get context ref offset %u type s64\n",
1325 ref->offset);
1326 ctx_field = &lttng_static_ctx->fields[ref->offset];
1327 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1328 estack_push(stack, top, ax, bx);
1329 estack_ax_v = v.s64;
1330 dbg_printk("ref get context s64 %lld\n",
1331 (long long) estack_ax_v);
1332 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1333 PO;
1334 }
1335
1336 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
1337 {
1338 BUG_ON(1);
1339 PO;
1340 }
1341
1342 /* load userspace field ref */
1343 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
1344 {
1345 struct load_op *insn = (struct load_op *) pc;
1346 struct field_ref *ref = (struct field_ref *) insn->data;
1347
1348 dbg_printk("load field ref offset %u type user string\n",
1349 ref->offset);
1350 estack_push(stack, top, ax, bx);
1351 estack_ax(stack, top)->u.s.user_str =
1352 *(const char * const *) &filter_stack_data[ref->offset];
1353 if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
1354 dbg_printk("Filter warning: loading a NULL string.\n");
1355 ret = -EINVAL;
1356 goto end;
1357 }
1358 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1359 estack_ax(stack, top)->u.s.literal_type =
1360 ESTACK_STRING_LITERAL_TYPE_NONE;
1361 estack_ax(stack, top)->u.s.user = 1;
1362 dbg_load_ref_user_str_printk(estack_ax(stack, top));
1363 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1364 PO;
1365 }
1366
1367 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
1368 {
1369 struct load_op *insn = (struct load_op *) pc;
1370 struct field_ref *ref = (struct field_ref *) insn->data;
1371
1372 dbg_printk("load field ref offset %u type user sequence\n",
1373 ref->offset);
1374 estack_push(stack, top, ax, bx);
1375 estack_ax(stack, top)->u.s.seq_len =
1376 *(unsigned long *) &filter_stack_data[ref->offset];
1377 estack_ax(stack, top)->u.s.user_str =
1378 *(const char **) (&filter_stack_data[ref->offset
1379 + sizeof(unsigned long)]);
1380 if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
1381 dbg_printk("Filter warning: loading a NULL sequence.\n");
1382 ret = -EINVAL;
1383 goto end;
1384 }
1385 estack_ax(stack, top)->u.s.literal_type =
1386 ESTACK_STRING_LITERAL_TYPE_NONE;
1387 estack_ax(stack, top)->u.s.user = 1;
1388 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1389 PO;
1390 }
1391
1392 OP(FILTER_OP_GET_CONTEXT_ROOT):
1393 {
1394 dbg_printk("op get context root\n");
1395 estack_push(stack, top, ax, bx);
1396 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
1397 /* "field" only needed for variants. */
1398 estack_ax(stack, top)->u.ptr.field = NULL;
1399 next_pc += sizeof(struct load_op);
1400 PO;
1401 }
1402
1403 OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
1404 {
1405 BUG_ON(1);
1406 PO;
1407 }
1408
1409 OP(FILTER_OP_GET_PAYLOAD_ROOT):
1410 {
1411 dbg_printk("op get app payload root\n");
1412 estack_push(stack, top, ax, bx);
1413 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
1414 estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
1415 /* "field" only needed for variants. */
1416 estack_ax(stack, top)->u.ptr.field = NULL;
1417 next_pc += sizeof(struct load_op);
1418 PO;
1419 }
1420
1421 OP(FILTER_OP_GET_SYMBOL):
1422 {
1423 dbg_printk("op get symbol\n");
1424 switch (estack_ax(stack, top)->u.ptr.type) {
1425 case LOAD_OBJECT:
1426 printk(KERN_WARNING "Nested fields not implemented yet.\n");
1427 ret = -EINVAL;
1428 goto end;
1429 case LOAD_ROOT_CONTEXT:
1430 case LOAD_ROOT_APP_CONTEXT:
1431 case LOAD_ROOT_PAYLOAD:
1432 /*
1433 * symbol lookup is performed by
1434 * specialization.
1435 */
1436 ret = -EINVAL;
1437 goto end;
1438 }
1439 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1440 PO;
1441 }
1442
1443 OP(FILTER_OP_GET_SYMBOL_FIELD):
1444 {
1445 /*
1446 * Used for first variant encountered in a
1447 * traversal. Variants are not implemented yet.
1448 */
1449 ret = -EINVAL;
1450 goto end;
1451 }
1452
1453 OP(FILTER_OP_GET_INDEX_U16):
1454 {
1455 struct load_op *insn = (struct load_op *) pc;
1456 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1457
1458 dbg_printk("op get index u16\n");
1459 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1460 if (ret)
1461 goto end;
1462 estack_ax_v = estack_ax(stack, top)->u.v;
1463 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1464 PO;
1465 }
1466
1467 OP(FILTER_OP_GET_INDEX_U64):
1468 {
1469 struct load_op *insn = (struct load_op *) pc;
1470 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1471
1472 dbg_printk("op get index u64\n");
1473 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1474 if (ret)
1475 goto end;
1476 estack_ax_v = estack_ax(stack, top)->u.v;
1477 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1478 PO;
1479 }
1480
1481 OP(FILTER_OP_LOAD_FIELD):
1482 {
1483 dbg_printk("op load field\n");
1484 ret = dynamic_load_field(estack_ax(stack, top));
1485 if (ret)
1486 goto end;
1487 estack_ax_v = estack_ax(stack, top)->u.v;
1488 next_pc += sizeof(struct load_op);
1489 PO;
1490 }
1491
1492 OP(FILTER_OP_LOAD_FIELD_S8):
1493 {
1494 dbg_printk("op load field s8\n");
1495
1496 estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
1497 next_pc += sizeof(struct load_op);
1498 PO;
1499 }
1500 OP(FILTER_OP_LOAD_FIELD_S16):
1501 {
1502 dbg_printk("op load field s16\n");
1503
1504 estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
1505 next_pc += sizeof(struct load_op);
1506 PO;
1507 }
1508 OP(FILTER_OP_LOAD_FIELD_S32):
1509 {
1510 dbg_printk("op load field s32\n");
1511
1512 estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
1513 next_pc += sizeof(struct load_op);
1514 PO;
1515 }
1516 OP(FILTER_OP_LOAD_FIELD_S64):
1517 {
1518 dbg_printk("op load field s64\n");
1519
1520 estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
1521 next_pc += sizeof(struct load_op);
1522 PO;
1523 }
1524 OP(FILTER_OP_LOAD_FIELD_U8):
1525 {
1526 dbg_printk("op load field u8\n");
1527
1528 estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
1529 next_pc += sizeof(struct load_op);
1530 PO;
1531 }
1532 OP(FILTER_OP_LOAD_FIELD_U16):
1533 {
1534 dbg_printk("op load field u16\n");
1535
1536 estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
1537 next_pc += sizeof(struct load_op);
1538 PO;
1539 }
1540 OP(FILTER_OP_LOAD_FIELD_U32):
1541 {
1542 dbg_printk("op load field u32\n");
1543
1544 estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
1545 next_pc += sizeof(struct load_op);
1546 PO;
1547 }
1548 OP(FILTER_OP_LOAD_FIELD_U64):
1549 {
1550 dbg_printk("op load field u64\n");
1551
1552 estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
1553 next_pc += sizeof(struct load_op);
1554 PO;
1555 }
1556 OP(FILTER_OP_LOAD_FIELD_DOUBLE):
1557 {
1558 ret = -EINVAL;
1559 goto end;
1560 }
1561
1562 OP(FILTER_OP_LOAD_FIELD_STRING):
1563 {
1564 const char *str;
1565
1566 dbg_printk("op load field string\n");
1567 str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
1568 estack_ax(stack, top)->u.s.str = str;
1569 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1570 dbg_printk("Filter warning: loading a NULL string.\n");
1571 ret = -EINVAL;
1572 goto end;
1573 }
1574 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1575 estack_ax(stack, top)->u.s.literal_type =
1576 ESTACK_STRING_LITERAL_TYPE_NONE;
1577 next_pc += sizeof(struct load_op);
1578 PO;
1579 }
1580
1581 OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
1582 {
1583 const char *ptr;
1584
1585 dbg_printk("op load field string sequence\n");
1586 ptr = estack_ax(stack, top)->u.ptr.ptr;
1587 estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
1588 estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
1589 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1590 dbg_printk("Filter warning: loading a NULL sequence.\n");
1591 ret = -EINVAL;
1592 goto end;
1593 }
1594 estack_ax(stack, top)->u.s.literal_type =
1595 ESTACK_STRING_LITERAL_TYPE_NONE;
1596 next_pc += sizeof(struct load_op);
1597 PO;
1598 }
1599
1600 END_OP
1601 end:
1602 /* return 0 (discard) on error */
1603 if (ret)
1604 return 0;
1605 return retval;
1606 }
1607
1608 #undef START_OP
1609 #undef OP
1610 #undef PO
1611 #undef END_OP
/* gitweb scrape artifact, not part of the original source file:
 * "This page took 0.103349 seconds and 3 git commands to generate."
 */