/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <wrapper/uaccess.h>
28 #include <wrapper/frame.h>
29 #include <wrapper/types.h>
31 #include <lttng-filter.h>
32 #include <lttng-string-utils.h>
34 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode
);
37 * get_char should be called with page fault handler disabled if it is expected
38 * to handle user-space read.
41 char get_char(struct estack_entry
*reg
, size_t offset
)
43 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
48 /* Handle invalid access as end of string. */
49 if (unlikely(!lttng_access_ok(VERIFY_READ
,
50 reg
->u
.s
.user_str
+ offset
,
53 /* Handle fault (nonzero return value) as end of string. */
54 if (unlikely(__copy_from_user_inatomic(&c
,
55 reg
->u
.s
.user_str
+ offset
,
60 return reg
->u
.s
.str
[offset
];
66 * -2: unknown escape char.
70 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
75 *c
= get_char(reg
, *offset
);
91 char get_char_at_cb(size_t at
, void *data
)
93 return get_char(data
, at
);
97 int stack_star_glob_match(struct estack
*stack
, int top
, const char *cmp_type
)
99 bool has_user
= false;
101 struct estack_entry
*pattern_reg
;
102 struct estack_entry
*candidate_reg
;
104 /* Disable the page fault handler when reading from userspace. */
105 if (estack_bx(stack
, top
)->u
.s
.user
106 || estack_ax(stack
, top
)->u
.s
.user
) {
111 /* Find out which side is the pattern vs. the candidate. */
112 if (estack_ax(stack
, top
)->u
.s
.literal_type
== ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
) {
113 pattern_reg
= estack_ax(stack
, top
);
114 candidate_reg
= estack_bx(stack
, top
);
116 pattern_reg
= estack_bx(stack
, top
);
117 candidate_reg
= estack_ax(stack
, top
);
120 /* Perform the match operation. */
121 result
= !strutils_star_glob_match_char_cb(get_char_at_cb
,
122 pattern_reg
, get_char_at_cb
, candidate_reg
);
130 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
132 size_t offset_bx
= 0, offset_ax
= 0;
133 int diff
, has_user
= 0;
135 if (estack_bx(stack
, top
)->u
.s
.user
136 || estack_ax(stack
, top
)->u
.s
.user
) {
144 char char_bx
, char_ax
;
146 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
147 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
149 if (unlikely(char_bx
== '\0')) {
150 if (char_ax
== '\0') {
154 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
155 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
156 ret
= parse_char(estack_ax(stack
, top
),
157 &char_ax
, &offset_ax
);
167 if (unlikely(char_ax
== '\0')) {
168 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
169 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
170 ret
= parse_char(estack_bx(stack
, top
),
171 &char_bx
, &offset_bx
);
180 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
181 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
182 ret
= parse_char(estack_bx(stack
, top
),
183 &char_bx
, &offset_bx
);
187 } else if (ret
== -2) {
190 /* else compare both char */
192 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
193 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
194 ret
= parse_char(estack_ax(stack
, top
),
195 &char_ax
, &offset_ax
);
199 } else if (ret
== -2) {
216 diff
= char_bx
- char_ax
;
/*
 * Trivial filter that never accepts: always return 0 (discard the
 * event). Used as the interpreter entry point when no bytecode should
 * run. All parameters are ignored.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}
235 #ifdef INTERPRETER_USE_SWITCH
238 * Fallback for compilers that do not support taking address of labels.
242 start_pc = &bytecode->data[0]; \
243 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
245 dbg_printk("Executing op %s (%u)\n", \
246 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
247 (unsigned int) *(filter_opcode_t *) pc); \
248 switch (*(filter_opcode_t *) pc) {
250 #define OP(name) case name
260 * Dispatch-table based interpreter.
264 start_pc = &bytecode->data[0]; \
265 pc = next_pc = start_pc; \
266 if (unlikely(pc - start_pc >= bytecode->len)) \
268 goto *dispatch[*(filter_opcode_t *) pc];
275 goto *dispatch[*(filter_opcode_t *) pc];
282 * Return 0 (discard), or raise the 0x1 flag (log event).
283 * Currently, other flags are kept for future extensions and have no
286 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
287 struct lttng_probe_ctx
*lttng_probe_ctx
,
288 const char *filter_stack_data
)
290 struct bytecode_runtime
*bytecode
= filter_data
;
291 void *pc
, *next_pc
, *start_pc
;
294 struct estack _stack
;
295 struct estack
*stack
= &_stack
;
296 register int64_t ax
= 0, bx
= 0;
297 register int top
= FILTER_STACK_EMPTY
;
298 #ifndef INTERPRETER_USE_SWITCH
299 static void *dispatch
[NR_FILTER_OPS
] = {
300 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
302 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
305 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
306 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
307 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
308 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
309 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
310 [ FILTER_OP_RSHIFT
] = &&LABEL_FILTER_OP_RSHIFT
,
311 [ FILTER_OP_LSHIFT
] = &&LABEL_FILTER_OP_LSHIFT
,
312 [ FILTER_OP_BIN_AND
] = &&LABEL_FILTER_OP_BIN_AND
,
313 [ FILTER_OP_BIN_OR
] = &&LABEL_FILTER_OP_BIN_OR
,
314 [ FILTER_OP_BIN_XOR
] = &&LABEL_FILTER_OP_BIN_XOR
,
316 /* binary comparators */
317 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
318 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
319 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
320 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
321 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
322 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
324 /* string binary comparator */
325 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
326 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
327 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
328 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
329 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
330 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
332 /* globbing pattern binary comparator */
333 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING
,
334 [ FILTER_OP_NE_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING
,
336 /* s64 binary comparator */
337 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
338 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
339 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
340 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
341 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
342 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
344 /* double binary comparator */
345 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
346 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
347 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
348 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
349 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
350 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
352 /* Mixed S64-double binary comparators */
353 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
354 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
355 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
356 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
357 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
358 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
360 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
361 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
362 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
363 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
364 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
365 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
368 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
369 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
370 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
371 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
372 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
373 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
374 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
375 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
376 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
379 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
380 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
383 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
384 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
385 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
386 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
387 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
389 /* load from immediate operand */
390 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
391 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING
,
392 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
393 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
396 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
397 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
398 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
400 /* get context ref */
401 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
402 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
403 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
404 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
406 /* load userspace field ref */
407 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
408 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
410 #endif /* #ifndef INTERPRETER_USE_SWITCH */
414 OP(FILTER_OP_UNKNOWN
):
415 OP(FILTER_OP_LOAD_FIELD_REF
):
416 OP(FILTER_OP_GET_CONTEXT_REF
):
417 #ifdef INTERPRETER_USE_SWITCH
419 #endif /* INTERPRETER_USE_SWITCH */
420 printk(KERN_WARNING
"unknown bytecode op %u\n",
421 (unsigned int) *(filter_opcode_t
*) pc
);
425 OP(FILTER_OP_RETURN
):
426 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
427 retval
= !!estack_ax_v
;
437 OP(FILTER_OP_RSHIFT
):
438 OP(FILTER_OP_LSHIFT
):
439 OP(FILTER_OP_BIN_AND
):
440 OP(FILTER_OP_BIN_OR
):
441 OP(FILTER_OP_BIN_XOR
):
442 printk(KERN_WARNING
"unsupported bytecode op %u\n",
443 (unsigned int) *(filter_opcode_t
*) pc
);
453 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
454 (unsigned int) *(filter_opcode_t
*) pc
);
458 OP(FILTER_OP_EQ_STRING
):
462 res
= (stack_strcmp(stack
, top
, "==") == 0);
463 estack_pop(stack
, top
, ax
, bx
);
465 next_pc
+= sizeof(struct binary_op
);
468 OP(FILTER_OP_NE_STRING
):
472 res
= (stack_strcmp(stack
, top
, "!=") != 0);
473 estack_pop(stack
, top
, ax
, bx
);
475 next_pc
+= sizeof(struct binary_op
);
478 OP(FILTER_OP_GT_STRING
):
482 res
= (stack_strcmp(stack
, top
, ">") > 0);
483 estack_pop(stack
, top
, ax
, bx
);
485 next_pc
+= sizeof(struct binary_op
);
488 OP(FILTER_OP_LT_STRING
):
492 res
= (stack_strcmp(stack
, top
, "<") < 0);
493 estack_pop(stack
, top
, ax
, bx
);
495 next_pc
+= sizeof(struct binary_op
);
498 OP(FILTER_OP_GE_STRING
):
502 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
503 estack_pop(stack
, top
, ax
, bx
);
505 next_pc
+= sizeof(struct binary_op
);
508 OP(FILTER_OP_LE_STRING
):
512 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
513 estack_pop(stack
, top
, ax
, bx
);
515 next_pc
+= sizeof(struct binary_op
);
519 OP(FILTER_OP_EQ_STAR_GLOB_STRING
):
523 res
= (stack_star_glob_match(stack
, top
, "==") == 0);
524 estack_pop(stack
, top
, ax
, bx
);
526 next_pc
+= sizeof(struct binary_op
);
529 OP(FILTER_OP_NE_STAR_GLOB_STRING
):
533 res
= (stack_star_glob_match(stack
, top
, "!=") != 0);
534 estack_pop(stack
, top
, ax
, bx
);
536 next_pc
+= sizeof(struct binary_op
);
540 OP(FILTER_OP_EQ_S64
):
544 res
= (estack_bx_v
== estack_ax_v
);
545 estack_pop(stack
, top
, ax
, bx
);
547 next_pc
+= sizeof(struct binary_op
);
550 OP(FILTER_OP_NE_S64
):
554 res
= (estack_bx_v
!= estack_ax_v
);
555 estack_pop(stack
, top
, ax
, bx
);
557 next_pc
+= sizeof(struct binary_op
);
560 OP(FILTER_OP_GT_S64
):
564 res
= (estack_bx_v
> estack_ax_v
);
565 estack_pop(stack
, top
, ax
, bx
);
567 next_pc
+= sizeof(struct binary_op
);
570 OP(FILTER_OP_LT_S64
):
574 res
= (estack_bx_v
< estack_ax_v
);
575 estack_pop(stack
, top
, ax
, bx
);
577 next_pc
+= sizeof(struct binary_op
);
580 OP(FILTER_OP_GE_S64
):
584 res
= (estack_bx_v
>= estack_ax_v
);
585 estack_pop(stack
, top
, ax
, bx
);
587 next_pc
+= sizeof(struct binary_op
);
590 OP(FILTER_OP_LE_S64
):
594 res
= (estack_bx_v
<= estack_ax_v
);
595 estack_pop(stack
, top
, ax
, bx
);
597 next_pc
+= sizeof(struct binary_op
);
601 OP(FILTER_OP_EQ_DOUBLE
):
602 OP(FILTER_OP_NE_DOUBLE
):
603 OP(FILTER_OP_GT_DOUBLE
):
604 OP(FILTER_OP_LT_DOUBLE
):
605 OP(FILTER_OP_GE_DOUBLE
):
606 OP(FILTER_OP_LE_DOUBLE
):
612 /* Mixed S64-double binary comparators */
613 OP(FILTER_OP_EQ_DOUBLE_S64
):
614 OP(FILTER_OP_NE_DOUBLE_S64
):
615 OP(FILTER_OP_GT_DOUBLE_S64
):
616 OP(FILTER_OP_LT_DOUBLE_S64
):
617 OP(FILTER_OP_GE_DOUBLE_S64
):
618 OP(FILTER_OP_LE_DOUBLE_S64
):
619 OP(FILTER_OP_EQ_S64_DOUBLE
):
620 OP(FILTER_OP_NE_S64_DOUBLE
):
621 OP(FILTER_OP_GT_S64_DOUBLE
):
622 OP(FILTER_OP_LT_S64_DOUBLE
):
623 OP(FILTER_OP_GE_S64_DOUBLE
):
624 OP(FILTER_OP_LE_S64_DOUBLE
):
631 OP(FILTER_OP_UNARY_PLUS
):
632 OP(FILTER_OP_UNARY_MINUS
):
633 OP(FILTER_OP_UNARY_NOT
):
634 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
635 (unsigned int) *(filter_opcode_t
*) pc
);
640 OP(FILTER_OP_UNARY_PLUS_S64
):
642 next_pc
+= sizeof(struct unary_op
);
645 OP(FILTER_OP_UNARY_MINUS_S64
):
647 estack_ax_v
= -estack_ax_v
;
648 next_pc
+= sizeof(struct unary_op
);
651 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
652 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
657 OP(FILTER_OP_UNARY_NOT_S64
):
659 estack_ax_v
= !estack_ax_v
;
660 next_pc
+= sizeof(struct unary_op
);
663 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
672 struct logical_op
*insn
= (struct logical_op
*) pc
;
674 /* If AX is 0, skip and evaluate to 0 */
675 if (unlikely(estack_ax_v
== 0)) {
676 dbg_printk("Jumping to bytecode offset %u\n",
677 (unsigned int) insn
->skip_offset
);
678 next_pc
= start_pc
+ insn
->skip_offset
;
680 /* Pop 1 when jump not taken */
681 estack_pop(stack
, top
, ax
, bx
);
682 next_pc
+= sizeof(struct logical_op
);
688 struct logical_op
*insn
= (struct logical_op
*) pc
;
690 /* If AX is nonzero, skip and evaluate to 1 */
692 if (unlikely(estack_ax_v
!= 0)) {
694 dbg_printk("Jumping to bytecode offset %u\n",
695 (unsigned int) insn
->skip_offset
);
696 next_pc
= start_pc
+ insn
->skip_offset
;
698 /* Pop 1 when jump not taken */
699 estack_pop(stack
, top
, ax
, bx
);
700 next_pc
+= sizeof(struct logical_op
);
707 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
709 struct load_op
*insn
= (struct load_op
*) pc
;
710 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
712 dbg_printk("load field ref offset %u type string\n",
714 estack_push(stack
, top
, ax
, bx
);
715 estack_ax(stack
, top
)->u
.s
.str
=
716 *(const char * const *) &filter_stack_data
[ref
->offset
];
717 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
718 dbg_printk("Filter warning: loading a NULL string.\n");
722 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
723 estack_ax(stack
, top
)->u
.s
.literal_type
=
724 ESTACK_STRING_LITERAL_TYPE_NONE
;
725 estack_ax(stack
, top
)->u
.s
.user
= 0;
726 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
727 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
731 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
733 struct load_op
*insn
= (struct load_op
*) pc
;
734 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
736 dbg_printk("load field ref offset %u type sequence\n",
738 estack_push(stack
, top
, ax
, bx
);
739 estack_ax(stack
, top
)->u
.s
.seq_len
=
740 *(unsigned long *) &filter_stack_data
[ref
->offset
];
741 estack_ax(stack
, top
)->u
.s
.str
=
742 *(const char **) (&filter_stack_data
[ref
->offset
743 + sizeof(unsigned long)]);
744 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
745 dbg_printk("Filter warning: loading a NULL sequence.\n");
749 estack_ax(stack
, top
)->u
.s
.literal_type
=
750 ESTACK_STRING_LITERAL_TYPE_NONE
;
751 estack_ax(stack
, top
)->u
.s
.user
= 0;
752 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
756 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
758 struct load_op
*insn
= (struct load_op
*) pc
;
759 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
761 dbg_printk("load field ref offset %u type s64\n",
763 estack_push(stack
, top
, ax
, bx
);
765 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
766 dbg_printk("ref load s64 %lld\n",
767 (long long) estack_ax_v
);
768 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
772 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
778 /* load from immediate operand */
779 OP(FILTER_OP_LOAD_STRING
):
781 struct load_op
*insn
= (struct load_op
*) pc
;
783 dbg_printk("load string %s\n", insn
->data
);
784 estack_push(stack
, top
, ax
, bx
);
785 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
786 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
787 estack_ax(stack
, top
)->u
.s
.literal_type
=
788 ESTACK_STRING_LITERAL_TYPE_PLAIN
;
789 estack_ax(stack
, top
)->u
.s
.user
= 0;
790 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
794 OP(FILTER_OP_LOAD_STAR_GLOB_STRING
):
796 struct load_op
*insn
= (struct load_op
*) pc
;
798 dbg_printk("load globbing pattern %s\n", insn
->data
);
799 estack_push(stack
, top
, ax
, bx
);
800 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
801 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
802 estack_ax(stack
, top
)->u
.s
.literal_type
=
803 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
;
804 estack_ax(stack
, top
)->u
.s
.user
= 0;
805 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
809 OP(FILTER_OP_LOAD_S64
):
811 struct load_op
*insn
= (struct load_op
*) pc
;
813 estack_push(stack
, top
, ax
, bx
);
814 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
815 dbg_printk("load s64 %lld\n",
816 (long long) estack_ax_v
);
817 next_pc
+= sizeof(struct load_op
)
818 + sizeof(struct literal_numeric
);
822 OP(FILTER_OP_LOAD_DOUBLE
):
829 OP(FILTER_OP_CAST_TO_S64
):
830 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
831 (unsigned int) *(filter_opcode_t
*) pc
);
835 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
841 OP(FILTER_OP_CAST_NOP
):
843 next_pc
+= sizeof(struct cast_op
);
847 /* get context ref */
848 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
850 struct load_op
*insn
= (struct load_op
*) pc
;
851 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
852 struct lttng_ctx_field
*ctx_field
;
853 union lttng_ctx_value v
;
855 dbg_printk("get context ref offset %u type string\n",
857 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
858 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
859 estack_push(stack
, top
, ax
, bx
);
860 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
861 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
862 dbg_printk("Filter warning: loading a NULL string.\n");
866 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
867 estack_ax(stack
, top
)->u
.s
.literal_type
=
868 ESTACK_STRING_LITERAL_TYPE_NONE
;
869 estack_ax(stack
, top
)->u
.s
.user
= 0;
870 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
871 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
875 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
877 struct load_op
*insn
= (struct load_op
*) pc
;
878 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
879 struct lttng_ctx_field
*ctx_field
;
880 union lttng_ctx_value v
;
882 dbg_printk("get context ref offset %u type s64\n",
884 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
885 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
886 estack_push(stack
, top
, ax
, bx
);
888 dbg_printk("ref get context s64 %lld\n",
889 (long long) estack_ax_v
);
890 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
894 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
900 /* load userspace field ref */
901 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
903 struct load_op
*insn
= (struct load_op
*) pc
;
904 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
906 dbg_printk("load field ref offset %u type user string\n",
908 estack_push(stack
, top
, ax
, bx
);
909 estack_ax(stack
, top
)->u
.s
.user_str
=
910 *(const char * const *) &filter_stack_data
[ref
->offset
];
911 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
912 dbg_printk("Filter warning: loading a NULL string.\n");
916 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
917 estack_ax(stack
, top
)->u
.s
.literal_type
=
918 ESTACK_STRING_LITERAL_TYPE_NONE
;
919 estack_ax(stack
, top
)->u
.s
.user
= 1;
920 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
921 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
925 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
927 struct load_op
*insn
= (struct load_op
*) pc
;
928 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
930 dbg_printk("load field ref offset %u type user sequence\n",
932 estack_push(stack
, top
, ax
, bx
);
933 estack_ax(stack
, top
)->u
.s
.seq_len
=
934 *(unsigned long *) &filter_stack_data
[ref
->offset
];
935 estack_ax(stack
, top
)->u
.s
.user_str
=
936 *(const char **) (&filter_stack_data
[ref
->offset
937 + sizeof(unsigned long)]);
938 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
939 dbg_printk("Filter warning: loading a NULL sequence.\n");
943 estack_ax(stack
, top
)->u
.s
.literal_type
=
944 ESTACK_STRING_LITERAL_TYPE_NONE
;
945 estack_ax(stack
, top
)->u
.s
.user
= 1;
946 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
952 /* return 0 (discard) on error */