/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/uaccess.h>
28 #include <wrapper/frame.h>
30 #include <lttng-filter.h>
32 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode
);
35 * get_char should be called with page fault handler disabled if it is expected
36 * to handle user-space read.
39 char get_char(struct estack_entry
*reg
, size_t offset
)
41 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
46 /* Handle invalid access as end of string. */
47 if (unlikely(!access_ok(VERIFY_READ
,
48 reg
->u
.s
.user_str
+ offset
,
51 /* Handle fault (nonzero return value) as end of string. */
52 if (unlikely(__copy_from_user_inatomic(&c
,
53 reg
->u
.s
.user_str
+ offset
,
58 return reg
->u
.s
.str
[offset
];
/*
 * Parse the character *c at *offset within string register @reg,
 * decoding escape sequences.
 *
 * Return values:
 *  0: normal character (escaped characters are unescaped into *c).
 * -1: wildcard found.
 * -2: unknown escape char.
 */
static
int parse_char(struct estack_entry *reg, char *c, size_t *offset)
{
	switch (*c) {
	case '\\':
		/* Escape sequence: decode the following character. */
		(*offset)++;
		*c = get_char(reg, *offset);
		switch (*c) {
		case '\\':
		case '*':
			/* Escaped backslash or star stands for itself. */
			break;
		default:
			return -2;
		}
		return 0;
	case '*':
		return -1;
	default:
		return 0;
	}
}
89 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
91 size_t offset_bx
= 0, offset_ax
= 0;
92 int diff
, has_user
= 0;
95 if (estack_bx(stack
, top
)->u
.s
.user
96 || estack_ax(stack
, top
)->u
.s
.user
) {
106 char char_bx
, char_ax
;
108 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
109 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
111 if (unlikely(char_bx
== '\0')) {
112 if (char_ax
== '\0') {
116 if (estack_ax(stack
, top
)->u
.s
.literal
) {
117 ret
= parse_char(estack_ax(stack
, top
),
118 &char_ax
, &offset_ax
);
128 if (unlikely(char_ax
== '\0')) {
129 if (estack_bx(stack
, top
)->u
.s
.literal
) {
130 ret
= parse_char(estack_bx(stack
, top
),
131 &char_bx
, &offset_bx
);
140 if (estack_bx(stack
, top
)->u
.s
.literal
) {
141 ret
= parse_char(estack_bx(stack
, top
),
142 &char_bx
, &offset_bx
);
146 } else if (ret
== -2) {
149 /* else compare both char */
151 if (estack_ax(stack
, top
)->u
.s
.literal
) {
152 ret
= parse_char(estack_ax(stack
, top
),
153 &char_ax
, &offset_ax
);
157 } else if (ret
== -2) {
174 diff
= char_bx
- char_ax
;
/*
 * Always-false filter: unconditionally evaluates to 0 (discard),
 * ignoring the event payload. NOTE(review): presumably installed in
 * place of the bytecode interpreter when the filter is statically known
 * to be false — confirm against the callers in lttng-filter.c.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;	/* 0 == discard event */
}
194 #ifdef INTERPRETER_USE_SWITCH
197 * Fallback for compilers that do not support taking address of labels.
201 start_pc = &bytecode->data[0]; \
202 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
204 dbg_printk("Executing op %s (%u)\n", \
205 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
206 (unsigned int) *(filter_opcode_t *) pc); \
207 switch (*(filter_opcode_t *) pc) {
209 #define OP(name) case name
219 * Dispatch-table based interpreter.
223 start_pc = &bytecode->data[0]; \
224 pc = next_pc = start_pc; \
225 if (unlikely(pc - start_pc >= bytecode->len)) \
227 goto *dispatch[*(filter_opcode_t *) pc];
234 goto *dispatch[*(filter_opcode_t *) pc];
241 * Return 0 (discard), or raise the 0x1 flag (log event).
242 * Currently, other flags are kept for future extensions and have no
245 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
246 struct lttng_probe_ctx
*lttng_probe_ctx
,
247 const char *filter_stack_data
)
249 struct bytecode_runtime
*bytecode
= filter_data
;
250 void *pc
, *next_pc
, *start_pc
;
253 struct estack _stack
;
254 struct estack
*stack
= &_stack
;
255 register int64_t ax
= 0, bx
= 0;
256 register int top
= FILTER_STACK_EMPTY
;
257 #ifndef INTERPRETER_USE_SWITCH
258 static void *dispatch
[NR_FILTER_OPS
] = {
259 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
261 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
264 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
265 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
266 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
267 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
268 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
269 [ FILTER_OP_RSHIFT
] = &&LABEL_FILTER_OP_RSHIFT
,
270 [ FILTER_OP_LSHIFT
] = &&LABEL_FILTER_OP_LSHIFT
,
271 [ FILTER_OP_BIN_AND
] = &&LABEL_FILTER_OP_BIN_AND
,
272 [ FILTER_OP_BIN_OR
] = &&LABEL_FILTER_OP_BIN_OR
,
273 [ FILTER_OP_BIN_XOR
] = &&LABEL_FILTER_OP_BIN_XOR
,
275 /* binary comparators */
276 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
277 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
278 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
279 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
280 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
281 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
283 /* string binary comparator */
284 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
285 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
286 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
287 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
288 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
289 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
291 /* s64 binary comparator */
292 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
293 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
294 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
295 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
296 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
297 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
299 /* double binary comparator */
300 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
301 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
302 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
303 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
304 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
305 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
307 /* Mixed S64-double binary comparators */
308 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
309 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
310 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
311 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
312 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
313 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
315 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
316 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
317 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
318 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
319 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
320 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
323 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
324 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
325 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
326 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
327 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
328 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
329 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
330 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
331 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
334 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
335 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
338 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
339 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
340 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
341 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
342 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
344 /* load from immediate operand */
345 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
346 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
347 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
350 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
351 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
352 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
354 /* get context ref */
355 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
356 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
357 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
358 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
360 /* load userspace field ref */
361 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
362 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
364 #endif /* #ifndef INTERPRETER_USE_SWITCH */
368 OP(FILTER_OP_UNKNOWN
):
369 OP(FILTER_OP_LOAD_FIELD_REF
):
370 OP(FILTER_OP_GET_CONTEXT_REF
):
371 #ifdef INTERPRETER_USE_SWITCH
373 #endif /* INTERPRETER_USE_SWITCH */
374 printk(KERN_WARNING
"unknown bytecode op %u\n",
375 (unsigned int) *(filter_opcode_t
*) pc
);
379 OP(FILTER_OP_RETURN
):
380 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
381 retval
= !!estack_ax_v
;
391 OP(FILTER_OP_RSHIFT
):
392 OP(FILTER_OP_LSHIFT
):
393 OP(FILTER_OP_BIN_AND
):
394 OP(FILTER_OP_BIN_OR
):
395 OP(FILTER_OP_BIN_XOR
):
396 printk(KERN_WARNING
"unsupported bytecode op %u\n",
397 (unsigned int) *(filter_opcode_t
*) pc
);
407 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
408 (unsigned int) *(filter_opcode_t
*) pc
);
412 OP(FILTER_OP_EQ_STRING
):
416 res
= (stack_strcmp(stack
, top
, "==") == 0);
417 estack_pop(stack
, top
, ax
, bx
);
419 next_pc
+= sizeof(struct binary_op
);
422 OP(FILTER_OP_NE_STRING
):
426 res
= (stack_strcmp(stack
, top
, "!=") != 0);
427 estack_pop(stack
, top
, ax
, bx
);
429 next_pc
+= sizeof(struct binary_op
);
432 OP(FILTER_OP_GT_STRING
):
436 res
= (stack_strcmp(stack
, top
, ">") > 0);
437 estack_pop(stack
, top
, ax
, bx
);
439 next_pc
+= sizeof(struct binary_op
);
442 OP(FILTER_OP_LT_STRING
):
446 res
= (stack_strcmp(stack
, top
, "<") < 0);
447 estack_pop(stack
, top
, ax
, bx
);
449 next_pc
+= sizeof(struct binary_op
);
452 OP(FILTER_OP_GE_STRING
):
456 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
457 estack_pop(stack
, top
, ax
, bx
);
459 next_pc
+= sizeof(struct binary_op
);
462 OP(FILTER_OP_LE_STRING
):
466 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
467 estack_pop(stack
, top
, ax
, bx
);
469 next_pc
+= sizeof(struct binary_op
);
473 OP(FILTER_OP_EQ_S64
):
477 res
= (estack_bx_v
== estack_ax_v
);
478 estack_pop(stack
, top
, ax
, bx
);
480 next_pc
+= sizeof(struct binary_op
);
483 OP(FILTER_OP_NE_S64
):
487 res
= (estack_bx_v
!= estack_ax_v
);
488 estack_pop(stack
, top
, ax
, bx
);
490 next_pc
+= sizeof(struct binary_op
);
493 OP(FILTER_OP_GT_S64
):
497 res
= (estack_bx_v
> estack_ax_v
);
498 estack_pop(stack
, top
, ax
, bx
);
500 next_pc
+= sizeof(struct binary_op
);
503 OP(FILTER_OP_LT_S64
):
507 res
= (estack_bx_v
< estack_ax_v
);
508 estack_pop(stack
, top
, ax
, bx
);
510 next_pc
+= sizeof(struct binary_op
);
513 OP(FILTER_OP_GE_S64
):
517 res
= (estack_bx_v
>= estack_ax_v
);
518 estack_pop(stack
, top
, ax
, bx
);
520 next_pc
+= sizeof(struct binary_op
);
523 OP(FILTER_OP_LE_S64
):
527 res
= (estack_bx_v
<= estack_ax_v
);
528 estack_pop(stack
, top
, ax
, bx
);
530 next_pc
+= sizeof(struct binary_op
);
534 OP(FILTER_OP_EQ_DOUBLE
):
535 OP(FILTER_OP_NE_DOUBLE
):
536 OP(FILTER_OP_GT_DOUBLE
):
537 OP(FILTER_OP_LT_DOUBLE
):
538 OP(FILTER_OP_GE_DOUBLE
):
539 OP(FILTER_OP_LE_DOUBLE
):
545 /* Mixed S64-double binary comparators */
546 OP(FILTER_OP_EQ_DOUBLE_S64
):
547 OP(FILTER_OP_NE_DOUBLE_S64
):
548 OP(FILTER_OP_GT_DOUBLE_S64
):
549 OP(FILTER_OP_LT_DOUBLE_S64
):
550 OP(FILTER_OP_GE_DOUBLE_S64
):
551 OP(FILTER_OP_LE_DOUBLE_S64
):
552 OP(FILTER_OP_EQ_S64_DOUBLE
):
553 OP(FILTER_OP_NE_S64_DOUBLE
):
554 OP(FILTER_OP_GT_S64_DOUBLE
):
555 OP(FILTER_OP_LT_S64_DOUBLE
):
556 OP(FILTER_OP_GE_S64_DOUBLE
):
557 OP(FILTER_OP_LE_S64_DOUBLE
):
564 OP(FILTER_OP_UNARY_PLUS
):
565 OP(FILTER_OP_UNARY_MINUS
):
566 OP(FILTER_OP_UNARY_NOT
):
567 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
568 (unsigned int) *(filter_opcode_t
*) pc
);
573 OP(FILTER_OP_UNARY_PLUS_S64
):
575 next_pc
+= sizeof(struct unary_op
);
578 OP(FILTER_OP_UNARY_MINUS_S64
):
580 estack_ax_v
= -estack_ax_v
;
581 next_pc
+= sizeof(struct unary_op
);
584 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
585 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
590 OP(FILTER_OP_UNARY_NOT_S64
):
592 estack_ax_v
= !estack_ax_v
;
593 next_pc
+= sizeof(struct unary_op
);
596 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
605 struct logical_op
*insn
= (struct logical_op
*) pc
;
607 /* If AX is 0, skip and evaluate to 0 */
608 if (unlikely(estack_ax_v
== 0)) {
609 dbg_printk("Jumping to bytecode offset %u\n",
610 (unsigned int) insn
->skip_offset
);
611 next_pc
= start_pc
+ insn
->skip_offset
;
613 /* Pop 1 when jump not taken */
614 estack_pop(stack
, top
, ax
, bx
);
615 next_pc
+= sizeof(struct logical_op
);
621 struct logical_op
*insn
= (struct logical_op
*) pc
;
623 /* If AX is nonzero, skip and evaluate to 1 */
625 if (unlikely(estack_ax_v
!= 0)) {
627 dbg_printk("Jumping to bytecode offset %u\n",
628 (unsigned int) insn
->skip_offset
);
629 next_pc
= start_pc
+ insn
->skip_offset
;
631 /* Pop 1 when jump not taken */
632 estack_pop(stack
, top
, ax
, bx
);
633 next_pc
+= sizeof(struct logical_op
);
640 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
642 struct load_op
*insn
= (struct load_op
*) pc
;
643 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
645 dbg_printk("load field ref offset %u type string\n",
647 estack_push(stack
, top
, ax
, bx
);
648 estack_ax(stack
, top
)->u
.s
.str
=
649 *(const char * const *) &filter_stack_data
[ref
->offset
];
650 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
651 dbg_printk("Filter warning: loading a NULL string.\n");
655 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
656 estack_ax(stack
, top
)->u
.s
.literal
= 0;
657 estack_ax(stack
, top
)->u
.s
.user
= 0;
658 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
659 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
663 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
665 struct load_op
*insn
= (struct load_op
*) pc
;
666 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
668 dbg_printk("load field ref offset %u type sequence\n",
670 estack_push(stack
, top
, ax
, bx
);
671 estack_ax(stack
, top
)->u
.s
.seq_len
=
672 *(unsigned long *) &filter_stack_data
[ref
->offset
];
673 estack_ax(stack
, top
)->u
.s
.str
=
674 *(const char **) (&filter_stack_data
[ref
->offset
675 + sizeof(unsigned long)]);
676 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
677 dbg_printk("Filter warning: loading a NULL sequence.\n");
681 estack_ax(stack
, top
)->u
.s
.literal
= 0;
682 estack_ax(stack
, top
)->u
.s
.user
= 0;
683 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
687 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
689 struct load_op
*insn
= (struct load_op
*) pc
;
690 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
692 dbg_printk("load field ref offset %u type s64\n",
694 estack_push(stack
, top
, ax
, bx
);
696 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
697 dbg_printk("ref load s64 %lld\n",
698 (long long) estack_ax_v
);
699 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
703 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
709 /* load from immediate operand */
710 OP(FILTER_OP_LOAD_STRING
):
712 struct load_op
*insn
= (struct load_op
*) pc
;
714 dbg_printk("load string %s\n", insn
->data
);
715 estack_push(stack
, top
, ax
, bx
);
716 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
717 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
718 estack_ax(stack
, top
)->u
.s
.literal
= 1;
719 estack_ax(stack
, top
)->u
.s
.user
= 0;
720 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
724 OP(FILTER_OP_LOAD_S64
):
726 struct load_op
*insn
= (struct load_op
*) pc
;
728 estack_push(stack
, top
, ax
, bx
);
729 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
730 dbg_printk("load s64 %lld\n",
731 (long long) estack_ax_v
);
732 next_pc
+= sizeof(struct load_op
)
733 + sizeof(struct literal_numeric
);
737 OP(FILTER_OP_LOAD_DOUBLE
):
744 OP(FILTER_OP_CAST_TO_S64
):
745 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
746 (unsigned int) *(filter_opcode_t
*) pc
);
750 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
756 OP(FILTER_OP_CAST_NOP
):
758 next_pc
+= sizeof(struct cast_op
);
762 /* get context ref */
763 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
765 struct load_op
*insn
= (struct load_op
*) pc
;
766 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
767 struct lttng_ctx_field
*ctx_field
;
768 union lttng_ctx_value v
;
770 dbg_printk("get context ref offset %u type string\n",
772 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
773 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
774 estack_push(stack
, top
, ax
, bx
);
775 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
776 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
777 dbg_printk("Filter warning: loading a NULL string.\n");
781 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
782 estack_ax(stack
, top
)->u
.s
.literal
= 0;
783 estack_ax(stack
, top
)->u
.s
.user
= 0;
784 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
785 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
789 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
791 struct load_op
*insn
= (struct load_op
*) pc
;
792 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
793 struct lttng_ctx_field
*ctx_field
;
794 union lttng_ctx_value v
;
796 dbg_printk("get context ref offset %u type s64\n",
798 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
799 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
800 estack_push(stack
, top
, ax
, bx
);
802 dbg_printk("ref get context s64 %lld\n",
803 (long long) estack_ax_v
);
804 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
808 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
814 /* load userspace field ref */
815 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
817 struct load_op
*insn
= (struct load_op
*) pc
;
818 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
820 dbg_printk("load field ref offset %u type user string\n",
822 estack_push(stack
, top
, ax
, bx
);
823 estack_ax(stack
, top
)->u
.s
.user_str
=
824 *(const char * const *) &filter_stack_data
[ref
->offset
];
825 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
826 dbg_printk("Filter warning: loading a NULL string.\n");
830 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
831 estack_ax(stack
, top
)->u
.s
.literal
= 0;
832 estack_ax(stack
, top
)->u
.s
.user
= 1;
833 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
834 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
838 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
840 struct load_op
*insn
= (struct load_op
*) pc
;
841 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
843 dbg_printk("load field ref offset %u type user sequence\n",
845 estack_push(stack
, top
, ax
, bx
);
846 estack_ax(stack
, top
)->u
.s
.seq_len
=
847 *(unsigned long *) &filter_stack_data
[ref
->offset
];
848 estack_ax(stack
, top
)->u
.s
.user_str
=
849 *(const char **) (&filter_stack_data
[ref
->offset
850 + sizeof(unsigned long)]);
851 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
852 dbg_printk("Filter warning: loading a NULL sequence.\n");
856 estack_ax(stack
, top
)->u
.s
.literal
= 0;
857 estack_ax(stack
, top
)->u
.s
.user
= 1;
858 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
864 /* return 0 (discard) on error */