/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include <linux/uaccess.h>
25 #include <lttng-filter.h>
28 * get_char should be called with page fault handler disabled if it is expected
29 * to handle user-space read.
32 char get_char(struct estack_entry
*reg
, size_t offset
)
34 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
39 /* Handle invalid access as end of string. */
40 if (unlikely(!access_ok(VERIFY_READ
,
41 reg
->u
.s
.user_str
+ offset
,
44 /* Handle fault (nonzero return value) as end of string. */
45 if (unlikely(__copy_from_user_inatomic(&c
,
46 reg
->u
.s
.user_str
+ offset
,
51 return reg
->u
.s
.str
[offset
];
57 * -2: unknown escape char.
61 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
66 *c
= get_char(reg
, *offset
);
82 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
84 size_t offset_bx
= 0, offset_ax
= 0;
85 int diff
, has_user
= 0;
88 if (estack_bx(stack
, top
)->u
.s
.user
89 || estack_ax(stack
, top
)->u
.s
.user
) {
99 char char_bx
, char_ax
;
101 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
102 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
104 if (unlikely(char_bx
== '\0')) {
105 if (char_ax
== '\0') {
109 if (estack_ax(stack
, top
)->u
.s
.literal
) {
110 ret
= parse_char(estack_ax(stack
, top
),
111 &char_ax
, &offset_ax
);
121 if (unlikely(char_ax
== '\0')) {
122 if (estack_bx(stack
, top
)->u
.s
.literal
) {
123 ret
= parse_char(estack_bx(stack
, top
),
124 &char_bx
, &offset_bx
);
133 if (estack_bx(stack
, top
)->u
.s
.literal
) {
134 ret
= parse_char(estack_bx(stack
, top
),
135 &char_bx
, &offset_bx
);
139 } else if (ret
== -2) {
142 /* else compare both char */
144 if (estack_ax(stack
, top
)->u
.s
.literal
) {
145 ret
= parse_char(estack_ax(stack
, top
),
146 &char_ax
, &offset_ax
);
150 } else if (ret
== -2) {
167 diff
= char_bx
- char_ax
;
/*
 * Stub filter callback: always evaluates to "false" (discard the event).
 * Installed while the real bytecode is being linked/validated; all
 * parameters are intentionally unused.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 * The interpreter loop is a plain for/switch over opcodes.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter: each opcode jumps directly to the
 * next handler through a computed goto (GCC "labels as values").
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	pc = next_pc = start_pc;					\
	if (unlikely(pc - start_pc >= bytecode->len))			\
		goto end;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)							\
LABEL_##name

#define PO								\
		pc = next_pc;						\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
234 * Return 0 (discard), or raise the 0x1 flag (log event).
235 * Currently, other flags are kept for future extensions and have no
238 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
239 struct lttng_probe_ctx
*lttng_probe_ctx
,
240 const char *filter_stack_data
)
242 struct bytecode_runtime
*bytecode
= filter_data
;
243 void *pc
, *next_pc
, *start_pc
;
246 struct estack _stack
;
247 struct estack
*stack
= &_stack
;
248 register int64_t ax
= 0, bx
= 0;
249 register int top
= FILTER_STACK_EMPTY
;
250 #ifndef INTERPRETER_USE_SWITCH
251 static void *dispatch
[NR_FILTER_OPS
] = {
252 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
254 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
257 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
258 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
259 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
260 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
261 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
262 [ FILTER_OP_RSHIFT
] = &&LABEL_FILTER_OP_RSHIFT
,
263 [ FILTER_OP_LSHIFT
] = &&LABEL_FILTER_OP_LSHIFT
,
264 [ FILTER_OP_BIN_AND
] = &&LABEL_FILTER_OP_BIN_AND
,
265 [ FILTER_OP_BIN_OR
] = &&LABEL_FILTER_OP_BIN_OR
,
266 [ FILTER_OP_BIN_XOR
] = &&LABEL_FILTER_OP_BIN_XOR
,
268 /* binary comparators */
269 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
270 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
271 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
272 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
273 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
274 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
276 /* string binary comparator */
277 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
278 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
279 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
280 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
281 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
282 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
284 /* s64 binary comparator */
285 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
286 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
287 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
288 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
289 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
290 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
292 /* double binary comparator */
293 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
294 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
295 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
296 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
297 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
298 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
300 /* Mixed S64-double binary comparators */
301 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
302 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
303 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
304 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
305 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
306 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
308 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
309 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
310 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
311 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
312 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
313 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
316 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
317 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
318 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
319 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
320 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
321 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
322 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
323 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
324 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
327 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
328 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
331 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
332 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
333 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
334 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
335 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
337 /* load from immediate operand */
338 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
339 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
340 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
343 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
344 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
345 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
347 /* get context ref */
348 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
349 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
350 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
351 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
353 /* load userspace field ref */
354 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
355 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
357 #endif /* #ifndef INTERPRETER_USE_SWITCH */
361 OP(FILTER_OP_UNKNOWN
):
362 OP(FILTER_OP_LOAD_FIELD_REF
):
363 OP(FILTER_OP_GET_CONTEXT_REF
):
364 #ifdef INTERPRETER_USE_SWITCH
366 #endif /* INTERPRETER_USE_SWITCH */
367 printk(KERN_WARNING
"unknown bytecode op %u\n",
368 (unsigned int) *(filter_opcode_t
*) pc
);
372 OP(FILTER_OP_RETURN
):
373 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
374 retval
= !!estack_ax_v
;
384 OP(FILTER_OP_RSHIFT
):
385 OP(FILTER_OP_LSHIFT
):
386 OP(FILTER_OP_BIN_AND
):
387 OP(FILTER_OP_BIN_OR
):
388 OP(FILTER_OP_BIN_XOR
):
389 printk(KERN_WARNING
"unsupported bytecode op %u\n",
390 (unsigned int) *(filter_opcode_t
*) pc
);
400 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
401 (unsigned int) *(filter_opcode_t
*) pc
);
405 OP(FILTER_OP_EQ_STRING
):
409 res
= (stack_strcmp(stack
, top
, "==") == 0);
410 estack_pop(stack
, top
, ax
, bx
);
412 next_pc
+= sizeof(struct binary_op
);
415 OP(FILTER_OP_NE_STRING
):
419 res
= (stack_strcmp(stack
, top
, "!=") != 0);
420 estack_pop(stack
, top
, ax
, bx
);
422 next_pc
+= sizeof(struct binary_op
);
425 OP(FILTER_OP_GT_STRING
):
429 res
= (stack_strcmp(stack
, top
, ">") > 0);
430 estack_pop(stack
, top
, ax
, bx
);
432 next_pc
+= sizeof(struct binary_op
);
435 OP(FILTER_OP_LT_STRING
):
439 res
= (stack_strcmp(stack
, top
, "<") < 0);
440 estack_pop(stack
, top
, ax
, bx
);
442 next_pc
+= sizeof(struct binary_op
);
445 OP(FILTER_OP_GE_STRING
):
449 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
450 estack_pop(stack
, top
, ax
, bx
);
452 next_pc
+= sizeof(struct binary_op
);
455 OP(FILTER_OP_LE_STRING
):
459 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
460 estack_pop(stack
, top
, ax
, bx
);
462 next_pc
+= sizeof(struct binary_op
);
466 OP(FILTER_OP_EQ_S64
):
470 res
= (estack_bx_v
== estack_ax_v
);
471 estack_pop(stack
, top
, ax
, bx
);
473 next_pc
+= sizeof(struct binary_op
);
476 OP(FILTER_OP_NE_S64
):
480 res
= (estack_bx_v
!= estack_ax_v
);
481 estack_pop(stack
, top
, ax
, bx
);
483 next_pc
+= sizeof(struct binary_op
);
486 OP(FILTER_OP_GT_S64
):
490 res
= (estack_bx_v
> estack_ax_v
);
491 estack_pop(stack
, top
, ax
, bx
);
493 next_pc
+= sizeof(struct binary_op
);
496 OP(FILTER_OP_LT_S64
):
500 res
= (estack_bx_v
< estack_ax_v
);
501 estack_pop(stack
, top
, ax
, bx
);
503 next_pc
+= sizeof(struct binary_op
);
506 OP(FILTER_OP_GE_S64
):
510 res
= (estack_bx_v
>= estack_ax_v
);
511 estack_pop(stack
, top
, ax
, bx
);
513 next_pc
+= sizeof(struct binary_op
);
516 OP(FILTER_OP_LE_S64
):
520 res
= (estack_bx_v
<= estack_ax_v
);
521 estack_pop(stack
, top
, ax
, bx
);
523 next_pc
+= sizeof(struct binary_op
);
527 OP(FILTER_OP_EQ_DOUBLE
):
528 OP(FILTER_OP_NE_DOUBLE
):
529 OP(FILTER_OP_GT_DOUBLE
):
530 OP(FILTER_OP_LT_DOUBLE
):
531 OP(FILTER_OP_GE_DOUBLE
):
532 OP(FILTER_OP_LE_DOUBLE
):
538 /* Mixed S64-double binary comparators */
539 OP(FILTER_OP_EQ_DOUBLE_S64
):
540 OP(FILTER_OP_NE_DOUBLE_S64
):
541 OP(FILTER_OP_GT_DOUBLE_S64
):
542 OP(FILTER_OP_LT_DOUBLE_S64
):
543 OP(FILTER_OP_GE_DOUBLE_S64
):
544 OP(FILTER_OP_LE_DOUBLE_S64
):
545 OP(FILTER_OP_EQ_S64_DOUBLE
):
546 OP(FILTER_OP_NE_S64_DOUBLE
):
547 OP(FILTER_OP_GT_S64_DOUBLE
):
548 OP(FILTER_OP_LT_S64_DOUBLE
):
549 OP(FILTER_OP_GE_S64_DOUBLE
):
550 OP(FILTER_OP_LE_S64_DOUBLE
):
557 OP(FILTER_OP_UNARY_PLUS
):
558 OP(FILTER_OP_UNARY_MINUS
):
559 OP(FILTER_OP_UNARY_NOT
):
560 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
561 (unsigned int) *(filter_opcode_t
*) pc
);
566 OP(FILTER_OP_UNARY_PLUS_S64
):
568 next_pc
+= sizeof(struct unary_op
);
571 OP(FILTER_OP_UNARY_MINUS_S64
):
573 estack_ax_v
= -estack_ax_v
;
574 next_pc
+= sizeof(struct unary_op
);
577 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
578 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
583 OP(FILTER_OP_UNARY_NOT_S64
):
585 estack_ax_v
= !estack_ax_v
;
586 next_pc
+= sizeof(struct unary_op
);
589 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
598 struct logical_op
*insn
= (struct logical_op
*) pc
;
600 /* If AX is 0, skip and evaluate to 0 */
601 if (unlikely(estack_ax_v
== 0)) {
602 dbg_printk("Jumping to bytecode offset %u\n",
603 (unsigned int) insn
->skip_offset
);
604 next_pc
= start_pc
+ insn
->skip_offset
;
606 /* Pop 1 when jump not taken */
607 estack_pop(stack
, top
, ax
, bx
);
608 next_pc
+= sizeof(struct logical_op
);
614 struct logical_op
*insn
= (struct logical_op
*) pc
;
616 /* If AX is nonzero, skip and evaluate to 1 */
618 if (unlikely(estack_ax_v
!= 0)) {
620 dbg_printk("Jumping to bytecode offset %u\n",
621 (unsigned int) insn
->skip_offset
);
622 next_pc
= start_pc
+ insn
->skip_offset
;
624 /* Pop 1 when jump not taken */
625 estack_pop(stack
, top
, ax
, bx
);
626 next_pc
+= sizeof(struct logical_op
);
633 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
635 struct load_op
*insn
= (struct load_op
*) pc
;
636 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
638 dbg_printk("load field ref offset %u type string\n",
640 estack_push(stack
, top
, ax
, bx
);
641 estack_ax(stack
, top
)->u
.s
.str
=
642 *(const char * const *) &filter_stack_data
[ref
->offset
];
643 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
644 dbg_printk("Filter warning: loading a NULL string.\n");
648 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
649 estack_ax(stack
, top
)->u
.s
.literal
= 0;
650 estack_ax(stack
, top
)->u
.s
.user
= 0;
651 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
652 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
656 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
658 struct load_op
*insn
= (struct load_op
*) pc
;
659 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
661 dbg_printk("load field ref offset %u type sequence\n",
663 estack_push(stack
, top
, ax
, bx
);
664 estack_ax(stack
, top
)->u
.s
.seq_len
=
665 *(unsigned long *) &filter_stack_data
[ref
->offset
];
666 estack_ax(stack
, top
)->u
.s
.str
=
667 *(const char **) (&filter_stack_data
[ref
->offset
668 + sizeof(unsigned long)]);
669 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
670 dbg_printk("Filter warning: loading a NULL sequence.\n");
674 estack_ax(stack
, top
)->u
.s
.literal
= 0;
675 estack_ax(stack
, top
)->u
.s
.user
= 0;
676 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
680 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
682 struct load_op
*insn
= (struct load_op
*) pc
;
683 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
685 dbg_printk("load field ref offset %u type s64\n",
687 estack_push(stack
, top
, ax
, bx
);
689 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
690 dbg_printk("ref load s64 %lld\n",
691 (long long) estack_ax_v
);
692 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
696 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
702 /* load from immediate operand */
703 OP(FILTER_OP_LOAD_STRING
):
705 struct load_op
*insn
= (struct load_op
*) pc
;
707 dbg_printk("load string %s\n", insn
->data
);
708 estack_push(stack
, top
, ax
, bx
);
709 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
710 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
711 estack_ax(stack
, top
)->u
.s
.literal
= 1;
712 estack_ax(stack
, top
)->u
.s
.user
= 0;
713 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
717 OP(FILTER_OP_LOAD_S64
):
719 struct load_op
*insn
= (struct load_op
*) pc
;
721 estack_push(stack
, top
, ax
, bx
);
722 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
723 dbg_printk("load s64 %lld\n",
724 (long long) estack_ax_v
);
725 next_pc
+= sizeof(struct load_op
)
726 + sizeof(struct literal_numeric
);
730 OP(FILTER_OP_LOAD_DOUBLE
):
737 OP(FILTER_OP_CAST_TO_S64
):
738 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
739 (unsigned int) *(filter_opcode_t
*) pc
);
743 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
749 OP(FILTER_OP_CAST_NOP
):
751 next_pc
+= sizeof(struct cast_op
);
755 /* get context ref */
756 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
758 struct load_op
*insn
= (struct load_op
*) pc
;
759 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
760 struct lttng_ctx_field
*ctx_field
;
761 union lttng_ctx_value v
;
763 dbg_printk("get context ref offset %u type string\n",
765 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
766 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
767 estack_push(stack
, top
, ax
, bx
);
768 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
769 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
770 dbg_printk("Filter warning: loading a NULL string.\n");
774 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
775 estack_ax(stack
, top
)->u
.s
.literal
= 0;
776 estack_ax(stack
, top
)->u
.s
.user
= 0;
777 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
778 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
782 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
784 struct load_op
*insn
= (struct load_op
*) pc
;
785 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
786 struct lttng_ctx_field
*ctx_field
;
787 union lttng_ctx_value v
;
789 dbg_printk("get context ref offset %u type s64\n",
791 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
792 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
793 estack_push(stack
, top
, ax
, bx
);
795 dbg_printk("ref get context s64 %lld\n",
796 (long long) estack_ax_v
);
797 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
801 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
807 /* load userspace field ref */
808 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
810 struct load_op
*insn
= (struct load_op
*) pc
;
811 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
813 dbg_printk("load field ref offset %u type user string\n",
815 estack_push(stack
, top
, ax
, bx
);
816 estack_ax(stack
, top
)->u
.s
.user_str
=
817 *(const char * const *) &filter_stack_data
[ref
->offset
];
818 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
819 dbg_printk("Filter warning: loading a NULL string.\n");
823 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
824 estack_ax(stack
, top
)->u
.s
.literal
= 0;
825 estack_ax(stack
, top
)->u
.s
.user
= 1;
826 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
827 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
831 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
833 struct load_op
*insn
= (struct load_op
*) pc
;
834 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
836 dbg_printk("load field ref offset %u type user sequence\n",
838 estack_push(stack
, top
, ax
, bx
);
839 estack_ax(stack
, top
)->u
.s
.seq_len
=
840 *(unsigned long *) &filter_stack_data
[ref
->offset
];
841 estack_ax(stack
, top
)->u
.s
.user_str
=
842 *(const char **) (&filter_stack_data
[ref
->offset
843 + sizeof(unsigned long)]);
844 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
845 dbg_printk("Filter warning: loading a NULL sequence.\n");
849 estack_ax(stack
, top
)->u
.s
.literal
= 0;
850 estack_ax(stack
, top
)->u
.s
.user
= 1;
851 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
857 /* return 0 (discard) on error */