/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
23 #include <linux/uaccess.h>
25 #include "lttng-filter.h"
28 * get_char should be called with page fault handler disabled if it is expected
29 * to handle user-space read.
32 char get_char(struct estack_entry
*reg
, size_t offset
)
34 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
39 /* Handle invalid access as end of string. */
40 if (unlikely(!access_ok(VERIFY_READ
,
41 reg
->u
.s
.user_str
+ offset
,
44 /* Handle fault (nonzero return value) as end of string. */
45 if (unlikely(__copy_from_user_inatomic(&c
,
46 reg
->u
.s
.user_str
+ offset
,
51 return reg
->u
.s
.str
[offset
];
57 * -2: unknown escape char.
61 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
66 *c
= get_char(reg
, *offset
);
82 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
84 size_t offset_bx
= 0, offset_ax
= 0;
85 int diff
, has_user
= 0;
88 if (estack_bx(stack
, top
)->u
.s
.user
89 || estack_ax(stack
, top
)->u
.s
.user
) {
99 char char_bx
, char_ax
;
101 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
102 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
104 if (unlikely(char_bx
== '\0')) {
105 if (char_ax
== '\0') {
109 if (estack_ax(stack
, top
)->u
.s
.literal
) {
110 ret
= parse_char(estack_ax(stack
, top
),
111 &char_ax
, &offset_ax
);
121 if (unlikely(char_ax
== '\0')) {
122 if (char_bx
== '\0') {
126 if (estack_bx(stack
, top
)->u
.s
.literal
) {
127 ret
= parse_char(estack_bx(stack
, top
),
128 &char_bx
, &offset_bx
);
138 if (estack_bx(stack
, top
)->u
.s
.literal
) {
139 ret
= parse_char(estack_bx(stack
, top
),
140 &char_bx
, &offset_bx
);
144 } else if (ret
== -2) {
147 /* else compare both char */
149 if (estack_ax(stack
, top
)->u
.s
.literal
) {
150 ret
= parse_char(estack_ax(stack
, top
),
151 &char_ax
, &offset_ax
);
155 } else if (ret
== -2) {
172 diff
= char_bx
- char_ax
;
185 uint64_t lttng_filter_false(void *filter_data
,
186 const char *filter_stack_data
)
191 #ifdef INTERPRETER_USE_SWITCH
194 * Fallback for compilers that do not support taking address of labels.
198 start_pc = &bytecode->data[0]; \
199 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
201 dbg_printk("Executing op %s (%u)\n", \
202 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
203 (unsigned int) *(filter_opcode_t *) pc); \
204 switch (*(filter_opcode_t *) pc) {
206 #define OP(name) case name
216 * Dispatch-table based interpreter.
220 start_pc = &bytecode->data[0]; \
221 pc = next_pc = start_pc; \
222 if (unlikely(pc - start_pc >= bytecode->len)) \
224 goto *dispatch[*(filter_opcode_t *) pc];
231 goto *dispatch[*(filter_opcode_t *) pc];
238 * Return 0 (discard), or raise the 0x1 flag (log event).
239 * Currently, other flags are kept for future extensions and have no
242 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
243 const char *filter_stack_data
)
245 struct bytecode_runtime
*bytecode
= filter_data
;
246 void *pc
, *next_pc
, *start_pc
;
249 struct estack _stack
;
250 struct estack
*stack
= &_stack
;
251 register int64_t ax
= 0, bx
= 0;
252 register int top
= FILTER_STACK_EMPTY
;
253 #ifndef INTERPRETER_USE_SWITCH
254 static void *dispatch
[NR_FILTER_OPS
] = {
255 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
257 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
260 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
261 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
262 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
263 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
264 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
265 [ FILTER_OP_RSHIFT
] = &&LABEL_FILTER_OP_RSHIFT
,
266 [ FILTER_OP_LSHIFT
] = &&LABEL_FILTER_OP_LSHIFT
,
267 [ FILTER_OP_BIN_AND
] = &&LABEL_FILTER_OP_BIN_AND
,
268 [ FILTER_OP_BIN_OR
] = &&LABEL_FILTER_OP_BIN_OR
,
269 [ FILTER_OP_BIN_XOR
] = &&LABEL_FILTER_OP_BIN_XOR
,
271 /* binary comparators */
272 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
273 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
274 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
275 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
276 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
277 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
279 /* string binary comparator */
280 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
281 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
282 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
283 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
284 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
285 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
287 /* s64 binary comparator */
288 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
289 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
290 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
291 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
292 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
293 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
295 /* double binary comparator */
296 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
297 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
298 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
299 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
300 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
301 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
303 /* Mixed S64-double binary comparators */
304 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
305 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
306 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
307 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
308 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
309 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
311 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
312 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
313 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
314 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
315 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
316 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
319 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
320 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
321 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
322 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
323 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
324 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
325 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
326 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
327 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
330 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
331 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
334 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
335 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
336 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
337 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
338 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
340 /* load from immediate operand */
341 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
342 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
343 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
346 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
347 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
348 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
350 /* get context ref */
351 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
352 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
353 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
354 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
356 /* load userspace field ref */
357 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
358 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
360 #endif /* #ifndef INTERPRETER_USE_SWITCH */
364 OP(FILTER_OP_UNKNOWN
):
365 OP(FILTER_OP_LOAD_FIELD_REF
):
366 OP(FILTER_OP_GET_CONTEXT_REF
):
367 #ifdef INTERPRETER_USE_SWITCH
369 #endif /* INTERPRETER_USE_SWITCH */
370 printk(KERN_WARNING
"unknown bytecode op %u\n",
371 (unsigned int) *(filter_opcode_t
*) pc
);
375 OP(FILTER_OP_RETURN
):
376 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
377 retval
= !!estack_ax_v
;
387 OP(FILTER_OP_RSHIFT
):
388 OP(FILTER_OP_LSHIFT
):
389 OP(FILTER_OP_BIN_AND
):
390 OP(FILTER_OP_BIN_OR
):
391 OP(FILTER_OP_BIN_XOR
):
392 printk(KERN_WARNING
"unsupported bytecode op %u\n",
393 (unsigned int) *(filter_opcode_t
*) pc
);
403 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
404 (unsigned int) *(filter_opcode_t
*) pc
);
408 OP(FILTER_OP_EQ_STRING
):
412 res
= (stack_strcmp(stack
, top
, "==") == 0);
413 estack_pop(stack
, top
, ax
, bx
);
415 next_pc
+= sizeof(struct binary_op
);
418 OP(FILTER_OP_NE_STRING
):
422 res
= (stack_strcmp(stack
, top
, "!=") != 0);
423 estack_pop(stack
, top
, ax
, bx
);
425 next_pc
+= sizeof(struct binary_op
);
428 OP(FILTER_OP_GT_STRING
):
432 res
= (stack_strcmp(stack
, top
, ">") > 0);
433 estack_pop(stack
, top
, ax
, bx
);
435 next_pc
+= sizeof(struct binary_op
);
438 OP(FILTER_OP_LT_STRING
):
442 res
= (stack_strcmp(stack
, top
, "<") < 0);
443 estack_pop(stack
, top
, ax
, bx
);
445 next_pc
+= sizeof(struct binary_op
);
448 OP(FILTER_OP_GE_STRING
):
452 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
453 estack_pop(stack
, top
, ax
, bx
);
455 next_pc
+= sizeof(struct binary_op
);
458 OP(FILTER_OP_LE_STRING
):
462 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
463 estack_pop(stack
, top
, ax
, bx
);
465 next_pc
+= sizeof(struct binary_op
);
469 OP(FILTER_OP_EQ_S64
):
473 res
= (estack_bx_v
== estack_ax_v
);
474 estack_pop(stack
, top
, ax
, bx
);
476 next_pc
+= sizeof(struct binary_op
);
479 OP(FILTER_OP_NE_S64
):
483 res
= (estack_bx_v
!= estack_ax_v
);
484 estack_pop(stack
, top
, ax
, bx
);
486 next_pc
+= sizeof(struct binary_op
);
489 OP(FILTER_OP_GT_S64
):
493 res
= (estack_bx_v
> estack_ax_v
);
494 estack_pop(stack
, top
, ax
, bx
);
496 next_pc
+= sizeof(struct binary_op
);
499 OP(FILTER_OP_LT_S64
):
503 res
= (estack_bx_v
< estack_ax_v
);
504 estack_pop(stack
, top
, ax
, bx
);
506 next_pc
+= sizeof(struct binary_op
);
509 OP(FILTER_OP_GE_S64
):
513 res
= (estack_bx_v
>= estack_ax_v
);
514 estack_pop(stack
, top
, ax
, bx
);
516 next_pc
+= sizeof(struct binary_op
);
519 OP(FILTER_OP_LE_S64
):
523 res
= (estack_bx_v
<= estack_ax_v
);
524 estack_pop(stack
, top
, ax
, bx
);
526 next_pc
+= sizeof(struct binary_op
);
530 OP(FILTER_OP_EQ_DOUBLE
):
531 OP(FILTER_OP_NE_DOUBLE
):
532 OP(FILTER_OP_GT_DOUBLE
):
533 OP(FILTER_OP_LT_DOUBLE
):
534 OP(FILTER_OP_GE_DOUBLE
):
535 OP(FILTER_OP_LE_DOUBLE
):
541 /* Mixed S64-double binary comparators */
542 OP(FILTER_OP_EQ_DOUBLE_S64
):
543 OP(FILTER_OP_NE_DOUBLE_S64
):
544 OP(FILTER_OP_GT_DOUBLE_S64
):
545 OP(FILTER_OP_LT_DOUBLE_S64
):
546 OP(FILTER_OP_GE_DOUBLE_S64
):
547 OP(FILTER_OP_LE_DOUBLE_S64
):
548 OP(FILTER_OP_EQ_S64_DOUBLE
):
549 OP(FILTER_OP_NE_S64_DOUBLE
):
550 OP(FILTER_OP_GT_S64_DOUBLE
):
551 OP(FILTER_OP_LT_S64_DOUBLE
):
552 OP(FILTER_OP_GE_S64_DOUBLE
):
553 OP(FILTER_OP_LE_S64_DOUBLE
):
560 OP(FILTER_OP_UNARY_PLUS
):
561 OP(FILTER_OP_UNARY_MINUS
):
562 OP(FILTER_OP_UNARY_NOT
):
563 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
564 (unsigned int) *(filter_opcode_t
*) pc
);
569 OP(FILTER_OP_UNARY_PLUS_S64
):
571 next_pc
+= sizeof(struct unary_op
);
574 OP(FILTER_OP_UNARY_MINUS_S64
):
576 estack_ax_v
= -estack_ax_v
;
577 next_pc
+= sizeof(struct unary_op
);
580 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
581 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
586 OP(FILTER_OP_UNARY_NOT_S64
):
588 estack_ax_v
= !estack_ax_v
;
589 next_pc
+= sizeof(struct unary_op
);
592 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
601 struct logical_op
*insn
= (struct logical_op
*) pc
;
603 /* If AX is 0, skip and evaluate to 0 */
604 if (unlikely(estack_ax_v
== 0)) {
605 dbg_printk("Jumping to bytecode offset %u\n",
606 (unsigned int) insn
->skip_offset
);
607 next_pc
= start_pc
+ insn
->skip_offset
;
609 /* Pop 1 when jump not taken */
610 estack_pop(stack
, top
, ax
, bx
);
611 next_pc
+= sizeof(struct logical_op
);
617 struct logical_op
*insn
= (struct logical_op
*) pc
;
619 /* If AX is nonzero, skip and evaluate to 1 */
621 if (unlikely(estack_ax_v
!= 0)) {
623 dbg_printk("Jumping to bytecode offset %u\n",
624 (unsigned int) insn
->skip_offset
);
625 next_pc
= start_pc
+ insn
->skip_offset
;
627 /* Pop 1 when jump not taken */
628 estack_pop(stack
, top
, ax
, bx
);
629 next_pc
+= sizeof(struct logical_op
);
636 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
638 struct load_op
*insn
= (struct load_op
*) pc
;
639 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
641 dbg_printk("load field ref offset %u type string\n",
643 estack_push(stack
, top
, ax
, bx
);
644 estack_ax(stack
, top
)->u
.s
.str
=
645 *(const char * const *) &filter_stack_data
[ref
->offset
];
646 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
647 dbg_printk("Filter warning: loading a NULL string.\n");
651 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
652 estack_ax(stack
, top
)->u
.s
.literal
= 0;
653 estack_ax(stack
, top
)->u
.s
.user
= 0;
654 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
655 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
659 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
661 struct load_op
*insn
= (struct load_op
*) pc
;
662 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
664 dbg_printk("load field ref offset %u type sequence\n",
666 estack_push(stack
, top
, ax
, bx
);
667 estack_ax(stack
, top
)->u
.s
.seq_len
=
668 *(unsigned long *) &filter_stack_data
[ref
->offset
];
669 estack_ax(stack
, top
)->u
.s
.str
=
670 *(const char **) (&filter_stack_data
[ref
->offset
671 + sizeof(unsigned long)]);
672 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
673 dbg_printk("Filter warning: loading a NULL sequence.\n");
677 estack_ax(stack
, top
)->u
.s
.literal
= 0;
678 estack_ax(stack
, top
)->u
.s
.user
= 0;
679 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
683 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
685 struct load_op
*insn
= (struct load_op
*) pc
;
686 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
688 dbg_printk("load field ref offset %u type s64\n",
690 estack_push(stack
, top
, ax
, bx
);
692 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
693 dbg_printk("ref load s64 %lld\n",
694 (long long) estack_ax_v
);
695 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
699 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
705 /* load from immediate operand */
706 OP(FILTER_OP_LOAD_STRING
):
708 struct load_op
*insn
= (struct load_op
*) pc
;
710 dbg_printk("load string %s\n", insn
->data
);
711 estack_push(stack
, top
, ax
, bx
);
712 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
713 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
714 estack_ax(stack
, top
)->u
.s
.literal
= 1;
715 estack_ax(stack
, top
)->u
.s
.user
= 0;
716 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
720 OP(FILTER_OP_LOAD_S64
):
722 struct load_op
*insn
= (struct load_op
*) pc
;
724 estack_push(stack
, top
, ax
, bx
);
725 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
726 dbg_printk("load s64 %lld\n",
727 (long long) estack_ax_v
);
728 next_pc
+= sizeof(struct load_op
)
729 + sizeof(struct literal_numeric
);
733 OP(FILTER_OP_LOAD_DOUBLE
):
740 OP(FILTER_OP_CAST_TO_S64
):
741 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
742 (unsigned int) *(filter_opcode_t
*) pc
);
746 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
752 OP(FILTER_OP_CAST_NOP
):
754 next_pc
+= sizeof(struct cast_op
);
758 /* get context ref */
759 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
761 struct load_op
*insn
= (struct load_op
*) pc
;
762 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
763 struct lttng_ctx_field
*ctx_field
;
764 union lttng_ctx_value v
;
766 dbg_printk("get context ref offset %u type string\n",
768 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
769 ctx_field
->get_value(ctx_field
, &v
);
770 estack_push(stack
, top
, ax
, bx
);
771 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
772 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
773 dbg_printk("Filter warning: loading a NULL string.\n");
777 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
778 estack_ax(stack
, top
)->u
.s
.literal
= 0;
779 estack_ax(stack
, top
)->u
.s
.user
= 0;
780 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
781 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
785 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
787 struct load_op
*insn
= (struct load_op
*) pc
;
788 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
789 struct lttng_ctx_field
*ctx_field
;
790 union lttng_ctx_value v
;
792 dbg_printk("get context ref offset %u type s64\n",
794 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
795 ctx_field
->get_value(ctx_field
, &v
);
796 estack_push(stack
, top
, ax
, bx
);
798 dbg_printk("ref get context s64 %lld\n",
799 (long long) estack_ax_v
);
800 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
804 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
810 /* load userspace field ref */
811 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
813 struct load_op
*insn
= (struct load_op
*) pc
;
814 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
816 dbg_printk("load field ref offset %u type user string\n",
818 estack_push(stack
, top
, ax
, bx
);
819 estack_ax(stack
, top
)->u
.s
.str
=
820 *(const char * const *) &filter_stack_data
[ref
->offset
];
821 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
822 dbg_printk("Filter warning: loading a NULL string.\n");
826 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
827 estack_ax(stack
, top
)->u
.s
.literal
= 0;
828 estack_ax(stack
, top
)->u
.s
.user
= 1;
829 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
830 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
834 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
836 struct load_op
*insn
= (struct load_op
*) pc
;
837 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
839 dbg_printk("load field ref offset %u type user sequence\n",
841 estack_push(stack
, top
, ax
, bx
);
842 estack_ax(stack
, top
)->u
.s
.seq_len
=
843 *(unsigned long *) &filter_stack_data
[ref
->offset
];
844 estack_ax(stack
, top
)->u
.s
.str
=
845 *(const char **) (&filter_stack_data
[ref
->offset
846 + sizeof(unsigned long)]);
847 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
848 dbg_printk("Filter warning: loading a NULL sequence.\n");
852 estack_ax(stack
, top
)->u
.s
.literal
= 0;
853 estack_ax(stack
, top
)->u
.s
.user
= 1;
854 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
860 /* return 0 (discard) on error */