/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301 USA
 */
23 #include <linux/uaccess.h>
25 #include "lttng-filter.h"
28 * get_char should be called with page fault handler disabled if it is expected
29 * to handle user-space read.
32 char get_char(struct estack_entry
*reg
, size_t offset
)
34 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
39 /* Handle invalid access as end of string. */
40 if (unlikely(!access_ok(VERIFY_READ
,
41 reg
->u
.s
.user_str
+ offset
,
44 /* Handle fault (nonzero return value) as end of string. */
45 if (unlikely(__copy_from_user_inatomic(&c
,
46 reg
->u
.s
.user_str
+ offset
,
51 return reg
->u
.s
.str
[offset
];
57 * -2: unknown escape char.
61 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
66 *c
= get_char(reg
, *offset
);
82 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
84 size_t offset_bx
= 0, offset_ax
= 0;
85 int diff
, has_user
= 0;
88 if (estack_bx(stack
, top
)->u
.s
.user
89 || estack_ax(stack
, top
)->u
.s
.user
) {
99 char char_bx
, char_ax
;
101 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
102 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
104 if (unlikely(char_bx
== '\0')) {
105 if (char_ax
== '\0') {
109 if (estack_ax(stack
, top
)->u
.s
.literal
) {
110 ret
= parse_char(estack_ax(stack
, top
),
111 &char_ax
, &offset_ax
);
121 if (unlikely(char_ax
== '\0')) {
122 if (estack_bx(stack
, top
)->u
.s
.literal
) {
123 ret
= parse_char(estack_bx(stack
, top
),
124 &char_bx
, &offset_bx
);
133 if (estack_bx(stack
, top
)->u
.s
.literal
) {
134 ret
= parse_char(estack_bx(stack
, top
),
135 &char_bx
, &offset_bx
);
139 } else if (ret
== -2) {
142 /* else compare both char */
144 if (estack_ax(stack
, top
)->u
.s
.literal
) {
145 ret
= parse_char(estack_ax(stack
, top
),
146 &char_ax
, &offset_ax
);
150 } else if (ret
== -2) {
167 diff
= char_bx
- char_ax
;
/*
 * Trivial filter callback that always discards the event.  Used as a
 * safe fallback (e.g. when bytecode linking fails): returning 0 means
 * "discard" per the interpreter's return-value contract.
 */
uint64_t lttng_filter_false(void *filter_data,
		const char *filter_stack_data)
{
	return 0;
}
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 * The bytecode is executed by a for loop around a switch on the opcode.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter (GCC computed-goto extension):
 * each opcode jumps directly to the label of the next opcode.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	pc = next_pc = start_pc;					\
	if (unlikely(pc - start_pc >= bytecode->len))			\
		goto end;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)							\
LABEL_##name

#define PO								\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
233 * Return 0 (discard), or raise the 0x1 flag (log event).
234 * Currently, other flags are kept for future extensions and have no
237 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
238 const char *filter_stack_data
)
240 struct bytecode_runtime
*bytecode
= filter_data
;
241 void *pc
, *next_pc
, *start_pc
;
244 struct estack _stack
;
245 struct estack
*stack
= &_stack
;
246 register int64_t ax
= 0, bx
= 0;
247 register int top
= FILTER_STACK_EMPTY
;
248 #ifndef INTERPRETER_USE_SWITCH
249 static void *dispatch
[NR_FILTER_OPS
] = {
250 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
252 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
255 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
256 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
257 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
258 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
259 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
260 [ FILTER_OP_RSHIFT
] = &&LABEL_FILTER_OP_RSHIFT
,
261 [ FILTER_OP_LSHIFT
] = &&LABEL_FILTER_OP_LSHIFT
,
262 [ FILTER_OP_BIN_AND
] = &&LABEL_FILTER_OP_BIN_AND
,
263 [ FILTER_OP_BIN_OR
] = &&LABEL_FILTER_OP_BIN_OR
,
264 [ FILTER_OP_BIN_XOR
] = &&LABEL_FILTER_OP_BIN_XOR
,
266 /* binary comparators */
267 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
268 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
269 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
270 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
271 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
272 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
274 /* string binary comparator */
275 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
276 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
277 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
278 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
279 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
280 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
282 /* s64 binary comparator */
283 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
284 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
285 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
286 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
287 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
288 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
290 /* double binary comparator */
291 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
292 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
293 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
294 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
295 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
296 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
298 /* Mixed S64-double binary comparators */
299 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
300 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
301 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
302 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
303 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
304 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
306 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
307 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
308 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
309 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
310 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
311 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
314 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
315 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
316 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
317 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
318 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
319 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
320 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
321 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
322 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
325 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
326 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
329 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
330 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
331 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
332 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
333 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
335 /* load from immediate operand */
336 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
337 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
338 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
341 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
342 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
343 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
345 /* get context ref */
346 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
347 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
348 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
349 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
351 /* load userspace field ref */
352 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
353 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
355 #endif /* #ifndef INTERPRETER_USE_SWITCH */
359 OP(FILTER_OP_UNKNOWN
):
360 OP(FILTER_OP_LOAD_FIELD_REF
):
361 OP(FILTER_OP_GET_CONTEXT_REF
):
362 #ifdef INTERPRETER_USE_SWITCH
364 #endif /* INTERPRETER_USE_SWITCH */
365 printk(KERN_WARNING
"unknown bytecode op %u\n",
366 (unsigned int) *(filter_opcode_t
*) pc
);
370 OP(FILTER_OP_RETURN
):
371 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
372 retval
= !!estack_ax_v
;
382 OP(FILTER_OP_RSHIFT
):
383 OP(FILTER_OP_LSHIFT
):
384 OP(FILTER_OP_BIN_AND
):
385 OP(FILTER_OP_BIN_OR
):
386 OP(FILTER_OP_BIN_XOR
):
387 printk(KERN_WARNING
"unsupported bytecode op %u\n",
388 (unsigned int) *(filter_opcode_t
*) pc
);
398 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
399 (unsigned int) *(filter_opcode_t
*) pc
);
403 OP(FILTER_OP_EQ_STRING
):
407 res
= (stack_strcmp(stack
, top
, "==") == 0);
408 estack_pop(stack
, top
, ax
, bx
);
410 next_pc
+= sizeof(struct binary_op
);
413 OP(FILTER_OP_NE_STRING
):
417 res
= (stack_strcmp(stack
, top
, "!=") != 0);
418 estack_pop(stack
, top
, ax
, bx
);
420 next_pc
+= sizeof(struct binary_op
);
423 OP(FILTER_OP_GT_STRING
):
427 res
= (stack_strcmp(stack
, top
, ">") > 0);
428 estack_pop(stack
, top
, ax
, bx
);
430 next_pc
+= sizeof(struct binary_op
);
433 OP(FILTER_OP_LT_STRING
):
437 res
= (stack_strcmp(stack
, top
, "<") < 0);
438 estack_pop(stack
, top
, ax
, bx
);
440 next_pc
+= sizeof(struct binary_op
);
443 OP(FILTER_OP_GE_STRING
):
447 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
448 estack_pop(stack
, top
, ax
, bx
);
450 next_pc
+= sizeof(struct binary_op
);
453 OP(FILTER_OP_LE_STRING
):
457 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
458 estack_pop(stack
, top
, ax
, bx
);
460 next_pc
+= sizeof(struct binary_op
);
464 OP(FILTER_OP_EQ_S64
):
468 res
= (estack_bx_v
== estack_ax_v
);
469 estack_pop(stack
, top
, ax
, bx
);
471 next_pc
+= sizeof(struct binary_op
);
474 OP(FILTER_OP_NE_S64
):
478 res
= (estack_bx_v
!= estack_ax_v
);
479 estack_pop(stack
, top
, ax
, bx
);
481 next_pc
+= sizeof(struct binary_op
);
484 OP(FILTER_OP_GT_S64
):
488 res
= (estack_bx_v
> estack_ax_v
);
489 estack_pop(stack
, top
, ax
, bx
);
491 next_pc
+= sizeof(struct binary_op
);
494 OP(FILTER_OP_LT_S64
):
498 res
= (estack_bx_v
< estack_ax_v
);
499 estack_pop(stack
, top
, ax
, bx
);
501 next_pc
+= sizeof(struct binary_op
);
504 OP(FILTER_OP_GE_S64
):
508 res
= (estack_bx_v
>= estack_ax_v
);
509 estack_pop(stack
, top
, ax
, bx
);
511 next_pc
+= sizeof(struct binary_op
);
514 OP(FILTER_OP_LE_S64
):
518 res
= (estack_bx_v
<= estack_ax_v
);
519 estack_pop(stack
, top
, ax
, bx
);
521 next_pc
+= sizeof(struct binary_op
);
525 OP(FILTER_OP_EQ_DOUBLE
):
526 OP(FILTER_OP_NE_DOUBLE
):
527 OP(FILTER_OP_GT_DOUBLE
):
528 OP(FILTER_OP_LT_DOUBLE
):
529 OP(FILTER_OP_GE_DOUBLE
):
530 OP(FILTER_OP_LE_DOUBLE
):
536 /* Mixed S64-double binary comparators */
537 OP(FILTER_OP_EQ_DOUBLE_S64
):
538 OP(FILTER_OP_NE_DOUBLE_S64
):
539 OP(FILTER_OP_GT_DOUBLE_S64
):
540 OP(FILTER_OP_LT_DOUBLE_S64
):
541 OP(FILTER_OP_GE_DOUBLE_S64
):
542 OP(FILTER_OP_LE_DOUBLE_S64
):
543 OP(FILTER_OP_EQ_S64_DOUBLE
):
544 OP(FILTER_OP_NE_S64_DOUBLE
):
545 OP(FILTER_OP_GT_S64_DOUBLE
):
546 OP(FILTER_OP_LT_S64_DOUBLE
):
547 OP(FILTER_OP_GE_S64_DOUBLE
):
548 OP(FILTER_OP_LE_S64_DOUBLE
):
555 OP(FILTER_OP_UNARY_PLUS
):
556 OP(FILTER_OP_UNARY_MINUS
):
557 OP(FILTER_OP_UNARY_NOT
):
558 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
559 (unsigned int) *(filter_opcode_t
*) pc
);
564 OP(FILTER_OP_UNARY_PLUS_S64
):
566 next_pc
+= sizeof(struct unary_op
);
569 OP(FILTER_OP_UNARY_MINUS_S64
):
571 estack_ax_v
= -estack_ax_v
;
572 next_pc
+= sizeof(struct unary_op
);
575 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
576 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
581 OP(FILTER_OP_UNARY_NOT_S64
):
583 estack_ax_v
= !estack_ax_v
;
584 next_pc
+= sizeof(struct unary_op
);
587 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
596 struct logical_op
*insn
= (struct logical_op
*) pc
;
598 /* If AX is 0, skip and evaluate to 0 */
599 if (unlikely(estack_ax_v
== 0)) {
600 dbg_printk("Jumping to bytecode offset %u\n",
601 (unsigned int) insn
->skip_offset
);
602 next_pc
= start_pc
+ insn
->skip_offset
;
604 /* Pop 1 when jump not taken */
605 estack_pop(stack
, top
, ax
, bx
);
606 next_pc
+= sizeof(struct logical_op
);
612 struct logical_op
*insn
= (struct logical_op
*) pc
;
614 /* If AX is nonzero, skip and evaluate to 1 */
616 if (unlikely(estack_ax_v
!= 0)) {
618 dbg_printk("Jumping to bytecode offset %u\n",
619 (unsigned int) insn
->skip_offset
);
620 next_pc
= start_pc
+ insn
->skip_offset
;
622 /* Pop 1 when jump not taken */
623 estack_pop(stack
, top
, ax
, bx
);
624 next_pc
+= sizeof(struct logical_op
);
631 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
633 struct load_op
*insn
= (struct load_op
*) pc
;
634 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
636 dbg_printk("load field ref offset %u type string\n",
638 estack_push(stack
, top
, ax
, bx
);
639 estack_ax(stack
, top
)->u
.s
.str
=
640 *(const char * const *) &filter_stack_data
[ref
->offset
];
641 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
642 dbg_printk("Filter warning: loading a NULL string.\n");
646 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
647 estack_ax(stack
, top
)->u
.s
.literal
= 0;
648 estack_ax(stack
, top
)->u
.s
.user
= 0;
649 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
650 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
654 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
656 struct load_op
*insn
= (struct load_op
*) pc
;
657 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
659 dbg_printk("load field ref offset %u type sequence\n",
661 estack_push(stack
, top
, ax
, bx
);
662 estack_ax(stack
, top
)->u
.s
.seq_len
=
663 *(unsigned long *) &filter_stack_data
[ref
->offset
];
664 estack_ax(stack
, top
)->u
.s
.str
=
665 *(const char **) (&filter_stack_data
[ref
->offset
666 + sizeof(unsigned long)]);
667 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
668 dbg_printk("Filter warning: loading a NULL sequence.\n");
672 estack_ax(stack
, top
)->u
.s
.literal
= 0;
673 estack_ax(stack
, top
)->u
.s
.user
= 0;
674 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
678 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
680 struct load_op
*insn
= (struct load_op
*) pc
;
681 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
683 dbg_printk("load field ref offset %u type s64\n",
685 estack_push(stack
, top
, ax
, bx
);
687 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
688 dbg_printk("ref load s64 %lld\n",
689 (long long) estack_ax_v
);
690 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
694 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
700 /* load from immediate operand */
701 OP(FILTER_OP_LOAD_STRING
):
703 struct load_op
*insn
= (struct load_op
*) pc
;
705 dbg_printk("load string %s\n", insn
->data
);
706 estack_push(stack
, top
, ax
, bx
);
707 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
708 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
709 estack_ax(stack
, top
)->u
.s
.literal
= 1;
710 estack_ax(stack
, top
)->u
.s
.user
= 0;
711 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
715 OP(FILTER_OP_LOAD_S64
):
717 struct load_op
*insn
= (struct load_op
*) pc
;
719 estack_push(stack
, top
, ax
, bx
);
720 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
721 dbg_printk("load s64 %lld\n",
722 (long long) estack_ax_v
);
723 next_pc
+= sizeof(struct load_op
)
724 + sizeof(struct literal_numeric
);
728 OP(FILTER_OP_LOAD_DOUBLE
):
735 OP(FILTER_OP_CAST_TO_S64
):
736 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
737 (unsigned int) *(filter_opcode_t
*) pc
);
741 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
747 OP(FILTER_OP_CAST_NOP
):
749 next_pc
+= sizeof(struct cast_op
);
753 /* get context ref */
754 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
756 struct load_op
*insn
= (struct load_op
*) pc
;
757 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
758 struct lttng_ctx_field
*ctx_field
;
759 union lttng_ctx_value v
;
761 dbg_printk("get context ref offset %u type string\n",
763 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
764 ctx_field
->get_value(ctx_field
, &v
);
765 estack_push(stack
, top
, ax
, bx
);
766 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
767 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
768 dbg_printk("Filter warning: loading a NULL string.\n");
772 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
773 estack_ax(stack
, top
)->u
.s
.literal
= 0;
774 estack_ax(stack
, top
)->u
.s
.user
= 0;
775 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
776 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
780 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
782 struct load_op
*insn
= (struct load_op
*) pc
;
783 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
784 struct lttng_ctx_field
*ctx_field
;
785 union lttng_ctx_value v
;
787 dbg_printk("get context ref offset %u type s64\n",
789 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
790 ctx_field
->get_value(ctx_field
, &v
);
791 estack_push(stack
, top
, ax
, bx
);
793 dbg_printk("ref get context s64 %lld\n",
794 (long long) estack_ax_v
);
795 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
799 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
805 /* load userspace field ref */
806 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
808 struct load_op
*insn
= (struct load_op
*) pc
;
809 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
811 dbg_printk("load field ref offset %u type user string\n",
813 estack_push(stack
, top
, ax
, bx
);
814 estack_ax(stack
, top
)->u
.s
.user_str
=
815 *(const char * const *) &filter_stack_data
[ref
->offset
];
816 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
817 dbg_printk("Filter warning: loading a NULL string.\n");
821 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
822 estack_ax(stack
, top
)->u
.s
.literal
= 0;
823 estack_ax(stack
, top
)->u
.s
.user
= 1;
824 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
825 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
829 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
831 struct load_op
*insn
= (struct load_op
*) pc
;
832 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
834 dbg_printk("load field ref offset %u type user sequence\n",
836 estack_push(stack
, top
, ax
, bx
);
837 estack_ax(stack
, top
)->u
.s
.seq_len
=
838 *(unsigned long *) &filter_stack_data
[ref
->offset
];
839 estack_ax(stack
, top
)->u
.s
.user_str
=
840 *(const char **) (&filter_stack_data
[ref
->offset
841 + sizeof(unsigned long)]);
842 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
843 dbg_printk("Filter warning: loading a NULL sequence.\n");
847 estack_ax(stack
, top
)->u
.s
.literal
= 0;
848 estack_ax(stack
, top
)->u
.s
.user
= 1;
849 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
855 /* return 0 (discard) on error */