/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/uaccess.h>

#include "lttng-filter.h"
28 * get_char should be called with page fault handler disabled if it is expected
29 * to handle user-space read.
32 char get_char(struct estack_entry
*reg
, size_t offset
)
34 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
39 /* Handle invalid access as end of string. */
40 if (unlikely(!access_ok(VERIFY_READ
,
41 reg
->u
.s
.user_str
+ offset
,
44 /* Handle fault (nonzero return value) as end of string. */
45 if (unlikely(__copy_from_user_inatomic(&c
,
46 reg
->u
.s
.user_str
+ offset
,
51 return reg
->u
.s
.str
[offset
];
57 * -2: unknown escape char.
61 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
66 *c
= get_char(reg
, *offset
);
82 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
84 size_t offset_bx
= 0, offset_ax
= 0;
85 int diff
, has_user
= 0;
88 if (estack_bx(stack
, top
)->u
.s
.user
89 || estack_ax(stack
, top
)->u
.s
.user
) {
99 char char_bx
, char_ax
;
101 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
102 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
104 if (unlikely(char_bx
== '\0')) {
105 if (char_ax
== '\0') {
109 if (estack_ax(stack
, top
)->u
.s
.literal
) {
110 ret
= parse_char(estack_ax(stack
, top
),
111 &char_ax
, &offset_ax
);
121 if (unlikely(char_ax
== '\0')) {
122 if (char_bx
== '\0') {
126 if (estack_bx(stack
, top
)->u
.s
.literal
) {
127 ret
= parse_char(estack_bx(stack
, top
),
128 &char_bx
, &offset_bx
);
138 if (estack_bx(stack
, top
)->u
.s
.literal
) {
139 ret
= parse_char(estack_bx(stack
, top
),
140 &char_bx
, &offset_bx
);
144 } else if (ret
== -2) {
147 /* else compare both char */
149 if (estack_ax(stack
, top
)->u
.s
.literal
) {
150 ret
= parse_char(estack_ax(stack
, top
),
151 &char_ax
, &offset_ax
);
155 } else if (ret
== -2) {
172 diff
= char_bx
- char_ax
;
185 uint64_t lttng_filter_false(void *filter_data
,
186 struct lttng_probe_ctx
*lttng_probe_ctx
,
187 const char *filter_stack_data
)
192 #ifdef INTERPRETER_USE_SWITCH
195 * Fallback for compilers that do not support taking address of labels.
199 start_pc = &bytecode->data[0]; \
200 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
202 dbg_printk("Executing op %s (%u)\n", \
203 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
204 (unsigned int) *(filter_opcode_t *) pc); \
205 switch (*(filter_opcode_t *) pc) {
207 #define OP(name) case name
217 * Dispatch-table based interpreter.
221 start_pc = &bytecode->data[0]; \
222 pc = next_pc = start_pc; \
223 if (unlikely(pc - start_pc >= bytecode->len)) \
225 goto *dispatch[*(filter_opcode_t *) pc];
232 goto *dispatch[*(filter_opcode_t *) pc];
239 * Return 0 (discard), or raise the 0x1 flag (log event).
240 * Currently, other flags are kept for future extensions and have no
243 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
244 struct lttng_probe_ctx
*lttng_probe_ctx
,
245 const char *filter_stack_data
)
247 struct bytecode_runtime
*bytecode
= filter_data
;
248 void *pc
, *next_pc
, *start_pc
;
251 struct estack _stack
;
252 struct estack
*stack
= &_stack
;
253 register int64_t ax
= 0, bx
= 0;
254 register int top
= FILTER_STACK_EMPTY
;
255 #ifndef INTERPRETER_USE_SWITCH
256 static void *dispatch
[NR_FILTER_OPS
] = {
257 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
259 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
262 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
263 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
264 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
265 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
266 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
267 [ FILTER_OP_RSHIFT
] = &&LABEL_FILTER_OP_RSHIFT
,
268 [ FILTER_OP_LSHIFT
] = &&LABEL_FILTER_OP_LSHIFT
,
269 [ FILTER_OP_BIN_AND
] = &&LABEL_FILTER_OP_BIN_AND
,
270 [ FILTER_OP_BIN_OR
] = &&LABEL_FILTER_OP_BIN_OR
,
271 [ FILTER_OP_BIN_XOR
] = &&LABEL_FILTER_OP_BIN_XOR
,
273 /* binary comparators */
274 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
275 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
276 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
277 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
278 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
279 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
281 /* string binary comparator */
282 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
283 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
284 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
285 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
286 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
287 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
289 /* s64 binary comparator */
290 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
291 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
292 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
293 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
294 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
295 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
297 /* double binary comparator */
298 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
299 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
300 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
301 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
302 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
303 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
305 /* Mixed S64-double binary comparators */
306 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
307 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
308 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
309 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
310 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
311 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
313 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
314 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
315 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
316 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
317 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
318 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
321 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
322 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
323 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
324 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
325 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
326 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
327 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
328 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
329 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
332 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
333 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
336 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
337 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
338 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
339 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
340 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
342 /* load from immediate operand */
343 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
344 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
345 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
348 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
349 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
350 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
352 /* get context ref */
353 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
354 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
355 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
356 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
358 /* load userspace field ref */
359 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
360 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
362 #endif /* #ifndef INTERPRETER_USE_SWITCH */
366 OP(FILTER_OP_UNKNOWN
):
367 OP(FILTER_OP_LOAD_FIELD_REF
):
368 OP(FILTER_OP_GET_CONTEXT_REF
):
369 #ifdef INTERPRETER_USE_SWITCH
371 #endif /* INTERPRETER_USE_SWITCH */
372 printk(KERN_WARNING
"unknown bytecode op %u\n",
373 (unsigned int) *(filter_opcode_t
*) pc
);
377 OP(FILTER_OP_RETURN
):
378 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
379 retval
= !!estack_ax_v
;
389 OP(FILTER_OP_RSHIFT
):
390 OP(FILTER_OP_LSHIFT
):
391 OP(FILTER_OP_BIN_AND
):
392 OP(FILTER_OP_BIN_OR
):
393 OP(FILTER_OP_BIN_XOR
):
394 printk(KERN_WARNING
"unsupported bytecode op %u\n",
395 (unsigned int) *(filter_opcode_t
*) pc
);
405 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
406 (unsigned int) *(filter_opcode_t
*) pc
);
410 OP(FILTER_OP_EQ_STRING
):
414 res
= (stack_strcmp(stack
, top
, "==") == 0);
415 estack_pop(stack
, top
, ax
, bx
);
417 next_pc
+= sizeof(struct binary_op
);
420 OP(FILTER_OP_NE_STRING
):
424 res
= (stack_strcmp(stack
, top
, "!=") != 0);
425 estack_pop(stack
, top
, ax
, bx
);
427 next_pc
+= sizeof(struct binary_op
);
430 OP(FILTER_OP_GT_STRING
):
434 res
= (stack_strcmp(stack
, top
, ">") > 0);
435 estack_pop(stack
, top
, ax
, bx
);
437 next_pc
+= sizeof(struct binary_op
);
440 OP(FILTER_OP_LT_STRING
):
444 res
= (stack_strcmp(stack
, top
, "<") < 0);
445 estack_pop(stack
, top
, ax
, bx
);
447 next_pc
+= sizeof(struct binary_op
);
450 OP(FILTER_OP_GE_STRING
):
454 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
455 estack_pop(stack
, top
, ax
, bx
);
457 next_pc
+= sizeof(struct binary_op
);
460 OP(FILTER_OP_LE_STRING
):
464 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
465 estack_pop(stack
, top
, ax
, bx
);
467 next_pc
+= sizeof(struct binary_op
);
471 OP(FILTER_OP_EQ_S64
):
475 res
= (estack_bx_v
== estack_ax_v
);
476 estack_pop(stack
, top
, ax
, bx
);
478 next_pc
+= sizeof(struct binary_op
);
481 OP(FILTER_OP_NE_S64
):
485 res
= (estack_bx_v
!= estack_ax_v
);
486 estack_pop(stack
, top
, ax
, bx
);
488 next_pc
+= sizeof(struct binary_op
);
491 OP(FILTER_OP_GT_S64
):
495 res
= (estack_bx_v
> estack_ax_v
);
496 estack_pop(stack
, top
, ax
, bx
);
498 next_pc
+= sizeof(struct binary_op
);
501 OP(FILTER_OP_LT_S64
):
505 res
= (estack_bx_v
< estack_ax_v
);
506 estack_pop(stack
, top
, ax
, bx
);
508 next_pc
+= sizeof(struct binary_op
);
511 OP(FILTER_OP_GE_S64
):
515 res
= (estack_bx_v
>= estack_ax_v
);
516 estack_pop(stack
, top
, ax
, bx
);
518 next_pc
+= sizeof(struct binary_op
);
521 OP(FILTER_OP_LE_S64
):
525 res
= (estack_bx_v
<= estack_ax_v
);
526 estack_pop(stack
, top
, ax
, bx
);
528 next_pc
+= sizeof(struct binary_op
);
532 OP(FILTER_OP_EQ_DOUBLE
):
533 OP(FILTER_OP_NE_DOUBLE
):
534 OP(FILTER_OP_GT_DOUBLE
):
535 OP(FILTER_OP_LT_DOUBLE
):
536 OP(FILTER_OP_GE_DOUBLE
):
537 OP(FILTER_OP_LE_DOUBLE
):
543 /* Mixed S64-double binary comparators */
544 OP(FILTER_OP_EQ_DOUBLE_S64
):
545 OP(FILTER_OP_NE_DOUBLE_S64
):
546 OP(FILTER_OP_GT_DOUBLE_S64
):
547 OP(FILTER_OP_LT_DOUBLE_S64
):
548 OP(FILTER_OP_GE_DOUBLE_S64
):
549 OP(FILTER_OP_LE_DOUBLE_S64
):
550 OP(FILTER_OP_EQ_S64_DOUBLE
):
551 OP(FILTER_OP_NE_S64_DOUBLE
):
552 OP(FILTER_OP_GT_S64_DOUBLE
):
553 OP(FILTER_OP_LT_S64_DOUBLE
):
554 OP(FILTER_OP_GE_S64_DOUBLE
):
555 OP(FILTER_OP_LE_S64_DOUBLE
):
562 OP(FILTER_OP_UNARY_PLUS
):
563 OP(FILTER_OP_UNARY_MINUS
):
564 OP(FILTER_OP_UNARY_NOT
):
565 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
566 (unsigned int) *(filter_opcode_t
*) pc
);
571 OP(FILTER_OP_UNARY_PLUS_S64
):
573 next_pc
+= sizeof(struct unary_op
);
576 OP(FILTER_OP_UNARY_MINUS_S64
):
578 estack_ax_v
= -estack_ax_v
;
579 next_pc
+= sizeof(struct unary_op
);
582 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
583 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
588 OP(FILTER_OP_UNARY_NOT_S64
):
590 estack_ax_v
= !estack_ax_v
;
591 next_pc
+= sizeof(struct unary_op
);
594 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
603 struct logical_op
*insn
= (struct logical_op
*) pc
;
605 /* If AX is 0, skip and evaluate to 0 */
606 if (unlikely(estack_ax_v
== 0)) {
607 dbg_printk("Jumping to bytecode offset %u\n",
608 (unsigned int) insn
->skip_offset
);
609 next_pc
= start_pc
+ insn
->skip_offset
;
611 /* Pop 1 when jump not taken */
612 estack_pop(stack
, top
, ax
, bx
);
613 next_pc
+= sizeof(struct logical_op
);
619 struct logical_op
*insn
= (struct logical_op
*) pc
;
621 /* If AX is nonzero, skip and evaluate to 1 */
623 if (unlikely(estack_ax_v
!= 0)) {
625 dbg_printk("Jumping to bytecode offset %u\n",
626 (unsigned int) insn
->skip_offset
);
627 next_pc
= start_pc
+ insn
->skip_offset
;
629 /* Pop 1 when jump not taken */
630 estack_pop(stack
, top
, ax
, bx
);
631 next_pc
+= sizeof(struct logical_op
);
638 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
640 struct load_op
*insn
= (struct load_op
*) pc
;
641 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
643 dbg_printk("load field ref offset %u type string\n",
645 estack_push(stack
, top
, ax
, bx
);
646 estack_ax(stack
, top
)->u
.s
.str
=
647 *(const char * const *) &filter_stack_data
[ref
->offset
];
648 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
649 dbg_printk("Filter warning: loading a NULL string.\n");
653 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
654 estack_ax(stack
, top
)->u
.s
.literal
= 0;
655 estack_ax(stack
, top
)->u
.s
.user
= 0;
656 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
657 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
661 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
663 struct load_op
*insn
= (struct load_op
*) pc
;
664 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
666 dbg_printk("load field ref offset %u type sequence\n",
668 estack_push(stack
, top
, ax
, bx
);
669 estack_ax(stack
, top
)->u
.s
.seq_len
=
670 *(unsigned long *) &filter_stack_data
[ref
->offset
];
671 estack_ax(stack
, top
)->u
.s
.str
=
672 *(const char **) (&filter_stack_data
[ref
->offset
673 + sizeof(unsigned long)]);
674 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
675 dbg_printk("Filter warning: loading a NULL sequence.\n");
679 estack_ax(stack
, top
)->u
.s
.literal
= 0;
680 estack_ax(stack
, top
)->u
.s
.user
= 0;
681 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
685 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
687 struct load_op
*insn
= (struct load_op
*) pc
;
688 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
690 dbg_printk("load field ref offset %u type s64\n",
692 estack_push(stack
, top
, ax
, bx
);
694 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
695 dbg_printk("ref load s64 %lld\n",
696 (long long) estack_ax_v
);
697 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
701 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
707 /* load from immediate operand */
708 OP(FILTER_OP_LOAD_STRING
):
710 struct load_op
*insn
= (struct load_op
*) pc
;
712 dbg_printk("load string %s\n", insn
->data
);
713 estack_push(stack
, top
, ax
, bx
);
714 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
715 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
716 estack_ax(stack
, top
)->u
.s
.literal
= 1;
717 estack_ax(stack
, top
)->u
.s
.user
= 0;
718 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
722 OP(FILTER_OP_LOAD_S64
):
724 struct load_op
*insn
= (struct load_op
*) pc
;
726 estack_push(stack
, top
, ax
, bx
);
727 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
728 dbg_printk("load s64 %lld\n",
729 (long long) estack_ax_v
);
730 next_pc
+= sizeof(struct load_op
)
731 + sizeof(struct literal_numeric
);
735 OP(FILTER_OP_LOAD_DOUBLE
):
742 OP(FILTER_OP_CAST_TO_S64
):
743 printk(KERN_WARNING
"unsupported non-specialized bytecode op %u\n",
744 (unsigned int) *(filter_opcode_t
*) pc
);
748 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
754 OP(FILTER_OP_CAST_NOP
):
756 next_pc
+= sizeof(struct cast_op
);
760 /* get context ref */
761 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
763 struct load_op
*insn
= (struct load_op
*) pc
;
764 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
765 struct lttng_ctx_field
*ctx_field
;
766 union lttng_ctx_value v
;
768 dbg_printk("get context ref offset %u type string\n",
770 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
771 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
772 estack_push(stack
, top
, ax
, bx
);
773 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
774 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
775 dbg_printk("Filter warning: loading a NULL string.\n");
779 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
780 estack_ax(stack
, top
)->u
.s
.literal
= 0;
781 estack_ax(stack
, top
)->u
.s
.user
= 0;
782 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
783 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
787 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
789 struct load_op
*insn
= (struct load_op
*) pc
;
790 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
791 struct lttng_ctx_field
*ctx_field
;
792 union lttng_ctx_value v
;
794 dbg_printk("get context ref offset %u type s64\n",
796 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
797 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
798 estack_push(stack
, top
, ax
, bx
);
800 dbg_printk("ref get context s64 %lld\n",
801 (long long) estack_ax_v
);
802 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
806 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
812 /* load userspace field ref */
813 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
815 struct load_op
*insn
= (struct load_op
*) pc
;
816 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
818 dbg_printk("load field ref offset %u type user string\n",
820 estack_push(stack
, top
, ax
, bx
);
821 estack_ax(stack
, top
)->u
.s
.str
=
822 *(const char * const *) &filter_stack_data
[ref
->offset
];
823 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
824 dbg_printk("Filter warning: loading a NULL string.\n");
828 estack_ax(stack
, top
)->u
.s
.seq_len
= UINT_MAX
;
829 estack_ax(stack
, top
)->u
.s
.literal
= 0;
830 estack_ax(stack
, top
)->u
.s
.user
= 1;
831 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
832 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
836 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
838 struct load_op
*insn
= (struct load_op
*) pc
;
839 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
841 dbg_printk("load field ref offset %u type user sequence\n",
843 estack_push(stack
, top
, ax
, bx
);
844 estack_ax(stack
, top
)->u
.s
.seq_len
=
845 *(unsigned long *) &filter_stack_data
[ref
->offset
];
846 estack_ax(stack
, top
)->u
.s
.str
=
847 *(const char **) (&filter_stack_data
[ref
->offset
848 + sizeof(unsigned long)]);
849 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
850 dbg_printk("Filter warning: loading a NULL sequence.\n");
854 estack_ax(stack
, top
)->u
.s
.literal
= 0;
855 estack_ax(stack
, top
)->u
.s
.user
= 1;
856 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
862 /* return 0 (discard) on error */