/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/uaccess.h>

#include "lttng-filter.h"

/*
 * get_char should be called with page fault handler disabled if it is expected
 * to handle user-space read.
 */
static
char get_char(struct estack_entry *reg, size_t offset)
{
	if (unlikely(offset >= reg->u.s.seq_len))
		return '\0';
	if (reg->u.s.user) {
		char c;

		/* Handle invalid access as end of string. */
		if (unlikely(!access_ok(VERIFY_READ,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		/* Handle fault (nonzero return value) as end of string. */
		if (unlikely(__copy_from_user_inatomic(&c,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		return c;
	} else {
		return reg->u.s.str[offset];
	}
}
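
/*
 * Illustrative caller pattern, mirroring stack_strcmp() below: when either
 * operand may live in user memory (reg->u.s.user set), the caller is expected
 * to bracket get_char() calls roughly as follows:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);
 *	pagefault_disable();
 *	c = get_char(reg, offset);
 *	pagefault_enable();
 *	set_fs(old_fs);
 *
 * A fault or invalid access is then reported as '\0' (end of string).
 */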

/*
 * -1: wildcard found.
 * -2: unknown escape char.
 * 0: normal char.
 */
static
int parse_char(struct estack_entry *reg, char *c, size_t *offset)
{
	switch (*c) {
	case '\\':
		(*offset)++;
		*c = get_char(reg, *offset);
		switch (*c) {
		case '\\':
		case '*':
			return 0;
		default:
			return -2;
		}
	case '*':
		return -1;
	default:
		return 0;
	}
}
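
/*
 * Sketch of the intended matching semantics (as implemented by stack_strcmp()
 * below), using hypothetical literals for illustration: in a literal operand,
 * '*' acts as a wildcard matching the remainder of the other string, while
 * "\*" and "\\" denote a literal '*' and '\'. Any other escaped character is
 * reported as an unknown escape (-2). So the literal "net*" matches
 * "netif_receive_skb", whereas "net\*" only matches the 4-character string
 * "net*".
 */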

static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;
	mm_segment_t old_fs;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal) {
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}
	return diff;
}
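
/*
 * Usage sketch (see the string comparison opcodes below): stack_strcmp()
 * follows strcmp() conventions, comparing the BX operand (pushed first) to the
 * AX operand (top of stack) and returning a negative, zero or positive value.
 * The cmp_type argument (e.g. "==", "<=") is not used by the comparison
 * itself; callers pass the operator they implement, e.g.:
 *
 *	res = (stack_strcmp(stack, top, "==") == 0);
 */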

uint64_t lttng_filter_false(void *filter_data,
		const char *filter_stack_data)
{
	return 0;
}

#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

#define START_OP						\
	start_pc = &bytecode->data[0];				\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
			pc = next_pc) {				\
		dbg_printk("Executing op %s (%u)\n",		\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}					\
	}

#else

/*
 * Dispatch-table based interpreter.
 */

#define START_OP						\
	start_pc = &bytecode->data[0];				\
	pc = next_pc = start_pc;				\
	if (unlikely(pc - start_pc >= bytecode->len))		\
		goto end;					\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)						\
LABEL_##name

#define PO							\
		pc = next_pc;					\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
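
/*
 * Illustrative expansion (computed-goto variant): an opcode handler written as
 *
 *	OP(FILTER_OP_RETURN):
 *		...
 *		PO;
 *
 * roughly becomes
 *
 *	LABEL_FILTER_OP_RETURN:
 *		...
 *		pc = next_pc;
 *		goto *dispatch[*(filter_opcode_t *) pc];
 *
 * so each handler jumps directly to the next opcode's label instead of going
 * through a switch statement.
 */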

/*
 * Return 0 (discard), or raise the 0x1 flag (log event).
 * Currently, other flags are kept for future extensions and have no
 * effect.
 */
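/*
 * Hypothetical caller sketch (not part of this file): the tracer is expected
 * to test the record flag in the returned bitmask, along the lines of
 *
 *	if (lttng_filter_interpret_bytecode(runtime, stack_data)
 *			& LTTNG_FILTER_RECORD_FLAG)
 *		record_event();
 *
 * where record_event() is a placeholder for the caller's record path.
 */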
uint64_t lttng_filter_interpret_bytecode(void *filter_data,
		const char *filter_stack_data)
{
	struct bytecode_runtime *bytecode = filter_data;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	uint64_t retval = 0;
	struct estack _stack;
	struct estack *stack = &_stack;
	register int64_t ax = 0, bx = 0;
	register int top = FILTER_STACK_EMPTY;
#ifndef INTERPRETER_USE_SWITCH
	static void *dispatch[NR_FILTER_OPS] = {
		[ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,

		[ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,

		/* binary */
		[ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
		[ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
		[ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
		[ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
		[ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
		[ FILTER_OP_RSHIFT ] = &&LABEL_FILTER_OP_RSHIFT,
		[ FILTER_OP_LSHIFT ] = &&LABEL_FILTER_OP_LSHIFT,
		[ FILTER_OP_BIN_AND ] = &&LABEL_FILTER_OP_BIN_AND,
		[ FILTER_OP_BIN_OR ] = &&LABEL_FILTER_OP_BIN_OR,
		[ FILTER_OP_BIN_XOR ] = &&LABEL_FILTER_OP_BIN_XOR,

		/* binary comparators */
		[ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
		[ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
		[ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
		[ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
		[ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
		[ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,

		/* string binary comparator */
		[ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
		[ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
		[ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
		[ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
		[ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
		[ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,

		/* s64 binary comparator */
		[ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
		[ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
		[ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
		[ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
		[ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
		[ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,

		/* double binary comparator */
		[ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
		[ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
		[ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
		[ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
		[ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
		[ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,

		/* Mixed S64-double binary comparators */
		[ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
		[ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
		[ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
		[ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
		[ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
		[ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,

		[ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
		[ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
		[ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
		[ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
		[ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
		[ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,

		/* unary */
		[ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
		[ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
		[ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
		[ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
		[ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
		[ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
		[ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
		[ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
		[ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,

		/* logical */
		[ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
		[ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,

		/* load field ref */
		[ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
		[ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
		[ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
		[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,

		/* load from immediate operand */
		[ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
		[ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
		[ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,

		/* cast */
		[ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
		[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
		[ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,

		/* get context ref */
		[ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
		[ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
		[ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
		[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,

		/* load userspace field ref */
		[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
	};
#endif /* #ifndef INTERPRETER_USE_SWITCH */

	START_OP

		OP(FILTER_OP_UNKNOWN):
		OP(FILTER_OP_LOAD_FIELD_REF):
		OP(FILTER_OP_GET_CONTEXT_REF):
#ifdef INTERPRETER_USE_SWITCH
		default:
#endif /* INTERPRETER_USE_SWITCH */
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_RETURN):
			/* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
			retval = !!estack_ax_v;
			ret = 0;
			goto end;

		/* binary */
		OP(FILTER_OP_MUL):
		OP(FILTER_OP_DIV):
		OP(FILTER_OP_MOD):
		OP(FILTER_OP_PLUS):
		OP(FILTER_OP_MINUS):
		OP(FILTER_OP_RSHIFT):
		OP(FILTER_OP_LSHIFT):
		OP(FILTER_OP_BIN_AND):
		OP(FILTER_OP_BIN_OR):
		OP(FILTER_OP_BIN_XOR):
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_EQ):
		OP(FILTER_OP_NE):
		OP(FILTER_OP_GT):
		OP(FILTER_OP_LT):
		OP(FILTER_OP_GE):
		OP(FILTER_OP_LE):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_EQ_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "==") == 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "!=") != 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">") > 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<") < 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">=") >= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<=") <= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_S64):
		{
			int res;

			res = (estack_bx_v == estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_S64):
		{
			int res;

			res = (estack_bx_v != estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_S64):
		{
			int res;

			res = (estack_bx_v > estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_S64):
		{
			int res;

			res = (estack_bx_v < estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_S64):
		{
			int res;

			res = (estack_bx_v >= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_S64):
		{
			int res;

			res = (estack_bx_v <= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_DOUBLE):
		OP(FILTER_OP_NE_DOUBLE):
		OP(FILTER_OP_GT_DOUBLE):
		OP(FILTER_OP_LT_DOUBLE):
		OP(FILTER_OP_GE_DOUBLE):
		OP(FILTER_OP_LE_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* Mixed S64-double binary comparators */
		OP(FILTER_OP_EQ_DOUBLE_S64):
		OP(FILTER_OP_NE_DOUBLE_S64):
		OP(FILTER_OP_GT_DOUBLE_S64):
		OP(FILTER_OP_LT_DOUBLE_S64):
		OP(FILTER_OP_GE_DOUBLE_S64):
		OP(FILTER_OP_LE_DOUBLE_S64):
		OP(FILTER_OP_EQ_S64_DOUBLE):
		OP(FILTER_OP_NE_S64_DOUBLE):
		OP(FILTER_OP_GT_S64_DOUBLE):
		OP(FILTER_OP_LT_S64_DOUBLE):
		OP(FILTER_OP_GE_S64_DOUBLE):
		OP(FILTER_OP_LE_S64_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* unary */
		OP(FILTER_OP_UNARY_PLUS):
		OP(FILTER_OP_UNARY_MINUS):
		OP(FILTER_OP_UNARY_NOT):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;


		OP(FILTER_OP_UNARY_PLUS_S64):
		{
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_MINUS_S64):
		{
			estack_ax_v = -estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_PLUS_DOUBLE):
		OP(FILTER_OP_UNARY_MINUS_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_S64):
		{
			estack_ax_v = !estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* logical */
		OP(FILTER_OP_AND):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is 0, skip and evaluate to 0 */
			if (unlikely(estack_ax_v == 0)) {
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
		OP(FILTER_OP_OR):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is nonzero, skip and evaluate to 1 */

			if (unlikely(estack_ax_v != 0)) {
				estack_ax_v = 1;
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
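
		/*
		 * Note on short-circuit evaluation (illustrative): for an
		 * expression such as "a == 1 && b == 2", the bytecode
		 * generator is expected to emit the left comparison, then a
		 * FILTER_OP_AND whose skip_offset points just past the
		 * bytecode of the right-hand side. When AX already holds 0,
		 * the jump is taken and AX keeps holding the final result;
		 * otherwise the left result is popped and the right-hand side
		 * produces the result.
		 */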


		/* load field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.str =
				*(const char **) (&filter_stack_data[ref->offset
						+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type s64\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax_v =
				((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
			dbg_printk("ref load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load from immediate operand */
		OP(FILTER_OP_LOAD_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("load string %s\n", insn->data);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = insn->data;
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 1;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			PO;
		}

		OP(FILTER_OP_LOAD_S64):
		{
			struct load_op *insn = (struct load_op *) pc;

			estack_push(stack, top, ax, bx);
			estack_ax_v = ((struct literal_numeric *) insn->data)->v;
			dbg_printk("load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			PO;
		}

		OP(FILTER_OP_LOAD_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* cast */
		OP(FILTER_OP_CAST_TO_S64):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_CAST_DOUBLE_TO_S64):
		{
			BUG_ON(1);
			PO;
		}

		OP(FILTER_OP_CAST_NOP):
		{
			next_pc += sizeof(struct cast_op);
			PO;
		}

		/* get context ref */
		OP(FILTER_OP_GET_CONTEXT_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type string\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, &v);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = v.str;
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type s64\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, &v);
			estack_push(stack, top, ax, bx);
			estack_ax_v = v.s64;
			dbg_printk("ref get context s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load userspace field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.user_str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 1;
			dbg_printk("ref load user string\n");
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.user_str =
				*(const char **) (&filter_stack_data[ref->offset
						+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 1;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

	END_OP
end:
	/* return 0 (discard) on error */
	if (ret)
		return 0;
	return retval;
}

#undef START_OP
#undef OP
#undef PO
#undef END_OP