/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/uaccess.h>

#include <lttng-filter.h>

/*
 * get_char should be called with page fault handler disabled if it is expected
 * to handle user-space read.
 */
static
char get_char(struct estack_entry *reg, size_t offset)
{
	if (unlikely(offset >= reg->u.s.seq_len))
		return '\0';
	if (reg->u.s.user) {
		char c;

		/* Handle invalid access as end of string. */
		if (unlikely(!access_ok(VERIFY_READ,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		/* Handle fault (nonzero return value) as end of string. */
		if (unlikely(__copy_from_user_inatomic(&c,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		return c;
	} else {
		return reg->u.s.str[offset];
	}
}
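
/*
 * Usage sketch (illustration only): a user-space read through get_char() is
 * only valid when the caller has switched to KERNEL_DS and disabled page
 * faults, as stack_strcmp() does below:
 *
 *	old_fs = get_fs();
 *	set_fs(KERNEL_DS);
 *	pagefault_disable();
 *	c = get_char(reg, offset);	(a fault reads back as '\0')
 *	pagefault_enable();
 *	set_fs(old_fs);
 */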

/*
 * -1: wildcard found.
 * -2: unknown escape char.
 * 0: normal char.
 */
static
int parse_char(struct estack_entry *reg, char *c, size_t *offset)
{
	switch (*c) {
	case '\\':
		(*offset)++;
		*c = get_char(reg, *offset);
		switch (*c) {
		case '\\':
		case '*':
			return 0;
		default:
			return -2;
		}
	case '*':
		return -1;
	default:
		return 0;
	}
}
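
/*
 * Illustrative examples of parse_char() on a literal operand:
 *   '*'             -> -1 (wildcard: the caller matches the rest of the string)
 *   '\' then '*'    ->  0 (*c is advanced to the escaped '*', compared normally)
 *   '\' then '\'    ->  0 (*c is advanced to the escaped '\')
 *   '\' then 'n'    -> -2 (unknown escape; the caller decides how to compare)
 *   any other char  ->  0 (compared as-is)
 */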

static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;
	mm_segment_t old_fs;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (char_bx == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_bx(stack, top)->u.s.literal) {
					ret = parse_char(estack_bx(stack, top),
						&char_bx, &offset_bx);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = 1;
				break;
			}
		}
		if (estack_bx(stack, top)->u.s.literal) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}
	return diff;
}
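
/*
 * Behaviour sketch (illustration only): with the "==" comparator, a literal
 * operand "foo*" compares equal to "foo", "foobar", and any other string
 * starting with "foo", because an unescaped '*' in a literal matches the
 * remainder of the other operand. Operands that are not literals (event
 * fields, context values) never get wildcard or escape treatment.
 */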

uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}

#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

#define START_OP \
	start_pc = &bytecode->data[0]; \
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
			pc = next_pc) { \
		dbg_printk("Executing op %s (%u)\n", \
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc); \
		switch (*(filter_opcode_t *) pc) {

#define OP(name) case name

#define PO break

#define END_OP } \
	}

#else

/*
 * Dispatch-table based interpreter.
 */

#define START_OP \
	start_pc = &bytecode->data[0]; \
	pc = next_pc = start_pc; \
	if (unlikely(pc - start_pc >= bytecode->len)) \
		goto end; \
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name) \
LABEL_##name

#define PO \
	pc = next_pc; \
	goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
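
/*
 * For illustration, with the dispatch-table variant the handler of an
 * instruction roughly expands to:
 *
 *	LABEL_FILTER_OP_CAST_NOP:
 *	{
 *		next_pc += sizeof(struct cast_op);
 *		pc = next_pc;
 *		goto *dispatch[*(filter_opcode_t *) pc];
 *	}
 *
 * i.e. every PO is a computed goto straight to the next opcode's handler,
 * so there is no central dispatch loop.
 */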

/*
 * Return 0 (discard), or raise the 0x1 flag (log event).
 * Currently, other flags are kept for future extensions and have no
 * effect.
 */
uint64_t lttng_filter_interpret_bytecode(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	struct bytecode_runtime *bytecode = filter_data;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	uint64_t retval = 0;
	struct estack _stack;
	struct estack *stack = &_stack;
	register int64_t ax = 0, bx = 0;
	register int top = FILTER_STACK_EMPTY;
#ifndef INTERPRETER_USE_SWITCH
	static void *dispatch[NR_FILTER_OPS] = {
		[ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,

		[ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,

		/* binary */
		[ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
		[ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
		[ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
		[ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
		[ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
		[ FILTER_OP_RSHIFT ] = &&LABEL_FILTER_OP_RSHIFT,
		[ FILTER_OP_LSHIFT ] = &&LABEL_FILTER_OP_LSHIFT,
		[ FILTER_OP_BIN_AND ] = &&LABEL_FILTER_OP_BIN_AND,
		[ FILTER_OP_BIN_OR ] = &&LABEL_FILTER_OP_BIN_OR,
		[ FILTER_OP_BIN_XOR ] = &&LABEL_FILTER_OP_BIN_XOR,

		/* binary comparators */
		[ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
		[ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
		[ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
		[ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
		[ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
		[ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,

		/* string binary comparator */
		[ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
		[ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
		[ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
		[ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
		[ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
		[ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,

		/* s64 binary comparator */
		[ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
		[ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
		[ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
		[ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
		[ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
		[ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,

		/* double binary comparator */
		[ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
		[ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
		[ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
		[ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
		[ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
		[ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,

		/* Mixed S64-double binary comparators */
		[ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
		[ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
		[ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
		[ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
		[ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
		[ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,

		[ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
		[ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
		[ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
		[ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
		[ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
		[ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,

		/* unary */
		[ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
		[ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
		[ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
		[ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
		[ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
		[ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
		[ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
		[ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
		[ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,

		/* logical */
		[ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
		[ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,

		/* load field ref */
		[ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
		[ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
		[ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
		[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,

		/* load from immediate operand */
		[ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
		[ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
		[ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,

		/* cast */
		[ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
		[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
		[ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,

		/* get context ref */
		[ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
		[ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
		[ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
		[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,

		/* load userspace field ref */
		[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
	};
#endif /* #ifndef INTERPRETER_USE_SWITCH */

	START_OP

		OP(FILTER_OP_UNKNOWN):
		OP(FILTER_OP_LOAD_FIELD_REF):
		OP(FILTER_OP_GET_CONTEXT_REF):
#ifdef INTERPRETER_USE_SWITCH
		default:
#endif /* INTERPRETER_USE_SWITCH */
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_RETURN):
			/* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
			retval = !!estack_ax_v;
			ret = 0;
			goto end;
		/* binary */
		OP(FILTER_OP_MUL):
		OP(FILTER_OP_DIV):
		OP(FILTER_OP_MOD):
		OP(FILTER_OP_PLUS):
		OP(FILTER_OP_MINUS):
		OP(FILTER_OP_RSHIFT):
		OP(FILTER_OP_LSHIFT):
		OP(FILTER_OP_BIN_AND):
		OP(FILTER_OP_BIN_OR):
		OP(FILTER_OP_BIN_XOR):
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_EQ):
		OP(FILTER_OP_NE):
		OP(FILTER_OP_GT):
		OP(FILTER_OP_LT):
		OP(FILTER_OP_GE):
		OP(FILTER_OP_LE):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_EQ_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "==") == 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "!=") != 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">") > 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<") < 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">=") >= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<=") <= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
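
		/*
		 * The specialized *_STRING comparators above all follow the
		 * same pattern: compare the two top-of-stack operands with
		 * stack_strcmp(), pop one entry, and overwrite the new top
		 * (ax) with the 0/1 result. The generic EQ/NE/GT/LT/GE/LE
		 * opcodes are rejected earlier because the bytecode is
		 * expected to have been specialized into typed variants
		 * before it reaches the interpreter.
		 */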

		OP(FILTER_OP_EQ_S64):
		{
			int res;

			res = (estack_bx_v == estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_S64):
		{
			int res;

			res = (estack_bx_v != estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_S64):
		{
			int res;

			res = (estack_bx_v > estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_S64):
		{
			int res;

			res = (estack_bx_v < estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_S64):
		{
			int res;

			res = (estack_bx_v >= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_S64):
		{
			int res;

			res = (estack_bx_v <= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_DOUBLE):
		OP(FILTER_OP_NE_DOUBLE):
		OP(FILTER_OP_GT_DOUBLE):
		OP(FILTER_OP_LT_DOUBLE):
		OP(FILTER_OP_GE_DOUBLE):
		OP(FILTER_OP_LE_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* Mixed S64-double binary comparators */
		OP(FILTER_OP_EQ_DOUBLE_S64):
		OP(FILTER_OP_NE_DOUBLE_S64):
		OP(FILTER_OP_GT_DOUBLE_S64):
		OP(FILTER_OP_LT_DOUBLE_S64):
		OP(FILTER_OP_GE_DOUBLE_S64):
		OP(FILTER_OP_LE_DOUBLE_S64):
		OP(FILTER_OP_EQ_S64_DOUBLE):
		OP(FILTER_OP_NE_S64_DOUBLE):
		OP(FILTER_OP_GT_S64_DOUBLE):
		OP(FILTER_OP_LT_S64_DOUBLE):
		OP(FILTER_OP_GE_S64_DOUBLE):
		OP(FILTER_OP_LE_S64_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* unary */
		OP(FILTER_OP_UNARY_PLUS):
		OP(FILTER_OP_UNARY_MINUS):
		OP(FILTER_OP_UNARY_NOT):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_UNARY_PLUS_S64):
		{
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_MINUS_S64):
		{
			estack_ax_v = -estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_PLUS_DOUBLE):
		OP(FILTER_OP_UNARY_MINUS_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_S64):
		{
			estack_ax_v = !estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* logical */
		OP(FILTER_OP_AND):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is 0, skip and evaluate to 0 */
			if (unlikely(estack_ax_v == 0)) {
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
		OP(FILTER_OP_OR):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is nonzero, skip and evaluate to 1 */
			if (unlikely(estack_ax_v != 0)) {
				estack_ax_v = 1;
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
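
		/*
		 * Short-circuit evaluation: for FILTER_OP_AND, when ax is
		 * already 0 the interpreter jumps to skip_offset and the
		 * current top-of-stack value becomes the result of the
		 * logical expression; when the jump is not taken, that
		 * operand is popped so the bytecode that follows can produce
		 * the result. FILTER_OP_OR is symmetric, forcing ax to 1
		 * when the jump is taken.
		 */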

		/* load field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}
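
		/*
		 * Field references: ref->offset is an offset into
		 * filter_stack_data, the per-event buffer passed in alongside
		 * the event by the caller; the string case reads a pointer
		 * from that buffer. seq_len is set to UINT_MAX to mean
		 * "bounded only by the terminating NUL" (sequences overwrite
		 * it with their actual length below).
		 */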

		OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.str =
				*(const char **) (&filter_stack_data[ref->offset
							+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type s64\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax_v =
				((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
			dbg_printk("ref load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load from immediate operand */
		OP(FILTER_OP_LOAD_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("load string %s\n", insn->data);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = insn->data;
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 1;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			PO;
		}

		OP(FILTER_OP_LOAD_S64):
		{
			struct load_op *insn = (struct load_op *) pc;

			estack_push(stack, top, ax, bx);
			estack_ax_v = ((struct literal_numeric *) insn->data)->v;
			dbg_printk("load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			PO;
		}
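
		/*
		 * Immediate operands (string, s64) follow the struct load_op
		 * header directly in the bytecode stream, which is why
		 * next_pc advances by sizeof(struct load_op) plus the size
		 * of the operand (strlen() + 1 for strings, sizeof(struct
		 * literal_numeric) for s64).
		 */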

		OP(FILTER_OP_LOAD_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* cast */
		OP(FILTER_OP_CAST_TO_S64):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_CAST_DOUBLE_TO_S64):
		{
			BUG_ON(1);
			PO;
		}

		OP(FILTER_OP_CAST_NOP):
		{
			next_pc += sizeof(struct cast_op);
			PO;
		}

		/* get context ref */
		OP(FILTER_OP_GET_CONTEXT_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type string\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = v.str;
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type s64\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax_v = v.s64;
			dbg_printk("ref get context s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load userspace field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.user_str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 1;
			dbg_printk("ref load user string\n");
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.user_str =
				*(const char **) (&filter_stack_data[ref->offset
							+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 1;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}
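
		/*
		 * User-space strings and sequences are never dereferenced
		 * here: only the user pointer and length are stored on the
		 * stack, together with u.s.user = 1, and the actual reads
		 * happen byte by byte in get_char()/stack_strcmp() with page
		 * faults disabled.
		 */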

	END_OP
end:
	/* return 0 (discard) on error */
	if (ret)
		return 0;
	return retval;
}

#undef START_OP
#undef OP
#undef PO
#undef END_OP