Filter: opcode for ref loads
1 /*
2 * lttng-filter.c
3 *
4 * LTTng UST filter code.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <errno.h>
24 #include <stdio.h>
25 #include <helper.h>
26 #include <lttng/ust-events.h>
27 #include <stdint.h>
29 #include <string.h>
30 #include <inttypes.h>
31 #include <limits.h>
32 #include <usterr-signal-safe.h>
33 #include "filter-bytecode.h"
34
35 #define NR_REG 2
36
37 #ifndef min_t
38 #define min_t(type, a, b) \
39 ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
40 #endif
41
42 #ifndef likely
43 #define likely(x) __builtin_expect(!!(x), 1)
44 #endif
45
46 #ifndef unlikely
47 #define unlikely(x) __builtin_expect(!!(x), 0)
48 #endif
49
50 #ifdef DEBUG
51 #define dbg_printf(fmt, args...) printf("[debug bytecode] " fmt, ## args)
52 #else
53 #define dbg_printf(fmt, args...) \
54 do { \
55 /* do nothing but check printf format */ \
56 if (0) \
57 printf("[debug bytecode] " fmt, ## args); \
58 } while (0)
59 #endif
60
61 /* Linked bytecode */
62 struct bytecode_runtime {
63 uint16_t len;
64 char data[0];
65 };
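/*
 * The data[] array holds the linked instruction stream: a sequence of
 * variable-length instructions, each beginning with a filter_opcode_t.
 * As a hedged illustration (the actual encoding is produced by the
 * bytecode generator on the tracing control side, not in this file),
 * a filter such as "intfield > 10" could be laid out as:
 *
 *   struct load_op + struct field_ref        FILTER_OP_LOAD_FIELD_REF_S64, R0
 *   struct load_op + struct literal_numeric  FILTER_OP_LOAD_S64, R1, 10
 *   struct binary_op                         FILTER_OP_GT
 *   FILTER_OP_RETURN
 */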
66
67 enum reg_type {
68 REG_S64,
69 REG_DOUBLE,
70 REG_STRING,
71 };
72
73 /* Validation registers */
74 struct vreg {
75 enum reg_type type;
76 int literal; /* is string literal ? */
77 };
78
79 /* Execution registers */
80 struct reg {
81 enum reg_type type;
82 int64_t v;
83 double d;
84
85 const char *str;
86 size_t seq_len;
87 int literal; /* is string literal ? */
88 };
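/*
 * The interpreter uses a fixed two-register model: REG_R0 and REG_R1
 * (the register indices are expected to come from filter-bytecode.h,
 * along with the opcodes).  Load instructions fill the register
 * encoded in the instruction, binary comparisons read R0 (left
 * operand) and R1 (right operand) and leave a 0/1 result in R0 typed
 * as REG_S64, and FILTER_OP_RETURN reports the truth value held in R0.
 */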
89
90 static const char *opnames[] = {
91 [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
92
93 [ FILTER_OP_RETURN ] = "RETURN",
94
95 /* binary */
96 [ FILTER_OP_MUL ] = "MUL",
97 [ FILTER_OP_DIV ] = "DIV",
98 [ FILTER_OP_MOD ] = "MOD",
99 [ FILTER_OP_PLUS ] = "PLUS",
100 [ FILTER_OP_MINUS ] = "MINUS",
101 [ FILTER_OP_RSHIFT ] = "RSHIFT",
102 [ FILTER_OP_LSHIFT ] = "LSHIFT",
103 [ FILTER_OP_BIN_AND ] = "BIN_AND",
104 [ FILTER_OP_BIN_OR ] = "BIN_OR",
105 [ FILTER_OP_BIN_XOR ] = "BIN_XOR",
106 [ FILTER_OP_EQ ] = "EQ",
107 [ FILTER_OP_NE ] = "NE",
108 [ FILTER_OP_GT ] = "GT",
109 [ FILTER_OP_LT ] = "LT",
110 [ FILTER_OP_GE ] = "GE",
111 [ FILTER_OP_LE ] = "LE",
112
113 /* unary */
114 [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
115 [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
116 [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
117
118 /* logical */
119 [ FILTER_OP_AND ] = "AND",
120 [ FILTER_OP_OR ] = "OR",
121
122 /* load */
123 [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
124 [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
125 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
126 [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
127 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
128
129 [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
130 [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
131 [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
132 };
133
134 static
135 const char *print_op(enum filter_op op)
136 {
137 if (op >= NR_FILTER_OPS)
138 return "UNKNOWN";
139 else
140 return opnames[op];
141 }
142
143 /*
144 * -1: wildcard found.
145 * -2: unknown escape char.
146 * 0: normal char.
147 */
148
149 static
150 int parse_char(const char **p)
151 {
152 switch (**p) {
153 case '\\':
154 (*p)++;
155 switch (**p) {
156 case '\\':
157 case '*':
158 return 0;
159 default:
160 return -2;
161 }
162 case '*':
163 return -1;
164 default:
165 return 0;
166 }
167 }
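/*
 * In a literal string, a bare '*' acts as a wildcard matching the
 * remainder of the other operand, "\*" and "\\" stand for a literal
 * star and backslash, and any other character following a backslash is
 * reported as an unknown escape.  For example, the literal "100\*"
 * only matches the four characters '1' '0' '0' '*', whereas "100*"
 * matches "100", "1000", "100abc", and so on.
 */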
168
169 static
170 int reg_strcmp(struct reg reg[NR_REG], const char *cmp_type)
171 {
172 const char *p = reg[REG_R0].str, *q = reg[REG_R1].str;
173 int ret;
174 int diff;
175
176 for (;;) {
177 int escaped_r0 = 0;
178
179 if (unlikely(p - reg[REG_R0].str > reg[REG_R0].seq_len || *p == '\0')) {
180 if (q - reg[REG_R1].str > reg[REG_R1].seq_len || *q == '\0')
181 diff = 0;
182 else
183 diff = -1;
184 break;
185 }
186 if (unlikely(q - reg[REG_R1].str > reg[REG_R1].seq_len || *q == '\0')) {
187 if (p - reg[REG_R0].str > reg[REG_R0].seq_len || *p == '\0')
188 diff = 0;
189 else
190 diff = 1;
191 break;
192 }
193 if (reg[REG_R0].literal) {
194 ret = parse_char(&p);
195 if (ret == -1) {
196 return 0;
197 } else if (ret == -2) {
198 escaped_r0 = 1;
199 }
200 /* else compare both char */
201 }
202 if (reg[REG_R1].literal) {
203 ret = parse_char(&q);
204 if (ret == -1) {
205 return 0;
206 } else if (ret == -2) {
207 if (!escaped_r0)
208 return -1;
209 } else {
210 if (escaped_r0)
211 return 1;
212 }
213 } else {
214 if (escaped_r0)
215 return 1;
216 }
217 diff = *p - *q;
218 if (diff != 0)
219 break;
220 p++;
221 q++;
222 }
223 return diff;
224 }
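/*
 * reg_strcmp() follows the usual strcmp() convention: it returns a
 * negative, zero or positive value and the comparison opcodes below
 * only test the sign.  A hedged example of how FILTER_OP_EQ ends up
 * using it: with the literal "lib*" loaded into R0 and the event field
 * "liblttng" loaded into R1, the wildcard short-circuits the walk,
 * reg_strcmp() returns 0, and EQ stores 1 into R0.  The cmp_type
 * argument is currently unused; it only documents which operator
 * requested the comparison.
 */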
225
226 static
227 int lttng_filter_false(void *filter_data,
228 const char *filter_stack_data)
229 {
230 return 0;
231 }
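/*
 * lttng_filter_false() is the fallback installed by the link code when
 * linking or validation fails: every event is then discarded instead
 * of being traced with a half-linked filter.
 */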
232
233 static
234 int lttng_filter_interpret_bytecode(void *filter_data,
235 const char *filter_stack_data)
236 {
237 struct bytecode_runtime *bytecode = filter_data;
238 void *pc, *next_pc, *start_pc;
239 int ret = -EINVAL;
240 int retval = 0;
241 struct reg reg[NR_REG];
242 int i;
243
244 for (i = 0; i < NR_REG; i++) {
245 reg[i].type = REG_S64;
246 reg[i].v = 0;
247 reg[i].d = 0.0;
248 reg[i].str = NULL;
249 reg[i].seq_len = 0;
250 reg[i].literal = 0;
251 }
252
253 start_pc = &bytecode->data[0];
254 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
255 pc = next_pc) {
256 dbg_printf("Executing op %s (%u)\n",
257 print_op((unsigned int) *(filter_opcode_t *) pc),
258 (unsigned int) *(filter_opcode_t *) pc);
259 switch (*(filter_opcode_t *) pc) {
260 case FILTER_OP_UNKNOWN:
261 case FILTER_OP_LOAD_FIELD_REF:
262 default:
263 ERR("unknown bytecode op %u\n",
264 (unsigned int) *(filter_opcode_t *) pc);
265 ret = -EINVAL;
266 goto end;
267
268 case FILTER_OP_RETURN:
269 		retval = !!reg[REG_R0].v;
270 ret = 0;
271 goto end;
272
273 /* binary */
274 case FILTER_OP_MUL:
275 case FILTER_OP_DIV:
276 case FILTER_OP_MOD:
277 case FILTER_OP_PLUS:
278 case FILTER_OP_MINUS:
279 case FILTER_OP_RSHIFT:
280 case FILTER_OP_LSHIFT:
281 case FILTER_OP_BIN_AND:
282 case FILTER_OP_BIN_OR:
283 case FILTER_OP_BIN_XOR:
284 ERR("unsupported bytecode op %u\n",
285 (unsigned int) *(filter_opcode_t *) pc);
286 ret = -EINVAL;
287 goto end;
288
289 case FILTER_OP_EQ:
290 {
291 switch (reg[REG_R0].type) {
292 default:
293 ERR("unknown register type\n");
294 ret = -EINVAL;
295 goto end;
296
297 case REG_STRING:
298 reg[REG_R0].v = (reg_strcmp(reg, "==") == 0);
299 break;
300 case REG_S64:
301 switch (reg[REG_R1].type) {
302 default:
303 ERR("unknown register type\n");
304 ret = -EINVAL;
305 goto end;
306
307 case REG_S64:
308 reg[REG_R0].v = (reg[REG_R0].v == reg[REG_R1].v);
309 break;
310 case REG_DOUBLE:
311 reg[REG_R0].v = (reg[REG_R0].v == reg[REG_R1].d);
312 break;
313 }
314 break;
315 case REG_DOUBLE:
316 switch (reg[REG_R1].type) {
317 default:
318 ERR("unknown register type\n");
319 ret = -EINVAL;
320 goto end;
321
322 case REG_S64:
323 reg[REG_R0].v = (reg[REG_R0].d == reg[REG_R1].v);
324 break;
325 case REG_DOUBLE:
326 reg[REG_R0].v = (reg[REG_R0].d == reg[REG_R1].d);
327 break;
328 }
329 break;
330 }
331 reg[REG_R0].type = REG_S64;
332 next_pc += sizeof(struct binary_op);
333 break;
334 }
335 case FILTER_OP_NE:
336 {
337 switch (reg[REG_R0].type) {
338 default:
339 ERR("unknown register type\n");
340 ret = -EINVAL;
341 goto end;
342
343 case REG_STRING:
344 reg[REG_R0].v = (reg_strcmp(reg, "!=") != 0);
345 break;
346 case REG_S64:
347 switch (reg[REG_R1].type) {
348 default:
349 ERR("unknown register type\n");
350 ret = -EINVAL;
351 goto end;
352
353 case REG_S64:
354 reg[REG_R0].v = (reg[REG_R0].v != reg[REG_R1].v);
355 break;
356 case REG_DOUBLE:
357 reg[REG_R0].v = (reg[REG_R0].v != reg[REG_R1].d);
358 break;
359 }
360 break;
361 case REG_DOUBLE:
362 switch (reg[REG_R1].type) {
363 default:
364 ERR("unknown register type\n");
365 ret = -EINVAL;
366 goto end;
367
368 case REG_S64:
369 reg[REG_R0].v = (reg[REG_R0].d != reg[REG_R1].v);
370 break;
371 case REG_DOUBLE:
372 reg[REG_R0].v = (reg[REG_R0].d != reg[REG_R1].d);
373 break;
374 }
375 break;
376 }
377 reg[REG_R0].type = REG_S64;
378 next_pc += sizeof(struct binary_op);
379 break;
380 }
381 case FILTER_OP_GT:
382 {
383 switch (reg[REG_R0].type) {
384 default:
385 ERR("unknown register type\n");
386 ret = -EINVAL;
387 goto end;
388
389 case REG_STRING:
390 reg[REG_R0].v = (reg_strcmp(reg, ">") > 0);
391 break;
392 case REG_S64:
393 switch (reg[REG_R1].type) {
394 default:
395 ERR("unknown register type\n");
396 ret = -EINVAL;
397 goto end;
398
399 case REG_S64:
400 reg[REG_R0].v = (reg[REG_R0].v > reg[REG_R1].v);
401 break;
402 case REG_DOUBLE:
403 reg[REG_R0].v = (reg[REG_R0].v > reg[REG_R1].d);
404 break;
405 }
406 break;
407 case REG_DOUBLE:
408 switch (reg[REG_R1].type) {
409 default:
410 ERR("unknown register type\n");
411 ret = -EINVAL;
412 goto end;
413
414 case REG_S64:
415 reg[REG_R0].v = (reg[REG_R0].d > reg[REG_R1].v);
416 break;
417 case REG_DOUBLE:
418 reg[REG_R0].v = (reg[REG_R0].d > reg[REG_R1].d);
419 break;
420 }
421 break;
422 }
423 reg[REG_R0].type = REG_S64;
424 next_pc += sizeof(struct binary_op);
425 break;
426 }
427 case FILTER_OP_LT:
428 {
429 switch (reg[REG_R0].type) {
430 default:
431 ERR("unknown register type\n");
432 ret = -EINVAL;
433 goto end;
434
435 case REG_STRING:
436 reg[REG_R0].v = (reg_strcmp(reg, "<") < 0);
437 break;
438 case REG_S64:
439 switch (reg[REG_R1].type) {
440 default:
441 ERR("unknown register type\n");
442 ret = -EINVAL;
443 goto end;
444
445 case REG_S64:
446 reg[REG_R0].v = (reg[REG_R0].v < reg[REG_R1].v);
447 break;
448 case REG_DOUBLE:
449 reg[REG_R0].v = (reg[REG_R0].v < reg[REG_R1].d);
450 break;
451 }
452 break;
453 case REG_DOUBLE:
454 switch (reg[REG_R1].type) {
455 default:
456 ERR("unknown register type\n");
457 ret = -EINVAL;
458 goto end;
459
460 case REG_S64:
461 reg[REG_R0].v = (reg[REG_R0].d < reg[REG_R1].v);
462 break;
463 case REG_DOUBLE:
464 reg[REG_R0].v = (reg[REG_R0].d < reg[REG_R1].d);
465 break;
466 }
467 break;
468 }
469 reg[REG_R0].type = REG_S64;
470 next_pc += sizeof(struct binary_op);
471 break;
472 }
473 case FILTER_OP_GE:
474 {
475 switch (reg[REG_R0].type) {
476 default:
477 ERR("unknown register type\n");
478 ret = -EINVAL;
479 goto end;
480
481 case REG_STRING:
482 reg[REG_R0].v = (reg_strcmp(reg, ">=") >= 0);
483 break;
484 case REG_S64:
485 switch (reg[REG_R1].type) {
486 default:
487 ERR("unknown register type\n");
488 ret = -EINVAL;
489 goto end;
490
491 case REG_S64:
492 reg[REG_R0].v = (reg[REG_R0].v >= reg[REG_R1].v);
493 break;
494 case REG_DOUBLE:
495 reg[REG_R0].v = (reg[REG_R0].v >= reg[REG_R1].d);
496 break;
497 }
498 break;
499 case REG_DOUBLE:
500 switch (reg[REG_R1].type) {
501 default:
502 ERR("unknown register type\n");
503 ret = -EINVAL;
504 goto end;
505
506 case REG_S64:
507 reg[REG_R0].v = (reg[REG_R0].d >= reg[REG_R1].v);
508 break;
509 case REG_DOUBLE:
510 reg[REG_R0].v = (reg[REG_R0].d >= reg[REG_R1].d);
511 break;
512 }
513 break;
514 }
515 reg[REG_R0].type = REG_S64;
516 next_pc += sizeof(struct binary_op);
517 break;
518 }
519 case FILTER_OP_LE:
520 {
521 switch (reg[REG_R0].type) {
522 default:
523 ERR("unknown register type\n");
524 ret = -EINVAL;
525 goto end;
526
527 case REG_STRING:
528 reg[REG_R0].v = (reg_strcmp(reg, "<=") <= 0);
529 break;
530 case REG_S64:
531 switch (reg[REG_R1].type) {
532 default:
533 ERR("unknown register type\n");
534 ret = -EINVAL;
535 goto end;
536
537 case REG_S64:
538 reg[REG_R0].v = (reg[REG_R0].v <= reg[REG_R1].v);
539 break;
540 case REG_DOUBLE:
541 reg[REG_R0].v = (reg[REG_R0].v <= reg[REG_R1].d);
542 break;
543 }
544 break;
545 case REG_DOUBLE:
546 switch (reg[REG_R1].type) {
547 default:
548 ERR("unknown register type\n");
549 ret = -EINVAL;
550 goto end;
551
552 case REG_S64:
553 reg[REG_R0].v = (reg[REG_R0].d <= reg[REG_R1].v);
554 break;
555 case REG_DOUBLE:
556 reg[REG_R0].v = (reg[REG_R0].d <= reg[REG_R1].d);
557 break;
558 }
559 break;
560 }
561 reg[REG_R0].type = REG_S64;
562 next_pc += sizeof(struct binary_op);
563 break;
564 }
565
566 /* unary */
567 case FILTER_OP_UNARY_PLUS:
568 {
569 next_pc += sizeof(struct unary_op);
570 break;
571 }
572 case FILTER_OP_UNARY_MINUS:
573 {
574 struct unary_op *insn = (struct unary_op *) pc;
575
576 switch (reg[insn->reg].type) {
577 default:
578 ERR("unknown register type\n");
579 ret = -EINVAL;
580 goto end;
581
582 case REG_STRING:
583 ERR("Unary minus can only be applied to numeric or floating point registers\n");
584 ret = -EINVAL;
585 goto end;
586 case REG_S64:
587 reg[insn->reg].v = -reg[insn->reg].v;
588 break;
589 case REG_DOUBLE:
590 reg[insn->reg].d = -reg[insn->reg].d;
591 break;
592 }
593 next_pc += sizeof(struct unary_op);
594 break;
595 }
596 case FILTER_OP_UNARY_NOT:
597 {
598 struct unary_op *insn = (struct unary_op *) pc;
599
600 switch (reg[insn->reg].type) {
601 default:
602 ERR("unknown register type\n");
603 ret = -EINVAL;
604 goto end;
605
606 case REG_STRING:
607 ERR("Unary not can only be applied to numeric or floating point registers\n");
608 ret = -EINVAL;
609 goto end;
610 case REG_S64:
611 reg[insn->reg].v = !reg[insn->reg].v;
612 break;
613 case REG_DOUBLE:
614 reg[insn->reg].d = !reg[insn->reg].d;
615 break;
616 }
618 next_pc += sizeof(struct unary_op);
619 break;
620 }
621 /* logical */
622 case FILTER_OP_AND:
623 {
624 struct logical_op *insn = (struct logical_op *) pc;
625
626 /* If REG_R0 is 0, skip and evaluate to 0 */
627 if ((reg[REG_R0].type == REG_S64 && reg[REG_R0].v == 0)
628 || (reg[REG_R0].type == REG_DOUBLE && reg[REG_R0].d == 0.0)) {
629 dbg_printf("Jumping to bytecode offset %u\n",
630 (unsigned int) insn->skip_offset);
631 next_pc = start_pc + insn->skip_offset;
632 } else {
633 next_pc += sizeof(struct logical_op);
634 }
635 break;
636 }
637 case FILTER_OP_OR:
638 {
639 struct logical_op *insn = (struct logical_op *) pc;
640
641 /* If REG_R0 is nonzero, skip and evaluate to 1 */
642
643 if ((reg[REG_R0].type == REG_S64 && reg[REG_R0].v != 0)
644 || (reg[REG_R0].type == REG_DOUBLE && reg[REG_R0].d != 0.0)) {
645 reg[REG_R0].v = 1;
646 dbg_printf("Jumping to bytecode offset %u\n",
647 (unsigned int) insn->skip_offset);
648 next_pc = start_pc + insn->skip_offset;
649 } else {
650 next_pc += sizeof(struct logical_op);
651 }
652 break;
653 }
654
655 /* load */
656 case FILTER_OP_LOAD_FIELD_REF_STRING:
657 {
658 struct load_op *insn = (struct load_op *) pc;
659 struct field_ref *ref = (struct field_ref *) insn->data;
660
661 dbg_printf("load field ref offset %u type string\n",
662 ref->offset);
663 reg[insn->reg].str =
664 *(const char * const *) &filter_stack_data[ref->offset];
665 reg[insn->reg].type = REG_STRING;
666 reg[insn->reg].seq_len = UINT_MAX;
667 reg[insn->reg].literal = 0;
668 dbg_printf("ref load string %s\n", reg[insn->reg].str);
669 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
670 break;
671 }
672
673 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
674 {
675 struct load_op *insn = (struct load_op *) pc;
676 struct field_ref *ref = (struct field_ref *) insn->data;
677
678 dbg_printf("load field ref offset %u type sequence\n",
679 ref->offset);
680 reg[insn->reg].seq_len =
681 *(unsigned long *) &filter_stack_data[ref->offset];
682 reg[insn->reg].str =
683 *(const char **) (&filter_stack_data[ref->offset
684 + sizeof(unsigned long)]);
685 reg[insn->reg].type = REG_STRING;
686 reg[insn->reg].literal = 0;
687 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
688 break;
689 }
690
691 case FILTER_OP_LOAD_FIELD_REF_S64:
692 {
693 struct load_op *insn = (struct load_op *) pc;
694 struct field_ref *ref = (struct field_ref *) insn->data;
695
696 dbg_printf("load field ref offset %u type s64\n",
697 ref->offset);
698 memcpy(&reg[insn->reg].v, &filter_stack_data[ref->offset],
699 sizeof(struct literal_numeric));
700 reg[insn->reg].type = REG_S64;
701 reg[insn->reg].literal = 0;
702 dbg_printf("ref load s64 %" PRIi64 "\n", reg[insn->reg].v);
703 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
704 break;
705 }
706
707 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
708 {
709 struct load_op *insn = (struct load_op *) pc;
710 struct field_ref *ref = (struct field_ref *) insn->data;
711
712 dbg_printf("load field ref offset %u type double\n",
713 ref->offset);
714 memcpy(&reg[insn->reg].d, &filter_stack_data[ref->offset],
715 sizeof(struct literal_double));
716 reg[insn->reg].type = REG_DOUBLE;
717 reg[insn->reg].literal = 0;
718 dbg_printf("ref load double %g\n", reg[insn->reg].d);
719 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
720 break;
721 }
722
723 case FILTER_OP_LOAD_STRING:
724 {
725 struct load_op *insn = (struct load_op *) pc;
726
727 dbg_printf("load string %s\n", insn->data);
728 reg[insn->reg].str = insn->data;
729 reg[insn->reg].type = REG_STRING;
730 reg[insn->reg].seq_len = UINT_MAX;
731 reg[insn->reg].literal = 1;
732 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
733 break;
734 }
735
736 case FILTER_OP_LOAD_S64:
737 {
738 struct load_op *insn = (struct load_op *) pc;
739
740 memcpy(&reg[insn->reg].v, insn->data,
741 sizeof(struct literal_numeric));
742 dbg_printf("load s64 %" PRIi64 "\n", reg[insn->reg].v);
743 reg[insn->reg].type = REG_S64;
744 next_pc += sizeof(struct load_op)
745 + sizeof(struct literal_numeric);
746 break;
747 }
748
749 case FILTER_OP_LOAD_DOUBLE:
750 {
751 struct load_op *insn = (struct load_op *) pc;
752
753 memcpy(&reg[insn->reg].d, insn->data,
754 sizeof(struct literal_double));
755 		dbg_printf("load double %g\n", reg[insn->reg].d);
756 reg[insn->reg].type = REG_DOUBLE;
757 next_pc += sizeof(struct load_op)
758 + sizeof(struct literal_double);
759 break;
760 }
761 }
762 }
763 end:
764 /* return 0 (discard) on error */
765 if (ret)
766 return 0;
767 return retval;
768 }
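/*
 * The interpreter above is a simple switch-dispatched virtual machine:
 * it walks the instruction stream once (validation forbids backward
 * jumps), evaluates into the two registers, and returns 1 to record
 * the event or 0 to discard it; any error is mapped to "discard".
 * Hypothetical caller sketch -- the real call site lives in the
 * tracepoint probe path, outside this file, and stack_data is an
 * assumed name for the buffer of packed field values:
 *
 *	if (event->filter) {
 *		if (!event->filter(event->filter_data, stack_data))
 *			return;		// discard this event
 *	}
 */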
769
770 static
771 int bin_op_compare_check(struct vreg reg[NR_REG], const char *str)
772 {
773 switch (reg[REG_R0].type) {
774 default:
775 goto error_unknown;
776
777 case REG_STRING:
778 switch (reg[REG_R1].type) {
779 default:
780 goto error_unknown;
781
782 case REG_STRING:
783 break;
784 case REG_S64:
785 case REG_DOUBLE:
786 goto error_mismatch;
787 }
788 break;
789 case REG_S64:
790 case REG_DOUBLE:
791 switch (reg[REG_R1].type) {
792 default:
793 goto error_unknown;
794
795 case REG_STRING:
796 goto error_mismatch;
797
798 case REG_S64:
799 case REG_DOUBLE:
800 break;
801 }
802 break;
803 }
804 return 0;
805
806 error_unknown:
807
808 return -EINVAL;
809 error_mismatch:
810 ERR("type mismatch for '%s' binary operator\n", str);
811 return -EINVAL;
812 }
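/*
 * Type matrix enforced above for the comparison operators:
 *
 *                     R1 string      R1 s64 / double
 *   R0 string         allowed        mismatch (-EINVAL)
 *   R0 s64 / double   mismatch       allowed
 *
 * so the interpreter should never see a string compared against a
 * number.
 */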
813
814 static
815 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
816 {
817 void *pc, *next_pc, *start_pc;
818 int ret = -EINVAL;
819 struct vreg reg[NR_REG];
820 int i;
821
822 for (i = 0; i < NR_REG; i++) {
823 reg[i].type = REG_S64;
824 reg[i].literal = 0;
825 }
826
827 start_pc = &bytecode->data[0];
828 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
829 pc = next_pc) {
830 if (unlikely(pc >= start_pc + bytecode->len)) {
831 ERR("filter bytecode overflow\n");
832 ret = -EINVAL;
833 goto end;
834 }
835 dbg_printf("Validating op %s (%u)\n",
836 print_op((unsigned int) *(filter_opcode_t *) pc),
837 (unsigned int) *(filter_opcode_t *) pc);
838 switch (*(filter_opcode_t *) pc) {
839 case FILTER_OP_UNKNOWN:
840 default:
841 ERR("unknown bytecode op %u\n",
842 (unsigned int) *(filter_opcode_t *) pc);
843 ret = -EINVAL;
844 goto end;
845
846 case FILTER_OP_RETURN:
847 ret = 0;
848 goto end;
849
850 /* binary */
851 case FILTER_OP_MUL:
852 case FILTER_OP_DIV:
853 case FILTER_OP_MOD:
854 case FILTER_OP_PLUS:
855 case FILTER_OP_MINUS:
856 case FILTER_OP_RSHIFT:
857 case FILTER_OP_LSHIFT:
858 case FILTER_OP_BIN_AND:
859 case FILTER_OP_BIN_OR:
860 case FILTER_OP_BIN_XOR:
861 ERR("unsupported bytecode op %u\n",
862 (unsigned int) *(filter_opcode_t *) pc);
863 ret = -EINVAL;
864 goto end;
865
866 case FILTER_OP_EQ:
867 {
868 ret = bin_op_compare_check(reg, "==");
869 if (ret)
870 goto end;
871 reg[REG_R0].type = REG_S64;
872 next_pc += sizeof(struct binary_op);
873 break;
874 }
875 case FILTER_OP_NE:
876 {
877 ret = bin_op_compare_check(reg, "!=");
878 if (ret)
879 goto end;
880 reg[REG_R0].type = REG_S64;
881 next_pc += sizeof(struct binary_op);
882 break;
883 }
884 case FILTER_OP_GT:
885 {
886 ret = bin_op_compare_check(reg, ">");
887 if (ret)
888 goto end;
889 reg[REG_R0].type = REG_S64;
890 next_pc += sizeof(struct binary_op);
891 break;
892 }
893 case FILTER_OP_LT:
894 {
895 ret = bin_op_compare_check(reg, "<");
896 if (ret)
897 goto end;
898 reg[REG_R0].type = REG_S64;
899 next_pc += sizeof(struct binary_op);
900 break;
901 }
902 case FILTER_OP_GE:
903 {
904 ret = bin_op_compare_check(reg, ">=");
905 if (ret)
906 goto end;
907 reg[REG_R0].type = REG_S64;
908 next_pc += sizeof(struct binary_op);
909 break;
910 }
911 case FILTER_OP_LE:
912 {
913 ret = bin_op_compare_check(reg, "<=");
914 if (ret)
915 goto end;
916 reg[REG_R0].type = REG_S64;
917 next_pc += sizeof(struct binary_op);
918 break;
919 }
920
921 /* unary */
922 case FILTER_OP_UNARY_PLUS:
923 case FILTER_OP_UNARY_MINUS:
924 case FILTER_OP_UNARY_NOT:
925 {
926 struct unary_op *insn = (struct unary_op *) pc;
927
928 if (unlikely(insn->reg >= REG_ERROR)) {
929 ERR("invalid register %u\n",
930 (unsigned int) insn->reg);
931 ret = -EINVAL;
932 goto end;
933 }
934 switch (reg[insn->reg].type) {
935 default:
936 ERR("unknown register type\n");
937 ret = -EINVAL;
938 goto end;
939
940 case REG_STRING:
941 ERR("Unary op can only be applied to numeric or floating point registers\n");
942 ret = -EINVAL;
943 goto end;
944 case REG_S64:
945 break;
946 case REG_DOUBLE:
947 break;
948 }
949 next_pc += sizeof(struct unary_op);
950 break;
951 }
952 /* logical */
953 case FILTER_OP_AND:
954 case FILTER_OP_OR:
955 {
956 struct logical_op *insn = (struct logical_op *) pc;
957
958 if (unlikely(reg[REG_R0].type == REG_STRING)) {
959 			ERR("Logical operators 'and'/'or' can only be applied to numeric or floating point registers\n");
960 ret = -EINVAL;
961 goto end;
962 }
963
964 dbg_printf("Validate jumping to bytecode offset %u\n",
965 (unsigned int) insn->skip_offset);
966 if (unlikely(start_pc + insn->skip_offset <= pc)) {
967 ERR("Loops are not allowed in bytecode\n");
968 ret = -EINVAL;
969 goto end;
970 }
971 next_pc += sizeof(struct logical_op);
972 break;
973 }
974
975 /* load */
976 case FILTER_OP_LOAD_FIELD_REF:
977 {
978 ERR("Unknown field ref type\n");
979 ret = -EINVAL;
980 goto end;
981 }
982 case FILTER_OP_LOAD_FIELD_REF_STRING:
983 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
984 {
985 struct load_op *insn = (struct load_op *) pc;
986 struct field_ref *ref = (struct field_ref *) insn->data;
987
988 if (unlikely(insn->reg >= REG_ERROR)) {
989 ERR("invalid register %u\n",
990 (unsigned int) insn->reg);
991 ret = -EINVAL;
992 goto end;
993 }
994 dbg_printf("Validate load field ref offset %u type string\n",
995 ref->offset);
996 reg[insn->reg].type = REG_STRING;
997 reg[insn->reg].literal = 0;
998 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
999 break;
1000 }
1001 case FILTER_OP_LOAD_FIELD_REF_S64:
1002 {
1003 struct load_op *insn = (struct load_op *) pc;
1004 struct field_ref *ref = (struct field_ref *) insn->data;
1005
1006 if (unlikely(insn->reg >= REG_ERROR)) {
1007 ERR("invalid register %u\n",
1008 (unsigned int) insn->reg);
1009 ret = -EINVAL;
1010 goto end;
1011 }
1012 dbg_printf("Validate load field ref offset %u type s64\n",
1013 ref->offset);
1014 reg[insn->reg].type = REG_S64;
1015 reg[insn->reg].literal = 0;
1016 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1017 break;
1018 }
1019 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1020 {
1021 struct load_op *insn = (struct load_op *) pc;
1022 struct field_ref *ref = (struct field_ref *) insn->data;
1023
1024 if (unlikely(insn->reg >= REG_ERROR)) {
1025 ERR("invalid register %u\n",
1026 (unsigned int) insn->reg);
1027 ret = -EINVAL;
1028 goto end;
1029 }
1030 dbg_printf("Validate load field ref offset %u type double\n",
1031 ref->offset);
1032 reg[insn->reg].type = REG_DOUBLE;
1033 reg[insn->reg].literal = 0;
1034 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1035 break;
1036 }
1037
1038 case FILTER_OP_LOAD_STRING:
1039 {
1040 struct load_op *insn = (struct load_op *) pc;
1041
1042 if (unlikely(insn->reg >= REG_ERROR)) {
1043 ERR("invalid register %u\n",
1044 (unsigned int) insn->reg);
1045 ret = -EINVAL;
1046 goto end;
1047 }
1048 reg[insn->reg].type = REG_STRING;
1049 reg[insn->reg].literal = 1;
1050 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1051 break;
1052 }
1053
1054 case FILTER_OP_LOAD_S64:
1055 {
1056 struct load_op *insn = (struct load_op *) pc;
1057
1058 if (unlikely(insn->reg >= REG_ERROR)) {
1059 ERR("invalid register %u\n",
1060 (unsigned int) insn->reg);
1061 ret = -EINVAL;
1062 goto end;
1063 }
1064 reg[insn->reg].type = REG_S64;
1065 next_pc += sizeof(struct load_op)
1066 + sizeof(struct literal_numeric);
1067 break;
1068 }
1069
1070 case FILTER_OP_LOAD_DOUBLE:
1071 {
1072 struct load_op *insn = (struct load_op *) pc;
1073
1074 if (unlikely(insn->reg >= REG_ERROR)) {
1075 ERR("invalid register %u\n",
1076 (unsigned int) insn->reg);
1077 ret = -EINVAL;
1078 goto end;
1079 }
1080 reg[insn->reg].type = REG_DOUBLE;
1081 next_pc += sizeof(struct load_op)
1082 + sizeof(struct literal_double);
1083 break;
1084 }
1085 }
1086 }
1087 end:
1088 return ret;
1089 }
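/*
 * Summary of what a successful validation pass guarantees before the
 * interpreter is installed: every opcode is known and supported, every
 * register index encoded in load/unary instructions is below
 * REG_ERROR, logical operators never jump backwards (no loops, so the
 * interpreter terminates), and comparison operands have compatible
 * types as checked by bin_op_compare_check().
 */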
1090
1091 static
1092 int apply_field_reloc(struct ltt_event *event,
1093 struct bytecode_runtime *runtime,
1094 uint32_t runtime_len,
1095 uint32_t reloc_offset,
1096 const char *field_name)
1097 {
1098 const struct lttng_event_desc *desc;
1099 const struct lttng_event_field *fields, *field = NULL;
1100 unsigned int nr_fields, i;
1101 struct field_ref *field_ref;
1102 struct load_op *op;
1103 uint32_t field_offset = 0;
1104
1105 dbg_printf("Apply reloc: %u %s\n", reloc_offset, field_name);
1106
1107 /* Ensure that the reloc is within the code */
1108 if (runtime_len - reloc_offset < sizeof(uint16_t))
1109 return -EINVAL;
1110
1111 /* Lookup event by name */
1112 desc = event->desc;
1113 if (!desc)
1114 return -EINVAL;
1115 fields = desc->fields;
1116 if (!fields)
1117 return -EINVAL;
1118 nr_fields = desc->nr_fields;
1119 for (i = 0; i < nr_fields; i++) {
1120 if (!strcmp(fields[i].name, field_name)) {
1121 field = &fields[i];
1122 break;
1123 }
1124 /* compute field offset */
1125 switch (fields[i].type.atype) {
1126 case atype_integer:
1127 case atype_enum:
1128 field_offset += sizeof(int64_t);
1129 break;
1130 case atype_array:
1131 case atype_sequence:
1132 field_offset += sizeof(unsigned long);
1133 field_offset += sizeof(void *);
1134 break;
1135 case atype_string:
1136 field_offset += sizeof(void *);
1137 break;
1138 case atype_float:
1139 field_offset += sizeof(double);
1140 break;
1141 default:
1142 return -EINVAL;
1143 }
1144 }
1145 if (!field)
1146 return -EINVAL;
1147
1148 	/* Check that the field offset fits within the 16-bit reloc offset */
1149 if (field_offset > FILTER_BYTECODE_MAX_LEN)
1150 return -EINVAL;
1151
1152 /* set type */
1153 op = (struct load_op *) &runtime->data[reloc_offset];
1154 field_ref = (struct field_ref *) op->data;
1155 switch (field->type.atype) {
1156 case atype_integer:
1157 case atype_enum:
1158 op->op = FILTER_OP_LOAD_FIELD_REF_S64;
1159 break;
1160 case atype_array:
1161 case atype_sequence:
1162 op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
1163 break;
1164 case atype_string:
1165 op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
1166 break;
1167 case atype_float:
1168 op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE;
1169 break;
1170 default:
1171 return -EINVAL;
1172 }
1173 /* set offset */
1174 field_ref->offset = (uint16_t) field_offset;
1175 return 0;
1176 }
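/*
 * Hedged example of the offset computation above, for an event
 * described with three hypothetical fields (names invented for the
 * example): intfield (atype_integer), stringfield (atype_string),
 * floatfield (atype_float).  On a typical LP64 target, a reloc naming
 * "floatfield" resolves to FILTER_OP_LOAD_FIELD_REF_DOUBLE with
 * field_ref->offset = sizeof(int64_t) + sizeof(void *) = 16, which
 * must match the layout used to fill filter_stack_data at trace time.
 */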
1177
1178 /*
1179 * Take a bytecode with reloc table and link it to an event to create a
1180 * bytecode runtime.
1181 */
1182 static
1183 int _lttng_filter_event_link_bytecode(struct ltt_event *event,
1184 struct lttng_ust_filter_bytecode *filter_bytecode)
1185 {
1186 int ret, offset, next_offset;
1187 struct bytecode_runtime *runtime = NULL;
1188 size_t runtime_alloc_len;
1189
1190 if (!filter_bytecode)
1191 return 0;
1192 	/* Event is not connected to any description */
1193 if (!event->desc)
1194 return 0;
1195 /* Bytecode already linked */
1196 if (event->filter || event->filter_data)
1197 return 0;
1198
1199 dbg_printf("Linking\n");
1200
1201 /* We don't need the reloc table in the runtime */
1202 runtime_alloc_len = sizeof(*runtime) + filter_bytecode->reloc_offset;
1203 runtime = zmalloc(runtime_alloc_len);
1204 if (!runtime) {
1205 ret = -ENOMEM;
1206 goto link_error;
1207 }
1208 runtime->len = filter_bytecode->reloc_offset;
1209 /* copy original bytecode */
1210 memcpy(runtime->data, filter_bytecode->data, runtime->len);
1211 /*
1212 * apply relocs. Those are a uint16_t (offset in bytecode)
1213 * followed by a string (field name).
1214 */
1215 for (offset = filter_bytecode->reloc_offset;
1216 offset < filter_bytecode->len;
1217 offset = next_offset) {
1218 uint16_t reloc_offset =
1219 *(uint16_t *) &filter_bytecode->data[offset];
1220 const char *field_name =
1221 (const char *) &filter_bytecode->data[offset + sizeof(uint16_t)];
1222
1223 ret = apply_field_reloc(event, runtime, runtime->len, reloc_offset, field_name);
1224 if (ret) {
1225 goto link_error;
1226 }
1227 next_offset = offset + sizeof(uint16_t) + strlen(field_name) + 1;
1228 }
1229 /* Validate bytecode */
1230 ret = lttng_filter_validate_bytecode(runtime);
1231 if (ret) {
1232 goto link_error;
1233 }
1234 event->filter_data = runtime;
1235 event->filter = lttng_filter_interpret_bytecode;
1236 return 0;
1237
1238 link_error:
1239 event->filter = lttng_filter_false;
1240 free(runtime);
1241 return ret;
1242 }
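/*
 * Layout of the lttng_ust_filter_bytecode payload assumed by the
 * linking code above (byte offsets into filter_bytecode->data):
 *
 *   0                      reloc_offset                        len
 *   +----------------------+------------------------------------+
 *   |  instruction stream  |  reloc table: repeated entries of  |
 *   |  (copied into the    |  a uint16_t instruction offset     |
 *   |   bytecode_runtime)  |  followed by a NUL-terminated      |
 *   |                      |  field name                        |
 *   +----------------------+------------------------------------+
 */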
1243
1244 void lttng_filter_event_link_bytecode(struct ltt_event *event,
1245 struct lttng_ust_filter_bytecode *filter_bytecode)
1246 {
1247 int ret;
1248
1249 ret = _lttng_filter_event_link_bytecode(event, filter_bytecode);
1250 if (ret) {
1251 fprintf(stderr, "[lttng filter] error linking event bytecode\n");
1252 }
1253 }
1254
1255 /*
1256 * Link bytecode to all events for a wildcard. Skips events that already
1257 * have a bytecode linked.
1258 * We do not set each event's filter_bytecode field, because they do not
1259 * own the filter_bytecode: the wildcard owns it.
1260 */
1261 void lttng_filter_wildcard_link_bytecode(struct session_wildcard *wildcard)
1262 {
1263 struct ltt_event *event;
1264 int ret;
1265
1266 if (!wildcard->filter_bytecode)
1267 return;
1268
1269 cds_list_for_each_entry(event, &wildcard->events, wildcard_list) {
1270 if (event->filter)
1271 continue;
1272 ret = _lttng_filter_event_link_bytecode(event,
1273 wildcard->filter_bytecode);
1274 if (ret) {
1275 fprintf(stderr, "[lttng filter] error linking wildcard bytecode\n");
1276 }
1277
1278 }
1279 return;
1280 }
1281
1282 /*
1283 * Need to attach filter to an event before starting tracing for the
1284 * session. We own the filter_bytecode if we return success.
1285 */
1286 int lttng_filter_event_attach_bytecode(struct ltt_event *event,
1287 struct lttng_ust_filter_bytecode *filter_bytecode)
1288 {
1289 if (event->chan->session->been_active)
1290 return -EPERM;
1291 if (event->filter_bytecode)
1292 return -EEXIST;
1293 event->filter_bytecode = filter_bytecode;
1294 return 0;
1295 }
1296
1297 /*
1298 * Need to attach filter to a wildcard before starting tracing for the
1299 * session. We own the filter_bytecode if we return success.
1300 */
1301 int lttng_filter_wildcard_attach_bytecode(struct session_wildcard *wildcard,
1302 struct lttng_ust_filter_bytecode *filter_bytecode)
1303 {
1304 if (wildcard->chan->session->been_active)
1305 return -EPERM;
1306 if (wildcard->filter_bytecode)
1307 return -EEXIST;
1308 wildcard->filter_bytecode = filter_bytecode;
1309 return 0;
1310 }
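/*
 * Hedged usage sketch of the attach/link sequence (the actual call
 * sites live elsewhere in liblttng-ust; the error handling shown here
 * is illustrative only):
 *
 *	int ret;
 *
 *	ret = lttng_filter_event_attach_bytecode(event, bytecode);
 *	if (ret)
 *		return ret;	// -EPERM after session start, -EEXIST if already set
 *	// ... later, once the event has an event description ...
 *	lttng_filter_event_link_bytecode(event, event->filter_bytecode);
 */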