Only print filter errors if LTTNG_UST_DEBUG is set
/*
 * lttng-filter.c
 *
 * LTTng UST filter code.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <errno.h>
#include <stdio.h>
#include <helper.h>
#include <lttng/ust-events.h>
#include <stdint.h>
#include <string.h>
#include <inttypes.h>
#include <limits.h>
#include <usterr-signal-safe.h>
#include "filter-bytecode.h"

#define NR_REG 2

#ifndef min_t
#define min_t(type, a, b)	\
		((type) (a) < (type) (b) ? (type) (a) : (type) (b))
#endif

#ifndef likely
#define likely(x)	__builtin_expect(!!(x), 1)
#endif

#ifndef unlikely
#define unlikely(x)	__builtin_expect(!!(x), 0)
#endif

#ifdef DEBUG
#define dbg_printf(fmt, args...)	printf("[debug bytecode] " fmt, ## args)
#else
#define dbg_printf(fmt, args...)				\
do {								\
	/* do nothing but check printf format */		\
	if (0)							\
		printf("[debug bytecode] " fmt, ## args);	\
} while (0)
#endif

/* Linked bytecode */
struct bytecode_runtime {
	uint16_t len;
	char data[0];
};

struct reg {
	enum {
		REG_S64,
		REG_DOUBLE,
		REG_STRING,
	} type;
	int64_t v;
	double d;

	const char *str;
	size_t seq_len;
	int literal;	/* is string literal ? */
};
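
/*
 * Illustrative note (not part of the original file): the interpreter
 * below is a two-register machine. Binary comparison operators read
 * REG_R0 (left operand) and REG_R1 (right operand) and leave an S64
 * truth value in REG_R0. For a filter expression such as
 * "intfield > 100", a plausible instruction stream -- the exact
 * encoding is produced by the bytecode compiler on the session daemon
 * side and is only assumed here for illustration -- would be:
 *
 *	LOAD_FIELD_REF	reg=R0, ref=intfield	R0 <- event field value
 *	LOAD_S64	reg=R1, v=100		R1 <- literal 100
 *	GT					R0 <- (R0 > R1)
 *	RETURN					!!R0 decides record/discard
 */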

static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_RSHIFT ] = "RSHIFT",
	[ FILTER_OP_LSHIFT ] = "LSHIFT",
	[ FILTER_OP_BIN_AND ] = "BIN_AND",
	[ FILTER_OP_BIN_OR ] = "BIN_OR",
	[ FILTER_OP_BIN_XOR ] = "BIN_XOR",
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
};

static
const char *print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

/*
 * -1: wildcard found.
 * -2: unknown escape char.
 *  0: normal char.
 */

static
int parse_char(const char **p)
{
	switch (**p) {
	case '\\':
		(*p)++;
		switch (**p) {
		case '\\':
		case '*':
			return 0;
		default:
			return -2;
		}
	case '*':
		return -1;
	default:
		return 0;
	}
}

static
int reg_strcmp(struct reg reg[NR_REG], const char *cmp_type)
{
	const char *p = reg[REG_R0].str, *q = reg[REG_R1].str;
	int ret;
	int diff;

	for (;;) {
		int escaped_r0 = 0;

		if (unlikely(p - reg[REG_R0].str > reg[REG_R0].seq_len || *p == '\0')) {
			if (q - reg[REG_R1].str > reg[REG_R1].seq_len || *q == '\0')
				diff = 0;
			else
				diff = -1;
			break;
		}
		if (unlikely(q - reg[REG_R1].str > reg[REG_R1].seq_len || *q == '\0')) {
			if (p - reg[REG_R0].str > reg[REG_R0].seq_len || *p == '\0')
				diff = 0;
			else
				diff = 1;
			break;
		}
		if (reg[REG_R0].literal) {
			ret = parse_char(&p);
			if (ret == -1) {
				return 0;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (reg[REG_R1].literal) {
			ret = parse_char(&q);
			if (ret == -1) {
				return 0;
			} else if (ret == -2) {
				if (!escaped_r0)
					return -1;
			} else {
				if (escaped_r0)
					return 1;
			}
		} else {
			if (escaped_r0)
				return 1;
		}
		diff = *p - *q;
		if (diff != 0)
			break;
		p++;
		q++;
	}
	return diff;
}
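
/*
 * Illustrative examples (assumption: the load operations below put the
 * event's field string in REG_R0 and the literal from the filter
 * expression, flagged as literal, in REG_R1; only literal registers get
 * wildcard and escape handling):
 *
 *	field "foobar" vs literal "foo*"	'*' is a wildcard, match (0)
 *	field "foo*"   vs literal "foo\*"	escaped star compares as a
 *						plain '*', match (0)
 *	field "foobar" vs literal "foo"		literal ends first,
 *						returns nonzero
 */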

static
int lttng_filter_false(void *filter_data,
		const char *filter_stack_data)
{
	return 0;
}

static
int lttng_filter_interpret_bytecode(void *filter_data,
		const char *filter_stack_data)
{
	struct bytecode_runtime *bytecode = filter_data;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	int retval = 0;
	struct reg reg[NR_REG];
	int i;

	for (i = 0; i < NR_REG; i++) {
		reg[i].type = REG_S64;
		reg[i].v = 0;
		reg[i].d = 0.0;
		reg[i].str = NULL;
		reg[i].seq_len = 0;
		reg[i].literal = 0;
	}

	start_pc = &bytecode->data[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		if (unlikely(pc >= start_pc + bytecode->len)) {
			ERR("filter bytecode overflow\n");
			ret = -EINVAL;
			goto end;
		}
		dbg_printf("Executing op %s (%u)\n",
			print_op((unsigned int) *(filter_opcode_t *) pc),
			(unsigned int) *(filter_opcode_t *) pc);
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
			retval = !!reg[0].v;
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
		case FILTER_OP_RSHIFT:
		case FILTER_OP_LSHIFT:
		case FILTER_OP_BIN_AND:
		case FILTER_OP_BIN_OR:
		case FILTER_OP_BIN_XOR:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			if (unlikely((reg[REG_R0].type == REG_STRING && reg[REG_R1].type != REG_STRING)
					|| (reg[REG_R0].type != REG_STRING && reg[REG_R1].type == REG_STRING))) {
				ERR("type mismatch for '==' binary operator\n");
				ret = -EINVAL;
				goto end;
			}
			switch (reg[REG_R0].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				reg[REG_R0].v = (reg_strcmp(reg, "==") == 0);
				break;
			case REG_S64:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].v == reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].v == reg[REG_R1].d);
					break;
				}
				break;
			case REG_DOUBLE:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].d == reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].d == reg[REG_R1].d);
					break;
				}
				break;
			}
			reg[REG_R0].type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_NE:
		{
			if (unlikely((reg[REG_R0].type == REG_STRING && reg[REG_R1].type != REG_STRING)
					|| (reg[REG_R0].type != REG_STRING && reg[REG_R1].type == REG_STRING))) {
				ERR("type mismatch for '!=' binary operator\n");
				ret = -EINVAL;
				goto end;
			}
			switch (reg[REG_R0].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				reg[REG_R0].v = (reg_strcmp(reg, "!=") != 0);
				break;
			case REG_S64:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].v != reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].v != reg[REG_R1].d);
					break;
				}
				break;
			case REG_DOUBLE:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].d != reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].d != reg[REG_R1].d);
					break;
				}
				break;
			}
			reg[REG_R0].type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_GT:
		{
			if (unlikely((reg[REG_R0].type == REG_STRING && reg[REG_R1].type != REG_STRING)
					|| (reg[REG_R0].type != REG_STRING && reg[REG_R1].type == REG_STRING))) {
				ERR("type mismatch for '>' binary operator\n");
				ret = -EINVAL;
				goto end;
			}
			switch (reg[REG_R0].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				reg[REG_R0].v = (reg_strcmp(reg, ">") > 0);
				break;
			case REG_S64:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].v > reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].v > reg[REG_R1].d);
					break;
				}
				break;
			case REG_DOUBLE:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].d > reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].d > reg[REG_R1].d);
					break;
				}
				break;
			}
			reg[REG_R0].type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LT:
		{
			if (unlikely((reg[REG_R0].type == REG_STRING && reg[REG_R1].type != REG_STRING)
					|| (reg[REG_R0].type != REG_STRING && reg[REG_R1].type == REG_STRING))) {
				ERR("type mismatch for '<' binary operator\n");
				ret = -EINVAL;
				goto end;
			}
			switch (reg[REG_R0].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				reg[REG_R0].v = (reg_strcmp(reg, "<") < 0);
				break;
			case REG_S64:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].v < reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].v < reg[REG_R1].d);
					break;
				}
				break;
			case REG_DOUBLE:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].d < reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].d < reg[REG_R1].d);
					break;
				}
				break;
			}
			reg[REG_R0].type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_GE:
		{
			if (unlikely((reg[REG_R0].type == REG_STRING && reg[REG_R1].type != REG_STRING)
					|| (reg[REG_R0].type != REG_STRING && reg[REG_R1].type == REG_STRING))) {
				ERR("type mismatch for '>=' binary operator\n");
				ret = -EINVAL;
				goto end;
			}
			switch (reg[REG_R0].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				reg[REG_R0].v = (reg_strcmp(reg, ">=") >= 0);
				break;
			case REG_S64:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].v >= reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].v >= reg[REG_R1].d);
					break;
				}
				break;
			case REG_DOUBLE:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].d >= reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].d >= reg[REG_R1].d);
					break;
				}
				break;
			}
			reg[REG_R0].type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LE:
		{
			if (unlikely((reg[REG_R0].type == REG_STRING && reg[REG_R1].type != REG_STRING)
					|| (reg[REG_R0].type != REG_STRING && reg[REG_R1].type == REG_STRING))) {
				ERR("type mismatch for '<=' binary operator\n");
				ret = -EINVAL;
				goto end;
			}
			switch (reg[REG_R0].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				reg[REG_R0].v = (reg_strcmp(reg, "<=") <= 0);
				break;
			case REG_S64:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].v <= reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].v <= reg[REG_R1].d);
					break;
				}
				break;
			case REG_DOUBLE:
				switch (reg[REG_R1].type) {
				default:
					ERR("unknown register type\n");
					ret = -EINVAL;
					goto end;

				case REG_S64:
					reg[REG_R0].v = (reg[REG_R0].d <= reg[REG_R1].v);
					break;
				case REG_DOUBLE:
					reg[REG_R0].v = (reg[REG_R0].d <= reg[REG_R1].d);
					break;
				}
				break;
			}
			reg[REG_R0].type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			if (unlikely(insn->reg >= REG_ERROR)) {
				ERR("invalid register %u\n",
					(unsigned int) insn->reg);
				ret = -EINVAL;
				goto end;
			}
			switch (reg[insn->reg].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				ERR("Unary plus can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				break;
			case REG_DOUBLE:
				break;
			}
			next_pc += sizeof(struct unary_op);
			break;
		}
		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			if (unlikely(insn->reg >= REG_ERROR)) {
				ERR("invalid register %u\n",
					(unsigned int) insn->reg);
				ret = -EINVAL;
				goto end;
			}
			switch (reg[insn->reg].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				ERR("Unary minus can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				reg[insn->reg].v = -reg[insn->reg].v;
				break;
			case REG_DOUBLE:
				reg[insn->reg].d = -reg[insn->reg].d;
				break;
			}
			next_pc += sizeof(struct unary_op);
			break;
		}
		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			if (unlikely(insn->reg >= REG_ERROR)) {
				ERR("invalid register %u\n",
					(unsigned int) insn->reg);
				ret = -EINVAL;
				goto end;
			}
			switch (reg[insn->reg].type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				ERR("Unary not can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				reg[insn->reg].v = !reg[insn->reg].v;
				break;
			case REG_DOUBLE:
				reg[insn->reg].d = !reg[insn->reg].d;
				break;
			}
			next_pc += sizeof(struct unary_op);
			break;
		}
		/* logical */
		case FILTER_OP_AND:
		{
			struct logical_op *insn = (struct logical_op *) pc;

			if (unlikely(reg[REG_R0].type == REG_STRING)) {
				ERR("Logical operator 'and' can only be applied to numeric and floating point registers\n");
				ret = -EINVAL;
				goto end;
			}

			/* If REG_R0 is 0, skip and evaluate to 0 */
			if ((reg[REG_R0].type == REG_S64 && reg[REG_R0].v == 0)
					|| (reg[REG_R0].type == REG_DOUBLE && reg[REG_R0].d == 0.0)) {
				dbg_printf("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
				if (unlikely(next_pc <= pc)) {
					ERR("Loops are not allowed in bytecode\n");
					ret = -EINVAL;
					goto end;
				}
			} else {
				next_pc += sizeof(struct logical_op);
			}
			break;
		}
		case FILTER_OP_OR:
		{
			struct logical_op *insn = (struct logical_op *) pc;

			if (unlikely(reg[REG_R0].type == REG_STRING)) {
				ERR("Logical operator 'or' can only be applied to numeric and floating point registers\n");
				ret = -EINVAL;
				goto end;
			}

			/* If REG_R0 is nonzero, skip and evaluate to 1 */
			if ((reg[REG_R0].type == REG_S64 && reg[REG_R0].v != 0)
					|| (reg[REG_R0].type == REG_DOUBLE && reg[REG_R0].d != 0.0)) {
				reg[REG_R0].v = 1;
				dbg_printf("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
				if (unlikely(next_pc <= pc)) {
					ERR("Loops are not allowed in bytecode\n");
					ret = -EINVAL;
					goto end;
				}
			} else {
				next_pc += sizeof(struct logical_op);
			}
			break;
		}

		/* load */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			if (unlikely(insn->reg >= REG_ERROR)) {
				ERR("invalid register %u\n",
					(unsigned int) insn->reg);
				ret = -EINVAL;
				goto end;
			}
			dbg_printf("load field ref offset %u type %u\n",
				ref->offset, ref->type);
			switch (ref->type) {
			case FIELD_REF_UNKNOWN:
			default:
				ERR("unknown field ref type\n");
				ret = -EINVAL;
				goto end;

			case FIELD_REF_STRING:
				reg[insn->reg].str =
					*(const char * const *) &filter_stack_data[ref->offset];
				reg[insn->reg].type = REG_STRING;
				reg[insn->reg].seq_len = UINT_MAX;
				reg[insn->reg].literal = 0;
				dbg_printf("ref load string %s\n", reg[insn->reg].str);
				break;
			case FIELD_REF_SEQUENCE:
				reg[insn->reg].seq_len =
					*(unsigned long *) &filter_stack_data[ref->offset];
				reg[insn->reg].str =
					*(const char **) (&filter_stack_data[ref->offset
						+ sizeof(unsigned long)]);
				reg[insn->reg].type = REG_STRING;
				reg[insn->reg].literal = 0;
				break;
			case FIELD_REF_S64:
				memcpy(&reg[insn->reg].v, &filter_stack_data[ref->offset],
					sizeof(struct literal_numeric));
				reg[insn->reg].type = REG_S64;
				reg[insn->reg].literal = 0;
				dbg_printf("ref load s64 %" PRIi64 "\n", reg[insn->reg].v);
				break;
			case FIELD_REF_DOUBLE:
				memcpy(&reg[insn->reg].d, &filter_stack_data[ref->offset],
					sizeof(struct literal_double));
				reg[insn->reg].type = REG_DOUBLE;
				reg[insn->reg].literal = 0;
				dbg_printf("ref load double %g\n", reg[insn->reg].d);
				break;
			}

			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (unlikely(insn->reg >= REG_ERROR)) {
				ERR("invalid register %u\n",
					(unsigned int) insn->reg);
				ret = -EINVAL;
				goto end;
			}
			dbg_printf("load string %s\n", insn->data);
			reg[insn->reg].str = insn->data;
			reg[insn->reg].type = REG_STRING;
			reg[insn->reg].seq_len = UINT_MAX;
			reg[insn->reg].literal = 1;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (unlikely(insn->reg >= REG_ERROR)) {
				ERR("invalid register %u\n",
					(unsigned int) insn->reg);
				ret = -EINVAL;
				goto end;
			}
			memcpy(&reg[insn->reg].v, insn->data,
				sizeof(struct literal_numeric));
			dbg_printf("load s64 %" PRIi64 "\n", reg[insn->reg].v);
			reg[insn->reg].type = REG_S64;
			next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (unlikely(insn->reg >= REG_ERROR)) {
				ERR("invalid register %u\n",
					(unsigned int) insn->reg);
				ret = -EINVAL;
				goto end;
			}
			memcpy(&reg[insn->reg].d, insn->data,
				sizeof(struct literal_double));
			dbg_printf("load double %g\n", reg[insn->reg].d);
			reg[insn->reg].type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_double);
			break;
		}
		}
	}
end:
	/* return 0 (discard) on error */
	if (ret)
		return 0;
	return retval;
}
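
/*
 * Illustrative call-site sketch (assumption: the tracepoint probe
 * serializes the event fields into a contiguous "filter stack" buffer
 * laid out to match the field_ref offsets patched in at link time):
 *
 *	if (unlikely(event->filter)) {
 *		if (!event->filter(event->filter_data, filter_stack_data))
 *			return;		skip recording this event
 *	}
 *
 * Note that any interpretation error makes the function return 0, so a
 * broken filter discards events rather than letting them through
 * unfiltered.
 */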

static
int apply_field_reloc(struct ltt_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct field_ref *field_ref;
	uint32_t field_offset = 0;

	dbg_printf("Apply reloc: %u %s\n", reloc_offset, field_name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	/* Lookup field by name */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > FILTER_BYTECODE_MAX_LEN)
		return -EINVAL;

	/* set type */
	field_ref = (struct field_ref *) &runtime->data[reloc_offset];
	switch (field->type.atype) {
	case atype_integer:
	case atype_enum:
		field_ref->type = FIELD_REF_S64;
		break;
	case atype_array:
	case atype_sequence:
		field_ref->type = FIELD_REF_SEQUENCE;
		break;
	case atype_string:
		field_ref->type = FIELD_REF_STRING;
		break;
	case atype_float:
		field_ref->type = FIELD_REF_DOUBLE;
		break;
	default:
		return -EINVAL;
	}
	/* set offset */
	field_ref->offset = (uint16_t) field_offset;
	return 0;
}
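
/*
 * Worked example (hypothetical event layout, for illustration only):
 * for an event with fields (int32_t a, const char *msg, float ratio),
 * a relocation against "ratio" accumulates offsets as follows on LP64:
 *
 *	a	atype_integer	+ sizeof(int64_t) = 8
 *	msg	atype_string	+ sizeof(void *)  = 8
 *	ratio	found		field_offset = 16, type = FIELD_REF_DOUBLE
 *
 * i.e. the offset computation assumes every integer/enum field occupies
 * a full 64-bit slot in the filter stack and strings are passed by
 * pointer, whatever their declared size in the event payload.
 */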

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int _lttng_filter_event_link_bytecode(struct ltt_event *event,
		struct lttng_ust_filter_bytecode *filter_bytecode)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Event is not connected to any description */
	if (!event->desc)
		return 0;
	/* Bytecode already linked */
	if (event->filter || event->filter_data)
		return 0;

	dbg_printf("Linking\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->reloc_offset;
	runtime = zmalloc(runtime_alloc_len);
	if (!runtime) {
		ret = -ENOMEM;
		goto link_error;
	}
	runtime->len = filter_bytecode->reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->data, filter_bytecode->data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = filter_bytecode->reloc_offset;
			offset < filter_bytecode->len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->data[offset];
		const char *field_name =
			(const char *) &filter_bytecode->data[offset + sizeof(uint16_t)];

		ret = apply_field_reloc(event, runtime, runtime->len, reloc_offset, field_name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(field_name) + 1;
	}
	event->filter_data = runtime;
	event->filter = lttng_filter_interpret_bytecode;
	return 0;

link_error:
	event->filter = lttng_filter_false;
	free(runtime);
	return ret;
}
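
/*
 * Layout of the lttng_ust_filter_bytecode blob consumed above, as
 * implied by the code (sketched here for illustration):
 *
 *	data[0 .. reloc_offset - 1]	executable bytecode, copied into
 *					the runtime
 *	data[reloc_offset .. len - 1]	reloc table: a sequence of
 *					{ uint16_t offset; char name[]; }
 *					entries, typically one per
 *					LOAD_FIELD_REF instruction
 *
 * Each reloc patches the field_ref at "offset" with the type and
 * filter-stack offset of field "name", resolved against this event's
 * description.
 */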

void lttng_filter_event_link_bytecode(struct ltt_event *event,
		struct lttng_ust_filter_bytecode *filter_bytecode)
{
	int ret;

	ret = _lttng_filter_event_link_bytecode(event, filter_bytecode);
	if (ret) {
		fprintf(stderr, "[lttng filter] error linking event bytecode\n");
	}
}

/*
 * Link bytecode to all events for a wildcard. Skips events that already
 * have a bytecode linked.
 * We do not set each event's filter_bytecode field, because they do not
 * own the filter_bytecode: the wildcard owns it.
 */
void lttng_filter_wildcard_link_bytecode(struct session_wildcard *wildcard)
{
	struct ltt_event *event;
	int ret;

	if (!wildcard->filter_bytecode)
		return;

	cds_list_for_each_entry(event, &wildcard->events, wildcard_list) {
		if (event->filter)
			continue;
		ret = _lttng_filter_event_link_bytecode(event,
				wildcard->filter_bytecode);
		if (ret) {
			fprintf(stderr, "[lttng filter] error linking wildcard bytecode\n");
		}
	}
	return;
}

/*
 * Need to attach filter to an event before starting tracing for the
 * session. We own the filter_bytecode if we return success.
 */
int lttng_filter_event_attach_bytecode(struct ltt_event *event,
		struct lttng_ust_filter_bytecode *filter_bytecode)
{
	if (event->chan->session->been_active)
		return -EPERM;
	if (event->filter_bytecode)
		return -EEXIST;
	event->filter_bytecode = filter_bytecode;
	return 0;
}
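
/*
 * Typical call sequence (illustrative only; the actual callers live in
 * the UST command handling code): the bytecode is attached before the
 * session becomes active, and linked against the event description
 * when tracing starts:
 *
 *	ret = lttng_filter_event_attach_bytecode(event, bytecode);
 *	if (!ret)
 *		lttng_filter_event_link_bytecode(event,
 *				event->filter_bytecode);
 *
 * Attaching once the session has been active returns -EPERM; attaching
 * a second bytecode returns -EEXIST.
 */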

/*
 * Need to attach filter to a wildcard before starting tracing for the
 * session. We own the filter_bytecode if we return success.
 */
int lttng_filter_wildcard_attach_bytecode(struct session_wildcard *wildcard,
		struct lttng_ust_filter_bytecode *filter_bytecode)
{
	if (wildcard->chan->session->been_active)
		return -EPERM;
	if (wildcard->filter_bytecode)
		return -EEXIST;
	wildcard->filter_bytecode = filter_bytecode;
	return 0;
}