Implement filter bytecode interpreter and linker
[lttng-ust.git] / liblttng-ust / lttng-filter.c
1 /*
2 * lttng-filter.c
3 *
4 * LTTng UST filter code.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <errno.h>
24 #include <stdio.h>
25 #include <helper.h>
26 #include <lttng/ust-events.h>
27 #include <stdint.h>
28 #include <stdlib.h>	/* free() */
29 #include <string.h>
30 #include <inttypes.h>
31 #include <limits.h>
32 #include "filter-bytecode.h"
33
34 #define NR_REG 2
35
36 #ifndef min_t
37 #define min_t(type, a, b) \
38 ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
39 #endif
40
41 #ifndef likely
42 #define likely(x) __builtin_expect(!!(x), 1)
43 #endif
44
45 #ifndef unlikely
46 #define unlikely(x) __builtin_expect(!!(x), 0)
47 #endif
48
49 #ifdef DEBUG
50 #define dbg_printf(fmt, args...) printf("[debug bytecode] " fmt, ## args)
51 #else
52 #define dbg_printf(fmt, args...) \
53 do { \
54 /* do nothing but check printf format */ \
55 if (0) \
56 printf("[debug bytecode] " fmt, ## args); \
57 } while (0)
58 #endif
59
60 /* Linked bytecode */
61 struct bytecode_runtime {
62 uint16_t len;
63 char data[0];
64 };
65
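/*
 * Interpreter register model (descriptive note): the interpreter is a
 * small two-register machine. Binary comparison opcodes operate on
 * registers R0 and R1 and leave an S64 (0/1) result in R0, which
 * FILTER_OP_RETURN turns into the record/discard decision. Each
 * register tracks the type of its current content so string and
 * numeric comparisons can be dispatched at runtime.
 */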
66 struct reg {
67 enum {
68 REG_S64,
69 REG_STRING, /* NULL-terminated string */
70 REG_SEQUENCE, /* non-null terminated */
71 } type;
72 int64_t v;
73
74 const char *str;
75 size_t seq_len;
76 int literal; /* is string literal ? */
77 };
78
79 static const char *opnames[] = {
80 [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
81
82 [ FILTER_OP_RETURN ] = "RETURN",
83
84 /* binary */
85 [ FILTER_OP_MUL ] = "MUL",
86 [ FILTER_OP_DIV ] = "DIV",
87 [ FILTER_OP_MOD ] = "MOD",
88 [ FILTER_OP_PLUS ] = "PLUS",
89 [ FILTER_OP_MINUS ] = "MINUS",
90 [ FILTER_OP_RSHIFT ] = "RSHIFT",
91 [ FILTER_OP_LSHIFT ] = "LSHIFT",
92 [ FILTER_OP_BIN_AND ] = "BIN_AND",
93 [ FILTER_OP_BIN_OR ] = "BIN_OR",
94 [ FILTER_OP_BIN_XOR ] = "BIN_XOR",
95 [ FILTER_OP_EQ ] = "EQ",
96 [ FILTER_OP_NE ] = "NE",
97 [ FILTER_OP_GT ] = "GT",
98 [ FILTER_OP_LT ] = "LT",
99 [ FILTER_OP_GE ] = "GE",
100 [ FILTER_OP_LE ] = "LE",
101
102 /* unary */
103 [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
104 [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
105 [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
106
107 /* logical */
108 [ FILTER_OP_AND ] = "AND",
109 [ FILTER_OP_OR ] = "OR",
110
111 /* load */
112 [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
113 [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
114 [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
115 };
116
117 static
118 const char *print_op(enum filter_op op)
119 {
120 if (op >= NR_FILTER_OPS)
121 return "UNKNOWN";
122 else
123 return opnames[op];
124 }
125
126 /*
127 * -1: wildcard found.
128 * -2: unknown escape char.
129 * 0: normal char.
130 */
131
132 static
133 int parse_char(const char **p)
134 {
135 switch (**p) {
136 case '\\':
137 (*p)++;
138 switch (**p) {
139 case '\\':
140 case '*':
141 return 0;
142 default:
143 return -2;
144 }
145 case '*':
146 return -1;
147 default:
148 return 0;
149 }
150 }
151
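/*
 * Compare the string/sequence content of registers R0 and R1 with
 * strcmp()-like semantics (< 0, 0, > 0). Literal operands (loaded by
 * FILTER_OP_LOAD_STRING) go through parse_char(), so a '*' wildcard in
 * a literal makes the remainder of the comparison match, and the
 * "\\" and "\*" escapes are compared as plain characters. Sequence
 * operands are bounded by seq_len rather than NUL-terminated.
 */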
152 static
153 int reg_strcmp(struct reg reg[NR_REG], const char *cmp_type)
154 {
155 const char *p = reg[REG_R0].str, *q = reg[REG_R1].str;
156 int ret;
157 int diff;
158
159 for (;;) {
160 int escaped_r0 = 0;
161
162 if (unlikely(p - reg[REG_R0].str >= reg[REG_R0].seq_len || *p == '\0')) {
163 if (q - reg[REG_R1].str >= reg[REG_R1].seq_len || *q == '\0')
164 diff = 0;
165 else
166 diff = -1;
167 break;
168 }
169 if (unlikely(q - reg[REG_R1].str >= reg[REG_R1].seq_len || *q == '\0')) {
170 if (p - reg[REG_R0].str >= reg[REG_R0].seq_len || *p == '\0')
171 diff = 0;
172 else
173 diff = 1;
174 break;
175 }
176 if (reg[REG_R0].literal) {
177 ret = parse_char(&p);
178 if (ret == -1) {
179 return 0;
180 } else if (ret == -2) {
181 escaped_r0 = 1;
182 }
183 /* else compare both char */
184 }
185 if (reg[REG_R1].literal) {
186 ret = parse_char(&q);
187 if (ret == -1) {
188 return 0;
189 } else if (ret == -2) {
190 if (!escaped_r0)
191 return -1;
192 } else {
193 if (escaped_r0)
194 return 1;
195 }
196 } else {
197 if (escaped_r0)
198 return 1;
199 }
200 diff = *p - *q;
201 if (diff != 0)
202 break;
203 p++;
204 q++;
205 }
206 return diff;
207 }
208
209 static
210 int lttng_filter_false(void *filter_data,
211 const char *filter_stack_data)
212 {
213 return 0;
214 }
215
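/*
 * Interpret the linked bytecode against one event's filter stack data.
 * Returns 1 if the event passes the filter (record it), 0 if it must
 * be discarded; malformed bytecode also results in a discard.
 *
 * Illustrative sketch only (the exact instruction encoding is defined
 * by filter-bytecode.h): a filter such as "intfield > 10" executes
 * roughly as
 *
 *   LOAD_FIELD_REF r0, <intfield>  ; offset patched by apply_field_reloc()
 *   LOAD_S64       r1, 10
 *   GT                             ; r0 = (r0 > r1)
 *   RETURN                         ; record iff r0 != 0
 */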
216 static
217 int lttng_filter_interpret_bytecode(void *filter_data,
218 const char *filter_stack_data)
219 {
220 struct bytecode_runtime *bytecode = filter_data;
221 void *pc, *next_pc, *start_pc;
222 int ret = -EINVAL;
223 int retval = 0;
224 struct reg reg[NR_REG];
225 int i;
226
227 for (i = 0; i < NR_REG; i++) {
228 reg[i].type = REG_S64;
229 reg[i].v = 0;
230 reg[i].str = NULL;
231 reg[i].seq_len = 0;
232 reg[i].literal = 0;
233 }
234
235 start_pc = &bytecode->data[0];
236 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
237 pc = next_pc) {
238 if (unlikely(pc >= start_pc + bytecode->len)) {
239 fprintf(stderr, "[error] filter bytecode overflow\n");
240 ret = -EINVAL;
241 goto end;
242 }
243 dbg_printf("Executing op %s (%u)\n",
244 print_op((unsigned int) *(filter_opcode_t *) pc),
245 (unsigned int) *(filter_opcode_t *) pc);
246 switch (*(filter_opcode_t *) pc) {
247 case FILTER_OP_UNKNOWN:
248 default:
249 fprintf(stderr, "[error] unknown bytecode op %u\n",
250 (unsigned int) *(filter_opcode_t *) pc);
251 ret = -EINVAL;
252 goto end;
253
254 case FILTER_OP_RETURN:
255 retval = !!reg[REG_R0].v;
256 ret = 0;
257 goto end;
258
259 /* binary */
260 case FILTER_OP_MUL:
261 case FILTER_OP_DIV:
262 case FILTER_OP_MOD:
263 case FILTER_OP_PLUS:
264 case FILTER_OP_MINUS:
265 case FILTER_OP_RSHIFT:
266 case FILTER_OP_LSHIFT:
267 case FILTER_OP_BIN_AND:
268 case FILTER_OP_BIN_OR:
269 case FILTER_OP_BIN_XOR:
270 fprintf(stderr, "[error] unsupported bytecode op %u\n",
271 (unsigned int) *(filter_opcode_t *) pc);
272 ret = -EINVAL;
273 goto end;
274
275 case FILTER_OP_EQ:
276 {
277 if (unlikely((reg[REG_R0].type == REG_S64 && reg[REG_R1].type != REG_S64)
278 || (reg[REG_R0].type != REG_S64 && reg[REG_R1].type == REG_S64))) {
279 fprintf(stderr, "[error] type mismatch for '==' binary operator\n");
280 ret = -EINVAL;
281 goto end;
282 }
283 switch (reg[REG_R0].type) {
284 default:
285 fprintf(stderr, "[error] unknown register type\n");
286 ret = -EINVAL;
287 goto end;
288
289 case REG_STRING:
290 case REG_SEQUENCE:
291 reg[REG_R0].v = (reg_strcmp(reg, "==") == 0);
292 break;
293 case REG_S64:
294 reg[REG_R0].v = (reg[REG_R0].v == reg[REG_R1].v);
295 break;
296 }
297 reg[REG_R0].type = REG_S64;
298 next_pc += sizeof(struct binary_op);
299 break;
300 }
301 case FILTER_OP_NE:
302 {
303 if (unlikely((reg[REG_R0].type == REG_S64 && reg[REG_R1].type != REG_S64)
304 || (reg[REG_R0].type != REG_S64 && reg[REG_R1].type == REG_S64))) {
305 fprintf(stderr, "[error] type mismatch for '!=' binary operator\n");
306 ret = -EINVAL;
307 goto end;
308 }
309 switch (reg[REG_R0].type) {
310 default:
311 fprintf(stderr, "[error] unknown register type\n");
312 ret = -EINVAL;
313 goto end;
314
315 case REG_STRING:
316 case REG_SEQUENCE:
317 reg[REG_R0].v = (reg_strcmp(reg, "!=") != 0);
318 break;
319 case REG_S64:
320 reg[REG_R0].v = (reg[REG_R0].v != reg[REG_R1].v);
321 break;
322 }
323 reg[REG_R0].type = REG_S64;
324 next_pc += sizeof(struct binary_op);
325 break;
326 }
327 case FILTER_OP_GT:
328 {
329 if (unlikely((reg[REG_R0].type == REG_S64 && reg[REG_R1].type != REG_S64)
330 || (reg[REG_R0].type != REG_S64 && reg[REG_R1].type == REG_S64))) {
331 fprintf(stderr, "[error] type mismatch for '>' binary operator\n");
332 ret = -EINVAL;
333 goto end;
334 }
335 switch (reg[REG_R0].type) {
336 default:
337 fprintf(stderr, "[error] unknown register type\n");
338 ret = -EINVAL;
339 goto end;
340
341 case REG_STRING:
342 case REG_SEQUENCE:
343 reg[REG_R0].v = (reg_strcmp(reg, ">") > 0);
344 break;
345 case REG_S64:
346 reg[REG_R0].v = (reg[REG_R0].v > reg[REG_R1].v);
347 break;
348 }
349 reg[REG_R0].type = REG_S64;
350 next_pc += sizeof(struct binary_op);
351 break;
352 }
353 case FILTER_OP_LT:
354 {
355 if (unlikely((reg[REG_R0].type == REG_S64 && reg[REG_R1].type != REG_S64)
356 || (reg[REG_R0].type != REG_S64 && reg[REG_R1].type == REG_S64))) {
357 fprintf(stderr, "[error] type mismatch for '<' binary operator\n");
358 ret = -EINVAL;
359 goto end;
360 }
361 switch (reg[REG_R0].type) {
362 default:
363 fprintf(stderr, "[error] unknown register type\n");
364 ret = -EINVAL;
365 goto end;
366
367 case REG_STRING:
368 case REG_SEQUENCE:
369 reg[REG_R0].v = (reg_strcmp(reg, "<") < 0);
370 break;
371 case REG_S64:
372 reg[REG_R0].v = (reg[REG_R0].v < reg[REG_R1].v);
373 break;
374 }
375 reg[REG_R0].type = REG_S64;
376 next_pc += sizeof(struct binary_op);
377 break;
378 }
379 case FILTER_OP_GE:
380 {
381 if (unlikely((reg[REG_R0].type == REG_S64 && reg[REG_R1].type != REG_S64)
382 || (reg[REG_R0].type != REG_S64 && reg[REG_R1].type == REG_S64))) {
383 fprintf(stderr, "[error] type mismatch for '>=' binary operator\n");
384 ret = -EINVAL;
385 goto end;
386 }
387 switch (reg[REG_R0].type) {
388 default:
389 fprintf(stderr, "[error] unknown register type\n");
390 ret = -EINVAL;
391 goto end;
392
393 case REG_STRING:
394 case REG_SEQUENCE:
395 reg[REG_R0].v = (reg_strcmp(reg, ">=") >= 0);
396 break;
397 case REG_S64:
398 reg[REG_R0].v = (reg[REG_R0].v >= reg[REG_R1].v);
399 break;
400 }
401 reg[REG_R0].type = REG_S64;
402 next_pc += sizeof(struct binary_op);
403 break;
404 }
405 case FILTER_OP_LE:
406 {
407 if (unlikely((reg[REG_R0].type == REG_S64 && reg[REG_R1].type != REG_S64)
408 || (reg[REG_R0].type != REG_S64 && reg[REG_R1].type == REG_S64))) {
409 fprintf(stderr, "[error] type mismatch for '<=' binary operator\n");
410 ret = -EINVAL;
411 goto end;
412 }
413 switch (reg[REG_R0].type) {
414 default:
415 fprintf(stderr, "[error] unknown register type\n");
416 ret = -EINVAL;
417 goto end;
418
419 case REG_STRING:
420 case REG_SEQUENCE:
421 reg[REG_R0].v = (reg_strcmp(reg, "<=") <= 0);
422 break;
423 case REG_S64:
424 reg[REG_R0].v = (reg[REG_R0].v <= reg[REG_R1].v);
425 break;
426 }
427 reg[REG_R0].type = REG_S64;
428 next_pc += sizeof(struct binary_op);
429 break;
430 }
431
432 /* unary */
433 case FILTER_OP_UNARY_PLUS:
434 {
435 struct unary_op *insn = (struct unary_op *) pc;
436
437 if (unlikely(insn->reg >= REG_ERROR)) {
438 fprintf(stderr, "[error] invalid register %u\n",
439 (unsigned int) insn->reg);
440 ret = -EINVAL;
441 goto end;
442 }
443 if (unlikely(reg[insn->reg].type != REG_S64)) {
444 fprintf(stderr, "[error] Unary plus can only be applied to numeric register\n");
445 ret = -EINVAL;
446 goto end;
447 }
448 next_pc += sizeof(struct unary_op);
449 break;
450 }
451 case FILTER_OP_UNARY_MINUS:
452 {
453 struct unary_op *insn = (struct unary_op *) pc;
454
455 if (unlikely(insn->reg >= REG_ERROR)) {
456 fprintf(stderr, "[error] invalid register %u\n",
457 (unsigned int) insn->reg);
458 ret = -EINVAL;
459 goto end;
460 }
461 if (unlikely(reg[insn->reg].type != REG_S64)) {
462 fprintf(stderr, "[error] Unary minus can only be applied to numeric register\n");
463 ret = -EINVAL;
464 goto end;
465 }
466 reg[insn->reg].v = -reg[insn->reg].v;
467 next_pc += sizeof(struct unary_op);
468 break;
469 }
470 case FILTER_OP_UNARY_NOT:
471 {
472 struct unary_op *insn = (struct unary_op *) pc;
473
474 if (unlikely(insn->reg >= REG_ERROR)) {
475 fprintf(stderr, "[error] invalid register %u\n",
476 (unsigned int) insn->reg);
477 ret = -EINVAL;
478 goto end;
479 }
480 if (unlikely(reg[insn->reg].type != REG_S64)) {
481 fprintf(stderr, "[error] Unary not can only be applied to numeric register\n");
482 ret = -EINVAL;
483 goto end;
484 }
485 reg[insn->reg].v = !reg[insn->reg].v;
486 next_pc += sizeof(struct unary_op);
487 break;
488 }
489 /* logical */
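/*
 * Logical and/or implement short-circuit evaluation: each logical_op
 * carries a skip_offset jumping past the right-hand operand, taken
 * when R0 already determines the result. Only forward jumps are
 * accepted, which keeps the bytecode loop-free.
 */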
490 case FILTER_OP_AND:
491 {
492 struct logical_op *insn = (struct logical_op *) pc;
493
494 if (unlikely(reg[REG_R0].type != REG_S64)) {
495 fprintf(stderr, "[error] Logical operator 'and' can only be applied to numeric register\n");
496 ret = -EINVAL;
497 goto end;
498 }
499
500 /* If REG_R0 is 0, skip and evaluate to 0 */
501 if (reg[REG_R0].v == 0) {
502 dbg_printf("Jumping to bytecode offset %u\n",
503 (unsigned int) insn->skip_offset);
504 next_pc = start_pc + insn->skip_offset;
505 if (unlikely(next_pc <= pc)) {
506 fprintf(stderr, "[error] Loops are not allowed in bytecode\n");
507 ret = -EINVAL;
508 goto end;
509 }
510 } else {
511 next_pc += sizeof(struct logical_op);
512 }
513 break;
514 }
515 case FILTER_OP_OR:
516 {
517 struct logical_op *insn = (struct logical_op *) pc;
518
519 if (unlikely(reg[REG_R0].type != REG_S64)) {
520 fprintf(stderr, "[error] Logical operator 'or' can only be applied to numeric register\n");
521 ret = -EINVAL;
522 goto end;
523 }
524
525 /* If REG_R0 is nonzero, skip and evaluate to 1 */
526 if (reg[REG_R0].v != 0) {
527 reg[REG_R0].v = 1;
528 dbg_printf("Jumping to bytecode offset %u\n",
529 (unsigned int) insn->skip_offset);
530 next_pc = start_pc + insn->skip_offset;
531 if (unlikely(next_pc <= pc)) {
532 fprintf(stderr, "[error] Loops are not allowed in bytecode\n");
533 ret = -EINVAL;
534 goto end;
535 }
536 } else {
537 next_pc += sizeof(struct logical_op);
538 }
539 break;
540 }
541
542 /* load */
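/*
 * Load instructions either dereference the event payload
 * (LOAD_FIELD_REF reads filter_stack_data at the offset patched in by
 * apply_field_reloc()) or load literals embedded right after the
 * load_op in the instruction stream (LOAD_STRING, LOAD_S64).
 */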
543 case FILTER_OP_LOAD_FIELD_REF:
544 {
545 struct load_op *insn = (struct load_op *) pc;
546 struct field_ref *ref = (struct field_ref *) insn->data;
547
548 if (unlikely(insn->reg >= REG_ERROR)) {
549 fprintf(stderr, "[error] invalid register %u\n",
550 (unsigned int) insn->reg);
551 ret = -EINVAL;
552 goto end;
553 }
554 dbg_printf("load field ref offset %u type %u\n",
555 ref->offset, ref->type);
556 switch (ref->type) {
557 case FIELD_REF_UNKNOWN:
558 default:
559 fprintf(stderr, "[error] unknown field ref type\n");
560 ret = -EINVAL;
561 goto end;
562
563 case FIELD_REF_STRING:
564 reg[insn->reg].str =
565 *(const char * const *) &filter_stack_data[ref->offset];
566 reg[insn->reg].type = REG_STRING;
567 reg[insn->reg].seq_len = UINT_MAX;
568 reg[insn->reg].literal = 0;
569 dbg_printf("ref load string %s\n", reg[insn->reg].str);
570 break;
571 case FIELD_REF_SEQUENCE:
572 reg[insn->reg].seq_len =
573 *(unsigned long *) &filter_stack_data[ref->offset];
574 reg[insn->reg].str =
575 *(const char **) (&filter_stack_data[ref->offset
576 + sizeof(unsigned long)]);
577 reg[insn->reg].type = REG_SEQUENCE;
578 reg[insn->reg].literal = 0;
579 break;
580 case FIELD_REF_S64:
581 memcpy(&reg[insn->reg].v, &filter_stack_data[ref->offset],
582 sizeof(struct literal_numeric));
583 reg[insn->reg].type = REG_S64;
584 reg[insn->reg].literal = 0;
585 dbg_printf("ref load s64 %" PRIi64 "\n", reg[insn->reg].v);
586 break;
587 }
588
589 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
590 break;
591 }
592
593 case FILTER_OP_LOAD_STRING:
594 {
595 struct load_op *insn = (struct load_op *) pc;
596
597 if (unlikely(insn->reg >= REG_ERROR)) {
598 fprintf(stderr, "[error] invalid register %u\n",
599 (unsigned int) insn->reg);
600 ret = -EINVAL;
601 goto end;
602 }
603 dbg_printf("load string %s\n", insn->data);
604 reg[insn->reg].str = insn->data;
605 reg[insn->reg].type = REG_STRING;
606 reg[insn->reg].seq_len = UINT_MAX;
607 reg[insn->reg].literal = 1;
608 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
609 break;
610 }
611
612 case FILTER_OP_LOAD_S64:
613 {
614 struct load_op *insn = (struct load_op *) pc;
615
616 if (unlikely(insn->reg >= REG_ERROR)) {
617 fprintf(stderr, "[error] invalid register %u\n",
618 (unsigned int) insn->reg);
619 ret = -EINVAL;
620 goto end;
621 }
622 memcpy(&reg[insn->reg].v, insn->data,
623 sizeof(struct literal_numeric));
624 dbg_printf("load s64 %" PRIi64 "\n", reg[insn->reg].v);
625 reg[insn->reg].type = REG_S64;
626 next_pc += sizeof(struct load_op)
627 + sizeof(struct literal_numeric);
628 break;
629 }
630 }
631 }
632 end:
633 /* return 0 (discard) on error */
634 if (ret)
635 return 0;
636 return retval;
637 }
638
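/*
 * Patch one field reference in the runtime bytecode: look the field up
 * by name in the event description, accumulate the offsets of the
 * fields preceding it in the filter stack data, then write the
 * resolved type and 16-bit offset into the field_ref at reloc_offset.
 */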
639 static
640 int apply_field_reloc(struct ltt_event *event,
641 struct bytecode_runtime *runtime,
642 uint32_t runtime_len,
643 uint32_t reloc_offset,
644 const char *field_name)
645 {
646 const struct lttng_event_desc *desc;
647 const struct lttng_event_field *fields, *field = NULL;
648 unsigned int nr_fields, i;
649 struct field_ref *field_ref;
650 uint32_t field_offset = 0;
651
652 dbg_printf("Apply reloc: %u %s\n", reloc_offset, field_name);
653
654 /* Ensure that the reloc is within the code */
655 if (runtime_len - reloc_offset < sizeof(uint16_t))
656 return -EINVAL;
657
658 /* Lookup event by name */
659 desc = event->desc;
660 if (!desc)
661 return -EINVAL;
662 fields = desc->fields;
663 if (!fields)
664 return -EINVAL;
665 nr_fields = desc->nr_fields;
666 for (i = 0; i < nr_fields; i++) {
667 if (!strcmp(fields[i].name, field_name)) {
668 field = &fields[i];
669 break;
670 }
671 /* compute field offset */
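/*
 * Note: these sizes are assumed to mirror the layout
 * FILTER_OP_LOAD_FIELD_REF expects in filter_stack_data:
 * integers/enums as int64_t, arrays/sequences as an unsigned long
 * length followed by a pointer, strings as a pointer, floats as a
 * double.
 */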
672 switch (fields[i].type.atype) {
673 case atype_integer:
674 case atype_enum:
675 field_offset += sizeof(int64_t);
676 break;
677 case atype_array:
678 case atype_sequence:
679 field_offset += sizeof(unsigned long);
680 field_offset += sizeof(void *);
681 break;
682 case atype_string:
683 field_offset += sizeof(void *);
684 break;
685 case atype_float:
686 field_offset += sizeof(double);
break;
687 default:
688 return -EINVAL;
689 }
690 }
691 if (!field)
692 return -EINVAL;
693
694 /* Check if field offset is too large for 16-bit offset */
695 if (field_offset > FILTER_BYTECODE_MAX_LEN)
696 return -EINVAL;
697
698 /* set type */
699 field_ref = (struct field_ref *) &runtime->data[reloc_offset];
700 switch (field->type.atype) {
701 case atype_integer:
702 case atype_enum:
703 field_ref->type = FIELD_REF_S64;
705 break;
706 case atype_array:
707 case atype_sequence:
708 field_ref->type = FIELD_REF_SEQUENCE;
709 break;
710 case atype_string:
711 field_ref->type = FIELD_REF_STRING;
712 break;
713 case atype_float:
714 return -EINVAL;
715 default:
716 return -EINVAL;
717 }
718 /* set offset */
719 field_ref->offset = (uint16_t) field_offset;
720 return 0;
721 }
722
723 /*
724 * Take a bytecode with reloc table and link it to an event to create a
725 * bytecode runtime.
726 */
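/*
 * Layout of the received lttng_ust_filter_bytecode data:
 *
 *   data[0 .. reloc_offset)      instructions, copied verbatim into
 *                                the runtime
 *   data[reloc_offset .. len)    reloc table: a sequence of
 *                                { uint16_t offset; field name '\0' }
 *                                entries, one per field reference.
 */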
727 static
728 int _lttng_filter_event_link_bytecode(struct ltt_event *event,
729 struct lttng_ust_filter_bytecode *filter_bytecode)
730 {
731 int ret, offset, next_offset;
732 struct bytecode_runtime *runtime = NULL;
733 size_t runtime_alloc_len;
734
735 if (!filter_bytecode)
736 return 0;
737 /* Event is not connected to any description */
738 if (!event->desc)
739 return 0;
740 /* Bytecode already linked */
741 if (event->filter || event->filter_data)
742 return 0;
743
744 dbg_printf("Linking bytecode to event\n");
745
746 /* We don't need the reloc table in the runtime */
747 runtime_alloc_len = sizeof(*runtime) + filter_bytecode->reloc_offset;
748 runtime = zmalloc(runtime_alloc_len);
749 if (!runtime) {
750 ret = -ENOMEM;
751 goto link_error;
752 }
753 runtime->len = filter_bytecode->reloc_offset;
754 /* copy original bytecode */
755 memcpy(runtime->data, filter_bytecode->data, runtime->len);
756 /*
757 * apply relocs. Those are a uint16_t (offset in bytecode)
758 * followed by a string (field name).
759 */
760 dbg_printf("Relocating: reloc table at offset %u, total len %u\n", filter_bytecode->reloc_offset, filter_bytecode->len);
761 for (offset = filter_bytecode->reloc_offset;
762 offset < filter_bytecode->len;
763 offset = next_offset) {
764 uint16_t reloc_offset =
765 *(uint16_t *) &filter_bytecode->data[offset];
766 const char *field_name =
767 (const char *) &filter_bytecode->data[offset + sizeof(uint16_t)];
768
769 ret = apply_field_reloc(event, runtime, runtime->len, reloc_offset, field_name);
770 if (ret) {
771 goto link_error;
772 }
773 next_offset = offset + sizeof(uint16_t) + strlen(field_name) + 1;
774 }
775 event->filter_data = runtime;
776 event->filter = lttng_filter_interpret_bytecode;
777 return 0;
778
779 link_error:
780 event->filter = lttng_filter_false;
781 free(runtime);
782 return ret;
783 }
784
785 void lttng_filter_event_link_bytecode(struct ltt_event *event,
786 struct lttng_ust_filter_bytecode *filter_bytecode)
787 {
788 int ret;
789
790 ret = _lttng_filter_event_link_bytecode(event, filter_bytecode);
791 if (ret) {
792 fprintf(stderr, "[lttng filter] error linking event bytecode\n");
793 }
794 }
795
796 /*
797 * Link bytecode to all events for a wildcard. Skips events that already
798 * have a bytecode linked.
799 * We do not set each event's filter_bytecode field, because they do not
800 * own the filter_bytecode: the wildcard owns it.
801 */
802 void lttng_filter_wildcard_link_bytecode(struct session_wildcard *wildcard)
803 {
804 struct ltt_event *event;
805 int ret;
806
807 if (!wildcard->filter_bytecode)
808 return;
809
810 cds_list_for_each_entry(event, &wildcard->events, wildcard_list) {
811 if (event->filter)
812 continue;
813 ret = _lttng_filter_event_link_bytecode(event,
814 wildcard->filter_bytecode);
815 if (ret) {
816 fprintf(stderr, "[lttng filter] error linking wildcard bytecode\n");
817 }
818
819 }
820 return;
821 }
822
823 /*
824 * Need to attach filter to an event before starting tracing for the
825 * session. We own the filter_bytecode if we return success.
826 */
827 int lttng_filter_event_attach_bytecode(struct ltt_event *event,
828 struct lttng_ust_filter_bytecode *filter_bytecode)
829 {
830 if (event->chan->session->been_active)
831 return -EPERM;
832 if (event->filter_bytecode)
833 return -EEXIST;
834 event->filter_bytecode = filter_bytecode;
835 return 0;
836 }
837
838 /*
839 * Need to attach filter to a wildcard before starting tracing for the
840 * session. We own the filter_bytecode if we return success.
841 */
842 int lttng_filter_wildcard_attach_bytecode(struct session_wildcard *wildcard,
843 struct lttng_ust_filter_bytecode *filter_bytecode)
844 {
845 if (wildcard->chan->session->been_active)
846 return -EPERM;
847 if (wildcard->filter_bytecode)
848 return -EEXIST;
849 wildcard->filter_bytecode = filter_bytecode;
850 return 0;
851 }