Filter: fix bytecode validation typo
[lttng-ust.git] / liblttng-ust / lttng-filter.c
1 /*
2 * lttng-filter.c
3 *
4 * LTTng UST filter code.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #include <errno.h>
24 #include <stdio.h>
25 #include <helper.h>
26 #include <lttng/ust-events.h>
27 #include <stdint.h>
28 #include <stdlib.h>
29 #include <string.h>
30 #include <inttypes.h>
31 #include <limits.h>
32 #include <usterr-signal-safe.h>
33 #include "filter-bytecode.h"
34
35 #define NR_REG 2
36
37 #ifndef min_t
38 #define min_t(type, a, b) \
39 ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
40 #endif
41
42 #ifndef likely
43 #define likely(x) __builtin_expect(!!(x), 1)
44 #endif
45
46 #ifndef unlikely
47 #define unlikely(x) __builtin_expect(!!(x), 0)
48 #endif
49
50 #ifdef DEBUG
51 #define dbg_printf(fmt, args...) printf("[debug bytecode] " fmt, ## args)
52 #else
53 #define dbg_printf(fmt, args...) \
54 do { \
55 /* do nothing but check printf format */ \
56 if (0) \
57 printf("[debug bytecode] " fmt, ## args); \
58 } while (0)
59 #endif
60
61 /* Linked bytecode */
62 struct bytecode_runtime {
63 uint16_t len;
64 char data[0];
65 };
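/*
 * data[] holds the linked instruction stream that the interpreter below
 * walks; len is its size in bytes.  As an illustration only (hypothetical
 * filter 'msg == "foo*"'), the stream would roughly contain a
 * LOAD_FIELD_REF_STRING, a LOAD_STRING "foo*", an EQ (later specialized
 * into EQ_STRING) and a RETURN.
 */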
66
67 enum reg_type {
68 REG_S64,
69 REG_DOUBLE,
70 REG_STRING,
71 REG_TYPE_UNKNOWN,
72 };
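/*
 * REG_TYPE_UNKNOWN is only used by the validation and specialization
 * passes below, for registers that have not been loaded yet; the
 * interpreter itself never produces it.
 */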
73
74 /* Validation registers */
75 struct vreg {
76 enum reg_type type;
77 int literal; /* is string literal ? */
78 };
79
80 /* Execution registers */
81 struct reg {
82 enum reg_type type;
83 int64_t v;
84 double d;
85
86 const char *str;
87 size_t seq_len;
88 int literal; /* is string literal ? */
89 };
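/*
 * The bytecode machine has a fixed set of NR_REG (2) registers, indexed
 * by REG_R0 and REG_R1 (presumably defined in filter-bytecode.h).
 * Binary operators read both registers and leave their result in
 * REG_R0; unary operators and load instructions name their target
 * register explicitly through insn->reg.
 */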
90
91 static const char *opnames[] = {
92 [ FILTER_OP_UNKNOWN ] = "UNKNOWN",
93
94 [ FILTER_OP_RETURN ] = "RETURN",
95
96 /* binary */
97 [ FILTER_OP_MUL ] = "MUL",
98 [ FILTER_OP_DIV ] = "DIV",
99 [ FILTER_OP_MOD ] = "MOD",
100 [ FILTER_OP_PLUS ] = "PLUS",
101 [ FILTER_OP_MINUS ] = "MINUS",
102 [ FILTER_OP_RSHIFT ] = "RSHIFT",
103 [ FILTER_OP_LSHIFT ] = "LSHIFT",
104 [ FILTER_OP_BIN_AND ] = "BIN_AND",
105 [ FILTER_OP_BIN_OR ] = "BIN_OR",
106 [ FILTER_OP_BIN_XOR ] = "BIN_XOR",
107
108 /* binary comparators */
109 [ FILTER_OP_EQ ] = "EQ",
110 [ FILTER_OP_NE ] = "NE",
111 [ FILTER_OP_GT ] = "GT",
112 [ FILTER_OP_LT ] = "LT",
113 [ FILTER_OP_GE ] = "GE",
114 [ FILTER_OP_LE ] = "LE",
115
116 /* string binary comparators */
117 [ FILTER_OP_EQ_STRING ] = "EQ_STRING",
118 [ FILTER_OP_NE_STRING ] = "NE_STRING",
119 [ FILTER_OP_GT_STRING ] = "GT_STRING",
120 [ FILTER_OP_LT_STRING ] = "LT_STRING",
121 [ FILTER_OP_GE_STRING ] = "GE_STRING",
122 [ FILTER_OP_LE_STRING ] = "LE_STRING",
123
124 /* s64 binary comparators */
125 [ FILTER_OP_EQ_S64 ] = "EQ_S64",
126 [ FILTER_OP_NE_S64 ] = "NE_S64",
127 [ FILTER_OP_GT_S64 ] = "GT_S64",
128 [ FILTER_OP_LT_S64 ] = "LT_S64",
129 [ FILTER_OP_GE_S64 ] = "GE_S64",
130 [ FILTER_OP_LE_S64 ] = "LE_S64",
131
132 /* double binary comparators */
133 [ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
134 [ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
135 [ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
136 [ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
137 [ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
138 [ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",
139
140
141 /* unary */
142 [ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
143 [ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
144 [ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
145 [ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
146 [ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
147 [ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
148 [ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
149 [ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
150 [ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
151
152 /* logical */
153 [ FILTER_OP_AND ] = "AND",
154 [ FILTER_OP_OR ] = "OR",
155
156 /* load */
157 [ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
158 [ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
159 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
160 [ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
161 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
162
163 [ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
164 [ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
165 [ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
166 };
167
168 static
169 const char *print_op(enum filter_op op)
170 {
171 if (op >= NR_FILTER_OPS)
172 return "UNKNOWN";
173 else
174 return opnames[op];
175 }
176
177 /*
178 * -1: wildcard found.
179 * -2: unknown escape char.
180 * 0: normal char.
181 */
182
183 static
184 int parse_char(const char **p)
185 {
186 switch (**p) {
187 case '\\':
188 (*p)++;
189 switch (**p) {
190 case '\\':
191 case '*':
192 return 0;
193 default:
194 return -2;
195 }
196 case '*':
197 return -1;
198 default:
199 return 0;
200 }
201 }
202
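/*
 * Compare the two string registers, strcmp()-style, with wildcard
 * support for literal registers: hitting an unescaped '*' in a literal
 * makes the comparison match immediately (return 0).  For example, the
 * literal "net*" compares equal to "netif_receive_skb".  The cmp_type
 * argument is currently only informational and is not used.
 */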
203 static
204 int reg_strcmp(struct reg reg[NR_REG], const char *cmp_type)
205 {
206 const char *p = reg[REG_R0].str, *q = reg[REG_R1].str;
207 int ret;
208 int diff;
209
210 for (;;) {
211 int escaped_r0 = 0;
212
213 if (unlikely(p - reg[REG_R0].str > reg[REG_R0].seq_len || *p == '\0')) {
214 if (q - reg[REG_R1].str > reg[REG_R1].seq_len || *q == '\0')
215 diff = 0;
216 else
217 diff = -1;
218 break;
219 }
220 if (unlikely(q - reg[REG_R1].str > reg[REG_R1].seq_len || *q == '\0')) {
221 if (p - reg[REG_R0].str > reg[REG_R0].seq_len || *p == '\0')
222 diff = 0;
223 else
224 diff = 1;
225 break;
226 }
227 if (reg[REG_R0].literal) {
228 ret = parse_char(&p);
229 if (ret == -1) {
230 return 0;
231 } else if (ret == -2) {
232 escaped_r0 = 1;
233 }
234 /* else compare both char */
235 }
236 if (reg[REG_R1].literal) {
237 ret = parse_char(&q);
238 if (ret == -1) {
239 return 0;
240 } else if (ret == -2) {
241 if (!escaped_r0)
242 return -1;
243 } else {
244 if (escaped_r0)
245 return 1;
246 }
247 } else {
248 if (escaped_r0)
249 return 1;
250 }
251 diff = *p - *q;
252 if (diff != 0)
253 break;
254 p++;
255 q++;
256 }
257 return diff;
258 }
259
260 static
261 int lttng_filter_false(void *filter_data,
262 const char *filter_stack_data)
263 {
264 return 0;
265 }
266
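/*
 * Interpret linked, validated and specialized bytecode against one
 * event record.  Returns 1 if the filter accepts the event, 0 if it is
 * discarded (and also 0 on error, see the end label).  filter_stack_data
 * is the flat buffer of field values (presumably filled by the probe);
 * field ref instructions index into it with the offsets patched in by
 * apply_field_reloc().
 */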
267 static
268 int lttng_filter_interpret_bytecode(void *filter_data,
269 const char *filter_stack_data)
270 {
271 struct bytecode_runtime *bytecode = filter_data;
272 void *pc, *next_pc, *start_pc;
273 int ret = -EINVAL;
274 int retval = 0;
275 struct reg reg[NR_REG];
276
277 start_pc = &bytecode->data[0];
278 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
279 pc = next_pc) {
280 dbg_printf("Executing op %s (%u)\n",
281 print_op((unsigned int) *(filter_opcode_t *) pc),
282 (unsigned int) *(filter_opcode_t *) pc);
283 switch (*(filter_opcode_t *) pc) {
284 case FILTER_OP_UNKNOWN:
285 case FILTER_OP_LOAD_FIELD_REF:
286 default:
287 ERR("unknown bytecode op %u\n",
288 (unsigned int) *(filter_opcode_t *) pc);
289 ret = -EINVAL;
290 goto end;
291
292 case FILTER_OP_RETURN:
293 retval = !!reg[REG_R0].v;
294 ret = 0;
295 goto end;
296
297 /* binary */
298 case FILTER_OP_MUL:
299 case FILTER_OP_DIV:
300 case FILTER_OP_MOD:
301 case FILTER_OP_PLUS:
302 case FILTER_OP_MINUS:
303 case FILTER_OP_RSHIFT:
304 case FILTER_OP_LSHIFT:
305 case FILTER_OP_BIN_AND:
306 case FILTER_OP_BIN_OR:
307 case FILTER_OP_BIN_XOR:
308 ERR("unsupported bytecode op %u\n",
309 (unsigned int) *(filter_opcode_t *) pc);
310 ret = -EINVAL;
311 goto end;
312
313 case FILTER_OP_EQ:
314 case FILTER_OP_NE:
315 case FILTER_OP_GT:
316 case FILTER_OP_LT:
317 case FILTER_OP_GE:
318 case FILTER_OP_LE:
319 ERR("unsupported non-specialized bytecode op %u\n",
320 (unsigned int) *(filter_opcode_t *) pc);
321 ret = -EINVAL;
322 goto end;
323
324 case FILTER_OP_EQ_STRING:
325 {
326 reg[REG_R0].v = (reg_strcmp(reg, "==") == 0);
327 reg[REG_R0].type = REG_S64;
328 next_pc += sizeof(struct binary_op);
329 break;
330 }
331 case FILTER_OP_NE_STRING:
332 {
333 reg[REG_R0].v = (reg_strcmp(reg, "!=") != 0);
334 reg[REG_R0].type = REG_S64;
335 next_pc += sizeof(struct binary_op);
336 break;
337 }
338 case FILTER_OP_GT_STRING:
339 {
340 reg[REG_R0].v = (reg_strcmp(reg, ">") > 0);
341 reg[REG_R0].type = REG_S64;
342 next_pc += sizeof(struct binary_op);
343 break;
344 }
345 case FILTER_OP_LT_STRING:
346 {
347 reg[REG_R0].v = (reg_strcmp(reg, "<") < 0);
348 reg[REG_R0].type = REG_S64;
349 next_pc += sizeof(struct binary_op);
350 break;
351 }
352 case FILTER_OP_GE_STRING:
353 {
354 reg[REG_R0].v = (reg_strcmp(reg, ">=") >= 0);
355 reg[REG_R0].type = REG_S64;
356 next_pc += sizeof(struct binary_op);
357 break;
358 }
359 case FILTER_OP_LE_STRING:
360 {
361 reg[REG_R0].v = (reg_strcmp(reg, "<=") <= 0);
362 reg[REG_R0].type = REG_S64;
363 next_pc += sizeof(struct binary_op);
364 break;
365 }
366
367 case FILTER_OP_EQ_S64:
368 {
369 reg[REG_R0].v = (reg[REG_R0].v == reg[REG_R1].v);
370 reg[REG_R0].type = REG_S64;
371 next_pc += sizeof(struct binary_op);
372 break;
373 }
374 case FILTER_OP_NE_S64:
375 {
376 reg[REG_R0].v = (reg[REG_R0].v != reg[REG_R1].v);
377 reg[REG_R0].type = REG_S64;
378 next_pc += sizeof(struct binary_op);
379 break;
380 }
381 case FILTER_OP_GT_S64:
382 {
383 reg[REG_R0].v = (reg[REG_R0].v > reg[REG_R1].v);
384 reg[REG_R0].type = REG_S64;
385 next_pc += sizeof(struct binary_op);
386 break;
387 }
388 case FILTER_OP_LT_S64:
389 {
390 reg[REG_R0].v = (reg[REG_R0].v < reg[REG_R1].v);
391 reg[REG_R0].type = REG_S64;
392 next_pc += sizeof(struct binary_op);
393 break;
394 }
395 case FILTER_OP_GE_S64:
396 {
397 reg[REG_R0].v = (reg[REG_R0].v >= reg[REG_R1].v);
398 reg[REG_R0].type = REG_S64;
399 next_pc += sizeof(struct binary_op);
400 break;
401 }
402 case FILTER_OP_LE_S64:
403 {
404 reg[REG_R0].v = (reg[REG_R0].v <= reg[REG_R1].v);
405 reg[REG_R0].type = REG_S64;
406 next_pc += sizeof(struct binary_op);
407 break;
408 }
409
410 case FILTER_OP_EQ_DOUBLE:
411 {
412 if (unlikely(reg[REG_R0].type == REG_S64))
413 reg[REG_R0].d = (double) reg[REG_R0].v;
414 else if (unlikely(reg[REG_R1].type == REG_S64))
415 reg[REG_R1].d = (double) reg[REG_R1].v;
416 reg[REG_R0].v = (reg[REG_R0].d == reg[REG_R1].d);
417 reg[REG_R0].type = REG_S64;
418 next_pc += sizeof(struct binary_op);
419 break;
420 }
421 case FILTER_OP_NE_DOUBLE:
422 {
423 if (unlikely(reg[REG_R0].type == REG_S64))
424 reg[REG_R0].d = (double) reg[REG_R0].v;
425 else if (unlikely(reg[REG_R1].type == REG_S64))
426 reg[REG_R1].d = (double) reg[REG_R1].v;
427 reg[REG_R0].v = (reg[REG_R0].d != reg[REG_R1].d);
428 reg[REG_R0].type = REG_S64;
429 next_pc += sizeof(struct binary_op);
430 break;
431 }
432 case FILTER_OP_GT_DOUBLE:
433 {
434 if (unlikely(reg[REG_R0].type == REG_S64))
435 reg[REG_R0].d = (double) reg[REG_R0].v;
436 else if (unlikely(reg[REG_R1].type == REG_S64))
437 reg[REG_R1].d = (double) reg[REG_R1].v;
438 reg[REG_R0].v = (reg[REG_R0].d > reg[REG_R1].d);
439 reg[REG_R0].type = REG_S64;
440 next_pc += sizeof(struct binary_op);
441 break;
442 }
443 case FILTER_OP_LT_DOUBLE:
444 {
445 if (unlikely(reg[REG_R0].type == REG_S64))
446 reg[REG_R0].d = (double) reg[REG_R0].v;
447 else if (unlikely(reg[REG_R1].type == REG_S64))
448 reg[REG_R1].d = (double) reg[REG_R1].v;
449 reg[REG_R0].v = (reg[REG_R0].d < reg[REG_R1].d);
450 reg[REG_R0].type = REG_S64;
451 next_pc += sizeof(struct binary_op);
452 break;
453 }
454 case FILTER_OP_GE_DOUBLE:
455 {
456 if (unlikely(reg[REG_R0].type == REG_S64))
457 reg[REG_R0].d = (double) reg[REG_R0].v;
458 else if (unlikely(reg[REG_R1].type == REG_S64))
459 reg[REG_R1].d = (double) reg[REG_R1].v;
460 reg[REG_R0].v = (reg[REG_R0].d >= reg[REG_R1].d);
461 reg[REG_R0].type = REG_S64;
462 next_pc += sizeof(struct binary_op);
463 break;
464 }
465 case FILTER_OP_LE_DOUBLE:
466 {
467 if (unlikely(reg[REG_R0].type == REG_S64))
468 reg[REG_R0].d = (double) reg[REG_R0].v;
469 else if (unlikely(reg[REG_R1].type == REG_S64))
470 reg[REG_R1].d = (double) reg[REG_R1].v;
471 reg[REG_R0].v = (reg[REG_R0].d <= reg[REG_R1].d);
472 reg[REG_R0].type = REG_S64;
473 next_pc += sizeof(struct binary_op);
474 break;
475 }
476
477 /* unary */
478 case FILTER_OP_UNARY_PLUS:
479 {
480 next_pc += sizeof(struct unary_op);
481 break;
482 }
483 case FILTER_OP_UNARY_MINUS:
484 {
485 struct unary_op *insn = (struct unary_op *) pc;
486
487 switch (reg[insn->reg].type) {
488 default:
489 ERR("unknown register type\n");
490 ret = -EINVAL;
491 goto end;
492
493 case REG_STRING:
494 ERR("Unary minus can only be applied to numeric or floating point registers\n");
495 ret = -EINVAL;
496 goto end;
497 case REG_S64:
498 reg[insn->reg].v = -reg[insn->reg].v;
499 break;
500 case REG_DOUBLE:
501 reg[insn->reg].d = -reg[insn->reg].d;
502 break;
503 }
504 next_pc += sizeof(struct unary_op);
505 break;
506 }
507 case FILTER_OP_UNARY_NOT:
508 {
509 struct unary_op *insn = (struct unary_op *) pc;
510
511 switch (reg[insn->reg].type) {
512 default:
513 ERR("unknown register type\n");
514 ret = -EINVAL;
515 goto end;
516
517 case REG_STRING:
518 ERR("Unary not can only be applied to numeric or floating point registers\n");
519 ret = -EINVAL;
520 goto end;
521 case REG_S64:
522 reg[insn->reg].v = !reg[insn->reg].v;
523 break;
524 case REG_DOUBLE:
525 reg[insn->reg].d = !reg[insn->reg].d;
526 break;
527 }
529 next_pc += sizeof(struct unary_op);
530 break;
531 }
532 /* logical */
533 case FILTER_OP_AND:
534 {
535 struct logical_op *insn = (struct logical_op *) pc;
536
537 /* If REG_R0 is 0, skip and evaluate to 0 */
538 if ((reg[REG_R0].type == REG_S64 && reg[REG_R0].v == 0)
539 || (reg[REG_R0].type == REG_DOUBLE && reg[REG_R0].d == 0.0)) {
reg[REG_R0].v = 0;	/* normalize result to 0, mirroring the OR case */
540 dbg_printf("Jumping to bytecode offset %u\n",
541 (unsigned int) insn->skip_offset);
542 next_pc = start_pc + insn->skip_offset;
543 } else {
544 next_pc += sizeof(struct logical_op);
545 }
546 break;
547 }
548 case FILTER_OP_OR:
549 {
550 struct logical_op *insn = (struct logical_op *) pc;
551
552 /* If REG_R0 is nonzero, skip and evaluate to 1 */
553
554 if ((reg[REG_R0].type == REG_S64 && reg[REG_R0].v != 0)
555 || (reg[REG_R0].type == REG_DOUBLE && reg[REG_R0].d != 0.0)) {
556 reg[REG_R0].v = 1;
557 dbg_printf("Jumping to bytecode offset %u\n",
558 (unsigned int) insn->skip_offset);
559 next_pc = start_pc + insn->skip_offset;
560 } else {
561 next_pc += sizeof(struct logical_op);
562 }
563 break;
564 }
565
566 /* load */
567 case FILTER_OP_LOAD_FIELD_REF_STRING:
568 {
569 struct load_op *insn = (struct load_op *) pc;
570 struct field_ref *ref = (struct field_ref *) insn->data;
571
572 dbg_printf("load field ref offset %u type string\n",
573 ref->offset);
574 reg[insn->reg].str =
575 *(const char * const *) &filter_stack_data[ref->offset];
576 reg[insn->reg].type = REG_STRING;
577 reg[insn->reg].seq_len = UINT_MAX;
578 reg[insn->reg].literal = 0;
579 dbg_printf("ref load string %s\n", reg[insn->reg].str);
580 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
581 break;
582 }
583
584 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
585 {
586 struct load_op *insn = (struct load_op *) pc;
587 struct field_ref *ref = (struct field_ref *) insn->data;
588
589 dbg_printf("load field ref offset %u type sequence\n",
590 ref->offset);
591 reg[insn->reg].seq_len =
592 *(unsigned long *) &filter_stack_data[ref->offset];
593 reg[insn->reg].str =
594 *(const char **) (&filter_stack_data[ref->offset
595 + sizeof(unsigned long)]);
596 reg[insn->reg].type = REG_STRING;
597 reg[insn->reg].literal = 0;
598 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
599 break;
600 }
601
602 case FILTER_OP_LOAD_FIELD_REF_S64:
603 {
604 struct load_op *insn = (struct load_op *) pc;
605 struct field_ref *ref = (struct field_ref *) insn->data;
606
607 dbg_printf("load field ref offset %u type s64\n",
608 ref->offset);
609 memcpy(&reg[insn->reg].v, &filter_stack_data[ref->offset],
610 sizeof(struct literal_numeric));
611 reg[insn->reg].type = REG_S64;
612 reg[insn->reg].literal = 0;
613 dbg_printf("ref load s64 %" PRIi64 "\n", reg[insn->reg].v);
614 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
615 break;
616 }
617
618 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
619 {
620 struct load_op *insn = (struct load_op *) pc;
621 struct field_ref *ref = (struct field_ref *) insn->data;
622
623 dbg_printf("load field ref offset %u type double\n",
624 ref->offset);
625 memcpy(&reg[insn->reg].d, &filter_stack_data[ref->offset],
626 sizeof(struct literal_double));
627 reg[insn->reg].type = REG_DOUBLE;
628 reg[insn->reg].literal = 0;
629 dbg_printf("ref load double %g\n", reg[insn->reg].d);
630 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
631 break;
632 }
633
634 case FILTER_OP_LOAD_STRING:
635 {
636 struct load_op *insn = (struct load_op *) pc;
637
638 dbg_printf("load string %s\n", insn->data);
639 reg[insn->reg].str = insn->data;
640 reg[insn->reg].type = REG_STRING;
641 reg[insn->reg].seq_len = UINT_MAX;
642 reg[insn->reg].literal = 1;
643 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
644 break;
645 }
646
647 case FILTER_OP_LOAD_S64:
648 {
649 struct load_op *insn = (struct load_op *) pc;
650
651 memcpy(&reg[insn->reg].v, insn->data,
652 sizeof(struct literal_numeric));
653 dbg_printf("load s64 %" PRIi64 "\n", reg[insn->reg].v);
654 reg[insn->reg].type = REG_S64;
655 reg[insn->reg].literal = 1;
656 next_pc += sizeof(struct load_op)
657 + sizeof(struct literal_numeric);
658 break;
659 }
660
661 case FILTER_OP_LOAD_DOUBLE:
662 {
663 struct load_op *insn = (struct load_op *) pc;
664
665 memcpy(&reg[insn->reg].d, insn->data,
666 sizeof(struct literal_double));
667 dbg_printf("load s64 %g\n", reg[insn->reg].d);
668 reg[insn->reg].type = REG_DOUBLE;
669 reg[insn->reg].literal = 1;
670 next_pc += sizeof(struct load_op)
671 + sizeof(struct literal_double);
672 break;
673 }
674 }
675 }
676 end:
677 /* return 0 (discard) on error */
678 if (ret)
679 return 0;
680 return retval;
681 }
682
683 static
684 int bin_op_compare_check(struct vreg reg[NR_REG], const char *str)
685 {
686 switch (reg[REG_R0].type) {
687 default:
688 goto error_unknown;
689
690 case REG_STRING:
691 switch (reg[REG_R1].type) {
692 default:
693 goto error_unknown;
694
695 case REG_STRING:
696 break;
697 case REG_S64:
698 case REG_DOUBLE:
699 goto error_mismatch;
700 }
701 break;
702 case REG_S64:
703 case REG_DOUBLE:
704 switch (reg[REG_R1].type) {
705 default:
706 goto error_unknown;
707
708 case REG_STRING:
709 goto error_mismatch;
710
711 case REG_S64:
712 case REG_DOUBLE:
713 break;
714 }
715 break;
716 }
717 return 0;
718
719 error_unknown:
720
721 return -EINVAL;
722 error_mismatch:
723 ERR("type mismatch for '%s' binary operator\n", str);
724 return -EINVAL;
725 }
726
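/*
 * Validate a linked bytecode image before specializing and running it:
 * walk each instruction once, track the static type of every register
 * in reg[], reject type mismatches, out-of-range register numbers and
 * generic LOAD_FIELD_REF opcodes, and refuse backward jumps so the
 * bytecode cannot loop.
 */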
727 static
728 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
729 {
730 void *pc, *next_pc, *start_pc;
731 int ret = -EINVAL;
732 struct vreg reg[NR_REG];
733 int i;
734
735 for (i = 0; i < NR_REG; i++) {
736 reg[i].type = REG_TYPE_UNKNOWN;
737 reg[i].literal = 0;
738 }
739
740 start_pc = &bytecode->data[0];
741 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
742 pc = next_pc) {
743 if (unlikely(pc >= start_pc + bytecode->len)) {
744 ERR("filter bytecode overflow\n");
745 ret = -EINVAL;
746 goto end;
747 }
748 dbg_printf("Validating op %s (%u)\n",
749 print_op((unsigned int) *(filter_opcode_t *) pc),
750 (unsigned int) *(filter_opcode_t *) pc);
751 switch (*(filter_opcode_t *) pc) {
752 case FILTER_OP_UNKNOWN:
753 default:
754 ERR("unknown bytecode op %u\n",
755 (unsigned int) *(filter_opcode_t *) pc);
756 ret = -EINVAL;
757 goto end;
758
759 case FILTER_OP_RETURN:
760 ret = 0;
761 goto end;
762
763 /* binary */
764 case FILTER_OP_MUL:
765 case FILTER_OP_DIV:
766 case FILTER_OP_MOD:
767 case FILTER_OP_PLUS:
768 case FILTER_OP_MINUS:
769 case FILTER_OP_RSHIFT:
770 case FILTER_OP_LSHIFT:
771 case FILTER_OP_BIN_AND:
772 case FILTER_OP_BIN_OR:
773 case FILTER_OP_BIN_XOR:
774 ERR("unsupported bytecode op %u\n",
775 (unsigned int) *(filter_opcode_t *) pc);
776 ret = -EINVAL;
777 goto end;
778
779 case FILTER_OP_EQ:
780 {
781 ret = bin_op_compare_check(reg, "==");
782 if (ret)
783 goto end;
784 reg[REG_R0].type = REG_S64;
785 next_pc += sizeof(struct binary_op);
786 break;
787 }
788 case FILTER_OP_NE:
789 {
790 ret = bin_op_compare_check(reg, "!=");
791 if (ret)
792 goto end;
793 reg[REG_R0].type = REG_S64;
794 next_pc += sizeof(struct binary_op);
795 break;
796 }
797 case FILTER_OP_GT:
798 {
799 ret = bin_op_compare_check(reg, ">");
800 if (ret)
801 goto end;
802 reg[REG_R0].type = REG_S64;
803 next_pc += sizeof(struct binary_op);
804 break;
805 }
806 case FILTER_OP_LT:
807 {
808 ret = bin_op_compare_check(reg, "<");
809 if (ret)
810 goto end;
811 reg[REG_R0].type = REG_S64;
812 next_pc += sizeof(struct binary_op);
813 break;
814 }
815 case FILTER_OP_GE:
816 {
817 ret = bin_op_compare_check(reg, ">=");
818 if (ret)
819 goto end;
820 reg[REG_R0].type = REG_S64;
821 next_pc += sizeof(struct binary_op);
822 break;
823 }
824 case FILTER_OP_LE:
825 {
826 ret = bin_op_compare_check(reg, "<=");
827 if (ret)
828 goto end;
829 reg[REG_R0].type = REG_S64;
830 next_pc += sizeof(struct binary_op);
831 break;
832 }
833
834 case FILTER_OP_EQ_STRING:
835 case FILTER_OP_NE_STRING:
836 case FILTER_OP_GT_STRING:
837 case FILTER_OP_LT_STRING:
838 case FILTER_OP_GE_STRING:
839 case FILTER_OP_LE_STRING:
840 {
841 if (reg[REG_R0].type != REG_STRING
842 || reg[REG_R1].type != REG_STRING) {
843 ERR("Unexpected register type for string comparator\n");
844 ret = -EINVAL;
845 goto end;
846 }
847 reg[REG_R0].type = REG_S64;
848 next_pc += sizeof(struct binary_op);
849 break;
850 }
851
852 case FILTER_OP_EQ_S64:
853 case FILTER_OP_NE_S64:
854 case FILTER_OP_GT_S64:
855 case FILTER_OP_LT_S64:
856 case FILTER_OP_GE_S64:
857 case FILTER_OP_LE_S64:
858 {
859 if (reg[REG_R0].type != REG_S64
860 || reg[REG_R1].type != REG_S64) {
861 ERR("Unexpected register type for s64 comparator\n");
862 ret = -EINVAL;
863 goto end;
864 }
865 reg[REG_R0].type = REG_S64;
866 next_pc += sizeof(struct binary_op);
867 break;
868 }
869
870 case FILTER_OP_EQ_DOUBLE:
871 case FILTER_OP_NE_DOUBLE:
872 case FILTER_OP_GT_DOUBLE:
873 case FILTER_OP_LT_DOUBLE:
874 case FILTER_OP_GE_DOUBLE:
875 case FILTER_OP_LE_DOUBLE:
876 {
877 if ((reg[REG_R0].type != REG_DOUBLE && reg[REG_R0].type != REG_S64)
878 || (reg[REG_R1].type != REG_DOUBLE && reg[REG_R1].type != REG_S64)) {
879 ERR("Unexpected register type for double comparator\n");
880 ret = -EINVAL;
881 goto end;
882 }
883 reg[REG_R0].type = REG_S64;
884 next_pc += sizeof(struct binary_op);
885 break;
886 }
887
888 /* unary */
889 case FILTER_OP_UNARY_PLUS:
890 case FILTER_OP_UNARY_MINUS:
891 case FILTER_OP_UNARY_NOT:
892 {
893 struct unary_op *insn = (struct unary_op *) pc;
894
895 if (unlikely(insn->reg >= REG_ERROR)) {
896 ERR("invalid register %u\n",
897 (unsigned int) insn->reg);
898 ret = -EINVAL;
899 goto end;
900 }
901 switch (reg[insn->reg].type) {
902 default:
903 ERR("unknown register type\n");
904 ret = -EINVAL;
905 goto end;
906
907 case REG_STRING:
908 ERR("Unary op can only be applied to numeric or floating point registers\n");
909 ret = -EINVAL;
910 goto end;
911 case REG_S64:
912 break;
913 case REG_DOUBLE:
914 break;
915 }
916 next_pc += sizeof(struct unary_op);
917 break;
918 }
919
920 case FILTER_OP_UNARY_PLUS_S64:
921 case FILTER_OP_UNARY_MINUS_S64:
922 case FILTER_OP_UNARY_NOT_S64:
923 {
924 struct unary_op *insn = (struct unary_op *) pc;
925
926 if (unlikely(insn->reg >= REG_ERROR)) {
927 ERR("invalid register %u\n",
928 (unsigned int) insn->reg);
929 ret = -EINVAL;
930 goto end;
931 }
932 if (reg[insn->reg].type != REG_S64) {
933 ERR("Invalid register type\n");
934 ret = -EINVAL;
935 goto end;
936 }
937 next_pc += sizeof(struct unary_op);
938 break;
939 }
940
941 case FILTER_OP_UNARY_PLUS_DOUBLE:
942 case FILTER_OP_UNARY_MINUS_DOUBLE:
943 case FILTER_OP_UNARY_NOT_DOUBLE:
944 {
945 struct unary_op *insn = (struct unary_op *) pc;
946
947 if (unlikely(insn->reg >= REG_ERROR)) {
948 ERR("invalid register %u\n",
949 (unsigned int) insn->reg);
950 ret = -EINVAL;
951 goto end;
952 }
953 if (reg[insn->reg].type != REG_DOUBLE) {
954 ERR("Invalid register type\n");
955 ret = -EINVAL;
956 goto end;
957 }
958 next_pc += sizeof(struct unary_op);
959 break;
960 }
961
962 /* logical */
963 case FILTER_OP_AND:
964 case FILTER_OP_OR:
965 {
966 struct logical_op *insn = (struct logical_op *) pc;
967
968 if (unlikely(reg[REG_R0].type == REG_TYPE_UNKNOWN
969 || reg[REG_R1].type == REG_TYPE_UNKNOWN
970 || reg[REG_R0].type == REG_STRING
971 || reg[REG_R1].type == REG_STRING)) {
972 ERR("Logical comparator can only be applied to numeric and floating point registers\n");
973 ret = -EINVAL;
974 goto end;
975 }
976
977 dbg_printf("Validate jumping to bytecode offset %u\n",
978 (unsigned int) insn->skip_offset);
979 if (unlikely(start_pc + insn->skip_offset <= pc)) {
980 ERR("Loops are not allowed in bytecode\n");
981 ret = -EINVAL;
982 goto end;
983 }
984 next_pc += sizeof(struct logical_op);
985 break;
986 }
987
988 /* load */
989 case FILTER_OP_LOAD_FIELD_REF:
990 {
991 ERR("Unknown field ref type\n");
992 ret = -EINVAL;
993 goto end;
994 }
995 case FILTER_OP_LOAD_FIELD_REF_STRING:
996 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
997 {
998 struct load_op *insn = (struct load_op *) pc;
999 struct field_ref *ref = (struct field_ref *) insn->data;
1000
1001 if (unlikely(insn->reg >= REG_ERROR)) {
1002 ERR("invalid register %u\n",
1003 (unsigned int) insn->reg);
1004 ret = -EINVAL;
1005 goto end;
1006 }
1007 dbg_printf("Validate load field ref offset %u type string\n",
1008 ref->offset);
1009 reg[insn->reg].type = REG_STRING;
1010 reg[insn->reg].literal = 0;
1011 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1012 break;
1013 }
1014 case FILTER_OP_LOAD_FIELD_REF_S64:
1015 {
1016 struct load_op *insn = (struct load_op *) pc;
1017 struct field_ref *ref = (struct field_ref *) insn->data;
1018
1019 if (unlikely(insn->reg >= REG_ERROR)) {
1020 ERR("invalid register %u\n",
1021 (unsigned int) insn->reg);
1022 ret = -EINVAL;
1023 goto end;
1024 }
1025 dbg_printf("Validate load field ref offset %u type s64\n",
1026 ref->offset);
1027 reg[insn->reg].type = REG_S64;
1028 reg[insn->reg].literal = 0;
1029 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1030 break;
1031 }
1032 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1033 {
1034 struct load_op *insn = (struct load_op *) pc;
1035 struct field_ref *ref = (struct field_ref *) insn->data;
1036
1037 if (unlikely(insn->reg >= REG_ERROR)) {
1038 ERR("invalid register %u\n",
1039 (unsigned int) insn->reg);
1040 ret = -EINVAL;
1041 goto end;
1042 }
1043 dbg_printf("Validate load field ref offset %u type double\n",
1044 ref->offset);
1045 reg[insn->reg].type = REG_DOUBLE;
1046 reg[insn->reg].literal = 0;
1047 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1048 break;
1049 }
1050
1051 case FILTER_OP_LOAD_STRING:
1052 {
1053 struct load_op *insn = (struct load_op *) pc;
1054
1055 if (unlikely(insn->reg >= REG_ERROR)) {
1056 ERR("invalid register %u\n",
1057 (unsigned int) insn->reg);
1058 ret = -EINVAL;
1059 goto end;
1060 }
1061 reg[insn->reg].type = REG_STRING;
1062 reg[insn->reg].literal = 1;
1063 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1064 break;
1065 }
1066
1067 case FILTER_OP_LOAD_S64:
1068 {
1069 struct load_op *insn = (struct load_op *) pc;
1070
1071 if (unlikely(insn->reg >= REG_ERROR)) {
1072 ERR("invalid register %u\n",
1073 (unsigned int) insn->reg);
1074 ret = -EINVAL;
1075 goto end;
1076 }
1077 reg[insn->reg].type = REG_S64;
1078 reg[insn->reg].literal = 1;
1079 next_pc += sizeof(struct load_op)
1080 + sizeof(struct literal_numeric);
1081 break;
1082 }
1083
1084 case FILTER_OP_LOAD_DOUBLE:
1085 {
1086 struct load_op *insn = (struct load_op *) pc;
1087
1088 if (unlikely(insn->reg >= REG_ERROR)) {
1089 ERR("invalid register %u\n",
1090 (unsigned int) insn->reg);
1091 ret = -EINVAL;
1092 goto end;
1093 }
1094 reg[insn->reg].type = REG_DOUBLE;
1095 reg[insn->reg].literal = 1;
1096 next_pc += sizeof(struct load_op)
1097 + sizeof(struct literal_double);
1098 break;
1099 }
1100 }
1101 }
1102 end:
1103 return ret;
1104 }
1105
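/*
 * Specialize a validated bytecode image: generic comparators (EQ, NE,
 * ...) and unary operators are rewritten in place into typed variants
 * (e.g. EQ_S64, EQ_STRING, UNARY_NOT_DOUBLE) according to the register
 * types tracked by this pass, so the interpreter does not need to
 * re-discover operand types for each event.
 */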
1106 static
1107 int lttng_filter_specialize_bytecode(struct bytecode_runtime *bytecode)
1108 {
1109 void *pc, *next_pc, *start_pc;
1110 int ret = -EINVAL;
1111 struct vreg reg[NR_REG];
1112 int i;
1113
1114 for (i = 0; i < NR_REG; i++) {
1115 reg[i].type = REG_TYPE_UNKNOWN;
1116 reg[i].literal = 0;
1117 }
1118
1119 start_pc = &bytecode->data[0];
1120 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1121 pc = next_pc) {
1122 switch (*(filter_opcode_t *) pc) {
1123 case FILTER_OP_UNKNOWN:
1124 default:
1125 ERR("unknown bytecode op %u\n",
1126 (unsigned int) *(filter_opcode_t *) pc);
1127 ret = -EINVAL;
1128 goto end;
1129
1130 case FILTER_OP_RETURN:
1131 ret = 0;
1132 goto end;
1133
1134 /* binary */
1135 case FILTER_OP_MUL:
1136 case FILTER_OP_DIV:
1137 case FILTER_OP_MOD:
1138 case FILTER_OP_PLUS:
1139 case FILTER_OP_MINUS:
1140 case FILTER_OP_RSHIFT:
1141 case FILTER_OP_LSHIFT:
1142 case FILTER_OP_BIN_AND:
1143 case FILTER_OP_BIN_OR:
1144 case FILTER_OP_BIN_XOR:
1145 ERR("unsupported bytecode op %u\n",
1146 (unsigned int) *(filter_opcode_t *) pc);
1147 ret = -EINVAL;
1148 goto end;
1149
1150 case FILTER_OP_EQ:
1151 {
1152 struct binary_op *insn = (struct binary_op *) pc;
1153
1154 switch(reg[REG_R0].type) {
1155 default:
1156 ERR("unknown register type\n");
1157 ret = -EINVAL;
1158 goto end;
1159
1160 case REG_STRING:
1161 insn->op = FILTER_OP_EQ_STRING;
1162 break;
1163 case REG_S64:
1164 if (reg[REG_R1].type == REG_S64)
1165 insn->op = FILTER_OP_EQ_S64;
1166 else
1167 insn->op = FILTER_OP_EQ_DOUBLE;
1168 break;
1169 case REG_DOUBLE:
1170 insn->op = FILTER_OP_EQ_DOUBLE;
1171 break;
1172 }
1173 reg[REG_R0].type = REG_S64;
1174 next_pc += sizeof(struct binary_op);
1175 break;
1176 }
1177
1178 case FILTER_OP_NE:
1179 {
1180 struct binary_op *insn = (struct binary_op *) pc;
1181
1182 switch(reg[REG_R0].type) {
1183 default:
1184 ERR("unknown register type\n");
1185 ret = -EINVAL;
1186 goto end;
1187
1188 case REG_STRING:
1189 insn->op = FILTER_OP_NE_STRING;
1190 break;
1191 case REG_S64:
1192 if (reg[REG_R1].type == REG_S64)
1193 insn->op = FILTER_OP_NE_S64;
1194 else
1195 insn->op = FILTER_OP_NE_DOUBLE;
1196 break;
1197 case REG_DOUBLE:
1198 insn->op = FILTER_OP_NE_DOUBLE;
1199 break;
1200 }
1201 reg[REG_R0].type = REG_S64;
1202 next_pc += sizeof(struct binary_op);
1203 break;
1204 }
1205
1206 case FILTER_OP_GT:
1207 {
1208 struct binary_op *insn = (struct binary_op *) pc;
1209
1210 switch(reg[REG_R0].type) {
1211 default:
1212 ERR("unknown register type\n");
1213 ret = -EINVAL;
1214 goto end;
1215
1216 case REG_STRING:
1217 insn->op = FILTER_OP_GT_STRING;
1218 break;
1219 case REG_S64:
1220 if (reg[REG_R1].type == REG_S64)
1221 insn->op = FILTER_OP_GT_S64;
1222 else
1223 insn->op = FILTER_OP_GT_DOUBLE;
1224 break;
1225 case REG_DOUBLE:
1226 insn->op = FILTER_OP_GT_DOUBLE;
1227 break;
1228 }
1229 reg[REG_R0].type = REG_S64;
1230 next_pc += sizeof(struct binary_op);
1231 break;
1232 }
1233
1234 case FILTER_OP_LT:
1235 {
1236 struct binary_op *insn = (struct binary_op *) pc;
1237
1238 switch(reg[REG_R0].type) {
1239 default:
1240 ERR("unknown register type\n");
1241 ret = -EINVAL;
1242 goto end;
1243
1244 case REG_STRING:
1245 insn->op = FILTER_OP_LT_STRING;
1246 break;
1247 case REG_S64:
1248 if (reg[REG_R1].type == REG_S64)
1249 insn->op = FILTER_OP_LT_S64;
1250 else
1251 insn->op = FILTER_OP_LT_DOUBLE;
1252 break;
1253 case REG_DOUBLE:
1254 insn->op = FILTER_OP_LT_DOUBLE;
1255 break;
1256 }
1257 reg[REG_R0].type = REG_S64;
1258 next_pc += sizeof(struct binary_op);
1259 break;
1260 }
1261
1262 case FILTER_OP_GE:
1263 {
1264 struct binary_op *insn = (struct binary_op *) pc;
1265
1266 switch(reg[REG_R0].type) {
1267 default:
1268 ERR("unknown register type\n");
1269 ret = -EINVAL;
1270 goto end;
1271
1272 case REG_STRING:
1273 insn->op = FILTER_OP_GE_STRING;
1274 break;
1275 case REG_S64:
1276 if (reg[REG_R1].type == REG_S64)
1277 insn->op = FILTER_OP_GE_S64;
1278 else
1279 insn->op = FILTER_OP_GE_DOUBLE;
1280 break;
1281 case REG_DOUBLE:
1282 insn->op = FILTER_OP_GE_DOUBLE;
1283 break;
1284 }
1285 reg[REG_R0].type = REG_S64;
1286 next_pc += sizeof(struct binary_op);
1287 break;
1288 }
1289 case FILTER_OP_LE:
1290 {
1291 struct binary_op *insn = (struct binary_op *) pc;
1292
1293 switch(reg[REG_R0].type) {
1294 default:
1295 ERR("unknown register type\n");
1296 ret = -EINVAL;
1297 goto end;
1298
1299 case REG_STRING:
1300 insn->op = FILTER_OP_LE_STRING;
1301 break;
1302 case REG_S64:
1303 if (reg[REG_R1].type == REG_S64)
1304 insn->op = FILTER_OP_LE_S64;
1305 else
1306 insn->op = FILTER_OP_LE_DOUBLE;
1307 break;
1308 case REG_DOUBLE:
1309 insn->op = FILTER_OP_LE_DOUBLE;
1310 break;
1311 }
1312 reg[REG_R0].type = REG_S64;
1313 next_pc += sizeof(struct binary_op);
1314 break;
1315 }
1316
1317 case FILTER_OP_EQ_STRING:
1318 case FILTER_OP_NE_STRING:
1319 case FILTER_OP_GT_STRING:
1320 case FILTER_OP_LT_STRING:
1321 case FILTER_OP_GE_STRING:
1322 case FILTER_OP_LE_STRING:
1323 case FILTER_OP_EQ_S64:
1324 case FILTER_OP_NE_S64:
1325 case FILTER_OP_GT_S64:
1326 case FILTER_OP_LT_S64:
1327 case FILTER_OP_GE_S64:
1328 case FILTER_OP_LE_S64:
1329 case FILTER_OP_EQ_DOUBLE:
1330 case FILTER_OP_NE_DOUBLE:
1331 case FILTER_OP_GT_DOUBLE:
1332 case FILTER_OP_LT_DOUBLE:
1333 case FILTER_OP_GE_DOUBLE:
1334 case FILTER_OP_LE_DOUBLE:
1335 {
1336 reg[REG_R0].type = REG_S64;
1337 next_pc += sizeof(struct binary_op);
1338 break;
1339 }
1340
1341
1342 /* unary */
1343 case FILTER_OP_UNARY_PLUS:
1344 {
1345 struct unary_op *insn = (struct unary_op *) pc;
1346
1347 switch(reg[insn->reg].type) {
1348 default:
1349 ERR("unknown register type\n");
1350 ret = -EINVAL;
1351 goto end;
1352
1353 case REG_S64:
1354 insn->op = FILTER_OP_UNARY_PLUS_S64;
1355 break;
1356 case REG_DOUBLE:
1357 insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
1358 break;
1359 }
1360 break;
1361 }
1362
1363 case FILTER_OP_UNARY_MINUS:
1364 {
1365 struct unary_op *insn = (struct unary_op *) pc;
1366
1367 switch(reg[insn->reg].type) {
1368 default:
1369 ERR("unknown register type\n");
1370 ret = -EINVAL;
1371 goto end;
1372
1373 case REG_S64:
1374 insn->op = FILTER_OP_UNARY_MINUS_S64;
1375 break;
1376 case REG_DOUBLE:
1377 insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
1378 break;
1379 }
1380 break;
1381 }
1382
1383 case FILTER_OP_UNARY_NOT:
1384 {
1385 struct unary_op *insn = (struct unary_op *) pc;
1386
1387 switch(reg[insn->reg].type) {
1388 default:
1389 ERR("unknown register type\n");
1390 ret = -EINVAL;
1391 goto end;
1392
1393 case REG_S64:
1394 insn->op = FILTER_OP_UNARY_NOT_S64;
1395 break;
1396 case REG_DOUBLE:
1397 insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
1398 break;
1399 }
1400 break;
1401 }
1402
1403 case FILTER_OP_UNARY_PLUS_S64:
1404 case FILTER_OP_UNARY_MINUS_S64:
1405 case FILTER_OP_UNARY_NOT_S64:
1406 case FILTER_OP_UNARY_PLUS_DOUBLE:
1407 case FILTER_OP_UNARY_MINUS_DOUBLE:
1408 case FILTER_OP_UNARY_NOT_DOUBLE:
1409 {
1410 next_pc += sizeof(struct unary_op);
1411 break;
1412 }
1413
1414 /* logical */
1415 case FILTER_OP_AND:
1416 case FILTER_OP_OR:
1417 {
1418 next_pc += sizeof(struct logical_op);
1419 break;
1420 }
1421
1422 /* load */
1423 case FILTER_OP_LOAD_FIELD_REF:
1424 {
1425 ERR("Unknown field ref type\n");
1426 ret = -EINVAL;
1427 goto end;
1428 }
1429 case FILTER_OP_LOAD_FIELD_REF_STRING:
1430 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1431 {
1432 struct load_op *insn = (struct load_op *) pc;
1433
1434 reg[insn->reg].type = REG_STRING;
1435 reg[insn->reg].literal = 0;
1436 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1437 break;
1438 }
1439 case FILTER_OP_LOAD_FIELD_REF_S64:
1440 {
1441 struct load_op *insn = (struct load_op *) pc;
1442
1443 reg[insn->reg].type = REG_S64;
1444 reg[insn->reg].literal = 0;
1445 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1446 break;
1447 }
1448 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1449 {
1450 struct load_op *insn = (struct load_op *) pc;
1451
1452 reg[insn->reg].type = REG_DOUBLE;
1453 reg[insn->reg].literal = 0;
1454 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1455 break;
1456 }
1457
1458 case FILTER_OP_LOAD_STRING:
1459 {
1460 struct load_op *insn = (struct load_op *) pc;
1461
1462 reg[insn->reg].type = REG_STRING;
1463 reg[insn->reg].literal = 1;
1464 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1465 break;
1466 }
1467
1468 case FILTER_OP_LOAD_S64:
1469 {
1470 struct load_op *insn = (struct load_op *) pc;
1471
1472 reg[insn->reg].type = REG_S64;
1473 reg[insn->reg].literal = 1;
1474 next_pc += sizeof(struct load_op)
1475 + sizeof(struct literal_numeric);
1476 break;
1477 }
1478
1479 case FILTER_OP_LOAD_DOUBLE:
1480 {
1481 struct load_op *insn = (struct load_op *) pc;
1482
1483 reg[insn->reg].type = REG_DOUBLE;
1484 reg[insn->reg].literal = 1;
1485 next_pc += sizeof(struct load_op)
1486 + sizeof(struct literal_double);
1487 break;
1488 }
1489 }
1490 }
1491 end:
1492 return ret;
1493 }
1494
1495
1496
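/*
 * Patch one LOAD_FIELD_REF instruction: look the field up in the event
 * description, compute its byte offset within the filter stack data by
 * summing the sizes of the preceding fields (see the switch below), and
 * rewrite the opcode into the LOAD_FIELD_REF_* variant matching the
 * field type.  For illustration only, a reloc entry referring to a
 * hypothetical field "intfield" at bytecode offset 8 would be the two
 * bytes of the uint16_t offset (8) followed by the NUL-terminated
 * string "intfield".
 */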
1497 static
1498 int apply_field_reloc(struct ltt_event *event,
1499 struct bytecode_runtime *runtime,
1500 uint32_t runtime_len,
1501 uint32_t reloc_offset,
1502 const char *field_name)
1503 {
1504 const struct lttng_event_desc *desc;
1505 const struct lttng_event_field *fields, *field = NULL;
1506 unsigned int nr_fields, i;
1507 struct field_ref *field_ref;
1508 struct load_op *op;
1509 uint32_t field_offset = 0;
1510
1511 dbg_printf("Apply reloc: %u %s\n", reloc_offset, field_name);
1512
1513 /* Ensure that the reloc is within the code */
1514 if (runtime_len - reloc_offset < sizeof(uint16_t))
1515 return -EINVAL;
1516
1517 /* Lookup event by name */
1518 desc = event->desc;
1519 if (!desc)
1520 return -EINVAL;
1521 fields = desc->fields;
1522 if (!fields)
1523 return -EINVAL;
1524 nr_fields = desc->nr_fields;
1525 for (i = 0; i < nr_fields; i++) {
1526 if (!strcmp(fields[i].name, field_name)) {
1527 field = &fields[i];
1528 break;
1529 }
1530 /* compute field offset */
1531 switch (fields[i].type.atype) {
1532 case atype_integer:
1533 case atype_enum:
1534 field_offset += sizeof(int64_t);
1535 break;
1536 case atype_array:
1537 case atype_sequence:
1538 field_offset += sizeof(unsigned long);
1539 field_offset += sizeof(void *);
1540 break;
1541 case atype_string:
1542 field_offset += sizeof(void *);
1543 break;
1544 case atype_float:
1545 field_offset += sizeof(double);
1546 break;
1547 default:
1548 return -EINVAL;
1549 }
1550 }
1551 if (!field)
1552 return -EINVAL;
1553
1554 /* Check if field offset is too large for 16-bit offset */
1555 if (field_offset > FILTER_BYTECODE_MAX_LEN)
1556 return -EINVAL;
1557
1558 /* set type */
1559 op = (struct load_op *) &runtime->data[reloc_offset];
1560 field_ref = (struct field_ref *) op->data;
1561 switch (field->type.atype) {
1562 case atype_integer:
1563 case atype_enum:
1564 op->op = FILTER_OP_LOAD_FIELD_REF_S64;
1565 break;
1566 case atype_array:
1567 case atype_sequence:
1568 op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
1569 break;
1570 case atype_string:
1571 op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
1572 break;
1573 case atype_float:
1574 op->op = FILTER_OP_LOAD_FIELD_REF_DOUBLE;
1575 break;
1576 default:
1577 return -EINVAL;
1578 }
1579 /* set offset */
1580 field_ref->offset = (uint16_t) field_offset;
1581 return 0;
1582 }
1583
1584 /*
1585 * Take a bytecode with reloc table and link it to an event to create a
1586 * bytecode runtime.
1587 */
1588 static
1589 int _lttng_filter_event_link_bytecode(struct ltt_event *event,
1590 struct lttng_ust_filter_bytecode *filter_bytecode)
1591 {
1592 int ret, offset, next_offset;
1593 struct bytecode_runtime *runtime = NULL;
1594 size_t runtime_alloc_len;
1595
1596 if (!filter_bytecode)
1597 return 0;
1598 /* Event is not connected to any description */
1599 if (!event->desc)
1600 return 0;
1601 /* Bytecode already linked */
1602 if (event->filter || event->filter_data)
1603 return 0;
1604
1605 dbg_printf("Linking\n");
1606
1607 /* We don't need the reloc table in the runtime */
1608 runtime_alloc_len = sizeof(*runtime) + filter_bytecode->reloc_offset;
1609 runtime = zmalloc(runtime_alloc_len);
1610 if (!runtime) {
1611 ret = -ENOMEM;
1612 goto link_error;
1613 }
1614 runtime->len = filter_bytecode->reloc_offset;
1615 /* copy original bytecode */
1616 memcpy(runtime->data, filter_bytecode->data, runtime->len);
1617 /*
1618 * apply relocs. Those are a uint16_t (offset in bytecode)
1619 * followed by a string (field name).
1620 */
1621 for (offset = filter_bytecode->reloc_offset;
1622 offset < filter_bytecode->len;
1623 offset = next_offset) {
1624 uint16_t reloc_offset =
1625 *(uint16_t *) &filter_bytecode->data[offset];
1626 const char *field_name =
1627 (const char *) &filter_bytecode->data[offset + sizeof(uint16_t)];
1628
1629 ret = apply_field_reloc(event, runtime, runtime->len, reloc_offset, field_name);
1630 if (ret) {
1631 goto link_error;
1632 }
1633 next_offset = offset + sizeof(uint16_t) + strlen(field_name) + 1;
1634 }
1635 /* Validate bytecode */
1636 ret = lttng_filter_validate_bytecode(runtime);
1637 if (ret) {
1638 goto link_error;
1639 }
1640 /* Specialize bytecode */
1641 ret = lttng_filter_specialize_bytecode(runtime);
1642 if (ret) {
1643 goto link_error;
1644 }
1645 event->filter_data = runtime;
1646 event->filter = lttng_filter_interpret_bytecode;
1647 return 0;
1648
1649 link_error:
1650 event->filter = lttng_filter_false;
1651 free(runtime);
1652 return ret;
1653 }
1654
1655 void lttng_filter_event_link_bytecode(struct ltt_event *event,
1656 struct lttng_ust_filter_bytecode *filter_bytecode)
1657 {
1658 int ret;
1659
1660 ret = _lttng_filter_event_link_bytecode(event, filter_bytecode);
1661 if (ret) {
1662 fprintf(stderr, "[lttng filter] error linking event bytecode\n");
1663 }
1664 }
1665
1666 /*
1667 * Link bytecode to all events for a wildcard. Skips events that already
1668 * have a bytecode linked.
1669 * We do not set each event's filter_bytecode field, because they do not
1670 * own the filter_bytecode: the wildcard owns it.
1671 */
1672 void lttng_filter_wildcard_link_bytecode(struct session_wildcard *wildcard)
1673 {
1674 struct ltt_event *event;
1675 int ret;
1676
1677 if (!wildcard->filter_bytecode)
1678 return;
1679
1680 cds_list_for_each_entry(event, &wildcard->events, wildcard_list) {
1681 if (event->filter)
1682 continue;
1683 ret = _lttng_filter_event_link_bytecode(event,
1684 wildcard->filter_bytecode);
1685 if (ret) {
1686 fprintf(stderr, "[lttng filter] error linking wildcard bytecode\n");
1687 }
1688
1689 }
1690 return;
1691 }
1692
1693 /*
1694 * Need to attach filter to an event before starting tracing for the
1695 * session. We own the filter_bytecode if we return success.
1696 */
1697 int lttng_filter_event_attach_bytecode(struct ltt_event *event,
1698 struct lttng_ust_filter_bytecode *filter_bytecode)
1699 {
1700 if (event->chan->session->been_active)
1701 return -EPERM;
1702 if (event->filter_bytecode)
1703 return -EEXIST;
1704 event->filter_bytecode = filter_bytecode;
1705 return 0;
1706 }
1707
1708 /*
1709 * Need to attach filter to a wildcard before starting tracing for the
1710 * session. We own the filter_bytecode if we return success.
1711 */
1712 int lttng_filter_wildcard_attach_bytecode(struct session_wildcard *wildcard,
1713 struct lttng_ust_filter_bytecode *filter_bytecode)
1714 {
1715 if (wildcard->chan->session->been_active)
1716 return -EPERM;
1717 if (wildcard->filter_bytecode)
1718 return -EEXIST;
1719 wildcard->filter_bytecode = filter_bytecode;
1720 return 0;
1721 }