/*
 * lttng-filter.c
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng-filter.h>

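/*
 * Human-readable opcode names, indexed by filter_op value.  Used by
 * lttng_filter_print_op() below for diagnostics.
 */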
static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_RSHIFT ] = "RSHIFT",
	[ FILTER_OP_LSHIFT ] = "LSHIFT",
	[ FILTER_OP_BIN_AND ] = "BIN_AND",
	[ FILTER_OP_BIN_OR ] = "BIN_OR",
	[ FILTER_OP_BIN_XOR ] = "BIN_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
};

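/*
 * Map an opcode to its printable name.  Out-of-range values fall back
 * to "UNKNOWN" rather than indexing past the table.
 */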
const char *lttng_filter_print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

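/*
 * Resolve a FILTER_OP_LOAD_FIELD_REF relocation: look up the named
 * field in the event description, replace the generic opcode with a
 * type-specialized one, and patch in the computed field offset.
 */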
static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct field_ref *field_ref;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup field by name within the event description */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
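	/*
	 * Compute the target field's byte offset: each preceding field
	 * contributes sizeof(int64_t) for integers/enums, an unsigned
	 * long length plus a pointer for arrays/sequences, and a
	 * pointer for strings, matching the per-type sizes used in the
	 * switch below.
	 */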
	for (i = 0; i < nr_fields; i++) {
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct:	/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->data[reloc_offset];
	field_ref = (struct field_ref *) op->data;
	switch (field->type.atype) {
	case atype_integer:
	case atype_enum:
		op->op = FILTER_OP_LOAD_FIELD_REF_S64;
		break;
	case atype_array:
	case atype_sequence:
		if (field->user)
			op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
		else
			op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
		break;
	case atype_string:
		if (field->user)
			op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
		else
			op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
		break;
	case atype_struct:	/* Unsupported. */
	case atype_array_compound:	/* Unsupported. */
	case atype_sequence_compound:	/* Unsupported. */
	case atype_variant:	/* Unsupported. */
	default:
		return -EINVAL;
	}
	/* set offset */
	field_ref->offset = (uint16_t) field_offset;
	return 0;
}

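/*
 * Resolve a FILTER_OP_GET_CONTEXT_REF relocation: look up the named
 * context field in lttng_static_ctx, select the opcode matching its
 * type, and store the context index as the reference offset.
 */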
static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name)
{
	struct field_ref *field_ref;
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->data[reloc_offset];
	field_ref = (struct field_ref *) op->data;
	switch (ctx_field->event_field.type.atype) {
	case atype_integer:
	case atype_enum:
		op->op = FILTER_OP_GET_CONTEXT_REF_S64;
		break;
	/* Sequence and array supported as string */
	case atype_string:
	case atype_array:
	case atype_sequence:
		BUG_ON(ctx_field->event_field.user);
		op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
		break;
	case atype_struct:	/* Unsupported. */
	case atype_array_compound:	/* Unsupported. */
	case atype_sequence_compound:	/* Unsupported. */
	case atype_variant:	/* Unsupported. */
	default:
		return -EINVAL;
	}
	/* set offset to context index within channel contexts */
	field_ref->offset = (uint16_t) idx;
	return 0;
}

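/*
 * Apply one relocation table entry, dispatching on the opcode found at
 * the relocation target within the copied bytecode.
 */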
static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->data[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
				reloc_offset, name);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
				reloc_offset, name);
	default:
		printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

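/*
 * Return 1 if this bytecode node already has a runtime linked to the
 * event, 0 otherwise.
 */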
static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_filter_bytecode_node *filter_bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->data, filter_bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
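	/*
	 * Example reloc table layout (hypothetical field names),
	 * starting at bc.reloc_offset:
	 *   [uint16_t target offset]["myfield\0"][uint16_t target offset]...
	 * Entries are walked by advancing past each NUL-terminated
	 * name, hence the strlen() + 1 below.
	 */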
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

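	/*
	 * On link failure, the runtime is still inserted, but with the
	 * reject-all filter and link_failed set, so sync_state will not
	 * re-enable interpretation later.
	 */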
link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

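/*
 * Update the runtime filter callback to match the current state: a
 * disabled enabler or a failed link selects the reject-all filter,
 * otherwise the bytecode interpreter is used.
 */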
void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link bytecode for all enablers referenced by an event.
 */
void lttng_enabler_event_link_bytecode(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event->desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order.
		 */
		list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum < bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_filter_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

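/*
 * Free every bytecode node attached to an enabler.  The safe iterator
 * is required because each node is freed while walking the list.
 */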
void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

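/*
 * Free the bytecode runtimes linked to an event.  The bytecode nodes
 * themselves belong to their enablers and are freed separately by
 * lttng_free_enabler_filter_bytecode().
 */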
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->bytecode_runtime_head, p.node) {
		kfree(runtime);
	}
}