/* SPDX-License-Identifier: MIT
 *
 * lttng-filter.c
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng-filter.h>

static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * Load a star globbing pattern (literal string) from an
	 * immediate operand.
	 */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary operators: apply a pattern to a string */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
};
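
/*
 * Return a human-readable name for a filter opcode. Out-of-range values
 * map to "UNKNOWN" rather than indexing past the opnames table.
 */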
const char *lttng_filter_print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

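/*
 * Patch a load instruction that references an event payload field:
 * resolve the field name against the event description, pick the
 * type-specialized opcode and record the field's offset in the
 * instruction's field_ref operand.
 */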
static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Look up the field by name in the event description */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
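	/*
	 * Walk the fields that precede the target, accumulating the
	 * offset at which the interpreter will find each one: integers
	 * and enums occupy a 64-bit slot, arrays and sequences a length
	 * word plus a pointer, and strings a single pointer.
	 */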
	for (i = 0; i < nr_fields; i++) {
		if (fields[i].nofilter)
			continue;
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array_nestable:
			if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_sequence_nestable:
			if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array_nestable:
		case atype_sequence_nestable:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

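/*
 * Patch a get-context instruction: resolve the context name to an index
 * in the static context array, pick the opcode matching the context
 * field's type, and store the index as the 16-bit offset operand.
 */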
static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case atype_string:
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_array_nestable:
			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_sequence_nestable:
			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

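/*
 * Dispatch one relocation entry based on the opcode found at the reloc
 * target. Symbol lookups need no patching here: they are resolved by
 * the specialization phase or dynamically by the interpreter.
 */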
static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

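/*
 * Return 1 if a runtime has already been created for this bytecode node
 * on the event, 0 otherwise.
 */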
static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_filter_bytecode_node *filter_bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->p.event = event;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
	/*
	 * Apply relocs. Each entry is a uint16_t (offset in bytecode)
	 * followed by a null-terminated string (field name).
	 */
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

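	/*
	 * On linking error, leave the runtime in the list but wire it to
	 * lttng_filter_false, so the filter always evaluates to false for
	 * this event instead of running broken bytecode.
	 */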
link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

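/*
 * Reflect the enabler state on the runtime: use the real interpreter
 * only when the enabler is enabled and linking succeeded; otherwise
 * fall back to a filter that always evaluates to false.
 */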
void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link all bytecodes of an enabler to an event.
 */
void lttng_enabler_event_link_bytecode(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event->desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it.
		 */
		list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_filter_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

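/*
 * Free the bytecode nodes attached to an enabler. The runtimes created
 * from them are freed per-event by lttng_free_event_filter_runtime().
 */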
void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

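/*
 * Free every bytecode runtime linked to an event, including the data
 * area allocated by the specialization phase.
 */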
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}