Remove LTTNG_HIDDEN macro
lttng-ust.git: liblttng-ust/lttng-bytecode.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST bytecode code.
 */

#define _LGPL_SOURCE
#include <stddef.h>
#include <stdint.h>

#include <urcu/rculist.h>

#include "context-internal.h"
#include "lttng-bytecode.h"
#include "ust-events-internal.h"
#include "ust-helper.h"

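/*
 * Printable names for bytecode opcodes, indexed by opcode value.
 * Used by lttng_bytecode_print_op() below.
 */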
static const char *opnames[] = {
	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",

	[ BYTECODE_OP_RETURN ] = "RETURN",

	/* binary */
	[ BYTECODE_OP_MUL ] = "MUL",
	[ BYTECODE_OP_DIV ] = "DIV",
	[ BYTECODE_OP_MOD ] = "MOD",
	[ BYTECODE_OP_PLUS ] = "PLUS",
	[ BYTECODE_OP_MINUS ] = "MINUS",
	[ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ BYTECODE_OP_BIT_AND ] = "BIT_AND",
	[ BYTECODE_OP_BIT_OR ] = "BIT_OR",
	[ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ BYTECODE_OP_EQ ] = "EQ",
	[ BYTECODE_OP_NE ] = "NE",
	[ BYTECODE_OP_GT ] = "GT",
	[ BYTECODE_OP_LT ] = "LT",
	[ BYTECODE_OP_GE ] = "GE",
	[ BYTECODE_OP_LE ] = "LE",

	/* string binary comparators */
	[ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
	[ BYTECODE_OP_NE_STRING ] = "NE_STRING",
	[ BYTECODE_OP_GT_STRING ] = "GT_STRING",
	[ BYTECODE_OP_LT_STRING ] = "LT_STRING",
	[ BYTECODE_OP_GE_STRING ] = "GE_STRING",
	[ BYTECODE_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
	[ BYTECODE_OP_NE_S64 ] = "NE_S64",
	[ BYTECODE_OP_GT_S64 ] = "GT_S64",
	[ BYTECODE_OP_LT_S64 ] = "LT_S64",
	[ BYTECODE_OP_GE_S64 ] = "GE_S64",
	[ BYTECODE_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
	[ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ BYTECODE_OP_AND ] = "AND",
	[ BYTECODE_OP_OR ] = "OR",

	/* load field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
	[ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
	[ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load star globbing pattern (literal string)
	 * from immediate operand.
	 */
	[ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary operators: apply to strings */
	[ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
};

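/*
 * Return the printable name of a bytecode opcode. Opcodes outside the
 * known range are reported as "UNKNOWN" rather than indexing past the
 * end of the opnames table.
 */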
const char *lttng_bytecode_print_op(enum bytecode_op op)
{
	if (op >= NR_BYTECODE_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

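/*
 * Relocate a BYTECODE_OP_LOAD_FIELD_REF instruction so it targets the
 * event field named field_name: select the typed LOAD_FIELD_REF_*
 * opcode matching the field's type and record the field's offset,
 * computed by summing the sizes of the preceding fields (fields marked
 * nofilter are skipped).
 */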
static
int apply_field_reloc(const struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum bytecode_op bytecode_op)
{
	const struct lttng_ust_event_field **fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event by name */
	if (!event_desc)
		return -EINVAL;
	fields = event_desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = event_desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (fields[i]->nofilter) {
			continue;
		}
		if (!strcmp(fields[i]->name, field_name)) {
			field = fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i]->type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array_nestable:
		case atype_sequence_nestable:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array_nestable:
		case atype_sequence_nestable:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_float:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE;
			break;
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

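/*
 * Relocate a BYTECODE_OP_GET_CONTEXT_REF instruction so it targets the
 * named context field: look up the context index in *pctx (adding the
 * context first when it is an application context), select the typed
 * GET_CONTEXT_REF_* opcode matching the context field type, and record
 * the context index as the reference offset.
 */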
static
int apply_context_reloc(struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum bytecode_op bytecode_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;
	struct lttng_ctx **pctx = runtime->p.priv->pctx;

	dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(*pctx, context_name);
	if (idx < 0) {
		if (lttng_context_is_app(context_name)) {
			int ret;

			ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
					pctx);
			if (ret)
				return ret;
			idx = lttng_get_context_index(*pctx, context_name);
			if (idx < 0)
				return -ENOENT;
		} else {
			return -ENOENT;
		}
	}
	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &(*pctx)->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case atype_string:
		case atype_array_nestable:
		case atype_sequence_nestable:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_float:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE;
			break;
		case atype_dynamic:
			op->op = BYTECODE_OP_GET_CONTEXT_REF;
			break;
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

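/*
 * Apply one relocation entry, dispatching on the opcode found at
 * reloc_offset in the copied bytecode. GET_SYMBOL and GET_SYMBOL_FIELD
 * are left untouched here; they are resolved by the specialization
 * phase or dynamically by the interpreter.
 */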
static
int apply_reloc(const struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case BYTECODE_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event_desc, runtime, runtime_len,
				reloc_offset, name, op->op);
	case BYTECODE_OP_GET_CONTEXT_REF:
		return apply_context_reloc(runtime, runtime_len,
				reloc_offset, name, op->op);
	case BYTECODE_OP_GET_SYMBOL:
	case BYTECODE_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		ERR("Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

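/*
 * Return 1 if the given bytecode node already has a runtime on
 * bytecode_runtime_head, 0 otherwise.
 */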
static
int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
		struct cds_list_head *bytecode_runtime_head)
{
	struct lttng_ust_bytecode_runtime *bc_runtime;

	cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
		if (bc_runtime->priv->bc == bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int link_bytecode(const struct lttng_ust_event_desc *event_desc,
		struct lttng_ctx **ctx,
		struct lttng_ust_bytecode_node *bytecode,
		struct cds_list_head *bytecode_runtime_head,
		struct cds_list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	struct lttng_ust_bytecode_runtime_private *runtime_priv = NULL;
	size_t runtime_alloc_len;

	if (!bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(bytecode, bytecode_runtime_head))
		return 0;

	dbg_printf("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
	runtime = zmalloc(runtime_alloc_len);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime_priv = zmalloc(sizeof(struct lttng_ust_bytecode_runtime_private));
	if (!runtime_priv) {
		free(runtime);
		runtime = NULL;
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.priv = runtime_priv;
	runtime->p.struct_size = sizeof(struct lttng_ust_bytecode_runtime);
	runtime_priv->pub = runtime;
	runtime_priv->bc = bytecode;
	runtime_priv->pctx = ctx;
	runtime->len = bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = bytecode->bc.reloc_offset;
			offset < bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &bytecode->bc.data[offset];
		const char *name =
			(const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_bytecode_validate(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_bytecode_specialize(event_desc, runtime);
	if (ret) {
		goto link_error;
	}

	switch (bytecode->type) {
	case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER:
		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret;
		break;
	case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE:
		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret;
		break;
	default:
		abort();
	}

	runtime->p.priv->link_failed = 0;
	cds_list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printf("Linking successful.\n");
	return 0;

link_error:
	switch (bytecode->type) {
	case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER:
		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
		break;
	case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE:
		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
		break;
	default:
		abort();
	}

	runtime_priv->link_failed = 1;
	cds_list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printf("Linking failed.\n");
	return ret;
}

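/*
 * Select the filter interpreter function according to the current
 * state: fall back to the "false" interpreter when the owning enabler
 * is disabled or when linking failed.
 */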
void lttng_bytecode_filter_sync_state(struct lttng_ust_bytecode_runtime *runtime)
{
	struct lttng_ust_bytecode_node *bc = runtime->priv->bc;

	if (!bc->enabler->enabled || runtime->priv->link_failed)
		runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
	else
		runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
}

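/*
 * Same as above, for capture bytecode: use the "false" interpreter when
 * the owning enabler is disabled or when linking failed.
 */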
void lttng_bytecode_capture_sync_state(struct lttng_ust_bytecode_runtime *runtime)
{
	struct lttng_ust_bytecode_node *bc = runtime->priv->bc;

	if (!bc->enabler->enabled || runtime->priv->link_failed)
		runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
	else
		runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
}

/*
 * Given the lists of bytecode programs of an instance (trigger or event) and
 * of a matching enabler, try to link all the enabler's bytecode programs with
 * the instance.
 *
 * This function is called after we confirmed that the enabler and the
 * instance match by name (or by glob pattern).
 */
void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
		struct lttng_ctx **ctx,
		struct cds_list_head *instance_bytecode_head,
		struct cds_list_head *enabler_bytecode_head)
{
	struct lttng_ust_bytecode_node *enabler_bc;
	struct lttng_ust_bytecode_runtime *runtime;

	assert(event_desc);

	/* Go over all the bytecode programs of the enabler. */
	cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
		int found = 0, ret;
		struct cds_list_head *insert_loc;

		/*
		 * Check if the current enabler bytecode program is already
		 * linked with the instance.
		 */
		cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
			if (runtime->priv->bc == enabler_bc) {
				found = 1;
				break;
			}
		}

		/*
		 * Skip bytecode already linked, go to the next enabler
		 * bytecode program.
		 */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it.
		 */
		cds_list_for_each_entry_reverse(runtime,
				instance_bytecode_head, node) {
			if (runtime->priv->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}

		/* Add to head of list */
		insert_loc = instance_bytecode_head;
	add_within:
		dbg_printf("linking bytecode\n");
		ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
		if (ret) {
			dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_bytecode_node *bytecode)
{
	cds_list_add(&bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

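/*
 * Free every bytecode runtime on the list, along with its auxiliary
 * data buffer and private structure.
 */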
static
void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
{
	struct bytecode_runtime *runtime, *tmp;

	cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
			p.node) {
		free(runtime->data);
		free(runtime->p.priv);
		free(runtime);
	}
}

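/*
 * Release the filter bytecode runtimes attached to an event.
 */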
void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
{
	free_filter_runtime(&event->filter_bytecode_runtime_head);
}