Fix: filter attach vs event enable race
[lttng-ust.git] / liblttng-ust / lttng-events.c
/*
 * lttng-events.c
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <pthread.h>
#include <errno.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <time.h>
#include <lttng/ust-endian.h>
#include "clock.h"

#include <urcu-bp.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <urcu/arch.h>

#include <lttng/tracepoint.h>
#include <lttng/ust-events.h>

#include <usterr-signal-safe.h>
#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
#include "error.h"
#include "compat.h"
#include "lttng-ust-uuid.h"

#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
#include "lttng-ust-baddr.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"

/*
 * All operations within this file are called by the communication
 * thread, under ust_lock protection.
 */

static CDS_LIST_HEAD(sessions);

struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}

static void _lttng_event_destroy(struct lttng_event *event);

static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static
void lttng_session_sync_enablers(struct lttng_session *session);
static
void lttng_enabler_destroy(struct lttng_enabler *enabler);

/*
 * Called with ust lock held.
 */
int lttng_session_active(void)
{
	struct lttng_session *iter;

	cds_list_for_each_entry(iter, &sessions, node) {
		if (iter->active)
			return 1;
	}
	return 0;
}

static
int lttng_loglevel_match(int loglevel,
		unsigned int has_loglevel,
		enum lttng_ust_loglevel_type req_type,
		int req_loglevel)
{
	if (!has_loglevel)
		loglevel = TRACE_DEFAULT;
	switch (req_type) {
	case LTTNG_UST_LOGLEVEL_RANGE:
		if (loglevel <= req_loglevel
				|| (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_SINGLE:
		if (loglevel == req_loglevel
				|| (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_ALL:
	default:
		if (loglevel <= TRACE_DEBUG)
			return 1;
		else
			return 0;
	}
}

void synchronize_trace(void)
{
	synchronize_rcu();
}

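/*
 * Create a session object, initialize its channel, event and enabler
 * lists as well as its event hash table, and add it to the global
 * session list. Called with ust lock held.
 */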
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	int i;

	session = zmalloc(sizeof(struct lttng_session));
	if (!session)
		return NULL;
	CDS_INIT_LIST_HEAD(&session->chan_head);
	CDS_INIT_LIST_HEAD(&session->events_head);
	CDS_INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
		CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
	cds_list_add(&session->node, &sessions);
	return session;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle.
	 */
	channel_destroy(chan, handle, 0);
}

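/*
 * Register the event probe callback with the tracepoint library.
 * Expects an unregistered event; sets event->registered on success.
 */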
static
void register_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 0);
	desc = event->desc;
	ret = __tracepoint_probe_register(desc->name,
			desc->probe_callback,
			event, desc->signature);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 1;
}

static
void unregister_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 1);
	desc = event->desc;
	ret = __tracepoint_probe_unregister(desc->name,
			desc->probe_callback,
			event);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 0;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	if (event->registered)
		unregister_event(event);
}

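/*
 * Tear down a session: unregister its events from the tracepoint
 * library, wait for in-flight probes to complete, then free enablers,
 * events, channels, and finally the session itself.
 */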
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enabler *enabler, *tmpenabler;

	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	free(session);
}

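/*
 * Activate a session: register its channels with the session daemon,
 * synchronize enablers, then atomically mark the session active.
 */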
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	int notify_socket;

	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0)
		return notify_socket;

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	cds_list_for_each_entry(chan, &session->chan_head, node) {
		const struct lttng_ctx *ctx;
		const struct lttng_ctx_field *fields = NULL;
		size_t nr_fields = 0;
		uint32_t chan_id;
		/* Don't change it on session stop/restart */
		if (chan->header_type)
			continue;
		ctx = chan->ctx;
		if (ctx) {
			nr_fields = ctx->nr_fields;
			fields = ctx->fields;
		}
		ret = ustcomm_register_channel(notify_socket,
				session->objd,
				chan->objd,
				nr_fields,
				fields,
				&chan_id,
				&chan->header_type);
		if (ret) {
			DBG("Error (%d) registering channel to sessiond", ret);
			return ret;
		}
		if (chan_id != chan->id) {
			DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
				chan_id, chan->id);
			return -EINVAL;
		}
	}

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/* Set atomically the state to "active" */
	CMM_ACCESS_ONCE(session->active) = 1;
	CMM_ACCESS_ONCE(session->been_active) = 1;

	session->statedump_pending = 1;
	lttng_ust_sockinfo_session_enabled(session->owner);
end:
	return ret;
}

int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;

	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "inactive" */
	CMM_ACCESS_ONCE(session->active) = 0;

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);
end:
	return ret;
}

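/*
 * Enable a channel: set its transient enabler state, resynchronize
 * enablers so per-event enabled states follow, then atomically mark
 * the channel enabled.
 */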
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	if (channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 1;
end:
	return ret;
}

int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}

/*
 * Supports event creation while tracing session is active.
 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
	cds_hlist_for_each_entry(event, node, head, hlist) {
		assert(event->desc);
		if (!strncmp(event->desc->name, desc->name,
				LTTNG_UST_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/*
	 * Check if the loglevel matches. Refuse to connect the event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
socket_error:
exist:
	return ret;
}

static
int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_WILDCARD);
	/* Compare excluding final '*' */
	if (strncmp(desc->name, enabler->event_param.name,
			strlen(enabler->event_param.name) - 1))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_EVENT);
	if (strcmp(desc->name, enabler->event_param.name))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	struct lttng_ust_excluder_node *excluder;

	/* If the event matches an excluder, return 'does not match' */
	cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
		int count;

		for (count = 0; count < excluder->excluder.count; count++) {
			int found, len;
			char *excluder_name;

			excluder_name = (char *) (excluder->excluder.names)
					+ count * LTTNG_UST_SYM_NAME_LEN;
			len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
			if (len > 0 && excluder_name[len - 1] == '*') {
				found = !strncmp(desc->name, excluder_name,
						len - 1);
			} else {
				found = !strncmp(desc->name, excluder_name,
						LTTNG_UST_SYM_NAME_LEN - 1);
			}
			if (found) {
				return 0;
			}
		}
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_desc_match_wildcard_enabler(desc, enabler);
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}

static
int lttng_event_match_enabler(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	if (lttng_desc_match_enabler(event->desc, enabler)
			&& event->chan == enabler->chan)
		return 1;
	else
		return 0;
}

static
struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	cds_list_for_each_entry(enabler_ref,
			&event->enablers_ref_head, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}

/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}

/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}

/*
 * Called at library load: connect the probe on all enablers matching
 * this event.
 * Called with session mutex held.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		lttng_session_lazy_sync_enablers(session);
	}
	return 0;
}

/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

	/* Execute state dump */
	lttng_ust_baddr_statedump(owner);

	/* Clear pending state dump */
	if (ust_lock()) {
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	cds_list_del(&event->node);
	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}

void lttng_ust_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
		lttng_session_destroy(session);
}

/*
 * Enabler management.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	/*
	 * The "disable" event create comm field has been added to fix a
	 * race between event creation (of a started trace) and enabling
	 * filtering. New session daemons always set the "disable" field
	 * to 1, and are aware that they need to explicitly enable the
	 * event. Older session daemons (within the same ABI) leave it at
	 * 0, and therefore we need to enable it here, keeping the
	 * original racy behavior.
	 */
	enabler->enabled = !event_param->disabled;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}

int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

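/*
 * Attach a filter bytecode to the enabler's bytecode list and
 * resynchronize enablers so the bytecode gets linked to the matching
 * events.
 */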
int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_attach_context(struct lttng_ust_context *context_param,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
	{
		struct lttng_ust_perf_counter_ctx *perf_ctx_param;

		perf_ctx_param = &context_param->u.perf_counter;
		return lttng_add_perf_counter_to_ctx(
			perf_ctx_param->type,
			perf_ctx_param->config,
			perf_ctx_param->name,
			ctx);
	}
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}

int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}

static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
	struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;

	/* Destroy filter bytecode */
	cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		free(filter_node);
	}

	/* Destroy excluders */
	cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
			&enabler->excluder_head, node) {
		free(excluder_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	cds_list_del(&enabler->node);
	free(enabler);
}

/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check whether the event has enabled enablers without bytecode */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
}

/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 */
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
	lttng_session_sync_enablers(session);
}