Fix: many-events registration/unregistration speed
[lttng-ust.git] / liblttng-ust / lttng-events.c
/*
 * lttng-events.c
 *
 * Holds LTTng per-session event registry.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _GNU_SOURCE
#include <stdio.h>
#include <urcu/list.h>
#include <urcu/hlist.h>
#include <pthread.h>
#include <errno.h>
#include <sys/shm.h>
#include <sys/ipc.h>
#include <stdint.h>
#include <stddef.h>
#include <inttypes.h>
#include <time.h>
#include <lttng/ust-endian.h>
#include "clock.h"

#include <urcu-bp.h>
#include <urcu/compiler.h>
#include <urcu/uatomic.h>
#include <urcu/arch.h>

#include <lttng/tracepoint.h>
#include <lttng/ust-events.h>

#include <usterr-signal-safe.h>
#include <helper.h>
#include <lttng/ust-ctl.h>
#include <ust-comm.h>
#include "error.h"
#include "compat.h"
#include "lttng-ust-uuid.h"

#include "tracepoint-internal.h"
#include "lttng-tracer.h"
#include "lttng-tracer-core.h"
#include "lttng-ust-statedump.h"
#include "wait.h"
#include "../libringbuffer/shm.h"
#include "jhash.h"

/*
 * All operations within this file are called by the communication
 * thread, under ust_lock protection.
 */

static CDS_LIST_HEAD(sessions);

struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}

static void _lttng_event_destroy(struct lttng_event *event);

static
void lttng_session_lazy_sync_enablers(struct lttng_session *session);
static
void lttng_session_sync_enablers(struct lttng_session *session);
static
void lttng_enabler_destroy(struct lttng_enabler *enabler);

/*
 * Called with ust lock held.
 */
int lttng_session_active(void)
{
	struct lttng_session *iter;

	cds_list_for_each_entry(iter, &sessions, node) {
		if (iter->active)
			return 1;
	}
	return 0;
}

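/*
 * Return 1 if the event loglevel (or TRACE_DEFAULT when the probe does
 * not declare one) satisfies the requested loglevel constraint:
 * "range" accepts events at least as severe as the requested level,
 * "single" requires an exact match, and "all" accepts every loglevel.
 */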
static
int lttng_loglevel_match(int loglevel,
		unsigned int has_loglevel,
		enum lttng_ust_loglevel_type req_type,
		int req_loglevel)
{
	if (!has_loglevel)
		loglevel = TRACE_DEFAULT;
	switch (req_type) {
	case LTTNG_UST_LOGLEVEL_RANGE:
		if (loglevel <= req_loglevel
				|| (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_SINGLE:
		if (loglevel == req_loglevel
				|| (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
			return 1;
		else
			return 0;
	case LTTNG_UST_LOGLEVEL_ALL:
	default:
		if (loglevel <= TRACE_DEBUG)
			return 1;
		else
			return 0;
	}
}

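/*
 * Wait for an RCU grace period of the tracer's urcu-bp flavor, so that
 * no tracing call can still be using data we are about to release.
 */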
void synchronize_trace(void)
{
	synchronize_rcu();
}

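/*
 * Allocate a zeroed session, initialize its channel, event and enabler
 * lists and its per-session event hash table, then add it to the
 * global session list.
 */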
struct lttng_session *lttng_session_create(void)
{
	struct lttng_session *session;
	int i;

	session = zmalloc(sizeof(struct lttng_session));
	if (!session)
		return NULL;
	CDS_INIT_LIST_HEAD(&session->chan_head);
	CDS_INIT_LIST_HEAD(&session->events_head);
	CDS_INIT_LIST_HEAD(&session->enablers_head);
	for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
		CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
	cds_list_add(&session->node, &sessions);
	return session;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle.
	 */
	channel_destroy(chan, handle, 0);
}

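/*
 * Connect or disconnect the event's probe callback to/from its
 * tracepoint. The "queue release" registration variants defer the
 * probe release (and its grace period) so that registering or
 * unregistering many events can be batched; the queue is flushed later
 * by __tracepoint_probe_prune_release_queue().
 */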
static
void register_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 0);
	desc = event->desc;
	ret = __tracepoint_probe_register_queue_release(desc->name,
			desc->probe_callback,
			event, desc->signature);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 1;
}

static
void unregister_event(struct lttng_event *event)
{
	int ret;
	const struct lttng_event_desc *desc;

	assert(event->registered == 1);
	desc = event->desc;
	ret = __tracepoint_probe_unregister_queue_release(desc->name,
			desc->probe_callback,
			event);
	WARN_ON_ONCE(ret);
	if (!ret)
		event->registered = 0;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	if (event->registered)
		unregister_event(event);
}

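/*
 * Tear down a session: mark it inactive, unregister all its events,
 * wait for in-flight tracing to finish, flush the probe release queue,
 * then destroy enablers, events and channels before unlinking and
 * freeing the session.
 */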
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enabler *enabler, *tmpenabler;

	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	__tracepoint_probe_prune_release_queue();
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	free(session);
}

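/*
 * Enable a session: register its channels with the session daemon over
 * the notify socket (which fixes each channel's header type), sync the
 * enablers, atomically mark the session active, and flag a pending
 * statedump.
 */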
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	int notify_socket;

	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0)
		return notify_socket;

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	cds_list_for_each_entry(chan, &session->chan_head, node) {
		const struct lttng_ctx *ctx;
		const struct lttng_ctx_field *fields = NULL;
		size_t nr_fields = 0;
		uint32_t chan_id;

		/* Don't change the header type on session stop/restart. */
		if (chan->header_type)
			continue;
		ctx = chan->ctx;
		if (ctx) {
			nr_fields = ctx->nr_fields;
			fields = ctx->fields;
		}
		ret = ustcomm_register_channel(notify_socket,
				session->objd,
				chan->objd,
				nr_fields,
				fields,
				&chan_id,
				&chan->header_type);
		if (ret) {
			DBG("Error (%d) registering channel to sessiond", ret);
			return ret;
		}
		if (chan_id != chan->id) {
			DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
				chan_id, chan->id);
			return -EINVAL;
		}
	}

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/* Set atomically the state to "active" */
	CMM_ACCESS_ONCE(session->active) = 1;
	CMM_ACCESS_ONCE(session->been_active) = 1;

	session->statedump_pending = 1;
	lttng_ust_sockinfo_session_enabled(session->owner);
end:
	return ret;
}

int lttng_session_disable(struct lttng_session *session)
{
	int ret = 0;

	if (!session->active) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "inactive" */
	CMM_ACCESS_ONCE(session->active) = 0;

	/* Set transient enabler state to "disabled" */
	session->tstate = 0;
	lttng_session_sync_enablers(session);
end:
	return ret;
}

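/*
 * Channel enable/disable flip the channel's transient enabler state
 * and its "enabled" flag; the resulting per-event enabled state is
 * recomputed by lttng_session_sync_enablers().
 */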
int lttng_channel_enable(struct lttng_channel *channel)
{
	int ret = 0;

	if (channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set transient enabler state to "enabled" */
	channel->tstate = 1;
	lttng_session_sync_enablers(channel->session);
	/* Set atomically the state to "enabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 1;
end:
	return ret;
}

int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}

/*
 * Supports event creation while tracing session is active.
 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	struct cds_hlist_node *node;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
	cds_hlist_for_each_entry(event, node, head, hlist) {
		assert(event->desc);
		if (!strncmp(event->desc->name, desc->name,
				LTTNG_UST_SYM_NAME_LEN - 1)
				&& chan == event->chan) {
			ret = -EEXIST;
			goto exist;
		}
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/*
	 * Check if the loglevel matches. Refuse to connect the event if not.
	 */
	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	/* Populate lttng_event structure before tracepoint registration. */
	cmm_smp_wmb();
	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
socket_error:
exist:
	return ret;
}

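/*
 * Return 1 if the event descriptor matches a wildcard enabler: the
 * descriptor name must start with the enabler pattern (compared
 * without its trailing '*') and the loglevel constraint must be
 * satisfied.
 */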
static
int lttng_desc_match_wildcard_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_WILDCARD);
	/* Compare excluding final '*' */
	if (strncmp(desc->name, enabler->event_param.name,
			strlen(enabler->event_param.name) - 1))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

static
int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	int loglevel = 0;
	unsigned int has_loglevel = 0;

	assert(enabler->type == LTTNG_ENABLER_EVENT);
	if (strcmp(desc->name, enabler->event_param.name))
		return 0;
	if (desc->loglevel) {
		loglevel = *(*desc->loglevel);
		has_loglevel = 1;
	}
	if (!lttng_loglevel_match(loglevel,
			has_loglevel,
			enabler->event_param.loglevel_type,
			enabler->event_param.loglevel))
		return 0;
	return 1;
}

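/*
 * Return 1 if the event descriptor matches the enabler: it must not
 * match any attached excluder, and it must match the enabler's name
 * pattern (exact or wildcard) and loglevel constraint.
 */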
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	struct lttng_ust_excluder_node *excluder;

	/* If the event matches an excluder, return 'does not match'. */
	cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
		int count;

		for (count = 0; count < excluder->excluder.count; count++) {
			int found, len;
			char *excluder_name;

			excluder_name = (char *) (excluder->excluder.names)
					+ count * LTTNG_UST_SYM_NAME_LEN;
			len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
			if (len > 0 && excluder_name[len - 1] == '*') {
				found = !strncmp(desc->name, excluder_name,
						len - 1);
			} else {
				found = !strncmp(desc->name, excluder_name,
						LTTNG_UST_SYM_NAME_LEN - 1);
			}
			if (found) {
				return 0;
			}
		}
	}
	switch (enabler->type) {
	case LTTNG_ENABLER_WILDCARD:
		return lttng_desc_match_wildcard_enabler(desc, enabler);
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}

static
int lttng_event_match_enabler(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	if (lttng_desc_match_enabler(event->desc, enabler)
			&& event->chan == enabler->chan)
		return 1;
	else
		return 0;
}

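/*
 * Return the backward reference this event holds to the given enabler,
 * or NULL if the event does not reference it yet.
 */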
static
struct lttng_enabler_ref *lttng_event_enabler_ref(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_enabler_ref *enabler_ref;

	cds_list_for_each_entry(enabler_ref,
			&event->enablers_ref_head, node) {
		if (enabler_ref->ref == enabler)
			return enabler_ref;
	}
	return NULL;
}

/*
 * Create a struct lttng_event if it is missing from the session and
 * its descriptor is present in the list of tracepoint probes.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event that matches our enabler, create an
	 * associated lttng_event if one is not already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}

/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching the enabler in the session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}

/*
 * Called at library load: connect the probe on all enablers matching
 * this event.
 * Called with session mutex held.
 */
int lttng_fix_pending_events(void)
{
	struct lttng_session *session;

	cds_list_for_each_entry(session, &sessions, node) {
		lttng_session_lazy_sync_enablers(session);
	}
	return 0;
}

/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

	/* Execute state dump */
	do_lttng_ust_statedump(owner);

	/* Clear pending state dump */
	if (ust_lock()) {
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}

/*
 * Only used internally at session destruction.
 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	cds_list_del(&event->node);
	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}

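/*
 * Destroy every remaining session at tracer teardown.
 */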
void lttng_ust_events_exit(void)
{
	struct lttng_session *session, *tmpsession;

	cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
		lttng_session_destroy(session);
}

/*
 * Enabler management.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 0;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}

int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}

int lttng_attach_context(struct lttng_ust_context *context_param,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
	{
		struct lttng_ust_perf_counter_ctx *perf_ctx_param;

		perf_ctx_param = &context_param->u.perf_counter;
		return lttng_add_perf_counter_to_ctx(
			perf_ctx_param->type,
			perf_ctx_param->config,
			perf_ctx_param->name,
			ctx);
	}
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_CPU_ID:
		return lttng_add_cpu_id_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}

int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}

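/*
 * Free an enabler and everything it owns: its filter bytecode nodes,
 * its excluders and its contexts, then unlink it from the session.
 */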
static
void lttng_enabler_destroy(struct lttng_enabler *enabler)
{
	struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
	struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;

	/* Destroy filter bytecode */
	cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
			&enabler->filter_bytecode_head, node) {
		free(filter_node);
	}

	/* Destroy excluders */
	cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
			&enabler->excluder_head, node) {
		free(excluder_node);
	}

	/* Destroy contexts */
	lttng_destroy_context(enabler->ctx);

	cds_list_del(&enabler->node);
	free(enabler);
}

/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check if the event has any enabled enabler without attached bytecode. */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
	__tracepoint_probe_prune_release_queue();
}

/*
 * Apply enablers to session events, adding events to session if need
 * be. It is required after each modification applied to an active
 * session, and right before session "start".
 * "lazy" sync means we only sync if required.
 */
static
void lttng_session_lazy_sync_enablers(struct lttng_session *session)
{
	/* We can skip if session is not active */
	if (!session->active)
		return;
	lttng_session_sync_enablers(session);
}