Cleanup: remove redundant memory barrier
[lttng-ust.git] / liblttng-ust / lttng-events.c
1 /*
2 * lttng-events.c
3 *
4 * Holds LTTng per-session event registry.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _GNU_SOURCE
24 #define _LGPL_SOURCE
25 #include <stdio.h>
26 #include <urcu/list.h>
27 #include <urcu/hlist.h>
28 #include <pthread.h>
29 #include <errno.h>
30 #include <sys/shm.h>
31 #include <sys/ipc.h>
32 #include <stdint.h>
33 #include <stddef.h>
34 #include <inttypes.h>
35 #include <time.h>
36 #include <stdbool.h>
37 #include <lttng/ust-endian.h>
38 #include "clock.h"
39
40 #include <urcu-bp.h>
41 #include <urcu/compiler.h>
42 #include <urcu/uatomic.h>
43 #include <urcu/arch.h>
44
45 #include <lttng/tracepoint.h>
46 #include <lttng/ust-events.h>
47
48 #include <usterr-signal-safe.h>
49 #include <helper.h>
50 #include <lttng/ust-ctl.h>
51 #include <ust-comm.h>
52 #include <lttng/ust-dynamic-type.h>
53 #include <lttng/ust-context-provider.h>
54 #include "error.h"
55 #include "compat.h"
56 #include "lttng-ust-uuid.h"
57
58 #include "tracepoint-internal.h"
59 #include "string-utils.h"
60 #include "lttng-tracer.h"
61 #include "lttng-tracer-core.h"
62 #include "lttng-ust-statedump.h"
63 #include "wait.h"
64 #include "../libringbuffer/shm.h"
65 #include "jhash.h"
66
67 /*
68 * All operations within this file are called by the communication
69 * thread, under ust_lock protection.
70 */
71
72 static CDS_LIST_HEAD(sessions);
73
/*
 * Accessor for the file-scope session list. Caller must hold the ust
 * lock to iterate safely (see comment at top of file).
 */
struct cds_list_head *_lttng_get_sessions(void)
{
	return &sessions;
}
78
79 static void _lttng_event_destroy(struct lttng_event *event);
80 static void _lttng_enum_destroy(struct lttng_enum *_enum);
81
82 static
83 void lttng_session_lazy_sync_enablers(struct lttng_session *session);
84 static
85 void lttng_session_sync_enablers(struct lttng_session *session);
86 static
87 void lttng_enabler_destroy(struct lttng_enabler *enabler);
88
89 /*
90 * Called with ust lock held.
91 */
92 int lttng_session_active(void)
93 {
94 struct lttng_session *iter;
95
96 cds_list_for_each_entry(iter, &sessions, node) {
97 if (iter->active)
98 return 1;
99 }
100 return 0;
101 }
102
103 static
104 int lttng_loglevel_match(int loglevel,
105 unsigned int has_loglevel,
106 enum lttng_ust_loglevel_type req_type,
107 int req_loglevel)
108 {
109 if (!has_loglevel)
110 loglevel = TRACE_DEFAULT;
111 switch (req_type) {
112 case LTTNG_UST_LOGLEVEL_RANGE:
113 if (loglevel <= req_loglevel
114 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
115 return 1;
116 else
117 return 0;
118 case LTTNG_UST_LOGLEVEL_SINGLE:
119 if (loglevel == req_loglevel
120 || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
121 return 1;
122 else
123 return 0;
124 case LTTNG_UST_LOGLEVEL_ALL:
125 default:
126 if (loglevel <= TRACE_DEBUG)
127 return 1;
128 else
129 return 0;
130 }
131 }
132
/*
 * Wait for an RCU grace period so that no tracepoint probe call still
 * references data unlinked before this call.
 */
void synchronize_trace(void)
{
	synchronize_rcu();
}
137
138 struct lttng_session *lttng_session_create(void)
139 {
140 struct lttng_session *session;
141 int i;
142
143 session = zmalloc(sizeof(struct lttng_session));
144 if (!session)
145 return NULL;
146 if (lttng_session_context_init(&session->ctx)) {
147 free(session);
148 return NULL;
149 }
150 CDS_INIT_LIST_HEAD(&session->chan_head);
151 CDS_INIT_LIST_HEAD(&session->events_head);
152 CDS_INIT_LIST_HEAD(&session->enums_head);
153 CDS_INIT_LIST_HEAD(&session->enablers_head);
154 for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
155 CDS_INIT_HLIST_HEAD(&session->events_ht.table[i]);
156 for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
157 CDS_INIT_HLIST_HEAD(&session->enums_ht.table[i]);
158 cds_list_add(&session->node, &sessions);
159 return session;
160 }
161
162 /*
163 * Only used internally at session destruction.
164 */
static
void _lttng_channel_unmap(struct lttng_channel *lttng_chan)
{
	struct channel *chan;
	struct lttng_ust_shm_handle *handle;

	/* Unlink from the session's channel list before teardown. */
	cds_list_del(&lttng_chan->node);
	lttng_destroy_context(lttng_chan->ctx);
	chan = lttng_chan->chan;
	handle = lttng_chan->handle;
	/*
	 * note: lttng_chan is private data contained within handle. It
	 * will be freed along with the handle. Do not touch lttng_chan
	 * after channel_destroy() returns.
	 */
	channel_destroy(chan, handle, 0);
}
181
182 static
183 void register_event(struct lttng_event *event)
184 {
185 int ret;
186 const struct lttng_event_desc *desc;
187
188 assert(event->registered == 0);
189 desc = event->desc;
190 ret = __tracepoint_probe_register_queue_release(desc->name,
191 desc->probe_callback,
192 event, desc->signature);
193 WARN_ON_ONCE(ret);
194 if (!ret)
195 event->registered = 1;
196 }
197
198 static
199 void unregister_event(struct lttng_event *event)
200 {
201 int ret;
202 const struct lttng_event_desc *desc;
203
204 assert(event->registered == 1);
205 desc = event->desc;
206 ret = __tracepoint_probe_unregister_queue_release(desc->name,
207 desc->probe_callback,
208 event);
209 WARN_ON_ONCE(ret);
210 if (!ret)
211 event->registered = 0;
212 }
213
214 /*
215 * Only used internally at session destruction.
216 */
static
void _lttng_event_unregister(struct lttng_event *event)
{
	/* Idempotent: only unregister events currently registered. */
	if (event->registered)
		unregister_event(event);
}
223
/*
 * Tear down a session: deactivate it, unregister all its events from
 * their probes, wait for an RCU grace period, then free every object
 * the session owns. Called with ust lock held.
 */
void lttng_session_destroy(struct lttng_session *session)
{
	struct lttng_channel *chan, *tmpchan;
	struct lttng_event *event, *tmpevent;
	struct lttng_enum *_enum, *tmp_enum;
	struct lttng_enabler *enabler, *tmpenabler;

	/* Stop tracing this session before tearing anything down. */
	CMM_ACCESS_ONCE(session->active) = 0;
	cds_list_for_each_entry(event, &session->events_head, node) {
		_lttng_event_unregister(event);
	}
	synchronize_trace();	/* Wait for in-flight events to complete */
	__tracepoint_probe_prune_release_queue();
	/*
	 * After the grace period, no probe call can still reference
	 * these objects, so they can safely be freed.
	 */
	cds_list_for_each_entry_safe(enabler, tmpenabler,
			&session->enablers_head, node)
		lttng_enabler_destroy(enabler);
	cds_list_for_each_entry_safe(event, tmpevent,
			&session->events_head, node)
		_lttng_event_destroy(event);
	cds_list_for_each_entry_safe(_enum, tmp_enum,
			&session->enums_head, node)
		_lttng_enum_destroy(_enum);
	cds_list_for_each_entry_safe(chan, tmpchan, &session->chan_head, node)
		_lttng_channel_unmap(chan);
	cds_list_del(&session->node);
	lttng_destroy_context(session->ctx);
	free(session);
}
252
/*
 * Register an enumeration with the session and the session daemon.
 * Returns 0 on success, -EEXIST if this enum descriptor is already
 * registered for the session, or a negative error code.
 */
static
int lttng_enum_create(const struct lttng_enum_desc *desc,
		struct lttng_session *session)
{
	const char *enum_name = desc->name;
	struct lttng_enum *_enum;
	struct cds_hlist_head *head;
	int ret = 0;
	size_t name_len = strlen(enum_name);
	uint32_t hash;
	int notify_socket;

	/* Check if this enum is already registered for this session. */
	hash = jhash(enum_name, name_len, 0);
	head = &session->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];

	_enum = lttng_ust_enum_get_from_desc(session, desc);
	if (_enum) {
		ret = -EEXIST;
		goto exist;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	_enum = zmalloc(sizeof(*_enum));
	if (!_enum) {
		ret = -ENOMEM;
		goto cache_error;
	}
	_enum->session = session;
	_enum->desc = desc;

	/* The sessiond assigns the enum id stored into _enum->id. */
	ret = ustcomm_register_enum(notify_socket,
			session->objd,
			enum_name,
			desc->nr_entries,
			desc->entries,
			&_enum->id);
	if (ret < 0) {
		DBG("Error (%d) registering enumeration to sessiond", ret);
		goto sessiond_register_error;
	}
	/* Publish in both the session list and the lookup hash table. */
	cds_list_add(&_enum->node, &session->enums_head);
	cds_hlist_add_head(&_enum->hlist, head);
	return 0;

sessiond_register_error:
	free(_enum);
cache_error:
socket_error:
exist:
	return ret;
}
310
311 static
312 int lttng_create_enum_check(const struct lttng_type *type,
313 struct lttng_session *session)
314 {
315 switch (type->atype) {
316 case atype_enum:
317 {
318 const struct lttng_enum_desc *enum_desc;
319 int ret;
320
321 enum_desc = type->u.basic.enumeration.desc;
322 ret = lttng_enum_create(enum_desc, session);
323 if (ret && ret != -EEXIST) {
324 DBG("Unable to create enum error: (%d)", ret);
325 return ret;
326 }
327 break;
328 }
329 case atype_dynamic:
330 {
331 const struct lttng_event_field *tag_field_generic;
332 const struct lttng_enum_desc *enum_desc;
333 int ret;
334
335 tag_field_generic = lttng_ust_dynamic_type_tag_field();
336 enum_desc = tag_field_generic->type.u.basic.enumeration.desc;
337 ret = lttng_enum_create(enum_desc, session);
338 if (ret && ret != -EEXIST) {
339 DBG("Unable to create enum error: (%d)", ret);
340 return ret;
341 }
342 break;
343 }
344 default:
345 /* TODO: nested types when they become supported. */
346 break;
347 }
348 return 0;
349 }
350
351 static
352 int lttng_create_all_event_enums(size_t nr_fields,
353 const struct lttng_event_field *event_fields,
354 struct lttng_session *session)
355 {
356 size_t i;
357 int ret;
358
359 /* For each field, ensure enum is part of the session. */
360 for (i = 0; i < nr_fields; i++) {
361 const struct lttng_type *type = &event_fields[i].type;
362
363 ret = lttng_create_enum_check(type, session);
364 if (ret)
365 return ret;
366 }
367 return 0;
368 }
369
370 static
371 int lttng_create_all_ctx_enums(size_t nr_fields,
372 const struct lttng_ctx_field *ctx_fields,
373 struct lttng_session *session)
374 {
375 size_t i;
376 int ret;
377
378 /* For each field, ensure enum is part of the session. */
379 for (i = 0; i < nr_fields; i++) {
380 const struct lttng_type *type = &ctx_fields[i].event_field.type;
381
382 ret = lttng_create_enum_check(type, session);
383 if (ret)
384 return ret;
385 }
386 return 0;
387 }
388
/*
 * Ensure that a state-dump will be performed for this session at the end
 * of the current handle_message(). Only flags the work; the actual dump
 * happens later in lttng_handle_pending_statedump().
 */
int lttng_session_statedump(struct lttng_session *session)
{
	session->statedump_pending = 1;
	lttng_ust_sockinfo_session_enabled(session->owner);
	return 0;
}
399
/*
 * Activate a session: sync enablers, register channels (and their
 * context enums) with the session daemon, then atomically flip the
 * session to active and request a state dump.
 * Returns 0 on success, -EBUSY if already active, negative error
 * otherwise.
 */
int lttng_session_enable(struct lttng_session *session)
{
	int ret = 0;
	struct lttng_channel *chan;
	int notify_socket;

	if (session->active) {
		ret = -EBUSY;
		goto end;
	}

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0)
		return notify_socket;

	/* Set transient enabler state to "enabled" */
	session->tstate = 1;

	/* We need to sync enablers with session before activation. */
	lttng_session_sync_enablers(session);

	/*
	 * Snapshot the number of events per channel to know the type of header
	 * we need to use.
	 */
	cds_list_for_each_entry(chan, &session->chan_head, node) {
		const struct lttng_ctx *ctx;
		const struct lttng_ctx_field *fields = NULL;
		size_t nr_fields = 0;
		uint32_t chan_id;

		/* don't change it if session stop/restart */
		if (chan->header_type)
			continue;
		ctx = chan->ctx;
		if (ctx) {
			nr_fields = ctx->nr_fields;
			fields = ctx->fields;
			ret = lttng_create_all_ctx_enums(nr_fields, fields,
					session);
			if (ret < 0) {
				DBG("Error (%d) adding enum to session", ret);
				/*
				 * NOTE(review): error returns below leave
				 * session->tstate == 1 while the session stays
				 * inactive — presumably callers retry or tear
				 * the session down; confirm against callers.
				 */
				return ret;
			}
		}
		ret = ustcomm_register_channel(notify_socket,
				session,
				session->objd,
				chan->objd,
				nr_fields,
				fields,
				&chan_id,
				&chan->header_type);
		if (ret) {
			DBG("Error (%d) registering channel to sessiond", ret);
			return ret;
		}
		/* The sessiond-assigned id must match the creation-time id. */
		if (chan_id != chan->id) {
			DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
				chan_id, chan->id);
			return -EINVAL;
		}
	}

	/* Set atomically the state to "active" */
	CMM_ACCESS_ONCE(session->active) = 1;
	CMM_ACCESS_ONCE(session->been_active) = 1;

	ret = lttng_session_statedump(session);
	if (ret)
		return ret;
end:
	return ret;
}
474
475 int lttng_session_disable(struct lttng_session *session)
476 {
477 int ret = 0;
478
479 if (!session->active) {
480 ret = -EBUSY;
481 goto end;
482 }
483 /* Set atomically the state to "inactive" */
484 CMM_ACCESS_ONCE(session->active) = 0;
485
486 /* Set transient enabler state to "disabled" */
487 session->tstate = 0;
488 lttng_session_sync_enablers(session);
489 end:
490 return ret;
491 }
492
493 int lttng_channel_enable(struct lttng_channel *channel)
494 {
495 int ret = 0;
496
497 if (channel->enabled) {
498 ret = -EBUSY;
499 goto end;
500 }
501 /* Set transient enabler state to "enabled" */
502 channel->tstate = 1;
503 lttng_session_sync_enablers(channel->session);
504 /* Set atomically the state to "enabled" */
505 CMM_ACCESS_ONCE(channel->enabled) = 1;
506 end:
507 return ret;
508 }
509
/*
 * Disable a channel. Returns -EBUSY if already disabled, 0 on success.
 */
int lttng_channel_disable(struct lttng_channel *channel)
{
	int ret = 0;

	if (!channel->enabled) {
		ret = -EBUSY;
		goto end;
	}
	/* Set atomically the state to "disabled" */
	CMM_ACCESS_ONCE(channel->enabled) = 0;
	/* Set transient enabler state to "disabled" */
	channel->tstate = 0;
	lttng_session_sync_enablers(channel->session);
end:
	return ret;
}
526
/*
 * Supports event creation while tracing session is active.
 * Creates the lttng_event for the given descriptor in the channel,
 * registers it with the session daemon (which assigns event->id), and
 * publishes it in the session's event list and hash table.
 * Returns 0 on success, negative error code otherwise.
 */
static
int lttng_event_create(const struct lttng_event_desc *desc,
		struct lttng_channel *chan)
{
	const char *event_name = desc->name;
	struct lttng_event *event;
	struct lttng_session *session = chan->session;
	struct cds_hlist_head *head;
	int ret = 0;
	size_t name_len = strlen(event_name);
	uint32_t hash;
	int notify_socket, loglevel;
	const char *uri;

	/* Hash bucket used to publish the event at the end. */
	hash = jhash(event_name, name_len, 0);
	head = &chan->session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];

	notify_socket = lttng_get_notify_socket(session->owner);
	if (notify_socket < 0) {
		ret = notify_socket;
		goto socket_error;
	}

	/* Field enums must be known to the session before registration. */
	ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
			session);
	if (ret < 0) {
		DBG("Error (%d) adding enum to session", ret);
		goto create_enum_error;
	}

	event = zmalloc(sizeof(struct lttng_event));
	if (!event) {
		ret = -ENOMEM;
		goto cache_error;
	}
	event->chan = chan;

	/* Event will be enabled by enabler sync. */
	event->enabled = 0;
	event->registered = 0;
	CDS_INIT_LIST_HEAD(&event->bytecode_runtime_head);
	CDS_INIT_LIST_HEAD(&event->enablers_ref_head);
	event->desc = desc;

	/* Resolve optional loglevel and model.emf.uri descriptor fields. */
	if (desc->loglevel)
		loglevel = *(*event->desc->loglevel);
	else
		loglevel = TRACE_DEFAULT;
	if (desc->u.ext.model_emf_uri)
		uri = *(desc->u.ext.model_emf_uri);
	else
		uri = NULL;

	/* Fetch event ID from sessiond */
	ret = ustcomm_register_event(notify_socket,
			session,
			session->objd,
			chan->objd,
			event_name,
			loglevel,
			desc->signature,
			desc->nr_fields,
			desc->fields,
			uri,
			&event->id);
	if (ret < 0) {
		DBG("Error (%d) registering event to sessiond", ret);
		goto sessiond_register_error;
	}

	cds_list_add(&event->node, &chan->session->events_head);
	cds_hlist_add_head(&event->hlist, head);
	return 0;

sessiond_register_error:
	free(event);
cache_error:
create_enum_error:
socket_error:
	return ret;
}
614
615 static
616 int lttng_desc_match_star_glob_enabler(const struct lttng_event_desc *desc,
617 struct lttng_enabler *enabler)
618 {
619 int loglevel = 0;
620 unsigned int has_loglevel = 0;
621
622 assert(enabler->type == LTTNG_ENABLER_STAR_GLOB);
623 if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
624 desc->name, SIZE_MAX))
625 return 0;
626 if (desc->loglevel) {
627 loglevel = *(*desc->loglevel);
628 has_loglevel = 1;
629 }
630 if (!lttng_loglevel_match(loglevel,
631 has_loglevel,
632 enabler->event_param.loglevel_type,
633 enabler->event_param.loglevel))
634 return 0;
635 return 1;
636 }
637
638 static
639 int lttng_desc_match_event_enabler(const struct lttng_event_desc *desc,
640 struct lttng_enabler *enabler)
641 {
642 int loglevel = 0;
643 unsigned int has_loglevel = 0;
644
645 assert(enabler->type == LTTNG_ENABLER_EVENT);
646 if (strcmp(desc->name, enabler->event_param.name))
647 return 0;
648 if (desc->loglevel) {
649 loglevel = *(*desc->loglevel);
650 has_loglevel = 1;
651 }
652 if (!lttng_loglevel_match(loglevel,
653 has_loglevel,
654 enabler->event_param.loglevel_type,
655 enabler->event_param.loglevel))
656 return 0;
657 return 1;
658 }
659
/*
 * Return 1 when the event descriptor matches the enabler, 0 when it
 * does not, -EINVAL for an unknown enabler type. For star-glob
 * enablers, a match against any attached excluder pattern turns the
 * result into "does not match".
 */
static
int lttng_desc_match_enabler(const struct lttng_event_desc *desc,
		struct lttng_enabler *enabler)
{
	switch (enabler->type) {
	case LTTNG_ENABLER_STAR_GLOB:
	{
		struct lttng_ust_excluder_node *excluder;

		if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
			return 0;
		}

		/*
		 * If the matching event matches with an excluder,
		 * return 'does not match'
		 */
		cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
			int count;

			/* Each excluder node packs several fixed-size names. */
			for (count = 0; count < excluder->excluder.count; count++) {
				int len;
				char *excluder_name;

				excluder_name = (char *) (excluder->excluder.names)
						+ count * LTTNG_UST_SYM_NAME_LEN;
				len = strnlen(excluder_name, LTTNG_UST_SYM_NAME_LEN);
				if (len > 0 && strutils_star_glob_match(excluder_name, len, desc->name, SIZE_MAX))
					return 0;
			}
		}
		return 1;
	}
	case LTTNG_ENABLER_EVENT:
		return lttng_desc_match_event_enabler(desc, enabler);
	default:
		return -EINVAL;
	}
}
699
700 static
701 int lttng_event_match_enabler(struct lttng_event *event,
702 struct lttng_enabler *enabler)
703 {
704 if (lttng_desc_match_enabler(event->desc, enabler)
705 && event->chan == enabler->chan)
706 return 1;
707 else
708 return 0;
709 }
710
711 static
712 struct lttng_enabler_ref * lttng_event_enabler_ref(struct lttng_event *event,
713 struct lttng_enabler *enabler)
714 {
715 struct lttng_enabler_ref *enabler_ref;
716
717 cds_list_for_each_entry(enabler_ref,
718 &event->enablers_ref_head, node) {
719 if (enabler_ref->ref == enabler)
720 return enabler_ref;
721 }
722 return NULL;
723 }
724
/*
 * Create struct lttng_event if it is missing and present in the list of
 * tracepoint probes. Walks every registered probe descriptor, and for
 * each event descriptor matching the enabler, creates the
 * session-local lttng_event unless one already exists for the same
 * descriptor and channel.
 */
static
void lttng_create_event_if_missing(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_probe_desc *probe_desc;
	const struct lttng_event_desc *desc;
	struct lttng_event *event;
	int i;
	struct cds_list_head *probe_list;

	probe_list = lttng_get_probe_list_head();
	/*
	 * For each probe event, if we find that a probe event matches
	 * our enabler, create an associated lttng_event if not
	 * already present.
	 */
	cds_list_for_each_entry(probe_desc, probe_list, head) {
		for (i = 0; i < probe_desc->nr_events; i++) {
			int found = 0, ret;
			struct cds_hlist_head *head;
			struct cds_hlist_node *node;
			const char *event_name;
			size_t name_len;
			uint32_t hash;

			desc = probe_desc->event_desc[i];
			if (!lttng_desc_match_enabler(desc, enabler))
				continue;
			event_name = desc->name;
			name_len = strlen(event_name);

			/*
			 * Check if already created: look up the session event
			 * hash table bucket for this name.
			 */
			hash = jhash(event_name, name_len, 0);
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event->desc == desc
						&& event->chan == enabler->chan)
					found = 1;
			}
			if (found)
				continue;

			/*
			 * We need to create an event for this
			 * event probe. Failure is logged, not fatal:
			 * remaining descriptors are still processed.
			 */
			ret = lttng_event_create(probe_desc->event_desc[i],
					enabler->chan);
			if (ret) {
				DBG("Unable to create event %s, error %d\n",
					probe_desc->event_desc[i]->name, ret);
			}
		}
	}
}
786
/*
 * Iterate over all the UST sessions to unregister and destroy all probes
 * from the probe provider descriptor received as argument. Must be
 * called with the ust_lock held.
 *
 * Two passes separated by an RCU grace period: first queue the probe
 * unregistration of every matching event, then — once no probe call can
 * still reference them — destroy the events (and their enums).
 */
void lttng_probe_provider_unregister_events(struct lttng_probe_desc *provider_desc)
{
	struct cds_hlist_node *node, *tmp_node;
	struct cds_list_head *sessionsp;
	struct lttng_session *session;
	struct cds_hlist_head *head;
	struct lttng_event *event;
	unsigned int i, j;

	/* Get handle on list of sessions. */
	sessionsp = _lttng_get_sessions();

	/*
	 * Iterate over all events in the probe provider descriptions and sessions
	 * to queue the unregistration of the events.
	 */
	for (i = 0; i < provider_desc->nr_events; i++) {
		const struct lttng_event_desc *event_desc;
		const char *event_name;
		size_t name_len;
		uint32_t hash;

		event_desc = provider_desc->event_desc[i];
		event_name = event_desc->name;
		name_len = strlen(event_name);
		hash = jhash(event_name, name_len, 0);

		/* Iterate over all session to find the current event description. */
		cds_list_for_each_entry(session, sessionsp, node) {
			/*
			 * Get the list of events in the hashtable bucket and iterate to
			 * find the event matching this descriptor.
			 */
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry(event, node, head, hlist) {
				if (event_desc == event->desc) {
					/* Queue the unregistration of this event. */
					_lttng_event_unregister(event);
					break;
				}
			}
		}
	}

	/* Wait for grace period. */
	synchronize_trace();
	/* Prune the unregistration queue. */
	__tracepoint_probe_prune_release_queue();

	/*
	 * It is now safe to destroy the events and remove them from the event list
	 * and hashtables.
	 */
	for (i = 0; i < provider_desc->nr_events; i++) {
		const struct lttng_event_desc *event_desc;
		const char *event_name;
		size_t name_len;
		uint32_t hash;

		event_desc = provider_desc->event_desc[i];
		event_name = event_desc->name;
		name_len = strlen(event_name);
		hash = jhash(event_name, name_len, 0);

		/* Iterate over all sessions to find the current event description. */
		cds_list_for_each_entry(session, sessionsp, node) {
			/*
			 * Get the list of events in the hashtable bucket and iterate to
			 * find the event matching this descriptor. Safe variant:
			 * _lttng_event_destroy() unlinks the current node.
			 */
			head = &session->events_ht.table[hash & (LTTNG_UST_EVENT_HT_SIZE - 1)];
			cds_hlist_for_each_entry_safe(event, node, tmp_node, head, hlist) {
				if (event_desc == event->desc) {
					/* Destroy enums of the current event. */
					for (j = 0; j < event->desc->nr_fields; j++) {
						const struct lttng_enum_desc *enum_desc;
						const struct lttng_event_field *field;
						struct lttng_enum *curr_enum;

						field = &(event->desc->fields[j]);
						if (field->type.atype != atype_enum) {
							continue;
						}

						enum_desc = field->type.u.basic.enumeration.desc;
						curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
						if (curr_enum) {
							_lttng_enum_destroy(curr_enum);
						}
					}

					/* Destroy event. */
					_lttng_event_destroy(event);
					break;
				}
			}
		}
	}
}
891
/*
 * Create events associated with an enabler (if not already present),
 * and add backward reference from the event to the enabler.
 * Returns 0 on success, -ENOMEM if a backward reference cannot be
 * allocated (earlier references already added are kept).
 */
static
int lttng_enabler_ref_events(struct lttng_enabler *enabler)
{
	struct lttng_session *session = enabler->chan->session;
	struct lttng_event *event;

	/* First ensure that probe events are created for this enabler. */
	lttng_create_event_if_missing(enabler);

	/* For each event matching enabler in session event list. */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;

		if (!lttng_event_match_enabler(event, enabler))
			continue;

		enabler_ref = lttng_event_enabler_ref(event, enabler);
		if (!enabler_ref) {
			/*
			 * If no backward ref, create it.
			 * Add backward ref from event to enabler.
			 */
			enabler_ref = zmalloc(sizeof(*enabler_ref));
			if (!enabler_ref)
				return -ENOMEM;
			enabler_ref->ref = enabler;
			cds_list_add(&enabler_ref->node,
				&event->enablers_ref_head);
		}

		/*
		 * Link filter bytecodes if not linked yet.
		 */
		lttng_enabler_event_link_bytecode(event, enabler);

		/* TODO: merge event context. */
	}
	return 0;
}
935
936 /*
937 * Called at library load: connect the probe on all enablers matching
938 * this event.
939 * Called with session mutex held.
940 */
941 int lttng_fix_pending_events(void)
942 {
943 struct lttng_session *session;
944
945 cds_list_for_each_entry(session, &sessions, node) {
946 lttng_session_lazy_sync_enablers(session);
947 }
948 return 0;
949 }
950
951 /*
952 * For each session of the owner thread, execute pending statedump.
953 * Only dump state for the sessions owned by the caller thread, because
954 * we don't keep ust_lock across the entire iteration.
955 */
/*
 * For each session of the owner thread, execute pending statedump.
 * Only dump state for the sessions owned by the caller thread, because
 * we don't keep ust_lock across the entire iteration.
 */
void lttng_handle_pending_statedump(void *owner)
{
	struct lttng_session *session;

	/* Execute state dump */
	do_lttng_ust_statedump(owner);

	/* Clear pending state dump */
	if (ust_lock()) {
		/*
		 * NOTE(review): nonzero ust_lock() appears to mean
		 * "should quit"; the lock still needs releasing, hence
		 * goto end rather than return — confirm against
		 * ust_lock() contract in lttng-tracer-core.h.
		 */
		goto end;
	}
	cds_list_for_each_entry(session, &sessions, node) {
		if (session->owner != owner)
			continue;
		if (!session->statedump_pending)
			continue;
		session->statedump_pending = 0;
	}
end:
	ust_unlock();
	return;
}
978
979 /*
980 * Only used internally at session destruction.
981 */
static
void _lttng_event_destroy(struct lttng_event *event)
{
	struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;

	/* Remove from event list. */
	cds_list_del(&event->node);
	/* Remove from event hash table. */
	cds_hlist_del(&event->hlist);

	lttng_destroy_context(event->ctx);
	lttng_free_event_filter_runtime(event);
	/* Free event enabler refs */
	cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
			&event->enablers_ref_head, node)
		free(enabler_ref);
	free(event);
}
1000
/*
 * Unlink an enum from the session list and hash table, then free it.
 * Only used internally at session/provider destruction.
 */
static
void _lttng_enum_destroy(struct lttng_enum *_enum)
{
	cds_list_del(&_enum->node);
	cds_hlist_del(&_enum->hlist);
	free(_enum);
}
1008
1009 void lttng_ust_events_exit(void)
1010 {
1011 struct lttng_session *session, *tmpsession;
1012
1013 cds_list_for_each_entry_safe(session, tmpsession, &sessions, node)
1014 lttng_session_destroy(session);
1015 }
1016
1017 /*
1018 * Enabler management.
1019 */
/*
 * Allocate an enabler of the given type for a channel, copy the event
 * matching parameters, link it into the session's enabler list, and
 * lazily sync enablers. Returns NULL on allocation failure.
 */
struct lttng_enabler *lttng_enabler_create(enum lttng_enabler_type type,
		struct lttng_ust_event *event_param,
		struct lttng_channel *chan)
{
	struct lttng_enabler *enabler;

	enabler = zmalloc(sizeof(*enabler));
	if (!enabler)
		return NULL;
	enabler->type = type;
	CDS_INIT_LIST_HEAD(&enabler->filter_bytecode_head);
	CDS_INIT_LIST_HEAD(&enabler->excluder_head);
	memcpy(&enabler->event_param, event_param,
		sizeof(enabler->event_param));
	enabler->chan = chan;
	/* ctx left NULL */
	enabler->enabled = 0;
	cds_list_add(&enabler->node, &enabler->chan->session->enablers_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return enabler;
}
1041
/* Mark the enabler enabled and lazily re-sync the session. Returns 0. */
int lttng_enabler_enable(struct lttng_enabler *enabler)
{
	enabler->enabled = 1;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
1048
/* Mark the enabler disabled and lazily re-sync the session. Returns 0. */
int lttng_enabler_disable(struct lttng_enabler *enabler)
{
	enabler->enabled = 0;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
1055
/*
 * Attach a filter bytecode node to the enabler (ownership transfers to
 * the enabler; freed in lttng_enabler_destroy). Returns 0.
 */
int lttng_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_filter_bytecode_node *bytecode)
{
	bytecode->enabler = enabler;
	cds_list_add_tail(&bytecode->node, &enabler->filter_bytecode_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
1064
/*
 * Attach an exclusion node to the enabler (ownership transfers to the
 * enabler; freed in lttng_enabler_destroy). Returns 0.
 */
int lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
		struct lttng_ust_excluder_node *excluder)
{
	excluder->enabler = enabler;
	cds_list_add_tail(&excluder->node, &enabler->excluder_head);
	lttng_session_lazy_sync_enablers(enabler->chan->session);
	return 0;
}
1073
/*
 * Dispatch a context-attach request to the matching lttng_add_*_to_ctx
 * helper. Returns the helper's result, -EPERM if the session has
 * already been active (metadata cannot express late context addition),
 * or -EINVAL for an unknown context type.
 */
int lttng_attach_context(struct lttng_ust_context *context_param,
		union ust_args *uargs,
		struct lttng_ctx **ctx, struct lttng_session *session)
{
	/*
	 * We cannot attach a context after trace has been started for a
	 * session because the metadata does not allow expressing this
	 * information outside of the original channel scope.
	 */
	if (session->been_active)
		return -EPERM;

	switch (context_param->ctx) {
	case LTTNG_UST_CONTEXT_PTHREAD_ID:
		return lttng_add_pthread_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PERF_THREAD_COUNTER:
	{
		struct lttng_ust_perf_counter_ctx *perf_ctx_param;

		perf_ctx_param = &context_param->u.perf_counter;
		return lttng_add_perf_counter_to_ctx(
			perf_ctx_param->type,
			perf_ctx_param->config,
			perf_ctx_param->name,
			ctx);
	}
	case LTTNG_UST_CONTEXT_VTID:
		return lttng_add_vtid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VPID:
		return lttng_add_vpid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PROCNAME:
		return lttng_add_procname_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IP:
		return lttng_add_ip_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_CPU_ID:
		return lttng_add_cpu_id_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_APP_CONTEXT:
		/* Application-provided context: name comes from uargs. */
		return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
			ctx);
	case LTTNG_UST_CONTEXT_CGROUP_NS:
		return lttng_add_cgroup_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_IPC_NS:
		return lttng_add_ipc_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_MNT_NS:
		return lttng_add_mnt_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_NET_NS:
		return lttng_add_net_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_PID_NS:
		return lttng_add_pid_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_USER_NS:
		return lttng_add_user_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_UTS_NS:
		return lttng_add_uts_ns_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VUID:
		return lttng_add_vuid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VEUID:
		return lttng_add_veuid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VSUID:
		return lttng_add_vsuid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VGID:
		return lttng_add_vgid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VEGID:
		return lttng_add_vegid_to_ctx(ctx);
	case LTTNG_UST_CONTEXT_VSGID:
		return lttng_add_vsgid_to_ctx(ctx);
	default:
		return -EINVAL;
	}
}
1143
/*
 * Attach a context to an enabler. Currently not implemented: always
 * returns -ENOSYS.
 *
 * NOTE(review): the #if 0 body calls lttng_attach_context() with three
 * arguments, but the current prototype takes four (uargs) — it would
 * need updating before being re-enabled.
 */
int lttng_enabler_attach_context(struct lttng_enabler *enabler,
		struct lttng_ust_context *context_param)
{
#if 0	// disabled for now.
	struct lttng_session *session = enabler->chan->session;
	int ret;

	ret = lttng_attach_context(context_param, &enabler->ctx,
			session);
	if (ret)
		return ret;
	lttng_session_lazy_sync_enablers(enabler->chan->session);
#endif
	return -ENOSYS;
}
1159
1160 static
1161 void lttng_enabler_destroy(struct lttng_enabler *enabler)
1162 {
1163 struct lttng_ust_filter_bytecode_node *filter_node, *tmp_filter_node;
1164 struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
1165
1166 /* Destroy filter bytecode */
1167 cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
1168 &enabler->filter_bytecode_head, node) {
1169 free(filter_node);
1170 }
1171
1172 /* Destroy excluders */
1173 cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
1174 &enabler->excluder_head, node) {
1175 free(excluder_node);
1176 }
1177
1178 /* Destroy contexts */
1179 lttng_destroy_context(enabler->ctx);
1180
1181 cds_list_del(&enabler->node);
1182 free(enabler);
1183 }
1184
/*
 * lttng_session_sync_enablers should be called just before starting a
 * session.
 *
 * Walks every enabler of the session to (lazily) create/reference the
 * events they match, then recomputes each event's enabled state,
 * synchronizes tracepoint probe registration with that state, and
 * refreshes the per-event filter bytecode runtime state.
 */
static
void lttng_session_sync_enablers(struct lttng_session *session)
{
	struct lttng_enabler *enabler;
	struct lttng_event *event;

	/* First pass: let each enabler take references on matching events. */
	cds_list_for_each_entry(enabler, &session->enablers_head, node)
		lttng_enabler_ref_events(enabler);
	/*
	 * For each event, if at least one of its enablers is enabled,
	 * and its channel and session transient states are enabled, we
	 * enable the event, else we disable it.
	 */
	cds_list_for_each_entry(event, &session->events_head, node) {
		struct lttng_enabler_ref *enabler_ref;
		struct lttng_bytecode_runtime *runtime;
		int enabled = 0, has_enablers_without_bytecode = 0;

		/* Enable events: one enabled enabler reference suffices. */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled) {
				enabled = 1;
				break;
			}
		}
		/*
		 * Enabled state is based on union of enablers, with
		 * intersection of session and channel transient enable
		 * states.
		 */
		enabled = enabled && session->tstate && event->chan->tstate;

		/* Publish the enabled state for concurrent readers. */
		CMM_STORE_SHARED(event->enabled, enabled);
		/*
		 * Sync tracepoint registration with event enabled
		 * state.
		 */
		if (enabled) {
			if (!event->registered)
				register_event(event);
		} else {
			if (event->registered)
				unregister_event(event);
		}

		/* Check if has enablers without bytecode enabled */
		cds_list_for_each_entry(enabler_ref,
				&event->enablers_ref_head, node) {
			if (enabler_ref->ref->enabled
					&& cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
				has_enablers_without_bytecode = 1;
				break;
			}
		}
		event->has_enablers_without_bytecode =
			has_enablers_without_bytecode;

		/* Enable filters: sync each attached bytecode runtime. */
		cds_list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			lttng_filter_sync_state(runtime);
		}
	}
	/* Release probes queued for deferred unregistration. */
	__tracepoint_probe_prune_release_queue();
}
1255
1256 /*
1257 * Apply enablers to session events, adding events to session if need
1258 * be. It is required after each modification applied to an active
1259 * session, and right before session "start".
1260 * "lazy" sync means we only sync if required.
1261 */
1262 static
1263 void lttng_session_lazy_sync_enablers(struct lttng_session *session)
1264 {
1265 /* We can skip if session is not active */
1266 if (!session->active)
1267 return;
1268 lttng_session_sync_enablers(session);
1269 }
1270
1271 /*
1272 * Update all sessions with the given app context.
1273 * Called with ust lock held.
1274 * This is invoked when an application context gets loaded/unloaded. It
1275 * ensures the context callbacks are in sync with the application
1276 * context (either app context callbacks, or dummy callbacks).
1277 */
1278 void lttng_ust_context_set_session_provider(const char *name,
1279 size_t (*get_size)(struct lttng_ctx_field *field, size_t offset),
1280 void (*record)(struct lttng_ctx_field *field,
1281 struct lttng_ust_lib_ring_buffer_ctx *ctx,
1282 struct lttng_channel *chan),
1283 void (*get_value)(struct lttng_ctx_field *field,
1284 struct lttng_ctx_value *value))
1285 {
1286 struct lttng_session *session;
1287
1288 cds_list_for_each_entry(session, &sessions, node) {
1289 struct lttng_channel *chan;
1290 struct lttng_event *event;
1291 int ret;
1292
1293 ret = lttng_ust_context_set_provider_rcu(&session->ctx,
1294 name, get_size, record, get_value);
1295 if (ret)
1296 abort();
1297 cds_list_for_each_entry(chan, &session->chan_head, node) {
1298 ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
1299 name, get_size, record, get_value);
1300 if (ret)
1301 abort();
1302 }
1303 cds_list_for_each_entry(event, &session->events_head, node) {
1304 ret = lttng_ust_context_set_provider_rcu(&event->ctx,
1305 name, get_size, record, get_value);
1306 if (ret)
1307 abort();
1308 }
1309 }
1310 }