/*
 * Copyright (C) 2010 Nils Carlson
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation;
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#define _LGPL_SOURCE
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <ust/tracepoint.h>
#include <ust/tracepoint-internal.h>
#include <ust/core.h>
#include <ust/kcompat/kcompat.h>
#include <urcu-bp.h>

#include "usterr_signal_safe.h"

/* libraries that contain trace_events (struct trace_event_lib) */
static CDS_LIST_HEAD(libs);
/*
 * A nested mutex is not strictly required here, but it provides the same
 * guarantees for start/stop iteration vs nested ops as markers and
 * tracepoints.
 */
static __thread int nested_mutex;
static DEFINE_MUTEX(trace_events_mutex);

static
int trace_event_get_iter_range(struct trace_event * const **trace_event,
			       struct trace_event * const *begin,
			       struct trace_event * const *end);

static
void lock_trace_events(void)
{
	if (!(nested_mutex++))
		pthread_mutex_lock(&trace_events_mutex);
}

static
void unlock_trace_events(void)
{
	if (!(--nested_mutex))
		pthread_mutex_unlock(&trace_events_mutex);
}
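
/*
 * Illustrative note (not part of the original file): only the outermost
 * lock/unlock pair touches the pthread mutex, so code paths that already
 * hold the lock may call these helpers again safely within the same
 * thread:
 *
 *	lock_trace_events();		nested_mutex 0 -> 1, mutex locked
 *	  lock_trace_events();		nested_mutex 1 -> 2, no-op
 *	  unlock_trace_events();	nested_mutex 2 -> 1, no-op
 *	unlock_trace_events();		nested_mutex 1 -> 0, mutex unlocked
 */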

static
int lib_get_iter_trace_events(struct trace_event_iter *iter)
{
	struct trace_event_lib *iter_lib;
	int found = 0;

	cds_list_for_each_entry(iter_lib, &libs, list) {
		if (iter_lib < iter->lib)
			continue;
		else if (iter_lib > iter->lib)
			iter->trace_event = NULL;
		found = trace_event_get_iter_range(&iter->trace_event,
			iter_lib->trace_events_start,
			iter_lib->trace_events_start + iter_lib->trace_events_count);
		if (found) {
			iter->lib = iter_lib;
			break;
		}
	}
	return found;
}

/**
 * trace_event_get_iter_range - Get the next trace_event given a range.
 * @trace_event: current trace_event (in), next trace_event (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next trace_event has been found (1) or not (0).
 * Will return the first trace_event in the range if the input trace_event is NULL.
 * Called with the trace event mutex held.
 */
static
int trace_event_get_iter_range(struct trace_event * const **trace_event,
			       struct trace_event * const *begin,
			       struct trace_event * const *end)
{
	if (!*trace_event && begin != end)
		*trace_event = begin;
	while (*trace_event >= begin && *trace_event < end) {
		if (!**trace_event)
			(*trace_event)++;	/* skip dummy */
		else
			return 1;
	}
	return 0;
}
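
/*
 * Worked example (section layout assumed for illustration only): given
 * a section { NULL, &ev_a, NULL, &ev_b }, a call with *trace_event ==
 * NULL stops at the slot holding &ev_a and returns 1. After the caller
 * advances the cursor past that slot (as trace_event_iter_next() does),
 * the next call skips the NULL dummy, stops at the slot holding &ev_b
 * and returns 1; once the cursor moves past the end, 0 is returned.
 */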

static void trace_event_get_iter(struct trace_event_iter *iter)
{
	int found = 0;

	found = lib_get_iter_trace_events(iter);

	if (!found)
		trace_event_iter_reset(iter);
}

void trace_event_iter_start(struct trace_event_iter *iter)
{
	lock_trace_events();
	trace_event_get_iter(iter);
}

/*
 * Called with the trace event mutex held.
 */
void trace_event_iter_next(struct trace_event_iter *iter)
{
	iter->trace_event++;
	/*
	 * iter->trace_event may be invalid because we blindly incremented it.
	 * Make sure it is valid by walking the trace_events again, moving on
	 * to the following libraries' trace_events if necessary.
	 */
	trace_event_get_iter(iter);
}

void trace_event_iter_stop(struct trace_event_iter *iter)
{
	unlock_trace_events();
}

void trace_event_iter_reset(struct trace_event_iter *iter)
{
	iter->lib = NULL;
	iter->trace_event = NULL;
}
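
/*
 * Usage sketch (illustrative, not part of the original file): walking
 * every registered trace_event with the iterator API above. The "name"
 * member is assumed to be the struct trace_event field declared in
 * ust/tracepoint.h.
 *
 *	struct trace_event_iter iter;
 *
 *	trace_event_iter_reset(&iter);
 *	trace_event_iter_start(&iter);
 *	while (iter.trace_event) {
 *		DBG("found: %s", (*iter.trace_event)->name);
 *		trace_event_iter_next(&iter);
 *	}
 *	trace_event_iter_stop(&iter);
 */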

int trace_event_register_lib(struct trace_event * const *trace_events_start,
			     int trace_events_count)
{
	struct trace_event_lib *pl, *iter;

	pl = malloc(sizeof(struct trace_event_lib));
	if (!pl)
		return -ENOMEM;

	pl->trace_events_start = trace_events_start;
	pl->trace_events_count = trace_events_count;

	lock_trace_events();
	/*
	 * We sort the libs by struct lib pointer address.
	 */
	cds_list_for_each_entry_reverse(iter, &libs, list) {
		BUG_ON(iter == pl);	/* Should never be in the list twice */
		if (iter < pl) {
			/* We belong to the location right after iter. */
			cds_list_add(&pl->list, &iter->list);
			goto lib_added;
		}
	}
	/* We should be added at the head of the list */
	cds_list_add(&pl->list, &libs);
lib_added:
	unlock_trace_events();

	/* trace_events_count - 1: skip dummy */
	DBG("just registered a trace_events section from %p and having %d trace_events (minus dummy trace_event)",
	    trace_events_start, trace_events_count);

	return 0;
}
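
/*
 * Registration sketch (illustrative only): an instrumented library
 * typically calls trace_event_register_lib() from a constructor with
 * the bounds of its trace_events section. The __start___trace_events
 * and __stop___trace_events symbols below are assumptions for this
 * example; the real section symbols are emitted by the tracepoint
 * instrumentation headers.
 *
 *	extern struct trace_event * const __start___trace_events[];
 *	extern struct trace_event * const __stop___trace_events[];
 *
 *	static void __attribute__((constructor)) __trace_events_init(void)
 *	{
 *		trace_event_register_lib(__start___trace_events,
 *			__stop___trace_events - __start___trace_events);
 *	}
 */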

int trace_event_unregister_lib(struct trace_event * const *trace_events_start)
{
	struct trace_event_lib *lib;

	lock_trace_events();
	cds_list_for_each_entry(lib, &libs, list) {
		if (lib->trace_events_start == trace_events_start) {
			struct trace_event_lib *lib2free = lib;
			cds_list_del(&lib->list);
			free(lib2free);
			break;
		}
	}
	unlock_trace_events();

	return 0;
}