/*
 * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2013 Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * SPDX-License-Identifier: GPL-2.0-only
 *
 */

#include <common/pipe.h>
#include <common/utils.h>

#include "manage-kernel.h"
#include "testpoint.h"
#include "health-sessiond.h"
#include "utils.h"
#include "thread.h"
#include "kernel.h"
#include "kernel-consumer.h"

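/*
 * State shared with the kernel management thread: the quit pipe used to
 * signal shutdown and the read end of the kernel notification pipe.
 */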
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	int kernel_poll_pipe_read_fd;
};

/*
 * Update the kernel poll set with the channel fds available across all
 * tracing sessions. Add the wakeup pipe at the end of the set.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_kernel_channel *channel;
	struct ltt_session *session;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}

		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				session_put(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel fd matching 'fd' across all tracing sessions. When found,
 * check for new channel streams and send those stream fds to the kernel
 * consumer.
 *
 * Useful for the CPU hotplug feature.
 */
static int update_kernel_stream(int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing has started, so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

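			/*
			 * Send the channel's stream fds to every consumer
			 * socket attached to the kernel session.
			 */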
			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_streams(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_put(session);
	session_unlock_list();
	return ret;
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU hotplug
 */
static void *thread_kernel_management(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;
	struct thread_notifiers *notifiers = data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("[thread] Thread manage kernel started");

	health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which could
	 * free non-NULL pointers, so initialize it before entering the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

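	/*
	 * Main loop: rebuild the poll set when flagged, then wait for activity
	 * on the quit pipe, the kernel notification pipe, or the kernel
	 * channel fds.
	 */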
	while (1) {
		health_code_update();

		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events,
					notifiers->kernel_poll_pipe_read_fd,
					LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			ret = lttng_poll_add(&events,
					quit_pipe_read_fd,
					LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channels, if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll with an infinite timeout. */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since the timeout is infinite. */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
				"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

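		/*
		 * Dispatch each ready fd: the quit pipe, the kernel
		 * notification pipe, or a kernel channel fd (CPU hotplug).
		 */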
		for (i = 0; i < nb_fd; i++) {
			/* Fetch the poll data once. */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (pollfd == quit_pipe_read_fd) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (revents & LPOLLIN) {
				if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
					(void) lttng_read(notifiers->kernel_poll_pipe_read_fd,
						&tmp, 1);
					/*
					 * The return value is not useful here: any activity on
					 * this pipe means an update is required anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Add a kernel stream to
					 * the kernel session and update the kernel consumer.
					 */
					ret = update_kernel_stream(pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
			"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(the_health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}

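/*
 * Ask the kernel management thread to stop by writing to its quit pipe.
 * Returns true if the notification byte was written successfully.
 */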
static bool shutdown_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}

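/*
 * Release the resources owned by the thread: the quit pipe and the
 * notifiers structure itself.
 */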
static void cleanup_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = data;

	lttng_pipe_destroy(notifiers->quit_pipe);
	free(notifiers);
}

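/*
 * Launch the kernel management thread. On success, the notifiers are owned
 * by the thread and released via cleanup_kernel_management_thread().
 */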
bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
{
	struct lttng_pipe *quit_pipe;
	struct thread_notifiers *notifiers = NULL;
	struct lttng_thread *thread;

	notifiers = zmalloc(sizeof(*notifiers));
	if (!notifiers) {
		goto error_alloc;
	}
	quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!quit_pipe) {
		goto error;
	}
	notifiers->quit_pipe = quit_pipe;
	notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;

	thread = lttng_thread_create("Kernel management",
			thread_kernel_management,
			shutdown_kernel_management_thread,
			cleanup_kernel_management_thread,
			notifiers);
	if (!thread) {
		goto error;
	}
	lttng_thread_put(thread);
	return true;
error:
	cleanup_kernel_management_thread(notifiers);
error_alloc:
	return false;
}