/*
 * Copyright (C) 2011 - David Goulet <david.goulet@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2013 - Jérémie Galarneau <jeremie.galarneau@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <common/pipe.h>
#include <common/utils.h>

#include "manage-kernel.h"
#include "testpoint.h"
#include "health-sessiond.h"
#include "utils.h"
#include "thread.h"
#include "kernel.h"
#include "kernel-consumer.h"

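/*
 * State shared between the kernel management thread and its shutdown and
 * cleanup callbacks. The quit pipe wakes the thread up when the session
 * daemon shuts down, while the kernel poll pipe signals that the set of
 * kernel channels to watch has changed and the poll set must be rebuilt.
 */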
struct thread_notifiers {
	struct lttng_pipe *quit_pipe;
	int kernel_poll_pipe_read_fd;
};

/*
 * Update the kernel poll set with the channel fds available across all
 * tracing sessions. The wakeup and quit pipes are added to the set by the
 * caller.
 */
static int update_kernel_poll(struct lttng_poll_event *events)
{
	int ret;
	struct ltt_kernel_channel *channel;
	struct ltt_session *session;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel poll set");

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}

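		/*
		 * Watch every kernel channel fd; activity on a channel fd means new
		 * per-CPU streams appeared (CPU hotplug), which is handled by
		 * update_kernel_stream().
		 */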
		cds_list_for_each_entry(channel,
				&session->kernel_session->channel_list.head, list) {
			/* Add channel fd to the kernel poll set */
			ret = lttng_poll_add(events, channel->fd, LPOLLIN | LPOLLRDNORM);
			if (ret < 0) {
				session_unlock(session);
				session_put(session);
				goto error;
			}
			DBG("Channel fd %d added to kernel set", channel->fd);
		}
		session_unlock(session);
		/* Balance the reference taken by session_get() above. */
		session_put(session);
	}
	session_unlock_list();

	return 0;

error:
	session_unlock_list();
	return -1;
}

/*
 * Find the channel matching 'fd' across all tracing sessions. When found,
 * check for new channel streams and send those stream fds to the kernel
 * consumer.
 *
 * Useful for the CPU hotplug feature.
 */
static int update_kernel_stream(int fd)
{
	int ret = 0;
	struct ltt_session *session;
	struct ltt_kernel_session *ksess;
	struct ltt_kernel_channel *channel;
	const struct ltt_session_list *session_list = session_get_list();

	DBG("Updating kernel streams for channel fd %d", fd);

	session_lock_list();
	cds_list_for_each_entry(session, &session_list->head, list) {
		if (!session_get(session)) {
			continue;
		}
		session_lock(session);
		if (session->kernel_session == NULL) {
			session_unlock(session);
			session_put(session);
			continue;
		}
		ksess = session->kernel_session;

		cds_list_for_each_entry(channel,
				&ksess->channel_list.head, list) {
			struct lttng_ht_iter iter;
			struct consumer_socket *socket;

			if (channel->fd != fd) {
				continue;
			}
			DBG("Channel found, updating kernel streams");
			ret = kernel_open_channel_stream(channel);
			if (ret < 0) {
				goto error;
			}
			/* Update the stream global counter */
			ksess->stream_count_global += ret;

			/*
			 * Have we already sent fds to the consumer? If yes, it
			 * means that tracing has started, so it is safe to send
			 * our updated stream fds.
			 */
			if (ksess->consumer_fds_sent != 1
					|| ksess->consumer == NULL) {
				ret = -1;
				goto error;
			}

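			/*
			 * The consumer sockets live in an RCU hash table, hence the
			 * read-side critical section around the iteration; each socket's
			 * own mutex serializes the commands sent to that consumer.
			 */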
			rcu_read_lock();
			cds_lfht_for_each_entry(ksess->consumer->socks->ht,
					&iter.iter, socket, node.node) {
				pthread_mutex_lock(socket->lock);
				ret = kernel_consumer_send_channel_streams(socket,
						channel, ksess,
						session->output_traces ? 1 : 0);
				pthread_mutex_unlock(socket->lock);
				if (ret < 0) {
					rcu_read_unlock();
					goto error;
				}
			}
			rcu_read_unlock();
		}
		session_unlock(session);
		session_put(session);
	}
	session_unlock_list();
	return ret;

error:
	session_unlock(session);
	session_put(session);
	session_unlock_list();
	return ret;
}

/*
 * This thread manages events coming from the kernel.
 *
 * Features supported in this thread:
 *    -) CPU Hotplug
 */
static void *thread_kernel_management(void *data)
{
	int ret, i, pollfd, update_poll_flag = 1, err = -1;
	uint32_t revents, nb_fd;
	char tmp;
	struct lttng_poll_event events;
	struct thread_notifiers *notifiers = data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(notifiers->quit_pipe);

	DBG("[thread] Thread manage kernel started");

	health_register(health_sessiond, HEALTH_SESSIOND_TYPE_KERNEL);

	/*
	 * The first step of the while loop cleans this structure, which could
	 * free non-NULL pointers, so initialize it before entering the loop.
	 */
	lttng_poll_init(&events);

	if (testpoint(sessiond_thread_manage_kernel)) {
		goto error_testpoint;
	}

	health_code_update();

	if (testpoint(sessiond_thread_manage_kernel_before_loop)) {
		goto error_testpoint;
	}

	while (1) {
		health_code_update();

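		/*
		 * Rebuild the poll set from scratch whenever the set of kernel
		 * channels may have changed: the quit pipe, the kernel notification
		 * pipe and every known kernel channel fd are added back to it.
		 */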
		if (update_poll_flag == 1) {
			/* Clean events object. We are about to populate it again. */
			lttng_poll_clean(&events);

			ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
			if (ret < 0) {
				goto error_poll_create;
			}

			ret = lttng_poll_add(&events,
					notifiers->kernel_poll_pipe_read_fd,
					LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			ret = lttng_poll_add(&events,
					quit_pipe_read_fd,
					LPOLLIN);
			if (ret < 0) {
				goto error;
			}

			/* This will add the available kernel channels, if any. */
			ret = update_kernel_poll(&events);
			if (ret < 0) {
				goto error;
			}
			update_poll_flag = 0;
		}

		DBG("Thread kernel polling");

		/* Poll with an infinite timeout. */
	restart:
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		DBG("Thread kernel return from poll on %d fds",
				LTTNG_POLL_GETNB(&events));
		health_poll_exit();
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		} else if (ret == 0) {
			/* Should not happen since timeout is infinite */
			ERR("Return value of poll is 0 with an infinite timeout.\n"
					"This should not have happened! Continuing...");
			continue;
		}

		nb_fd = ret;

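		/*
		 * Dispatch the ready fds: the quit pipe terminates the thread, the
		 * kernel notification pipe triggers a rebuild of the poll set, and
		 * any other readable fd is a kernel channel with new streams to
		 * forward to the consumer.
		 */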
		for (i = 0; i < nb_fd; i++) {
			/* Fetch the poll data once. */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			health_code_update();

			if (!revents) {
				/* No activity for this FD (poll implementation). */
				continue;
			}

			if (pollfd == quit_pipe_read_fd) {
				err = 0;
				goto exit;
			}

			/* Check for data on kernel pipe */
			if (revents & LPOLLIN) {
				if (pollfd == notifiers->kernel_poll_pipe_read_fd) {
					(void) lttng_read(notifiers->kernel_poll_pipe_read_fd,
							&tmp, 1);
					/*
					 * The return value does not matter here; any activity
					 * on this pipe means an update is required anyway.
					 */
					update_poll_flag = 1;
					continue;
				} else {
					/*
					 * New CPU detected by the kernel. Add the new kernel
					 * stream to the kernel session and update the kernel
					 * consumer.
					 */
					ret = update_kernel_stream(pollfd);
					if (ret < 0) {
						continue;
					}
					break;
				}
			} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
				update_poll_flag = 1;
				continue;
			} else {
				ERR("Unexpected poll events %u for sock %d", revents, pollfd);
				goto error;
			}
		}
	}

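/*
 * All exit paths converge here. The first two labels tear down the poll set;
 * the last two are reached before a usable poll set exists, so they skip that
 * step.
 */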
exit:
error:
	lttng_poll_clean(&events);
error_poll_create:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
		WARN("Kernel thread died unexpectedly. "
				"Kernel tracing can continue but CPU hotplug is disabled.");
	}
	health_unregister(health_sessiond);
	DBG("Kernel thread dying");
	return NULL;
}

static bool shutdown_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = data;
	const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);

	return notify_thread_pipe(write_fd) == 1;
}

static void cleanup_kernel_management_thread(void *data)
{
	struct thread_notifiers *notifiers = data;

	if (!notifiers) {
		/* Launch failed before the notifiers were allocated. */
		return;
	}
	lttng_pipe_destroy(notifiers->quit_pipe);
	free(notifiers);
}

bool launch_kernel_management_thread(int kernel_poll_pipe_read_fd)
{
	struct lttng_pipe *quit_pipe;
	struct thread_notifiers *notifiers = NULL;
	struct lttng_thread *thread;

	quit_pipe = lttng_pipe_open(FD_CLOEXEC);
	if (!quit_pipe) {
		goto error;
	}

	notifiers = zmalloc(sizeof(*notifiers));
	if (!notifiers) {
		/* The pipe is not owned by the notifiers yet; release it here. */
		lttng_pipe_destroy(quit_pipe);
		goto error;
	}
	notifiers->quit_pipe = quit_pipe;
	notifiers->kernel_poll_pipe_read_fd = kernel_poll_pipe_read_fd;

	thread = lttng_thread_create("Kernel management",
			thread_kernel_management,
			shutdown_kernel_management_thread,
			cleanup_kernel_management_thread,
			notifiers);
	if (!thread) {
		goto error;
	}
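	/*
	 * Ownership of the notifiers was transferred to the thread through its
	 * cleanup callback; drop the reference returned by lttng_thread_create().
	 */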
	lttng_thread_put(thread);
	return true;
error:
	cleanup_kernel_management_thread(notifiers);
	return false;
}