Fix: lttng: -Wshadow error in cmd_snapshot
[lttng-tools.git] / src / bin / lttng-sessiond / health.c
CommitLineData
173fca4f 1/*
ab5be9fa
MJ
2 * Copyright (C) 2012 David Goulet <dgoulet@efficios.com>
3 * Copyright (C) 2018 Jérémie Galarneau <jeremie.galarneau@efficios.com>
173fca4f 4 *
ab5be9fa 5 * SPDX-License-Identifier: GPL-2.0-only
173fca4f 6 *
173fca4f
JG
7 */
8
9#include "lttng-sessiond.h"
10#include "health-sessiond.h"
11#include <common/macros.h>
12#include <common/error.h>
13#include <common/utils.h>
14#include <common/pipe.h>
15#include <inttypes.h>
16#include <sys/stat.h>
17#include "utils.h"
18#include "thread.h"
19
c78d8e86
JG
/*
 * State shared between the launcher and the health management thread.
 * Allocated by launch_health_management_thread() and released by
 * cleanup_health_management_thread().
 */
struct thread_notifiers {
	/* Thread-specific quit pipe; made readable to request shutdown. */
	struct lttng_pipe *quit_pipe;
	/* Posted by the thread once it is ready to service clients. */
	sem_t ready;
};
24
25static
26void mark_thread_as_ready(struct thread_notifiers *notifiers)
173fca4f 27{
c78d8e86
JG
28 DBG("Marking health management thread as ready");
29 sem_post(&notifiers->ready);
30}
173fca4f 31
c78d8e86
JG
32static
33void wait_until_thread_is_ready(struct thread_notifiers *notifiers)
34{
35 DBG("Waiting for health management thread to be ready");
36 sem_wait(&notifiers->ready);
37 DBG("Health management thread is ready");
38}
39
40static void cleanup_health_management_thread(void *data)
41{
42 struct thread_notifiers *notifiers = data;
43
44 lttng_pipe_destroy(notifiers->quit_pipe);
45 sem_destroy(&notifiers->ready);
46 free(notifiers);
173fca4f
JG
47}
48
/*
 * Thread managing health check socket.
 *
 * Listens on the health Unix socket and, for every client connection,
 * replies with a bitmask in which bit i is set when health component i is
 * in a bad state. Runs until the thread's quit pipe becomes readable.
 * Always returns NULL.
 */
static void *thread_manage_health(void *data)
{
	const bool is_root = (getuid() == 0);
	int sock = -1, new_sock = -1, ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_poll_event events;
	struct health_comm_msg msg;
	struct health_comm_reply reply;
	/* Thread-specific quit pipe. */
	struct thread_notifiers *notifiers = data;
	const int quit_pipe_read_fd = lttng_pipe_get_readfd(
			notifiers->quit_pipe);

	DBG("[thread] Manage health check started");

	rcu_register_thread();

	/*
	 * Created with a size of two for:
	 *   - client socket
	 *   - thread quit pipe
	 */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		goto error;
	}

	/* Create unix socket */
	sock = lttcomm_create_unix_sock(the_config.health_unix_sock_path.value);
	if (sock < 0) {
		ERR("Unable to create health check Unix socket");
		goto error;
	}

	if (is_root) {
		/*
		 * Running as root: restrict the health socket to root and the
		 * tracing group so unprivileged group members can query health.
		 */
		gid_t gid;

		ret = utils_get_group_id(the_config.tracing_group_name.value, true, &gid);
		if (ret) {
			/* Default to root group. */
			gid = 0;
		}

		ret = chown(the_config.health_unix_sock_path.value, 0, gid);
		if (ret < 0) {
			ERR("Unable to set group on %s", the_config.health_unix_sock_path.value);
			PERROR("chown");
			goto error;
		}

		/* Owner and group read/write only. */
		ret = chmod(the_config.health_unix_sock_path.value,
				S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
		if (ret < 0) {
			ERR("Unable to set permissions on %s", the_config.health_unix_sock_path.value);
			PERROR("chmod");
			goto error;
		}
	}

	/*
	 * Set the CLOEXEC flag. Return code is useless because either way, the
	 * show must go on.
	 */
	(void) utils_set_fd_cloexec(sock);

	ret = lttcomm_listen_unix_sock(sock);
	if (ret < 0) {
		goto error;
	}

	ret = lttng_poll_add(&events, quit_pipe_read_fd, LPOLLIN | LPOLLERR);
	if (ret < 0) {
		goto error;
	}

	/* Add the health client listening socket. */
	ret = lttng_poll_add(&events, sock, LPOLLIN | LPOLLPRI);
	if (ret < 0) {
		goto error;
	}

	/* Setup is complete; unblock the launcher. */
	mark_thread_as_ready(notifiers);
	while (1) {
		DBG("Health check ready");

		/* Infinite blocking call, waiting for transmission */
	restart:
		ret = lttng_poll_wait(&events, -1);
		if (ret < 0) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		for (i = 0; i < nb_fd; i++) {
			/* Fetch once the poll data */
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Event on the listening socket. */
			if (pollfd == sock) {
				if (revents & LPOLLIN) {
					/* Pending connection; accepted below. */
					continue;
				} else if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
					ERR("Health socket poll error");
					goto error;
				} else {
					ERR("Unexpected poll events %u for sock %d", revents, pollfd);
					goto error;
				}
			} else {
				/* Event on the thread's quit pipe. */
				err = 0;
				goto exit;
			}
		}

		new_sock = lttcomm_accept_unix_sock(sock);
		if (new_sock < 0) {
			goto error;
		}

		/*
		 * Set the CLOEXEC flag. Return code is useless because either way, the
		 * show must go on.
		 */
		(void) utils_set_fd_cloexec(new_sock);

		DBG("Receiving data from client for health...");
		ret = lttcomm_recv_unix_sock(new_sock, (void *)&msg, sizeof(msg));
		if (ret <= 0) {
			/* Client closed or error; drop the connection and keep serving. */
			DBG("Nothing recv() from client... continuing");
			ret = close(new_sock);
			if (ret) {
				PERROR("close");
			}
			continue;
		}

		rcu_thread_online();

		/* Build the reply: one bit per health component, set when bad. */
		memset(&reply, 0, sizeof(reply));
		for (i = 0; i < NR_HEALTH_SESSIOND_TYPES; i++) {
			/*
			 * health_check_state returns 0 if health is
			 * bad.
			 */
			if (!health_check_state(the_health_sessiond, i)) {
				reply.ret_code |= 1ULL << i;
			}
		}

		DBG2("Health check return value %" PRIx64, reply.ret_code);

		ret = lttcomm_send_unix_sock(new_sock, (void *) &reply,
				sizeof(reply));
		if (ret < 0) {
			ERR("Failed to send health data back to client");
		}

		/* End of transmission */
		ret = close(new_sock);
		if (ret) {
			PERROR("close");
		}
	}

exit:
error:
	if (err) {
		ERR("Health error occurred in %s", __func__);
	}
	DBG("Health check thread dying");
	/* Remove the socket path so stale clients cannot connect. */
	unlink(the_config.health_unix_sock_path.value);
	if (sock >= 0) {
		ret = close(sock);
		if (ret) {
			PERROR("close");
		}
	}

	lttng_poll_clean(&events);
	rcu_unregister_thread();
	return NULL;
}
244
c78d8e86 245static bool shutdown_health_management_thread(void *data)
173fca4f 246{
c78d8e86
JG
247 struct thread_notifiers *notifiers = data;
248 const int write_fd = lttng_pipe_get_writefd(notifiers->quit_pipe);
173fca4f 249
c78d8e86 250 return notify_thread_pipe(write_fd) == 1;
173fca4f
JG
251}
252
253bool launch_health_management_thread(void)
254{
c78d8e86 255 struct thread_notifiers *notifiers;
173fca4f 256 struct lttng_thread *thread;
173fca4f 257
c78d8e86
JG
258 notifiers = zmalloc(sizeof(*notifiers));
259 if (!notifiers) {
21fa020e 260 goto error_alloc;
173fca4f
JG
261 }
262
c78d8e86
JG
263 sem_init(&notifiers->ready, 0, 0);
264 notifiers->quit_pipe = lttng_pipe_open(FD_CLOEXEC);
265 if (!notifiers->quit_pipe) {
266 goto error;
267 }
173fca4f
JG
268 thread = lttng_thread_create("Health management",
269 thread_manage_health,
270 shutdown_health_management_thread,
271 cleanup_health_management_thread,
c78d8e86 272 notifiers);
173fca4f
JG
273 if (!thread) {
274 goto error;
275 }
c78d8e86
JG
276
277 wait_until_thread_is_ready(notifiers);
173fca4f
JG
278 lttng_thread_put(thread);
279 return true;
280error:
c78d8e86 281 cleanup_health_management_thread(notifiers);
21fa020e 282error_alloc:
173fca4f
JG
283 return false;
284}
This page took 0.043128 seconds and 4 git commands to generate.