a3235b6961ad6e0b6c13677c84bd78bb5207b954
[lttng-ust.git] / src / common / ringbuffer / shm.c
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 */
6
7 #define _LGPL_SOURCE
8 #include "shm.h"
9 #include <unistd.h>
10 #include <fcntl.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <sys/stat.h> /* For mode constants */
14 #include <fcntl.h> /* For O_* constants */
15 #include <assert.h>
16 #include <stdio.h>
17 #include <signal.h>
18 #include <dirent.h>
19 #include <limits.h>
20 #include <stdbool.h>
21 #include <stdint.h>
22
23 #ifdef HAVE_LIBNUMA
24 #include <numa.h>
25 #include <numaif.h>
26 #endif
27
28 #include <lttng/ust-utils.h>
29
30 #include "common/macros.h"
31 #include "common/ust-fd.h"
32 #include "common/compat/mmap.h"
33
34 /*
35 * Ensure we have the required amount of space available by writing 0
36 * into the entire buffer. Not doing so can trigger SIGBUS when going
37 * beyond the available shm space.
38 */
/*
 * Write zeros over the first "len" bytes of "fd", one page at a time.
 *
 * Fully allocating the backing store up-front lets us detect a
 * shortage of shm space here (as a write error) instead of getting a
 * SIGBUS later when faulting in pages through the mapping.
 *
 * Returns 0 on success, a negative value on error.
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		size_t count = len - written;

		/* Cap each write to a single page of zeros. */
		if (count > (size_t) pagelen)
			count = (size_t) pagelen;
		do {
			retlen = write(fd, zeropage, count);
		} while (retlen < 0 && errno == EINTR);	/* Retry on interrupt. */
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
71
72 struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
73 {
74 struct shm_object_table *table;
75
76 table = zmalloc(sizeof(struct shm_object_table) +
77 max_nb_obj * sizeof(table->objects[0]));
78 if (!table)
79 return NULL;
80 table->size = max_nb_obj;
81 return table;
82 }
83
/*
 * Allocate a shm-backed object in the next free slot of "table",
 * mapping "memory_map_size" bytes of "stream_fd" (an already-opened
 * shm file descriptor).  A pipe is created for the wait_fd pair.
 *
 * The caller keeps ownership of stream_fd (shm_fd_ownership stays 0),
 * so error paths close only the pipe fds created here.
 *
 * Returns the object on success, NULL on error.
 */
static
struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
			size_t memory_map_size,
			int stream_fd)
{
	int shmfd, waitfd[2], ret, i;
	struct shm_object *obj;
	char *memory_map;

	if (stream_fd < 0)
		return NULL;
	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	/* Both pipe ends must not leak across exec. */
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/*
	 * Set POSIX shared memory object size
	 *
	 * First, use ftruncate() to set its size, some implementations won't
	 * allow writes past the size set by ftruncate.
	 * Then, use write() to fill it with zeros, this allows us to fully
	 * allocate it and detect a shortage of shm space without dealing with
	 * a SIGBUS.
	 */

	shmfd = stream_fd;
	ret = ftruncate(shmfd, memory_map_size);
	if (ret) {
		PERROR("ftruncate");
		goto error_ftruncate;
	}
	ret = zero_file(shmfd, memory_map_size);
	if (ret) {
		PERROR("zero_file");
		goto error_zero_file;
	}

	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2). Some platforms don't allow fsync on POSIX shm fds, ignore
	 * EINVAL accordingly.
	 */
	ret = fsync(shmfd);
	if (ret && errno != EINVAL) {
		PERROR("fsync");
		goto error_fsync;
	}
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	/* Commit the slot only after full success. */
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
error_fcntl:
	/* Close both pipe ends; stream_fd is left for the caller. */
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	return NULL;
}
185
/*
 * Allocate a heap-backed (zeroed) object in the next free slot of
 * "table", with a pipe for the wait_fd pair and no shm fd.
 *
 * Returns the object on success, NULL on error.
 */
static
struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
			size_t memory_map_size)
{
	struct shm_object *obj;
	void *memory_map;
	int waitfd[2], i, ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	memory_map = zmalloc(memory_map_size);
	if (!memory_map)
		goto alloc_error;

	/* wait_fd: create pipe */
	ret = pipe(waitfd);
	if (ret < 0) {
		PERROR("pipe");
		goto error_pipe;
	}
	/* Both pipe ends must not leak across exec. */
	for (i = 0; i < 2; i++) {
		ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
		if (ret < 0) {
			PERROR("fcntl");
			goto error_fcntl;
		}
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	memcpy(obj->wait_fd, waitfd, sizeof(waitfd));

	/* no shm_fd */
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	/* Commit the slot only after full success. */
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	for (i = 0; i < 2; i++) {
		ret = close(waitfd[i]);
		if (ret) {
			PERROR("close");
			assert(0);
		}
	}
error_pipe:
	free(memory_map);
alloc_error:
	return NULL;
}
248
249 /*
250 * libnuma prints errors on the console even for numa_available().
251 * Work-around this limitation by using get_mempolicy() directly to
252 * check whether the kernel supports mempolicy.
253 */
#ifdef HAVE_LIBNUMA
/*
 * Probe mempolicy support directly through get_mempolicy() before
 * calling into libnuma, so that an unsupported kernel does not make
 * libnuma print errors on the console.
 */
static bool lttng_is_numa_available(void)
{
	if (get_mempolicy(NULL, NULL, 0, NULL, 0) == -1 && errno == ENOSYS)
		return false;

	return numa_available() > 0;
}
#endif
266
267 #ifdef HAVE_LIBNUMA
268 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
269 size_t memory_map_size,
270 enum shm_object_type type,
271 int stream_fd,
272 int cpu)
273 #else
274 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
275 size_t memory_map_size,
276 enum shm_object_type type,
277 int stream_fd,
278 int cpu __attribute__((unused)))
279 #endif
280 {
281 struct shm_object *shm_object;
282 #ifdef HAVE_LIBNUMA
283 int oldnode = 0, node;
284 bool numa_avail;
285
286 numa_avail = lttng_is_numa_available();
287 if (numa_avail) {
288 oldnode = numa_preferred();
289 if (cpu >= 0) {
290 node = numa_node_of_cpu(cpu);
291 if (node >= 0)
292 numa_set_preferred(node);
293 }
294 if (cpu < 0 || node < 0)
295 numa_set_localalloc();
296 }
297 #endif /* HAVE_LIBNUMA */
298 switch (type) {
299 case SHM_OBJECT_SHM:
300 shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
301 stream_fd);
302 break;
303 case SHM_OBJECT_MEM:
304 shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
305 break;
306 default:
307 assert(0);
308 }
309 #ifdef HAVE_LIBNUMA
310 if (numa_avail)
311 numa_set_preferred(oldnode);
312 #endif /* HAVE_LIBNUMA */
313 return shm_object;
314 }
315
/*
 * Append a received shm-backed object to "table", taking ownership of
 * "shm_fd" (shm_fd_ownership = 1) and storing "wakeup_fd" as the write
 * end of the wait pipe.  The read end is left unset (-1).
 *
 * Streams must be appended in sequential stream_nr order, starting at
 * 0 with table->allocated_len already at 1 (slot 0 is taken by the
 * first object) — presumably the channel; verify against callers.
 *
 * Returns the object on success, NULL on error.
 * NOTE(review): unlike shm_object_table_append_mem(), FD_CLOEXEC is
 * not set on wakeup_fd here — confirm whether callers pass fds that
 * are already close-on-exec.
 * NOTE(review): on failure the fds are not closed here; ownership
 * stays with the caller.
 */
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/* streams _must_ be received in sequential order, else fail. */
	if (stream_nr + 1 != table->allocated_len)
		return NULL;

	obj = &table->objects[table->allocated_len];

	/* wait_fd: set write end of the pipe. */
	obj->wait_fd[0] = -1; /* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;

	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	/* Received objects arrive fully allocated. */
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
error_mmap:
	return NULL;
}
364
365 /*
366 * Passing ownership of mem to object.
367 */
368 struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
369 void *mem, size_t memory_map_size, int wakeup_fd)
370 {
371 struct shm_object *obj;
372 int ret;
373
374 if (table->allocated_len >= table->size)
375 return NULL;
376 obj = &table->objects[table->allocated_len];
377
378 obj->wait_fd[0] = -1; /* read end is unset */
379 obj->wait_fd[1] = wakeup_fd;
380 obj->shm_fd = -1;
381 obj->shm_fd_ownership = 0;
382
383 ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
384 if (ret < 0) {
385 PERROR("fcntl");
386 goto error_fcntl;
387 }
388 /* The write end of the pipe needs to be non-blocking */
389 ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
390 if (ret < 0) {
391 PERROR("fcntl");
392 goto error_fcntl;
393 }
394
395 obj->type = SHM_OBJECT_MEM;
396 obj->memory_map = mem;
397 obj->memory_map_size = memory_map_size;
398 obj->allocated_len = memory_map_size;
399 obj->index = table->allocated_len++;
400
401 return obj;
402
403 error_fcntl:
404 return NULL;
405 }
406
407 static
408 void shmp_object_destroy(struct shm_object *obj, int consumer)
409 {
410 switch (obj->type) {
411 case SHM_OBJECT_SHM:
412 {
413 int ret, i;
414
415 ret = munmap(obj->memory_map, obj->memory_map_size);
416 if (ret) {
417 PERROR("umnmap");
418 assert(0);
419 }
420
421 if (obj->shm_fd_ownership) {
422 /* Delete FDs only if called from app (not consumer). */
423 if (!consumer) {
424 lttng_ust_lock_fd_tracker();
425 ret = close(obj->shm_fd);
426 if (!ret) {
427 lttng_ust_delete_fd_from_tracker(obj->shm_fd);
428 } else {
429 PERROR("close");
430 assert(0);
431 }
432 lttng_ust_unlock_fd_tracker();
433 } else {
434 ret = close(obj->shm_fd);
435 if (ret) {
436 PERROR("close");
437 assert(0);
438 }
439 }
440 }
441 for (i = 0; i < 2; i++) {
442 if (obj->wait_fd[i] < 0)
443 continue;
444 if (!consumer) {
445 lttng_ust_lock_fd_tracker();
446 ret = close(obj->wait_fd[i]);
447 if (!ret) {
448 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
449 } else {
450 PERROR("close");
451 assert(0);
452 }
453 lttng_ust_unlock_fd_tracker();
454 } else {
455 ret = close(obj->wait_fd[i]);
456 if (ret) {
457 PERROR("close");
458 assert(0);
459 }
460 }
461 }
462 break;
463 }
464 case SHM_OBJECT_MEM:
465 {
466 int ret, i;
467
468 for (i = 0; i < 2; i++) {
469 if (obj->wait_fd[i] < 0)
470 continue;
471 if (!consumer) {
472 lttng_ust_lock_fd_tracker();
473 ret = close(obj->wait_fd[i]);
474 if (!ret) {
475 lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
476 } else {
477 PERROR("close");
478 assert(0);
479 }
480 lttng_ust_unlock_fd_tracker();
481 } else {
482 ret = close(obj->wait_fd[i]);
483 if (ret) {
484 PERROR("close");
485 assert(0);
486 }
487 }
488 }
489 free(obj->memory_map);
490 break;
491 }
492 default:
493 assert(0);
494 }
495 }
496
497 void shm_object_table_destroy(struct shm_object_table *table, int consumer)
498 {
499 int i;
500
501 for (i = 0; i < table->allocated_len; i++)
502 shmp_object_destroy(&table->objects[i], consumer);
503 free(table);
504 }
505
506 /*
507 * zalloc_shm - allocate memory within a shm object.
508 *
509 * Shared memory is already zeroed by shmget.
510 * *NOT* multithread-safe (should be protected by mutex).
511 * Returns a -1, -1 tuple on error.
512 */
513 struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
514 {
515 struct shm_ref ref;
516 struct shm_ref shm_ref_error = { -1, -1 };
517
518 if (obj->memory_map_size - obj->allocated_len < len)
519 return shm_ref_error;
520 ref.index = obj->index;
521 ref.offset = obj->allocated_len;
522 obj->allocated_len += len;
523 return ref;
524 }
525
526 void align_shm(struct shm_object *obj, size_t align)
527 {
528 size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
529 obj->allocated_len += offset_len;
530 }
This page took 0.053326 seconds and 3 git commands to generate.