Cleanup: ignore mktemp return value
[lttng-ust.git] / libringbuffer / shm.c
1 /*
2 * libringbuffer/shm.c
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; only
9 * version 2.1 of the License.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
#include "shm.h"
#include <assert.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <fcntl.h>		/* For O_* constants */
#include <limits.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>		/* For mode constants */
#include <unistd.h>
#include <lttng/align.h>
#include <helper.h>
#include <helper.h>
35
/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 *
 * Returns 0 on success. On error, returns a negative value: -1 when
 * the page size cannot be queried or a write fails, -ENOMEM when the
 * zero page cannot be allocated.
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	/* One zeroed page is reused as the write source for every chunk. */
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		/* Write at most one page per syscall. */
		size_t count = len - written;

		if (count > (size_t) pagelen)
			count = (size_t) pagelen;
		do {
			retlen = write(fd, zeropage, count);
		} while (retlen < 0 && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		/* Partial writes are fine: loop until len bytes are out. */
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
73
74 struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
75 {
76 struct shm_object_table *table;
77
78 table = zmalloc(sizeof(struct shm_object_table) +
79 max_nb_obj * sizeof(table->objects[0]));
80 table->size = max_nb_obj;
81 return table;
82 }
83
84 static
85 struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
86 size_t memory_map_size)
87 {
88 int shmfd, waitfd[2], ret, i, sigblocked = 0;
89 struct shm_object *obj;
90 char *memory_map;
91 char tmp_name[NAME_MAX] = "/ust-shm-tmp-XXXXXX";
92 sigset_t all_sigs, orig_sigs;
93
94 if (table->allocated_len >= table->size)
95 return NULL;
96 obj = &table->objects[table->allocated_len];
97
98 /* wait_fd: create pipe */
99 ret = pipe(waitfd);
100 if (ret < 0) {
101 PERROR("pipe");
102 goto error_pipe;
103 }
104 for (i = 0; i < 2; i++) {
105 ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
106 if (ret < 0) {
107 PERROR("fcntl");
108 goto error_fcntl;
109 }
110 }
111 /* The write end of the pipe needs to be non-blocking */
112 ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
113 if (ret < 0) {
114 PERROR("fcntl");
115 goto error_fcntl;
116 }
117 memcpy(obj->wait_fd, waitfd, sizeof(waitfd));
118
119 /* shm_fd: create shm */
120
121 /*
122 * Theoretically, we could leak a shm if the application crashes
123 * between open and unlink. Disable signals on this thread for
124 * increased safety against this scenario.
125 */
126 sigfillset(&all_sigs);
127 ret = pthread_sigmask(SIG_BLOCK, &all_sigs, &orig_sigs);
128 if (ret == -1) {
129 PERROR("pthread_sigmask");
130 goto error_pthread_sigmask;
131 }
132 sigblocked = 1;
133
134 /*
135 * Allocate shm, and immediately unlink its shm oject, keeping
136 * only the file descriptor as a reference to the object. If it
137 * already exists (caused by short race window during which the
138 * global object exists in a concurrent shm_open), simply retry.
139 * We specifically do _not_ use the / at the beginning of the
140 * pathname so that some OS implementations can keep it local to
141 * the process (POSIX leaves this implementation-defined).
142 */
143 do {
144 /*
145 * Using mktemp filename with O_CREAT | O_EXCL open
146 * flags.
147 */
148 (void) mktemp(tmp_name);
149 if (tmp_name[0] == '\0') {
150 PERROR("mktemp");
151 goto error_shm_open;
152 }
153 shmfd = shm_open(tmp_name,
154 O_CREAT | O_EXCL | O_RDWR, 0700);
155 } while (shmfd < 0 && (errno == EEXIST || errno == EACCES));
156 if (shmfd < 0) {
157 PERROR("shm_open");
158 goto error_shm_open;
159 }
160 ret = shm_unlink(tmp_name);
161 if (ret < 0 && errno != ENOENT) {
162 PERROR("shm_unlink");
163 goto error_shm_release;
164 }
165 sigblocked = 0;
166 ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
167 if (ret == -1) {
168 PERROR("pthread_sigmask");
169 goto error_sigmask_release;
170 }
171 ret = zero_file(shmfd, memory_map_size);
172 if (ret) {
173 PERROR("zero_file");
174 goto error_zero_file;
175 }
176 ret = ftruncate(shmfd, memory_map_size);
177 if (ret) {
178 PERROR("ftruncate");
179 goto error_ftruncate;
180 }
181 obj->shm_fd = shmfd;
182
183 /* memory_map: mmap */
184 memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
185 MAP_SHARED, shmfd, 0);
186 if (memory_map == MAP_FAILED) {
187 PERROR("mmap");
188 goto error_mmap;
189 }
190 obj->type = SHM_OBJECT_SHM;
191 obj->memory_map = memory_map;
192 obj->memory_map_size = memory_map_size;
193 obj->allocated_len = 0;
194 obj->index = table->allocated_len++;
195
196 return obj;
197
198 error_mmap:
199 error_ftruncate:
200 error_shm_release:
201 error_zero_file:
202 error_sigmask_release:
203 ret = close(shmfd);
204 if (ret) {
205 PERROR("close");
206 assert(0);
207 }
208 error_shm_open:
209 if (sigblocked) {
210 ret = pthread_sigmask(SIG_SETMASK, &orig_sigs, NULL);
211 if (ret == -1) {
212 PERROR("pthread_sigmask");
213 }
214 }
215 error_pthread_sigmask:
216 error_fcntl:
217 for (i = 0; i < 2; i++) {
218 ret = close(waitfd[i]);
219 if (ret) {
220 PERROR("close");
221 assert(0);
222 }
223 }
224 error_pipe:
225 return NULL;
226 }
227
228 static
229 struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
230 size_t memory_map_size)
231 {
232 struct shm_object *obj;
233 void *memory_map;
234 int waitfd[2], i, ret;
235
236 if (table->allocated_len >= table->size)
237 return NULL;
238 obj = &table->objects[table->allocated_len];
239
240 memory_map = zmalloc(memory_map_size);
241 if (!memory_map)
242 goto alloc_error;
243
244 /* wait_fd: create pipe */
245 ret = pipe(waitfd);
246 if (ret < 0) {
247 PERROR("pipe");
248 goto error_pipe;
249 }
250 for (i = 0; i < 2; i++) {
251 ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
252 if (ret < 0) {
253 PERROR("fcntl");
254 goto error_fcntl;
255 }
256 }
257 /* The write end of the pipe needs to be non-blocking */
258 ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
259 if (ret < 0) {
260 PERROR("fcntl");
261 goto error_fcntl;
262 }
263 memcpy(obj->wait_fd, waitfd, sizeof(waitfd));
264
265 /* no shm_fd */
266 obj->shm_fd = -1;
267
268 obj->type = SHM_OBJECT_MEM;
269 obj->memory_map = memory_map;
270 obj->memory_map_size = memory_map_size;
271 obj->allocated_len = 0;
272 obj->index = table->allocated_len++;
273
274 return obj;
275
276 error_fcntl:
277 for (i = 0; i < 2; i++) {
278 ret = close(waitfd[i]);
279 if (ret) {
280 PERROR("close");
281 assert(0);
282 }
283 }
284 error_pipe:
285 free(memory_map);
286 alloc_error:
287 return NULL;
288 }
289
290 struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
291 size_t memory_map_size,
292 enum shm_object_type type)
293 {
294 switch (type) {
295 case SHM_OBJECT_SHM:
296 return _shm_object_table_alloc_shm(table, memory_map_size);
297 case SHM_OBJECT_MEM:
298 return _shm_object_table_alloc_mem(table, memory_map_size);
299 default:
300 assert(0);
301 }
302 return NULL;
303 }
304
/*
 * Append a stream object backed by an already-created shm file
 * descriptor, received along with its wakeup fd.
 *
 * On success the table stores shm_fd and wakeup_fd in the new object
 * (both are later closed by shmp_object_destroy). NOTE(review): on
 * failure the fds are left untouched -- confirm callers close them
 * when NULL is returned.
 *
 * Returns the new object, or NULL on error.
 */
struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		size_t memory_map_size)
{
	struct shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	/* streams _must_ be received in sequential order, else fail. */
	/*
	 * NOTE(review): stream_nr + 1 suggests table slot 0 holds a
	 * non-stream object, so stream N lands in slot N + 1 --
	 * confirm against the callers.
	 */
	if (stream_nr + 1 != table->allocated_len)
		return NULL;

	obj = &table->objects[table->allocated_len];

	/* wait_fd: set write end of the pipe. */
	obj->wait_fd[0] = -1;	/* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = shm_fd;

	/* Do not leak the wakeup fd across exec. */
	ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		PERROR("mmap");
		goto error_mmap;
	}
	obj->type = SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	/* Area is fully laid out by the sender: mark it all allocated. */
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
error_mmap:
	return NULL;
}
357
/*
 * Passing ownership of mem to object.
 *
 * Append a memory-backed stream object wrapping the caller-provided
 * area `mem` of memory_map_size bytes, with wakeup_fd as the pipe
 * write end. On success, `mem` is freed by shmp_object_destroy.
 *
 * NOTE(review): on the error path `mem` is NOT freed here, so the
 * caller appears to keep ownership when NULL is returned -- confirm
 * callers free it in that case.
 *
 * Returns the new object, or NULL on error.
 */
struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
			void *mem, size_t memory_map_size, int wakeup_fd)
{
	struct shm_object *obj;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	obj->wait_fd[0] = -1;	/* read end is unset */
	obj->wait_fd[1] = wakeup_fd;
	obj->shm_fd = -1;	/* memory-backed: no shm fd */

	/* Do not leak the wakeup fd across exec. */
	ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}
	/* The write end of the pipe needs to be non-blocking */
	ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl");
		goto error_fcntl;
	}

	obj->type = SHM_OBJECT_MEM;
	obj->memory_map = mem;
	obj->memory_map_size = memory_map_size;
	/* Caller provides a fully laid out area: mark it all allocated. */
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_fcntl:
	return NULL;
}
398
399 static
400 void shmp_object_destroy(struct shm_object *obj)
401 {
402 switch (obj->type) {
403 case SHM_OBJECT_SHM:
404 {
405 int ret, i;
406
407 ret = munmap(obj->memory_map, obj->memory_map_size);
408 if (ret) {
409 PERROR("umnmap");
410 assert(0);
411 }
412 ret = close(obj->shm_fd);
413 if (ret) {
414 PERROR("close");
415 assert(0);
416 }
417 for (i = 0; i < 2; i++) {
418 if (obj->wait_fd[i] < 0)
419 continue;
420 ret = close(obj->wait_fd[i]);
421 if (ret) {
422 PERROR("close");
423 assert(0);
424 }
425 }
426 break;
427 }
428 case SHM_OBJECT_MEM:
429 {
430 int ret, i;
431
432 for (i = 0; i < 2; i++) {
433 if (obj->wait_fd[i] < 0)
434 continue;
435 ret = close(obj->wait_fd[i]);
436 if (ret) {
437 PERROR("close");
438 assert(0);
439 }
440 }
441 free(obj->memory_map);
442 break;
443 }
444 default:
445 assert(0);
446 }
447 }
448
449 void shm_object_table_destroy(struct shm_object_table *table)
450 {
451 int i;
452
453 for (i = 0; i < table->allocated_len; i++)
454 shmp_object_destroy(&table->objects[i]);
455 free(table);
456 }
457
458 /*
459 * zalloc_shm - allocate memory within a shm object.
460 *
461 * Shared memory is already zeroed by shmget.
462 * *NOT* multithread-safe (should be protected by mutex).
463 * Returns a -1, -1 tuple on error.
464 */
465 struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
466 {
467 struct shm_ref ref;
468 struct shm_ref shm_ref_error = { -1, -1 };
469
470 if (obj->memory_map_size - obj->allocated_len < len)
471 return shm_ref_error;
472 ref.index = obj->index;
473 ref.offset = obj->allocated_len;
474 obj->allocated_len += len;
475 return ref;
476 }
477
478 void align_shm(struct shm_object *obj, size_t align)
479 {
480 size_t offset_len = offset_align(obj->allocated_len, align);
481 obj->allocated_len += offset_len;
482 }
This page took 0.044841 seconds and 4 git commands to generate.