8b65d1fc6137dcb80dafbe5c39689ed8c5010c29
[lttng-ust.git] / src / common / counter / shm.c
1 /*
2 * SPDX-License-Identifier: LGPL-2.1-only
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
5 */
6
7 #define _LGPL_SOURCE
8 #include "shm.h"
9 #include <unistd.h>
10 #include <fcntl.h>
11 #include <sys/mman.h>
12 #include <sys/types.h>
13 #include <sys/stat.h> /* For mode constants */
14 #include <fcntl.h> /* For O_* constants */
15 #include <assert.h>
16 #include <stdio.h>
17 #include <signal.h>
18 #include <dirent.h>
19 #include <limits.h>
20 #include <stdbool.h>
21 #include <stdint.h>
22
23 #ifdef HAVE_LIBNUMA
24 #include <numa.h>
25 #include <numaif.h>
26 #endif
27
28 #include <lttng/ust-utils.h>
29
30 #include "common/macros.h"
31 #include "common/ust-fd.h"
32 #include "common/compat/mmap.h"
33
34 /*
35 * Ensure we have the required amount of space available by writing 0
36 * into the entire buffer. Not doing so can trigger SIGBUS when going
37 * beyond the available shm space.
38 */
39 static
40 int zero_file(int fd, size_t len)
41 {
42 ssize_t retlen;
43 size_t written = 0;
44 char *zeropage;
45 long pagelen;
46 int ret;
47
48 pagelen = sysconf(_SC_PAGESIZE);
49 if (pagelen < 0)
50 return (int) pagelen;
51 zeropage = calloc(pagelen, 1);
52 if (!zeropage)
53 return -ENOMEM;
54
55 while (len > written) {
56 do {
57 retlen = write(fd, zeropage,
58 min_t(size_t, pagelen, len - written));
59 } while (retlen == -1UL && errno == EINTR);
60 if (retlen < 0) {
61 ret = (int) retlen;
62 goto error;
63 }
64 written += retlen;
65 }
66 ret = 0;
67 error:
68 free(zeropage);
69 return ret;
70 }
71
72 struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
73 {
74 struct lttng_counter_shm_object_table *table;
75
76 table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
77 max_nb_obj * sizeof(table->objects[0]));
78 if (!table)
79 return NULL;
80 table->size = max_nb_obj;
81 return table;
82 }
83
84 static
85 struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
86 size_t memory_map_size,
87 int cpu_fd)
88 {
89 int shmfd, ret;
90 struct lttng_counter_shm_object *obj;
91 char *memory_map;
92
93 if (cpu_fd < 0)
94 return NULL;
95 if (table->allocated_len >= table->size)
96 return NULL;
97 obj = &table->objects[table->allocated_len];
98
99 /* create shm */
100
101 shmfd = cpu_fd;
102 ret = zero_file(shmfd, memory_map_size);
103 if (ret) {
104 PERROR("zero_file");
105 goto error_zero_file;
106 }
107 ret = ftruncate(shmfd, memory_map_size);
108 if (ret) {
109 PERROR("ftruncate");
110 goto error_ftruncate;
111 }
112 /*
113 * Also ensure the file metadata is synced with the storage by using
114 * fsync(2).
115 */
116 ret = fsync(shmfd);
117 if (ret) {
118 PERROR("fsync");
119 goto error_fsync;
120 }
121 obj->shm_fd_ownership = 0;
122 obj->shm_fd = shmfd;
123
124 /* memory_map: mmap */
125 memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
126 MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
127 if (memory_map == MAP_FAILED) {
128 PERROR("mmap");
129 goto error_mmap;
130 }
131 obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
132 obj->memory_map = memory_map;
133 obj->memory_map_size = memory_map_size;
134 obj->allocated_len = 0;
135 obj->index = table->allocated_len++;
136
137 return obj;
138
139 error_mmap:
140 error_fsync:
141 error_ftruncate:
142 error_zero_file:
143 return NULL;
144 }
145
146 static
147 struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
148 size_t memory_map_size)
149 {
150 struct lttng_counter_shm_object *obj;
151 void *memory_map;
152
153 if (table->allocated_len >= table->size)
154 return NULL;
155 obj = &table->objects[table->allocated_len];
156
157 memory_map = zmalloc(memory_map_size);
158 if (!memory_map)
159 goto alloc_error;
160
161 /* no shm_fd */
162 obj->shm_fd = -1;
163 obj->shm_fd_ownership = 0;
164
165 obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
166 obj->memory_map = memory_map;
167 obj->memory_map_size = memory_map_size;
168 obj->allocated_len = 0;
169 obj->index = table->allocated_len++;
170
171 return obj;
172
173 alloc_error:
174 return NULL;
175 }
176
/*
 * libnuma prints errors on the console even for numa_available().
 * Work-around this limitation by using get_mempolicy() directly to
 * check whether the kernel supports mempolicy.
 */
#ifdef HAVE_LIBNUMA
static bool lttng_is_numa_available(void)
{
	int ret;

	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
	if (ret && errno == ENOSYS) {
		return false;
	}
	/*
	 * numa_available() returns -1 when NUMA is unusable and a
	 * non-negative value (0 in libnuma) when it is available, so the
	 * previous "> 0" test was always false and disabled the NUMA
	 * path entirely. Compare against -1 instead.
	 */
	return numa_available() != -1;
}
#endif
194
195 #ifdef HAVE_LIBNUMA
196 struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
197 size_t memory_map_size,
198 enum lttng_counter_shm_object_type type,
199 int cpu_fd,
200 int cpu)
201 #else
202 struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
203 size_t memory_map_size,
204 enum lttng_counter_shm_object_type type,
205 int cpu_fd,
206 int cpu __attribute__((unused)))
207 #endif
208 {
209 struct lttng_counter_shm_object *shm_object;
210 #ifdef HAVE_LIBNUMA
211 int oldnode = 0, node;
212 bool numa_avail;
213
214 numa_avail = lttng_is_numa_available();
215 if (numa_avail) {
216 oldnode = numa_preferred();
217 if (cpu >= 0) {
218 node = numa_node_of_cpu(cpu);
219 if (node >= 0)
220 numa_set_preferred(node);
221 }
222 if (cpu < 0 || node < 0)
223 numa_set_localalloc();
224 }
225 #endif /* HAVE_LIBNUMA */
226 switch (type) {
227 case LTTNG_COUNTER_SHM_OBJECT_SHM:
228 shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
229 cpu_fd);
230 break;
231 case LTTNG_COUNTER_SHM_OBJECT_MEM:
232 shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
233 break;
234 default:
235 assert(0);
236 }
237 #ifdef HAVE_LIBNUMA
238 if (numa_avail)
239 numa_set_preferred(oldnode);
240 #endif /* HAVE_LIBNUMA */
241 return shm_object;
242 }
243
244 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
245 int shm_fd,
246 size_t memory_map_size)
247 {
248 struct lttng_counter_shm_object *obj;
249 char *memory_map;
250
251 if (table->allocated_len >= table->size)
252 return NULL;
253
254 obj = &table->objects[table->allocated_len];
255
256 obj->shm_fd = shm_fd;
257 obj->shm_fd_ownership = 1;
258
259 /* memory_map: mmap */
260 memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
261 MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
262 if (memory_map == MAP_FAILED) {
263 PERROR("mmap");
264 goto error_mmap;
265 }
266 obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
267 obj->memory_map = memory_map;
268 obj->memory_map_size = memory_map_size;
269 obj->allocated_len = memory_map_size;
270 obj->index = table->allocated_len++;
271
272 return obj;
273
274 error_mmap:
275 return NULL;
276 }
277
278 /*
279 * Passing ownership of mem to object.
280 */
281 struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
282 void *mem, size_t memory_map_size)
283 {
284 struct lttng_counter_shm_object *obj;
285
286 if (table->allocated_len >= table->size)
287 return NULL;
288 obj = &table->objects[table->allocated_len];
289
290 obj->shm_fd = -1;
291 obj->shm_fd_ownership = 0;
292
293 obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
294 obj->memory_map = mem;
295 obj->memory_map_size = memory_map_size;
296 obj->allocated_len = memory_map_size;
297 obj->index = table->allocated_len++;
298
299 return obj;
300
301 return NULL;
302 }
303
304 static
305 void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
306 {
307 switch (obj->type) {
308 case LTTNG_COUNTER_SHM_OBJECT_SHM:
309 {
310 int ret;
311
312 ret = munmap(obj->memory_map, obj->memory_map_size);
313 if (ret) {
314 PERROR("umnmap");
315 assert(0);
316 }
317
318 if (obj->shm_fd_ownership) {
319 /* Delete FDs only if called from app (not consumer). */
320 if (!consumer) {
321 lttng_ust_lock_fd_tracker();
322 ret = close(obj->shm_fd);
323 if (!ret) {
324 lttng_ust_delete_fd_from_tracker(obj->shm_fd);
325 } else {
326 PERROR("close");
327 assert(0);
328 }
329 lttng_ust_unlock_fd_tracker();
330 } else {
331 ret = close(obj->shm_fd);
332 if (ret) {
333 PERROR("close");
334 assert(0);
335 }
336 }
337 }
338 break;
339 }
340 case LTTNG_COUNTER_SHM_OBJECT_MEM:
341 {
342 free(obj->memory_map);
343 break;
344 }
345 default:
346 assert(0);
347 }
348 }
349
350 void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
351 {
352 int i;
353
354 for (i = 0; i < table->allocated_len; i++)
355 lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
356 free(table);
357 }
358
359 /*
360 * lttng_counter_zalloc_shm - allocate memory within a shm object.
361 *
362 * Shared memory is already zeroed by shmget.
363 * *NOT* multithread-safe (should be protected by mutex).
364 * Returns a -1, -1 tuple on error.
365 */
366 struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
367 {
368 struct lttng_counter_shm_ref ref;
369 struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
370
371 if (obj->memory_map_size - obj->allocated_len < len)
372 return shm_ref_error;
373 ref.index = obj->index;
374 ref.offset = obj->allocated_len;
375 obj->allocated_len += len;
376 return ref;
377 }
378
379 void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
380 {
381 size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
382 obj->allocated_len += offset_len;
383 }
This page took 0.035209 seconds and 3 git commands to generate.