Fix: pass private data to context callbacks
[lttng-ust.git] / libcounter / shm.c
CommitLineData
ebabbf58 1/*
c0c0989a 2 * SPDX-License-Identifier: LGPL-2.1-only
ebabbf58
MD
3 *
4 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
ebabbf58
MD
5 */
6
7#define _LGPL_SOURCE
ebabbf58
MD
8#include "shm.h"
9#include <unistd.h>
10#include <fcntl.h>
11#include <sys/mman.h>
12#include <sys/types.h>
13#include <sys/stat.h> /* For mode constants */
14#include <fcntl.h> /* For O_* constants */
15#include <assert.h>
16#include <stdio.h>
17#include <signal.h>
18#include <dirent.h>
ebabbf58
MD
19#include <limits.h>
20#include <stdbool.h>
21#include <stdint.h>
3d3a2bb8 22
ebabbf58
MD
23#ifdef HAVE_LIBNUMA
24#include <numa.h>
25#include <numaif.h>
26#endif
3d3a2bb8 27
eae3c729 28#include <lttng/ust-utils.h>
3d3a2bb8 29
864a1eda 30#include <ust-helper.h>
ebabbf58
MD
31#include <ust-fd.h>
32#include "../libringbuffer/mmap.h"
33
/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 *
 * Returns 0 on success, a negative value on error. Fix: compare the
 * signed write(2) return value against 0 rather than against -1UL
 * (signed/unsigned mismatch broke the EINTR retry on platforms where
 * sizeof(long) != sizeof(ssize_t)).
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	/* One zeroed page is reused as the write source for every chunk. */
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		size_t count = len - written;

		/* Write at most one page per syscall. */
		if (count > (size_t) pagelen)
			count = (size_t) pagelen;
		do {
			retlen = write(fd, zeropage, count);
		} while (retlen < 0 && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
71
72struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
73{
74 struct lttng_counter_shm_object_table *table;
75
76 table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
77 max_nb_obj * sizeof(table->objects[0]));
78 if (!table)
79 return NULL;
80 table->size = max_nb_obj;
81 return table;
82}
83
84static
85struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
86 size_t memory_map_size,
87 int cpu_fd)
88{
89 int shmfd, ret;
90 struct lttng_counter_shm_object *obj;
91 char *memory_map;
92
93 if (cpu_fd < 0)
94 return NULL;
95 if (table->allocated_len >= table->size)
96 return NULL;
97 obj = &table->objects[table->allocated_len];
98
99 /* create shm */
100
101 shmfd = cpu_fd;
102 ret = zero_file(shmfd, memory_map_size);
103 if (ret) {
104 PERROR("zero_file");
105 goto error_zero_file;
106 }
107 ret = ftruncate(shmfd, memory_map_size);
108 if (ret) {
109 PERROR("ftruncate");
110 goto error_ftruncate;
111 }
112 /*
113 * Also ensure the file metadata is synced with the storage by using
114 * fsync(2).
115 */
116 ret = fsync(shmfd);
117 if (ret) {
118 PERROR("fsync");
119 goto error_fsync;
120 }
121 obj->shm_fd_ownership = 0;
122 obj->shm_fd = shmfd;
123
124 /* memory_map: mmap */
125 memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
126 MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
127 if (memory_map == MAP_FAILED) {
128 PERROR("mmap");
129 goto error_mmap;
130 }
131 obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
132 obj->memory_map = memory_map;
133 obj->memory_map_size = memory_map_size;
134 obj->allocated_len = 0;
135 obj->index = table->allocated_len++;
136
137 return obj;
138
139error_mmap:
140error_fsync:
141error_ftruncate:
142error_zero_file:
143 return NULL;
144}
145
146static
147struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
148 size_t memory_map_size)
149{
150 struct lttng_counter_shm_object *obj;
151 void *memory_map;
152
153 if (table->allocated_len >= table->size)
154 return NULL;
155 obj = &table->objects[table->allocated_len];
156
157 memory_map = zmalloc(memory_map_size);
158 if (!memory_map)
159 goto alloc_error;
160
161 /* no shm_fd */
162 obj->shm_fd = -1;
163 obj->shm_fd_ownership = 0;
164
165 obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
166 obj->memory_map = memory_map;
167 obj->memory_map_size = memory_map_size;
168 obj->allocated_len = 0;
169 obj->index = table->allocated_len++;
170
171 return obj;
172
173alloc_error:
174 return NULL;
175}
176
/*
 * libnuma prints errors on the console even for numa_available().
 * Work-around this limitation by using get_mempolicy() directly to
 * check whether the kernel supports mempolicy.
 */
#ifdef HAVE_LIBNUMA
static bool lttng_is_numa_available(void)
{
	int ret;

	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
	if (ret && errno == ENOSYS) {
		return false;
	}
	/*
	 * numa_available() returns -1 when NUMA is unusable and 0
	 * otherwise; the previous "> 0" check therefore always reported
	 * NUMA as unavailable.
	 */
	return numa_available() != -1;
}
#endif
194
195struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
196 size_t memory_map_size,
197 enum lttng_counter_shm_object_type type,
198 int cpu_fd,
199 int cpu)
200{
201 struct lttng_counter_shm_object *shm_object;
202#ifdef HAVE_LIBNUMA
203 int oldnode = 0, node;
204 bool numa_avail;
205
206 numa_avail = lttng_is_numa_available();
207 if (numa_avail) {
208 oldnode = numa_preferred();
209 if (cpu >= 0) {
210 node = numa_node_of_cpu(cpu);
211 if (node >= 0)
212 numa_set_preferred(node);
213 }
214 if (cpu < 0 || node < 0)
215 numa_set_localalloc();
216 }
217#endif /* HAVE_LIBNUMA */
218 switch (type) {
219 case LTTNG_COUNTER_SHM_OBJECT_SHM:
220 shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
221 cpu_fd);
222 break;
223 case LTTNG_COUNTER_SHM_OBJECT_MEM:
224 shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
225 break;
226 default:
227 assert(0);
228 }
229#ifdef HAVE_LIBNUMA
230 if (numa_avail)
231 numa_set_preferred(oldnode);
232#endif /* HAVE_LIBNUMA */
233 return shm_object;
234}
235
236struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
237 int shm_fd,
238 size_t memory_map_size)
239{
240 struct lttng_counter_shm_object *obj;
241 char *memory_map;
242
243 if (table->allocated_len >= table->size)
244 return NULL;
245
246 obj = &table->objects[table->allocated_len];
247
248 obj->shm_fd = shm_fd;
249 obj->shm_fd_ownership = 1;
250
251 /* memory_map: mmap */
252 memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
253 MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
254 if (memory_map == MAP_FAILED) {
255 PERROR("mmap");
256 goto error_mmap;
257 }
258 obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
259 obj->memory_map = memory_map;
260 obj->memory_map_size = memory_map_size;
261 obj->allocated_len = memory_map_size;
262 obj->index = table->allocated_len++;
263
264 return obj;
265
266error_mmap:
267 return NULL;
268}
269
270/*
271 * Passing ownership of mem to object.
272 */
273struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
274 void *mem, size_t memory_map_size)
275{
276 struct lttng_counter_shm_object *obj;
277
278 if (table->allocated_len >= table->size)
279 return NULL;
280 obj = &table->objects[table->allocated_len];
281
282 obj->shm_fd = -1;
283 obj->shm_fd_ownership = 0;
284
285 obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
286 obj->memory_map = mem;
287 obj->memory_map_size = memory_map_size;
288 obj->allocated_len = memory_map_size;
289 obj->index = table->allocated_len++;
290
291 return obj;
292
293 return NULL;
294}
295
296static
297void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
298{
299 switch (obj->type) {
300 case LTTNG_COUNTER_SHM_OBJECT_SHM:
301 {
302 int ret;
303
304 ret = munmap(obj->memory_map, obj->memory_map_size);
305 if (ret) {
306 PERROR("umnmap");
307 assert(0);
308 }
309
310 if (obj->shm_fd_ownership) {
311 /* Delete FDs only if called from app (not consumer). */
312 if (!consumer) {
313 lttng_ust_lock_fd_tracker();
314 ret = close(obj->shm_fd);
315 if (!ret) {
316 lttng_ust_delete_fd_from_tracker(obj->shm_fd);
317 } else {
318 PERROR("close");
319 assert(0);
320 }
321 lttng_ust_unlock_fd_tracker();
322 } else {
323 ret = close(obj->shm_fd);
324 if (ret) {
325 PERROR("close");
326 assert(0);
327 }
328 }
329 }
330 break;
331 }
332 case LTTNG_COUNTER_SHM_OBJECT_MEM:
333 {
334 free(obj->memory_map);
335 break;
336 }
337 default:
338 assert(0);
339 }
340}
341
342void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
343{
344 int i;
345
346 for (i = 0; i < table->allocated_len; i++)
347 lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
348 free(table);
349}
350
351/*
352 * lttng_counter_zalloc_shm - allocate memory within a shm object.
353 *
354 * Shared memory is already zeroed by shmget.
355 * *NOT* multithread-safe (should be protected by mutex).
356 * Returns a -1, -1 tuple on error.
357 */
358struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
359{
360 struct lttng_counter_shm_ref ref;
361 struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
362
363 if (obj->memory_map_size - obj->allocated_len < len)
364 return shm_ref_error;
365 ref.index = obj->index;
366 ref.offset = obj->allocated_len;
367 obj->allocated_len += len;
368 return ref;
369}
370
371void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
372{
cd61d9bf 373 size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
ebabbf58
MD
374 obj->allocated_len += offset_len;
375}
This page took 0.037141 seconds and 4 git commands to generate.