/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
#define _LGPL_SOURCE
#include "shm.h"
#include <unistd.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>	/* For mode constants */
#include <fcntl.h>	/* For O_* constants */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#ifdef HAVE_LIBNUMA
#include <numa.h>
#include <numaif.h>
#endif

#include <lttng/ust-utils.h>

#include <ust-fd.h>
#include <ust-helper.h>
#include "../libringbuffer/mmap.h"
/*
 * Ensure we have the required amount of space available by writing 0
 * into the entire buffer. Not doing so can trigger SIGBUS when going
 * beyond the available shm space.
 */
static
int zero_file(int fd, size_t len)
{
	ssize_t retlen;
	size_t written = 0;
	char *zeropage = NULL;
	long pagelen;
	int ret;

	pagelen = sysconf(_SC_PAGESIZE);
	if (pagelen < 0)
		return (int) pagelen;
	zeropage = calloc(pagelen, 1);
	if (!zeropage)
		return -ENOMEM;

	while (len > written) {
		do {
			retlen = write(fd, zeropage,
				min_t(size_t, pagelen, len - written));
		} while (retlen == -1UL && errno == EINTR);
		if (retlen < 0) {
			ret = (int) retlen;
			goto error;
		}
		written += retlen;
	}
	ret = 0;
error:
	free(zeropage);
	return ret;
}
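/*
 * Illustration (not part of this file): a shared mapping that extends beyond
 * the length of its backing shm file faults with SIGBUS on first access, e.g.:
 *
 *	fd = shm_open("/example", O_CREAT | O_RDWR, 0600);	(hypothetical name)
 *	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	p[0] = 1;	<-- SIGBUS: the backing file is still 0 bytes long
 *
 * zero_file() together with ftruncate() below extends and backs the file up
 * front so that later stores through the mapping cannot fault that way.
 */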
struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
{
	struct lttng_counter_shm_object_table *table;

	table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
			max_nb_obj * sizeof(table->objects[0]));
	if (!table)
		return NULL;
	table->size = max_nb_obj;
	return table;
}
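/*
 * Usage sketch (assumed caller, not taken from this file): the table is sized
 * for the number of objects the counter will need, for example one object per
 * possible CPU plus one global object:
 *
 *	table = lttng_counter_shm_object_table_create(nr_possible_cpus + 1);
 *	if (!table)
 *		return -ENOMEM;
 *
 * "nr_possible_cpus" is a placeholder for whatever sizing the caller uses.
 */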
static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size,
			int shmfd)
{
	struct lttng_counter_shm_object *obj;
	char *memory_map;
	int ret;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	ret = zero_file(shmfd, memory_map_size);
	if (ret)
		goto error_zero_file;
	ret = ftruncate(shmfd, memory_map_size);
	if (ret)
		goto error_ftruncate;
	/*
	 * Also ensure the file metadata is synced with the storage by using
	 * fsync(2).
	 */
	ret = fsync(shmfd);
	if (ret)
		goto error_fsync;
	obj->shm_fd_ownership = 0;
	obj->shm_fd = shmfd;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
	if (memory_map == MAP_FAILED) {
		goto error_mmap;
	}
	obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
error_fsync:
error_ftruncate:
error_zero_file:
	return NULL;
}
static
struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size)
{
	struct lttng_counter_shm_object *obj;
	void *memory_map;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	memory_map = zmalloc(memory_map_size);
	if (!memory_map)
		return NULL;

	/* no shm_fd */
	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = 0;
	obj->index = table->allocated_len++;

	return obj;
}
#ifdef HAVE_LIBNUMA
/*
 * libnuma prints errors on the console even for numa_available().
 * Work-around this limitation by using get_mempolicy() directly to
 * check whether the kernel supports mempolicy.
 */
static bool lttng_is_numa_available(void)
{
	int ret;

	ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
	if (ret && errno == ENOSYS) {
		return false;
	}
	return numa_available() > 0;
}
#endif
struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
			size_t memory_map_size,
			enum lttng_counter_shm_object_type type,
			int shm_fd,
			int cpu)
{
	struct lttng_counter_shm_object *shm_object;
#ifdef HAVE_LIBNUMA
	int oldnode = 0, node;
	bool numa_avail;

	numa_avail = lttng_is_numa_available();
	if (numa_avail) {
		oldnode = numa_preferred();
		if (cpu >= 0) {
			node = numa_node_of_cpu(cpu);
			if (node >= 0)
				numa_set_preferred(node);
		}
		if (cpu < 0 || node < 0)
			numa_set_localalloc();
	}
#endif /* HAVE_LIBNUMA */
	switch (type) {
	case LTTNG_COUNTER_SHM_OBJECT_SHM:
		shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
				shm_fd);
		break;
	case LTTNG_COUNTER_SHM_OBJECT_MEM:
		shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
		break;
	default:
		assert(0);
	}
#ifdef HAVE_LIBNUMA
	if (numa_avail)
		numa_set_preferred(oldnode);
#endif /* HAVE_LIBNUMA */
	return shm_object;
}
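/*
 * Usage sketch (illustrative, not from this file): allocating one shm-backed
 * object for a given cpu from an fd the caller already opened:
 *
 *	obj = lttng_counter_shm_object_table_alloc(table, size,
 *			LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
 *	if (!obj)
 *		return -ENOMEM;
 *
 * Passing LTTNG_COUNTER_SHM_OBJECT_MEM instead allocates a heap-backed object
 * and ignores shm_fd.
 */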
struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
			int shm_fd,
			size_t memory_map_size)
{
	struct lttng_counter_shm_object *obj;
	char *memory_map;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	obj->shm_fd = shm_fd;
	obj->shm_fd_ownership = 1;

	/* memory_map: mmap */
	memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
	if (memory_map == MAP_FAILED) {
		goto error_mmap;
	}
	obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
	obj->memory_map = memory_map;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;

error_mmap:
	return NULL;
}
/*
 * Passing ownership of mem to object.
 */
struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
			void *mem, size_t memory_map_size)
{
	struct lttng_counter_shm_object *obj;

	if (table->allocated_len >= table->size)
		return NULL;
	obj = &table->objects[table->allocated_len];

	obj->shm_fd = -1;
	obj->shm_fd_ownership = 0;

	obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
	obj->memory_map = mem;
	obj->memory_map_size = memory_map_size;
	obj->allocated_len = memory_map_size;
	obj->index = table->allocated_len++;

	return obj;
}
static
void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
{
	switch (obj->type) {
	case LTTNG_COUNTER_SHM_OBJECT_SHM:
	{
		int ret;

		ret = munmap(obj->memory_map, obj->memory_map_size);
		if (ret) {
			assert(0);
		}
		if (obj->shm_fd_ownership) {
			/* Delete FDs only if called from app (not consumer). */
			if (!consumer) {
				lttng_ust_lock_fd_tracker();
				ret = close(obj->shm_fd);
				if (!ret) {
					lttng_ust_delete_fd_from_tracker(obj->shm_fd);
				} else {
					assert(0);
				}
				lttng_ust_unlock_fd_tracker();
			} else {
				ret = close(obj->shm_fd);
				if (ret) {
					assert(0);
				}
			}
		}
		break;
	}
	case LTTNG_COUNTER_SHM_OBJECT_MEM:
	{
		free(obj->memory_map);
		break;
	}
	default:
		assert(0);
	}
}
void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
{
	int i;

	for (i = 0; i < table->allocated_len; i++)
		lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
	free(table);
}
/*
 * lttng_counter_zalloc_shm - allocate memory within a shm object.
 *
 * Shared memory is already zeroed by shmget.
 * *NOT* multithread-safe (should be protected by mutex).
 * Returns a -1, -1 tuple on error.
 */
struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
{
	struct lttng_counter_shm_ref ref;
	struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };

	if (obj->memory_map_size - obj->allocated_len < len)
		return shm_ref_error;
	ref.index = obj->index;
	ref.offset = obj->allocated_len;
	obj->allocated_len += len;
	return ref;
}
void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
{
	size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);

	obj->allocated_len += offset_len;
}
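/*
 * Usage sketch (illustrative, not from this file): space is carved out of an
 * object by aligning first, then reserving; exhaustion is reported through
 * the (-1, -1) reference rather than a NULL pointer:
 *
 *	lttng_counter_align_shm(obj, __alignof__(uint64_t));
 *	ref = lttng_counter_zalloc_shm(obj, sizeof(uint64_t));
 *	if (ref.index == -1)
 *		return -ENOMEM;	(object is full)
 */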