/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (C) 2009 Pierre-Marc Fournier
 * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

/*
 * Do _not_ define _LGPL_SOURCE because we don't want to create a
 * circular dependency loop between this malloc wrapper, liburcu and
 * libc.
 */
#include <ust-dlfcn.h>
#include <sys/types.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include <assert.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <urcu/arch.h>
#include <ust-helper.h>
#include "ust-compat.h"

#define TRACEPOINT_DEFINE
#define TRACEPOINT_CREATE_PROBES
#define TP_IP_PARAM ip
#include "ust_libc.h"

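/*
 * Bootstrap allocator backing store: allocations requested before the
 * real libc symbols have been resolved with dlsym() are carved out of
 * this fixed-size buffer and are never freed.
 */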
#define STATIC_CALLOC_LEN 4096
static char static_calloc_buf[STATIC_CALLOC_LEN];
static unsigned long static_calloc_buf_offset;

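/*
 * Table of allocation functions currently in use: either the static_*
 * bootstrap implementations below, or the real libc functions found
 * with dlsym(RTLD_NEXT) once lookup_all_symbols() has run.
 */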
struct alloc_functions {
	void *(*calloc)(size_t nmemb, size_t size);
	void *(*malloc)(size_t size);
	void (*free)(void *ptr);
	void *(*realloc)(void *ptr, size_t size);
	void *(*memalign)(size_t alignment, size_t size);
	int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
};

static
struct alloc_functions cur_alloc;

/*
 * Make sure our own use of the TLS compat layer will not cause infinite
 * recursion by calling calloc.
 */

static
void *static_calloc(size_t nmemb, size_t size);

/*
 * pthread mutex replacement for URCU tls compat layer.
 */
static int ust_malloc_lock;

static
void ust_malloc_spin_lock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_lock(pthread_mutex_t *lock)
{
	/*
	 * The memory barrier within cmpxchg takes care of ordering
	 * memory accesses with respect to the start of the critical
	 * section.
	 */
	while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
		caa_cpu_relax();
}

static
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
	__attribute__((unused));
static
void ust_malloc_spin_unlock(pthread_mutex_t *lock)
{
	/*
	 * Ensure memory accesses within the critical section do not
	 * leak outside.
	 */
	cmm_smp_mb();
	uatomic_set(&ust_malloc_lock, 0);
}

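/*
 * While expanding DEFINE_URCU_TLS(), redirect calloc to the static
 * allocator and the pthread mutex to the spinlock above, so that the
 * URCU TLS compat layer never re-enters the wrapped libc functions.
 */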
#define calloc static_calloc
#define pthread_mutex_lock ust_malloc_spin_lock
#define pthread_mutex_unlock ust_malloc_spin_unlock
static DEFINE_URCU_TLS(int, malloc_nesting);
#undef pthread_mutex_unlock
#undef pthread_mutex_lock
#undef calloc

/*
 * Static allocator to use when initially executing dlsym(). It stores
 * each object's size in a size_t placed immediately before the object.
 */
static
void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
{
	size_t prev_offset, new_offset, res_offset, aligned_offset;

	if (nmemb * size == 0) {
		return NULL;
	}

	/*
	 * Protect static_calloc_buf_offset from concurrent updates
	 * using a cmpxchg loop rather than a mutex to remove a
	 * dependency on pthread. This will minimize the risk of bad
	 * interaction between mutex and malloc instrumentation.
	 */
	res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
	do {
		prev_offset = res_offset;
		aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
		new_offset = aligned_offset + nmemb * size;
		if (new_offset > sizeof(static_calloc_buf)) {
			abort();
		}
	} while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
			prev_offset, new_offset)) != prev_offset);
	*(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
	return &static_calloc_buf[aligned_offset];
}

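/*
 * Trivial bootstrap entry points, all backed by static_calloc_aligned().
 */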
static
void *static_calloc(size_t nmemb, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(nmemb, size, 1);
	return retval;
}

static
void *static_malloc(size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, 1);
	return retval;
}

static
void static_free(void *ptr)
{
	/* no-op. */
}

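/*
 * Bootstrap realloc: shrinking reuses the chunk in place (the stored
 * size header is updated); growing allocates a fresh chunk from the
 * static buffer and copies the old data. The old chunk is leaked, since
 * static_free() is a no-op.
 */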
static
void *static_realloc(void *ptr, size_t size)
{
	size_t *old_size = NULL;
	void *retval;

	if (size == 0) {
		retval = NULL;
		goto end;
	}

	if (ptr) {
		old_size = (size_t *) ptr - 1;
		if (size <= *old_size) {
			/* We can re-use the old entry. */
			*old_size = size;
			retval = ptr;
			goto end;
		}
	}
	/* We need to expand. Don't free previous memory location. */
	retval = static_calloc_aligned(1, size, 1);
	assert(retval);
	if (ptr)
		memcpy(retval, ptr, *old_size);
end:
	return retval;
}

static
void *static_memalign(size_t alignment, size_t size)
{
	void *retval;

	retval = static_calloc_aligned(1, size, alignment);
	return retval;
}

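/*
 * Bootstrap posix_memalign(): an invalid alignment leaves *memptr
 * untouched; the function always returns 0.
 */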
static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	void *ptr;

	/* Check that alignment is a power of 2 and at least sizeof(void *). */
	if (alignment & (alignment - 1)
			|| alignment < sizeof(void *)
			|| alignment == 0) {
		goto end;
	}
	ptr = static_calloc_aligned(1, size, alignment);
	*memptr = ptr;
end:
	return 0;
}

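/*
 * Install the static_* bootstrap functions. The asserts guarantee this
 * runs only while the table is still empty.
 */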
static
void setup_static_allocator(void)
{
	assert(cur_alloc.calloc == NULL);
	cur_alloc.calloc = static_calloc;
	assert(cur_alloc.malloc == NULL);
	cur_alloc.malloc = static_malloc;
	assert(cur_alloc.free == NULL);
	cur_alloc.free = static_free;
	assert(cur_alloc.realloc == NULL);
	cur_alloc.realloc = static_realloc;
	assert(cur_alloc.memalign == NULL);
	cur_alloc.memalign = static_memalign;
	assert(cur_alloc.posix_memalign == NULL);
	cur_alloc.posix_memalign = static_posix_memalign;
}

static
void lookup_all_symbols(void)
{
	struct alloc_functions af;

	/*
	 * Temporarily redirect allocation functions to
	 * static_calloc_aligned, and free function to static_free
	 * (no-op), until the dlsym lookup has completed.
	 */
	setup_static_allocator();

	/* Perform the actual lookups */
	af.calloc = dlsym(RTLD_NEXT, "calloc");
	af.malloc = dlsym(RTLD_NEXT, "malloc");
	af.free = dlsym(RTLD_NEXT, "free");
	af.realloc = dlsym(RTLD_NEXT, "realloc");
	af.memalign = dlsym(RTLD_NEXT, "memalign");
	af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");

	/* Populate the new allocator functions */
	memcpy(&cur_alloc, &af, sizeof(cur_alloc));
}

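/*
 * Wrapped libc entry points. Each wrapper increments the malloc_nesting
 * TLS counter, lazily resolves the real libc function on first use, and
 * emits its tracepoint only at nesting level 1, so allocations performed
 * by the tracing machinery itself are not traced recursively.
 */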
void *malloc(size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.malloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.malloc == NULL) {
			fprintf(stderr, "mallocwrap: unable to find malloc\n");
			abort();
		}
	}
	retval = cur_alloc.malloc(size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, malloc,
			size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void free(void *ptr)
{
	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing to free.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		goto end;
	}

	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, free,
			ptr, LTTNG_UST_CALLER_IP());
	}

	if (cur_alloc.free == NULL) {
		lookup_all_symbols();
		if (cur_alloc.free == NULL) {
			fprintf(stderr, "mallocwrap: unable to find free\n");
			abort();
		}
	}
	cur_alloc.free(ptr);
end:
	URCU_TLS(malloc_nesting)--;
}

void *calloc(size_t nmemb, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.calloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.calloc == NULL) {
			fprintf(stderr, "callocwrap: unable to find calloc\n");
			abort();
		}
	}
	retval = cur_alloc.calloc(nmemb, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, calloc,
			nmemb, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *realloc(void *ptr, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	/*
	 * Check whether the memory was allocated with
	 * static_calloc_aligned, in which case there is nothing
	 * to free, and we need to copy the old data.
	 */
	if (caa_unlikely((char *)ptr >= static_calloc_buf &&
			(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
		size_t *old_size;

		old_size = (size_t *) ptr - 1;
		if (cur_alloc.calloc == NULL) {
			lookup_all_symbols();
			if (cur_alloc.calloc == NULL) {
				fprintf(stderr, "reallocwrap: unable to find calloc\n");
				abort();
			}
		}
		retval = cur_alloc.calloc(1, size);
		if (retval) {
			/* Copy at most the new size, as realloc would. */
			memcpy(retval, ptr,
				size < *old_size ? size : *old_size);
		}
		/*
		 * Mimic a NULL input pointer, so that memory allocation
		 * analysis based on the trace doesn't get confused by the
		 * address from the static allocator.
		 */
		ptr = NULL;
		goto end;
	}

	if (cur_alloc.realloc == NULL) {
		lookup_all_symbols();
		if (cur_alloc.realloc == NULL) {
			fprintf(stderr, "reallocwrap: unable to find realloc\n");
			abort();
		}
	}
	retval = cur_alloc.realloc(ptr, size);
end:
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, realloc,
			ptr, size, retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

void *memalign(size_t alignment, size_t size)
{
	void *retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.memalign == NULL) {
			fprintf(stderr, "memalignwrap: unable to find memalign\n");
			abort();
		}
	}
	retval = cur_alloc.memalign(alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, memalign,
			alignment, size, retval,
			LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

int posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int retval;

	URCU_TLS(malloc_nesting)++;
	if (cur_alloc.posix_memalign == NULL) {
		lookup_all_symbols();
		if (cur_alloc.posix_memalign == NULL) {
			fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
			abort();
		}
	}
	retval = cur_alloc.posix_memalign(memptr, alignment, size);
	if (URCU_TLS(malloc_nesting) == 1) {
		tracepoint(lttng_ust_libc, posix_memalign,
			*memptr, alignment, size,
			retval, LTTNG_UST_CALLER_IP());
	}
	URCU_TLS(malloc_nesting)--;
	return retval;
}

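/*
 * Touch the malloc_nesting TLS variable through a dummy asm constraint
 * so the compiler materializes the TLS access here, forcing the variable
 * to be allocated during initialization rather than lazily on the first
 * wrapped allocation call.
 */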
static
void lttng_ust_fixup_malloc_nesting_tls(void)
{
	asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
}

void lttng_ust_libc_wrapper_malloc_init(void)
{
	/* Initialization already done */
	if (cur_alloc.calloc) {
		return;
	}
	lttng_ust_fixup_malloc_nesting_tls();
	/*
	 * Ensure the allocator is in place before the process becomes
	 * multithreaded.
	 */
	lookup_all_symbols();
}