*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
*/
#define _GNU_SOURCE
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency loop between this malloc wrapper, liburcu and
+ * libc.
+ */
#include <lttng/ust-dlfcn.h>
#include <sys/types.h>
#include <stdio.h>
#include <urcu/system.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <urcu/arch.h>
#include <lttng/align.h>
+#include <helper.h>
#define TRACEPOINT_DEFINE
#define TRACEPOINT_CREATE_PROBES
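+/*
+ * Use the wrappers' explicit "ip" tracepoint argument (filled in with
+ * LTTNG_UST_CALLER_IP() below) as the call site instruction pointer
+ * recorded with each event, rather than the default return address.
+ */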
+#define TP_IP_PARAM ip
#include "ust_libc.h"
#define STATIC_CALLOC_LEN 4096
static
struct alloc_functions cur_alloc;
+/*
+ * Make sure our own use of the TLS compat layer will not cause infinite
+ * recursion by calling calloc.
+ */
+
+static
+void *static_calloc(size_t nmemb, size_t size);
+
+/*
+ * pthread mutex replacement for URCU TLS compat layer.
+ */
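+/*
+ * A plain integer spinlock is used instead of a real pthread mutex so
+ * that taking the lock never calls into libc or libpthread, keeping
+ * the bootstrap path free of the circular dependencies noted at the
+ * top of this file.
+ */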
+static int ust_malloc_lock;
+
+static __attribute__((unused))
+void ust_malloc_spin_lock(pthread_mutex_t *lock)
+{
+ /*
+ * The memory barrier within cmpxchg takes care of ordering
+ * memory accesses with respect to the start of the critical
+ * section.
+ */
+ while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
+ caa_cpu_relax();
+}
+
+static __attribute__((unused))
+void ust_malloc_spin_unlock(pthread_mutex_t *lock)
+{
+ /*
+ * Ensure memory accesses within the critical section do not
+ * leak outside.
+ */
+ cmm_smp_mb();
+ uatomic_set(&ust_malloc_lock, 0);
+}
+
+#define calloc static_calloc
+#define pthread_mutex_lock ust_malloc_spin_lock
+#define pthread_mutex_unlock ust_malloc_spin_unlock
+static DEFINE_URCU_TLS(int, malloc_nesting);
+#undef pthread_mutex_unlock
+#undef pthread_mutex_lock
+#undef calloc
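+/*
+ * The defines above only affect the expansion of DEFINE_URCU_TLS: on
+ * platforms where the TLS compat layer falls back to pthread keys, its
+ * calloc and pthread_mutex_lock/unlock calls resolve to static_calloc
+ * and the spinlock above, so initializing malloc_nesting cannot
+ * recurse into this wrapper.
+ */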
+
/*
 * Static allocator to use when initially executing dlsym(). It stores
 * each object's size, as a size_t, immediately before the object.
void *retval;
retval = static_calloc_aligned(nmemb, size, 1);
- tracepoint(ust_libc, calloc, nmemb, size, retval);
return retval;
}
void *retval;
retval = static_calloc_aligned(1, size, 1);
- tracepoint(ust_libc, malloc, size, retval);
return retval;
}
void static_free(void *ptr)
{
/* no-op. */
- tracepoint(ust_libc, free, ptr);
}
static
if (ptr)
memcpy(retval, ptr, *old_size);
end:
- tracepoint(ust_libc, realloc, ptr, size, retval);
return retval;
}
void *retval;
retval = static_calloc_aligned(1, size, alignment);
- tracepoint(ust_libc, memalign, alignment, size, retval);
return retval;
}
static
int static_posix_memalign(void **memptr, size_t alignment, size_t size)
{
- int retval = 0;
void *ptr;
/* Check for power of 2, larger than void *. */
if (alignment & (alignment - 1)
|| alignment < sizeof(void *)
|| alignment == 0) {
- retval = EINVAL;
goto end;
}
ptr = static_calloc_aligned(1, size, alignment);
*memptr = ptr;
- if (size && !ptr)
- retval = ENOMEM;
end:
- tracepoint(ust_libc, posix_memalign, *memptr, alignment, size, retval);
return 0;
}
{
void *retval;
+ URCU_TLS(malloc_nesting)++;
if (cur_alloc.malloc == NULL) {
lookup_all_symbols();
if (cur_alloc.malloc == NULL) {
}
}
retval = cur_alloc.malloc(size);
- tracepoint(ust_libc, malloc, size, retval);
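+	/*
+	 * Only trace the outermost allocation call: the tracepoint
+	 * probe may itself allocate, and such nested calls must not
+	 * generate events.
+	 */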
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, malloc,
+ size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
return retval;
}
void free(void *ptr)
{
- tracepoint(ust_libc, free, ptr);
-
+ URCU_TLS(malloc_nesting)++;
/*
* Check whether the memory was allocated with
* static_calloc_align, in which case there is nothing to free.
*/
if (caa_unlikely((char *)ptr >= static_calloc_buf &&
(char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
- return;
+ goto end;
+ }
+
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, free,
+ ptr, LTTNG_UST_CALLER_IP());
}
if (cur_alloc.free == NULL) {
}
}
cur_alloc.free(ptr);
+end:
+ URCU_TLS(malloc_nesting)--;
}
void *calloc(size_t nmemb, size_t size)
{
void *retval;
+ URCU_TLS(malloc_nesting)++;
if (cur_alloc.calloc == NULL) {
lookup_all_symbols();
if (cur_alloc.calloc == NULL) {
}
}
retval = cur_alloc.calloc(nmemb, size);
- tracepoint(ust_libc, calloc, nmemb, size, retval);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, calloc,
+ nmemb, size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
return retval;
}
{
void *retval;
- /* Check whether the memory was allocated with
+ URCU_TLS(malloc_nesting)++;
+ /*
+ * Check whether the memory was allocated with
* static_calloc_align, in which case there is nothing
* to free, and we need to copy the old data.
*/
if (retval) {
memcpy(retval, ptr, *old_size);
}
+ /*
+	 * Mimic that a NULL pointer has been received, so
+	 * memory allocation analysis based on the trace
+	 * doesn't get confused by the address from the
+	 * static allocator.
+ */
+ ptr = NULL;
goto end;
}
}
retval = cur_alloc.realloc(ptr, size);
end:
- tracepoint(ust_libc, realloc, ptr, size, retval);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, realloc,
+ ptr, size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
return retval;
}
{
void *retval;
+ URCU_TLS(malloc_nesting)++;
if (cur_alloc.memalign == NULL) {
lookup_all_symbols();
if (cur_alloc.memalign == NULL) {
}
}
retval = cur_alloc.memalign(alignment, size);
- tracepoint(ust_libc, memalign, alignment, size, retval);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, memalign,
+ alignment, size, retval,
+ LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
return retval;
}
{
int retval;
+ URCU_TLS(malloc_nesting)++;
if (cur_alloc.posix_memalign == NULL) {
lookup_all_symbols();
if (cur_alloc.posix_memalign == NULL) {
}
}
retval = cur_alloc.posix_memalign(memptr, alignment, size);
- tracepoint(ust_libc, posix_memalign, *memptr, alignment, size, retval);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, posix_memalign,
+ *memptr, alignment, size,
+ retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
return retval;
}
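+/*
+ * Touch the malloc_nesting TLS variable from the constructor, while
+ * the process is still single-threaded, so its storage is allocated
+ * up front: a lazy TLS allocation on first access could itself call
+ * into the allocator and recurse into this wrapper.
+ */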
+static
+void lttng_ust_fixup_malloc_nesting_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
+}
+
__attribute__((constructor))
void lttng_ust_malloc_wrapper_init(void)
{
if (cur_alloc.calloc) {
return;
}
+ lttng_ust_fixup_malloc_nesting_tls();
/*
* Ensure the allocator is in place before the process becomes
* multithreaded.