*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
*/
#define _GNU_SOURCE
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency between this malloc wrapper, liburcu and
+ * libc.
+ */
#include <lttng/ust-dlfcn.h>
#include <sys/types.h>
#include <stdio.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
+#include <urcu/arch.h>
#include <lttng/align.h>
+#include <helper.h>
#define TRACEPOINT_DEFINE
#define TRACEPOINT_CREATE_PROBES
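+/*
+ * TP_IP_PARAM names the tracepoint argument that carries the caller's
+ * instruction pointer, so each event below records its call site.
+ */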
+#define TP_IP_PARAM ip
#include "ust_libc.h"
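+/*
+ * Note: each event in ust_libc.h is expected to gain a matching
+ * trailing "ip" argument, along the lines of:
+ *
+ *   TRACEPOINT_EVENT(lttng_ust_libc, malloc,
+ *       TP_ARGS(size_t, size, void *, ptr, void *, ip),
+ *       TP_FIELDS(
+ *           ctf_integer(size_t, size, size)
+ *           ctf_integer_hex(void *, ptr, ptr)
+ *       )
+ *   )
+ */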
#define STATIC_CALLOC_LEN 4096
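+/*
+ * Bootstrap allocator: dlsym(3) may itself call calloc(3) before the
+ * real allocator symbols are resolved, so those early requests are
+ * served from a fixed-size static buffer.
+ */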
static
void *static_calloc(size_t nmemb, size_t size);
+/*
+ * pthread mutex replacement for URCU tls compat layer.
+ */
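+/* Lock word: 0 when unlocked, 1 when locked. Touched only via uatomic ops. */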
+static int ust_malloc_lock;
+
+static __attribute__((unused))
+void ust_malloc_spin_lock(pthread_mutex_t *lock)
+{
+ /*
+ * The memory barrier within cmpxchg takes care of ordering
+ * memory accesses with respect to the start of the critical
+ * section.
+ */
+ while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
+ caa_cpu_relax();
+}
+
+static __attribute__((unused))
+void ust_malloc_spin_unlock(pthread_mutex_t *lock)
+{
+ /*
+ * Ensure memory accesses within the critical section do not
+ * leak outside.
+ */
+ cmm_smp_mb();
+ uatomic_set(&ust_malloc_lock, 0);
+}
+
#define calloc static_calloc
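+/*
+ * Redirect the pthread_mutex_lock()/unlock() calls emitted by the
+ * DEFINE_URCU_TLS() compatibility expansion below to the spin lock
+ * above: calling into libpthread from this wrapper could recurse into
+ * the allocator on some implementations and would reintroduce the
+ * dependency loop described at the top of this file.
+ */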
+#define pthread_mutex_lock ust_malloc_spin_lock
+#define pthread_mutex_unlock ust_malloc_spin_unlock
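+/*
+ * Per-thread nesting counter: events are emitted only at nesting
+ * level 1, so allocations performed internally by the tracepoint
+ * probes themselves are not traced.
+ */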
static DEFINE_URCU_TLS(int, malloc_nesting);
+#undef pthread_mutex_unlock
+#undef pthread_mutex_lock
#undef calloc
}
retval = cur_alloc.malloc(size);
if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(ust_libc, malloc, size, retval);
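+	/*
+	 * LTTNG_UST_CALLER_IP() typically expands to
+	 * __builtin_return_address(0), i.e. the address the application
+	 * called this wrapper from.
+	 */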
+ tracepoint(lttng_ust_libc, malloc,
+ size, retval, LTTNG_UST_CALLER_IP());
}
URCU_TLS(malloc_nesting)--;
return retval;
}
if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(ust_libc, free, ptr);
+ tracepoint(lttng_ust_libc, free,
+ ptr, LTTNG_UST_CALLER_IP());
}
if (cur_alloc.free == NULL) {
}
retval = cur_alloc.calloc(nmemb, size);
if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(ust_libc, calloc, nmemb, size, retval);
+ tracepoint(lttng_ust_libc, calloc,
+ nmemb, size, retval, LTTNG_UST_CALLER_IP());
}
URCU_TLS(malloc_nesting)--;
return retval;
retval = cur_alloc.realloc(ptr, size);
end:
if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(ust_libc, realloc, ptr, size, retval);
+ tracepoint(lttng_ust_libc, realloc,
+ ptr, size, retval, LTTNG_UST_CALLER_IP());
}
URCU_TLS(malloc_nesting)--;
return retval;
}
retval = cur_alloc.memalign(alignment, size);
if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(ust_libc, memalign, alignment, size, retval);
+ tracepoint(lttng_ust_libc, memalign,
+ alignment, size, retval,
+ LTTNG_UST_CALLER_IP());
}
URCU_TLS(malloc_nesting)--;
return retval;
}
retval = cur_alloc.posix_memalign(memptr, alignment, size);
if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(ust_libc, posix_memalign, *memptr, alignment, size,
- retval);
+ tracepoint(lttng_ust_libc, posix_memalign,
+ *memptr, alignment, size,
+ retval, LTTNG_UST_CALLER_IP());
}
URCU_TLS(malloc_nesting)--;
return retval;