*.swo
*.trs
*.log
-Makefile
.libs/
.deps/
*~
Makefile.in
*.loT
*.info
+*.class
cscope.*
-configure
+
+/configure
/aclocal.m4
-autom4te.cache/
+/autom4te.cache
/include/config.h
/include/config.h.in
/include/stamp-h1
/include/lttng/stamp-h3
/include/lttng/ust-version.h
/include/lttng/ust-config.h
+
/config/ar-lib
/config/compile
/config/config.guess
/config/ltmain.sh
/config/missing
/config/test-driver
+
/m4/libtool.m4
/m4/lt~obsolete.m4
/m4/ltoptions.m4
/m4/ltsugar.m4
/m4/ltversion.m4
-config.log
-config.status
+
+/config.log
+/config.status
/libtool
-lttng-ust.pc
-lttng-ust-ctl.pc
-ustctl/ustctl
-ust-consumerd/ust-consumerd
+/src/lttng-ust.pc
+/src/lttng-ust-ctl.pc
-doc/examples/demo/demo
-doc/examples/easy-ust/sample
-doc/examples/hello-static-lib/hello
-doc/examples/gen-tp/sample
-doc/examples/gen-tp/sample_tracepoint.h
-doc/examples/demo-tracef/demo-tracef
-doc/examples/demo-tracef/demo-vtracef
-doc/examples/demo-tracelog/demo-tracelog
-doc/examples/demo-tracelog/demo-vtracelog
-doc/examples/cmake-multiple-shared-libraries/build/
+/doc/examples/demo/demo
+/doc/examples/easy-ust/sample
+/doc/examples/hello-static-lib/hello
+/doc/examples/gen-tp/sample
+/doc/examples/gen-tp/sample_tracepoint.h
+/doc/examples/demo-tracef/demo-tracef
+/doc/examples/demo-tracef/demo-vtracef
+/doc/examples/demo-tracelog/demo-tracelog
+/doc/examples/demo-tracelog/demo-vtracelog
+/doc/examples/cmake-multiple-shared-libraries/build/
-doc/man/*.xml
-doc/man/lttng-gen-tp.1
-doc/man/lttng-ust-cyg-profile.3
-doc/man/lttng-ust-dl.3
-doc/man/lttng-ust.3
-doc/man/tracef.3
-doc/man/tracelog.3
-doc/man/vtracef.3
-doc/man/vtracelog.3
+/doc/man/*.xml
+/doc/man/lttng-gen-tp.1
+/doc/man/lttng-ust-cyg-profile.3
+/doc/man/lttng-ust-dl.3
+/doc/man/lttng-ust.3
+/doc/man/tracef.3
+/doc/man/tracelog.3
+/doc/man/vtracef.3
+/doc/man/vtracelog.3
-tests/benchmark/bench1
-tests/benchmark/bench2
-tests/compile/ctf-types/ctf-types
-tests/compile/hello.cxx/hello
-tests/compile/hello/hello
-tests/compile/hello-many/hello-many
-tests/compile/same_line_tracepoint/same_line_tracepoint
-tests/compile/test-app-ctx/hello
-tests/unit/gcc-weak-hidden/test_gcc_weak_hidden
-tests/unit/libmsgpack/test_msgpack
-tests/unit/libringbuffer/test_shm
-tests/unit/pthread_name/test_pthread_name
-tests/unit/snprintf/test_snprintf
-tests/unit/ust-elf/ust-elf
-tests/unit/ust-error/test_ust_error
-tests/unit/ust-utils/test_ust_utils
-tests/unit/ust-utils/test_ust_utils_cxx
+/tests/benchmark/bench1
+/tests/benchmark/bench2
+/tests/compile/ctf-types/ctf-types
+/tests/compile/hello.cxx/hello
+/tests/compile/hello/hello
+/tests/compile/hello-many/hello-many
+/tests/compile/same_line_tracepoint/same_line_tracepoint
+/tests/compile/test-app-ctx/hello
+/tests/unit/gcc-weak-hidden/test_gcc_weak_hidden
+/tests/unit/libmsgpack/test_msgpack
+/tests/unit/libringbuffer/test_shm
+/tests/unit/pthread_name/test_pthread_name
+/tests/unit/snprintf/test_snprintf
+/tests/unit/ust-elf/ust-elf
+/tests/unit/ust-error/test_ust_error
+/tests/unit/ust-utils/test_ust_utils
+/tests/unit/ust-utils/test_ust_utils_cxx
# Java agent library
-*.class
-lttng-ust-agent*.jar
-liblttng-ust-agent.jar
-classnoinst.stamp
-jni-header.stamp
-context-jni-header.stamp
-jul-jni-header.stamp
-log4j-jni-header.stamp
-org_lttng_ust_agent_context_LttngContextApi.h
-org_lttng_ust_agent_jul_LttngJulApi.h
-org_lttng_ust_agent_log4j_LttngLog4jApi.h
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-all/*.jar
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-common/classnoinst.stamp
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-common/context-jni-header.stamp
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-common/*.jar
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-jul/classnoinst.stamp
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-jul/jul-jni-header.stamp
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-jul/*.jar
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-log4j/classnoinst.stamp
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-log4j/log4j-jni-header.stamp
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-log4j/*.jar
+/src/liblttng-ust-java-agent/jni/common/org_lttng_ust_agent_context_LttngContextApi.h
+/src/liblttng-ust-java-agent/jni/jul/org_lttng_ust_agent_jul_LttngJulApi.h
+/src/liblttng-ust-java-agent/jni/log4j/org_lttng_ust_agent_log4j_LttngLog4jApi.h
+/src/liblttng-ust-java/classnoinst.stamp
+/src/liblttng-ust-java/jni-header.stamp
# Python agent
-python-lttngust/lttngust/version.py
-python-lttngust/**/*.pyc
-python-lttngust/build
-python-lttngust/setup.py
-python-lttngust/build-python-bindings.stamp
-python-lttngust/copy-static-deps.stamp
+/src/python-lttngust/lttngust/version.py
+/src/python-lttngust/**/*.pyc
+/src/python-lttngust/build
+/src/python-lttngust/setup.py
+/src/python-lttngust/build-python-bindings.stamp
+/src/python-lttngust/copy-static-deps.stamp
+/src/python-lttngust/installed_files.txt
+
+# Makefiles
+/Makefile
+/doc/Makefile
+/doc/examples/Makefile
+/doc/man/Makefile
+/include/Makefile
+/src/libcounter/Makefile
+/src/liblttng-ust-comm/Makefile
+/src/liblttng-ust-ctl/Makefile
+/src/liblttng-ust-cyg-profile/Makefile
+/src/liblttng-ust-dl/Makefile
+/src/liblttng-ust-fd/Makefile
+/src/liblttng-ust-fork/Makefile
+/src/liblttng-ust-java-agent/Makefile
+/src/liblttng-ust-java-agent/java/Makefile
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-all/Makefile
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-common/Makefile
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-jul/Makefile
+/src/liblttng-ust-java-agent/java/lttng-ust-agent-log4j/Makefile
+/src/liblttng-ust-java-agent/jni/Makefile
+/src/liblttng-ust-java-agent/jni/common/Makefile
+/src/liblttng-ust-java-agent/jni/jul/Makefile
+/src/liblttng-ust-java-agent/jni/log4j/Makefile
+/src/liblttng-ust-java/Makefile
+/src/liblttng-ust-libc-wrapper/Makefile
+/src/liblttng-ust-python-agent/Makefile
+/src/liblttng-ust/Makefile
+/src/libmsgpack/Makefile
+/src/libringbuffer/Makefile
+/src/python-lttngust/Makefile
+/src/snprintf/Makefile
+/src/Makefile
+/tests/Makefile
+/tests/benchmark/Makefile
+/tests/compile/Makefile
+/tests/compile/ctf-types/Makefile
+/tests/compile/hello-many/Makefile
+/tests/compile/hello.cxx/Makefile
+/tests/compile/hello/Makefile
+/tests/compile/same_line_tracepoint/Makefile
+/tests/compile/test-app-ctx/Makefile
+/tests/unit/Makefile
+/tests/unit/gcc-weak-hidden/Makefile
+/tests/unit/libmsgpack/Makefile
+/tests/unit/libringbuffer/Makefile
+/tests/unit/pthread_name/Makefile
+/tests/unit/snprintf/Makefile
+/tests/unit/ust-elf/Makefile
+/tests/unit/ust-error/Makefile
+/tests/unit/ust-utils/Makefile
+/tests/utils/Makefile
+/tools/Makefile
ACLOCAL_AMFLAGS = -I m4
-SUBDIRS = . include snprintf libringbuffer liblttng-ust-comm \
- libcounter \
- libmsgpack \
- liblttng-ust \
- liblttng-ust-ctl \
- liblttng-ust-fd \
- liblttng-ust-fork \
- liblttng-ust-libc-wrapper \
- liblttng-ust-cyg-profile \
- tools
-
-if ENABLE_UST_DL
-SUBDIRS += liblttng-ust-dl
-endif
-
-if ENABLE_JNI_INTERFACE
-SUBDIRS += liblttng-ust-java
-endif
-
-if ENABLE_JAVA_AGENT
-SUBDIRS += liblttng-ust-java-agent
-endif
-
-if ENABLE_PYTHON_AGENT
-SUBDIRS += python-lttngust \
- liblttng-ust-python-agent
-endif
-
-SUBDIRS += tests doc
-
-pkgconfigdir = $(libdir)/pkgconfig
-pkgconfig_DATA = lttng-ust.pc lttng-ust-ctl.pc
+SUBDIRS = \
+ include \
+ src \
+ tools \
+ doc \
+ tests
dist_doc_DATA = \
ChangeLog \
# List of files to be generated from '.in' templates by AC_OUTPUT
AC_CONFIG_FILES([
Makefile
- doc/Makefile
doc/examples/Makefile
+ doc/Makefile
doc/man/Makefile
include/Makefile
- snprintf/Makefile
- libcounter/Makefile
- libmsgpack/Makefile
- libringbuffer/Makefile
- liblttng-ust-comm/Makefile
- liblttng-ust/Makefile
- liblttng-ust-ctl/Makefile
- liblttng-ust-fork/Makefile
- liblttng-ust-dl/Makefile
- liblttng-ust-fd/Makefile
- liblttng-ust-java/Makefile
- liblttng-ust-java-agent/Makefile
- liblttng-ust-java-agent/java/Makefile
- liblttng-ust-java-agent/java/lttng-ust-agent-all/Makefile
- liblttng-ust-java-agent/java/lttng-ust-agent-common/Makefile
- liblttng-ust-java-agent/java/lttng-ust-agent-jul/Makefile
- liblttng-ust-java-agent/java/lttng-ust-agent-log4j/Makefile
- liblttng-ust-java-agent/jni/Makefile
- liblttng-ust-java-agent/jni/common/Makefile
- liblttng-ust-java-agent/jni/jul/Makefile
- liblttng-ust-java-agent/jni/log4j/Makefile
- liblttng-ust-libc-wrapper/Makefile
- liblttng-ust-cyg-profile/Makefile
- liblttng-ust-python-agent/Makefile
- python-lttngust/Makefile
- python-lttngust/setup.py
- python-lttngust/lttngust/version.py
- tools/Makefile
- tests/Makefile
- tests/compile/Makefile
+ src/libcounter/Makefile
+ src/liblttng-ust-comm/Makefile
+ src/liblttng-ust-ctl/Makefile
+ src/liblttng-ust-cyg-profile/Makefile
+ src/liblttng-ust-dl/Makefile
+ src/liblttng-ust-fd/Makefile
+ src/liblttng-ust-fork/Makefile
+ src/liblttng-ust-java-agent/java/lttng-ust-agent-all/Makefile
+ src/liblttng-ust-java-agent/java/lttng-ust-agent-common/Makefile
+ src/liblttng-ust-java-agent/java/lttng-ust-agent-jul/Makefile
+ src/liblttng-ust-java-agent/java/lttng-ust-agent-log4j/Makefile
+ src/liblttng-ust-java-agent/java/Makefile
+ src/liblttng-ust-java-agent/jni/common/Makefile
+ src/liblttng-ust-java-agent/jni/jul/Makefile
+ src/liblttng-ust-java-agent/jni/log4j/Makefile
+ src/liblttng-ust-java-agent/jni/Makefile
+ src/liblttng-ust-java-agent/Makefile
+ src/liblttng-ust-java/Makefile
+ src/liblttng-ust-libc-wrapper/Makefile
+ src/liblttng-ust/Makefile
+ src/liblttng-ust-python-agent/Makefile
+ src/libmsgpack/Makefile
+ src/libringbuffer/Makefile
+ src/lttng-ust-ctl.pc
+ src/lttng-ust.pc
+ src/Makefile
+ src/python-lttngust/lttngust/version.py
+ src/python-lttngust/Makefile
+ src/python-lttngust/setup.py
+ src/snprintf/Makefile
+ tests/benchmark/Makefile
tests/compile/ctf-types/Makefile
tests/compile/hello.cxx/Makefile
tests/compile/hello/Makefile
tests/compile/hello-many/Makefile
+ tests/compile/Makefile
tests/compile/same_line_tracepoint/Makefile
tests/compile/test-app-ctx/Makefile
- tests/benchmark/Makefile
+ tests/Makefile
tests/unit/gcc-weak-hidden/Makefile
tests/unit/libmsgpack/Makefile
- tests/unit/Makefile
tests/unit/libringbuffer/Makefile
+ tests/unit/Makefile
tests/unit/pthread_name/Makefile
tests/unit/snprintf/Makefile
tests/unit/ust-elf/Makefile
tests/unit/ust-error/Makefile
tests/unit/ust-utils/Makefile
tests/utils/Makefile
- lttng-ust.pc
- lttng-ust-ctl.pc
+ tools/Makefile
])
AC_OUTPUT
CFLAGS='$(CFLAGS)' \
AM_CFLAGS='$(AM_CFLAGS)' \
LDFLAGS="$(LDFLAGS)" \
- AM_LDFLAGS='$(AM_LDFLAGS) -L../../../liblttng-ust/.libs -Wl,-rpath="$(PWD)/../../liblttng-ust/.libs/" -Wl,-rpath-link="$(PWD)/../../liblttng-ust/.libs/"' \
+ AM_LDFLAGS='$(AM_LDFLAGS) -L../../../src/liblttng-ust/.libs -Wl,-rpath="$(PWD)/../../src/liblttng-ust/.libs/" -Wl,-rpath-link="$(PWD)/../../src/liblttng-ust/.libs/"' \
LTTNG_GEN_TP_PATH="$$rel_src_subdir$(top_srcdir)/tools/" \
AM_V_P="$(AM_V_P)" \
AM_V_at="$(AM_V_at)" \
cd $$subdir && \
$(MAKE) all \
CLASSPATH="$(CLASSPATH)" \
- JAVA_CLASSPATH_OVERRIDE_JUL="../../../liblttng-ust-java-agent/java/lttng-ust-agent-jul" \
- JAVA_CLASSPATH_OVERRIDE_COMMON="../../../liblttng-ust-java-agent/java/lttng-ust-agent-common" \
+ JAVA_CLASSPATH_OVERRIDE_JUL="../../../src/liblttng-ust-java-agent/java/lttng-ust-agent-jul" \
+ JAVA_CLASSPATH_OVERRIDE_COMMON="../../../src/liblttng-ust-java-agent/java/lttng-ust-agent-common" \
$(AM_MAKEFLAGS) \
) || exit 1; \
done; \
cd $$subdir && \
$(MAKE) all \
CLASSPATH="$(CLASSPATH)" \
- JAVA_CLASSPATH_OVERRIDE_LOG4J="../../../liblttng-ust-java-agent/java/lttng-ust-agent-log4j" \
- JAVA_CLASSPATH_OVERRIDE_COMMON="../../../liblttng-ust-java-agent/java/lttng-ust-agent-common" \
+ JAVA_CLASSPATH_OVERRIDE_LOG4J="../../../src/liblttng-ust-java-agent/java/lttng-ust-agent-log4j" \
+ JAVA_CLASSPATH_OVERRIDE_COMMON="../../../src/liblttng-ust-java-agent/java/lttng-ust-agent-common" \
$(AM_MAKEFLAGS) \
) || exit 1; \
done; \
CXX="$(CXX)" \
$(CMAKE) \
-DCMAKE_INCLUDE_PATH="$(abs_top_srcdir)/include;$(abs_top_builddir)/include" \
- -DCMAKE_LIBRARY_PATH="$(abs_top_builddir)/liblttng-ust/.libs" \
+ -DCMAKE_LIBRARY_PATH="$(abs_top_builddir)/src/liblttng-ust/.libs" \
-DCMAKE_C_FLAGS="$(AM_CFLAGS) $(CPPFLAGS) $(CFLAGS)" \
-DCMAKE_CXX_FLAGS="$(AM_CXXFLAGS) $(CXXFLAGS) $(CPPFLAGS)" \
-DCMAKE_EXE_LINKER_FLAGS="$(AM_LDFLAGS) $(LDFLAGS)" \
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -fno-strict-aliasing
-
-noinst_LTLIBRARIES = libcounter.la
-
-libcounter_la_SOURCES = \
- counter.c smp.c smp.h shm.c shm.h shm_internal.h shm_types.h \
- counter-api.h counter.h counter-internal.h counter-types.h \
- counter-config.h
-
-libcounter_la_LIBADD = \
- -lpthread \
- -lrt
-
-if ENABLE_NUMA
-libcounter_la_LIBADD += -lnuma
-endif
-
-libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS)
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters API, requiring counter/config.h
- */
-
-#ifndef _LTTNG_COUNTER_API_H
-#define _LTTNG_COUNTER_API_H
-
-#include <stdint.h>
-#include <limits.h>
-#include <errno.h>
-#include "counter.h"
-#include "counter-internal.h"
-#include <urcu/compiler.h>
-#include <urcu/uatomic.h>
-#include "ust-bitmap.h"
-#include "../libringbuffer/getcpu.h"
-
-/*
- * Using unsigned arithmetic because overflow is defined.
- */
-static inline int __lttng_counter_add(const struct lib_counter_config *config,
- enum lib_counter_config_alloc alloc,
- enum lib_counter_config_sync sync,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v,
- int64_t *remainder)
-{
- size_t index;
- bool overflow = false, underflow = false;
- struct lib_counter_layout *layout;
- int64_t move_sum = 0;
-
- if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
- return -EOVERFLOW;
- index = lttng_counter_get_index(config, counter, dimension_indexes);
-
- switch (alloc) {
- case COUNTER_ALLOC_PER_CPU:
- layout = &counter->percpu_counters[lttng_ust_get_cpu()];
- break;
- case COUNTER_ALLOC_GLOBAL:
- layout = &counter->global_counters;
- break;
- default:
- return -EINVAL;
- }
- if (caa_unlikely(!layout->counters))
- return -ENODEV;
-
- switch (config->counter_size) {
- case COUNTER_SIZE_8_BIT:
- {
- int8_t *int_p = (int8_t *) layout->counters + index;
- int8_t old, n, res;
- int8_t global_sum_step = counter->global_sum_step.s8;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int8_t) ((uint8_t) old + (uint8_t) v);
- if (caa_unlikely(n > (int8_t) global_sum_step))
- move_sum = (int8_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int8_t) global_sum_step))
- move_sum = -((int8_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int8_t) ((uint8_t) old + (uint8_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && (v >= UINT8_MAX || n < old))
- overflow = true;
- else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
- underflow = true;
- break;
- }
- case COUNTER_SIZE_16_BIT:
- {
- int16_t *int_p = (int16_t *) layout->counters + index;
- int16_t old, n, res;
- int16_t global_sum_step = counter->global_sum_step.s16;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int16_t) ((uint16_t) old + (uint16_t) v);
- if (caa_unlikely(n > (int16_t) global_sum_step))
- move_sum = (int16_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int16_t) global_sum_step))
- move_sum = -((int16_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int16_t) ((uint16_t) old + (uint16_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && (v >= UINT16_MAX || n < old))
- overflow = true;
- else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
- underflow = true;
- break;
- }
- case COUNTER_SIZE_32_BIT:
- {
- int32_t *int_p = (int32_t *) layout->counters + index;
- int32_t old, n, res;
- int32_t global_sum_step = counter->global_sum_step.s32;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int32_t) ((uint32_t) old + (uint32_t) v);
- if (caa_unlikely(n > (int32_t) global_sum_step))
- move_sum = (int32_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int32_t) global_sum_step))
- move_sum = -((int32_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int32_t) ((uint32_t) old + (uint32_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && (v >= UINT32_MAX || n < old))
- overflow = true;
- else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
- underflow = true;
- break;
- }
-#if CAA_BITS_PER_LONG == 64
- case COUNTER_SIZE_64_BIT:
- {
- int64_t *int_p = (int64_t *) layout->counters + index;
- int64_t old, n, res;
- int64_t global_sum_step = counter->global_sum_step.s64;
-
- res = *int_p;
- switch (sync) {
- case COUNTER_SYNC_PER_CPU:
- {
- do {
- move_sum = 0;
- old = res;
- n = (int64_t) ((uint64_t) old + (uint64_t) v);
- if (caa_unlikely(n > (int64_t) global_sum_step))
- move_sum = (int64_t) global_sum_step / 2;
- else if (caa_unlikely(n < -(int64_t) global_sum_step))
- move_sum = -((int64_t) global_sum_step / 2);
- n -= move_sum;
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- case COUNTER_SYNC_GLOBAL:
- {
- do {
- old = res;
- n = (int64_t) ((uint64_t) old + (uint64_t) v);
- res = uatomic_cmpxchg(int_p, old, n);
- } while (old != res);
- break;
- }
- default:
- return -EINVAL;
- }
- if (v > 0 && n < old)
- overflow = true;
- else if (v < 0 && n > old)
- underflow = true;
- break;
- }
-#endif
- default:
- return -EINVAL;
- }
- if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
- lttng_bitmap_set_bit(index, layout->overflow_bitmap);
- else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
- lttng_bitmap_set_bit(index, layout->underflow_bitmap);
- if (remainder)
- *remainder = move_sum;
- return 0;
-}
-
-static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v)
-{
- int64_t move_sum;
- int ret;
-
- ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
- counter, dimension_indexes, v, &move_sum);
- if (caa_unlikely(ret))
- return ret;
- if (caa_unlikely(move_sum))
- return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
- counter, dimension_indexes, move_sum, NULL);
- return 0;
-}
-
-static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v)
-{
- return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
- dimension_indexes, v, NULL);
-}
-
-static inline int lttng_counter_add(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v)
-{
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
- case COUNTER_ALLOC_GLOBAL:
- return __lttng_counter_add_global(config, counter, dimension_indexes, v);
- default:
- return -EINVAL;
- }
-}
-
-static inline int lttng_counter_inc(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- return lttng_counter_add(config, counter, dimension_indexes, 1);
-}
-
-static inline int lttng_counter_dec(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- return lttng_counter_add(config, counter, dimension_indexes, -1);
-}
-
-#endif /* _LTTNG_COUNTER_API_H */
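For illustration, a minimal standalone sketch (hypothetical values, not from the tree) of the two ideas the deleted counter-api.h combines: adds go through unsigned casts so that wraparound stays well defined, and a per-CPU counter is kept within the global_sum_step bound by carrying half the step ("move_sum") into the global counter:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Wraparound-safe 8-bit add: unsigned overflow is defined in C. */
	int8_t old = 120;
	int8_t n = (int8_t) ((uint8_t) old + (uint8_t) 10);	/* wraps to -126 */
	printf("wrap: %d -> %d (overflow=%d)\n", old, n, n < old);

	/* global_sum_step carry: keep the per-CPU value within the step by
	 * moving step/2 into the global counter when the bound is crossed. */
	int step = 16, percpu = 14, global = 0, v = 5;
	int sum = percpu + v;				/* 19 > step */
	int move_sum = sum > step ? step / 2 : 0;	/* carry 8 */

	percpu = sum - move_sum;			/* 11, back under the step */
	global += move_sum;
	printf("carry: percpu=%d global=%d\n", percpu, global);
	return 0;
}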
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters Configuration
- */
-
-#ifndef _LTTNG_COUNTER_CONFIG_H
-#define _LTTNG_COUNTER_CONFIG_H
-
-#include <stdint.h>
-
-enum lib_counter_config_alloc {
- COUNTER_ALLOC_PER_CPU = (1 << 0),
- COUNTER_ALLOC_GLOBAL = (1 << 1),
-};
-
-enum lib_counter_config_sync {
- COUNTER_SYNC_PER_CPU,
- COUNTER_SYNC_GLOBAL,
-};
-
-struct lib_counter_config {
- uint32_t alloc; /* enum lib_counter_config_alloc flags */
- enum lib_counter_config_sync sync;
- enum {
- COUNTER_ARITHMETIC_MODULAR,
- COUNTER_ARITHMETIC_SATURATE, /* TODO */
- } arithmetic;
- enum {
- COUNTER_SIZE_8_BIT = 1,
- COUNTER_SIZE_16_BIT = 2,
- COUNTER_SIZE_32_BIT = 4,
- COUNTER_SIZE_64_BIT = 8,
- } counter_size;
-};
-
-#endif /* _LTTNG_COUNTER_CONFIG_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters Internal Header
- */
-
-#ifndef _LTTNG_COUNTER_INTERNAL_H
-#define _LTTNG_COUNTER_INTERNAL_H
-
-#include <stdint.h>
-#include <errno.h>
-#include <lttng/ust-config.h>
-#include <urcu/compiler.h>
-#include "counter-types.h"
-
-static inline int lttng_counter_validate_indexes(
- const struct lib_counter_config *config __attribute__((unused)),
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- size_t nr_dimensions = counter->nr_dimensions, i;
-
- for (i = 0; i < nr_dimensions; i++) {
- if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
- return -EOVERFLOW;
- }
- return 0;
-}
-
-
-static inline size_t lttng_counter_get_index(
- const struct lib_counter_config *config __attribute__((unused)),
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- size_t nr_dimensions = counter->nr_dimensions, i;
- size_t index = 0;
-
- for (i = 0; i < nr_dimensions; i++) {
- struct lib_counter_dimension *dimension = &counter->dimensions[i];
- const size_t *dimension_index = &dimension_indexes[i];
-
- index += *dimension_index * dimension->stride;
- }
- return index;
-}
-
-#endif /* _LTTNG_COUNTER_INTERNAL_H */
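As a standalone illustration of the stride scheme used above (hypothetical values, not from the tree): strides are computed from the innermost dimension outwards, so a 4 x 8 counter gets strides {8, 1} and element (2, 5) lands at flat index 2*8 + 5*1 = 21:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	size_t max_nr_elem[2] = { 4, 8 };	/* 4 x 8 counter matrix */
	size_t stride[2];
	size_t s = 1;
	int i;

	for (i = 1; i >= 0; i--) {		/* innermost dimension first */
		stride[i] = s;
		s *= max_nr_elem[i];
	}

	size_t idx[2] = { 2, 5 };
	size_t flat = idx[0] * stride[0] + idx[1] * stride[1];

	printf("strides={%zu,%zu} flat=%zu\n", stride[0], stride[1], flat);
	return 0;				/* strides={8,1} flat=21 */
}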
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters Types
- */
-
-#ifndef _LTTNG_COUNTER_TYPES_H
-#define _LTTNG_COUNTER_TYPES_H
-
-#include <stdint.h>
-#include <stddef.h>
-#include <stdbool.h>
-#include <sys/types.h>
-#include "counter-config.h"
-#include <lttng/ust-config.h>
-#include "shm_types.h"
-
-struct lib_counter_dimension {
- /*
- * Max. number of indexable elements.
- */
- size_t max_nr_elem;
- /*
- * The stride for a dimension is the multiplication factor which
- * should be applied to its index to take into account other
- * dimensions nested inside.
- */
- size_t stride;
-};
-
-struct lib_counter_layout {
- void *counters;
- unsigned long *overflow_bitmap;
- unsigned long *underflow_bitmap;
- int shm_fd;
- size_t shm_len;
- struct lttng_counter_shm_handle handle;
-};
-
-enum lib_counter_arithmetic {
- LIB_COUNTER_ARITHMETIC_MODULAR,
- LIB_COUNTER_ARITHMETIC_SATURATE,
-};
-
-struct lib_counter {
- size_t nr_dimensions;
- int64_t allocated_elem;
- struct lib_counter_dimension *dimensions;
- enum lib_counter_arithmetic arithmetic;
- union {
- struct {
- int32_t max, min;
- } limits_32_bit;
- struct {
- int64_t max, min;
- } limits_64_bit;
- } saturation;
- union {
- int8_t s8;
- int16_t s16;
- int32_t s32;
- int64_t s64;
- } global_sum_step; /* 0 if unused */
- struct lib_counter_config config;
-
- struct lib_counter_layout global_counters;
- struct lib_counter_layout *percpu_counters;
-
- bool is_daemon;
- struct lttng_counter_shm_object_table *object_table;
-};
-
-#endif /* _LTTNG_COUNTER_TYPES_H */
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
- *
- * counter.c
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <errno.h>
-#include "counter.h"
-#include "counter-internal.h"
-#include <urcu/system.h>
-#include <urcu/compiler.h>
-#include <stdbool.h>
-#include <ust-helper.h>
-#include "smp.h"
-#include "shm.h"
-#include "ust-compat.h"
-
-#include "ust-bitmap.h"
-
-static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
-{
- return dimension->max_nr_elem;
-}
-
-static int lttng_counter_init_stride(
- const struct lib_counter_config *config __attribute__((unused)),
- struct lib_counter *counter)
-{
- size_t nr_dimensions = counter->nr_dimensions;
- size_t stride = 1;
- ssize_t i;
-
- for (i = nr_dimensions - 1; i >= 0; i--) {
- struct lib_counter_dimension *dimension = &counter->dimensions[i];
- size_t nr_elem;
-
- nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
- dimension->stride = stride;
- /* nr_elem should be minimum 1 for each dimension. */
- if (!nr_elem)
- return -EINVAL;
-		if (stride > SIZE_MAX / nr_elem)
-			return -EINVAL;
-		stride *= nr_elem;
- }
- return 0;
-}
-
-static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
-{
- struct lib_counter_layout *layout;
- size_t counter_size;
- size_t nr_elem = counter->allocated_elem;
- size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
- struct lttng_counter_shm_object *shm_object;
-
- if (shm_fd < 0)
- return 0; /* Skip, will be populated later. */
-
- if (cpu == -1)
- layout = &counter->global_counters;
- else
- layout = &counter->percpu_counters[cpu];
- switch (counter->config.counter_size) {
- case COUNTER_SIZE_8_BIT:
- case COUNTER_SIZE_16_BIT:
- case COUNTER_SIZE_32_BIT:
- case COUNTER_SIZE_64_BIT:
- counter_size = (size_t) counter->config.counter_size;
- break;
- default:
- return -EINVAL;
- }
- layout->shm_fd = shm_fd;
- counters_offset = shm_length;
- shm_length += counter_size * nr_elem;
- overflow_offset = shm_length;
- shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
- underflow_offset = shm_length;
- shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
- layout->shm_len = shm_length;
- if (counter->is_daemon) {
- /* Allocate and clear shared memory. */
- shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
- shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
- if (!shm_object)
- return -ENOMEM;
- } else {
- /* Map pre-existing shared memory. */
- shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
- shm_fd, shm_length);
- if (!shm_object)
- return -ENOMEM;
- }
- layout->counters = shm_object->memory_map + counters_offset;
- layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
- layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
- return 0;
-}
-
-int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
-{
- struct lib_counter_config *config = &counter->config;
- struct lib_counter_layout *layout;
-
- if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
- return -EINVAL;
- layout = &counter->global_counters;
- if (layout->shm_fd >= 0)
- return -EBUSY;
- return lttng_counter_layout_init(counter, -1, fd);
-}
-
-int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
-{
- struct lib_counter_config *config = &counter->config;
- struct lib_counter_layout *layout;
-
- if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
-
- if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- if (layout->shm_fd >= 0)
- return -EBUSY;
- return lttng_counter_layout_init(counter, cpu, fd);
-}
-
-static
-int lttng_counter_set_global_sum_step(struct lib_counter *counter,
- int64_t global_sum_step)
-{
- if (global_sum_step < 0)
- return -EINVAL;
-
- switch (counter->config.counter_size) {
- case COUNTER_SIZE_8_BIT:
- if (global_sum_step > INT8_MAX)
- return -EINVAL;
- counter->global_sum_step.s8 = (int8_t) global_sum_step;
- break;
- case COUNTER_SIZE_16_BIT:
- if (global_sum_step > INT16_MAX)
- return -EINVAL;
- counter->global_sum_step.s16 = (int16_t) global_sum_step;
- break;
- case COUNTER_SIZE_32_BIT:
- if (global_sum_step > INT32_MAX)
- return -EINVAL;
- counter->global_sum_step.s32 = (int32_t) global_sum_step;
- break;
- case COUNTER_SIZE_64_BIT:
- counter->global_sum_step.s64 = global_sum_step;
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-static
-int validate_args(const struct lib_counter_config *config,
- size_t nr_dimensions __attribute__((unused)),
- const size_t *max_nr_elem,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds)
-{
- int nr_cpus = lttng_counter_num_possible_cpus();
-
- if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
- WARN_ON_ONCE(1);
- return -1;
- }
- if (!max_nr_elem)
- return -1;
- /*
- * global sum step is only useful with allocating both per-cpu
- * and global counters.
- */
- if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
- !(config->alloc & COUNTER_ALLOC_PER_CPU)))
- return -1;
- if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
- return -1;
- if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
- return -1;
- if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
- return -1;
- if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
- return -1;
- return 0;
-}
-
-struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
- size_t nr_dimensions,
- const size_t *max_nr_elem,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
-{
- struct lib_counter *counter;
- size_t dimension, nr_elem = 1;
- int cpu, ret;
- int nr_handles = 0;
- int nr_cpus = lttng_counter_num_possible_cpus();
-
- if (validate_args(config, nr_dimensions, max_nr_elem,
- global_sum_step, global_counter_fd, nr_counter_cpu_fds,
- counter_cpu_fds))
- return NULL;
- counter = zmalloc(sizeof(struct lib_counter));
- if (!counter)
- return NULL;
- counter->global_counters.shm_fd = -1;
- counter->config = *config;
- counter->is_daemon = is_daemon;
- if (lttng_counter_set_global_sum_step(counter, global_sum_step))
- goto error_sum_step;
- counter->nr_dimensions = nr_dimensions;
- counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
- if (!counter->dimensions)
- goto error_dimensions;
- for (dimension = 0; dimension < nr_dimensions; dimension++)
- counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
- if (config->alloc & COUNTER_ALLOC_PER_CPU) {
- counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
- if (!counter->percpu_counters)
- goto error_alloc_percpu;
- lttng_counter_for_each_possible_cpu(cpu)
- counter->percpu_counters[cpu].shm_fd = -1;
- }
-
- if (lttng_counter_init_stride(config, counter))
- goto error_init_stride;
-	/* TODO: saturation values. */
- for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
- nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
- counter->allocated_elem = nr_elem;
-
- if (config->alloc & COUNTER_ALLOC_GLOBAL)
- nr_handles++;
- if (config->alloc & COUNTER_ALLOC_PER_CPU)
- nr_handles += nr_cpus;
- /* Allocate table for global and per-cpu counters. */
- counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
- if (!counter->object_table)
- goto error_alloc_object_table;
-
- if (config->alloc & COUNTER_ALLOC_GLOBAL) {
- ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */
- if (ret)
- goto layout_init_error;
- }
- if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
- lttng_counter_for_each_possible_cpu(cpu) {
- ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
- if (ret)
- goto layout_init_error;
- }
- }
- return counter;
-
-layout_init_error:
- lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
-error_alloc_object_table:
-error_init_stride:
- free(counter->percpu_counters);
-error_alloc_percpu:
- free(counter->dimensions);
-error_dimensions:
-error_sum_step:
- free(counter);
- return NULL;
-}
-
-void lttng_counter_destroy(struct lib_counter *counter)
-{
- struct lib_counter_config *config = &counter->config;
-
- if (config->alloc & COUNTER_ALLOC_PER_CPU)
- free(counter->percpu_counters);
- lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
- free(counter->dimensions);
- free(counter);
-}
-
-int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
-{
- int shm_fd;
-
- shm_fd = counter->global_counters.shm_fd;
- if (shm_fd < 0)
- return -1;
- *fd = shm_fd;
- *len = counter->global_counters.shm_len;
- return 0;
-}
-
-int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
-{
- struct lib_counter_layout *layout;
- int shm_fd;
-
- if (cpu >= lttng_counter_num_possible_cpus())
- return -1;
- layout = &counter->percpu_counters[cpu];
- shm_fd = layout->shm_fd;
- if (shm_fd < 0)
- return -1;
- *fd = shm_fd;
- *len = layout->shm_len;
- return 0;
-}
-
-int lttng_counter_read(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int cpu, int64_t *value, bool *overflow,
- bool *underflow)
-{
- size_t index;
- struct lib_counter_layout *layout;
-
- if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
- return -EOVERFLOW;
- index = lttng_counter_get_index(config, counter, dimension_indexes);
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU:
- if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- break;
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0) {
- if (cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- } else {
- layout = &counter->global_counters;
- }
- break;
- case COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0)
- return -EINVAL;
- layout = &counter->global_counters;
- break;
- default:
- return -EINVAL;
- }
- if (caa_unlikely(!layout->counters))
- return -ENODEV;
-
- switch (config->counter_size) {
- case COUNTER_SIZE_8_BIT:
- {
- int8_t *int_p = (int8_t *) layout->counters + index;
- *value = (int64_t) CMM_LOAD_SHARED(*int_p);
- break;
- }
- case COUNTER_SIZE_16_BIT:
- {
- int16_t *int_p = (int16_t *) layout->counters + index;
- *value = (int64_t) CMM_LOAD_SHARED(*int_p);
- break;
- }
- case COUNTER_SIZE_32_BIT:
- {
- int32_t *int_p = (int32_t *) layout->counters + index;
- *value = (int64_t) CMM_LOAD_SHARED(*int_p);
- break;
- }
-#if CAA_BITS_PER_LONG == 64
- case COUNTER_SIZE_64_BIT:
- {
- int64_t *int_p = (int64_t *) layout->counters + index;
- *value = CMM_LOAD_SHARED(*int_p);
- break;
- }
-#endif
- default:
- return -EINVAL;
- }
- *overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
- *underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
- return 0;
-}
-
-int lttng_counter_aggregate(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int64_t *value, bool *overflow,
- bool *underflow)
-{
- int cpu, ret;
- int64_t v, sum = 0;
- bool of, uf;
-
- *overflow = false;
- *underflow = false;
-
- switch (config->alloc) {
- case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- /* Read global counter. */
- ret = lttng_counter_read(config, counter, dimension_indexes,
- -1, &v, &of, &uf);
- if (ret < 0)
- return ret;
- sum += v;
- *overflow |= of;
- *underflow |= uf;
- break;
- case COUNTER_ALLOC_PER_CPU:
- break;
- default:
- return -EINVAL;
- }
-
- switch (config->alloc) {
- case COUNTER_ALLOC_GLOBAL:
- break;
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU:
- lttng_counter_for_each_possible_cpu(cpu) {
- int64_t old = sum;
-
- ret = lttng_counter_read(config, counter, dimension_indexes,
- cpu, &v, &of, &uf);
- if (ret < 0)
- return ret;
- *overflow |= of;
- *underflow |= uf;
- /* Overflow is defined on unsigned types. */
- sum = (int64_t) ((uint64_t) old + (uint64_t) v);
- if (v > 0 && sum < old)
- *overflow = true;
- else if (v < 0 && sum > old)
- *underflow = true;
- }
- break;
- default:
- return -EINVAL;
- }
- *value = sum;
- return 0;
-}
-
-static
-int lttng_counter_clear_cpu(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int cpu)
-{
- size_t index;
- struct lib_counter_layout *layout;
-
- if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
- return -EOVERFLOW;
- index = lttng_counter_get_index(config, counter, dimension_indexes);
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU:
- if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- break;
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0) {
- if (cpu >= lttng_counter_num_possible_cpus())
- return -EINVAL;
- layout = &counter->percpu_counters[cpu];
- } else {
- layout = &counter->global_counters;
- }
- break;
- case COUNTER_ALLOC_GLOBAL:
- if (cpu >= 0)
- return -EINVAL;
- layout = &counter->global_counters;
- break;
- default:
- return -EINVAL;
- }
- if (caa_unlikely(!layout->counters))
- return -ENODEV;
-
- switch (config->counter_size) {
- case COUNTER_SIZE_8_BIT:
- {
- int8_t *int_p = (int8_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
- case COUNTER_SIZE_16_BIT:
- {
- int16_t *int_p = (int16_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
- case COUNTER_SIZE_32_BIT:
- {
- int32_t *int_p = (int32_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
-#if CAA_BITS_PER_LONG == 64
- case COUNTER_SIZE_64_BIT:
- {
- int64_t *int_p = (int64_t *) layout->counters + index;
- CMM_STORE_SHARED(*int_p, 0);
- break;
- }
-#endif
- default:
- return -EINVAL;
- }
- lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
- lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
- return 0;
-}
-
-int lttng_counter_clear(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
-{
- int cpu, ret;
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU:
- break;
- case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- /* Clear global counter. */
- ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
- if (ret < 0)
- return ret;
- break;
- default:
- return -EINVAL;
- }
-
- switch (config->alloc) {
- case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
- case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
- lttng_counter_for_each_possible_cpu(cpu) {
- ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
- if (ret < 0)
- return ret;
- }
- break;
- case COUNTER_ALLOC_GLOBAL:
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
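lttng_counter_aggregate() above sums per-CPU values through uint64_t so that wraparound stays defined, then flags overflow or underflow by direction. A standalone sketch of that idiom (hypothetical values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t sum = INT64_MAX - 1, v = 4;
	/* Add through uint64_t, where overflow is defined... */
	int64_t next = (int64_t) ((uint64_t) sum + (uint64_t) v);
	/* ...then flag overflow when a positive delta made the sum go down. */
	int overflow = v > 0 && next < sum;

	printf("next=%lld overflow=%d\n", (long long) next, overflow);
	return 0;
}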
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng Counters API
- */
-
-#ifndef _LTTNG_COUNTER_H
-#define _LTTNG_COUNTER_H
-
-#include <stdint.h>
-#include <lttng/ust-config.h>
-#include "counter-types.h"
-
-/* max_nr_elem is for each dimension. */
-struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
- size_t nr_dimensions,
- const size_t *max_nr_elem,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_destroy(struct lib_counter *counter)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_read(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int cpu, int64_t *value,
- bool *overflow, bool *underflow)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_aggregate(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes,
- int64_t *value,
- bool *overflow, bool *underflow)
- __attribute__((visibility("hidden")));
-
-int lttng_counter_clear(const struct lib_counter_config *config,
- struct lib_counter *counter,
- const size_t *dimension_indexes)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_COUNTER_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include "shm.h"
-#include <unistd.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/stat.h> /* For mode constants */
-#include <fcntl.h> /* For O_* constants */
-#include <assert.h>
-#include <stdio.h>
-#include <signal.h>
-#include <dirent.h>
-#include <limits.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <errno.h>
-
-#ifdef HAVE_LIBNUMA
-#include <numa.h>
-#include <numaif.h>
-#endif
-
-#include <lttng/ust-utils.h>
-
-#include <ust-helper.h>
-#include <ust-fd.h>
-#include "../libringbuffer/mmap.h"
-
-/*
- * Ensure we have the required amount of space available by writing 0
- * into the entire buffer. Not doing so can trigger SIGBUS when going
- * beyond the available shm space.
- */
-static
-int zero_file(int fd, size_t len)
-{
- ssize_t retlen;
- size_t written = 0;
- char *zeropage;
- long pagelen;
- int ret;
-
- pagelen = sysconf(_SC_PAGESIZE);
- if (pagelen < 0)
- return (int) pagelen;
- zeropage = calloc(pagelen, 1);
- if (!zeropage)
- return -ENOMEM;
-
- while (len > written) {
- do {
- retlen = write(fd, zeropage,
- min_t(size_t, pagelen, len - written));
-		} while (retlen == -1 && errno == EINTR);
- if (retlen < 0) {
- ret = (int) retlen;
- goto error;
- }
- written += retlen;
- }
- ret = 0;
-error:
- free(zeropage);
- return ret;
-}
-
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
-{
- struct lttng_counter_shm_object_table *table;
-
- table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
- max_nb_obj * sizeof(table->objects[0]));
- if (!table)
- return NULL;
- table->size = max_nb_obj;
- return table;
-}
-
-static
-struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size,
- int cpu_fd)
-{
- int shmfd, ret;
- struct lttng_counter_shm_object *obj;
- char *memory_map;
-
- if (cpu_fd < 0)
- return NULL;
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- /* create shm */
-
- shmfd = cpu_fd;
- ret = zero_file(shmfd, memory_map_size);
- if (ret) {
- PERROR("zero_file");
- goto error_zero_file;
- }
- ret = ftruncate(shmfd, memory_map_size);
- if (ret) {
- PERROR("ftruncate");
- goto error_ftruncate;
- }
- /*
- * Also ensure the file metadata is synced with the storage by using
- * fsync(2).
- */
- ret = fsync(shmfd);
- if (ret) {
- PERROR("fsync");
- goto error_fsync;
- }
- obj->shm_fd_ownership = 0;
- obj->shm_fd = shmfd;
-
- /* memory_map: mmap */
- memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
- if (memory_map == MAP_FAILED) {
- PERROR("mmap");
- goto error_mmap;
- }
- obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = 0;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_mmap:
-error_fsync:
-error_ftruncate:
-error_zero_file:
- return NULL;
-}
-
-static
-struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size)
-{
- struct lttng_counter_shm_object *obj;
- void *memory_map;
-
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- memory_map = zmalloc(memory_map_size);
- if (!memory_map)
- goto alloc_error;
-
- /* no shm_fd */
- obj->shm_fd = -1;
- obj->shm_fd_ownership = 0;
-
- obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = 0;
- obj->index = table->allocated_len++;
-
- return obj;
-
-alloc_error:
- return NULL;
-}
-
-/*
- * libnuma prints errors on the console even for numa_available().
- * Work-around this limitation by using get_mempolicy() directly to
- * check whether the kernel supports mempolicy.
- */
-#ifdef HAVE_LIBNUMA
-static bool lttng_is_numa_available(void)
-{
- int ret;
-
- ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
- if (ret && errno == ENOSYS) {
- return false;
- }
-	return numa_available() != -1;
-}
-#endif
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size,
- enum lttng_counter_shm_object_type type,
- int cpu_fd,
- int cpu)
-{
- struct lttng_counter_shm_object *shm_object;
-#ifdef HAVE_LIBNUMA
- int oldnode = 0, node;
- bool numa_avail;
-
- numa_avail = lttng_is_numa_available();
- if (numa_avail) {
- oldnode = numa_preferred();
- if (cpu >= 0) {
- node = numa_node_of_cpu(cpu);
- if (node >= 0)
- numa_set_preferred(node);
- }
- if (cpu < 0 || node < 0)
- numa_set_localalloc();
- }
-#endif /* HAVE_LIBNUMA */
- switch (type) {
- case LTTNG_COUNTER_SHM_OBJECT_SHM:
- shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
- cpu_fd);
- break;
- case LTTNG_COUNTER_SHM_OBJECT_MEM:
- shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
- break;
- default:
- assert(0);
- }
-#ifdef HAVE_LIBNUMA
- if (numa_avail)
- numa_set_preferred(oldnode);
-#endif /* HAVE_LIBNUMA */
- return shm_object;
-}
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd,
- size_t memory_map_size)
-{
- struct lttng_counter_shm_object *obj;
- char *memory_map;
-
- if (table->allocated_len >= table->size)
- return NULL;
-
- obj = &table->objects[table->allocated_len];
-
- obj->shm_fd = shm_fd;
- obj->shm_fd_ownership = 1;
-
- /* memory_map: mmap */
- memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
- if (memory_map == MAP_FAILED) {
- PERROR("mmap");
- goto error_mmap;
- }
- obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = memory_map_size;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_mmap:
- return NULL;
-}
-
-/*
- * Passing ownership of mem to object.
- */
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
- void *mem, size_t memory_map_size)
-{
- struct lttng_counter_shm_object *obj;
-
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- obj->shm_fd = -1;
- obj->shm_fd_ownership = 0;
-
- obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
- obj->memory_map = mem;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = memory_map_size;
- obj->index = table->allocated_len++;
-
- return obj;
-}
-
-static
-void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
-{
- switch (obj->type) {
- case LTTNG_COUNTER_SHM_OBJECT_SHM:
- {
- int ret;
-
- ret = munmap(obj->memory_map, obj->memory_map_size);
- if (ret) {
-			PERROR("munmap");
- assert(0);
- }
-
- if (obj->shm_fd_ownership) {
- /* Delete FDs only if called from app (not consumer). */
- if (!consumer) {
- lttng_ust_lock_fd_tracker();
- ret = close(obj->shm_fd);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(obj->shm_fd);
- } else {
- PERROR("close");
- assert(0);
- }
- lttng_ust_unlock_fd_tracker();
- } else {
- ret = close(obj->shm_fd);
- if (ret) {
- PERROR("close");
- assert(0);
- }
- }
- }
- break;
- }
- case LTTNG_COUNTER_SHM_OBJECT_MEM:
- {
- free(obj->memory_map);
- break;
- }
- default:
- assert(0);
- }
-}
-
-void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
-{
- int i;
-
- for (i = 0; i < table->allocated_len; i++)
- lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
- free(table);
-}
-
-/*
- * lttng_counter_zalloc_shm - allocate memory within a shm object.
- *
- * Shared memory is already zeroed on allocation.
- * *NOT* multithread-safe (should be protected by mutex).
- * Returns a -1, -1 tuple on error.
- */
-struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
-{
- struct lttng_counter_shm_ref ref;
- struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
-
- if (obj->memory_map_size - obj->allocated_len < len)
- return shm_ref_error;
- ref.index = obj->index;
- ref.offset = obj->allocated_len;
- obj->allocated_len += len;
- return ref;
-}
-
-void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
-{
- size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
- obj->allocated_len += offset_len;
-}
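The zero_file() helper above reserves backing pages by writing zeros, because faulting an unreserved page of a shared mapping raises SIGBUS when the filesystem cannot allocate it. A hypothetical alternative (a sketch, not what this code does) is posix_fallocate(3), which reserves the range up front; note that glibc may emulate it by writing when the filesystem lacks native support:

#define _POSIX_C_SOURCE 200112L
#include <fcntl.h>
#include <stddef.h>

/* Sketch: reserve [0, len) so later stores through an mmap'd view of fd
 * cannot SIGBUS for lack of backing space. Like posix_fallocate() itself,
 * returns 0 on success or an errno value. */
static int reserve_shm(int fd, size_t len)
{
	return posix_fallocate(fd, 0, (off_t) len);
}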
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SHM_H
-#define _LIBCOUNTER_SHM_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <usterr-signal-safe.h>
-#include <urcu/compiler.h>
-#include "shm_types.h"
-
-/* lttng_counter_handle_create - for UST. */
-extern
-struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data,
- uint64_t memory_map_size, int wakeup_fd);
-/* lttng_counter_handle_add_cpu - for UST. */
-extern
-int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle,
- int shm_fd, uint32_t cpu_nr,
- uint64_t memory_map_size);
-
-unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/*
- * Pointer dereferencing. We don't trust the shm_ref, so we validate
- * both the index and offset with known boundaries.
- *
- * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
- * target type, even if the shm_ref is modified by an untrusted
- * process having write access to it. We return a
- * NULL pointer if the ranges are invalid.
- */
-static inline
-char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table,
- struct lttng_counter_shm_ref *ref,
- size_t idx, size_t elem_size)
-{
- struct lttng_counter_shm_object *obj;
- size_t objindex, ref_offset;
-
- objindex = (size_t) ref->index;
- if (caa_unlikely(objindex >= table->allocated_len))
- return NULL;
- obj = &table->objects[objindex];
- ref_offset = (size_t) ref->offset;
- ref_offset += idx * elem_size;
- /* Check if part of the element returned would exceed the limits. */
- if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
- return NULL;
- return &obj->memory_map[ref_offset];
-}
-
-#define lttng_counter_shmp_index(handle, ref, index) \
- ({ \
- __typeof__((ref)._type) ____ptr_ret; \
- ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
- ____ptr_ret; \
- })
-
-#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0)
-
-static inline
-void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src)
-{
- *ref = src;
-}
-
-#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
-
-struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
- size_t memory_map_size,
- enum lttng_counter_shm_object_type type,
- const int cpu_fd,
- int cpu)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
- int shm_fd, size_t memory_map_size)
- __attribute__((visibility("hidden")));
-
-/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
-struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
- void *mem, size_t memory_map_size)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_counter_zalloc_shm - allocate memory within a shm object.
- *
- * Shared memory is already zeroed on allocation.
- * *NOT* multithread-safe (should be protected by mutex).
- * Returns a -1, -1 tuple on error.
- */
-struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
- __attribute__((visibility("hidden")));
-
-static inline
-int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref)
-{
- struct lttng_counter_shm_object_table *table = handle->table;
- struct lttng_counter_shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- return obj->shm_fd;
-}
-
-
-static inline
-int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref,
- uint64_t *size)
-{
- struct lttng_counter_shm_object_table *table = handle->table;
- struct lttng_counter_shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- *size = obj->memory_map_size;
- return 0;
-}
-
-#endif /* _LIBCOUNTER_SHM_H */
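A standalone sketch of the validation pattern behind lttng_counter_shmp_index() above (struct and function names here are hypothetical): an untrusted (index, offset) pair only becomes a pointer once both the object index and the full element range have been bounds-checked:

#include <stddef.h>

struct mapping {
	char *base;
	size_t len;
};

static void *checked_deref(struct mapping *maps, size_t nr_maps,
		size_t index, size_t offset, size_t elem_size)
{
	if (index >= nr_maps)
		return NULL;	/* untrusted object index */
	if (offset + elem_size > maps[index].len)
		return NULL;	/* element would overrun the mapping */
	return maps[index].base + offset;
}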
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SHM_INTERNAL_H
-#define _LIBCOUNTER_SHM_INTERNAL_H
-
-struct lttng_counter_shm_ref {
- volatile ssize_t index; /* within the object table */
- volatile ssize_t offset; /* within the object */
-};
-
-#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \
- union { \
- struct lttng_counter_shm_ref _ref; \
- type *_type; \
- } name
-
-#endif /* _LIBCOUNTER_SHM_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SHM_TYPES_H
-#define _LIBCOUNTER_SHM_TYPES_H
-
-#include <stdint.h>
-#include <stddef.h>
-#include <limits.h>
-#include "shm_internal.h"
-
-enum lttng_counter_shm_object_type {
- LTTNG_COUNTER_SHM_OBJECT_SHM,
- LTTNG_COUNTER_SHM_OBJECT_MEM,
-};
-
-struct lttng_counter_shm_object {
- enum lttng_counter_shm_object_type type;
- size_t index; /* within the object table */
- int shm_fd; /* shm fd */
- char *memory_map;
- size_t memory_map_size;
- uint64_t allocated_len;
- int shm_fd_ownership;
-};
-
-struct lttng_counter_shm_object_table {
- size_t size;
- size_t allocated_len;
- struct lttng_counter_shm_object objects[];
-};
-
-struct lttng_counter_shm_handle {
- struct lttng_counter_shm_object_table *table;
-};
-
-#endif /* _LIBCOUNTER_SHM_TYPES_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#define _LGPL_SOURCE
-
-#include <unistd.h>
-#include <pthread.h>
-#include "smp.h"
-
-int __lttng_counter_num_possible_cpus;
-
-#if (defined(__GLIBC__) || defined(__UCLIBC__))
-void _lttng_counter_get_num_possible_cpus(void)
-{
- int result;
-
-	/*
-	 * On Linux, when some processors are offline,
-	 * _SC_NPROCESSORS_CONF counts the offline processors, whereas
-	 * _SC_NPROCESSORS_ONLN does not. If we used _SC_NPROCESSORS_ONLN,
-	 * getcpu() could return a value greater than this sysconf, in
-	 * which case the arrays indexed by processor would overflow.
-	 */
- result = sysconf(_SC_NPROCESSORS_CONF);
- if (result == -1)
- return;
- __lttng_counter_num_possible_cpus = result;
-}
-
-#else
-
-/*
- * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
- * return the number of configured CPUs in the system but relies on the cpu
- * affinity mask of the current task.
- *
- * So instead we use a strategy similar to glibc's, counting the cpu
- * directories in "/sys/devices/system/cpu", and fall back on the value
- * from sysconf if that fails.
- */
-
-#include <dirent.h>
-#include <limits.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-
-#define __max(a,b) ((a)>(b)?(a):(b))
-
-void _lttng_counter_get_num_possible_cpus(void)
-{
- int result, count = 0;
- DIR *cpudir;
- struct dirent *entry;
-
- cpudir = opendir("/sys/devices/system/cpu");
- if (cpudir == NULL)
- goto end;
-
- /*
-	 * Count the number of directories named "cpu" followed by an
-	 * integer. This is the same strategy as glibc uses.
- */
- while ((entry = readdir(cpudir))) {
- if (entry->d_type == DT_DIR &&
- strncmp(entry->d_name, "cpu", 3) == 0) {
-
- char *endptr;
- unsigned long cpu_num;
-
- cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
- if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
- && (*endptr == '\0')) {
- count++;
- }
- }
- }
-
-end:
- /*
- * Get the sysconf value as a fallback. Keep the highest number.
- */
- result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
-
- /*
- * If both methods failed, don't store the value.
- */
- if (result < 1)
- return;
- __lttng_counter_num_possible_cpus = result;
-}
-#endif
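
/*
 * Illustrative note (not part of the original tree): the strtoul()
 * filter above accepts "cpu0" and "cpu23" (digits only after the
 * prefix), but rejects "cpu", "cpufreq" and "cpuidle" (no digits at
 * d_name + 3) as well as any name with trailing characters after the
 * number (*endptr != '\0'), so sysfs control directories are not
 * counted as processors.
 */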
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBCOUNTER_SMP_H
-#define _LIBCOUNTER_SMP_H
-
-/*
- * 4kB of per-cpu data available.
- */
-#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096
-
-extern int __lttng_counter_num_possible_cpus
- __attribute__((visibility("hidden")));
-
-extern void _lttng_counter_get_num_possible_cpus(void)
- __attribute__((visibility("hidden")));
-
-static inline
-int lttng_counter_num_possible_cpus(void)
-{
- if (!__lttng_counter_num_possible_cpus)
- _lttng_counter_get_num_possible_cpus();
- return __lttng_counter_num_possible_cpus;
-}
-
-#define lttng_counter_for_each_possible_cpu(cpu) \
- for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++)
-
-#endif /* _LIBCOUNTER_SMP_H */
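
/*
 * Illustrative sketch (not part of the original tree) of how a caller
 * would size and walk per-CPU counter memory with the helpers above.
 * The alloc_per_cpu_mem() name is hypothetical; assumes <stdlib.h>
 * and this header are included.
 */
#include <stdlib.h>

static char *alloc_per_cpu_mem(void)
{
	char *layout;
	int cpu;

	/* One LTTNG_COUNTER_PER_CPU_MEM_SIZE slot per possible CPU. */
	layout = calloc(lttng_counter_num_possible_cpus(),
			LTTNG_COUNTER_PER_CPU_MEM_SIZE);
	if (!layout)
		return NULL;
	lttng_counter_for_each_possible_cpu(cpu) {
		/* Touch the first byte of the slot belonging to "cpu". */
		layout[cpu * LTTNG_COUNTER_PER_CPU_MEM_SIZE] = 0;
	}
	return layout;
}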
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-noinst_LTLIBRARIES = liblttng-ust-comm.la
-
-liblttng_ust_comm_la_SOURCES = lttng-ust-comm.c lttng-ust-fd-tracker.c
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <limits.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/socket.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <sys/un.h>
-#include <unistd.h>
-#include <assert.h>
-#include <errno.h>
-#include <fcntl.h>
-
-#include <lttng/ust-ctl.h>
-#include <ust-comm.h>
-#include <ust-fd.h>
-#include <ust-helper.h>
-#include <lttng/ust-error.h>
-#include <ust-dynamic-type.h>
-#include <usterr-signal-safe.h>
-
-#include "../liblttng-ust/ust-events-internal.h"
-#include "../liblttng-ust/compat.h"
-
-#define USTCOMM_CODE_OFFSET(code) \
- (code == LTTNG_UST_OK ? 0 : (code - LTTNG_UST_ERR + 1))
-
-#define USTCOMM_MAX_SEND_FDS 4
-
-static
-ssize_t count_fields_recursive(size_t nr_fields,
- const struct lttng_ust_event_field **lttng_fields);
-static
-int serialize_one_field(struct lttng_ust_session *session,
- struct ustctl_field *fields, size_t *iter_output,
- const struct lttng_ust_event_field *lf);
-static
-int serialize_fields(struct lttng_ust_session *session,
- struct ustctl_field *ustctl_fields,
- size_t *iter_output, size_t nr_lttng_fields,
- const struct lttng_ust_event_field **lttng_fields);
-
-/*
- * Human-readable error messages.
- */
-static const char *ustcomm_readable_code[] = {
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_OK) ] = "Success",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR) ] = "Unknown error",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_NOENT) ] = "No entry",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_EXIST) ] = "Object already exists",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_INVAL) ] = "Invalid argument",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_PERM) ] = "Permission denied",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_NOSYS) ] = "Not implemented",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_EXITING) ] = "Process is exiting",
-
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_INVAL_MAGIC) ] = "Invalid magic number",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_INVAL_SOCKET_TYPE) ] = "Invalid socket type",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_UNSUP_MAJOR) ] = "Unsupported major version",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_PEERCRED) ] = "Cannot get unix socket peer credentials",
- [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_PEERCRED_PID) ] = "Peer credentials PID is invalid. Socket appears to belong to a distinct, non-nested pid namespace.",
-};
-
-/*
- * lttng_ust_strerror
- * @code: must be a negative value of enum lttng_ust_error_code (or 0).
- *
- * Returns a pointer to a string representing a human-readable error code
- * from the ustcomm_return_code enum.
- */
-const char *lttng_ust_strerror(int code)
-{
- code = -code;
-
- if (code < LTTNG_UST_OK || code >= LTTNG_UST_ERR_NR)
- code = LTTNG_UST_ERR;
-
- return ustcomm_readable_code[USTCOMM_CODE_OFFSET(code)];
-}
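
/*
 * Illustrative usage (not part of the original tree): callers pass the
 * negative return value straight through, so a call failing with
 * -LTTNG_UST_ERR_NOENT prints "No entry". The offset macro maps
 * LTTNG_UST_OK to slot 0 and LTTNG_UST_ERR + n to slot n + 1.
 */
static void report_failure(int ret)
{
	if (ret < 0)
		ERR("command failed: %s", lttng_ust_strerror(ret));
}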
-
-/*
- * ustcomm_connect_unix_sock
- *
- * Connect to unix socket using the path name.
- *
- * Caller handles FD tracker.
- */
-int ustcomm_connect_unix_sock(const char *pathname, long timeout)
-{
- struct sockaddr_un sun;
- int fd, ret;
-
- /*
-	 * libust threads require the close-on-exec flag for all
-	 * resources so they do not leak file descriptors upon exec.
-	 * SOCK_CLOEXEC is not used since it is Linux-specific.
- */
- fd = socket(PF_UNIX, SOCK_STREAM, 0);
- if (fd < 0) {
- PERROR("socket");
- ret = -errno;
- goto error;
- }
- if (timeout >= 0) {
- /* Give at least 10ms. */
- if (timeout < 10)
- timeout = 10;
- ret = ustcomm_setsockopt_snd_timeout(fd, timeout);
- if (ret < 0) {
- WARN("Error setting connect socket send timeout");
- }
- }
- ret = fcntl(fd, F_SETFD, FD_CLOEXEC);
- if (ret < 0) {
- PERROR("fcntl");
- ret = -errno;
- goto error_fcntl;
- }
-
- memset(&sun, 0, sizeof(sun));
- sun.sun_family = AF_UNIX;
- strncpy(sun.sun_path, pathname, sizeof(sun.sun_path));
- sun.sun_path[sizeof(sun.sun_path) - 1] = '\0';
-
- ret = connect(fd, (struct sockaddr *) &sun, sizeof(sun));
- if (ret < 0) {
- /*
- * Don't print message on connect ENOENT error, because
- * connect is used in normal execution to detect if
- * sessiond is alive. ENOENT is when the unix socket
- * file does not exist, and ECONNREFUSED is when the
- * file exists but no sessiond is listening.
- */
- if (errno != ECONNREFUSED && errno != ECONNRESET
- && errno != ENOENT && errno != EACCES)
- PERROR("connect");
- ret = -errno;
- if (ret == -ECONNREFUSED || ret == -ECONNRESET)
- ret = -EPIPE;
- goto error_connect;
- }
-
- return fd;
-
-error_connect:
-error_fcntl:
- {
- int closeret;
-
- closeret = close(fd);
- if (closeret)
- PERROR("close");
- }
-error:
- return ret;
-}
-
-/*
- * ustcomm_accept_unix_sock
- *
- * Perform an accept(2) on the socket and return the new file
- * descriptor. The socket MUST have been bound with bind(2) beforehand.
- */
-int ustcomm_accept_unix_sock(int sock)
-{
- int new_fd;
- struct sockaddr_un sun;
- socklen_t len = 0;
-
- /* Blocking call */
- new_fd = accept(sock, (struct sockaddr *) &sun, &len);
- if (new_fd < 0) {
- if (errno != ECONNABORTED)
- PERROR("accept");
- new_fd = -errno;
- if (new_fd == -ECONNABORTED)
- new_fd = -EPIPE;
- }
- return new_fd;
-}
-
-/*
- * ustcomm_create_unix_sock
- *
- * Create an AF_UNIX local socket using pathname,
- * bind the socket upon creation, and return the fd.
- */
-int ustcomm_create_unix_sock(const char *pathname)
-{
- struct sockaddr_un sun;
- int fd, ret;
-
- /* Create server socket */
- if ((fd = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
- PERROR("socket");
- ret = -errno;
- goto error;
- }
-
- memset(&sun, 0, sizeof(sun));
- sun.sun_family = AF_UNIX;
- strncpy(sun.sun_path, pathname, sizeof(sun.sun_path));
- sun.sun_path[sizeof(sun.sun_path) - 1] = '\0';
-
- /* Unlink the old file if present */
- (void) unlink(pathname);
- ret = bind(fd, (struct sockaddr *) &sun, sizeof(sun));
- if (ret < 0) {
- PERROR("bind");
- ret = -errno;
- goto error_close;
- }
-
- return fd;
-
-error_close:
- {
- int closeret;
-
- closeret = close(fd);
- if (closeret) {
- PERROR("close");
- }
- }
-error:
- return ret;
-}
-
-/*
- * ustcomm_listen_unix_sock
- *
- * Make the socket listen using LTTNG_UST_COMM_MAX_LISTEN.
- */
-int ustcomm_listen_unix_sock(int sock)
-{
- int ret;
-
- ret = listen(sock, LTTNG_UST_COMM_MAX_LISTEN);
- if (ret < 0) {
- ret = -errno;
- PERROR("listen");
- }
-
- return ret;
-}
-
-/*
- * ustcomm_close_unix_sock
- *
- * Cleanly shut down a unix socket.
- *
- * Handles fd tracker internally.
- */
-int ustcomm_close_unix_sock(int sock)
-{
- int ret;
-
- lttng_ust_lock_fd_tracker();
- ret = close(sock);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(sock);
- } else {
- PERROR("close");
- ret = -errno;
- }
- lttng_ust_unlock_fd_tracker();
-
- return ret;
-}
-
-/*
- * ustcomm_recv_unix_sock
- *
- * Receive data of size len and put that data into
- * the buf param, using the recvmsg API.
- * Return the size of the received data.
- * Return 0 on orderly shutdown.
- */
-ssize_t ustcomm_recv_unix_sock(int sock, void *buf, size_t len)
-{
- struct msghdr msg;
- struct iovec iov[1];
- ssize_t ret = -1;
- size_t len_last;
-
- memset(&msg, 0, sizeof(msg));
-
- iov[0].iov_base = buf;
- iov[0].iov_len = len;
- msg.msg_iov = iov;
- msg.msg_iovlen = 1;
-
- do {
- len_last = iov[0].iov_len;
- ret = recvmsg(sock, &msg, 0);
- if (ret > 0) {
- iov[0].iov_base += ret;
- iov[0].iov_len -= ret;
- assert(ret <= len_last);
- }
- } while ((ret > 0 && ret < len_last) || (ret < 0 && errno == EINTR));
-
- if (ret < 0) {
- int shutret;
-
- if (errno != EPIPE && errno != ECONNRESET && errno != ECONNREFUSED)
- PERROR("recvmsg");
- ret = -errno;
- if (ret == -ECONNRESET || ret == -ECONNREFUSED)
- ret = -EPIPE;
-
- shutret = shutdown(sock, SHUT_RDWR);
- if (shutret)
- ERR("Socket shutdown error");
- } else if (ret > 0) {
- ret = len;
- }
- /* ret = 0 means an orderly shutdown. */
-
- return ret;
-}
-
-/*
- * ustcomm_send_unix_sock
- *
- * Send buf data of size len using the sendmsg API.
- * Return the size of the sent data.
- */
-ssize_t ustcomm_send_unix_sock(int sock, const void *buf, size_t len)
-{
- struct msghdr msg;
- struct iovec iov[1];
- ssize_t ret;
-
- memset(&msg, 0, sizeof(msg));
-
- iov[0].iov_base = (void *) buf;
- iov[0].iov_len = len;
- msg.msg_iov = iov;
- msg.msg_iovlen = 1;
-
- /*
-	 * Use MSG_NOSIGNAL when sending data from sessiond to
- * libust, so libust does not receive an unhandled SIGPIPE or
- * SIGURG. The sessiond receiver side can be made more resilient
- * by ignoring SIGPIPE, but we don't have this luxury on the
- * libust side.
- */
- do {
- ret = sendmsg(sock, &msg, MSG_NOSIGNAL);
- } while (ret < 0 && errno == EINTR);
-
- if (ret < 0) {
- int shutret;
-
- if (errno != EPIPE && errno != ECONNRESET)
- PERROR("sendmsg");
- ret = -errno;
- if (ret == -ECONNRESET)
- ret = -EPIPE;
-
- shutret = shutdown(sock, SHUT_RDWR);
- if (shutret)
- ERR("Socket shutdown error");
- }
-
- return ret;
-}
-
-/*
- * Send a message accompanied by fd(s) over a unix socket.
- *
- * Returns the size of data sent, or negative error value.
- */
-ssize_t ustcomm_send_fds_unix_sock(int sock, int *fds, size_t nb_fd)
-{
- struct msghdr msg;
- struct cmsghdr *cmptr;
- struct iovec iov[1];
- ssize_t ret = -1;
- unsigned int sizeof_fds = nb_fd * sizeof(int);
- char tmp[CMSG_SPACE(sizeof_fds)];
- char dummy = 0;
-
- memset(&msg, 0, sizeof(msg));
- memset(tmp, 0, CMSG_SPACE(sizeof_fds) * sizeof(char));
-
- if (nb_fd > USTCOMM_MAX_SEND_FDS)
- return -EINVAL;
-
- msg.msg_control = (caddr_t)tmp;
- msg.msg_controllen = CMSG_LEN(sizeof_fds);
-
- cmptr = CMSG_FIRSTHDR(&msg);
- if (!cmptr)
- return -EINVAL;
- cmptr->cmsg_level = SOL_SOCKET;
- cmptr->cmsg_type = SCM_RIGHTS;
- cmptr->cmsg_len = CMSG_LEN(sizeof_fds);
- memcpy(CMSG_DATA(cmptr), fds, sizeof_fds);
- /* Sum of the length of all control messages in the buffer: */
- msg.msg_controllen = cmptr->cmsg_len;
-
- iov[0].iov_base = &dummy;
- iov[0].iov_len = 1;
- msg.msg_iov = iov;
- msg.msg_iovlen = 1;
-
- do {
- ret = sendmsg(sock, &msg, MSG_NOSIGNAL);
- } while (ret < 0 && errno == EINTR);
- if (ret < 0) {
- /*
- * We consider EPIPE and ECONNRESET as expected.
- */
- if (errno != EPIPE && errno != ECONNRESET) {
- PERROR("sendmsg");
- }
- ret = -errno;
- if (ret == -ECONNRESET)
- ret = -EPIPE;
- }
- return ret;
-}
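
/*
 * Illustrative usage (not part of the original tree): passing a shm fd
 * and a wakeup fd to a peer in one message, mirroring the stream
 * reception path below. The send_stream_fds() name is hypothetical.
 */
static ssize_t send_stream_fds(int sock, int shm_fd, int wakeup_fd)
{
	int fds[2] = { shm_fd, wakeup_fd };

	/* Returns the number of bytes sent (1) or a negative errno value. */
	return ustcomm_send_fds_unix_sock(sock, fds, 2);
}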
-
-/*
- * Recv a message accompanied by fd(s) from a unix socket.
- *
- * Expect at most "nb_fd" file descriptors. Returns the number of fds
- * actually received on success.
- * Returns -EPIPE on orderly shutdown.
- */
-ssize_t ustcomm_recv_fds_unix_sock(int sock, int *fds, size_t nb_fd)
-{
- struct iovec iov[1];
- ssize_t ret = 0;
- struct cmsghdr *cmsg;
- size_t sizeof_fds = nb_fd * sizeof(int);
- char recv_fd[CMSG_SPACE(sizeof_fds)];
- struct msghdr msg;
- char dummy;
- int i;
-
- memset(&msg, 0, sizeof(msg));
-
- /* Prepare to receive the structures */
- iov[0].iov_base = &dummy;
- iov[0].iov_len = 1;
- msg.msg_iov = iov;
- msg.msg_iovlen = 1;
- msg.msg_control = recv_fd;
- msg.msg_controllen = sizeof(recv_fd);
-
- do {
- ret = recvmsg(sock, &msg, 0);
- } while (ret < 0 && errno == EINTR);
- if (ret < 0) {
- if (errno != EPIPE && errno != ECONNRESET) {
- PERROR("recvmsg fds");
- }
- ret = -errno;
- if (ret == -ECONNRESET)
- ret = -EPIPE;
- goto end;
- }
- if (ret == 0) {
- /* orderly shutdown */
- ret = -EPIPE;
- goto end;
- }
- if (ret != 1) {
- ERR("Error: Received %zd bytes, expected %d\n",
- ret, 1);
- goto end;
- }
- if (msg.msg_flags & MSG_CTRUNC) {
- ERR("Error: Control message truncated.\n");
- ret = -1;
- goto end;
- }
- cmsg = CMSG_FIRSTHDR(&msg);
- if (!cmsg) {
- ERR("Error: Invalid control message header\n");
- ret = -1;
- goto end;
- }
- if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
-		ERR("Didn't receive any fd\n");
- ret = -1;
- goto end;
- }
- if (cmsg->cmsg_len != CMSG_LEN(sizeof_fds)) {
- ERR("Error: Received %zu bytes of ancillary data, expected %zu\n",
- (size_t) cmsg->cmsg_len, (size_t) CMSG_LEN(sizeof_fds));
- ret = -1;
- goto end;
- }
-
- memcpy(fds, CMSG_DATA(cmsg), sizeof_fds);
-
- /* Set FD_CLOEXEC */
- for (i = 0; i < nb_fd; i++) {
- ret = fcntl(fds[i], F_SETFD, FD_CLOEXEC);
- if (ret < 0) {
- PERROR("fcntl failed to set FD_CLOEXEC on fd %d",
- fds[i]);
- }
- }
-
- ret = nb_fd;
-end:
- return ret;
-}
-
-int ustcomm_send_app_msg(int sock, struct ustcomm_ust_msg *lum)
-{
- ssize_t len;
-
- len = ustcomm_send_unix_sock(sock, lum, sizeof(*lum));
- switch (len) {
- case sizeof(*lum):
- break;
- default:
- if (len < 0) {
- return len;
- } else {
- ERR("incorrect message size: %zd\n", len);
- return -EINVAL;
- }
- }
- return 0;
-}
-
-int ustcomm_recv_app_reply(int sock, struct ustcomm_ust_reply *lur,
- uint32_t expected_handle, uint32_t expected_cmd)
-{
- ssize_t len;
-
- memset(lur, 0, sizeof(*lur));
- len = ustcomm_recv_unix_sock(sock, lur, sizeof(*lur));
- switch (len) {
- case 0: /* orderly shutdown */
- return -EPIPE;
- case sizeof(*lur):
- {
- int err = 0;
-
- if (lur->handle != expected_handle) {
- ERR("Unexpected result message handle: "
- "expected: %u vs received: %u\n",
- expected_handle, lur->handle);
- err = 1;
- }
- if (lur->cmd != expected_cmd) {
- ERR("Unexpected result message command "
- "expected: %u vs received: %u\n",
- expected_cmd, lur->cmd);
- err = 1;
- }
- if (err) {
- return -EINVAL;
- } else {
- return lur->ret_code;
- }
- }
- default:
- if (len >= 0) {
- ERR("incorrect message size: %zd\n", len);
- }
- return len;
- }
-}
-
-int ustcomm_send_app_cmd(int sock,
- struct ustcomm_ust_msg *lum,
- struct ustcomm_ust_reply *lur)
-{
- int ret;
-
- ret = ustcomm_send_app_msg(sock, lum);
- if (ret)
- return ret;
- ret = ustcomm_recv_app_reply(sock, lur, lum->handle, lum->cmd);
- if (ret > 0)
- return -EIO;
- return ret;
-}
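
/*
 * Illustrative round-trip (not part of the original tree): releasing an
 * object handle is the canonical "send command, validate matching
 * reply" sequence built on ustcomm_send_app_cmd(); compare
 * ustctl_release_handle() in ustctl.c later in this change.
 */
static int release_handle_example(int sock, int handle)
{
	struct ustcomm_ust_msg lum;
	struct ustcomm_ust_reply lur;

	memset(&lum, 0, sizeof(lum));
	lum.handle = handle;
	lum.cmd = LTTNG_UST_ABI_RELEASE;
	return ustcomm_send_app_cmd(sock, &lum, &lur);
}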
-
-/*
- * chan_data is allocated internally if this function returns the
- * expected var_len.
- */
-ssize_t ustcomm_recv_channel_from_sessiond(int sock,
- void **_chan_data, uint64_t var_len,
- int *_wakeup_fd)
-{
- void *chan_data;
- ssize_t len, nr_fd;
- int wakeup_fd, ret;
-
- if (var_len > LTTNG_UST_ABI_CHANNEL_DATA_MAX_LEN) {
- len = -EINVAL;
- goto error_check;
- }
- /* Receive variable length data */
- chan_data = zmalloc(var_len);
- if (!chan_data) {
- len = -ENOMEM;
- goto error_alloc;
- }
- len = ustcomm_recv_unix_sock(sock, chan_data, var_len);
- if (len != var_len) {
- goto error_recv;
- }
- /* recv wakeup fd */
- lttng_ust_lock_fd_tracker();
- nr_fd = ustcomm_recv_fds_unix_sock(sock, &wakeup_fd, 1);
- if (nr_fd <= 0) {
- lttng_ust_unlock_fd_tracker();
- if (nr_fd < 0) {
- len = nr_fd;
- goto error_recv;
- } else {
- len = -EIO;
- goto error_recv;
- }
- }
-
- ret = lttng_ust_add_fd_to_tracker(wakeup_fd);
- if (ret < 0) {
- ret = close(wakeup_fd);
- if (ret) {
- PERROR("close on wakeup_fd");
- }
- len = -EIO;
- lttng_ust_unlock_fd_tracker();
- goto error_recv;
- }
-
- *_wakeup_fd = ret;
- lttng_ust_unlock_fd_tracker();
-
- *_chan_data = chan_data;
- return len;
-
-error_recv:
- free(chan_data);
-error_alloc:
-error_check:
- return len;
-}
-
-ssize_t ustcomm_recv_event_notifier_notif_fd_from_sessiond(int sock,
- int *_event_notifier_notif_fd)
-{
- ssize_t nr_fd;
- int event_notifier_notif_fd, ret;
-
- /* Receive event_notifier notification fd */
- lttng_ust_lock_fd_tracker();
- nr_fd = ustcomm_recv_fds_unix_sock(sock, &event_notifier_notif_fd, 1);
- if (nr_fd <= 0) {
- lttng_ust_unlock_fd_tracker();
- if (nr_fd < 0) {
- ret = nr_fd;
- goto error;
- } else {
- ret = -EIO;
- goto error;
- }
- }
-
- ret = lttng_ust_add_fd_to_tracker(event_notifier_notif_fd);
- if (ret < 0) {
- ret = close(event_notifier_notif_fd);
- if (ret) {
- PERROR("close on event_notifier notif fd");
- }
- ret = -EIO;
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
-
- *_event_notifier_notif_fd = ret;
- lttng_ust_unlock_fd_tracker();
-
- ret = nr_fd;
-
-error:
- return ret;
-}
-
-int ustcomm_recv_stream_from_sessiond(int sock,
- uint64_t *memory_map_size __attribute__((unused)),
- int *shm_fd, int *wakeup_fd)
-{
- ssize_t len;
- int ret;
- int fds[2];
-
- /* recv shm fd and wakeup fd */
- lttng_ust_lock_fd_tracker();
- len = ustcomm_recv_fds_unix_sock(sock, fds, 2);
- if (len <= 0) {
- lttng_ust_unlock_fd_tracker();
- if (len < 0) {
- ret = len;
- goto error;
- } else {
- ret = -EIO;
- goto error;
- }
- }
-
- ret = lttng_ust_add_fd_to_tracker(fds[0]);
- if (ret < 0) {
- ret = close(fds[0]);
- if (ret) {
- PERROR("close on received shm_fd");
- }
- ret = -EIO;
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
- *shm_fd = ret;
-
- ret = lttng_ust_add_fd_to_tracker(fds[1]);
- if (ret < 0) {
- ret = close(*shm_fd);
- if (ret) {
- PERROR("close on shm_fd");
- }
- *shm_fd = -1;
- ret = close(fds[1]);
- if (ret) {
- PERROR("close on received wakeup_fd");
- }
- ret = -EIO;
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
- *wakeup_fd = ret;
- lttng_ust_unlock_fd_tracker();
- return 0;
-
-error:
- return ret;
-}
-
-ssize_t ustcomm_recv_counter_from_sessiond(int sock,
- void **_counter_data, uint64_t var_len)
-{
- void *counter_data;
- ssize_t len;
-
- if (var_len > LTTNG_UST_ABI_COUNTER_DATA_MAX_LEN) {
- len = -EINVAL;
- goto error_check;
- }
- /* Receive variable length data */
- counter_data = zmalloc(var_len);
- if (!counter_data) {
- len = -ENOMEM;
- goto error_alloc;
- }
- len = ustcomm_recv_unix_sock(sock, counter_data, var_len);
- if (len != var_len) {
- goto error_recv;
- }
- *_counter_data = counter_data;
- return len;
-
-error_recv:
- free(counter_data);
-error_alloc:
-error_check:
- return len;
-}
-
-int ustcomm_recv_counter_shm_from_sessiond(int sock,
- int *shm_fd)
-{
- ssize_t len;
- int ret;
- int fds[1];
-
-	/* recv shm fd */
- lttng_ust_lock_fd_tracker();
- len = ustcomm_recv_fds_unix_sock(sock, fds, 1);
- if (len <= 0) {
- lttng_ust_unlock_fd_tracker();
- if (len < 0) {
- ret = len;
- goto error;
- } else {
- ret = -EIO;
- goto error;
- }
- }
-
- ret = lttng_ust_add_fd_to_tracker(fds[0]);
- if (ret < 0) {
- ret = close(fds[0]);
- if (ret) {
- PERROR("close on received shm_fd");
- }
- ret = -EIO;
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
- *shm_fd = ret;
- lttng_ust_unlock_fd_tracker();
- return 0;
-
-error:
- return ret;
-}
-
-/*
- * Returns 0 on success, negative error value on error.
- */
-int ustcomm_send_reg_msg(int sock,
- enum ustctl_socket_type type,
- uint32_t bits_per_long,
- uint32_t uint8_t_alignment,
- uint32_t uint16_t_alignment,
- uint32_t uint32_t_alignment,
- uint32_t uint64_t_alignment,
- uint32_t long_alignment)
-{
- ssize_t len;
- struct ustctl_reg_msg reg_msg;
-
- reg_msg.magic = LTTNG_UST_ABI_COMM_MAGIC;
- reg_msg.major = LTTNG_UST_ABI_MAJOR_VERSION;
- reg_msg.minor = LTTNG_UST_ABI_MINOR_VERSION;
- reg_msg.pid = getpid();
- reg_msg.ppid = getppid();
- reg_msg.uid = getuid();
- reg_msg.gid = getgid();
- reg_msg.bits_per_long = bits_per_long;
- reg_msg.uint8_t_alignment = uint8_t_alignment;
- reg_msg.uint16_t_alignment = uint16_t_alignment;
- reg_msg.uint32_t_alignment = uint32_t_alignment;
- reg_msg.uint64_t_alignment = uint64_t_alignment;
- reg_msg.long_alignment = long_alignment;
- reg_msg.socket_type = type;
- lttng_pthread_getname_np(reg_msg.name, LTTNG_UST_ABI_PROCNAME_LEN);
- memset(reg_msg.padding, 0, sizeof(reg_msg.padding));
-
-	len = ustcomm_send_unix_sock(sock, &reg_msg, sizeof(reg_msg));
- if (len > 0 && len != sizeof(reg_msg))
- return -EIO;
- if (len < 0)
- return len;
- return 0;
-}
-
-static
-ssize_t count_one_type(const struct lttng_ust_type_common *lt)
-{
- switch (lt->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_float:
- case lttng_ust_type_string:
- return 1;
- case lttng_ust_type_enum:
- return count_one_type(lttng_ust_get_type_enum(lt)->container_type) + 1;
- case lttng_ust_type_array:
- return count_one_type(lttng_ust_get_type_array(lt)->elem_type) + 1;
- case lttng_ust_type_sequence:
- return count_one_type(lttng_ust_get_type_sequence(lt)->elem_type) + 1;
- case lttng_ust_type_struct:
- return count_fields_recursive(lttng_ust_get_type_struct(lt)->nr_fields,
- lttng_ust_get_type_struct(lt)->fields) + 1;
-
- case lttng_ust_type_dynamic:
- {
- const struct lttng_ust_event_field **choices;
- size_t nr_choices;
- int ret;
-
- ret = lttng_ust_dynamic_type_choices(&nr_choices,
- &choices);
- if (ret)
- return ret;
- /*
- * Two fields for enum, one field for variant, and
- * one field per choice.
- */
- return count_fields_recursive(nr_choices, choices) + 3;
- }
-
- default:
- return -EINVAL;
- }
- return 0;
-}
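
/*
 * Worked example (not part of the original tree): a sequence of
 * integers serializes to two ustctl_field entries (one for the
 * sequence, one for its element type), so count_one_type() returns
 * 1 + 1 = 2. An enum over an integer container likewise yields 2, and
 * a dynamic field yields 3 (two for the tag enum, one for the variant)
 * plus one entry per variant choice.
 */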
-
-static
-ssize_t count_fields_recursive(size_t nr_fields,
- const struct lttng_ust_event_field **lttng_fields)
-{
- int i;
- ssize_t ret, count = 0;
-
- for (i = 0; i < nr_fields; i++) {
- const struct lttng_ust_event_field *lf;
-
- lf = lttng_fields[i];
- /* skip 'nowrite' fields */
- if (lf->nowrite)
- continue;
- ret = count_one_type(lf->type);
- if (ret < 0)
- return ret; /* error */
- count += ret;
- }
- return count;
-}
-
-static
-ssize_t count_ctx_fields_recursive(size_t nr_fields,
- struct lttng_ust_ctx_field *lttng_fields)
-{
- int i;
- ssize_t ret, count = 0;
-
- for (i = 0; i < nr_fields; i++) {
- const struct lttng_ust_event_field *lf;
-
- lf = lttng_fields[i].event_field;
- /* skip 'nowrite' fields */
- if (lf->nowrite)
- continue;
- ret = count_one_type(lf->type);
- if (ret < 0)
- return ret; /* error */
- count += ret;
- }
- return count;
-}
-
-static
-int serialize_string_encoding(int32_t *ue,
- enum lttng_ust_string_encoding le)
-{
- switch (le) {
- case lttng_ust_string_encoding_none:
- *ue = ustctl_encode_none;
- break;
- case lttng_ust_string_encoding_UTF8:
- *ue = ustctl_encode_UTF8;
- break;
- case lttng_ust_string_encoding_ASCII:
- *ue = ustctl_encode_ASCII;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int serialize_integer_type(struct ustctl_integer_type *uit,
- const struct lttng_ust_type_integer *lit,
- enum lttng_ust_string_encoding lencoding)
-{
- int32_t encoding;
-
- uit->size = lit->size;
- uit->signedness = lit->signedness;
- uit->reverse_byte_order = lit->reverse_byte_order;
- uit->base = lit->base;
- if (serialize_string_encoding(&encoding, lencoding))
- return -EINVAL;
- uit->encoding = encoding;
- uit->alignment = lit->alignment;
- return 0;
-}
-
-static
-int serialize_dynamic_type(struct lttng_ust_session *session,
- struct ustctl_field *fields, size_t *iter_output,
- const char *field_name)
-{
- const struct lttng_ust_event_field **choices;
- char tag_field_name[LTTNG_UST_ABI_SYM_NAME_LEN];
- const struct lttng_ust_type_common *tag_type;
- const struct lttng_ust_event_field *tag_field_generic;
- struct lttng_ust_event_field tag_field = {
- .name = tag_field_name,
- .nowrite = 0,
- };
- struct ustctl_field *uf;
- size_t nr_choices, i;
- int ret;
-
- tag_field_generic = lttng_ust_dynamic_type_tag_field();
- tag_type = tag_field_generic->type;
-
- /* Serialize enum field. */
- strncpy(tag_field_name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- tag_field_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- strncat(tag_field_name,
- "_tag",
- LTTNG_UST_ABI_SYM_NAME_LEN - strlen(tag_field_name) - 1);
- tag_field.type = tag_type;
- ret = serialize_one_field(session, fields, iter_output,
- &tag_field);
- if (ret)
- return ret;
-
- /* Serialize variant field. */
- uf = &fields[*iter_output];
- ret = lttng_ust_dynamic_type_choices(&nr_choices, &choices);
- if (ret)
- return ret;
-
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- uf->type.atype = ustctl_atype_variant;
- uf->type.u.variant_nestable.nr_choices = nr_choices;
- strncpy(uf->type.u.variant_nestable.tag_name,
- tag_field_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->type.u.variant_nestable.tag_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- uf->type.u.variant_nestable.alignment = 0;
- (*iter_output)++;
-
- /* Serialize choice fields after variant. */
- for (i = 0; i < nr_choices; i++) {
- ret = serialize_one_field(session, fields,
- iter_output, choices[i]);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static
-int serialize_one_type(struct lttng_ust_session *session,
- struct ustctl_field *fields, size_t *iter_output,
- const char *field_name, const struct lttng_ust_type_common *lt,
- enum lttng_ust_string_encoding parent_encoding)
-{
- int ret;
-
- /*
- * Serializing a type (rather than a field) generates a ustctl_field
- * entry with 0-length name.
- */
-
- switch (lt->type) {
- case lttng_ust_type_integer:
- {
- struct ustctl_field *uf = &fields[*iter_output];
- struct ustctl_type *ut = &uf->type;
-
- if (field_name) {
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- } else {
- uf->name[0] = '\0';
- }
- ret = serialize_integer_type(&ut->u.integer, lttng_ust_get_type_integer(lt),
- parent_encoding);
- if (ret)
- return ret;
- ut->atype = ustctl_atype_integer;
- (*iter_output)++;
- break;
- }
- case lttng_ust_type_float:
- {
- struct ustctl_field *uf = &fields[*iter_output];
- struct ustctl_type *ut = &uf->type;
- struct ustctl_float_type *uft;
- const struct lttng_ust_type_float *lft;
-
- if (field_name) {
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- } else {
- uf->name[0] = '\0';
- }
- uft = &ut->u._float;
- lft = lttng_ust_get_type_float(lt);
- uft->exp_dig = lft->exp_dig;
- uft->mant_dig = lft->mant_dig;
- uft->alignment = lft->alignment;
- uft->reverse_byte_order = lft->reverse_byte_order;
- ut->atype = ustctl_atype_float;
- (*iter_output)++;
- break;
- }
- case lttng_ust_type_string:
- {
- struct ustctl_field *uf = &fields[*iter_output];
- struct ustctl_type *ut = &uf->type;
- int32_t encoding;
-
- if (field_name) {
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- } else {
- uf->name[0] = '\0';
- }
- ret = serialize_string_encoding(&encoding, lttng_ust_get_type_string(lt)->encoding);
- if (ret)
- return ret;
- ut->u.string.encoding = encoding;
- ut->atype = ustctl_atype_string;
- (*iter_output)++;
- break;
- }
- case lttng_ust_type_array:
- {
- struct ustctl_field *uf = &fields[*iter_output];
- struct ustctl_type *ut = &uf->type;
-
- if (field_name) {
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- } else {
- uf->name[0] = '\0';
- }
- ut->atype = ustctl_atype_array_nestable;
- ut->u.array_nestable.length = lttng_ust_get_type_array(lt)->length;
- ut->u.array_nestable.alignment = lttng_ust_get_type_array(lt)->alignment;
- (*iter_output)++;
-
- ret = serialize_one_type(session, fields, iter_output, NULL,
- lttng_ust_get_type_array(lt)->elem_type,
- lttng_ust_get_type_array(lt)->encoding);
- if (ret)
- return -EINVAL;
- break;
- }
- case lttng_ust_type_sequence:
- {
- struct ustctl_field *uf = &fields[*iter_output];
- struct ustctl_type *ut = &uf->type;
-
- if (field_name) {
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- } else {
- uf->name[0] = '\0';
- }
- ut->atype = ustctl_atype_sequence_nestable;
- strncpy(ut->u.sequence_nestable.length_name,
- lttng_ust_get_type_sequence(lt)->length_name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- ut->u.sequence_nestable.length_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- ut->u.sequence_nestable.alignment = lttng_ust_get_type_sequence(lt)->alignment;
- (*iter_output)++;
-
- ret = serialize_one_type(session, fields, iter_output, NULL,
- lttng_ust_get_type_sequence(lt)->elem_type,
- lttng_ust_get_type_sequence(lt)->encoding);
- if (ret)
- return -EINVAL;
- break;
- }
- case lttng_ust_type_dynamic:
- {
- ret = serialize_dynamic_type(session, fields, iter_output,
- field_name);
- if (ret)
- return -EINVAL;
- break;
- }
- case lttng_ust_type_struct:
- {
- struct ustctl_field *uf = &fields[*iter_output];
-
- if (field_name) {
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- } else {
- uf->name[0] = '\0';
- }
- uf->type.atype = ustctl_atype_struct_nestable;
- uf->type.u.struct_nestable.nr_fields = lttng_ust_get_type_struct(lt)->nr_fields;
- uf->type.u.struct_nestable.alignment = lttng_ust_get_type_struct(lt)->alignment;
- (*iter_output)++;
-
- ret = serialize_fields(session, fields, iter_output,
- lttng_ust_get_type_struct(lt)->nr_fields,
- lttng_ust_get_type_struct(lt)->fields);
- if (ret)
- return -EINVAL;
- break;
- }
- case lttng_ust_type_enum:
- {
- struct ustctl_field *uf = &fields[*iter_output];
- struct ustctl_type *ut = &uf->type;
-
- if (field_name) {
- strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- } else {
- uf->name[0] = '\0';
- }
- strncpy(ut->u.enum_nestable.name, lttng_ust_get_type_enum(lt)->desc->name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- ut->u.enum_nestable.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- ut->atype = ustctl_atype_enum_nestable;
- (*iter_output)++;
-
- ret = serialize_one_type(session, fields, iter_output, NULL,
- lttng_ust_get_type_enum(lt)->container_type,
- lttng_ust_string_encoding_none);
- if (ret)
- return -EINVAL;
- if (session) {
- const struct lttng_enum *_enum;
-
- _enum = lttng_ust_enum_get_from_desc(session, lttng_ust_get_type_enum(lt)->desc);
- if (!_enum)
- return -EINVAL;
- ut->u.enum_nestable.id = _enum->id;
- } else {
- ut->u.enum_nestable.id = -1ULL;
- }
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int serialize_one_field(struct lttng_ust_session *session,
- struct ustctl_field *fields, size_t *iter_output,
- const struct lttng_ust_event_field *lf)
-{
- /* skip 'nowrite' fields */
- if (lf->nowrite)
- return 0;
-
- return serialize_one_type(session, fields, iter_output, lf->name, lf->type, lttng_ust_string_encoding_none);
-}
-
-static
-int serialize_fields(struct lttng_ust_session *session,
- struct ustctl_field *ustctl_fields,
- size_t *iter_output, size_t nr_lttng_fields,
- const struct lttng_ust_event_field **lttng_fields)
-{
- int ret;
- size_t i;
-
- for (i = 0; i < nr_lttng_fields; i++) {
- ret = serialize_one_field(session, ustctl_fields,
- iter_output, lttng_fields[i]);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static
-int alloc_serialize_fields(struct lttng_ust_session *session,
- size_t *_nr_write_fields,
- struct ustctl_field **ustctl_fields,
- size_t nr_fields,
- const struct lttng_ust_event_field **lttng_fields)
-{
- struct ustctl_field *fields;
- int ret;
- size_t iter_output = 0;
- ssize_t nr_write_fields;
-
- nr_write_fields = count_fields_recursive(nr_fields, lttng_fields);
- if (nr_write_fields < 0) {
- return (int) nr_write_fields;
- }
-
- fields = zmalloc(nr_write_fields * sizeof(*fields));
- if (!fields)
- return -ENOMEM;
-
- ret = serialize_fields(session, fields, &iter_output, nr_fields,
- lttng_fields);
- if (ret)
- goto error_type;
-
- *_nr_write_fields = nr_write_fields;
- *ustctl_fields = fields;
- return 0;
-
-error_type:
- free(fields);
- return ret;
-}
-
-static
-int serialize_entries(struct ustctl_enum_entry **_entries,
- size_t nr_entries,
- const struct lttng_ust_enum_entry **lttng_entries)
-{
- struct ustctl_enum_entry *entries;
- int i;
-
- /* Serialize the entries */
- entries = zmalloc(nr_entries * sizeof(*entries));
- if (!entries)
- return -ENOMEM;
- for (i = 0; i < nr_entries; i++) {
- struct ustctl_enum_entry *uentry;
- const struct lttng_ust_enum_entry *lentry;
-
- uentry = &entries[i];
- lentry = lttng_entries[i];
-
- uentry->start.value = lentry->start.value;
- uentry->start.signedness = lentry->start.signedness;
- uentry->end.value = lentry->end.value;
- uentry->end.signedness = lentry->end.signedness;
- strncpy(uentry->string, lentry->string, LTTNG_UST_ABI_SYM_NAME_LEN);
- uentry->string[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
-
- if (lentry->options & LTTNG_UST_ENUM_ENTRY_OPTION_IS_AUTO) {
- uentry->u.extra.options |=
- USTCTL_UST_ENUM_ENTRY_OPTION_IS_AUTO;
- }
- }
- *_entries = entries;
- return 0;
-}
-
-static
-int serialize_ctx_fields(struct lttng_ust_session *session,
- size_t *_nr_write_fields,
- struct ustctl_field **ustctl_fields,
- size_t nr_fields,
- struct lttng_ust_ctx_field *lttng_fields)
-{
- struct ustctl_field *fields;
- int ret;
- size_t i, iter_output = 0;
- ssize_t nr_write_fields;
-
- nr_write_fields = count_ctx_fields_recursive(nr_fields,
- lttng_fields);
- if (nr_write_fields < 0) {
- return (int) nr_write_fields;
- }
-
- fields = zmalloc(nr_write_fields * sizeof(*fields));
- if (!fields)
- return -ENOMEM;
-
- for (i = 0; i < nr_fields; i++) {
- ret = serialize_one_field(session, fields, &iter_output,
- lttng_fields[i].event_field);
- if (ret)
- goto error_type;
- }
-
- *_nr_write_fields = nr_write_fields;
- *ustctl_fields = fields;
- return 0;
-
-error_type:
- free(fields);
- return ret;
-}
-
-/*
- * Returns 0 on success, negative error value on error.
- */
-int ustcomm_register_event(int sock,
- struct lttng_ust_session *session,
- int session_objd, /* session descriptor */
- int channel_objd, /* channel descriptor */
- const char *event_name, /* event name (input) */
- int loglevel,
- const char *signature, /* event signature (input) */
- size_t nr_fields, /* fields */
- const struct lttng_ust_event_field **lttng_fields,
- const char *model_emf_uri,
- uint32_t *id) /* event id (output) */
-{
- ssize_t len;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_event_msg m;
- } msg;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_event_reply r;
- } reply;
- size_t signature_len, fields_len, model_emf_uri_len;
- struct ustctl_field *fields = NULL;
- size_t nr_write_fields = 0;
- int ret;
-
- memset(&msg, 0, sizeof(msg));
- msg.header.notify_cmd = USTCTL_NOTIFY_CMD_EVENT;
- msg.m.session_objd = session_objd;
- msg.m.channel_objd = channel_objd;
- strncpy(msg.m.event_name, event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- msg.m.event_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- msg.m.loglevel = loglevel;
- signature_len = strlen(signature) + 1;
- msg.m.signature_len = signature_len;
-
- /* Calculate fields len, serialize fields. */
- if (nr_fields > 0) {
- ret = alloc_serialize_fields(session, &nr_write_fields, &fields,
- nr_fields, lttng_fields);
- if (ret)
- return ret;
- }
-
- fields_len = sizeof(*fields) * nr_write_fields;
- msg.m.fields_len = fields_len;
- if (model_emf_uri) {
- model_emf_uri_len = strlen(model_emf_uri) + 1;
- } else {
- model_emf_uri_len = 0;
- }
- msg.m.model_emf_uri_len = model_emf_uri_len;
-
- len = ustcomm_send_unix_sock(sock, &msg, sizeof(msg));
- if (len > 0 && len != sizeof(msg)) {
- ret = -EIO;
- goto error_fields;
- }
- if (len < 0) {
- ret = len;
- goto error_fields;
- }
-
- /* send signature */
- len = ustcomm_send_unix_sock(sock, signature, signature_len);
- if (len > 0 && len != signature_len) {
- ret = -EIO;
- goto error_fields;
- }
- if (len < 0) {
- ret = len;
- goto error_fields;
- }
-
- /* send fields */
- if (fields_len > 0) {
- len = ustcomm_send_unix_sock(sock, fields, fields_len);
- if (len > 0 && len != fields_len) {
- ret = -EIO;
- goto error_fields;
- }
- if (len < 0) {
- ret = len;
- goto error_fields;
- }
- }
- free(fields);
-
- if (model_emf_uri_len) {
- /* send model_emf_uri */
- len = ustcomm_send_unix_sock(sock, model_emf_uri,
- model_emf_uri_len);
- if (len > 0 && len != model_emf_uri_len) {
- return -EIO;
- }
- if (len < 0) {
- return len;
- }
- }
-
- /* receive reply */
- len = ustcomm_recv_unix_sock(sock, &reply, sizeof(reply));
- switch (len) {
- case 0: /* orderly shutdown */
- return -EPIPE;
- case sizeof(reply):
- if (reply.header.notify_cmd != msg.header.notify_cmd) {
- ERR("Unexpected result message command "
- "expected: %u vs received: %u\n",
- msg.header.notify_cmd, reply.header.notify_cmd);
- return -EINVAL;
- }
- if (reply.r.ret_code > 0)
- return -EINVAL;
- if (reply.r.ret_code < 0)
- return reply.r.ret_code;
- *id = reply.r.event_id;
- DBG("Sent register event notification for name \"%s\": ret_code %d, event_id %u\n",
- event_name, reply.r.ret_code, reply.r.event_id);
- return 0;
- default:
- if (len < 0) {
- /* Transport level error */
- if (errno == EPIPE || errno == ECONNRESET)
- len = -errno;
- return len;
- } else {
- ERR("incorrect message size: %zd\n", len);
- return len;
- }
- }
- /* Unreached. */
-
- /* Error path only. */
-error_fields:
- free(fields);
- return ret;
-}
-
-/*
- * Returns 0 on success, negative error value on error.
- * Returns -EPIPE or -ECONNRESET if other end has hung up.
- */
-int ustcomm_register_enum(int sock,
- int session_objd, /* session descriptor */
- const char *enum_name, /* enum name (input) */
- size_t nr_entries, /* entries */
- const struct lttng_ust_enum_entry **lttng_entries,
- uint64_t *id)
-{
- ssize_t len;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_enum_msg m;
- } msg;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_enum_reply r;
- } reply;
- size_t entries_len;
- struct ustctl_enum_entry *entries = NULL;
- int ret;
-
- memset(&msg, 0, sizeof(msg));
- msg.header.notify_cmd = USTCTL_NOTIFY_CMD_ENUM;
- msg.m.session_objd = session_objd;
- strncpy(msg.m.enum_name, enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- msg.m.enum_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
-
- /* Calculate entries len, serialize entries. */
- if (nr_entries > 0) {
- ret = serialize_entries(&entries,
- nr_entries, lttng_entries);
- if (ret)
- return ret;
- }
-
- entries_len = sizeof(*entries) * nr_entries;
- msg.m.entries_len = entries_len;
-
- len = ustcomm_send_unix_sock(sock, &msg, sizeof(msg));
- if (len > 0 && len != sizeof(msg)) {
- ret = -EIO;
- goto error_entries;
- }
- if (len < 0) {
- ret = len;
- goto error_entries;
- }
-
- /* send entries */
- if (entries_len > 0) {
- len = ustcomm_send_unix_sock(sock, entries, entries_len);
- if (len > 0 && len != entries_len) {
- ret = -EIO;
- goto error_entries;
- }
- if (len < 0) {
- ret = len;
- goto error_entries;
- }
- }
- free(entries);
- entries = NULL;
-
- /* receive reply */
- len = ustcomm_recv_unix_sock(sock, &reply, sizeof(reply));
- switch (len) {
- case 0: /* orderly shutdown */
- return -EPIPE;
- case sizeof(reply):
- if (reply.header.notify_cmd != msg.header.notify_cmd) {
- ERR("Unexpected result message command "
- "expected: %u vs received: %u\n",
- msg.header.notify_cmd, reply.header.notify_cmd);
- return -EINVAL;
- }
- if (reply.r.ret_code > 0)
- return -EINVAL;
- if (reply.r.ret_code < 0)
- return reply.r.ret_code;
- *id = reply.r.enum_id;
- DBG("Sent register enum notification for name \"%s\": ret_code %d\n",
- enum_name, reply.r.ret_code);
- return 0;
- default:
- if (len < 0) {
- /* Transport level error */
- if (errno == EPIPE || errno == ECONNRESET)
- len = -errno;
- return len;
- } else {
- ERR("incorrect message size: %zd\n", len);
- return len;
- }
- }
- return ret;
-
-error_entries:
- free(entries);
- return ret;
-}
-
-/*
- * Returns 0 on success, negative error value on error.
- * Returns -EPIPE or -ECONNRESET if other end has hung up.
- */
-int ustcomm_register_channel(int sock,
- struct lttng_ust_session *session,
- int session_objd, /* session descriptor */
- int channel_objd, /* channel descriptor */
- size_t nr_ctx_fields,
- struct lttng_ust_ctx_field *ctx_fields,
- uint32_t *chan_id, /* channel id (output) */
- int *header_type) /* header type (output) */
-{
- ssize_t len;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_channel_msg m;
- } msg;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_channel_reply r;
- } reply;
- size_t fields_len;
- struct ustctl_field *fields = NULL;
- int ret;
- size_t nr_write_fields = 0;
-
- memset(&msg, 0, sizeof(msg));
- msg.header.notify_cmd = USTCTL_NOTIFY_CMD_CHANNEL;
- msg.m.session_objd = session_objd;
- msg.m.channel_objd = channel_objd;
-
- /* Calculate fields len, serialize fields. */
- if (nr_ctx_fields > 0) {
- ret = serialize_ctx_fields(session, &nr_write_fields, &fields,
- nr_ctx_fields, ctx_fields);
- if (ret)
- return ret;
- }
-
- fields_len = sizeof(*fields) * nr_write_fields;
- msg.m.ctx_fields_len = fields_len;
- len = ustcomm_send_unix_sock(sock, &msg, sizeof(msg));
- if (len > 0 && len != sizeof(msg)) {
- free(fields);
- return -EIO;
- }
- if (len < 0) {
- free(fields);
- return len;
- }
-
- /* send fields */
- if (fields_len > 0) {
- len = ustcomm_send_unix_sock(sock, fields, fields_len);
- free(fields);
- if (len > 0 && len != fields_len) {
- return -EIO;
- }
- if (len < 0) {
- return len;
- }
- } else {
- free(fields);
- }
-
- len = ustcomm_recv_unix_sock(sock, &reply, sizeof(reply));
- switch (len) {
- case 0: /* orderly shutdown */
- return -EPIPE;
- case sizeof(reply):
- if (reply.header.notify_cmd != msg.header.notify_cmd) {
- ERR("Unexpected result message command "
- "expected: %u vs received: %u\n",
- msg.header.notify_cmd, reply.header.notify_cmd);
- return -EINVAL;
- }
- if (reply.r.ret_code > 0)
- return -EINVAL;
- if (reply.r.ret_code < 0)
- return reply.r.ret_code;
- *chan_id = reply.r.chan_id;
- switch (reply.r.header_type) {
- case 1:
- case 2:
- *header_type = reply.r.header_type;
- break;
- default:
- ERR("Unexpected channel header type %u\n",
- reply.r.header_type);
- return -EINVAL;
- }
- DBG("Sent register channel notification: chan_id %d, header_type %d\n",
- reply.r.chan_id, reply.r.header_type);
- return 0;
- default:
- if (len < 0) {
- /* Transport level error */
- if (errno == EPIPE || errno == ECONNRESET)
- len = -errno;
- return len;
- } else {
- ERR("incorrect message size: %zd\n", len);
- return len;
- }
- }
-}
-
-/*
- * Set socket receiving timeout.
- */
-int ustcomm_setsockopt_rcv_timeout(int sock, unsigned int msec)
-{
- int ret;
- struct timeval tv;
-
- tv.tv_sec = msec / 1000;
-	tv.tv_usec = (msec * 1000) % 1000000;
-
- ret = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
- if (ret < 0) {
- PERROR("setsockopt SO_RCVTIMEO");
- ret = -errno;
- }
-
- return ret;
-}
-
-/*
- * Set socket sending timeout.
- */
-int ustcomm_setsockopt_snd_timeout(int sock, unsigned int msec)
-{
- int ret;
- struct timeval tv;
-
- tv.tv_sec = msec / 1000;
- tv.tv_usec = (msec * 1000) % 1000000;
-
- ret = setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
- if (ret < 0) {
- PERROR("setsockopt SO_SNDTIMEO");
- ret = -errno;
- }
-
- return ret;
-}
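
/*
 * Worked example (not part of the original tree): msec = 2500 yields
 * tv_sec = 2500 / 1000 = 2 and tv_usec = (2500 * 1000) % 1000000 =
 * 500000, i.e. a 2.5 s timeout. Both helpers use the same split.
 */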
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Aravind HT <aravind.ht@gmail.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <assert.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/select.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <fcntl.h>
-#include <pthread.h>
-#include <signal.h>
-#include <stdbool.h>
-#include <urcu/compiler.h>
-#include <urcu/tls-compat.h>
-#include <urcu/system.h>
-
-#include <ust-fd.h>
-#include <ust-helper.h>
-#include <lttng/ust-error.h>
-#include <usterr-signal-safe.h>
-
-#include "../liblttng-ust/compat.h"
-#include "../liblttng-ust/lttng-tracer-core.h"
-
-/* Operations on the fd set. */
-#define IS_FD_VALID(fd) ((fd) >= 0 && (fd) < lttng_ust_max_fd)
-#define GET_FD_SET_FOR_FD(fd, fd_sets) (&((fd_sets)[(fd) / FD_SETSIZE]))
-#define CALC_INDEX_TO_SET(fd) ((fd) % FD_SETSIZE)
-#define IS_FD_STD(fd) (IS_FD_VALID(fd) && (fd) <= STDERR_FILENO)
-
-/* Check fd validity before calling these. */
-#define ADD_FD_TO_SET(fd, fd_sets) \
- FD_SET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-#define IS_FD_SET(fd, fd_sets) \
- FD_ISSET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
-#define DEL_FD_FROM_SET(fd, fd_sets) \
- FD_CLR(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
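/*
 * Worked example (not part of the original tree): with FD_SETSIZE =
 * 1024, fd 2500 lives in fd_sets[2500 / 1024] = fd_sets[2] at bit
 * index 2500 % 1024 = 452. Chaining fd_set slots this way lets the
 * tracker cover descriptors beyond a single FD_SETSIZE window.
 */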
-
-/*
- * Protect the lttng_fd_set. Nests within the ust_lock, and therefore
- * within the libc dl lock. As a consequence, we need to fix up the TLS
- * before nesting into this lock.
- *
- * The ust_safe_guard_fd_mutex nests within the ust_mutex. This mutex
- * is also held across fork.
- */
-static pthread_mutex_t ust_safe_guard_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Cancel state when grabbing the ust_safe_guard_fd_mutex. Saved when
- * locking, restored on unlock. Protected by ust_safe_guard_fd_mutex.
- */
-static int ust_safe_guard_saved_cancelstate;
-
-/*
- * Track whether we are within lttng-ust or application, for close
- * system call override by LD_PRELOAD library. This also tracks whether
- * we are invoking close() from a signal handler nested on an
- * application thread.
- */
-static DEFINE_URCU_TLS(int, ust_fd_mutex_nest);
-
-/* fd_set used to keep track of the fds being used by lttng-ust. */
-static fd_set *lttng_fd_set;
-static int lttng_ust_max_fd;
-static int num_fd_sets;
-static int init_done;
-
-/*
- * Force a read (implying a TLS fixup for dlopen) of TLS variables.
- */
-void lttng_ust_fixup_fd_tracker_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(ust_fd_mutex_nest)));
-}
-
-/*
- * Allocate the fd set array based on the hard limit set for this
- * process. This will be called during the constructor execution
- * and will also be called in the child after fork via lttng_ust_init.
- */
-void lttng_ust_init_fd_tracker(void)
-{
- struct rlimit rlim;
- int i;
-
- if (CMM_LOAD_SHARED(init_done))
- return;
-
- memset(&rlim, 0, sizeof(rlim));
- /* Get the current possible max number of fd for this process. */
- if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
- abort();
- /*
-	 * The FD set array size is determined using the hard limit. Even
-	 * if the process wishes to increase its limit using setrlimit, it
-	 * can only do so with the soft limit, which cannot exceed the
-	 * hard limit.
- */
- lttng_ust_max_fd = rlim.rlim_max;
- num_fd_sets = lttng_ust_max_fd / FD_SETSIZE;
- if (lttng_ust_max_fd % FD_SETSIZE)
- ++num_fd_sets;
- if (lttng_fd_set != NULL) {
- free(lttng_fd_set);
- lttng_fd_set = NULL;
- }
- lttng_fd_set = malloc(num_fd_sets * (sizeof(fd_set)));
- if (!lttng_fd_set)
- abort();
- for (i = 0; i < num_fd_sets; i++)
-		FD_ZERO(&lttng_fd_set[i]);
- CMM_STORE_SHARED(init_done, 1);
-}
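
/*
 * Worked example (not part of the original tree): with a hard limit of
 * rlim_max = 4096 and FD_SETSIZE = 1024, num_fd_sets = 4; a limit of
 * 5000 rounds up to 5 sets. Sizing from the hard limit means a later
 * setrlimit() on the soft limit can never outgrow the array.
 */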
-
-void lttng_ust_lock_fd_tracker(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_fd_mutex_nest)++) {
- /*
-		 * Ensure the compiler doesn't move the store after the close()
-		 * call, in case close() would be marked as leaf.
- */
- cmm_barrier();
- pthread_mutex_lock(&ust_safe_guard_fd_mutex);
- ust_safe_guard_saved_cancelstate = oldstate;
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-}
-
-void lttng_ust_unlock_fd_tracker(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, newstate, oldstate;
- bool restore_cancel = false;
-
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- /*
-	 * Ensure the compiler doesn't move the store before the close()
- * call, in case close() would be marked as leaf.
- */
- cmm_barrier();
- if (!--URCU_TLS(ust_fd_mutex_nest)) {
- newstate = ust_safe_guard_saved_cancelstate;
- restore_cancel = true;
- pthread_mutex_unlock(&ust_safe_guard_fd_mutex);
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (restore_cancel) {
- ret = pthread_setcancelstate(newstate, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- }
-}
-
-static int dup_std_fd(int fd)
-{
- int ret, i;
- int fd_to_close[STDERR_FILENO + 1];
- int fd_to_close_count = 0;
- int dup_cmd = F_DUPFD; /* Default command */
- int fd_valid = -1;
-
- if (!(IS_FD_STD(fd))) {
- /* Should not be here */
- ret = -1;
- goto error;
- }
-
- /* Check for FD_CLOEXEC flag */
- ret = fcntl(fd, F_GETFD);
- if (ret < 0) {
- PERROR("fcntl on f_getfd");
- ret = -1;
- goto error;
- }
-
- if (ret & FD_CLOEXEC) {
- dup_cmd = F_DUPFD_CLOEXEC;
- }
-
- /* Perform dup */
- for (i = 0; i < STDERR_FILENO + 1; i++) {
- ret = fcntl(fd, dup_cmd, 0);
- if (ret < 0) {
- PERROR("fcntl dup fd");
- goto error;
- }
-
- if (!(IS_FD_STD(ret))) {
- /* fd is outside of STD range, use it. */
- fd_valid = ret;
- /* Close fd received as argument. */
- fd_to_close[i] = fd;
- fd_to_close_count++;
- break;
- }
-
- fd_to_close[i] = ret;
- fd_to_close_count++;
- }
-
- /* Close intermediary fds */
- for (i = 0; i < fd_to_close_count; i++) {
- ret = close(fd_to_close[i]);
- if (ret) {
- PERROR("close on temporary fd: %d.", fd_to_close[i]);
- /*
-			 * Not using an abort here would yield complicated
-			 * error handling for the caller. If a failure occurs
- * here, the system is already in a bad state.
- */
- abort();
- }
- }
-
- ret = fd_valid;
-error:
- return ret;
-}
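
/*
 * Worked example (not part of the original tree): dup_std_fd(0) with
 * fds 1 and 2 also unused can first receive fds 1 and 2 from F_DUPFD;
 * the third attempt returns 3, which is kept, while the intermediaries
 * (1, 2) and the original fd 0 are closed. At most STDERR_FILENO + 1 =
 * 3 attempts are needed to escape the standard-fd range.
 */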
-
-/*
- * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
- * Has strict checking of fd validity.
- *
- * If fd <= 2, dup the fd until fd > 2. This enables us to bypass
- * problems that can be encountered if UST uses stdin, stdout, stderr
- * fds for internal use (daemon etc.). This can happen if the
- * application closes any of those file descriptors. Intermediary fds
- * are closed as needed.
- *
- * Return -1 on error.
- */
-int lttng_ust_add_fd_to_tracker(int fd)
-{
- int ret;
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
- assert(URCU_TLS(ust_fd_mutex_nest));
-
- if (IS_FD_STD(fd)) {
- ret = dup_std_fd(fd);
- if (ret < 0) {
- goto error;
- }
- fd = ret;
- }
-
-	/* Trying to add an fd which we cannot accommodate. */
-	assert(IS_FD_VALID(fd));
-	/* Setting an fd that's already set. */
- assert(!IS_FD_SET(fd, lttng_fd_set));
-
- ADD_FD_TO_SET(fd, lttng_fd_set);
- return fd;
-error:
- return ret;
-}
-
-/*
- * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
- * Has strict checking for fd validity.
- */
-void lttng_ust_delete_fd_from_tracker(int fd)
-{
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- assert(URCU_TLS(ust_fd_mutex_nest));
- /* Not a valid fd. */
- assert(IS_FD_VALID(fd));
- /* Deleting an fd which was not set. */
- assert(IS_FD_SET(fd, lttng_fd_set));
-
- DEL_FD_FROM_SET(fd, lttng_fd_set);
-}
-
-/*
- * Interface allowing applications to close arbitrary file descriptors.
- * We check if it is owned by lttng-ust, and return -1 with errno set to
- * EBADF instead of closing it if that is the case.
- */
-int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int fd))
-{
- int ret = 0;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- /*
- * If called from lttng-ust, we directly call close without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest))
- return close_cb(fd);
-
- lttng_ust_lock_fd_tracker();
- if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
- ret = -1;
- errno = EBADF;
- } else {
- ret = close_cb(fd);
- }
- lttng_ust_unlock_fd_tracker();
-
- return ret;
-}
-
-/*
- * Interface allowing applications to close arbitrary streams.
- * We check if it is owned by lttng-ust, and return -1 with errno set to
- * EBADF instead of closing it if that is the case.
- */
-int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream))
-{
- int ret = 0, fd;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- /*
- * If called from lttng-ust, we directly call fclose without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest))
- return fclose_cb(stream);
-
- fd = fileno(stream);
-
- lttng_ust_lock_fd_tracker();
- if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
- ret = -1;
- errno = EBADF;
- } else {
- ret = fclose_cb(stream);
- }
- lttng_ust_unlock_fd_tracker();
-
- return ret;
-}
-
-#ifdef __OpenBSD__
-static void set_close_success(int *p)
-{
- *p = 1;
-}
-static int test_close_success(const int *p)
-{
- return *p;
-}
-#else
-static void set_close_success(int *p __attribute__((unused)))
-{
-}
-static int test_close_success(const int *p __attribute__((unused)))
-{
- return 1;
-}
-#endif
-
-/*
- * Implement helper for closefrom() override.
- */
-int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
-{
- int ret = 0, close_success = 0, i;
-
- lttng_ust_fixup_fd_tracker_tls();
-
- /*
- * Ensure the tracker is initialized when called from
- * constructors.
- */
- lttng_ust_init_fd_tracker();
-
- if (lowfd < 0) {
- /*
-		 * NetBSD returns EBADF if fd is invalid.
- */
- errno = EBADF;
- ret = -1;
- goto end;
- }
- /*
- * If called from lttng-ust, we directly call close without
- * validating whether the FD is part of the tracked set.
- */
- if (URCU_TLS(ust_fd_mutex_nest)) {
- for (i = lowfd; i < lttng_ust_max_fd; i++) {
- if (close_cb(i) < 0) {
- switch (errno) {
- case EBADF:
- continue;
- case EINTR:
- default:
- ret = -1;
- goto end;
- }
- }
- set_close_success(&close_success);
- }
- } else {
- lttng_ust_lock_fd_tracker();
- for (i = lowfd; i < lttng_ust_max_fd; i++) {
- if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set))
- continue;
- if (close_cb(i) < 0) {
- switch (errno) {
- case EBADF:
- continue;
- case EINTR:
- default:
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- goto end;
- }
- }
- set_close_success(&close_success);
- }
- lttng_ust_unlock_fd_tracker();
- }
- if (!test_close_success(&close_success)) {
- /*
-		 * OpenBSD returns EBADF if fd is greater than all open
- * file descriptors.
- */
- ret = -1;
- errno = EBADF;
- }
-end:
- return ret;
-}
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CPPFLAGS += -I$(top_srcdir)/liblttng-ust-comm
-AM_CFLAGS += -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-ctl.la
-
-liblttng_ust_ctl_la_SOURCES = ustctl.c
-liblttng_ust_ctl_la_LDFLAGS = \
- -version-info $(LTTNG_UST_CTL_LIBRARY_VERSION)
-
-liblttng_ust_ctl_la_LIBADD = \
- $(top_builddir)/liblttng-ust-comm/liblttng-ust-comm.la \
- $(top_builddir)/liblttng-ust/liblttng-ust-support.la \
- $(top_builddir)/snprintf/libustsnprintf.la \
- -ldl
+++ /dev/null
-/*
- * SPDX-License-Identifier: GPL-2.0-only
- *
- * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
- * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <stdint.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-
-#include <lttng/ust-config.h>
-#include <lttng/ust-ctl.h>
-#include <lttng/ust-abi.h>
-#include <lttng/ust-endian.h>
-
-#include <usterr-signal-safe.h>
-#include <ust-comm.h>
-#include <ust-helper.h>
-#include "ust-compat.h"
-
-#include "../libringbuffer/backend.h"
-#include "../libringbuffer/frontend.h"
-#include "../liblttng-ust/ust-events-internal.h"
-#include "../liblttng-ust/wait.h"
-#include "../liblttng-ust/lttng-rb-clients.h"
-#include "../liblttng-ust/clock.h"
-#include "../liblttng-ust/getenv.h"
-#include "../liblttng-ust/lttng-tracer-core.h"
-#include "../liblttng-ust/lttng-counter-client.h"
-
-#include "../libcounter/shm.h"
-#include "../libcounter/smp.h"
-#include "../libcounter/counter.h"
-
-/*
- * Number of milliseconds to keep retrying metadata writes on buffer
- * full condition before failing. (10 seconds)
- */
-#define LTTNG_METADATA_TIMEOUT_MSEC 10000
-
-/*
- * Channel representation within consumer.
- */
-struct ustctl_consumer_channel {
- struct lttng_ust_channel_buffer *chan; /* lttng channel buffers */
-
- /* initial attributes */
- struct ustctl_consumer_channel_attr attr;
- int wait_fd; /* monitor close() */
- int wakeup_fd; /* monitor close() */
-};
-
-/*
- * Stream representation within consumer.
- */
-struct ustctl_consumer_stream {
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *chan;
- int shm_fd, wait_fd, wakeup_fd;
- int cpu;
- uint64_t memory_map_size;
-};
-
-#define USTCTL_COUNTER_ATTR_DIMENSION_MAX 8
-struct ustctl_counter_attr {
- enum ustctl_counter_arithmetic arithmetic;
- enum ustctl_counter_bitness bitness;
- uint32_t nr_dimensions;
- int64_t global_sum_step;
- struct ustctl_counter_dimension dimensions[USTCTL_COUNTER_ATTR_DIMENSION_MAX];
- bool coalesce_hits;
-};
-
-/*
- * Counter representation within daemon.
- */
-struct ustctl_daemon_counter {
- struct lib_counter *counter;
- const struct lttng_counter_ops *ops;
- struct ustctl_counter_attr *attr; /* initial attributes */
-};
-
-int ustctl_release_handle(int sock, int handle)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
-
- if (sock < 0 || handle < 0)
- return 0;
- memset(&lum, 0, sizeof(lum));
- lum.handle = handle;
- lum.cmd = LTTNG_UST_ABI_RELEASE;
- return ustcomm_send_app_cmd(sock, &lum, &lur);
-}
-
-/*
- * If sock is negative, it means we don't have to notify the other side
- * (e.g. application has already vanished).
- */
-int ustctl_release_object(int sock, struct lttng_ust_abi_object_data *data)
-{
- int ret;
-
- if (!data)
- return -EINVAL;
-
- switch (data->type) {
- case LTTNG_UST_ABI_OBJECT_TYPE_CHANNEL:
- if (data->u.channel.wakeup_fd >= 0) {
- ret = close(data->u.channel.wakeup_fd);
- if (ret < 0) {
- ret = -errno;
- return ret;
- }
- data->u.channel.wakeup_fd = -1;
- }
- free(data->u.channel.data);
- data->u.channel.data = NULL;
- break;
- case LTTNG_UST_ABI_OBJECT_TYPE_STREAM:
- if (data->u.stream.shm_fd >= 0) {
- ret = close(data->u.stream.shm_fd);
- if (ret < 0) {
- ret = -errno;
- return ret;
- }
- data->u.stream.shm_fd = -1;
- }
- if (data->u.stream.wakeup_fd >= 0) {
- ret = close(data->u.stream.wakeup_fd);
- if (ret < 0) {
- ret = -errno;
- return ret;
- }
- data->u.stream.wakeup_fd = -1;
- }
- break;
- case LTTNG_UST_ABI_OBJECT_TYPE_EVENT:
- case LTTNG_UST_ABI_OBJECT_TYPE_CONTEXT:
- case LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER_GROUP:
- case LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER:
- break;
- case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER:
- free(data->u.counter.data);
- data->u.counter.data = NULL;
- break;
- case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_GLOBAL:
- if (data->u.counter_global.shm_fd >= 0) {
- ret = close(data->u.counter_global.shm_fd);
- if (ret < 0) {
- ret = -errno;
- return ret;
- }
- data->u.counter_global.shm_fd = -1;
- }
- break;
- case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_CPU:
- if (data->u.counter_cpu.shm_fd >= 0) {
- ret = close(data->u.counter_cpu.shm_fd);
- if (ret < 0) {
- ret = -errno;
- return ret;
- }
- data->u.counter_cpu.shm_fd = -1;
- }
- break;
- default:
- assert(0);
- }
- return ustctl_release_handle(sock, data->handle);
-}
-
-/*
- * Send registration done packet to the application.
- */
-int ustctl_register_done(int sock)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- DBG("Sending register done command to %d", sock);
- memset(&lum, 0, sizeof(lum));
- lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
- lum.cmd = LTTNG_UST_ABI_REGISTER_DONE;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- return 0;
-}
-
-/*
- * returns session handle.
- */
-int ustctl_create_session(int sock)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret, session_handle;
-
- /* Create session */
- memset(&lum, 0, sizeof(lum));
- lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
- lum.cmd = LTTNG_UST_ABI_SESSION;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- session_handle = lur.ret_val;
- DBG("received session handle %u", session_handle);
- return session_handle;
-}
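Taken together with ustctl_register_done(), this yields a rough session-daemon-side sketch; app_sock (a connected command socket) is assumed, and the placement of the register-done step relative to session setup is simplified here:

    int session_handle, ret;

    session_handle = ustctl_create_session(app_sock);
    if (session_handle < 0)
            return session_handle;  /* negative ustcomm error code */
    /* ... create channels/events against session_handle ... */
    ret = ustctl_register_done(app_sock);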
-
-int ustctl_create_event(int sock, struct lttng_ust_abi_event *ev,
- struct lttng_ust_abi_object_data *channel_data,
- struct lttng_ust_abi_object_data **_event_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- struct lttng_ust_abi_object_data *event_data;
- int ret;
-
- if (!channel_data || !_event_data)
- return -EINVAL;
-
- event_data = zmalloc(sizeof(*event_data));
- if (!event_data)
- return -ENOMEM;
- event_data->type = LTTNG_UST_ABI_OBJECT_TYPE_EVENT;
- memset(&lum, 0, sizeof(lum));
- lum.handle = channel_data->handle;
- lum.cmd = LTTNG_UST_ABI_EVENT;
- strncpy(lum.u.event.name, ev->name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- lum.u.event.instrumentation = ev->instrumentation;
- lum.u.event.loglevel_type = ev->loglevel_type;
- lum.u.event.loglevel = ev->loglevel;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret) {
- free(event_data);
- return ret;
- }
- event_data->handle = lur.ret_val;
- DBG("received event handle %u", event_data->handle);
- *_event_data = event_data;
- return 0;
-}
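A sketch of enabling one tracepoint, assuming channel_data was obtained through ustctl_send_channel_to_ust(); the provider/event name is a placeholder:

    struct lttng_ust_abi_event ev;
    struct lttng_ust_abi_object_data *event_data;

    memset(&ev, 0, sizeof(ev));
    strncpy(ev.name, "my_provider:my_event", LTTNG_UST_ABI_SYM_NAME_LEN - 1);
    ev.instrumentation = LTTNG_UST_ABI_TRACEPOINT;
    ev.loglevel_type = LTTNG_UST_ABI_LOGLEVEL_ALL;

    ret = ustctl_create_event(sock, &ev, channel_data, &event_data);
    if (!ret)
            ret = ustctl_enable(sock, event_data);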
-
-int ustctl_add_context(int sock, struct lttng_ust_context_attr *ctx,
- struct lttng_ust_abi_object_data *obj_data,
- struct lttng_ust_abi_object_data **_context_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- struct lttng_ust_abi_object_data *context_data = NULL;
- char *buf = NULL;
- size_t len;
- int ret;
-
- if (!obj_data || !_context_data) {
- ret = -EINVAL;
- goto end;
- }
-
- context_data = zmalloc(sizeof(*context_data));
- if (!context_data) {
- ret = -ENOMEM;
- goto end;
- }
- context_data->type = LTTNG_UST_ABI_OBJECT_TYPE_CONTEXT;
- memset(&lum, 0, sizeof(lum));
- lum.handle = obj_data->handle;
- lum.cmd = LTTNG_UST_ABI_CONTEXT;
-
- lum.u.context.ctx = ctx->ctx;
- switch (ctx->ctx) {
- case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
- lum.u.context.u.perf_counter = ctx->u.perf_counter;
- break;
- case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
- {
- size_t provider_name_len = strlen(
- ctx->u.app_ctx.provider_name) + 1;
- size_t ctx_name_len = strlen(ctx->u.app_ctx.ctx_name) + 1;
-
- lum.u.context.u.app_ctx.provider_name_len = provider_name_len;
- lum.u.context.u.app_ctx.ctx_name_len = ctx_name_len;
-
- len = provider_name_len + ctx_name_len;
- buf = zmalloc(len);
- if (!buf) {
- ret = -ENOMEM;
- goto end;
- }
- memcpy(buf, ctx->u.app_ctx.provider_name,
- provider_name_len);
- memcpy(buf + provider_name_len, ctx->u.app_ctx.ctx_name,
- ctx_name_len);
- break;
- }
- default:
- break;
- }
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- goto end;
- if (buf) {
- /* send var len ctx_name */
- ret = ustcomm_send_unix_sock(sock, buf, len);
- if (ret < 0) {
- goto end;
- }
- if (ret != len) {
- ret = -EINVAL;
- goto end;
- }
- }
- ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
- if (ret < 0) {
- goto end;
- }
- context_data->handle = -1;
- DBG("Context created successfully");
- *_context_data = context_data;
- context_data = NULL;
-end:
- free(context_data);
- free(buf);
- return ret;
-}
-
-int ustctl_set_filter(int sock, struct lttng_ust_abi_filter_bytecode *bytecode,
- struct lttng_ust_abi_object_data *obj_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!obj_data)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = obj_data->handle;
- lum.cmd = LTTNG_UST_ABI_FILTER;
- lum.u.filter.data_size = bytecode->len;
- lum.u.filter.reloc_offset = bytecode->reloc_offset;
- lum.u.filter.seqnum = bytecode->seqnum;
-
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- return ret;
- /* send var len bytecode */
- ret = ustcomm_send_unix_sock(sock, bytecode->data,
- bytecode->len);
- if (ret < 0) {
- return ret;
- }
- if (ret != bytecode->len)
- return -EINVAL;
- return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
-}
-
-int ustctl_set_capture(int sock, struct lttng_ust_abi_capture_bytecode *bytecode,
- struct lttng_ust_abi_object_data *obj_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!obj_data)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = obj_data->handle;
- lum.cmd = LTTNG_UST_ABI_CAPTURE;
- lum.u.capture.data_size = bytecode->len;
- lum.u.capture.reloc_offset = bytecode->reloc_offset;
- lum.u.capture.seqnum = bytecode->seqnum;
-
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- return ret;
- /* send var len bytecode */
- ret = ustcomm_send_unix_sock(sock, bytecode->data,
- bytecode->len);
- if (ret < 0) {
- return ret;
- }
- if (ret != bytecode->len)
- return -EINVAL;
- return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
-}
-
-int ustctl_set_exclusion(int sock, struct lttng_ust_abi_event_exclusion *exclusion,
- struct lttng_ust_abi_object_data *obj_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!obj_data) {
- return -EINVAL;
- }
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = obj_data->handle;
- lum.cmd = LTTNG_UST_ABI_EXCLUSION;
- lum.u.exclusion.count = exclusion->count;
-
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret) {
- return ret;
- }
-
- /* send var len exclusion names */
- ret = ustcomm_send_unix_sock(sock,
- exclusion->names,
- exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN);
- if (ret < 0) {
- return ret;
- }
- if (ret != exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) {
- return -EINVAL;
- }
- return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
-}
-
-/* Enable event, channel and session ioctl */
-int ustctl_enable(int sock, struct lttng_ust_abi_object_data *object)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!object)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = object->handle;
- lum.cmd = LTTNG_UST_ABI_ENABLE;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- DBG("enabled handle %u", object->handle);
- return 0;
-}
-
-/* Disable event, channel and session ioctl */
-int ustctl_disable(int sock, struct lttng_ust_abi_object_data *object)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!object)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = object->handle;
- lum.cmd = LTTNG_UST_ABI_DISABLE;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
-	DBG("disabled handle %u", object->handle);
- return 0;
-}
-
-int ustctl_start_session(int sock, int handle)
-{
- struct lttng_ust_abi_object_data obj;
-
- obj.handle = handle;
- return ustctl_enable(sock, &obj);
-}
-
-int ustctl_stop_session(int sock, int handle)
-{
- struct lttng_ust_abi_object_data obj;
-
- obj.handle = handle;
- return ustctl_disable(sock, &obj);
-}
-
-int ustctl_create_event_notifier_group(int sock, int pipe_fd,
- struct lttng_ust_abi_object_data **_event_notifier_group_data)
-{
- struct lttng_ust_abi_object_data *event_notifier_group_data;
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- ssize_t len;
- int ret;
-
- if (!_event_notifier_group_data)
- return -EINVAL;
-
- event_notifier_group_data = zmalloc(sizeof(*event_notifier_group_data));
- if (!event_notifier_group_data)
- return -ENOMEM;
-
- event_notifier_group_data->type = LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER_GROUP;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
- lum.cmd = LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE;
-
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- goto error;
-
- /* Send event_notifier notification pipe. */
- len = ustcomm_send_fds_unix_sock(sock, &pipe_fd, 1);
- if (len <= 0) {
- ret = len;
- goto error;
- }
-
- ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
- if (ret)
- goto error;
-
- event_notifier_group_data->handle = lur.ret_val;
- DBG("received event_notifier group handle %d", event_notifier_group_data->handle);
-
- *_event_notifier_group_data = event_notifier_group_data;
-
- ret = 0;
- goto end;
-error:
- free(event_notifier_group_data);
-
-end:
- return ret;
-}
-
-int ustctl_create_event_notifier(int sock, struct lttng_ust_abi_event_notifier *event_notifier,
- struct lttng_ust_abi_object_data *event_notifier_group,
- struct lttng_ust_abi_object_data **_event_notifier_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- struct lttng_ust_abi_object_data *event_notifier_data;
- ssize_t len;
- int ret;
-
- if (!event_notifier_group || !_event_notifier_data)
- return -EINVAL;
-
- event_notifier_data = zmalloc(sizeof(*event_notifier_data));
- if (!event_notifier_data)
- return -ENOMEM;
-
- event_notifier_data->type = LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = event_notifier_group->handle;
- lum.cmd = LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE;
- lum.u.event_notifier.len = sizeof(*event_notifier);
-
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret) {
- free(event_notifier_data);
- return ret;
- }
- /* Send struct lttng_ust_abi_event_notifier */
- len = ustcomm_send_unix_sock(sock, event_notifier, sizeof(*event_notifier));
- if (len != sizeof(*event_notifier)) {
- free(event_notifier_data);
- if (len < 0)
- return len;
- else
- return -EIO;
- }
- ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
- if (ret) {
- free(event_notifier_data);
- return ret;
- }
- event_notifier_data->handle = lur.ret_val;
- DBG("received event_notifier handle %u", event_notifier_data->handle);
- *_event_notifier_data = event_notifier_data;
-
- return ret;
-}
-
-int ustctl_tracepoint_list(int sock)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret, tp_list_handle;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
- lum.cmd = LTTNG_UST_ABI_TRACEPOINT_LIST;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- tp_list_handle = lur.ret_val;
- DBG("received tracepoint list handle %u", tp_list_handle);
- return tp_list_handle;
-}
-
-int ustctl_tracepoint_list_get(int sock, int tp_list_handle,
- struct lttng_ust_abi_tracepoint_iter *iter)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!iter)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = tp_list_handle;
- lum.cmd = LTTNG_UST_ABI_TRACEPOINT_LIST_GET;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- DBG("received tracepoint list entry name %s loglevel %d",
- lur.u.tracepoint.name,
- lur.u.tracepoint.loglevel);
- memcpy(iter, &lur.u.tracepoint, sizeof(*iter));
- return 0;
-}
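Typical enumeration loop: open the list, pull entries until the get call returns non-zero (assumed to be -LTTNG_UST_ERR_NOENT once the list is exhausted), then release the handle:

    struct lttng_ust_abi_tracepoint_iter iter;
    int tp_handle = ustctl_tracepoint_list(sock);

    if (tp_handle < 0)
            return tp_handle;
    while (ustctl_tracepoint_list_get(sock, tp_handle, &iter) == 0)
            printf("%s (loglevel %d)\n", iter.name, iter.loglevel);
    ustctl_release_handle(sock, tp_handle);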
-
-int ustctl_tracepoint_field_list(int sock)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret, tp_field_list_handle;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
- lum.cmd = LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- tp_field_list_handle = lur.ret_val;
- DBG("received tracepoint field list handle %u", tp_field_list_handle);
- return tp_field_list_handle;
-}
-
-int ustctl_tracepoint_field_list_get(int sock, int tp_field_list_handle,
- struct lttng_ust_abi_field_iter *iter)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
- ssize_t len;
-
- if (!iter)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = tp_field_list_handle;
- lum.cmd = LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- len = ustcomm_recv_unix_sock(sock, iter, sizeof(*iter));
- if (len != sizeof(*iter)) {
- return -EINVAL;
- }
- DBG("received tracepoint field list entry event_name %s event_loglevel %d field_name %s field_type %d",
- iter->event_name,
- iter->loglevel,
- iter->field_name,
- iter->type);
- return 0;
-}
-
-int ustctl_tracer_version(int sock, struct lttng_ust_abi_tracer_version *v)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!v)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
- lum.cmd = LTTNG_UST_ABI_TRACER_VERSION;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- memcpy(v, &lur.u.version, sizeof(*v));
- DBG("received tracer version");
- return 0;
-}
-
-int ustctl_wait_quiescent(int sock)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
- lum.cmd = LTTNG_UST_ABI_WAIT_QUIESCENT;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- DBG("waited for quiescent state");
- return 0;
-}
-
-int ustctl_calibrate(int sock __attribute__((unused)),
- struct lttng_ust_abi_calibrate *calibrate)
-{
- if (!calibrate)
- return -EINVAL;
-
- return -ENOSYS;
-}
-
-int ustctl_sock_flush_buffer(int sock, struct lttng_ust_abi_object_data *object)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!object)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = object->handle;
- lum.cmd = LTTNG_UST_ABI_FLUSH_BUFFER;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
- DBG("flushed buffer handle %u", object->handle);
- return 0;
-}
-
-static
-int ustctl_send_channel(int sock,
- enum lttng_ust_abi_chan_type type,
- void *data,
- uint64_t size,
- int wakeup_fd,
- int send_fd_only)
-{
- ssize_t len;
-
- if (!send_fd_only) {
- /* Send mmap size */
- len = ustcomm_send_unix_sock(sock, &size, sizeof(size));
- if (len != sizeof(size)) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
-
- /* Send channel type */
- len = ustcomm_send_unix_sock(sock, &type, sizeof(type));
- if (len != sizeof(type)) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
- }
-
- /* Send channel data */
- len = ustcomm_send_unix_sock(sock, data, size);
- if (len != size) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
-
- /* Send wakeup fd */
- len = ustcomm_send_fds_unix_sock(sock, &wakeup_fd, 1);
- if (len <= 0) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
- return 0;
-}
-
-static
-int ustctl_send_stream(int sock,
- uint32_t stream_nr,
- uint64_t memory_map_size,
- int shm_fd, int wakeup_fd,
- int send_fd_only)
-{
- ssize_t len;
- int fds[2];
-
- if (!send_fd_only) {
- if (shm_fd < 0) {
- /* finish iteration */
- uint64_t v = -1;
-
- len = ustcomm_send_unix_sock(sock, &v, sizeof(v));
- if (len != sizeof(v)) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
- return 0;
- }
-
- /* Send mmap size */
- len = ustcomm_send_unix_sock(sock, &memory_map_size,
- sizeof(memory_map_size));
- if (len != sizeof(memory_map_size)) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
-
- /* Send stream nr */
- len = ustcomm_send_unix_sock(sock, &stream_nr,
- sizeof(stream_nr));
- if (len != sizeof(stream_nr)) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
- }
-
- /* Send shm fd and wakeup fd */
- fds[0] = shm_fd;
- fds[1] = wakeup_fd;
- len = ustcomm_send_fds_unix_sock(sock, fds, 2);
- if (len <= 0) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
- return 0;
-}
-
-int ustctl_recv_channel_from_consumer(int sock,
- struct lttng_ust_abi_object_data **_channel_data)
-{
- struct lttng_ust_abi_object_data *channel_data;
- ssize_t len;
- int wakeup_fd;
- int ret;
-
- channel_data = zmalloc(sizeof(*channel_data));
- if (!channel_data) {
- ret = -ENOMEM;
- goto error_alloc;
- }
- channel_data->type = LTTNG_UST_ABI_OBJECT_TYPE_CHANNEL;
- channel_data->handle = -1;
-
- /* recv mmap size */
- len = ustcomm_recv_unix_sock(sock, &channel_data->size,
- sizeof(channel_data->size));
- if (len != sizeof(channel_data->size)) {
- if (len < 0)
- ret = len;
- else
- ret = -EINVAL;
- goto error;
- }
-
- /* recv channel type */
- len = ustcomm_recv_unix_sock(sock, &channel_data->u.channel.type,
- sizeof(channel_data->u.channel.type));
- if (len != sizeof(channel_data->u.channel.type)) {
- if (len < 0)
- ret = len;
- else
- ret = -EINVAL;
- goto error;
- }
-
- /* recv channel data */
- channel_data->u.channel.data = zmalloc(channel_data->size);
- if (!channel_data->u.channel.data) {
- ret = -ENOMEM;
- goto error;
- }
- len = ustcomm_recv_unix_sock(sock, channel_data->u.channel.data,
- channel_data->size);
- if (len != channel_data->size) {
- if (len < 0)
- ret = len;
- else
- ret = -EINVAL;
- goto error_recv_data;
- }
- /* recv wakeup fd */
- len = ustcomm_recv_fds_unix_sock(sock, &wakeup_fd, 1);
-	if (len <= 0) {
-		if (len < 0)
-			ret = len;
-		else
-			ret = -EIO;
-		goto error_recv_data;
-	}
- channel_data->u.channel.wakeup_fd = wakeup_fd;
- *_channel_data = channel_data;
- return 0;
-
-error_recv_data:
- free(channel_data->u.channel.data);
-error:
- free(channel_data);
-error_alloc:
- return ret;
-}
-
-int ustctl_recv_stream_from_consumer(int sock,
- struct lttng_ust_abi_object_data **_stream_data)
-{
- struct lttng_ust_abi_object_data *stream_data;
- ssize_t len;
- int ret;
- int fds[2];
-
- stream_data = zmalloc(sizeof(*stream_data));
- if (!stream_data) {
- ret = -ENOMEM;
- goto error_alloc;
- }
-
- stream_data->type = LTTNG_UST_ABI_OBJECT_TYPE_STREAM;
- stream_data->handle = -1;
-
- /* recv mmap size */
- len = ustcomm_recv_unix_sock(sock, &stream_data->size,
- sizeof(stream_data->size));
- if (len != sizeof(stream_data->size)) {
- if (len < 0)
- ret = len;
- else
- ret = -EINVAL;
- goto error;
- }
- if (stream_data->size == -1) {
- ret = -LTTNG_UST_ERR_NOENT;
- goto error;
- }
-
- /* recv stream nr */
- len = ustcomm_recv_unix_sock(sock, &stream_data->u.stream.stream_nr,
- sizeof(stream_data->u.stream.stream_nr));
- if (len != sizeof(stream_data->u.stream.stream_nr)) {
- if (len < 0)
- ret = len;
- else
- ret = -EINVAL;
- goto error;
- }
-
- /* recv shm fd and wakeup fd */
- len = ustcomm_recv_fds_unix_sock(sock, fds, 2);
-	if (len <= 0) {
-		if (len < 0)
-			ret = len;
-		else
-			ret = -EIO;
-		goto error;
-	}
- stream_data->u.stream.shm_fd = fds[0];
- stream_data->u.stream.wakeup_fd = fds[1];
- *_stream_data = stream_data;
- return 0;
-
-error:
- free(stream_data);
-error_alloc:
- return ret;
-}
-
-int ustctl_send_channel_to_ust(int sock, int session_handle,
- struct lttng_ust_abi_object_data *channel_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- if (!channel_data)
- return -EINVAL;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = session_handle;
- lum.cmd = LTTNG_UST_ABI_CHANNEL;
- lum.u.channel.len = channel_data->size;
- lum.u.channel.type = channel_data->u.channel.type;
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- return ret;
-
- ret = ustctl_send_channel(sock,
- channel_data->u.channel.type,
- channel_data->u.channel.data,
- channel_data->size,
- channel_data->u.channel.wakeup_fd,
- 1);
- if (ret)
- return ret;
- ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
- if (!ret) {
- channel_data->handle = lur.ret_val;
- }
- return ret;
-}
-
-int ustctl_send_stream_to_ust(int sock,
- struct lttng_ust_abi_object_data *channel_data,
- struct lttng_ust_abi_object_data *stream_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = channel_data->handle;
- lum.cmd = LTTNG_UST_ABI_STREAM;
- lum.u.stream.len = stream_data->size;
- lum.u.stream.stream_nr = stream_data->u.stream.stream_nr;
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- return ret;
-
- assert(stream_data);
- assert(stream_data->type == LTTNG_UST_ABI_OBJECT_TYPE_STREAM);
-
- ret = ustctl_send_stream(sock,
- stream_data->u.stream.stream_nr,
- stream_data->size,
- stream_data->u.stream.shm_fd,
- stream_data->u.stream.wakeup_fd, 1);
- if (ret)
- return ret;
- return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
-}
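The four send/recv helpers above compose into the relay sequence between consumer and application. A sketch with error handling and object cleanup omitted (consumer_sock, app_sock and session_handle are assumed); the loop ends when the consumer signals the end of the stream list:

    struct lttng_ust_abi_object_data *chan_obj, *stream_obj;

    ret = ustctl_recv_channel_from_consumer(consumer_sock, &chan_obj);
    if (ret)
            return ret;
    ret = ustctl_send_channel_to_ust(app_sock, session_handle, chan_obj);
    if (ret)
            return ret;
    while (!ustctl_recv_stream_from_consumer(consumer_sock, &stream_obj)) {
            ret = ustctl_send_stream_to_ust(app_sock, chan_obj, stream_obj);
            if (ret)
                    break;
    }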
-
-int ustctl_duplicate_ust_object_data(struct lttng_ust_abi_object_data **dest,
- struct lttng_ust_abi_object_data *src)
-{
- struct lttng_ust_abi_object_data *obj;
- int ret;
-
- if (src->handle != -1) {
- ret = -EINVAL;
- goto error;
- }
-
- obj = zmalloc(sizeof(*obj));
- if (!obj) {
- ret = -ENOMEM;
- goto error;
- }
-
- obj->type = src->type;
- obj->handle = src->handle;
- obj->size = src->size;
-
- switch (obj->type) {
- case LTTNG_UST_ABI_OBJECT_TYPE_CHANNEL:
- {
- obj->u.channel.type = src->u.channel.type;
- if (src->u.channel.wakeup_fd >= 0) {
- obj->u.channel.wakeup_fd =
- dup(src->u.channel.wakeup_fd);
- if (obj->u.channel.wakeup_fd < 0) {
- ret = errno;
- goto chan_error_wakeup_fd;
- }
- } else {
- obj->u.channel.wakeup_fd =
- src->u.channel.wakeup_fd;
- }
- obj->u.channel.data = zmalloc(obj->size);
- if (!obj->u.channel.data) {
- ret = -ENOMEM;
- goto chan_error_alloc;
- }
- memcpy(obj->u.channel.data, src->u.channel.data, obj->size);
- break;
-
- chan_error_alloc:
- if (src->u.channel.wakeup_fd >= 0) {
- int closeret;
-
- closeret = close(obj->u.channel.wakeup_fd);
- if (closeret) {
- PERROR("close");
- }
- }
- chan_error_wakeup_fd:
- goto error_type;
-
- }
-
- case LTTNG_UST_ABI_OBJECT_TYPE_STREAM:
- {
- obj->u.stream.stream_nr = src->u.stream.stream_nr;
- if (src->u.stream.wakeup_fd >= 0) {
- obj->u.stream.wakeup_fd =
- dup(src->u.stream.wakeup_fd);
- if (obj->u.stream.wakeup_fd < 0) {
- ret = errno;
- goto stream_error_wakeup_fd;
- }
- } else {
- obj->u.stream.wakeup_fd =
- src->u.stream.wakeup_fd;
- }
-
- if (src->u.stream.shm_fd >= 0) {
- obj->u.stream.shm_fd =
- dup(src->u.stream.shm_fd);
- if (obj->u.stream.shm_fd < 0) {
- ret = errno;
- goto stream_error_shm_fd;
- }
- } else {
- obj->u.stream.shm_fd =
- src->u.stream.shm_fd;
- }
- break;
-
- stream_error_shm_fd:
- if (src->u.stream.wakeup_fd >= 0) {
- int closeret;
-
- closeret = close(obj->u.stream.wakeup_fd);
- if (closeret) {
- PERROR("close");
- }
- }
- stream_error_wakeup_fd:
- goto error_type;
- }
-
- case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER:
- {
- obj->u.counter.data = zmalloc(obj->size);
- if (!obj->u.counter.data) {
- ret = -ENOMEM;
- goto error_type;
- }
- memcpy(obj->u.counter.data, src->u.counter.data, obj->size);
- break;
- }
-
- case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_GLOBAL:
- {
- if (src->u.counter_global.shm_fd >= 0) {
- obj->u.counter_global.shm_fd =
- dup(src->u.counter_global.shm_fd);
- if (obj->u.counter_global.shm_fd < 0) {
- ret = errno;
- goto error_type;
- }
- }
- break;
- }
-
- case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_CPU:
- {
- obj->u.counter_cpu.cpu_nr = src->u.counter_cpu.cpu_nr;
- if (src->u.counter_cpu.shm_fd >= 0) {
- obj->u.counter_cpu.shm_fd =
- dup(src->u.counter_cpu.shm_fd);
- if (obj->u.counter_cpu.shm_fd < 0) {
- ret = errno;
- goto error_type;
- }
- }
- break;
- }
-
- default:
- ret = -EINVAL;
- goto error_type;
- }
-
- *dest = obj;
- return 0;
-
-error_type:
- free(obj);
-error:
- return ret;
-}
-
-
-/* Buffer operations */
-
-int ustctl_get_nr_stream_per_channel(void)
-{
- return num_possible_cpus();
-}
-
-struct ustctl_consumer_channel *
- ustctl_create_channel(struct ustctl_consumer_channel_attr *attr,
- const int *stream_fds, int nr_stream_fds)
-{
- struct ustctl_consumer_channel *chan;
- const char *transport_name;
- struct lttng_transport *transport;
-
- switch (attr->type) {
- case LTTNG_UST_ABI_CHAN_PER_CPU:
- if (attr->output == LTTNG_UST_ABI_MMAP) {
- if (attr->overwrite) {
- if (attr->read_timer_interval == 0) {
- transport_name = "relay-overwrite-mmap";
- } else {
- transport_name = "relay-overwrite-rt-mmap";
- }
- } else {
- if (attr->read_timer_interval == 0) {
- transport_name = "relay-discard-mmap";
- } else {
- transport_name = "relay-discard-rt-mmap";
- }
- }
- } else {
- return NULL;
- }
- break;
- case LTTNG_UST_ABI_CHAN_METADATA:
- if (attr->output == LTTNG_UST_ABI_MMAP)
- transport_name = "relay-metadata-mmap";
- else
- return NULL;
- break;
-	default:
-		/* Unknown channel type: no transport to look up. */
-		return NULL;
- }
-
- transport = lttng_ust_transport_find(transport_name);
- if (!transport) {
- DBG("LTTng transport %s not found\n",
- transport_name);
- return NULL;
- }
-
- chan = zmalloc(sizeof(*chan));
- if (!chan)
- return NULL;
-
- chan->chan = transport->ops.priv->channel_create(transport_name, NULL,
- attr->subbuf_size, attr->num_subbuf,
- attr->switch_timer_interval,
- attr->read_timer_interval,
- attr->uuid, attr->chan_id,
- stream_fds, nr_stream_fds,
- attr->blocking_timeout);
- if (!chan->chan) {
- goto chan_error;
- }
- chan->chan->ops = &transport->ops;
- memcpy(&chan->attr, attr, sizeof(chan->attr));
- chan->wait_fd = ustctl_channel_get_wait_fd(chan);
- chan->wakeup_fd = ustctl_channel_get_wakeup_fd(chan);
- return chan;
-
-chan_error:
- free(chan);
- return NULL;
-}
-
-void ustctl_destroy_channel(struct ustctl_consumer_channel *chan)
-{
- (void) ustctl_channel_close_wait_fd(chan);
- (void) ustctl_channel_close_wakeup_fd(chan);
- chan->chan->ops->priv->channel_destroy(chan->chan);
- free(chan);
-}
-
-int ustctl_send_channel_to_sessiond(int sock,
- struct ustctl_consumer_channel *channel)
-{
- struct shm_object_table *table;
-
- table = channel->chan->priv->rb_chan->handle->table;
- if (table->size <= 0)
- return -EINVAL;
- return ustctl_send_channel(sock,
- channel->attr.type,
- table->objects[0].memory_map,
- table->objects[0].memory_map_size,
- channel->wakeup_fd,
- 0);
-}
-
-int ustctl_send_stream_to_sessiond(int sock,
- struct ustctl_consumer_stream *stream)
-{
- if (!stream)
- return ustctl_send_stream(sock, -1U, -1U, -1, -1, 0);
-
- return ustctl_send_stream(sock,
- stream->cpu,
- stream->memory_map_size,
- stream->shm_fd, stream->wakeup_fd,
- 0);
-}
-
-int ustctl_write_metadata_to_channel(
- struct ustctl_consumer_channel *channel,
- const char *metadata_str, /* NOT null-terminated */
- size_t len) /* metadata length */
-{
- struct lttng_ust_lib_ring_buffer_ctx ctx;
- struct lttng_ust_channel_buffer *lttng_chan_buf = channel->chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
- const char *str = metadata_str;
- int ret = 0, waitret;
- size_t reserve_len, pos;
-
- for (pos = 0; pos < len; pos += reserve_len) {
- reserve_len = min_t(size_t,
- lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
- len - pos);
- lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
-		/*
-		 * We don't care about the metadata buffer's records lost
-		 * count, because we always retry here. Report an error if
-		 * we need to bail out after a timeout or when interrupted.
-		 */
- waitret = wait_cond_interruptible_timeout(
- ({
- ret = lttng_chan_buf->ops->event_reserve(&ctx);
- ret != -ENOBUFS || !ret;
- }),
- LTTNG_METADATA_TIMEOUT_MSEC);
- if (waitret == -ETIMEDOUT || waitret == -EINTR || ret) {
- DBG("LTTng: Failure to write metadata to buffers (%s)\n",
- waitret == -EINTR ? "interrupted" :
- (ret == -ENOBUFS ? "timeout" : "I/O error"));
- if (waitret == -EINTR)
- ret = waitret;
- goto end;
- }
- lttng_chan_buf->ops->event_write(&ctx, &str[pos], reserve_len, 1);
- lttng_chan_buf->ops->event_commit(&ctx);
- }
-end:
- return ret;
-}
-
-/*
- * Write at most one packet to the channel.
- * Returns the number of bytes written on success, < 0 on error.
- */
-ssize_t ustctl_write_one_packet_to_channel(
- struct ustctl_consumer_channel *channel,
- const char *metadata_str, /* NOT null-terminated */
- size_t len) /* metadata length */
-{
- struct lttng_ust_lib_ring_buffer_ctx ctx;
- struct lttng_ust_channel_buffer *lttng_chan_buf = channel->chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
- const char *str = metadata_str;
- ssize_t reserve_len;
- int ret;
-
- reserve_len = min_t(ssize_t,
- lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
- len);
- lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
- ret = lttng_chan_buf->ops->event_reserve(&ctx);
- if (ret != 0) {
- DBG("LTTng: event reservation failed");
- assert(ret < 0);
- reserve_len = ret;
- goto end;
- }
- lttng_chan_buf->ops->event_write(&ctx, str, reserve_len, 1);
- lttng_chan_buf->ops->event_commit(&ctx);
-
-end:
- return reserve_len;
-}
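Called in a loop, this pushes a long metadata string one packet at a time; a sketch assuming metadata_chan is the metadata channel and that each short write advances the position by the returned length:

    size_t pos = 0;

    while (pos < len) {
            ssize_t written = ustctl_write_one_packet_to_channel(
                            metadata_chan, metadata_str + pos, len - pos);
            if (written < 0)
                    return written; /* e.g. reservation failed */
            pos += written;
    }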
-
-int ustctl_channel_close_wait_fd(struct ustctl_consumer_channel *consumer_chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- int ret;
-
- chan = consumer_chan->chan->priv->rb_chan;
- ret = ring_buffer_channel_close_wait_fd(&chan->backend.config,
- chan, chan->handle);
- if (!ret)
- consumer_chan->wait_fd = -1;
- return ret;
-}
-
-int ustctl_channel_close_wakeup_fd(struct ustctl_consumer_channel *consumer_chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- int ret;
-
- chan = consumer_chan->chan->priv->rb_chan;
- ret = ring_buffer_channel_close_wakeup_fd(&chan->backend.config,
- chan, chan->handle);
- if (!ret)
- consumer_chan->wakeup_fd = -1;
- return ret;
-}
-
-int ustctl_stream_close_wait_fd(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
-
- chan = stream->chan->chan->priv->rb_chan;
- return ring_buffer_stream_close_wait_fd(&chan->backend.config,
- chan, chan->handle, stream->cpu);
-}
-
-int ustctl_stream_close_wakeup_fd(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
-
- chan = stream->chan->chan->priv->rb_chan;
- return ring_buffer_stream_close_wakeup_fd(&chan->backend.config,
- chan, chan->handle, stream->cpu);
-}
-
-struct ustctl_consumer_stream *
- ustctl_create_stream(struct ustctl_consumer_channel *channel,
- int cpu)
-{
- struct ustctl_consumer_stream *stream;
- struct lttng_ust_shm_handle *handle;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
- int shm_fd, wait_fd, wakeup_fd;
- uint64_t memory_map_size;
- struct lttng_ust_lib_ring_buffer *buf;
- int ret;
-
- if (!channel)
- return NULL;
- rb_chan = channel->chan->priv->rb_chan;
- handle = rb_chan->handle;
- if (!handle)
- return NULL;
-
- buf = channel_get_ring_buffer(&rb_chan->backend.config,
- rb_chan, cpu, handle, &shm_fd, &wait_fd,
- &wakeup_fd, &memory_map_size);
- if (!buf)
- return NULL;
- ret = lib_ring_buffer_open_read(buf, handle);
- if (ret)
- return NULL;
-
- stream = zmalloc(sizeof(*stream));
- if (!stream)
- goto alloc_error;
- stream->buf = buf;
- stream->chan = channel;
- stream->shm_fd = shm_fd;
- stream->wait_fd = wait_fd;
- stream->wakeup_fd = wakeup_fd;
- stream->memory_map_size = memory_map_size;
- stream->cpu = cpu;
- return stream;
-
-alloc_error:
- return NULL;
-}
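Consumer-side setup sketch: one channel, then one stream per possible CPU. The attribute values are illustrative only, and passing NULL stream fds assumes the channel allocates its own shared memory:

    struct ustctl_consumer_channel_attr attr;
    struct ustctl_consumer_channel *chan;
    int cpu;

    memset(&attr, 0, sizeof(attr));
    attr.type = LTTNG_UST_ABI_CHAN_PER_CPU;
    attr.output = LTTNG_UST_ABI_MMAP;
    attr.subbuf_size = 4096;
    attr.num_subbuf = 8;

    chan = ustctl_create_channel(&attr, NULL, 0);
    if (!chan)
            return -1;
    for (cpu = 0; cpu < ustctl_get_nr_stream_per_channel(); cpu++) {
            struct ustctl_consumer_stream *stream =
                            ustctl_create_stream(chan, cpu);
            if (!stream)
                    break;  /* no buffer for this cpu */
            /* ... hand the stream off to a reader ... */
    }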
-
-void ustctl_destroy_stream(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- assert(stream);
- buf = stream->buf;
- consumer_chan = stream->chan;
- (void) ustctl_stream_close_wait_fd(stream);
- (void) ustctl_stream_close_wakeup_fd(stream);
- lib_ring_buffer_release_read(buf, consumer_chan->chan->priv->rb_chan->handle);
- free(stream);
-}
-
-int ustctl_channel_get_wait_fd(struct ustctl_consumer_channel *chan)
-{
- if (!chan)
- return -EINVAL;
- return shm_get_wait_fd(chan->chan->priv->rb_chan->handle,
- &chan->chan->priv->rb_chan->handle->chan._ref);
-}
-
-int ustctl_channel_get_wakeup_fd(struct ustctl_consumer_channel *chan)
-{
- if (!chan)
- return -EINVAL;
- return shm_get_wakeup_fd(chan->chan->priv->rb_chan->handle,
- &chan->chan->priv->rb_chan->handle->chan._ref);
-}
-
-int ustctl_stream_get_wait_fd(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- return shm_get_wait_fd(consumer_chan->chan->priv->rb_chan->handle, &buf->self._ref);
-}
-
-int ustctl_stream_get_wakeup_fd(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- return shm_get_wakeup_fd(consumer_chan->chan->priv->rb_chan->handle, &buf->self._ref);
-}
-
-/* For mmap mode, readable without "get" operation */
-
-void *ustctl_get_mmap_base(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return NULL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- return shmp(consumer_chan->chan->priv->rb_chan->handle, buf->backend.memory_map);
-}
-
-/* returns the length to mmap. */
-int ustctl_get_mmap_len(struct ustctl_consumer_stream *stream,
- unsigned long *len)
-{
- struct ustctl_consumer_channel *consumer_chan;
- unsigned long mmap_buf_len;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
-
- if (!stream)
- return -EINVAL;
- consumer_chan = stream->chan;
- rb_chan = consumer_chan->chan->priv->rb_chan;
- if (rb_chan->backend.config.output != RING_BUFFER_MMAP)
- return -EINVAL;
- mmap_buf_len = rb_chan->backend.buf_size;
- if (rb_chan->backend.extra_reader_sb)
- mmap_buf_len += rb_chan->backend.subbuf_size;
- if (mmap_buf_len > INT_MAX)
- return -EFBIG;
- *len = mmap_buf_len;
- return 0;
-}
-
-/* returns the maximum size for sub-buffers. */
-int ustctl_get_max_subbuf_size(struct ustctl_consumer_stream *stream,
- unsigned long *len)
-{
- struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
-
- if (!stream)
- return -EINVAL;
- consumer_chan = stream->chan;
- rb_chan = consumer_chan->chan->priv->rb_chan;
- *len = rb_chan->backend.subbuf_size;
- return 0;
-}
-
-/*
- * For mmap mode, operate on the current packet (between get/put or
- * get_next/put_next).
- */
-
-/* returns the offset of the subbuffer belonging to the mmap reader. */
-int ustctl_get_mmap_read_offset(struct ustctl_consumer_stream *stream,
- unsigned long *off)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
- unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *barray_idx;
- struct lttng_ust_lib_ring_buffer_backend_pages *pages;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- rb_chan = consumer_chan->chan->priv->rb_chan;
- if (rb_chan->backend.config.output != RING_BUFFER_MMAP)
- return -EINVAL;
- sb_bindex = subbuffer_id_get_index(&rb_chan->backend.config,
- buf->backend.buf_rsb.id);
- barray_idx = shmp_index(rb_chan->handle, buf->backend.array,
- sb_bindex);
- if (!barray_idx)
- return -EINVAL;
- pages = shmp(rb_chan->handle, barray_idx->shmp);
- if (!pages)
- return -EINVAL;
- *off = pages->mmap_offset;
- return 0;
-}
-
-/* returns the size of the current sub-buffer, without padding (for mmap). */
-int ustctl_get_subbuf_size(struct ustctl_consumer_stream *stream,
- unsigned long *len)
-{
- struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream)
- return -EINVAL;
-
- buf = stream->buf;
- consumer_chan = stream->chan;
- rb_chan = consumer_chan->chan->priv->rb_chan;
- *len = lib_ring_buffer_get_read_data_size(&rb_chan->backend.config, buf,
- rb_chan->handle);
- return 0;
-}
-
-/* returns the size of the current sub-buffer, with padding (for mmap). */
-int ustctl_get_padded_subbuf_size(struct ustctl_consumer_stream *stream,
- unsigned long *len)
-{
- struct ustctl_consumer_channel *consumer_chan;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- rb_chan = consumer_chan->chan->priv->rb_chan;
- *len = lib_ring_buffer_get_read_data_size(&rb_chan->backend.config, buf,
- rb_chan->handle);
- *len = LTTNG_UST_PAGE_ALIGN(*len);
- return 0;
-}
-
-/* Get exclusive read access to the next sub-buffer that can be read. */
-int ustctl_get_next_subbuf(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- return lib_ring_buffer_get_next_subbuf(buf,
- consumer_chan->chan->priv->rb_chan->handle);
-}
-
-
-/* Release exclusive sub-buffer access, move consumer forward. */
-int ustctl_put_next_subbuf(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- lib_ring_buffer_put_next_subbuf(buf, consumer_chan->chan->priv->rb_chan->handle);
- return 0;
-}
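The get-next/put-next pair drives the usual mmap-mode read loop; a sketch where out_fd is a placeholder destination and each sub-buffer is copied out with its padding:

    while (ustctl_get_next_subbuf(stream) == 0) {
            unsigned long padded_len, off;
            char *base = ustctl_get_mmap_base(stream);

            if (base &&
                            ustctl_get_mmap_read_offset(stream, &off) == 0 &&
                            ustctl_get_padded_subbuf_size(stream, &padded_len) == 0)
                    (void) write(out_fd, base + off, padded_len);
            ustctl_put_next_subbuf(stream);
    }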
-
-/* snapshot */
-
-/* Get a snapshot of the current ring buffer producer and consumer positions */
-int ustctl_snapshot(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
- &buf->prod_snapshot, consumer_chan->chan->priv->rb_chan->handle);
-}
-
-/*
- * Get a snapshot of the current ring buffer producer and consumer positions
- * even if the consumed and produced positions are contained within the same
- * subbuffer.
- */
-int ustctl_snapshot_sample_positions(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- return lib_ring_buffer_snapshot_sample_positions(buf,
- &buf->cons_snapshot, &buf->prod_snapshot,
- consumer_chan->chan->priv->rb_chan->handle);
-}
-
-/* Get the consumer position (iteration start) */
-int ustctl_snapshot_get_consumed(struct ustctl_consumer_stream *stream,
- unsigned long *pos)
-{
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- *pos = buf->cons_snapshot;
- return 0;
-}
-
-/* Get the producer position (iteration end) */
-int ustctl_snapshot_get_produced(struct ustctl_consumer_stream *stream,
- unsigned long *pos)
-{
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- *pos = buf->prod_snapshot;
- return 0;
-}
-
-/* Get exclusive read access to the specified sub-buffer position */
-int ustctl_get_subbuf(struct ustctl_consumer_stream *stream,
- unsigned long *pos)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- return lib_ring_buffer_get_subbuf(buf, *pos,
- consumer_chan->chan->priv->rb_chan->handle);
-}
-
-/* Release exclusive sub-buffer access */
-int ustctl_put_subbuf(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- if (!stream)
- return -EINVAL;
- buf = stream->buf;
- consumer_chan = stream->chan;
- lib_ring_buffer_put_subbuf(buf, consumer_chan->chan->priv->rb_chan->handle);
- return 0;
-}
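The snapshot/get/put primitives combine into position-driven reading; a sketch stepping one sub-buffer at a time (using the maximum sub-buffer size as the stride is an assumption):

    unsigned long consumed, produced, pos, subbuf_size;

    if (ustctl_snapshot(stream) < 0)
            return -1;
    ustctl_snapshot_get_consumed(stream, &consumed);
    ustctl_snapshot_get_produced(stream, &produced);
    ustctl_get_max_subbuf_size(stream, &subbuf_size);
    for (pos = consumed; pos < produced; pos += subbuf_size) {
            if (ustctl_get_subbuf(stream, &pos) < 0)
                    break;  /* sub-buffer not readable */
            /* ... copy the sub-buffer out as in the mmap loop ... */
            ustctl_put_subbuf(stream);
    }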
-
-void ustctl_flush_buffer(struct ustctl_consumer_stream *stream,
- int producer_active)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- assert(stream);
- buf = stream->buf;
- consumer_chan = stream->chan;
- lib_ring_buffer_switch_slow(buf,
- producer_active ? SWITCH_ACTIVE : SWITCH_FLUSH,
- consumer_chan->chan->priv->rb_chan->handle);
-}
-
-void ustctl_clear_buffer(struct ustctl_consumer_stream *stream)
-{
- struct lttng_ust_lib_ring_buffer *buf;
- struct ustctl_consumer_channel *consumer_chan;
-
- assert(stream);
- buf = stream->buf;
- consumer_chan = stream->chan;
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
- consumer_chan->chan->priv->rb_chan->handle);
- lib_ring_buffer_clear_reader(buf, consumer_chan->chan->priv->rb_chan->handle);
-}
-
-static
-struct lttng_ust_client_lib_ring_buffer_client_cb *get_client_cb(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- const struct lttng_ust_lib_ring_buffer_config *config;
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
-
- config = &chan->backend.config;
- if (!config->cb_ptr)
- return NULL;
- client_cb = caa_container_of(config->cb_ptr,
- struct lttng_ust_client_lib_ring_buffer_client_cb,
- parent);
- return client_cb;
-}
-
-int ustctl_get_timestamp_begin(struct ustctl_consumer_stream *stream,
- uint64_t *timestamp_begin)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !timestamp_begin)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb)
- return -ENOSYS;
- return client_cb->timestamp_begin(buf, chan, timestamp_begin);
-}
-
-int ustctl_get_timestamp_end(struct ustctl_consumer_stream *stream,
- uint64_t *timestamp_end)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !timestamp_end)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb)
- return -ENOSYS;
- return client_cb->timestamp_end(buf, chan, timestamp_end);
-}
-
-int ustctl_get_events_discarded(struct ustctl_consumer_stream *stream,
- uint64_t *events_discarded)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !events_discarded)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb)
- return -ENOSYS;
- return client_cb->events_discarded(buf, chan, events_discarded);
-}
-
-int ustctl_get_content_size(struct ustctl_consumer_stream *stream,
- uint64_t *content_size)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !content_size)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb)
- return -ENOSYS;
- return client_cb->content_size(buf, chan, content_size);
-}
-
-int ustctl_get_packet_size(struct ustctl_consumer_stream *stream,
- uint64_t *packet_size)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !packet_size)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb)
- return -ENOSYS;
- return client_cb->packet_size(buf, chan, packet_size);
-}
-
-int ustctl_get_stream_id(struct ustctl_consumer_stream *stream,
- uint64_t *stream_id)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !stream_id)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb)
- return -ENOSYS;
- return client_cb->stream_id(buf, chan, stream_id);
-}
-
-int ustctl_get_current_timestamp(struct ustctl_consumer_stream *stream,
- uint64_t *ts)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !ts)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb || !client_cb->current_timestamp)
- return -ENOSYS;
- return client_cb->current_timestamp(buf, chan, ts);
-}
-
-int ustctl_get_sequence_number(struct ustctl_consumer_stream *stream,
- uint64_t *seq)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !seq)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb || !client_cb->sequence_number)
- return -ENOSYS;
- return client_cb->sequence_number(buf, chan, seq);
-}
-
-int ustctl_get_instance_id(struct ustctl_consumer_stream *stream,
- uint64_t *id)
-{
- struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer *buf;
-
- if (!stream || !id)
- return -EINVAL;
- buf = stream->buf;
- chan = stream->chan->chan->priv->rb_chan;
- client_cb = get_client_cb(buf, chan);
- if (!client_cb)
- return -ENOSYS;
- return client_cb->instance_id(buf, chan, id);
-}
-
-#ifdef HAVE_LINUX_PERF_EVENT_H
-
-int ustctl_has_perf_counters(void)
-{
- return 1;
-}
-
-#else
-
-int ustctl_has_perf_counters(void)
-{
- return 0;
-}
-
-#endif
-
-#ifdef __linux__
-/*
- * Override application pid/uid/gid with unix socket credentials. If
- * the application announced a pid matching our view, it means it is
- * within the same pid namespace, so expose the ppid provided by the
- * application.
- */
-static
-int get_cred(int sock,
- const struct ustctl_reg_msg *reg_msg,
- uint32_t *pid,
- uint32_t *ppid,
- uint32_t *uid,
- uint32_t *gid)
-{
- struct ucred ucred;
- socklen_t ucred_len = sizeof(struct ucred);
- int ret;
-
- ret = getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len);
- if (ret) {
- return -LTTNG_UST_ERR_PEERCRED;
- }
- DBG("Unix socket peercred [ pid: %u, uid: %u, gid: %u ], "
- "application registered claiming [ pid: %u, ppid: %u, uid: %u, gid: %u ]",
- ucred.pid, ucred.uid, ucred.gid,
- reg_msg->pid, reg_msg->ppid, reg_msg->uid, reg_msg->gid);
- if (!ucred.pid) {
- ERR("Unix socket credential pid=0. Refusing application in distinct, non-nested pid namespace.");
- return -LTTNG_UST_ERR_PEERCRED_PID;
- }
- *pid = ucred.pid;
- *uid = ucred.uid;
- *gid = ucred.gid;
- if (ucred.pid == reg_msg->pid) {
- *ppid = reg_msg->ppid;
- } else {
- *ppid = 0;
- }
- return 0;
-}
-#elif defined(__FreeBSD__)
-#include <sys/ucred.h>
-#include <sys/un.h>
-
-/*
- * Override application uid/gid with unix socket credentials, using the
- * first entry of cr_groups as the gid.
- * Use the pid and ppid provided by the application on registration.
- */
-static
-int get_cred(int sock,
- const struct ustctl_reg_msg *reg_msg,
- uint32_t *pid,
- uint32_t *ppid,
- uint32_t *uid,
- uint32_t *gid)
-{
- struct xucred xucred;
- socklen_t xucred_len = sizeof(struct xucred);
- int ret;
-
- ret = getsockopt(sock, SOL_SOCKET, LOCAL_PEERCRED, &xucred, &xucred_len);
- if (ret) {
- return -LTTNG_UST_ERR_PEERCRED;
- }
- if (xucred.cr_version != XUCRED_VERSION || xucred.cr_ngroups < 1) {
- return -LTTNG_UST_ERR_PEERCRED;
- }
- DBG("Unix socket peercred [ uid: %u, gid: %u ], "
- "application registered claiming [ pid: %d, ppid: %d, uid: %u, gid: %u ]",
- xucred.cr_uid, xucred.cr_groups[0],
- reg_msg->pid, reg_msg->ppid, reg_msg->uid, reg_msg->gid);
- *pid = reg_msg->pid;
- *ppid = reg_msg->ppid;
- *uid = xucred.cr_uid;
- *gid = xucred.cr_groups[0];
- return 0;
-}
-#else
-#warning "Using insecure fallback: trusting user id provided by registered applications. Please consider implementing use of unix socket credentials on your platform."
-static
-int get_cred(int sock,
- const struct ustctl_reg_msg *reg_msg,
- uint32_t *pid,
- uint32_t *ppid,
- uint32_t *uid,
- uint32_t *gid)
-{
- DBG("Application registered claiming [ pid: %u, ppid: %d, uid: %u, gid: %u ]",
- reg_msg->pid, reg_msg->ppid, reg_msg->uid, reg_msg->gid);
- *pid = reg_msg->pid;
- *ppid = reg_msg->ppid;
- *uid = reg_msg->uid;
- *gid = reg_msg->gid;
- return 0;
-}
-#endif
-
-/*
- * Returns 0 on success, negative error value on error.
- */
-int ustctl_recv_reg_msg(int sock,
- enum ustctl_socket_type *type,
- uint32_t *major,
- uint32_t *minor,
- uint32_t *pid,
- uint32_t *ppid,
- uint32_t *uid,
- uint32_t *gid,
- uint32_t *bits_per_long,
- uint32_t *uint8_t_alignment,
- uint32_t *uint16_t_alignment,
- uint32_t *uint32_t_alignment,
- uint32_t *uint64_t_alignment,
- uint32_t *long_alignment,
- int *byte_order,
- char *name)
-{
- ssize_t len;
- struct ustctl_reg_msg reg_msg;
-
-	len = ustcomm_recv_unix_sock(sock, &reg_msg, sizeof(reg_msg));
- if (len > 0 && len != sizeof(reg_msg))
- return -EIO;
- if (len == 0)
- return -EPIPE;
- if (len < 0)
- return len;
-
- if (reg_msg.magic == LTTNG_UST_ABI_COMM_MAGIC) {
- *byte_order = BYTE_ORDER == BIG_ENDIAN ?
- BIG_ENDIAN : LITTLE_ENDIAN;
- } else if (reg_msg.magic == bswap_32(LTTNG_UST_ABI_COMM_MAGIC)) {
- *byte_order = BYTE_ORDER == BIG_ENDIAN ?
- LITTLE_ENDIAN : BIG_ENDIAN;
- } else {
- return -LTTNG_UST_ERR_INVAL_MAGIC;
- }
- switch (reg_msg.socket_type) {
- case 0: *type = USTCTL_SOCKET_CMD;
- break;
- case 1: *type = USTCTL_SOCKET_NOTIFY;
- break;
- default:
- return -LTTNG_UST_ERR_INVAL_SOCKET_TYPE;
- }
- *major = reg_msg.major;
- *minor = reg_msg.minor;
- *bits_per_long = reg_msg.bits_per_long;
- *uint8_t_alignment = reg_msg.uint8_t_alignment;
- *uint16_t_alignment = reg_msg.uint16_t_alignment;
- *uint32_t_alignment = reg_msg.uint32_t_alignment;
- *uint64_t_alignment = reg_msg.uint64_t_alignment;
- *long_alignment = reg_msg.long_alignment;
- memcpy(name, reg_msg.name, LTTNG_UST_ABI_PROCNAME_LEN);
- if (reg_msg.major < LTTNG_UST_ABI_MAJOR_VERSION_OLDEST_COMPATIBLE ||
- reg_msg.major > LTTNG_UST_ABI_MAJOR_VERSION) {
- return -LTTNG_UST_ERR_UNSUP_MAJOR;
- }
-	return get_cred(sock, &reg_msg, pid, ppid, uid, gid);
-}
-
-int ustctl_recv_notify(int sock, enum ustctl_notify_cmd *notify_cmd)
-{
- struct ustcomm_notify_hdr header;
- ssize_t len;
-
- len = ustcomm_recv_unix_sock(sock, &header, sizeof(header));
- if (len > 0 && len != sizeof(header))
- return -EIO;
- if (len == 0)
- return -EPIPE;
- if (len < 0)
- return len;
- switch (header.notify_cmd) {
- case 0:
- *notify_cmd = USTCTL_NOTIFY_CMD_EVENT;
- break;
- case 1:
- *notify_cmd = USTCTL_NOTIFY_CMD_CHANNEL;
- break;
- case 2:
- *notify_cmd = USTCTL_NOTIFY_CMD_ENUM;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-/*
- * Returns 0 on success, negative error value on error.
- */
-int ustctl_recv_register_event(int sock,
- int *session_objd,
- int *channel_objd,
- char *event_name,
- int *loglevel,
- char **signature,
- size_t *nr_fields,
- struct ustctl_field **fields,
- char **model_emf_uri)
-{
- ssize_t len;
- struct ustcomm_notify_event_msg msg;
- size_t signature_len, fields_len, model_emf_uri_len;
- char *a_sign = NULL, *a_model_emf_uri = NULL;
- struct ustctl_field *a_fields = NULL;
-
- len = ustcomm_recv_unix_sock(sock, &msg, sizeof(msg));
- if (len > 0 && len != sizeof(msg))
- return -EIO;
- if (len == 0)
- return -EPIPE;
- if (len < 0)
- return len;
-
- *session_objd = msg.session_objd;
- *channel_objd = msg.channel_objd;
- strncpy(event_name, msg.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- event_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- *loglevel = msg.loglevel;
- signature_len = msg.signature_len;
- fields_len = msg.fields_len;
-
- if (fields_len % sizeof(*a_fields) != 0) {
- return -EINVAL;
- }
-
- model_emf_uri_len = msg.model_emf_uri_len;
-
- /* recv signature. contains at least \0. */
- a_sign = zmalloc(signature_len);
- if (!a_sign)
- return -ENOMEM;
- len = ustcomm_recv_unix_sock(sock, a_sign, signature_len);
- if (len > 0 && len != signature_len) {
- len = -EIO;
- goto signature_error;
- }
- if (len == 0) {
- len = -EPIPE;
- goto signature_error;
- }
- if (len < 0) {
- goto signature_error;
- }
- /* Enforce end of string */
- a_sign[signature_len - 1] = '\0';
-
- /* recv fields */
- if (fields_len) {
- a_fields = zmalloc(fields_len);
- if (!a_fields) {
- len = -ENOMEM;
- goto signature_error;
- }
- len = ustcomm_recv_unix_sock(sock, a_fields, fields_len);
- if (len > 0 && len != fields_len) {
- len = -EIO;
- goto fields_error;
- }
- if (len == 0) {
- len = -EPIPE;
- goto fields_error;
- }
- if (len < 0) {
- goto fields_error;
- }
- }
-
- if (model_emf_uri_len) {
- /* recv model_emf_uri_len */
- a_model_emf_uri = zmalloc(model_emf_uri_len);
- if (!a_model_emf_uri) {
- len = -ENOMEM;
- goto fields_error;
- }
- len = ustcomm_recv_unix_sock(sock, a_model_emf_uri,
- model_emf_uri_len);
- if (len > 0 && len != model_emf_uri_len) {
- len = -EIO;
- goto model_error;
- }
- if (len == 0) {
- len = -EPIPE;
- goto model_error;
- }
- if (len < 0) {
- goto model_error;
- }
- /* Enforce end of string */
- a_model_emf_uri[model_emf_uri_len - 1] = '\0';
- }
-
- *signature = a_sign;
- *nr_fields = fields_len / sizeof(*a_fields);
- *fields = a_fields;
- *model_emf_uri = a_model_emf_uri;
-
- return 0;
-
-model_error:
- free(a_model_emf_uri);
-fields_error:
- free(a_fields);
-signature_error:
- free(a_sign);
- return len;
-}
-
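Each variable-length payload above (signature, fields, model EMF URI) repeats
the same allocate/receive/validate steps. A hedged refactoring sketch of that
recurring pattern, reusing zmalloc() and ustcomm_recv_unix_sock() from this
file (the helper name recv_payload is hypothetical):

/*
 * Receive a length-prefixed payload into a freshly allocated buffer.
 * Returns 0 and sets *out on success, a negative errno-style value on error.
 */
static int recv_payload(int sock, size_t payload_len, void **out)
{
	ssize_t len;
	void *buf;

	buf = zmalloc(payload_len);
	if (!buf)
		return -ENOMEM;
	len = ustcomm_recv_unix_sock(sock, buf, payload_len);
	if (len > 0 && len != payload_len) {
		free(buf);
		return -EIO;	/* short read */
	}
	if (len == 0) {
		free(buf);
		return -EPIPE;	/* peer closed the socket */
	}
	if (len < 0) {
		free(buf);
		return len;	/* transport error, already negative */
	}
	*out = buf;
	return 0;
}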
-/*
- * Returns 0 on success, negative error value on error.
- */
-int ustctl_reply_register_event(int sock,
- uint32_t id,
- int ret_code)
-{
- ssize_t len;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_event_reply r;
- } reply;
-
- memset(&reply, 0, sizeof(reply));
- reply.header.notify_cmd = USTCTL_NOTIFY_CMD_EVENT;
- reply.r.ret_code = ret_code;
- reply.r.event_id = id;
- len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
- if (len > 0 && len != sizeof(reply))
- return -EIO;
- if (len < 0)
- return len;
- return 0;
-}
-
-/*
- * Returns 0 on success, negative UST or system error value on error.
- */
-int ustctl_recv_register_enum(int sock,
- int *session_objd,
- char *enum_name,
- struct ustctl_enum_entry **entries,
- size_t *nr_entries)
-{
- ssize_t len;
- struct ustcomm_notify_enum_msg msg;
- size_t entries_len;
- struct ustctl_enum_entry *a_entries = NULL;
-
- len = ustcomm_recv_unix_sock(sock, &msg, sizeof(msg));
- if (len > 0 && len != sizeof(msg))
- return -EIO;
- if (len == 0)
- return -EPIPE;
- if (len < 0)
- return len;
-
- *session_objd = msg.session_objd;
- strncpy(enum_name, msg.enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- enum_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- entries_len = msg.entries_len;
-
- if (entries_len % sizeof(*a_entries) != 0) {
- return -EINVAL;
- }
-
- /* recv entries */
- if (entries_len) {
- a_entries = zmalloc(entries_len);
- if (!a_entries)
- return -ENOMEM;
- len = ustcomm_recv_unix_sock(sock, a_entries, entries_len);
- if (len > 0 && len != entries_len) {
- len = -EIO;
- goto entries_error;
- }
- if (len == 0) {
- len = -EPIPE;
- goto entries_error;
- }
- if (len < 0) {
- goto entries_error;
- }
- }
- *nr_entries = entries_len / sizeof(*a_entries);
- *entries = a_entries;
-
- return 0;
-
-entries_error:
- free(a_entries);
- return len;
-}
-
-/*
- * Returns 0 on success, negative error value on error.
- */
-int ustctl_reply_register_enum(int sock,
- uint64_t id,
- int ret_code)
-{
- ssize_t len;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_enum_reply r;
- } reply;
-
- memset(&reply, 0, sizeof(reply));
- reply.header.notify_cmd = USTCTL_NOTIFY_CMD_ENUM;
- reply.r.ret_code = ret_code;
- reply.r.enum_id = id;
- len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
- if (len > 0 && len != sizeof(reply))
- return -EIO;
- if (len < 0)
- return len;
- return 0;
-}
-
-/*
- * Returns 0 on success, negative UST or system error value on error.
- */
-int ustctl_recv_register_channel(int sock,
- int *session_objd, /* session descriptor (output) */
- int *channel_objd, /* channel descriptor (output) */
- size_t *nr_fields,
- struct ustctl_field **fields)
-{
- ssize_t len;
- struct ustcomm_notify_channel_msg msg;
- size_t fields_len;
- struct ustctl_field *a_fields;
-
- len = ustcomm_recv_unix_sock(sock, &msg, sizeof(msg));
- if (len > 0 && len != sizeof(msg))
- return -EIO;
- if (len == 0)
- return -EPIPE;
- if (len < 0)
- return len;
-
- *session_objd = msg.session_objd;
- *channel_objd = msg.channel_objd;
- fields_len = msg.ctx_fields_len;
-
- if (fields_len % sizeof(*a_fields) != 0) {
- return -EINVAL;
- }
-
- /* recv fields */
- if (fields_len) {
- a_fields = zmalloc(fields_len);
- if (!a_fields) {
- len = -ENOMEM;
- goto alloc_error;
- }
- len = ustcomm_recv_unix_sock(sock, a_fields, fields_len);
- if (len > 0 && len != fields_len) {
- len = -EIO;
- goto fields_error;
- }
- if (len == 0) {
- len = -EPIPE;
- goto fields_error;
- }
- if (len < 0) {
- goto fields_error;
- }
- *fields = a_fields;
- } else {
- *fields = NULL;
- }
- *nr_fields = fields_len / sizeof(*a_fields);
- return 0;
-
-fields_error:
- free(a_fields);
-alloc_error:
- return len;
-}
-
-/*
- * Returns 0 on success, negative error value on error.
- */
-int ustctl_reply_register_channel(int sock,
- uint32_t chan_id,
- enum ustctl_channel_header header_type,
- int ret_code)
-{
- ssize_t len;
- struct {
- struct ustcomm_notify_hdr header;
- struct ustcomm_notify_channel_reply r;
- } reply;
-
- memset(&reply, 0, sizeof(reply));
- reply.header.notify_cmd = USTCTL_NOTIFY_CMD_CHANNEL;
- reply.r.ret_code = ret_code;
- reply.r.chan_id = chan_id;
- switch (header_type) {
- case USTCTL_CHANNEL_HEADER_COMPACT:
- reply.r.header_type = 1;
- break;
- case USTCTL_CHANNEL_HEADER_LARGE:
- reply.r.header_type = 2;
- break;
- default:
- reply.r.header_type = 0;
- break;
- }
- len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
- if (len > 0 && len != sizeof(reply))
- return -EIO;
- if (len < 0)
- return len;
- return 0;
-}
-
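Together, ustctl_recv_notify() and the recv/reply pairs above form a small
request loop on an application's notify socket. A hedged sketch of how a
session daemon might dispatch one command (event-id assignment and the
channel/enum branches are elided; this is not the actual lttng-sessiond
logic):

static int handle_one_notify(int sock)
{
	enum ustctl_notify_cmd cmd;
	int ret;

	ret = ustctl_recv_notify(sock, &cmd);
	if (ret)
		return ret;
	switch (cmd) {
	case USTCTL_NOTIFY_CMD_EVENT: {
		int session_objd, channel_objd, loglevel;
		char name[LTTNG_UST_ABI_SYM_NAME_LEN];
		char *signature, *model_emf_uri;
		size_t nr_fields;
		struct ustctl_field *fields;

		ret = ustctl_recv_register_event(sock, &session_objd,
				&channel_objd, name, &loglevel, &signature,
				&nr_fields, &fields, &model_emf_uri);
		if (ret)
			return ret;
		/* ... look up the session, assign an event id ... */
		ret = ustctl_reply_register_event(sock, 0 /* event id */, 0);
		free(signature);
		free(fields);
		free(model_emf_uri);
		return ret;
	}
	case USTCTL_NOTIFY_CMD_CHANNEL:
	case USTCTL_NOTIFY_CMD_ENUM:
		/* Analogous recv/reply pairs, omitted for brevity. */
		return -ENOSYS;
	default:
		return -EINVAL;
	}
}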
-/* Regenerate the statedump. */
-int ustctl_regenerate_statedump(int sock, int handle)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
-
- memset(&lum, 0, sizeof(lum));
- lum.handle = handle;
- lum.cmd = LTTNG_UST_ABI_SESSION_STATEDUMP;
- ret = ustcomm_send_app_cmd(sock, &lum, &lur);
- if (ret)
- return ret;
-	DBG("Regenerated statedump for handle %d", handle);
- return 0;
-}
-
-/* counter operations */
-
-int ustctl_get_nr_cpu_per_counter(void)
-{
- return lttng_counter_num_possible_cpus();
-}
-
-struct ustctl_daemon_counter *
- ustctl_create_counter(size_t nr_dimensions,
- const struct ustctl_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- enum ustctl_counter_bitness bitness,
- enum ustctl_counter_arithmetic arithmetic,
- uint32_t alloc_flags,
- bool coalesce_hits)
-{
- const char *transport_name;
- struct ustctl_daemon_counter *counter;
- struct lttng_counter_transport *transport;
- struct lttng_counter_dimension ust_dim[LTTNG_COUNTER_DIMENSION_MAX];
- size_t i;
-
- if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
- return NULL;
- /* Currently, only per-cpu allocation is supported. */
- switch (alloc_flags) {
- case USTCTL_COUNTER_ALLOC_PER_CPU:
- break;
-
- case USTCTL_COUNTER_ALLOC_PER_CPU | USTCTL_COUNTER_ALLOC_GLOBAL:
- case USTCTL_COUNTER_ALLOC_GLOBAL:
- default:
- return NULL;
- }
- switch (bitness) {
- case USTCTL_COUNTER_BITNESS_32:
- switch (arithmetic) {
- case USTCTL_COUNTER_ARITHMETIC_MODULAR:
- transport_name = "counter-per-cpu-32-modular";
- break;
- case USTCTL_COUNTER_ARITHMETIC_SATURATION:
- transport_name = "counter-per-cpu-32-saturation";
- break;
- default:
- return NULL;
- }
- break;
- case USTCTL_COUNTER_BITNESS_64:
- switch (arithmetic) {
- case USTCTL_COUNTER_ARITHMETIC_MODULAR:
- transport_name = "counter-per-cpu-64-modular";
- break;
- case USTCTL_COUNTER_ARITHMETIC_SATURATION:
- transport_name = "counter-per-cpu-64-saturation";
- break;
- default:
- return NULL;
- }
- break;
- default:
- return NULL;
- }
-
- transport = lttng_counter_transport_find(transport_name);
- if (!transport) {
- DBG("LTTng transport %s not found\n",
- transport_name);
- return NULL;
- }
-
- counter = zmalloc(sizeof(*counter));
- if (!counter)
- return NULL;
- counter->attr = zmalloc(sizeof(*counter->attr));
- if (!counter->attr)
- goto free_counter;
- counter->attr->bitness = bitness;
- counter->attr->arithmetic = arithmetic;
- counter->attr->nr_dimensions = nr_dimensions;
- counter->attr->global_sum_step = global_sum_step;
- counter->attr->coalesce_hits = coalesce_hits;
- for (i = 0; i < nr_dimensions; i++)
- counter->attr->dimensions[i] = dimensions[i];
-
- for (i = 0; i < nr_dimensions; i++) {
- ust_dim[i].size = dimensions[i].size;
- ust_dim[i].underflow_index = dimensions[i].underflow_index;
- ust_dim[i].overflow_index = dimensions[i].overflow_index;
- ust_dim[i].has_underflow = dimensions[i].has_underflow;
- ust_dim[i].has_overflow = dimensions[i].has_overflow;
- }
- counter->counter = transport->ops.counter_create(nr_dimensions,
- ust_dim, global_sum_step, global_counter_fd,
- nr_counter_cpu_fds, counter_cpu_fds, true);
- if (!counter->counter)
- goto free_attr;
- counter->ops = &transport->ops;
- return counter;
-
-free_attr:
- free(counter->attr);
-free_counter:
- free(counter);
- return NULL;
-}
-
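As a usage sketch, a daemon could allocate a one-dimensional per-CPU 64-bit
modular counter as follows (the dimension size is illustrative; unset index
fields default to zero):

static struct ustctl_daemon_counter *make_event_counter(void)
{
	struct ustctl_counter_dimension dim = {
		.size = 4096,	/* illustrative number of slots */
		.has_underflow = false,
		.has_overflow = false,
	};

	return ustctl_create_counter(1, &dim,
			0,		/* global_sum_step */
			-1,		/* global_counter_fd: none */
			0, NULL,	/* no preallocated per-cpu fds */
			USTCTL_COUNTER_BITNESS_64,
			USTCTL_COUNTER_ARITHMETIC_MODULAR,
			USTCTL_COUNTER_ALLOC_PER_CPU,
			false);		/* coalesce_hits */
}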
-int ustctl_create_counter_data(struct ustctl_daemon_counter *counter,
- struct lttng_ust_abi_object_data **_counter_data)
-{
- struct lttng_ust_abi_object_data *counter_data;
- struct lttng_ust_abi_counter_conf counter_conf = {0};
- size_t i;
- int ret;
-
- switch (counter->attr->arithmetic) {
- case USTCTL_COUNTER_ARITHMETIC_MODULAR:
- counter_conf.arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR;
- break;
- case USTCTL_COUNTER_ARITHMETIC_SATURATION:
- counter_conf.arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_SATURATION;
- break;
- default:
- return -EINVAL;
- }
- switch (counter->attr->bitness) {
- case USTCTL_COUNTER_BITNESS_32:
- counter_conf.bitness = LTTNG_UST_ABI_COUNTER_BITNESS_32;
- break;
- case USTCTL_COUNTER_BITNESS_64:
- counter_conf.bitness = LTTNG_UST_ABI_COUNTER_BITNESS_64;
- break;
- default:
- return -EINVAL;
- }
- counter_conf.number_dimensions = counter->attr->nr_dimensions;
- counter_conf.global_sum_step = counter->attr->global_sum_step;
- counter_conf.coalesce_hits = counter->attr->coalesce_hits;
- for (i = 0; i < counter->attr->nr_dimensions; i++) {
- counter_conf.dimensions[i].size = counter->attr->dimensions[i].size;
- counter_conf.dimensions[i].underflow_index = counter->attr->dimensions[i].underflow_index;
- counter_conf.dimensions[i].overflow_index = counter->attr->dimensions[i].overflow_index;
- counter_conf.dimensions[i].has_underflow = counter->attr->dimensions[i].has_underflow;
- counter_conf.dimensions[i].has_overflow = counter->attr->dimensions[i].has_overflow;
- }
-
- counter_data = zmalloc(sizeof(*counter_data));
- if (!counter_data) {
- ret = -ENOMEM;
- goto error_alloc;
- }
- counter_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER;
- counter_data->handle = -1;
-
- counter_data->size = sizeof(counter_conf);
- counter_data->u.counter.data = zmalloc(sizeof(counter_conf));
- if (!counter_data->u.counter.data) {
- ret = -ENOMEM;
- goto error_alloc_data;
- }
-
- memcpy(counter_data->u.counter.data, &counter_conf, sizeof(counter_conf));
- *_counter_data = counter_data;
-
- return 0;
-
-error_alloc_data:
- free(counter_data);
-error_alloc:
- return ret;
-}
-
-int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter,
- struct lttng_ust_abi_object_data **_counter_global_data)
-{
- struct lttng_ust_abi_object_data *counter_global_data;
- int ret, fd;
- size_t len;
-
- if (lttng_counter_get_global_shm(counter->counter, &fd, &len))
- return -EINVAL;
- counter_global_data = zmalloc(sizeof(*counter_global_data));
- if (!counter_global_data) {
- ret = -ENOMEM;
- goto error_alloc;
- }
- counter_global_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_GLOBAL;
- counter_global_data->handle = -1;
- counter_global_data->size = len;
- counter_global_data->u.counter_global.shm_fd = fd;
- *_counter_global_data = counter_global_data;
- return 0;
-
-error_alloc:
- return ret;
-}
-
-int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu,
- struct lttng_ust_abi_object_data **_counter_cpu_data)
-{
- struct lttng_ust_abi_object_data *counter_cpu_data;
- int ret, fd;
- size_t len;
-
- if (lttng_counter_get_cpu_shm(counter->counter, cpu, &fd, &len))
- return -EINVAL;
- counter_cpu_data = zmalloc(sizeof(*counter_cpu_data));
- if (!counter_cpu_data) {
- ret = -ENOMEM;
- goto error_alloc;
- }
- counter_cpu_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_CPU;
- counter_cpu_data->handle = -1;
- counter_cpu_data->size = len;
- counter_cpu_data->u.counter_cpu.shm_fd = fd;
- counter_cpu_data->u.counter_cpu.cpu_nr = cpu;
- *_counter_cpu_data = counter_cpu_data;
- return 0;
-
-error_alloc:
- return ret;
-}
-
-void ustctl_destroy_counter(struct ustctl_daemon_counter *counter)
-{
- counter->ops->counter_destroy(counter->counter);
- free(counter->attr);
- free(counter);
-}
-
-int ustctl_send_counter_data_to_ust(int sock, int parent_handle,
- struct lttng_ust_abi_object_data *counter_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret;
- size_t size;
- ssize_t len;
-
- if (!counter_data)
- return -EINVAL;
-
- size = counter_data->size;
- memset(&lum, 0, sizeof(lum));
- lum.handle = parent_handle;
- lum.cmd = LTTNG_UST_ABI_COUNTER;
- lum.u.counter.len = size;
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- return ret;
-
- /* Send counter data */
- len = ustcomm_send_unix_sock(sock, counter_data->u.counter.data, size);
- if (len != size) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
-
- ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
- if (!ret) {
- counter_data->handle = lur.ret_val;
- }
- return ret;
-}
-
-int ustctl_send_counter_global_data_to_ust(int sock,
- struct lttng_ust_abi_object_data *counter_data,
- struct lttng_ust_abi_object_data *counter_global_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret, shm_fd[1];
- size_t size;
- ssize_t len;
-
- if (!counter_data || !counter_global_data)
- return -EINVAL;
-
- size = counter_global_data->size;
- memset(&lum, 0, sizeof(lum));
- lum.handle = counter_data->handle; /* parent handle */
- lum.cmd = LTTNG_UST_ABI_COUNTER_GLOBAL;
- lum.u.counter_global.len = size;
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- return ret;
-
- shm_fd[0] = counter_global_data->u.counter_global.shm_fd;
- len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
- if (len <= 0) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
-
- ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
- if (!ret) {
- counter_global_data->handle = lur.ret_val;
- }
- return ret;
-}
-
-int ustctl_send_counter_cpu_data_to_ust(int sock,
- struct lttng_ust_abi_object_data *counter_data,
- struct lttng_ust_abi_object_data *counter_cpu_data)
-{
- struct ustcomm_ust_msg lum;
- struct ustcomm_ust_reply lur;
- int ret, shm_fd[1];
- size_t size;
- ssize_t len;
-
- if (!counter_data || !counter_cpu_data)
- return -EINVAL;
-
- size = counter_cpu_data->size;
- memset(&lum, 0, sizeof(lum));
- lum.handle = counter_data->handle; /* parent handle */
- lum.cmd = LTTNG_UST_ABI_COUNTER_CPU;
- lum.u.counter_cpu.len = size;
- lum.u.counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr;
- ret = ustcomm_send_app_msg(sock, &lum);
- if (ret)
- return ret;
-
-	shm_fd[0] = counter_cpu_data->u.counter_cpu.shm_fd;
- len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
- if (len <= 0) {
- if (len < 0)
- return len;
- else
- return -EIO;
- }
-
- ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
- if (!ret) {
- counter_cpu_data->handle = lur.ret_val;
- }
- return ret;
-}
-
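The helpers above are meant to be called in sequence: the counter
configuration first (which yields the parent handle), then one shared-memory
descriptor per CPU. A hedged ordering sketch (error cleanup and object
ownership are simplified):

static int ship_counter(int sock, int parent_handle,
		struct ustctl_daemon_counter *counter)
{
	struct lttng_ust_abi_object_data *counter_data, *cpu_data;
	int nr_cpus = ustctl_get_nr_cpu_per_counter();
	int cpu, ret;

	ret = ustctl_create_counter_data(counter, &counter_data);
	if (ret)
		return ret;
	ret = ustctl_send_counter_data_to_ust(sock, parent_handle,
			counter_data);
	if (ret)
		return ret;
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		ret = ustctl_create_counter_cpu_data(counter, cpu, &cpu_data);
		if (ret)
			return ret;
		ret = ustctl_send_counter_cpu_data_to_ust(sock, counter_data,
				cpu_data);
		if (ret)
			return ret;
	}
	return 0;
}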
-int ustctl_counter_read(struct ustctl_daemon_counter *counter,
- const size_t *dimension_indexes,
- int cpu, int64_t *value,
- bool *overflow, bool *underflow)
-{
- return counter->ops->counter_read(counter->counter, dimension_indexes, cpu,
- value, overflow, underflow);
-}
-
-int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter,
- const size_t *dimension_indexes,
- int64_t *value,
- bool *overflow, bool *underflow)
-{
- return counter->ops->counter_aggregate(counter->counter, dimension_indexes,
- value, overflow, underflow);
-}
-
-int ustctl_counter_clear(struct ustctl_daemon_counter *counter,
- const size_t *dimension_indexes)
-{
- return counter->ops->counter_clear(counter->counter, dimension_indexes);
-}
-
-static
-void ustctl_init(void)
- __attribute__((constructor));
-static
-void ustctl_init(void)
-{
- ust_err_init();
- lttng_ust_getenv_init(); /* Needs ust_err_init() to be completed. */
- lttng_ust_clock_init();
- lttng_ust_ring_buffer_clients_init();
- lttng_ust_counter_clients_init();
- lib_ringbuffer_signal_init();
-}
-
-static
-void ustctl_exit(void)
- __attribute__((destructor));
-static
-void ustctl_exit(void)
-{
- lttng_ust_counter_clients_exit();
- lttng_ust_ring_buffer_clients_exit();
-}
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-cyg-profile.la \
- liblttng-ust-cyg-profile-fast.la
-
-liblttng_ust_cyg_profile_la_SOURCES = \
- lttng-ust-cyg-profile.c \
- lttng-ust-cyg-profile.h
-
-liblttng_ust_cyg_profile_la_LIBADD = \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_cyg_profile_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-liblttng_ust_cyg_profile_fast_la_SOURCES = \
- lttng-ust-cyg-profile-fast.c \
- lttng-ust-cyg-profile-fast.h
-
-liblttng_ust_cyg_profile_fast_la_LIBADD = \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_cyg_profile_fast_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-dist_noinst_SCRIPTS = run run-fast
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <dlfcn.h>
-#include <sys/types.h>
-#include <stdio.h>
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM func_addr
-#include "lttng-ust-cyg-profile-fast.h"
-
-void __cyg_profile_func_enter(void *this_fn, void *call_site)
- __attribute__((no_instrument_function));
-
-void __cyg_profile_func_exit(void *this_fn, void *call_site)
- __attribute__((no_instrument_function));
-
-void __cyg_profile_func_enter(void *this_fn, void *call_site __attribute__((unused)))
-{
- tracepoint(lttng_ust_cyg_profile_fast, func_entry, this_fn);
-}
-
-void __cyg_profile_func_exit(void *this_fn, void *call_site __attribute__((unused)))
-{
- tracepoint(lttng_ust_cyg_profile_fast, func_exit, this_fn);
-}
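The compiler emits calls to these two hooks around every function when the
application is built with -finstrument-functions; preloading this library
(see the run-fast script further down) then turns each entry and exit into a
tracepoint. A minimal instrumented program, as a sketch (build it with
gcc -finstrument-functions -o sample sample.c):

#include <stdio.h>

static int square(int x)
{
	return x * x;	/* entry and exit of this call are instrumented */
}

int main(void)	/* main itself is instrumented as well */
{
	printf("%d\n", square(6));
	return 0;
}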
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_cyg_profile_fast
-
-#if !defined(_TRACEPOINT_LTTNG_UST_CYG_PROFILE_FAST_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_CYG_PROFILE_FAST_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_cyg_profile_fast, func_entry,
- TP_ARGS(void *, func_addr),
- TP_FIELDS(
- ctf_integer_hex(unsigned long, addr,
- (unsigned long) func_addr)
- )
-)
-
-TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile_fast, func_entry,
- TRACE_DEBUG_FUNCTION)
-
-TRACEPOINT_EVENT(lttng_ust_cyg_profile_fast, func_exit,
- TP_ARGS(void *, func_addr),
- TP_FIELDS(
- ctf_unused(func_addr)
- )
-)
-
-TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile_fast, func_exit,
- TRACE_DEBUG_FUNCTION)
-
-#endif /* _TRACEPOINT_LTTNG_UST_CYG_PROFILE_FAST_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng-ust-cyg-profile-fast.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <dlfcn.h>
-#include <sys/types.h>
-#include <stdio.h>
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM func_addr
-#include "lttng-ust-cyg-profile.h"
-
-void __cyg_profile_func_enter(void *this_fn, void *call_site)
- __attribute__((no_instrument_function));
-
-void __cyg_profile_func_exit(void *this_fn, void *call_site)
- __attribute__((no_instrument_function));
-
-void __cyg_profile_func_enter(void *this_fn, void *call_site)
-{
- tracepoint(lttng_ust_cyg_profile, func_entry, this_fn, call_site);
-}
-
-void __cyg_profile_func_exit(void *this_fn, void *call_site)
-{
- tracepoint(lttng_ust_cyg_profile, func_exit, this_fn, call_site);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_cyg_profile
-
-#if !defined(_TRACEPOINT_LTTNG_UST_CYG_PROFILE_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_CYG_PROFILE_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT_CLASS(lttng_ust_cyg_profile, func_class,
- TP_ARGS(void *, func_addr, void *, call_site),
- TP_FIELDS(
- ctf_integer_hex(unsigned long, addr,
- (unsigned long) func_addr)
- ctf_integer_hex(unsigned long, call_site,
- (unsigned long) call_site)
- )
-)
-
-TRACEPOINT_EVENT_INSTANCE(lttng_ust_cyg_profile, func_class,
- func_entry,
- TP_ARGS(void *, func_addr, void *, call_site)
-)
-
-TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile, func_entry,
- TRACE_DEBUG_FUNCTION)
-
-TRACEPOINT_EVENT_INSTANCE(lttng_ust_cyg_profile, func_class,
- func_exit,
- TP_ARGS(void *, func_addr, void *, call_site)
-)
-
-TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile, func_exit,
- TRACE_DEBUG_FUNCTION)
-
-#endif /* _TRACEPOINT_LTTNG_UST_CYG_PROFILE_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng-ust-cyg-profile.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-#!/bin/sh
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-
-LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-cyg-profile.so "$@"
+++ /dev/null
-#!/bin/sh
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-
-LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-cyg-profile-fast.so "$@"
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-dl.la
-liblttng_ust_dl_la_SOURCES = \
- lttng-ust-dl.c \
- ust_dl.c \
- ust_dl.h
-
-liblttng_ust_dl_la_LIBADD = \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(top_builddir)/snprintf/libustsnprintf.la \
- $(DL_LIBS)
-
-liblttng_ust_dl_la_CFLAGS = -DUST_COMPONENT=liblttng-ust-dl $(AM_CFLAGS)
-liblttng_ust_dl_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2013 Paul Woegerer <paul.woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <ust-dlfcn.h>
-#include <ust-elf.h>
-#include <lttng/ust-events.h>
-#include <ust-helper.h>
-#include "usterr-signal-safe.h"
-
-#include "../liblttng-ust/ust-events-internal.h"
-
-/* Include link.h last else it conflicts with ust-dlfcn. */
-#include <link.h>
-
-#define TRACEPOINT_DEFINE
-#include "ust_dl.h"
-
-static void *(*__lttng_ust_plibc_dlopen)(const char *filename, int flags);
-#ifdef HAVE_DLMOPEN
-static void *(*__lttng_ust_plibc_dlmopen)(Lmid_t nsid, const char *filename,
- int flags);
-#endif
-static int (*__lttng_ust_plibc_dlclose)(void *handle);
-
-static
-void _lttng_ust_dl_init(void)
- __attribute__((constructor));
-static
-void _lttng_ust_dl_init(void)
-{
- ust_err_init();
-}
-
-static
-void *_lttng_ust_dl_libc_dlopen(const char *filename, int flags)
-{
- if (!__lttng_ust_plibc_dlopen) {
- __lttng_ust_plibc_dlopen = dlsym(RTLD_NEXT, "dlopen");
- if (!__lttng_ust_plibc_dlopen) {
- fprintf(stderr, "%s\n", dlerror());
- return NULL;
- }
- }
- return __lttng_ust_plibc_dlopen(filename, flags);
-}
-
-#ifdef HAVE_DLMOPEN
-static
-void *_lttng_ust_dl_libc_dlmopen(Lmid_t nsid, const char *filename,
- int flags)
-{
- if (!__lttng_ust_plibc_dlmopen) {
- __lttng_ust_plibc_dlmopen = dlsym(RTLD_NEXT, "dlmopen");
- if (!__lttng_ust_plibc_dlmopen) {
- fprintf(stderr, "%s\n", dlerror());
- return NULL;
- }
- }
- return __lttng_ust_plibc_dlmopen(nsid, filename, flags);
-}
-#endif
-
-static
-int _lttng_ust_dl_libc_dlclose(void *handle)
-{
- if (!__lttng_ust_plibc_dlclose) {
- __lttng_ust_plibc_dlclose = dlsym(RTLD_NEXT, "dlclose");
- if (!__lttng_ust_plibc_dlclose) {
- fprintf(stderr, "%s\n", dlerror());
- return -1;
- }
- }
- return __lttng_ust_plibc_dlclose(handle);
-}
-
-static
-void lttng_ust_dl_dlopen(void *so_base, const char *so_name,
- int flags, void *ip)
-{
- char resolved_path[PATH_MAX];
- struct lttng_ust_elf *elf;
- uint64_t memsz;
- uint8_t *build_id = NULL;
- size_t build_id_len;
- char *dbg_file = NULL;
- uint32_t crc;
- int has_build_id = 0, has_debug_link = 0;
- int ret;
-
- if (!realpath(so_name, resolved_path)) {
- ERR("could not resolve path '%s'", so_name);
- return;
- }
-
- elf = lttng_ust_elf_create(resolved_path);
- if (!elf) {
- ERR("could not access file %s", resolved_path);
- return;
- }
-
- ret = lttng_ust_elf_get_memsz(elf, &memsz);
- if (ret) {
- goto end;
- }
- ret = lttng_ust_elf_get_build_id(
- elf, &build_id, &build_id_len, &has_build_id);
- if (ret) {
- goto end;
- }
- ret = lttng_ust_elf_get_debug_link(
- elf, &dbg_file, &crc, &has_debug_link);
- if (ret) {
- goto end;
- }
-
- tracepoint(lttng_ust_dl, dlopen,
- ip, so_base, resolved_path, flags, memsz,
- has_build_id, has_debug_link);
-
- if (has_build_id) {
- tracepoint(lttng_ust_dl, build_id,
- ip, so_base, build_id, build_id_len);
- }
-
- if (has_debug_link) {
- tracepoint(lttng_ust_dl, debug_link,
- ip, so_base, dbg_file, crc);
- }
-
-end:
- free(dbg_file);
- free(build_id);
- lttng_ust_elf_destroy(elf);
- return;
-}
-
-#ifdef HAVE_DLMOPEN
-static
-void lttng_ust_dl_dlmopen(void *so_base, Lmid_t nsid, const char *so_name,
- int flags, void *ip)
-{
- char resolved_path[PATH_MAX];
- struct lttng_ust_elf *elf;
- uint64_t memsz;
- uint8_t *build_id = NULL;
- size_t build_id_len;
- char *dbg_file = NULL;
- uint32_t crc;
- int has_build_id = 0, has_debug_link = 0;
- int ret;
-
- if (!realpath(so_name, resolved_path)) {
- ERR("could not resolve path '%s'", so_name);
- return;
- }
-
- elf = lttng_ust_elf_create(resolved_path);
- if (!elf) {
- ERR("could not access file %s", resolved_path);
- return;
- }
-
- ret = lttng_ust_elf_get_memsz(elf, &memsz);
- if (ret) {
- goto end;
- }
- ret = lttng_ust_elf_get_build_id(
- elf, &build_id, &build_id_len, &has_build_id);
- if (ret) {
- goto end;
- }
- ret = lttng_ust_elf_get_debug_link(
- elf, &dbg_file, &crc, &has_debug_link);
- if (ret) {
- goto end;
- }
-
- tracepoint(lttng_ust_dl, dlmopen,
- ip, so_base, nsid, resolved_path, flags, memsz,
- has_build_id, has_debug_link);
-
- if (has_build_id) {
- tracepoint(lttng_ust_dl, build_id,
- ip, so_base, build_id, build_id_len);
- }
-
- if (has_debug_link) {
- tracepoint(lttng_ust_dl, debug_link,
- ip, so_base, dbg_file, crc);
- }
-
-end:
- free(dbg_file);
- free(build_id);
- lttng_ust_elf_destroy(elf);
- return;
-}
-#endif
-
-void *dlopen(const char *filename, int flags)
-{
- void *handle;
-
- handle = _lttng_ust_dl_libc_dlopen(filename, flags);
- if (__tracepoint_ptrs_registered && handle) {
- struct link_map *p = NULL;
- int ret;
-
- ret = dlinfo(handle, RTLD_DI_LINKMAP, &p);
- if (ret != -1 && p != NULL && p->l_addr != 0) {
- lttng_ust_dl_dlopen((void *) p->l_addr,
- p->l_name, flags, LTTNG_UST_CALLER_IP());
- }
- }
- lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
- return handle;
-}
-
-#ifdef HAVE_DLMOPEN
-void *dlmopen(Lmid_t nsid, const char *filename, int flags)
-{
- void *handle;
-
- handle = _lttng_ust_dl_libc_dlmopen(nsid, filename, flags);
- if (__tracepoint_ptrs_registered && handle) {
- struct link_map *p = NULL;
- int ret;
-
- ret = dlinfo(handle, RTLD_DI_LINKMAP, &p);
- if (ret != -1 && p != NULL && p->l_addr != 0) {
- lttng_ust_dl_dlmopen((void *) p->l_addr,
- nsid, p->l_name, flags,
- LTTNG_UST_CALLER_IP());
- }
- }
- lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
-	return handle;
-}
-#endif
-
-int dlclose(void *handle)
-{
- int ret;
-
- if (__tracepoint_ptrs_registered) {
- struct link_map *p = NULL;
-
- ret = dlinfo(handle, RTLD_DI_LINKMAP, &p);
- if (ret != -1 && p != NULL && p->l_addr != 0) {
- tracepoint(lttng_ust_dl, dlclose,
- LTTNG_UST_CALLER_IP(),
- (void *) p->l_addr);
- }
- }
- ret = _lttng_ust_dl_libc_dlclose(handle);
- lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
- return ret;
-}
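With this library preloaded, every dlopen()/dlclose() the application performs
is reported through the tracepoints above, including the base address,
build id and debug link of the loaded object. A minimal exerciser, as a
sketch (the library name is illustrative; link with -ldl on older glibc):

#include <dlfcn.h>
#include <stdio.h>

int main(void)
{
	void *handle = dlopen("libm.so.6", RTLD_NOW);	/* illustrative */

	if (!handle) {
		fprintf(stderr, "%s\n", dlerror());
		return 1;
	}
	dlclose(handle);
	return 0;
}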
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- */
-
-#define _LGPL_SOURCE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM ip
-#include "ust_dl.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_dl
-
-#if !defined(_TRACEPOINT_UST_DL_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_UST_DL_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-
-#define LTTNG_UST_DL_PROVIDER
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_dl, dlopen,
- TP_ARGS(void *, ip, void *, baddr, const char *, path,
- int, flags, uint64_t, memsz, uint8_t, has_build_id,
- uint8_t, has_debug_link),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint64_t, memsz, memsz)
- ctf_integer_hex(int, flags, flags)
- ctf_string(path, path)
- ctf_integer(uint8_t, has_build_id, has_build_id)
- ctf_integer(uint8_t, has_debug_link, has_debug_link)
- )
-)
-
-#ifdef HAVE_DLMOPEN
-TRACEPOINT_EVENT(lttng_ust_dl, dlmopen,
- TP_ARGS(void *, ip, void *, baddr, Lmid_t, nsid,
- const char *, path, int, flags,
- uint64_t, memsz, uint8_t, has_build_id,
- uint8_t, has_debug_link),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint64_t, memsz, memsz)
- ctf_integer(Lmid_t, nsid, nsid)
- ctf_integer_hex(int, flags, flags)
- ctf_string(path, path)
- ctf_integer(uint8_t, has_build_id, has_build_id)
- ctf_integer(uint8_t, has_debug_link, has_debug_link)
- )
-)
-#endif
-
-TRACEPOINT_EVENT(lttng_ust_dl, build_id,
- TP_ARGS(
- void *, ip,
- void *, baddr,
- uint8_t *, build_id,
- size_t, build_id_len
- ),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_sequence_hex(uint8_t, build_id, build_id,
- size_t, build_id_len)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_dl, debug_link,
- TP_ARGS(
- void *, ip,
- void *, baddr,
- char *, filename,
- uint32_t, crc
- ),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint32_t, crc, crc)
- ctf_string(filename, filename)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_dl, dlclose,
- TP_ARGS(void *, ip, void *, baddr),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- )
-)
-
-#endif /* _TRACEPOINT_UST_DL_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./ust_dl.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-fd.la
-liblttng_ust_fd_la_SOURCES = \
- lttng-ust-fd.c
-
-liblttng_ust_fd_la_LIBADD = \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_fd_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stdio.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <ust-fd.h>
-#include <dlfcn.h>
-
-#include <ust-helper.h>
-
-static int (*__lttng_ust_fd_plibc_close)(int fd);
-static int (*__lttng_ust_fd_plibc_fclose)(FILE *stream);
-
-static
-int _lttng_ust_fd_libc_close(int fd)
-{
- if (!__lttng_ust_fd_plibc_close) {
- __lttng_ust_fd_plibc_close = dlsym(RTLD_NEXT, "close");
- if (!__lttng_ust_fd_plibc_close) {
- fprintf(stderr, "%s\n", dlerror());
- return -1;
- }
- }
- return lttng_ust_safe_close_fd(fd, __lttng_ust_fd_plibc_close);
-}
-
-static
-int _lttng_ust_fd_libc_fclose(FILE *stream)
-{
- if (!__lttng_ust_fd_plibc_fclose) {
- __lttng_ust_fd_plibc_fclose = dlsym(RTLD_NEXT, "fclose");
- if (!__lttng_ust_fd_plibc_fclose) {
- fprintf(stderr, "%s\n", dlerror());
- return -1;
- }
- }
- return lttng_ust_safe_fclose_stream(stream,
- __lttng_ust_fd_plibc_fclose);
-}
-
-int close(int fd)
-{
- return _lttng_ust_fd_libc_close(fd);
-}
-
-/*
- * Note: fcloseall() is not an issue because it fcloses only the
- * streams it knows about, which differs from the problems caused by
- * gnulib close_stdout(), which does an explicit fclose(stdout).
- */
-int fclose(FILE *stream)
-{
- return _lttng_ust_fd_libc_fclose(stream);
-}
-
-#if defined(__sun__) || defined(__FreeBSD__)
-/* Solaris and FreeBSD. */
-void closefrom(int lowfd)
-{
- (void) lttng_ust_safe_closefrom_fd(lowfd, __lttng_ust_fd_plibc_close);
-}
-#elif defined(__NetBSD__) || defined(__OpenBSD__)
-/* NetBSD and OpenBSD. */
-int closefrom(int lowfd)
-{
- return lttng_ust_safe_closefrom_fd(lowfd, __lttng_ust_fd_plibc_close);
-}
-#else
-/* As far as we know, this OS does not implement closefrom. */
-#endif
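These overrides route the application's close()/fclose() through
lttng_ust_safe_close_fd() and lttng_ust_safe_fclose_stream(), which refuse to
close descriptors owned by the tracer. A hedged sketch of the guard idea only
(the tracked-set lookup is a stand-in, not the library's actual bookkeeping):

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

/* Stand-in for a lookup in the tracer's file-descriptor tracker. */
static bool fd_is_tracked(int fd)
{
	(void) fd;
	return false;
}

/* Close fd through close_cb only if the tracer does not own it. */
static int guarded_close(int fd, int (*close_cb)(int))
{
	if (fd_is_tracked(fd)) {
		errno = EBADF;	/* behave as if fd were not open */
		return -1;
	}
	return close_cb(fd);
}

int main(void)
{
	return guarded_close(-1, close) == -1 ? 0 : 1;	/* expect failure */
}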
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-fork.la
-liblttng_ust_fork_la_SOURCES = ustfork.c
-liblttng_ust_fork_la_LIBADD = \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_fork_la_CFLAGS = -DUST_COMPONENT=liblttng-ust-fork $(AM_CFLAGS)
-
-liblttng_ust_fork_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009 Pierre-Marc Fournier
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <ust-dlfcn.h>
-#include <unistd.h>
-#include <stdio.h>
-#include <signal.h>
-#include <sched.h>
-#include <stdarg.h>
-#include <errno.h>
-
-#include <lttng/ust-fork.h>
-
-pid_t fork(void)
-{
- static pid_t (*plibc_func)(void) = NULL;
- sigset_t sigset;
- pid_t retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "fork");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"fork\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- lttng_ust_before_fork(&sigset);
- /* Do the real fork */
- retval = plibc_func();
- saved_errno = errno;
- if (retval == 0) {
- /* child */
- lttng_ust_after_fork_child(&sigset);
- } else {
- lttng_ust_after_fork_parent(&sigset);
- }
- errno = saved_errno;
- return retval;
-}
-
-int daemon(int nochdir, int noclose)
-{
- static int (*plibc_func)(int nochdir, int noclose) = NULL;
- sigset_t sigset;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "daemon");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"daemon\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- lttng_ust_before_fork(&sigset);
- /* Do the real daemon call */
- retval = plibc_func(nochdir, noclose);
- saved_errno = errno;
- if (retval == 0) {
- /* child, parent called _exit() directly */
- lttng_ust_after_fork_child(&sigset);
- } else {
- /* on error in the parent */
- lttng_ust_after_fork_parent(&sigset);
- }
- errno = saved_errno;
- return retval;
-}
-
-int setuid(uid_t uid)
-{
- static int (*plibc_func)(uid_t uid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setuid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setuid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setuid */
- retval = plibc_func(uid);
- saved_errno = errno;
-
- lttng_ust_after_setuid();
-
- errno = saved_errno;
- return retval;
-}
-
-int setgid(gid_t gid)
-{
- static int (*plibc_func)(gid_t gid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setgid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setgid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setgid */
- retval = plibc_func(gid);
- saved_errno = errno;
-
- lttng_ust_after_setgid();
-
- errno = saved_errno;
- return retval;
-}
-
-int seteuid(uid_t euid)
-{
- static int (*plibc_func)(uid_t euid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "seteuid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"seteuid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real seteuid */
- retval = plibc_func(euid);
- saved_errno = errno;
-
- lttng_ust_after_seteuid();
-
- errno = saved_errno;
- return retval;
-}
-
-int setegid(gid_t egid)
-{
- static int (*plibc_func)(gid_t egid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setegid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setegid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setegid */
- retval = plibc_func(egid);
- saved_errno = errno;
-
- lttng_ust_after_setegid();
-
- errno = saved_errno;
- return retval;
-}
-
-int setreuid(uid_t ruid, uid_t euid)
-{
- static int (*plibc_func)(uid_t ruid, uid_t euid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setreuid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setreuid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setreuid */
- retval = plibc_func(ruid, euid);
- saved_errno = errno;
-
- lttng_ust_after_setreuid();
-
- errno = saved_errno;
- return retval;
-}
-
-int setregid(gid_t rgid, gid_t egid)
-{
- static int (*plibc_func)(gid_t rgid, gid_t egid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setregid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setregid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setregid */
- retval = plibc_func(rgid, egid);
- saved_errno = errno;
-
- lttng_ust_after_setregid();
-
- errno = saved_errno;
- return retval;
-}
-
-#ifdef __linux__
-
-struct user_desc;
-
-struct ustfork_clone_info {
- int (*fn)(void *);
- void *arg;
- sigset_t sigset;
-};
-
-static int clone_fn(void *arg)
-{
- struct ustfork_clone_info *info = (struct ustfork_clone_info *) arg;
-
- /* clone is now done and we are in child */
- lttng_ust_after_fork_child(&info->sigset);
- return info->fn(info->arg);
-}
-
-int clone(int (*fn)(void *), void *child_stack, int flags, void *arg, ...)
-{
- static int (*plibc_func)(int (*fn)(void *), void *child_stack,
- int flags, void *arg, pid_t *ptid,
- struct user_desc *tls, pid_t *ctid) = NULL;
- /* var args */
- pid_t *ptid;
- struct user_desc *tls;
- pid_t *ctid;
- /* end of var args */
- va_list ap;
- int retval;
- int saved_errno;
-
- va_start(ap, arg);
- ptid = va_arg(ap, pid_t *);
- tls = va_arg(ap, struct user_desc *);
- ctid = va_arg(ap, pid_t *);
- va_end(ap);
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "clone");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"clone\" symbol.\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- if (flags & CLONE_VM) {
- /*
- * Creating a thread, no need to intervene, just pass on
- * the arguments.
- */
- retval = plibc_func(fn, child_stack, flags, arg, ptid,
- tls, ctid);
- saved_errno = errno;
- } else {
- /* Creating a real process, we need to intervene. */
- struct ustfork_clone_info info = { .fn = fn, .arg = arg };
-
- lttng_ust_before_fork(&info.sigset);
- retval = plibc_func(clone_fn, child_stack, flags, &info,
- ptid, tls, ctid);
- saved_errno = errno;
- /* The child doesn't get here. */
- lttng_ust_after_fork_parent(&info.sigset);
- }
- errno = saved_errno;
- return retval;
-}
-
-int setns(int fd, int nstype)
-{
- static int (*plibc_func)(int fd, int nstype) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setns");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setns\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setns */
- retval = plibc_func(fd, nstype);
- saved_errno = errno;
-
- lttng_ust_after_setns();
-
- errno = saved_errno;
- return retval;
-}
-
-int unshare(int flags)
-{
- static int (*plibc_func)(int flags) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "unshare");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"unshare\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
-	/* Do the real unshare */
- retval = plibc_func(flags);
- saved_errno = errno;
-
- lttng_ust_after_unshare();
-
- errno = saved_errno;
- return retval;
-}
-
-int setresuid(uid_t ruid, uid_t euid, uid_t suid)
-{
- static int (*plibc_func)(uid_t ruid, uid_t euid, uid_t suid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setresuid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setresuid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setresuid */
- retval = plibc_func(ruid, euid, suid);
- saved_errno = errno;
-
- lttng_ust_after_setresuid();
-
- errno = saved_errno;
- return retval;
-}
-
-int setresgid(gid_t rgid, gid_t egid, gid_t sgid)
-{
- static int (*plibc_func)(gid_t rgid, gid_t egid, gid_t sgid) = NULL;
- int retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "setresgid");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"setresgid\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- /* Do the real setresgid */
- retval = plibc_func(rgid, egid, sgid);
- saved_errno = errno;
-
- lttng_ust_after_setresgid();
-
- errno = saved_errno;
- return retval;
-}
-
-#elif defined (__FreeBSD__)
-
-pid_t rfork(int flags)
-{
- static pid_t (*plibc_func)(void) = NULL;
- sigset_t sigset;
- pid_t retval;
- int saved_errno;
-
- if (plibc_func == NULL) {
- plibc_func = dlsym(RTLD_NEXT, "rfork");
- if (plibc_func == NULL) {
- fprintf(stderr, "libustfork: unable to find \"rfork\" symbol\n");
- errno = ENOSYS;
- return -1;
- }
- }
-
- lttng_ust_before_fork(&sigset);
- /* Do the real rfork */
- retval = plibc_func();
- saved_errno = errno;
- if (retval == 0) {
- /* child */
- lttng_ust_after_fork_child(&sigset);
- } else {
- lttng_ust_after_fork_parent(&sigset);
- }
- errno = saved_errno;
- return retval;
-}
-
-/*
- * On BSD, no need to override vfork, because it runs in the context of
- * the parent, with parent waiting until execve or exit is executed in
- * the child.
- */
-
-#else
-#warning "Unknown OS. You might want to ensure that fork/clone/vfork/rfork handling is complete."
-#endif
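The wrappers above exist so a traced application can create processes without
corrupting its tracing state: lttng_ust_before_fork() quiesces tracing around
the underlying libc call, and the child reinitializes its tracing context
afterwards. With the library preloaded, an ordinary fork goes through the
wrapper transparently, as in this minimal sketch:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();	/* resolved to the wrapper when preloaded */

	if (pid < 0) {
		perror("fork");
		return 1;
	}
	if (pid == 0) {
		printf("child %d\n", (int) getpid());
		return 0;
	}
	waitpid(pid, NULL, 0);
	return 0;
}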
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-SUBDIRS = java jni
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-SUBDIRS = lttng-ust-agent-common lttng-ust-agent-all
-
-if ENABLE_JAVA_AGENT_WITH_JUL
-SUBDIRS += lttng-ust-agent-jul
-endif
-
-if ENABLE_JAVA_AGENT_WITH_LOG4J
-SUBDIRS += lttng-ust-agent-log4j
-endif
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-JAVAROOT = .
-
-jarfile_version = 1.0.0
-jarfile_manifest = $(srcdir)/Manifest.txt
-jarfile_symlink = lttng-ust-agent-all.jar
-jarfile = lttng-ust-agent-all-$(jarfile_version).jar
-
-# Compatibility symlink provided for applications expecting the agent
-# jar file installed by UST 2.7.
-jarfile_compat_symlink = liblttng-ust-agent.jar
-
-jardir = $(datadir)/java
-
-dist_noinst_DATA = $(jarfile_manifest)
-
-jar_DATA = $(jarfile)
-
-$(jarfile):
- $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) \
- && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink) \
- && rm -f $(jarfile_compat_symlink) && $(LN_S) $(jarfile_symlink) $(jarfile_compat_symlink)
-
-install-data-hook:
- cd $(DESTDIR)/$(jardir) \
- && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink) \
- && rm -f $(jarfile_compat_symlink) && $(LN_S) $(jarfile_symlink) $(jarfile_compat_symlink)
-
-uninstall-hook:
- cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && rm -f $(jarfile_compat_symlink)
-
-CLEANFILES = *.jar
+++ /dev/null
-Name: org/lttng/ust/agent/all/
-Specification-Title: LTTng UST All Java Agents
-Specification-Version: 1.0.0
-Specification-Vendor: LTTng Project
-Implementation-Title: org.lttng.ust.agent.all
-Implementation-Version: 1.0.0
-Implementation-Vendor: LTTng Project
-Class-Path: lttng-ust-agent-common.jar lttng-ust-agent-jul.jar lttng-ust-agent-log4j.jar
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-JAVAROOT = .
-
-pkgpath = org/lttng/ust/agent
-
-jarfile_version = 1.0.0
-jarfile_manifest = $(srcdir)/Manifest.txt
-jarfile_symlink = lttng-ust-agent-common.jar
-jarfile = lttng-ust-agent-common-$(jarfile_version).jar
-
-jardir = $(datadir)/java
-jnioutdir = ../../jni/common
-
-dist_noinst_JAVA = $(pkgpath)/AbstractLttngAgent.java \
- $(pkgpath)/EventNamePattern.java \
- $(pkgpath)/ILttngAgent.java \
- $(pkgpath)/ILttngHandler.java \
- $(pkgpath)/LTTngAgent.java \
- $(pkgpath)/client/ILttngTcpClientListener.java \
- $(pkgpath)/client/SessiondCommand.java \
- $(pkgpath)/client/LttngAgentResponse.java \
- $(pkgpath)/client/LttngTcpSessiondClient.java \
- $(pkgpath)/client/SessiondCommandHeader.java \
- $(pkgpath)/client/SessiondDisableAppContextCommand.java \
- $(pkgpath)/client/SessiondDisableEventCommand.java \
- $(pkgpath)/client/SessiondEnableAppContextCommand.java \
- $(pkgpath)/client/SessiondEnableEventCommand.java \
- $(pkgpath)/client/SessiondListLoggersCommand.java \
- $(pkgpath)/context/LttngContextApi.java \
- $(pkgpath)/context/ContextInfoManager.java \
- $(pkgpath)/context/ContextInfoSerializer.java \
- $(pkgpath)/context/IContextInfoRetriever.java \
- $(pkgpath)/filter/FilterChangeNotifier.java \
- $(pkgpath)/filter/IFilterChangeListener.java \
- $(pkgpath)/session/EventRule.java \
- $(pkgpath)/session/LogLevelSelector.java \
- $(pkgpath)/utils/LttngUstAgentLogger.java
-
-
-dist_noinst_DATA = $(jarfile_manifest)
-
-jar_DATA = $(jarfile)
-
-classes = $(pkgpath)/*.class \
- $(pkgpath)/client/*.class \
- $(pkgpath)/context/*.class \
- $(pkgpath)/filter/*.class \
- $(pkgpath)/session/*.class \
- $(pkgpath)/utils/*.class
-
-$(jarfile): classnoinst.stamp
- $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) $(classes) && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink)
-
-if !HAVE_JAVAH
-# If we don't have javah, assume we are running openjdk >= 10 and use javac
-# to generate the jni header file.
-AM_JAVACFLAGS = -h $(jnioutdir)
-else
-context-jni-header.stamp: $(dist_noinst_JAVA)
- $(JAVAH) -classpath $(CLASSPATH):$(srcdir) -d $(jnioutdir) $(JAVAHFLAGS) org.lttng.ust.agent.context.LttngContextApi && \
- echo "Context API JNI header generated" > context-jni-header.stamp
-
-all-local: context-jni-header.stamp
-endif
-
-install-data-hook:
- cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink)
-
-uninstall-hook:
- cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink)
-
-CLEANFILES = *.jar \
- $(pkgpath)/*.class \
- $(pkgpath)/client/*.class \
- $(pkgpath)/context/*.class \
- $(pkgpath)/filter/*.class \
- $(pkgpath)/session/*.class \
- $(pkgpath)/utils/*.class \
- context-jni-header.stamp \
- $(jnioutdir)/org_lttng_ust_agent_context_LttngContextApi.h
+++ /dev/null
-Name: org/lttng/ust/agent/
-Specification-Title: LTTng UST Java Agent
-Specification-Version: 1.0.0
-Specification-Vendor: LTTng Project
-Implementation-Title: org.lttng.ust.agent
-Implementation-Version: 1.0.0
-Implementation-Vendor: LTTng Project
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.regex.Matcher;
-
-import org.lttng.ust.agent.client.ILttngTcpClientListener;
-import org.lttng.ust.agent.client.LttngTcpSessiondClient;
-import org.lttng.ust.agent.filter.FilterChangeNotifier;
-import org.lttng.ust.agent.session.EventRule;
-import org.lttng.ust.agent.utils.LttngUstAgentLogger;
-
-/**
- * Base implementation of a {@link ILttngAgent}.
- *
- * @author Alexandre Montplaisir
- * @param <T>
- * The type of logging handler that should register to this agent
- */
-public abstract class AbstractLttngAgent<T extends ILttngHandler>
- implements ILttngAgent<T>, ILttngTcpClientListener {
-
- private static final int INIT_TIMEOUT = 3; /* Seconds */
-
- /** The handlers registered to this agent */
- private final Set<T> registeredHandlers = new HashSet<T>();
-
- /**
- * The trace events currently enabled in the sessions.
- *
- * The key is the {@link EventNamePattern} that comes from the event name.
- * The value is the ref count (how many different sessions currently have
- * this event enabled). Once the ref count falls to 0, this means we can
- * avoid sending log events through JNI because nobody wants them.
- *
- * Its accesses should be protected by the {@link #enabledEventNamesLock}
- * below.
- */
- private final Map<EventNamePattern, Integer> enabledPatterns = new HashMap<EventNamePattern, Integer>();
-
- /**
- * Cache of already-checked event names. As long as enabled/disabled events
- * don't change in the session, we can avoid re-checking events that were
- * previously checked against all known enabled patterns.
- *
- * Its accesses should be protected by the {@link #enabledEventNamesLock}
- * below, with the exception of concurrent get operations.
- */
- private final Map<String, Boolean> enabledEventNamesCache = new ConcurrentHashMap<String, Boolean>();
-
- /**
- * Lock protecting accesses to the {@link #enabledPatterns} and
- * {@link #enabledEventNamesCache} maps.
- */
- private final Lock enabledEventNamesLock = new ReentrantLock();
-
- /**
- * The application contexts currently enabled in the tracing sessions.
- *
-	 * It is first indexed by context retriever, then by context name. This
-	 * allows efficient querying of all the contexts for a given retriever.
- *
-	 * Works similarly to {@link #enabledPatterns}, but for app contexts (and with
- * an extra degree of indexing).
- *
- * TODO Could be changed to a Guava Table once/if we start using it.
- */
- private final Map<String, Map<String, Integer>> enabledAppContexts = new ConcurrentHashMap<String, Map<String, Integer>>();
-
- /** Tracing domain. Defined by the sub-classes via the constructor. */
- private final Domain domain;
-
- /* Lazy-loaded sessiond clients and their thread objects */
- private LttngTcpSessiondClient rootSessiondClient = null;
- private LttngTcpSessiondClient userSessiondClient = null;
- private Thread rootSessiondClientThread = null;
- private Thread userSessiondClientThread = null;
-
- /** Indicates if this agent has been initialized. */
- private boolean initialized = false;
-
- /**
- * Constructor. Should only be called by sub-classes via super(...);
- *
- * @param domain
- * The tracing domain of this agent.
- */
- protected AbstractLttngAgent(Domain domain) {
- this.domain = domain;
- }
-
- @Override
- public Domain getDomain() {
- return domain;
- }
-
- @Override
- public void registerHandler(T handler) {
- synchronized (registeredHandlers) {
- if (registeredHandlers.isEmpty()) {
- /*
- * This is the first handler that registers, we will initialize
- * the agent.
- */
- init();
- }
- registeredHandlers.add(handler);
- }
- }
-
- @Override
- public void unregisterHandler(T handler) {
- synchronized (registeredHandlers) {
- registeredHandlers.remove(handler);
- if (registeredHandlers.isEmpty()) {
- /* There are no more registered handlers, close the connection. */
- dispose();
- }
- }
- }
-
- private void init() {
- /*
- * Only called from a synchronized (registeredHandlers) block, should
- * not need additional synchronization.
- */
- if (initialized) {
- return;
- }
-
- LttngUstAgentLogger.log(AbstractLttngAgent.class, "Initializing Agent for domain: " + domain.name());
-
- String rootClientThreadName = "Root sessiond client started by agent: " + this.getClass().getSimpleName();
-
- rootSessiondClient = new LttngTcpSessiondClient(this, getDomain().value(), true);
- rootSessiondClientThread = new Thread(rootSessiondClient, rootClientThreadName);
- rootSessiondClientThread.setDaemon(true);
- rootSessiondClientThread.start();
-
- String userClientThreadName = "User sessiond client started by agent: " + this.getClass().getSimpleName();
-
- userSessiondClient = new LttngTcpSessiondClient(this, getDomain().value(), false);
- userSessiondClientThread = new Thread(userSessiondClient, userClientThreadName);
- userSessiondClientThread.setDaemon(true);
- userSessiondClientThread.start();
-
- /* Give the threads' registration a chance to end. */
- if (!rootSessiondClient.waitForConnection(INIT_TIMEOUT)) {
- userSessiondClient.waitForConnection(INIT_TIMEOUT);
- }
-
- initialized = true;
- }
-
- /**
- * Dispose the agent
- */
- private void dispose() {
- LttngUstAgentLogger.log(AbstractLttngAgent.class, "Disposing Agent for domain: " + domain.name());
-
- /*
- * Only called from a synchronized (registeredHandlers) block, should
- * not need additional synchronization.
- */
- rootSessiondClient.close();
- userSessiondClient.close();
-
- try {
- rootSessiondClientThread.join();
- userSessiondClientThread.join();
-
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- rootSessiondClient = null;
- rootSessiondClientThread = null;
- userSessiondClient = null;
- userSessiondClientThread = null;
-
- /*
- * Send filter change notifications for all event rules currently
- * active, then clear them.
- */
- FilterChangeNotifier fcn = FilterChangeNotifier.getInstance();
-
- enabledEventNamesLock.lock();
- try {
- for (Map.Entry<EventNamePattern, Integer> entry : enabledPatterns.entrySet()) {
- String eventName = entry.getKey().getEventName();
- Integer nb = entry.getValue();
- for (int i = 0; i < nb.intValue(); i++) {
- fcn.removeEventRules(eventName);
- }
- }
- enabledPatterns.clear();
- enabledEventNamesCache.clear();
- } finally {
- enabledEventNamesLock.unlock();
- }
-
- /*
- * Also clear tracked app contexts (no filter notifications sent for
- * those currently).
- */
- enabledAppContexts.clear();
-
- initialized = false;
- }
-
- @Override
- public boolean eventEnabled(EventRule eventRule) {
- /* Notify the filter change manager of the command */
- FilterChangeNotifier.getInstance().addEventRule(eventRule);
-
- String eventName = eventRule.getEventName();
- EventNamePattern pattern = new EventNamePattern(eventName);
-
- enabledEventNamesLock.lock();
- try {
- boolean ret = incrementRefCount(pattern, enabledPatterns);
- enabledEventNamesCache.clear();
- return ret;
- } finally {
- enabledEventNamesLock.unlock();
- }
- }
-
- @Override
- public boolean eventDisabled(String eventName) {
- /* Notify the filter change manager of the command */
- FilterChangeNotifier.getInstance().removeEventRules(eventName);
-
- EventNamePattern pattern = new EventNamePattern(eventName);
-
- enabledEventNamesLock.lock();
- try {
- boolean ret = decrementRefCount(pattern, enabledPatterns);
- enabledEventNamesCache.clear();
- return ret;
- } finally {
- enabledEventNamesLock.unlock();
- }
- }
-
- @Override
- public boolean appContextEnabled(String contextRetrieverName, String contextName) {
- synchronized (enabledAppContexts) {
- Map<String, Integer> retrieverMap = enabledAppContexts.get(contextRetrieverName);
- if (retrieverMap == null) {
- /* There is no submap for this retriever, let's create one. */
- retrieverMap = new ConcurrentHashMap<String, Integer>();
- enabledAppContexts.put(contextRetrieverName, retrieverMap);
- }
-
- return incrementRefCount(contextName, retrieverMap);
- }
- }
-
- @Override
- public boolean appContextDisabled(String contextRetrieverName, String contextName) {
- synchronized (enabledAppContexts) {
- Map<String, Integer> retrieverMap = enabledAppContexts.get(contextRetrieverName);
- if (retrieverMap == null) {
- /* There was no submap for this retriever, invalid command? */
- return false;
- }
-
- boolean ret = decrementRefCount(contextName, retrieverMap);
-
- /* If the submap is now empty we can remove it from the main map. */
- if (retrieverMap.isEmpty()) {
- enabledAppContexts.remove(contextRetrieverName);
- }
-
- return ret;
- }
- }
-
- /*
- * Implementation of this method is domain-specific.
- */
- @Override
- public abstract Collection<String> listAvailableEvents();
-
- @Override
- public boolean isEventEnabled(String eventName) {
- Boolean cachedEnabled = enabledEventNamesCache.get(eventName);
- if (cachedEnabled != null) {
- /* We have seen this event previously */
- /*
-		 * Careful! cachedEnabled == null could also mean that the null value
-		 * is associated with the key. But we should never have inserted null
-		 * values in the map.
- */
- return cachedEnabled.booleanValue();
- }
-
- /*
- * We have not previously checked this event. Run it against all known
- * enabled event patterns to determine if it should pass or not.
- */
- enabledEventNamesLock.lock();
- try {
- boolean enabled = false;
- for (EventNamePattern enabledPattern : enabledPatterns.keySet()) {
- Matcher matcher = enabledPattern.getPattern().matcher(eventName);
- if (matcher.matches()) {
- enabled = true;
- break;
- }
- }
-
- /* Add the result to the cache */
- enabledEventNamesCache.put(eventName, Boolean.valueOf(enabled));
- return enabled;
-
- } finally {
- enabledEventNamesLock.unlock();
- }
- }
-
- @Override
- public Collection<Map.Entry<String, Map<String, Integer>>> getEnabledAppContexts() {
- return enabledAppContexts.entrySet();
- }
-
- private static <T> boolean incrementRefCount(T key, Map<T, Integer> refCountMap) {
- synchronized (refCountMap) {
- Integer count = refCountMap.get(key);
- if (count == null) {
- /* This is the first instance of this event being enabled */
- refCountMap.put(key, Integer.valueOf(1));
- return true;
- }
- if (count.intValue() <= 0) {
- /* It should not have been in the map in the first place! */
- throw new IllegalStateException();
- }
- /* The event was already enabled, increment its refcount */
- refCountMap.put(key, Integer.valueOf(count.intValue() + 1));
- return true;
- }
- }
-
- private static <T> boolean decrementRefCount(T key, Map<T, Integer> refCountMap) {
- synchronized (refCountMap) {
- Integer count = refCountMap.get(key);
- if (count == null || count.intValue() <= 0) {
- /*
- * The sessiond asked us to disable an event that was not
- * enabled previously. Command error?
- */
- return false;
- }
- if (count.intValue() == 1) {
- /*
- * This is the last instance of this event being disabled,
- * remove it from the map so that we stop sending it.
- */
- refCountMap.remove(key);
- return true;
- }
- /*
- * Other sessions are still looking for this event, simply decrement
- * its refcount.
- */
- refCountMap.put(key, Integer.valueOf(count.intValue() - 1));
- return true;
- }
- }
-}
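
The incrementRefCount()/decrementRefCount() helpers above implement plain per-key reference counting: an event name stays enabled until every session that enabled it has disabled it again. A minimal standalone sketch of the same semantics (RefCountDemo and its method names are hypothetical, chosen for illustration):

    import java.util.HashMap;
    import java.util.Map;

    public class RefCountDemo {
        /* Enable: the first reference inserts the key, later ones bump the count. */
        static <T> void increment(T key, Map<T, Integer> map) {
            Integer count = map.get(key);
            map.put(key, (count == null) ? Integer.valueOf(1) : Integer.valueOf(count.intValue() + 1));
        }

        /* Disable: returns false if the key was never enabled. */
        static <T> boolean decrement(T key, Map<T, Integer> map) {
            Integer count = map.get(key);
            if (count == null || count.intValue() <= 0) {
                return false;
            }
            if (count.intValue() == 1) {
                map.remove(key); /* Last reference: stop tracking the key. */
            } else {
                map.put(key, Integer.valueOf(count.intValue() - 1));
            }
            return true;
        }

        public static void main(String[] args) {
            Map<String, Integer> refs = new HashMap<String, Integer>();
            increment("my_event", refs); /* Session A enables it. */
            increment("my_event", refs); /* Session B enables it. */
            decrement("my_event", refs); /* Session A disables it. */
            System.out.println(refs.containsKey("my_event")); /* true: B still needs it. */
        }
    }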
-
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2017 EfficiOS Inc.
- * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
- */
-
-package org.lttng.ust.agent;
-
-import java.util.regex.Pattern;
-
-/**
- * Class encapsulating an event name from the session daemon, and its
- * corresponding {@link Pattern}. This allows referring back to the original
- * event name, for example when we receive a disable command.
- *
- * @author Philippe Proulx
- * @author Alexandre Montplaisir
- */
-class EventNamePattern {
-
- private final String originalEventName;
-
- /*
- * Note that two Patterns coming from the exact same String will not be
- * equals()! As such, it would be confusing to make the pattern part of this
-	 * class's equals/hashCode.
- */
- private final transient Pattern pattern;
-
- public EventNamePattern(String eventName) {
- if (eventName == null) {
- throw new IllegalArgumentException();
- }
-
- originalEventName = eventName;
- pattern = patternFromEventName(eventName);
- }
-
- public String getEventName() {
- return originalEventName;
- }
-
- public Pattern getPattern() {
- return pattern;
- }
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + originalEventName.hashCode();
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- EventNamePattern other = (EventNamePattern) obj;
- if (!originalEventName.equals(other.originalEventName)) {
- return false;
- }
- return true;
- }
-
- private static Pattern patternFromEventName(String eventName) {
- /*
- * The situation here is that `\*` means a literal `*` in the event
- * name, and `*` is a wildcard star. We check the event name one
-		 * character at a time and create a list of tokens to be converted to
- * partial patterns.
- */
- StringBuilder bigBuilder = new StringBuilder("^");
- StringBuilder smallBuilder = new StringBuilder();
-
- for (int i = 0; i < eventName.length(); i++) {
- char c = eventName.charAt(i);
-
- switch (c) {
- case '*':
- /* Add current quoted builder's string if not empty. */
- if (smallBuilder.length() > 0) {
- bigBuilder.append(Pattern.quote(smallBuilder.toString()));
- smallBuilder.setLength(0);
- }
-
- /* Append the equivalent regex which is `.*`. */
- bigBuilder.append(".*");
- continue;
-
- case '\\':
- /* We only escape `*` and `\` here. */
- if (i < (eventName.length() - 1)) {
- char nextChar = eventName.charAt(i + 1);
-
- if (nextChar == '*' || nextChar == '\\') {
- smallBuilder.append(nextChar);
- } else {
- smallBuilder.append(c);
- smallBuilder.append(nextChar);
- }
-
- i++;
- continue;
- }
- break;
-
- default:
- break;
- }
-
- smallBuilder.append(c);
- }
-
- /* Add current quoted builder's string if not empty. */
- if (smallBuilder.length() > 0) {
- bigBuilder.append(Pattern.quote(smallBuilder.toString()));
- }
-
- bigBuilder.append("$");
-
- return Pattern.compile(bigBuilder.toString());
- }
-}
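
patternFromEventName() above turns session-daemon event names into regexes: `*` becomes the wildcard `.*`, `\*` stays a literal star, and everything else is quoted verbatim. A short sketch of the resulting matching behaviour (GlobDemo is a hypothetical name; the pattern shown is what the method would build for "my_event*"):

    import java.util.regex.Pattern;

    public class GlobDemo {
        public static void main(String[] args) {
            /* "my_event*" => "^" + quoted literal + ".*" + "$" */
            Pattern p = Pattern.compile("^" + Pattern.quote("my_event") + ".*" + "$");
            System.out.println(p.matcher("my_event_foo").matches()); /* true */
            System.out.println(p.matcher("other_event").matches());  /* false */
        }
    }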
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent;
-
-import java.util.Collection;
-import java.util.Map;
-
-/**
- * Interface to define LTTng Java agents.
- *
- * An "agent" is a representative of an LTTng session daemon in the Java world.
- * It tracks the settings of a tracing session as they are defined in the
- * session daemon.
- *
- * It also tracks the current logging handlers that are sending events to UST.
- *
- * @author Alexandre Montplaisir
- *
- * @param <T>
- * The type of logging handler that should register to this agent
- */
-public interface ILttngAgent<T extends ILttngHandler> {
-
- // ------------------------------------------------------------------------
- // Agent configuration elements
- // ------------------------------------------------------------------------
-
- /**
- * Tracing domains. Corresponds to domains defined by LTTng Tools.
- */
- enum Domain {
- JUL(3), LOG4J(4);
- private int value;
-
- private Domain(int value) {
- this.value = value;
- }
-
- public int value() {
- return value;
- }
- }
-
- /**
- * The tracing domain of this agent.
- *
- * @return The tracing domain.
- */
- Domain getDomain();
-
- // ------------------------------------------------------------------------
- // Log handler registering
- // ------------------------------------------------------------------------
-
- /**
- * Register a handler to this agent.
- *
- * @param handler
- * The handler to register
- */
- void registerHandler(T handler);
-
- /**
- * Deregister a handler from this agent.
- *
- * @param handler
- * The handler to deregister.
- */
- void unregisterHandler(T handler);
-
- // ------------------------------------------------------------------------
- // Tracing session parameters
- // ------------------------------------------------------------------------
-
- /**
-	 * Query if a given event is currently enabled in an active tracing session,
- * meaning it should be sent to UST.
- *
- * @param eventName
- * The name of the event to check.
- * @return True if the event is currently enabled, false if it is not.
- */
- boolean isEventEnabled(String eventName);
-
- /**
- * Return the list of application contexts enabled in the tracing sessions.
- *
- * @return The application contexts, first indexed by retriever name, then
- * by context name
- */
- Collection<Map.Entry<String, Map<String, Integer>>> getEnabledAppContexts();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent;
-
-/**
- * Simple interface to organize all LTTng log handlers under one type.
- *
- * @author Alexandre Montplaisir
- */
-public interface ILttngHandler {
-
- /**
- * Get the number of events logged by this handler since its inception.
- *
- * @return The number of logged events
- */
- long getEventCount();
-
- /**
- * Close the log handler. Should be called once the application is done
- * logging through it.
- */
- void close();
-}
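
Implementations of this interface typically wrap a native tracepoint, but nothing in the contract requires it. A minimal in-memory sketch (CountingHandler is hypothetical and only counts calls):

    import java.util.concurrent.atomic.AtomicLong;
    import org.lttng.ust.agent.ILttngHandler;

    class CountingHandler implements ILttngHandler {

        private final AtomicLong eventCount = new AtomicLong(0);

        /* Would be called from the logging framework's publish/append path. */
        public void record() {
            eventCount.incrementAndGet();
        }

        @Override
        public long getEventCount() {
            return eventCount.get();
        }

        @Override
        public void close() {
            /* No native resources to release in this sketch. */
        }
    }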
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent;
-
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.util.logging.Handler;
-import java.util.logging.Logger;
-
-/**
- * The central agent managing the JUL and Log4j handlers.
- *
- * @author David Goulet
- * @deprecated Applications are now expected to manage their Logger and Handler
- * objects.
- */
-@Deprecated
-public class LTTngAgent {
-
- private static LTTngAgent instance = null;
-
- /**
- * Public getter to acquire a reference to this singleton object.
- *
- * @return The agent instance
- */
- public static synchronized LTTngAgent getLTTngAgent() {
- if (instance == null) {
- instance = new LTTngAgent();
- }
- return instance;
- }
-
- /**
- * Dispose the agent. Applications should call this once they are done
- * logging. This dispose function is non-static for backwards
- * compatibility purposes.
- */
- @SuppressWarnings("static-method")
- public void dispose() {
- synchronized (LTTngAgent.class) {
- if (instance != null) {
- instance.disposeInstance();
- instance = null;
- }
- }
- return;
- }
-
- private ILttngHandler julHandler = null;
- private ILttngHandler log4jAppender = null;
-
- /**
- * Private constructor. This is a singleton and a reference should be
- * acquired using {@link #getLTTngAgent()}.
- */
- private LTTngAgent() {
- initJulHandler();
- initLog4jAppender();
- }
-
- /**
- * "Destructor" method.
- */
- private void disposeInstance() {
- disposeJulHandler();
- disposeLog4jAppender();
- }
-
- /**
-	 * Create an LTTng-JUL handler, and attach it to the JUL root logger.
- */
- private void initJulHandler() {
- try {
- Class<?> julHandlerClass = Class.forName("org.lttng.ust.agent.jul.LttngLogHandler");
- /*
- * It is safer to use Constructor.newInstance() rather than
- * Class.newInstance(), because it will catch the exceptions thrown
- * by the constructor below (which happens if the Java library is
- * present, but the matching JNI one is not).
- */
- Constructor<?> julHandlerCtor = julHandlerClass.getConstructor();
- julHandler = (ILttngHandler) julHandlerCtor.newInstance();
-
- /* Attach the handler to the root JUL logger */
- Logger.getLogger("").addHandler((Handler) julHandler);
-
- /*
- * If any of the following exceptions happen, it means we could not
-		 * find or initialize LTTng JUL classes. We will not set up LTTng JUL
- * tracing in this case.
- */
- } catch (SecurityException e) {
- } catch (IllegalAccessException e) {
- } catch (IllegalArgumentException e) {
- } catch (ClassNotFoundException e) {
- } catch (NoSuchMethodException e) {
- } catch (InstantiationException e) {
- } catch (InvocationTargetException e) {
- }
- }
-
- /**
-	 * Create an LTTng-log4j appender, and attach it to the log4j root logger.
- */
- private void initLog4jAppender() {
- /*
- * Since Log4j is a 3rd party library, we first need to check if we can
- * load any of its classes.
- */
- if (!testLog4jClasses()) {
- return;
- }
-
- try {
- Class<?> log4jAppenderClass = Class.forName("org.lttng.ust.agent.log4j.LttngLogAppender");
- Constructor<?> log4jAppendCtor = log4jAppenderClass.getConstructor();
- log4jAppender = (ILttngHandler) log4jAppendCtor.newInstance();
-
- /*
- * If any of the following exceptions happen, it means we could not
-			 * find or initialize LTTng log4j classes. We will not set up LTTng
- * log4j tracing in this case.
- */
- } catch (SecurityException e) {
- return;
- } catch (ClassNotFoundException e) {
- return;
- } catch (NoSuchMethodException e) {
- return;
- } catch (IllegalArgumentException e) {
- return;
- } catch (InstantiationException e) {
- return;
- } catch (IllegalAccessException e) {
- return;
- } catch (InvocationTargetException e) {
- return;
- }
-
- /*
- * Attach the appender to the root Log4j logger. Slightly more tricky
- * here, as log4j.Logger is not in the base Java library, and we do not
- * want the "common" package to depend on log4j. So we have to obtain it
- * through reflection too.
- */
- try {
- Class<?> loggerClass = Class.forName("org.apache.log4j.Logger");
- Class<?> appenderClass = Class.forName("org.apache.log4j.Appender");
-
- Method getRootLoggerMethod = loggerClass.getMethod("getRootLogger", (Class<?>[]) null);
- Method addAppenderMethod = loggerClass.getMethod("addAppender", appenderClass);
-
- Object rootLogger = getRootLoggerMethod.invoke(null, (Object[]) null);
- addAppenderMethod.invoke(rootLogger, log4jAppender);
-
- /*
- * We have checked for the log4j library version previously, none of
- * the following exceptions should happen.
- */
- } catch (SecurityException e) {
- throw new IllegalStateException(e);
- } catch (ClassNotFoundException e) {
- throw new IllegalStateException(e);
- } catch (NoSuchMethodException e) {
- throw new IllegalStateException(e);
- } catch (IllegalArgumentException e) {
- throw new IllegalStateException(e);
- } catch (IllegalAccessException e) {
- throw new IllegalStateException(e);
- } catch (InvocationTargetException e) {
- throw new IllegalStateException(e);
- }
- }
-
- /**
-	 * Check if a log4j library >= 1.2.15 is present.
- */
- private static boolean testLog4jClasses() {
- Class<?> loggingEventClass;
-
- try {
- loggingEventClass = Class.forName("org.apache.log4j.spi.LoggingEvent");
- } catch (ClassNotFoundException e) {
- /*
- * Log4j classes not found, no need to create the relevant objects
- */
- return false;
- }
-
- /*
- * Detect capabilities of the log4j library. We only support log4j >=
- * 1.2.15. The getTimeStamp() method was introduced in log4j 1.2.15, so
- * verify that it is available.
- *
-	 * We can't rely on the getPackage().getImplementationVersion() call,
-	 * which retrieves information from the manifest file found in the
-	 * JAR, since the manifest file shipped from upstream is known to be
- * broken in several versions of the library.
- *
- * More info: https://issues.apache.org/bugzilla/show_bug.cgi?id=44370
- */
- try {
- loggingEventClass.getDeclaredMethod("getTimeStamp");
- } catch (NoSuchMethodException e) {
- System.err.println(
- "Warning: The loaded log4j library is too old. Log4j tracing with LTTng will be disabled.");
- return false;
- } catch (SecurityException e) {
- return false;
- }
-
- return true;
- }
-
- /**
- * Detach the JUL handler from its logger and close it.
- */
- private void disposeJulHandler() {
- if (julHandler == null) {
- /* The JUL handler was not activated, we have nothing to do */
- return;
- }
- Logger.getLogger("").removeHandler((Handler) julHandler);
- julHandler.close();
- julHandler = null;
- }
-
- /**
- * Detach the log4j appender from its logger and close it.
- */
- private void disposeLog4jAppender() {
- if (log4jAppender == null) {
- /* The log4j appender was not active, we have nothing to do */
- return;
- }
-
- /*
- * Detach the appender from the log4j root logger. Again, we have to do
- * this via reflection.
- */
- try {
- Class<?> loggerClass = Class.forName("org.apache.log4j.Logger");
- Class<?> appenderClass = Class.forName("org.apache.log4j.Appender");
-
- Method getRootLoggerMethod = loggerClass.getMethod("getRootLogger", (Class<?>[]) null);
- Method removeAppenderMethod = loggerClass.getMethod("removeAppender", appenderClass);
-
- Object rootLogger = getRootLoggerMethod.invoke(null, (Object[]) null);
- removeAppenderMethod.invoke(rootLogger, log4jAppender);
-
- /*
- * We were able to attach the appender previously, we should not
- * have problems here either!
- */
- } catch (SecurityException e) {
- throw new IllegalStateException(e);
- } catch (ClassNotFoundException e) {
- throw new IllegalStateException(e);
- } catch (NoSuchMethodException e) {
- throw new IllegalStateException(e);
- } catch (IllegalArgumentException e) {
- throw new IllegalStateException(e);
- } catch (IllegalAccessException e) {
- throw new IllegalStateException(e);
- } catch (InvocationTargetException e) {
- throw new IllegalStateException(e);
- }
-
- /* Close the appender */
- log4jAppender.close();
- log4jAppender = null;
- }
-
-}
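
For reference, the legacy usage pattern this deprecated class supported looked like the following (a sketch; handler attachment happens implicitly inside getLTTngAgent()):

    import java.util.logging.Logger;
    import org.lttng.ust.agent.LTTngAgent;

    public class LegacyUsage {
        public static void main(String[] args) {
            /* Attaches the LTTng handler/appender to the JUL and log4j root loggers. */
            LTTngAgent agent = LTTngAgent.getLTTngAgent();

            /* Reaches LTTng through the root logger's handler, if a session is active. */
            Logger.getLogger("my.app").info("hello from JUL");

            /* Detach and close the handlers once done logging. */
            agent.dispose();
        }
    }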
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.util.Collection;
-
-import org.lttng.ust.agent.session.EventRule;
-
-/**
- * TCP client listener interface.
- *
- * This interface contains callbacks that are called when the TCP client
- * receives commands from the session daemon. These callbacks define what to
- * do with each command.
- *
- * @author Alexandre Montplaisir
- */
-public interface ILttngTcpClientListener {
-
- /**
- * Callback for the TCP client to notify the listener agent that a request
- * for enabling an event rule was sent from the session daemon.
- *
- * @param eventRule
- * The event rule that was requested to be enabled
- * @return Since we do not track individual sessions, right now this command
- * cannot fail. It will always return true.
- */
- boolean eventEnabled(EventRule eventRule);
-
- /**
- * Callback for the TCP client to notify the listener agent that a request
- * for disabling an event was sent from the session daemon.
- *
- * @param eventName
- * The name of the event that was requested to be disabled.
- * @return True if the command completed successfully, false if we should
- * report an error (event was not enabled, etc.)
- */
- boolean eventDisabled(String eventName);
-
- /**
- * Callback for the TCP client to notify the listener agent that a request
- * for enabling an application-specific context was sent from the session
- * daemon.
- *
- * @param contextRetrieverName
- * The name of the retriever in which the context is present.
- * This is used to namespace the contexts.
- * @param contextName
- * The name of the context that was requested to be enabled
- * @return Since we do not track individual sessions, right now this command
- * cannot fail. It will always return true.
- */
- boolean appContextEnabled(String contextRetrieverName, String contextName);
-
- /**
- * Callback for the TCP client to notify the listener agent that a request
- * for disabling an application-specific context was sent from the session
- * daemon.
- *
- * @param contextRetrieverName
- * The name of the retriever in which the context is present.
- * This is used to namespace the contexts.
- * @param contextName
- * The name of the context that was requested to be disabled.
- * @return True if the command completed successfully, false if we should
- * report an error (context was not previously enabled for example)
- */
- boolean appContextDisabled(String contextRetrieverName, String contextName);
-
- /**
- * List the events that are available in the agent's tracing domain.
- *
- * In Java terms, this means loggers that have at least one LTTng log
- * handler of their corresponding domain attached.
- *
- * @return The list of available events
- */
- Collection<String> listAvailableEvents();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-/**
- * Base class for all response messages sent from the Java agent to the
- * session daemon. Normally sent after a command coming from the session
- * daemon was executed.
- *
- * @author Alexandre Montplaisir
- */
-abstract class LttngAgentResponse {
-
- private static final int INT_SIZE = 4;
-
-	public static final LttngAgentResponse SUCCESS_RESPONSE = new LttngAgentResponse() {
- @Override
- public ReturnCode getReturnCode() {
- return ReturnCode.CODE_SUCCESS_CMD;
- }
- };
-
- public static final LttngAgentResponse FAILURE_RESPONSE = new LttngAgentResponse() {
- @Override
- public ReturnCode getReturnCode() {
- return ReturnCode.CODE_INVALID_CMD;
- }
- };
-
- /**
- * Return codes used in agent responses, to indicate success or different
- * types of failures of the commands.
- */
- protected enum ReturnCode {
-
-		CODE_SUCCESS_CMD(1, "success"),
- CODE_INVALID_CMD(2, "invalid"),
- CODE_UNKNOWN_LOGGER_NAME(3, "unknown logger name");
-
- private final int code;
- private final String toString;
-
- private ReturnCode(int c, String str) {
- code = c;
- toString = str;
- }
-
- public int getCode() {
- return code;
- }
-
- /**
- * Mainly used for debugging. The strings are not sent through the
- * socket.
- */
- @Override
- public String toString() {
- return toString;
- }
- }
-
- /**
- * Get the {@link ReturnCode} that goes with this response. It is expected
- * by the session daemon, but some commands may require more than this
- * in their response.
- *
- * @return The return code
- */
- public abstract ReturnCode getReturnCode();
-
- /**
- * Gets a byte array of the response so that it may be streamed.
- *
- * @return The byte array of the response
- */
- public byte[] getBytes() {
- byte data[] = new byte[INT_SIZE];
- ByteBuffer buf = ByteBuffer.wrap(data);
- buf.order(ByteOrder.BIG_ENDIAN);
- buf.putInt(getReturnCode().getCode());
- return data;
- }
-
- @Override
- public String toString() {
- return "LttngAgentResponse["
- + "code=" + getReturnCode().getCode()
- + ", " + getReturnCode().toString()
- + "]";
- }
-}
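
getBytes() serializes nothing but the 4-byte, big-endian return code. A sketch of the bytes the session daemon receives for a success response (code 1, per the ReturnCode enum above; ResponseWireDemo is a hypothetical name):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.util.Arrays;

    public class ResponseWireDemo {
        public static void main(String[] args) {
            byte[] data = new byte[4];
            ByteBuffer.wrap(data).order(ByteOrder.BIG_ENDIAN).putInt(1);
            System.out.println(Arrays.toString(data)); /* [0, 0, 0, 1] */
        }
    }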
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015-2016 EfficiOS Inc.
- * Copyright (C) 2015-2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.DataOutputStream;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.lang.management.ManagementFactory;
-import java.net.Socket;
-import java.net.UnknownHostException;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.charset.Charset;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-import org.lttng.ust.agent.utils.LttngUstAgentLogger;
-
-/**
- * Client for agents to connect to a local session daemon, using a TCP socket.
- *
- * @author David Goulet
- */
-public class LttngTcpSessiondClient implements Runnable {
-
- private static final String SESSION_HOST = "127.0.0.1";
- private static final String ROOT_PORT_FILE = "/var/run/lttng/agent.port";
- private static final String USER_PORT_FILE = "/.lttng/agent.port";
- private static final Charset PORT_FILE_ENCODING = Charset.forName("UTF-8");
-
- private static final int PROTOCOL_MAJOR_VERSION = 2;
- private static final int PROTOCOL_MINOR_VERSION = 0;
-
-	/** Latch counted down once registration with the session daemon is done. */
- private final CountDownLatch registrationLatch = new CountDownLatch(1);
-
- private Socket sessiondSock;
- private volatile boolean quit = false;
-
- private DataInputStream inFromSessiond;
- private DataOutputStream outToSessiond;
-
- private final ILttngTcpClientListener logAgent;
- private final int domainValue;
- private final boolean isRoot;
-
- /**
- * Constructor
- *
- * @param logAgent
- * The listener this client will operate on, typically an LTTng
- * agent.
- * @param domainValue
- * The integer to send to the session daemon representing the
- * tracing domain to handle.
- * @param isRoot
- * True if this client should connect to the root session daemon,
- * false if it should connect to the user one.
- */
- public LttngTcpSessiondClient(ILttngTcpClientListener logAgent, int domainValue, boolean isRoot) {
- this.logAgent = logAgent;
- this.domainValue = domainValue;
- this.isRoot = isRoot;
- }
-
- /**
- * Wait until this client has successfully established a connection to its
- * target session daemon.
- *
- * @param seconds
- * A timeout in seconds after which this method will return
- * anyway.
-	 * @return True if the client actually established the connection, false
- * if we returned because the timeout has elapsed or the thread was
- * interrupted.
- */
- public boolean waitForConnection(int seconds) {
- try {
- return registrationLatch.await(seconds, TimeUnit.SECONDS);
- } catch (InterruptedException e) {
- return false;
- }
- }
-
- @Override
- public void run() {
- for (;;) {
- if (this.quit) {
- break;
- }
-
- try {
-
- /*
- * Connect to the session daemon before anything else.
- */
- log("Connecting to sessiond");
- connectToSessiond();
-
- /*
- * Register to the session daemon as the Java component of the
- * UST application.
- */
- log("Registering to sessiond");
- registerToSessiond();
-
- /*
- * Block on socket receive and wait for command from the
- * session daemon. This will return if and only if there is a
- * fatal error or the socket closes.
- */
- log("Waiting on sessiond commands...");
- handleSessiondCmd();
- } catch (UnknownHostException uhe) {
- uhe.printStackTrace();
- /*
- * Terminate agent thread.
- */
- close();
- } catch (IOException ioe) {
- /*
- * I/O exception may have been triggered by a session daemon
- * closing the socket. Close our own socket and
- * retry connecting after a delay.
- */
- try {
- if (this.sessiondSock != null) {
- this.sessiondSock.close();
- }
- Thread.sleep(3000);
- } catch (InterruptedException e) {
- /*
- * Retry immediately if sleep is interrupted.
- */
- } catch (IOException closeioe) {
- closeioe.printStackTrace();
- /*
- * Terminate agent thread.
- */
- close();
- }
- }
- }
- }
-
- /**
- * Dispose this client and close any socket connection it may hold.
- */
- public void close() {
- log("Closing client");
- this.quit = true;
-
- try {
- if (this.sessiondSock != null) {
- this.sessiondSock.close();
- }
- } catch (IOException e) {
- e.printStackTrace();
- }
- }
-
- private void connectToSessiond() throws IOException {
- int rootPort = getPortFromFile(ROOT_PORT_FILE);
- int userPort = getPortFromFile(getHomePath() + USER_PORT_FILE);
-
- /*
- * Check for the edge case of both files existing but pointing to the
- * same port. In this case, let the root client handle it.
- */
- if ((rootPort != 0) && (rootPort == userPort) && (!isRoot)) {
- log("User and root config files both point to port " + rootPort +
- ". Letting the root client handle it.");
- throw new IOException();
- }
-
- int portToUse = (isRoot ? rootPort : userPort);
-
- if (portToUse == 0) {
- /* No session daemon available. Stop and retry later. */
- throw new IOException();
- }
-
- this.sessiondSock = new Socket(SESSION_HOST, portToUse);
- this.inFromSessiond = new DataInputStream(sessiondSock.getInputStream());
- this.outToSessiond = new DataOutputStream(sessiondSock.getOutputStream());
- }
-
- private static String getHomePath() {
- /*
- * The environment variable LTTNG_HOME overrides HOME if
- * defined.
- */
- String homePath = System.getenv("LTTNG_HOME");
-
- if (homePath == null) {
- homePath = System.getProperty("user.home");
- }
- return homePath;
- }
-
- /**
-	 * Read the port number from the file created by the session daemon.
-	 *
-	 * @return The port value if found, else 0.
- */
- private static int getPortFromFile(String path) throws IOException {
- BufferedReader br = null;
-
- try {
- br = new BufferedReader(new InputStreamReader(new FileInputStream(path), PORT_FILE_ENCODING));
- String line = br.readLine();
- if (line == null) {
- /* File exists but is empty. */
- return 0;
- }
-
- int port = Integer.parseInt(line, 10);
- if (port < 0 || port > 65535) {
- /* Invalid value. Ignore. */
- port = 0;
- }
- return port;
-
- } catch (NumberFormatException e) {
- /* File contained something that was not a number. */
- return 0;
- } catch (FileNotFoundException e) {
- /* No port available. */
- return 0;
- } finally {
- if (br != null) {
- br.close();
- }
- }
- }
-
- private void registerToSessiond() throws IOException {
- byte data[] = new byte[16];
- ByteBuffer buf = ByteBuffer.wrap(data);
- String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
-
- buf.putInt(domainValue);
- buf.putInt(Integer.parseInt(pid));
- buf.putInt(PROTOCOL_MAJOR_VERSION);
- buf.putInt(PROTOCOL_MINOR_VERSION);
- this.outToSessiond.write(data, 0, data.length);
- this.outToSessiond.flush();
- }
-
- /**
- * Handle session command from the session daemon.
- */
- private void handleSessiondCmd() throws IOException {
- /* Data read from the socket */
- byte inputData[] = null;
- /* Reply data written to the socket, sent to the sessiond */
- LttngAgentResponse response;
-
- while (true) {
- /* Get header from session daemon. */
- SessiondCommandHeader cmdHeader = recvHeader();
-
- if (cmdHeader.getDataSize() > 0) {
- inputData = recvPayload(cmdHeader);
- }
-
- switch (cmdHeader.getCommandType()) {
- case CMD_REG_DONE:
- {
- /*
-				 * Count down the registration latch, meaning registration is
-				 * done and we can proceed with tracing.
- */
- registrationLatch.countDown();
- /*
- * We don't send any reply to the registration done command.
- * This just marks the end of the initial session setup.
- */
- log("Registration done");
- continue;
- }
- case CMD_LIST:
- {
- SessiondCommand listLoggerCmd = new SessiondListLoggersCommand();
- response = listLoggerCmd.execute(logAgent);
- log("Received list loggers command");
- break;
- }
- case CMD_EVENT_ENABLE:
- {
- if (inputData == null) {
- /* Invalid command */
- response = LttngAgentResponse.FAILURE_RESPONSE;
- break;
- }
- SessiondCommand enableEventCmd = new SessiondEnableEventCommand(inputData);
- response = enableEventCmd.execute(logAgent);
- log("Received enable event command: " + enableEventCmd.toString());
- break;
- }
- case CMD_EVENT_DISABLE:
- {
- if (inputData == null) {
- /* Invalid command */
- response = LttngAgentResponse.FAILURE_RESPONSE;
- break;
- }
- SessiondCommand disableEventCmd = new SessiondDisableEventCommand(inputData);
- response = disableEventCmd.execute(logAgent);
- log("Received disable event command: " + disableEventCmd.toString());
- break;
- }
- case CMD_APP_CTX_ENABLE:
- {
- if (inputData == null) {
-					/* This command expects a payload, invalid command */
- response = LttngAgentResponse.FAILURE_RESPONSE;
- break;
- }
- SessiondCommand enableAppCtxCmd = new SessiondEnableAppContextCommand(inputData);
- response = enableAppCtxCmd.execute(logAgent);
- log("Received enable app-context command");
- break;
- }
- case CMD_APP_CTX_DISABLE:
- {
- if (inputData == null) {
-					/* This command expects a payload, invalid command */
- response = LttngAgentResponse.FAILURE_RESPONSE;
- break;
- }
- SessiondCommand disableAppCtxCmd = new SessiondDisableAppContextCommand(inputData);
- response = disableAppCtxCmd.execute(logAgent);
- log("Received disable app-context command");
- break;
- }
- default:
- {
- /* Unknown command, send empty reply */
- response = null;
- log("Received unknown command, ignoring");
- break;
- }
- }
-
- /* Send response to the session daemon. */
- byte[] responseData;
- if (response == null) {
- responseData = new byte[4];
- ByteBuffer buf = ByteBuffer.wrap(responseData);
- buf.order(ByteOrder.BIG_ENDIAN);
- } else {
- log("Sending response: " + response.toString());
- responseData = response.getBytes();
- }
- this.outToSessiond.write(responseData, 0, responseData.length);
- this.outToSessiond.flush();
- }
- }
-
- /**
-	 * Receive header data from the session daemon, using a static buffer of
-	 * the size of an LTTng command header.
- */
- private SessiondCommandHeader recvHeader() throws IOException {
- byte data[] = new byte[SessiondCommandHeader.HEADER_SIZE];
- int bytesLeft = data.length;
- int bytesOffset = 0;
-
- while (bytesLeft > 0) {
- int bytesRead = this.inFromSessiond.read(data, bytesOffset, bytesLeft);
-
- if (bytesRead < 0) {
- throw new IOException();
- }
- bytesLeft -= bytesRead;
- bytesOffset += bytesRead;
- }
- return new SessiondCommandHeader(data);
- }
-
- /**
- * Receive payload from the session daemon. This MUST be done after a
-	 * recvHeader() so the header values of a command are known.
-	 *
-	 * The caller SHOULD first use isPayload(), which returns true if a payload
-	 * is expected after the header.
- */
- private byte[] recvPayload(SessiondCommandHeader headerCmd) throws IOException {
- byte payload[] = new byte[(int) headerCmd.getDataSize()];
- int bytesLeft = payload.length;
- int bytesOffset = 0;
-
- /* Failsafe check so we don't waste our time reading 0 bytes. */
- if (bytesLeft == 0) {
- return null;
- }
-
- while (bytesLeft > 0) {
- int bytesRead = inFromSessiond.read(payload, bytesOffset, bytesLeft);
-
- if (bytesRead < 0) {
- throw new IOException();
- }
- bytesLeft -= bytesRead;
- bytesOffset += bytesRead;
- }
- return payload;
- }
-
- /**
- * Wrapper for this class's logging, adds the connection's characteristics
- * to help differentiate between multiple TCP clients.
- */
- private void log(String message) {
- LttngUstAgentLogger.log(getClass(),
- "(root=" + isRoot + ", domain=" + domainValue + ") " + message);
- }
-}
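
recvHeader() and recvPayload() both loop until the requested number of bytes has arrived, because a single read() on a socket stream may return fewer bytes than asked. The standard library offers the same guarantee through DataInputStream.readFully(); a sketch of the equivalent helper (ReadFullyDemo is hypothetical):

    import java.io.DataInputStream;
    import java.io.IOException;

    final class ReadFullyDemo {
        /* Blocks until 'length' bytes are read; throws EOFException on early EOF. */
        static byte[] readExactly(DataInputStream in, int length) throws IOException {
            byte[] data = new byte[length];
            in.readFully(data);
            return data;
        }
    }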
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015-2016 EfficiOS Inc.
- * Copyright (C) 2015-2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.charset.Charset;
-
-/**
- * Base class to represent all commands sent from the session daemon to the Java
- * agent. The agent is then expected to execute the command and provide a
- * response.
- *
- * @author Alexandre Montplaisir
- */
-abstract class SessiondCommand {
-
- /**
- * Encoding that should be used for the strings in the sessiond agent
- * protocol on the socket.
- */
- protected static final Charset SESSIOND_PROTOCOL_CHARSET = Charset.forName("UTF-8");
-
- enum CommandType {
- /** List logger(s). */
- CMD_LIST(1),
- /** Enable logger by name. */
- CMD_EVENT_ENABLE(2),
- /** Disable logger by name. */
- CMD_EVENT_DISABLE(3),
- /** Registration done */
- CMD_REG_DONE(4),
- /** Enable application context */
- CMD_APP_CTX_ENABLE(5),
- /** Disable application context */
- CMD_APP_CTX_DISABLE(6);
-
- private int code;
-
- private CommandType(int c) {
- code = c;
- }
-
- public int getCommandType() {
- return code;
- }
- }
-
- /**
- * Execute the command handler's action on the specified tracing agent.
- *
- * @param agent
- * The agent on which to execute the command
- * @return If the command completed successfully or not
- */
- public abstract LttngAgentResponse execute(ILttngTcpClientListener agent);
-
- /**
- * Utility method to read agent-protocol strings passed on the socket. The
- * buffer will contain a 32-bit integer representing the length, immediately
- * followed by the string itself.
- *
- * @param buffer
- * The ByteBuffer from which to read. It should already be setup
- * and positioned where the read should begin.
- * @return The string that was read, or <code>null</code> if it was badly
- * formatted.
- */
- protected static String readNextString(ByteBuffer buffer) {
- int nbBytes = buffer.getInt();
- if (nbBytes < 0) {
- /* The string length should be positive */
- return null;
- }
- if (nbBytes == 0) {
- /* The string is explicitly an empty string */
- return "";
- }
-
- byte[] stringBytes = new byte[nbBytes];
- buffer.get(stringBytes);
- return new String(stringBytes, SESSIOND_PROTOCOL_CHARSET).trim();
- }
-}
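
readNextString() expects a 32-bit length prefix immediately followed by the raw UTF-8 bytes. A round-trip sketch of that framing (StringFramingDemo is hypothetical; big-endian, matching the rest of the sessiond protocol):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.nio.charset.Charset;

    public class StringFramingDemo {
        public static void main(String[] args) {
            Charset utf8 = Charset.forName("UTF-8");
            byte[] str = "mycontext".getBytes(utf8);

            /* Encode: 4-byte length, then the string bytes. */
            ByteBuffer buf = ByteBuffer.allocate(4 + str.length).order(ByteOrder.BIG_ENDIAN);
            buf.putInt(str.length);
            buf.put(str);

            /* Decode, as readNextString() does. */
            buf.flip();
            byte[] out = new byte[buf.getInt()];
            buf.get(out);
            System.out.println(new String(out, utf8)); /* mycontext */
        }
    }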
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-import org.lttng.ust.agent.client.SessiondCommand.CommandType;
-
-/**
- * Header of session daemon commands.
- *
- * @author Alexandre Montplaisir
- * @author David Goulet
- */
-class SessiondCommandHeader {
-
- /** ABI size of command header. */
- public static final int HEADER_SIZE = 16;
-
- /** Payload size in bytes following this header. */
- private final long dataSize;
-
- /** Command type. */
- private final CommandType cmd;
-
- public SessiondCommandHeader(byte[] data) {
- ByteBuffer buf = ByteBuffer.wrap(data);
- buf.order(ByteOrder.BIG_ENDIAN);
-
- dataSize = buf.getLong();
- cmd = CommandType.values()[buf.getInt() - 1];
- buf.getInt(); // command version, currently unused
- }
-
- public long getDataSize() {
- return dataSize;
- }
-
- public CommandType getCommandType() {
- return cmd;
- }
-}
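
The 16-byte header is thus a big-endian 64-bit payload size, a 32-bit command code (1-based on the wire, hence the `- 1` when indexing the enum), and a 32-bit command version that is currently ignored. A sketch building the header of a CMD_LIST command (HeaderDemo is hypothetical):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    public class HeaderDemo {
        public static void main(String[] args) {
            ByteBuffer buf = ByteBuffer.allocate(16).order(ByteOrder.BIG_ENDIAN);
            buf.putLong(0); /* CMD_LIST carries no payload. */
            buf.putInt(1);  /* CMD_LIST, per SessiondCommand.CommandType. */
            buf.putInt(0);  /* Command version, unused. */
            /* buf.array() is what recvHeader() would consume off the socket. */
        }
    }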
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-/**
- * Session daemon command indicating to the Java agent that an
- * application-specific context was disabled in the tracing session.
- *
- * @author Alexandre Montplaisir
- */
-class SessiondDisableAppContextCommand extends SessiondCommand {
-
- private final String retrieverName;
- private final String contextName;
-
- private final boolean commandIsValid;
-
- public SessiondDisableAppContextCommand(byte[] data) {
- if (data == null) {
- throw new IllegalArgumentException();
- }
- ByteBuffer buf = ByteBuffer.wrap(data);
- buf.order(ByteOrder.BIG_ENDIAN);
-
- /*
- * The buffer contains the retriever name first, followed by the
- * context's name.
- */
- retrieverName = readNextString(buf);
- contextName = readNextString(buf);
-
- /* If any of these strings were null then the command was invalid */
- commandIsValid = ((retrieverName != null) && (contextName != null));
- }
-
- @Override
- public LttngAgentResponse execute(ILttngTcpClientListener agent) {
- if (!commandIsValid) {
- return LttngAgentResponse.FAILURE_RESPONSE;
- }
-
- boolean success = agent.appContextDisabled(retrieverName, contextName);
-		return (success ? LttngAgentResponse.SUCCESS_RESPONSE : DISABLE_APP_CONTEXT_FAILURE_RESPONSE);
- }
-
- /**
- * Response sent when the disable-context command asks to disable an
- * unknown context name.
- */
- private static final LttngAgentResponse DISABLE_APP_CONTEXT_FAILURE_RESPONSE = new LttngAgentResponse() {
- @Override
- public ReturnCode getReturnCode() {
- /* Same return code used for unknown event/logger names */
- return ReturnCode.CODE_UNKNOWN_LOGGER_NAME;
- }
- };
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-/**
- * Session daemon command indicating to the Java agent that some events were
- * disabled in the tracing session.
- *
- * @author Alexandre Montplaisir
- * @author David Goulet
- */
-class SessiondDisableEventCommand extends SessiondCommand {
-
- /**
- * Response sent when the disable-event command asks to disable an
- * unknown event.
- */
- private static final LttngAgentResponse DISABLE_EVENT_FAILURE_RESPONSE = new LttngAgentResponse() {
- @Override
- public ReturnCode getReturnCode() {
- return ReturnCode.CODE_UNKNOWN_LOGGER_NAME;
- }
- };
-
- /** Event name to disable from the tracing session */
- private final String eventName;
-
- public SessiondDisableEventCommand(byte[] data) {
- if (data == null) {
- throw new IllegalArgumentException();
- }
- ByteBuffer buf = ByteBuffer.wrap(data);
- buf.order(ByteOrder.BIG_ENDIAN);
- eventName = new String(data, SESSIOND_PROTOCOL_CHARSET).trim();
- }
-
- @Override
- public LttngAgentResponse execute(ILttngTcpClientListener agent) {
- boolean success = agent.eventDisabled(this.eventName);
-		return (success ? LttngAgentResponse.SUCCESS_RESPONSE : DISABLE_EVENT_FAILURE_RESPONSE);
- }
-
- @Override
- public String toString() {
- return "SessiondDisableEventCommand["
- + "eventName=" + eventName
- +"]";
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-/**
- * Session daemon command indicating to the Java agent that an
- * application-specific context was enabled in the tracing session.
- *
- * @author Alexandre Montplaisir
- */
-class SessiondEnableAppContextCommand extends SessiondCommand {
-
- private final String retrieverName;
- private final String contextName;
-
- private final boolean commandIsValid;
-
- public SessiondEnableAppContextCommand(byte[] data) {
- if (data == null) {
- throw new IllegalArgumentException();
- }
- ByteBuffer buf = ByteBuffer.wrap(data);
- buf.order(ByteOrder.BIG_ENDIAN);
-
- /*
- * The buffer contains the retriever name first, followed by the
- * context's name.
- */
- retrieverName = readNextString(buf);
- contextName = readNextString(buf);
-
- /* If any of these strings were null then the command was invalid */
- commandIsValid = ((retrieverName != null) && (contextName != null));
- }
-
- @Override
- public LttngAgentResponse execute(ILttngTcpClientListener agent) {
- if (!commandIsValid) {
- return LttngAgentResponse.FAILURE_RESPONSE;
- }
-
- boolean success = agent.appContextEnabled(retrieverName, contextName);
-		return (success ? LttngAgentResponse.SUCCESS_RESPONSE : LttngAgentResponse.FAILURE_RESPONSE);
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-
-import org.lttng.ust.agent.session.EventRule;
-import org.lttng.ust.agent.session.LogLevelSelector;
-
-/**
- * Session daemon command indicating to the Java agent that some events were
- * enabled in the tracing session.
- *
- * @author Alexandre Montplaisir
- * @author David Goulet
- */
-class SessiondEnableEventCommand extends SessiondCommand {
-
- /** Fixed event name length. Value defined by the lttng agent protocol. */
- private static final int EVENT_NAME_LENGTH = 256;
-
- private final boolean commandIsValid;
-
- /* Parameters of the event rule being enabled */
- private final String eventName;
- private final LogLevelSelector logLevelFilter;
- private final String filterString;
-
- public SessiondEnableEventCommand(byte[] data) {
- if (data == null) {
- throw new IllegalArgumentException();
- }
- ByteBuffer buf = ByteBuffer.wrap(data);
- buf.order(ByteOrder.BIG_ENDIAN);
- int logLevel = buf.getInt();
- int logLevelType = buf.getInt();
- logLevelFilter = new LogLevelSelector(logLevel, logLevelType);
-
- /* Read the event name */
- byte[] eventNameBytes = new byte[EVENT_NAME_LENGTH];
- buf.get(eventNameBytes);
- eventName = new String(eventNameBytes, SESSIOND_PROTOCOL_CHARSET).trim();
-
- /* Read the filter string */
- filterString = readNextString(buf);
-
- /* The command was invalid if the string could not be read correctly */
- commandIsValid = (filterString != null);
- }
-
- @Override
- public LttngAgentResponse execute(ILttngTcpClientListener agent) {
- if (!commandIsValid) {
- return LttngAgentResponse.FAILURE_RESPONSE;
- }
-
- EventRule rule = new EventRule(eventName, logLevelFilter, filterString);
- boolean success = agent.eventEnabled(rule);
-		return (success ? LttngAgentResponse.SUCCESS_RESPONSE : LttngAgentResponse.FAILURE_RESPONSE);
- }
-
- @Override
- public String toString() {
- return "SessiondEnableEventCommand["
- + "eventName=" + eventName
- + ", logLevel=" + logLevelFilter.toString()
- + ", filterString=" + filterString
- +"]";
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.client;
-
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.util.Collection;
-
-/**
- * Session daemon command asking the Java agent to list its registered loggers,
- * which corresponds to event names in the tracing session.
- *
- * @author Alexandre Montplaisir
- * @author David Goulet
- */
-class SessiondListLoggersCommand extends SessiondCommand {
-
- @Override
- public LttngAgentResponse execute(ILttngTcpClientListener agent) {
- final Collection<String> loggerList = agent.listAvailableEvents();
- return new SessiondListLoggersResponse(loggerList);
- }
-
- private static class SessiondListLoggersResponse extends LttngAgentResponse {
-
- private final static int SIZE = 12;
-
- private final Collection<String> loggers;
-
- public SessiondListLoggersResponse(Collection<String> loggers) {
- this.loggers = loggers;
- }
-
- @Override
- public ReturnCode getReturnCode() {
- /* This command can't really fail */
- return ReturnCode.CODE_SUCCESS_CMD;
- }
-
- @Override
- public byte[] getBytes() {
- /*
- * Compute the data size, which is the number of bytes of each
- * encoded string, +1 per string for the \0
- */
- int dataSize = 0;
- for (String logger : loggers) {
- dataSize += logger.getBytes(SESSIOND_PROTOCOL_CHARSET).length + 1;
- }
-
- /* Prepare the buffer */
- byte data[] = new byte[SIZE + dataSize];
- ByteBuffer buf = ByteBuffer.wrap(data);
- buf.order(ByteOrder.BIG_ENDIAN);
-
- /* Write the header section of the response */
- buf.putInt(getReturnCode().getCode());
- buf.putInt(dataSize);
- buf.putInt(loggers.size());
-
- /* Write the payload */
- for (String logger : loggers) {
- buf.put(logger.getBytes(SESSIOND_PROTOCOL_CHARSET));
-				/* NUL terminator byte after the logger name. */
- buf.put((byte) 0x0);
- }
- return data;
- }
- }
-
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.context;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * The singleton manager of {@link IContextInfoRetriever} objects.
- *
- * @author Alexandre Montplaisir
- */
-public final class ContextInfoManager {
-
- private static final String SHARED_LIBRARY_NAME = "lttng-ust-context-jni";
-
- private static final Pattern VALID_CONTEXT_NAME_PATTERN = Pattern.compile("^[a-zA-Z0-9_\\.]+$");
-
- private static ContextInfoManager instance;
-
- private final Map<String, IContextInfoRetriever> contextInfoRetrievers = new ConcurrentHashMap<String, IContextInfoRetriever>();
-	private final Map<String, Long> contextInfoRetrieverRefs = new HashMap<String, Long>();
-
- /**
- * Lock used to keep the two maps above in sync when retrievers are
- * registered or unregistered.
- */
- private final Object retrieverLock = new Object();
-
- /** Singleton class, constructor should not be accessed directly */
- private ContextInfoManager() {
- }
-
- /**
- * Get the singleton instance.
- *
- * <p>
- * Usage of this class requires the "liblttng-ust-context-jni.so" native
- * library to be present on the system and available (passing
- * -Djava.library.path=path to the JVM may be needed).
- * </p>
- *
- * @return The singleton instance
- * @throws IOException
- * If the shared library cannot be found.
- * @throws SecurityException
-	 *             We will forward any SecurityException that may be thrown when
- * trying to load the JNI library.
- */
- public static synchronized ContextInfoManager getInstance() throws IOException, SecurityException {
- if (instance == null) {
- try {
- System.loadLibrary(SHARED_LIBRARY_NAME);
- } catch (UnsatisfiedLinkError e) {
- throw new IOException(e);
- }
- instance = new ContextInfoManager();
- }
- return instance;
- }
-
- /**
- * Register a new context info retriever.
- *
- * <p>
- * Each context info retriever is registered with a given "retriever name",
- * which specifies the namespace of the context elements. This name is
-	 * specified separately from the retriever objects, which allows
-	 * registering the same retriever under different namespaces, for example.
- * </p>
- *
- * <p>
- * If the method returns false (indicating registration failure), then the
- * retriever object will *not* be used for context information.
- * </p>
- *
- * @param retrieverName
-	 *            The name to register the context retriever object with.
- * @param contextInfoRetriever
- * The context info retriever to register
- * @return True if the retriever was successfully registered, false if there
- * was an error, for example if a retriever is already registered
- * with that name.
- */
- public boolean registerContextInfoRetriever(String retrieverName, IContextInfoRetriever contextInfoRetriever) {
- synchronized (retrieverLock) {
- if (!validateRetrieverName(retrieverName)) {
- return false;
- }
-
- if (contextInfoRetrievers.containsKey(retrieverName)) {
- /*
- * There is already a retriever registered with that name,
- * refuse the new registration.
- */
- return false;
- }
- /*
- * Inform LTTng-UST of the new retriever. The names have to start
- * with "$app." on the UST side!
- */
- long ref = LttngContextApi.registerProvider("$app." + retrieverName);
- if (ref == 0) {
- return false;
- }
-
- contextInfoRetrievers.put(retrieverName, contextInfoRetriever);
-			contextInfoRetrieverRefs.put(retrieverName, Long.valueOf(ref));
-
- return true;
- }
- }
-
- /**
- * Unregister a previously added context info retriever.
- *
- * This method has no effect if the retriever was not already registered.
- *
- * @param retrieverName
- * The context info retriever to unregister
- * @return True if unregistration was successful, false if there was an
- * error
- */
- public boolean unregisterContextInfoRetriever(String retrieverName) {
- synchronized (retrieverLock) {
- if (!contextInfoRetrievers.containsKey(retrieverName)) {
- /*
- * There was no retriever registered with that name.
- */
- return false;
- }
- contextInfoRetrievers.remove(retrieverName);
-			long ref = contextInfoRetrieverRefs.remove(retrieverName).longValue();
-
- /* Unregister the retriever on the UST side too */
- LttngContextApi.unregisterProvider(ref);
-
- return true;
- }
- }
-
- /**
- * Return the context info retriever object registered with the given name.
- *
- * @param retrieverName
- * The retriever name to look for
- * @return The corresponding retriever object, or <code>null</code> if there
- * was none
- */
- public IContextInfoRetriever getContextInfoRetriever(String retrieverName) {
- /*
- * Note that this method does not take the retrieverLock, it lets
- * concurrent threads access the ConcurrentHashMap directly.
- *
- * It's fine for a get() to happen during a registration or
- * unregistration, it's first-come-first-serve.
- */
- return contextInfoRetrievers.get(retrieverName);
- }
-
- /**
- * Validate that the given retriever name contains only the allowed
- * characters, which are alphanumerical characters, period "." and
- * underscore "_". The name must also not start with a number.
- */
- private static boolean validateRetrieverName(String contextName) {
- if (contextName.isEmpty()) {
- return false;
- }
-
- /* First character must not be a number */
- if (Character.isDigit(contextName.charAt(0))) {
- return false;
- }
-
- /* Validate the other characters of the string */
- Matcher matcher = VALID_CONTEXT_NAME_PATTERN.matcher(contextName);
- return matcher.matches();
- }
-}
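
Typical application-side usage of the manager above, assuming the IContextInfoRetriever interface exposes the single retrieveContextInfo(String) method used by the serializer; the retriever name and context values are chosen for illustration:

    import org.lttng.ust.agent.context.ContextInfoManager;
    import org.lttng.ust.agent.context.IContextInfoRetriever;

    public class ContextUsage {
        public static void main(String[] args) throws Exception {
            /* Throws IOException if the JNI library cannot be loaded. */
            ContextInfoManager manager = ContextInfoManager.getInstance();

            IContextInfoRetriever retriever = new IContextInfoRetriever() {
                @Override
                public Object retrieveContextInfo(String key) {
                    /* Exposed to the tracer as "$app.myprovider:user_id". */
                    return "user_id".equals(key) ? Integer.valueOf(42) : null;
                }
            };

            manager.registerContextInfoRetriever("myprovider", retriever);
            /* ... application runs and records contexts ... */
            manager.unregisterContextInfoRetriever("myprovider");
        }
    }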
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.context;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.nio.ByteOrder;
-import java.nio.charset.Charset;
-import java.util.Collection;
-import java.util.Map;
-
-import org.lttng.ust.agent.utils.LttngUstAgentLogger;
-
-/**
- * This class is used to serialize the list of "context info" objects to pass
- * through JNI.
- *
- * The protocol expects two byte array parameters, which are contained here in
- * the {@link SerializedContexts} inner class.
- *
- * The first byte array is called the "entries array", and contains fixed-size
- * entries, one per context element.
- *
- * The second one is the "strings array", it is of variable length and used to
- * hold the variable-length strings. Each one of these strings is formatted as a
- * UTF-8 C-string, meaning it will end with a "\0" byte to indicate its end.
- * Entries in the first array may refer to offsets in the second array to point
- * to relevant strings.
- *
- * The fixed-size entries in the entries array contain the following elements
- * (size in bytes in parentheses):
- *
- * <ul>
- * <li>The offset in the strings array pointing to the full context name, like
- * "$app.myprovider:mycontext" (4)</li>
- * <li>The context value type (1)</li>
- * <li>The context value itself (8)</li>
- * </ul>
- *
- * The context value type will indicate how many bytes are used for the value.
- * If it is of String type, then we use 4 bytes to represent the offset in
- * the strings array.
- *
- * So the total size of each entry is 13 bytes. All unused bytes (for context
- * values shorter than 8 bytes for example) will be zeroed.
- *
- * @author Alexandre Montplaisir
- */
-public class ContextInfoSerializer {
-
- private enum DataType {
- NULL(0),
- INTEGER(1),
- LONG(2),
- DOUBLE(3),
- FLOAT(4),
- BYTE(5),
- SHORT(6),
- BOOLEAN(7),
- STRING(8);
-
- private final byte value;
-
- private DataType(int value) {
- this.value = (byte) value;
- }
-
- public byte getValue() {
- return value;
- }
- }
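
Combining the class Javadoc's layout with the type tags above, one 13-byte entry for an INTEGER context can be sketched as follows (EntryLayoutDemo is a hypothetical name; native byte order, as used by the serializer):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    class EntryLayoutDemo {
        static byte[] integerEntry(int nameOffset, int value) {
            ByteBuffer entry = ByteBuffer.allocate(13).order(ByteOrder.nativeOrder());
            entry.putInt(nameOffset); /* Offset of the context name in the strings array. */
            entry.put((byte) 1);      /* DataType.INTEGER tag. */
            entry.putInt(value);      /* 4 of the 8 value bytes used... */
            entry.putInt(0);          /* ...the remaining 4 are zeroed. */
            return entry.array();
        }
    }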
-
- /**
- * Class used to wrap the two byte arrays returned by
- * {@link #queryAndSerializeRequestedContexts}.
- */
- public static class SerializedContexts {
-
- private final byte[] contextEntries;
- private final byte[] contextStrings;
-
- /**
- * Constructor
- *
- * @param entries
-		 *            Array for the fixed-size context entries.
-		 * @param strings
-		 *            Array for the variable-length strings.
- */
- public SerializedContexts(byte[] entries, byte[] strings) {
- contextEntries = entries;
- contextStrings = strings;
- }
-
- /**
- * @return The entries array
- */
- public byte[] getEntriesArray() {
- return contextEntries;
- }
-
- /**
- * @return The strings array
- */
- public byte[] getStringsArray() {
- return contextStrings;
- }
- }
-
- private static final String UST_APP_CTX_PREFIX = "$app.";
- private static final int ENTRY_LENGTH = 13;
- private static final ByteOrder NATIVE_ORDER = ByteOrder.nativeOrder();
- private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
- private static final SerializedContexts EMPTY_CONTEXTS = new SerializedContexts(new byte[0], new byte[0]);
-
- /**
- * From the list of requested contexts in the tracing session, look them up
- * in the {@link ContextInfoManager}, retrieve the available ones, and
- * serialize them into byte arrays.
- *
- * @param enabledContexts
- * The contexts that are enabled in the tracing session (indexed
- * first by retriever name, then by context names). Should come
- * from the LTTng Agent.
- * @return The serialized byte arrays, representing the intersection of the
- * requested and available contexts.
- */
- public static SerializedContexts queryAndSerializeRequestedContexts(Collection<Map.Entry<String, Map<String, Integer>>> enabledContexts) {
- if (enabledContexts.isEmpty()) {
- /* Early return if there is no requested context information */
- return EMPTY_CONTEXTS;
- }
-
- ContextInfoManager contextManager;
- try {
- contextManager = ContextInfoManager.getInstance();
- } catch (IOException e) {
- /*
- * The JNI library is not available, do not send any context
- * information. No retriever could have been defined anyway.
- */
- return EMPTY_CONTEXTS;
- }
-
- /* Compute the total number of contexts (flatten the map) */
- int totalArraySize = 0;
- for (Map.Entry<String, Map<String, Integer>> contexts : enabledContexts) {
- totalArraySize += contexts.getValue().size() * ENTRY_LENGTH;
- }
-
- /* Prepare the ByteBuffer that will generate the "entries" array */
- ByteBuffer entriesBuffer = ByteBuffer.allocate(totalArraySize);
- entriesBuffer.order(NATIVE_ORDER);
- entriesBuffer.clear();
-
- /* Prepare the streams that will generate the "strings" array */
- ByteArrayOutputStream stringsBaos = new ByteArrayOutputStream();
- DataOutputStream stringsDos = new DataOutputStream(stringsBaos);
-
- try {
- for (Map.Entry<String, Map<String, Integer>> entry : enabledContexts) {
- String requestedRetrieverName = entry.getKey();
- Map<String, Integer> requestedContexts = entry.getValue();
-
- IContextInfoRetriever retriever = contextManager.getContextInfoRetriever(requestedRetrieverName);
-
- for (String requestedContext : requestedContexts.keySet()) {
- Object contextInfo;
- if (retriever == null) {
- contextInfo = null;
- } else {
- contextInfo = retriever.retrieveContextInfo(requestedContext);
- /*
- * 'contextInfo' can still be null here, which would
- * indicate the retriever does not supply this context.
- * We will still write this information so that the
- * tracer can know about it.
- */
- }
-
- /* Serialize the result to the buffers */
- // FIXME Eventually pass the retriever name only once?
- String fullContextName = (UST_APP_CTX_PREFIX + requestedRetrieverName + ':' + requestedContext);
- byte[] strArray = fullContextName.getBytes(UTF8_CHARSET);
-
- entriesBuffer.putInt(stringsDos.size());
- stringsDos.write(strArray);
- stringsDos.writeChar('\0');
-
- LttngUstAgentLogger.log(ContextInfoSerializer.class,
- "ContextInfoSerializer: Context to be sent through JNI: " + fullContextName + '=' +
- (contextInfo == null ? "null" : contextInfo.toString()));
-
- serializeContextInfo(entriesBuffer, stringsDos, contextInfo);
- }
- }
-
- stringsDos.flush();
- stringsBaos.flush();
-
- } catch (IOException e) {
- /*
- * Should not happen because we are wrapping a
- * ByteArrayOutputStream, which writes to memory
- */
- e.printStackTrace();
- }
-
- byte[] entriesArray = entriesBuffer.array();
- byte[] stringsArray = stringsBaos.toByteArray();
- return new SerializedContexts(entriesArray, stringsArray);
- }
-
- private static final int CONTEXT_VALUE_LENGTH = 8;
-
- private static void serializeContextInfo(ByteBuffer entriesBuffer, DataOutputStream stringsDos, Object contextInfo) throws IOException {
- int remainingBytes;
- if (contextInfo == null) {
- entriesBuffer.put(DataType.NULL.getValue());
- remainingBytes = CONTEXT_VALUE_LENGTH;
-
- } else if (contextInfo instanceof Integer) {
- entriesBuffer.put(DataType.INTEGER.getValue());
- entriesBuffer.putInt(((Integer) contextInfo).intValue());
- remainingBytes = CONTEXT_VALUE_LENGTH - 4;
-
- } else if (contextInfo instanceof Long) {
- entriesBuffer.put(DataType.LONG.getValue());
- entriesBuffer.putLong(((Long) contextInfo).longValue());
- remainingBytes = CONTEXT_VALUE_LENGTH - 8;
-
- } else if (contextInfo instanceof Double) {
- entriesBuffer.put(DataType.DOUBLE.getValue());
- entriesBuffer.putDouble(((Double) contextInfo).doubleValue());
- remainingBytes = CONTEXT_VALUE_LENGTH - 8;
-
- } else if (contextInfo instanceof Float) {
- entriesBuffer.put(DataType.FLOAT.getValue());
- entriesBuffer.putFloat(((Float) contextInfo).floatValue());
- remainingBytes = CONTEXT_VALUE_LENGTH - 4;
-
- } else if (contextInfo instanceof Byte) {
- entriesBuffer.put(DataType.BYTE.getValue());
- entriesBuffer.put(((Byte) contextInfo).byteValue());
- remainingBytes = CONTEXT_VALUE_LENGTH - 1;
-
- } else if (contextInfo instanceof Short) {
- entriesBuffer.put(DataType.SHORT.getValue());
- entriesBuffer.putShort(((Short) contextInfo).shortValue());
- remainingBytes = CONTEXT_VALUE_LENGTH - 2;
-
- } else if (contextInfo instanceof Boolean) {
- entriesBuffer.put(DataType.BOOLEAN.getValue());
- boolean b = ((Boolean) contextInfo).booleanValue();
- /* Converted to one byte, write 1 for true, 0 for false */
- entriesBuffer.put((byte) (b ? 1 : 0));
- remainingBytes = CONTEXT_VALUE_LENGTH - 1;
-
- } else {
- /* Also includes the case of Character. */
- /*
- * We'll write the object as a string, into the strings array. We
- * will write the corresponding offset to the entries array.
- */
- String str = contextInfo.toString();
- byte[] strArray = str.getBytes(UTF8_CHARSET);
-
- entriesBuffer.put(DataType.STRING.getValue());
-
- entriesBuffer.putInt(stringsDos.size());
- stringsDos.write(strArray);
- stringsDos.writeChar('\0');
-
- remainingBytes = CONTEXT_VALUE_LENGTH - 4;
- }
- entriesBuffer.position(entriesBuffer.position() + remainingBytes);
- }
-}
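
To make the 13-byte entry layout described above concrete, here is a minimal, self-contained sketch (the `EntryLayoutSketch` class name and the sample values are made up for illustration); it assumes native byte order and the same `DataType` tag values as `ContextInfoSerializer`:

```java
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

public class EntryLayoutSketch {
    /* 4 (name offset) + 1 (type tag) + 8 (value slot) */
    private static final int ENTRY_LENGTH = 13;

    public static void main(String[] args) {
        /* The strings array holds UTF-8 C-strings, "\0"-terminated. */
        byte[] strings = "$app.myprovider:mycontext\u0000".getBytes(StandardCharsets.UTF_8);

        ByteBuffer entry = ByteBuffer.allocate(ENTRY_LENGTH).order(ByteOrder.nativeOrder());
        entry.putInt(0);     /* offset of the context name in the strings array */
        entry.put((byte) 2); /* type tag: 2 = LONG in the DataType enum */
        entry.putLong(42L);  /* the 8-byte value slot, fully used by a long */

        System.out.println("entry bytes: " + entry.position()); /* 13 */
        System.out.println("strings bytes: " + strings.length);
    }
}
```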
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.context;
-
-/**
- * Context-retrieving object specified by the application to extract
- * application-specific context information, which can then be passed on to the
- * Java agents and saved to a trace.
- *
- * Retriever objects should be registered to the {@link ContextInfoManager} to
- * make them available to the LTTng agents.
- *
- * @author Alexandre Montplaisir
- */
-public interface IContextInfoRetriever {
-
- /**
- * Retrieve a piece of context information from the application, identified
- * by a key.
- *
- * @param key
- * The key identifying the context information
- * @return The context information.
- */
- Object retrieveContextInfo(String key);
-}
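
As a usage sketch, an application could implement and register a retriever along these lines, assuming the `registerContextInfoRetriever`/`unregisterContextInfoRetriever` methods of the `ContextInfoManager` referenced above; the "myprovider" name and "request_id" key are hypothetical:

```java
import java.io.IOException;

import org.lttng.ust.agent.context.ContextInfoManager;
import org.lttng.ust.agent.context.IContextInfoRetriever;

public class RetrieverSketch {
    public static void main(String[] args) throws IOException {
        IContextInfoRetriever retriever = new IContextInfoRetriever() {
            @Override
            public Object retrieveContextInfo(String key) {
                /* Unknown keys are reported as null contexts. */
                return "request_id".equals(key) ? Long.valueOf(1234) : null;
            }
        };

        /* getInstance() throws IOException if the JNI library is unavailable. */
        ContextInfoManager manager = ContextInfoManager.getInstance();
        manager.registerContextInfoRetriever("myprovider", retriever);
        /* ... application runs; the tracer may query "$app.myprovider:request_id" ... */
        manager.unregisterContextInfoRetriever("myprovider");
    }
}
```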
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.context;
-
-/**
- * Virtual class containing the Java side of the LTTng-UST context provider
- * registering/unregistering methods.
- *
- * @author Alexandre Montplaisir
- */
-final class LttngContextApi {
-
- private LttngContextApi() {}
-
- /**
- * Register a context provider to UST.
- *
- * The callbacks are the same for all providers, and are defined in the .c
- * file. The only information needed is the name of the retriever (which is
- * called a "provider" from UST's point of view).
- *
- * @param provider_name
- * The name of the provider
- * @return The pointer to the created provider object. It is not used
- * directly in the Java space, but will be needed for
- * {@link #unregisterProvider(long)}.
- */
- static native long registerProvider(String provider_name);
-
- /**
- * Unregister a previously-registered context provider from UST.
- *
- * @param provider_ref
- * The pointer to the provider object, obtained from
- * {@link #registerProvider}
- */
- static native void unregisterProvider(long provider_ref);
-}
-
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.filter;
-
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.lttng.ust.agent.session.EventRule;
-
-/**
- * Singleton class managing the filter notifications.
- *
- * Applications can register a {@link IFilterChangeListener} to be notified when
- * event filtering rules change in the tracing sessions.
- *
- * @author Alexandre Montplaisir
- */
-public final class FilterChangeNotifier {
-
- /** Lazy-loaded singleton instance object */
- private static FilterChangeNotifier instance = null;
-
- private final Map<EventRule, Integer> enabledEventRules = new HashMap<EventRule, Integer>();
- private final Collection<IFilterChangeListener> registeredListeners = new LinkedList<IFilterChangeListener>();
-
-
- /**
- * Private constructor, singleton class should not be instantiated directly.
- */
- private FilterChangeNotifier() {
- }
-
- /**
- * Get the singleton instance, initializing it if needed.
- *
- * @return The singleton instance
- */
- public static synchronized FilterChangeNotifier getInstance() {
- if (instance == null) {
- instance = new FilterChangeNotifier();
- }
- return instance;
- }
-
- /**
- * Notify the filter manager that a new rule was enabled in a tracing
- * session ("lttng enable-event ...")
- *
- * This is meant to be called by the LTTng Agent only. External Java
- * applications should not call this.
- *
- * @param rule
- * The rule that was added
- */
- public synchronized void addEventRule(EventRule rule) {
- Integer count = enabledEventRules.get(rule);
- if (count == null) {
- /*
- * This is the first instance of this rule being enabled. Add it to
- * the map and send notifications to the registered notifiers.
- */
- enabledEventRules.put(rule, Integer.valueOf(1));
- notifyForAddedRule(rule);
- return;
- }
- if (count.intValue() <= 0) {
- /* It should not have been in the map! */
- throw new IllegalStateException();
- }
- /*
- * This exact event rule was already enabled, just increment its
- * refcount without sending notifications
- */
- enabledEventRules.put(rule, Integer.valueOf(count.intValue() + 1));
- }
-
- /**
- * Notify the filter manager that an event name was disabled in the tracing
- * sessions ("lttng disable-event ...").
- *
- * The "disable-event" only specifies an event name. This means all the
- * rules containing this event name are to be disabled.
- *
- * This is meant to be called by the LTTng Agent only. External Java
- * applications should not call this.
- *
- * @param eventName
- * The event name to disable
- */
- public synchronized void removeEventRules(String eventName) {
- List<EventRule> rulesToRemove = new LinkedList<EventRule>();
-
- for (EventRule eventRule : enabledEventRules.keySet()) {
- if (eventRule.getEventName().equals(eventName)) {
- rulesToRemove.add(eventRule);
- }
- }
- /*
- * We cannot modify the map while iterating on it. We have to do the
- * removal separately from the iteration above.
- */
- for (EventRule rule : rulesToRemove) {
- removeEventRule(rule);
- }
- }
-
- private synchronized void removeEventRule(EventRule eventRule) {
- Integer count = enabledEventRules.get(eventRule);
- if (count == null || count.intValue() <= 0) {
- /*
- * We were asked to disable an event rule that was not enabled
- * previously. Command error?
- */
- throw new IllegalStateException();
- }
- if (count.intValue() == 1) {
- /*
- * This is the last instance of this event rule being disabled,
- * remove it from the map and send notifications of this rule being
- * gone.
- */
- enabledEventRules.remove(eventRule);
- notifyForRemovedRule(eventRule);
- return;
- }
- /*
- * Other sessions/daemons are still looking for this event rule, simply
- * decrement its refcount, and do not send notifications.
- */
- enabledEventRules.put(eventRule, Integer.valueOf(count.intValue() - 1));
-
- }
-
- /**
- * Register a new listener to the manager.
- *
- * @param listener
- * The listener to add
- */
- public synchronized void registerListener(IFilterChangeListener listener) {
- registeredListeners.add(listener);
-
- /* Send the current rules to the new listener ("statedump") */
- for (EventRule rule : enabledEventRules.keySet()) {
- listener.eventRuleAdded(rule);
- }
- }
-
- /**
- * Unregister a listener from the manager.
- *
- * @param listener
- * The listener to remove
- */
- public synchronized void unregisterListener(IFilterChangeListener listener) {
- registeredListeners.remove(listener);
- }
-
- private void notifyForAddedRule(final EventRule rule) {
- for (IFilterChangeListener notifier : registeredListeners) {
- notifier.eventRuleAdded(rule);
- }
- }
-
- private void notifyForRemovedRule(final EventRule rule) {
- for (IFilterChangeListener notifier : registeredListeners) {
- notifier.eventRuleRemoved(rule);
- }
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.filter;
-
-import org.lttng.ust.agent.session.EventRule;
-
-/**
- * Filter notification listener interface.
- * <p>
- * Applications wanting to be notified of event filtering rule changes should
- * implement this interface, then register their listener using
- * {@link FilterChangeNotifier#registerListener}.
- * </p>
- * <p>
- * The callbacks defined in this interface will be called whenever an event rule
- * is added or removed. The manager will take care of the reference-counting in
- * case multiple tracing sessions enable the exact same rules. For example, the
- * {@link #eventRuleRemoved} callback is only called when there are no more
- * sessions interested in it.
- * </p>
- * <p>
- * Do not forget to unregister the listener after use, using
- * {@link FilterChangeNotifier#unregisterListener}. If you do not, or if
- * you use an anonymous listener for example, it will remain attached until
- * the complete shutdown of the application.
- * </p>
- * <p>
- * Only one thread is used to dispatch notifications, sequentially. This means
- * that if a callback hangs, it will prevent other listeners from receiving
- * notifications. Take care not to block inside the listener callbacks, and
- * use separate threads for potentially long or blocking operations.
- * </p>
- *
- * @author Alexandre Montplaisir
- */
-public interface IFilterChangeListener {
-
- /**
- * Notification that a new event rule is now enabled in the tracing
- * sessions.
- *
- * @param rule
- * The event rule that was enabled
- */
- void eventRuleAdded(EventRule rule);
-
- /**
- * Notification that an existing event rule is now disabled in the tracing
- * sessions.
- *
- * @param rule
- * The event rule that was disabled
- */
- void eventRuleRemoved(EventRule rule);
-}
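
A minimal sketch of registering such a listener with the `FilterChangeNotifier` shown earlier (the printed messages are illustrative only):

```java
import org.lttng.ust.agent.filter.FilterChangeNotifier;
import org.lttng.ust.agent.filter.IFilterChangeListener;
import org.lttng.ust.agent.session.EventRule;

public class ListenerSketch {
    public static void main(String[] args) {
        IFilterChangeListener listener = new IFilterChangeListener() {
            @Override
            public void eventRuleAdded(EventRule rule) {
                System.out.println("Rule enabled: " + rule);
            }

            @Override
            public void eventRuleRemoved(EventRule rule) {
                System.out.println("Rule disabled: " + rule);
            }
        };

        FilterChangeNotifier fcn = FilterChangeNotifier.getInstance();
        /* Registration replays the currently-enabled rules ("statedump"). */
        fcn.registerListener(listener);
        /* ... */
        fcn.unregisterListener(listener);
    }
}
```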
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.session;
-
-/**
- * Event filtering rule present in a tracing session.
- *
- * It typically comes from an "lttng enable-event" command, and contains a
- * domain, event name, log level and filter string.
- *
- * @author Alexandre Montplaisir
- */
-public class EventRule {
-
- private final String eventName;
- private final LogLevelSelector logLevelSelector;
- private final String filterString;
-
- /**
- * Constructor.
- *
- * @param eventName
- * The name of the tracepoint
- * @param logLevelSelector
- * The log level of the event rule
- * @param filterString
- * The filtering string. May be null if there is no extra filter.
- */
- public EventRule(String eventName, LogLevelSelector logLevelSelector, String filterString) {
- this.eventName = eventName;
- this.logLevelSelector = logLevelSelector;
- this.filterString = filterString;
- }
-
- /**
- * Get the event name of this rule.
- *
- * @return The event name
- */
- public String getEventName() {
- return eventName;
- }
-
- /**
- * Get the log level filter configuration of the rule.
- *
- * @return The log level selector
- */
- public LogLevelSelector getLogLevelSelector() {
- return logLevelSelector;
- }
-
- /**
- * Get the filter string associated with this rule.
- *
- * @return The filter string; may be null if there is no filter.
- */
- public String getFilterString() {
- return filterString;
- }
-
- // ------------------------------------------------------------------------
- // Methods from Object
- // ------------------------------------------------------------------------
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + ((eventName == null) ? 0 : eventName.hashCode());
- result = prime * result + ((filterString == null) ? 0 : filterString.hashCode());
- result = prime * result + ((logLevelSelector == null) ? 0 : logLevelSelector.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- EventRule other = (EventRule) obj;
-
- if (eventName == null) {
- if (other.eventName != null) {
- return false;
- }
- } else if (!eventName.equals(other.eventName)) {
- return false;
- }
- /* else, continue */
-
- if (filterString == null) {
- if (other.filterString != null) {
- return false;
- }
- } else if (!filterString.equals(other.filterString)) {
- return false;
- }
- /* else, continue */
-
- if (logLevelSelector == null) {
- if (other.logLevelSelector != null) {
- return false;
- }
- } else if (!logLevelSelector.equals(other.logLevelSelector)) {
- return false;
- }
- /* else, continue */
-
- return true;
- }
-
- @Override
- public String toString() {
- return "Event name = " + getEventName() +
- ", Log level selector = (" + getLogLevelSelector().toString() + ")" +
- ", Filter string = " + getFilterString();
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.session;
-
-/**
- * Log level filtering element, which is part of an {@link EventRule}.
- *
- * @author Alexandre Montplaisir
- */
-public class LogLevelSelector {
-
- /**
- * The type of log level filter that is enabled.
- *
- * Defined from lttng-tools' include/lttng/event.h.
- */
- public enum LogLevelType {
- /**
- * All log levels are enabled. This overrides the value of
- * {@link LogLevelSelector#getLogLevel}.
- */
- LTTNG_EVENT_LOGLEVEL_ALL(0),
-
- /** This log level along with all log levels of higher severity are enabled. */
- LTTNG_EVENT_LOGLEVEL_RANGE(1),
-
- /** Only this exact log level is enabled. */
- LTTNG_EVENT_LOGLEVEL_SINGLE(2);
-
- private final int value;
-
- private LogLevelType(int value) {
- this.value = value;
- }
-
- /**
- * Get the numerical (int) value representing this log level type in the
- * communication protocol.
- *
- * @return The int value
- */
- public int getValue() {
- return value;
- }
-
- static LogLevelType fromValue(int val) {
- switch (val) {
- case 0:
- return LTTNG_EVENT_LOGLEVEL_ALL;
- case 1:
- return LTTNG_EVENT_LOGLEVEL_RANGE;
- case 2:
- return LTTNG_EVENT_LOGLEVEL_SINGLE;
- default:
- throw new IllegalArgumentException();
- }
- }
- }
-
- private final int logLevel;
- private final LogLevelType logLevelType;
-
- /**
- * Constructor using numerical values straight from the communication
- * protocol.
- *
- * @param logLevel
- * The numerical value of the log level. The exact value depends
- * on the tracing domain; see include/lttng/event.h in the
- * lttng-tools tree for the complete enumeration.
- * @param logLevelType
- * The numerical value of the log level type. It will be
- * converted to a {@link LogLevelType} by this constructor.
- * @throws IllegalArgumentException
- * If the 'logLevelType' does not correspond to a valid value.
- */
- public LogLevelSelector(int logLevel, int logLevelType) {
- this.logLevel = logLevel;
- this.logLevelType = LogLevelType.fromValue(logLevelType);
- }
-
- /**
- * "Manual" constructor, specifying the {@link LogLevelType} directly.
- *
- * @param logLevel
- * The numerical value of the log level. The exact value depends
- * on the tracing domain; see include/lttng/event.h in the
- * lttng-tools tree for the complete enumeration.
- * @param type
- * The log level filter type.
- */
- public LogLevelSelector(int logLevel, LogLevelType type) {
- this.logLevel = logLevel;
- this.logLevelType = type;
- }
-
- /**
- * Get the numerical value of the log level element. Does not apply if
- * {@link #getLogLevelType} returns
- * {@link LogLevelType#LTTNG_EVENT_LOGLEVEL_ALL}.
- *
- * @return The numerical value of the log level
- */
- public int getLogLevel() {
- return logLevel;
- }
-
- /**
- * Get the log level filter type.
- *
- * @return The log level filter type
- */
- public LogLevelType getLogLevelType() {
- return logLevelType;
- }
-
- /**
- * Helper method to determine if an event with the given log level should be
- * traced when considering this filter.
- *
- * For example, if this filter object represents "higher severity than 5",
- * and the log level passed as a parameter is "8", it will report a match
- * (a higher value means a higher severity).
- *
- * @param targetLogLevel
- * The log level value of the event to check for
- * @return Should this event be traced, or not
- */
- public boolean matches(int targetLogLevel) {
- switch (logLevelType) {
- case LTTNG_EVENT_LOGLEVEL_ALL:
- return true;
- case LTTNG_EVENT_LOGLEVEL_RANGE:
- return (targetLogLevel >= logLevel);
- case LTTNG_EVENT_LOGLEVEL_SINGLE:
- return (targetLogLevel == logLevel);
- default:
- throw new IllegalStateException();
- }
- }
-
- // ------------------------------------------------------------------------
- // Methods from Object
- // ------------------------------------------------------------------------
-
- @Override
- public int hashCode() {
- final int prime = 31;
- int result = 1;
- result = prime * result + logLevel;
- result = prime * result + ((logLevelType == null) ? 0 : logLevelType.hashCode());
- return result;
- }
-
- @Override
- public boolean equals(Object obj) {
- if (this == obj) {
- return true;
- }
- if (obj == null) {
- return false;
- }
- if (getClass() != obj.getClass()) {
- return false;
- }
- LogLevelSelector other = (LogLevelSelector) obj;
-
- if (logLevel != other.logLevel) {
- return false;
- }
- if (logLevelType != other.logLevelType) {
- return false;
- }
- return true;
- }
-
- @Override
- public String toString() {
- if (getLogLevelType() == LogLevelType.LTTNG_EVENT_LOGLEVEL_ALL) {
- return LogLevelType.LTTNG_EVENT_LOGLEVEL_ALL.toString();
- }
- return String.valueOf(getLogLevel()) + ", " + getLogLevelType().toString();
- }
-}
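
A short sketch demonstrating the `matches()` semantics described above (values chosen arbitrarily):

```java
import org.lttng.ust.agent.session.LogLevelSelector;
import org.lttng.ust.agent.session.LogLevelSelector.LogLevelType;

public class MatchSketch {
    public static void main(String[] args) {
        /* "Level 5 and all higher-severity levels" (higher value = more severe). */
        LogLevelSelector range =
                new LogLevelSelector(5, LogLevelType.LTTNG_EVENT_LOGLEVEL_RANGE);
        System.out.println(range.matches(8)); /* true */
        System.out.println(range.matches(3)); /* false */

        /* "Exactly level 5". */
        LogLevelSelector single =
                new LogLevelSelector(5, LogLevelType.LTTNG_EVENT_LOGLEVEL_SINGLE);
        System.out.println(single.matches(5)); /* true */
        System.out.println(single.matches(8)); /* false */
    }
}
```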
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.utils;
-
-/**
- * Logging infrastructure for the lttng-ust Java agent. It prints log messages
- * to stderr but only when the environment variable LTTNG_UST_DEBUG is defined.
- *
- * @author Alexandre Montplaisir
- */
-public class LttngUstAgentLogger {
-
- private static final String ENV_VAR_NAME = "LTTNG_UST_DEBUG";
- private static final boolean LOGGING_ENABLED = (System.getenv(ENV_VAR_NAME) != null);
-
- /**
- * Log event. Will be printed to stderr if the environment variable
- * "LTTNG_UST_DEBUG" is defined.
- *
- * @param c
- * The class logging the message (should normally be called with
- * {@link #getClass()}).
- * @param message
- * The message to print
- */
- public static void log(Class<?> c, String message) {
- if (LOGGING_ENABLED) {
- System.err.println(c.getSimpleName() + ": " + message);
- }
- }
-}
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-JAVAROOT = .
-AM_JAVACFLAGS = -classpath $(CLASSPATH):$(builddir)/../lttng-ust-agent-common/lttng-ust-agent-common.jar
-
-pkgpath = org/lttng/ust/agent/jul
-
-jarfile_version = 1.0.0
-jarfile_manifest = $(srcdir)/Manifest.txt
-jarfile_symlink = lttng-ust-agent-jul.jar
-jarfile = lttng-ust-agent-jul-$(jarfile_version).jar
-
-jardir = $(datadir)/java
-
-juljniout = ../../jni/jul
-
-dist_noinst_JAVA = $(pkgpath)/LttngJulAgent.java \
- $(pkgpath)/LttngJulApi.java \
- $(pkgpath)/LttngLogHandler.java
-
-dist_noinst_DATA = $(jarfile_manifest)
-
-jar_DATA = $(jarfile)
-
-stamp = jul-jni-header.stamp
-classes = $(pkgpath)/*.class
-
-$(jarfile): classnoinst.stamp
- $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) $(classes) && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink)
-
-if !HAVE_JAVAH
-# If we don't have javah, assume we are running openjdk >= 10 and use javac
-# to generate the jni header file.
-AM_JAVACFLAGS += -h $(juljniout)
-else
-jul-jni-header.stamp: $(dist_noinst_JAVA)
- $(JAVAH) -classpath $(CLASSPATH):$(srcdir) -d $(juljniout) $(JAVAHFLAGS) org.lttng.ust.agent.jul.LttngJulApi && \
- echo "JUL JNI header generated" > jul-jni-header.stamp
-
-all-local: $(stamp)
-endif
-
-install-data-hook:
- cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink)
-
-uninstall-hook:
- cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink)
-
-CLEANFILES = *.jar \
- $(pkgpath)/*.class \
- jul-jni-header.stamp \
- $(juljniout)/org_lttng_ust_agent_jul_LttngJulApi.h
+++ /dev/null
-Name: org/lttng/ust/agent/jul/
-Specification-Title: LTTng UST Java Agent JUL Integration
-Specification-Version: 1.0.0
-Specification-Vendor: LTTng Project
-Implementation-Title: org.lttng.ust.agent.jul
-Implementation-Version: 1.0.0
-Implementation-Vendor: LTTng Project
-Class-Path: lttng-ust-agent-common.jar
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.jul;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.logging.Handler;
-import java.util.logging.LogManager;
-import java.util.logging.Logger;
-
-import org.lttng.ust.agent.AbstractLttngAgent;
-
-/**
- * Agent implementation for tracing from JUL loggers.
- *
- * @author Alexandre Montplaisir
- */
-class LttngJulAgent extends AbstractLttngAgent<LttngLogHandler> {
-
- private static LttngJulAgent instance = null;
-
- private LttngJulAgent() {
- super(Domain.JUL);
- }
-
- public static synchronized LttngJulAgent getInstance() {
- if (instance == null) {
- instance = new LttngJulAgent();
- }
- return instance;
- }
-
- @Override
- public Collection<String> listAvailableEvents() {
- Set<String> ret = new TreeSet<String>();
-
- List<String> loggersNames = Collections.list(LogManager.getLogManager().getLoggerNames());
- for (String name : loggersNames) {
- /*
- * Skip the root logger ("") and the special "global" logger. An
- * empty string is not a valid event name in LTTng.
- */
- if (name.equals("") || name.equals("global")) {
- continue;
- }
-
- /*
- * Check if that logger has at least one LTTng JUL handler attached.
- */
- Logger logger = Logger.getLogger(name);
- if (hasLttngHandlerAttached(logger)) {
- ret.add(name);
- }
- }
-
- return ret;
- }
-
- private static boolean hasLttngHandlerAttached(Logger logger) {
- for (Handler handler : logger.getHandlers()) {
- if (handler instanceof LttngLogHandler) {
- return true;
- }
- }
-
- /*
- * A parent logger, if any, may be connected to an LTTng handler. In
- * this case, we will want to include this child logger in the output,
- * since it will be accessible by LTTng.
- */
- Logger parent = logger.getParent();
- if (parent != null) {
- return hasLttngHandlerAttached(parent);
- }
-
- /*
- * We have reached the root logger and have not found any LTTng handler;
- * this event will not be accessible.
- */
- return false;
- }
-
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.jul;
-
-/**
- * Virtual class containing the Java side of the LTTng-JUL JNI API methods.
- *
- * @author Alexandre Montplaisir
- */
-final class LttngJulApi {
-
- private LttngJulApi() {}
-
- static native void tracepoint(String msg,
- String logger_name,
- String class_name,
- String method_name,
- long millis,
- int log_level,
- int thread_id);
-
- static native void tracepointWithContext(String msg,
- String logger_name,
- String class_name,
- String method_name,
- long millis,
- int log_level,
- int thread_id,
- byte[] contextEntries,
- byte[] contextStrings);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
- */
-
-package org.lttng.ust.agent.jul;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.logging.Formatter;
-import java.util.logging.Handler;
-import java.util.logging.LogRecord;
-
-import org.lttng.ust.agent.ILttngAgent;
-import org.lttng.ust.agent.ILttngHandler;
-import org.lttng.ust.agent.context.ContextInfoSerializer;
-
-/**
- * LTTng-UST JUL log handler.
- *
- * Applications can attach this handler to their
- * {@link java.util.logging.Logger} to have it generate UST events from logging
- * events received through the logger.
- *
- * It sends its events to UST via the JNI library "liblttng-ust-jul-jni.so".
- * Make sure this library is available before using this handler.
- *
- * @author Alexandre Montplaisir
- * @author David Goulet
- */
-public class LttngLogHandler extends Handler implements ILttngHandler {
-
- private static final String SHARED_OBJECT_NAME = "lttng-ust-jul-jni";
-
- /**
- * Dummy Formatter object, so we can use its
- * {@link Formatter#formatMessage(LogRecord)} method.
- */
- private static final Formatter FORMATTER = new Formatter() {
- @Override
- public String format(LogRecord record) {
- throw new UnsupportedOperationException();
- }
- };
-
- private final ILttngAgent<LttngLogHandler> agent;
-
- /** Number of events logged (really sent through JNI) by this handler */
- private final AtomicLong eventCount = new AtomicLong(0);
-
- /**
- * Constructor
- *
- * @throws IOException
- * This handler requires the lttng-ust-jul-jni.so native
- * library, through which it will send the trace events. This
- * exception is thrown if this library cannot be found.
- * @throws SecurityException
- * We will forward any SecurityException that may be thrown when
- * trying to load the JNI library.
- */
- public LttngLogHandler() throws IOException, SecurityException {
- super();
- /* Initialize LTTng UST tracer. */
- try {
- System.loadLibrary(SHARED_OBJECT_NAME); //$NON-NLS-1$
- } catch (UnsatisfiedLinkError e) {
- throw new IOException(e);
- }
-
- /* Register to the relevant agent. */
- agent = LttngJulAgent.getInstance();
- agent.registerHandler(this);
- }
-
- @Override
- public synchronized void close() {
- agent.unregisterHandler(this);
- }
-
- /**
- * Get the number of events logged by this handler so far. This means the
- * number of events actually sent through JNI to UST.
- *
- * @return The number of events logged so far
- */
- @Override
- public long getEventCount() {
- return eventCount.get();
- }
-
- @Override
- public void flush() {
- }
-
- @Override
- public void publish(LogRecord record) {
- /*
- * Check if the current message should be logged, according to the UST
- * session settings.
- */
- if (!agent.isEventEnabled(record.getLoggerName())) {
- return;
- }
-
- String formattedMessage = FORMATTER.formatMessage(record);
-
- /* Retrieve all the requested context information we can find */
- Collection<Entry<String, Map<String, Integer>>> enabledContexts = agent.getEnabledAppContexts();
- ContextInfoSerializer.SerializedContexts contextInfo = ContextInfoSerializer.queryAndSerializeRequestedContexts(enabledContexts);
-
- eventCount.incrementAndGet();
-
- /*
- * Specific tracepoint designed for JUL events. The logger name is used
- * as the event name; the raw message, the log level of the record and
- * the thread ID are recorded as event fields.
- */
- LttngJulApi.tracepointWithContext(formattedMessage,
- record.getLoggerName(),
- record.getSourceClassName(),
- record.getSourceMethodName(),
- record.getMillis(),
- record.getLevel().intValue(),
- record.getThreadID(),
- contextInfo.getEntriesArray(),
- contextInfo.getStringsArray());
- }
-
-}
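
Typical application-side usage of this handler might look like the following sketch (the logger name and message are placeholders, and the JNI library is assumed to be loadable):

```java
import java.io.IOException;
import java.util.logging.Logger;

import org.lttng.ust.agent.jul.LttngLogHandler;

public class JulSketch {
    public static void main(String[] args) throws IOException {
        Logger logger = Logger.getLogger("my.app.Logger"); /* placeholder name */

        LttngLogHandler handler = new LttngLogHandler(); /* loads the JNI library */
        logger.addHandler(handler);

        /* Emitted as an LTTng event if "my.app.Logger" is enabled in a session. */
        logger.info("Hello from JUL");

        logger.removeHandler(handler);
        handler.close(); /* unregisters the handler from the agent */
    }
}
```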
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-JAVAROOT = .
-AM_JAVACFLAGS = -classpath $(CLASSPATH):$(builddir)/../lttng-ust-agent-common/lttng-ust-agent-common.jar
-
-pkgpath = org/lttng/ust/agent/log4j
-
-jarfile_version = 1.0.0
-jarfile_manifest = $(srcdir)/Manifest.txt
-jarfile_symlink = lttng-ust-agent-log4j.jar
-jarfile = lttng-ust-agent-log4j-$(jarfile_version).jar
-
-jardir = $(datadir)/java
-
-log4jjniout = ../../jni/log4j
-
-dist_noinst_JAVA = $(pkgpath)/LttngLog4jAgent.java \
- $(pkgpath)/LttngLog4jApi.java \
- $(pkgpath)/LttngLogAppender.java
-
-dist_noinst_DATA = $(jarfile_manifest)
-
-jar_DATA = $(jarfile)
-
-stamp = log4j-jni-header.stamp
-classes = $(pkgpath)/*.class
-
-$(jarfile): classnoinst.stamp
- $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) $(classes) && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink)
-
-if !HAVE_JAVAH
-# If we don't have javah, assume we are running openjdk >= 10 and use javac
-# to generate the jni header file.
-AM_JAVACFLAGS += -h $(log4jjniout)
-else
-log4j-jni-header.stamp: $(dist_noinst_JAVA)
- $(JAVAH) -classpath $(CLASSPATH):$(srcdir) -d $(log4jjniout) $(JAVAHFLAGS) org.lttng.ust.agent.log4j.LttngLog4jApi && \
- echo "Log4j JNI header generated" > log4j-jni-header.stamp
-
-all-local: $(stamp)
-endif
-
-install-data-hook:
- cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink)
-
-uninstall-hook:
- cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink)
-
-CLEANFILES = *.jar \
- $(pkgpath)/*.class \
- log4j-jni-header.stamp \
- $(log4jjniout)/org_lttng_ust_agent_log4j_LttngLog4jApi.h
+++ /dev/null
-Name: org/lttng/ust/agent/log4j/
-Specification-Title: LTTng UST Java Agent Log4J 1.x Integration
-Specification-Version: 1.0.0
-Specification-Vendor: LTTng Project
-Implementation-Title: org.lttng.ust.agent.log4j
-Implementation-Version: 1.0.0
-Implementation-Vendor: LTTng Project
-Class-Path: lttng-ust-agent-common.jar
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.log4j;
-
-import java.util.Collection;
-import java.util.Collections;
-import java.util.Enumeration;
-import java.util.List;
-import java.util.Set;
-import java.util.TreeSet;
-
-import org.apache.log4j.Appender;
-import org.apache.log4j.Category;
-import org.apache.log4j.LogManager;
-import org.apache.log4j.Logger;
-import org.lttng.ust.agent.AbstractLttngAgent;
-
-/**
- * Agent implementation for tracing from Log4j loggers, connecting to a root
- * session daemon.
- *
- * @author Alexandre Montplaisir
- */
-class LttngLog4jAgent extends AbstractLttngAgent<LttngLogAppender> {
-
- private static LttngLog4jAgent instance = null;
-
- private LttngLog4jAgent() {
- super(Domain.LOG4J);
- }
-
- public static synchronized LttngLog4jAgent getInstance() {
- if (instance == null) {
- instance = new LttngLog4jAgent();
- }
- return instance;
- }
-
- @Override
- public Collection<String> listAvailableEvents() {
- Set<String> ret = new TreeSet<String>();
-
- @SuppressWarnings("unchecked")
- List<Logger> loggers = Collections.list(LogManager.getCurrentLoggers());
- for (Logger logger : loggers) {
- if (logger == null) {
- continue;
- }
-
- /*
- * Check if that logger has at least one LTTng log4j appender
- * attached.
- */
- if (hasLttngAppenderAttached(logger)) {
- ret.add(logger.getName());
- }
- }
-
- return ret;
- }
-
- private static boolean hasLttngAppenderAttached(Category logger) {
- @SuppressWarnings("unchecked")
- Enumeration<Appender> appenders = logger.getAllAppenders();
- if (appenders != null) {
- for (Appender appender : Collections.list(appenders)) {
- if (appender instanceof LttngLogAppender) {
- return true;
- }
- }
- }
-
- /*
- * A parent logger, if any, may be connected to an LTTng appender. In
- * this case, we will want to include this child logger in the output,
- * since it will be accessible by LTTng.
- */
- Category parent = logger.getParent();
- if (parent != null) {
- return hasLttngAppenderAttached(parent);
- }
-
- /*
- * We have reached the root logger and have not found any LTTng appender;
- * this event will not be accessible.
- */
- return false;
- }
-
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-package org.lttng.ust.agent.log4j;
-
-/**
- * Virtual class containing the Java side of the LTTng-log4j JNI API methods.
- *
- * @author Alexandre Montplaisir
- */
-final class LttngLog4jApi {
-
- private LttngLog4jApi() {}
-
- static native void tracepoint(String msg,
- String logger_name,
- String class_name,
- String method_name,
- String file_name,
- int line_number,
- long timestamp,
- int loglevel,
- String thread_name);
-
- static native void tracepointWithContext(String msg,
- String logger_name,
- String class_name,
- String method_name,
- String file_name,
- int line_number,
- long timestamp,
- int loglevel,
- String thread_name,
- byte[] contextEntries,
- byte[] contextStrings);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 EfficiOS Inc.
- * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2014 Christian Babeux <christian.babeux@efficios.com>
- */
-
-package org.lttng.ust.agent.log4j;
-
-import java.io.IOException;
-import java.util.Collection;
-import java.util.Map;
-import java.util.Map.Entry;
-import java.util.concurrent.atomic.AtomicLong;
-
-import org.apache.log4j.AppenderSkeleton;
-import org.apache.log4j.spi.LoggingEvent;
-import org.lttng.ust.agent.ILttngAgent;
-import org.lttng.ust.agent.ILttngHandler;
-import org.lttng.ust.agent.context.ContextInfoSerializer;
-
-/**
- * LTTng-UST Log4j 1.x log handler.
- *
- * Applications can attach this appender to their
- * {@link org.apache.log4j.Logger} to have it generate UST events from logging
- * events received through the logger.
- *
- * It sends its events to UST via the JNI library "liblttng-ust-log4j-jni.so".
- * Make sure this library is available before using this appender.
- *
- * @author Alexandre Montplaisir
- * @author Christian Babeux
- */
-public class LttngLogAppender extends AppenderSkeleton implements ILttngHandler {
-
- private static final String SHARED_OBJECT_NAME = "lttng-ust-log4j-jni";
-
- private final AtomicLong eventCount = new AtomicLong(0);
-
- private final ILttngAgent<LttngLogAppender> agent;
-
-
- /**
- * Constructor
- *
- * @throws IOException
- * This handler requires the lttng-ust-log4j-jni.so native
- * library, through which it will send the trace events. This
- * exception is thrown if this library cannot be found.
- * @throws SecurityException
- * We will forward any SecurityException that may be thrown when
- * trying to load the JNI library.
- */
- public LttngLogAppender() throws IOException, SecurityException {
- super();
- /* Initialize LTTng UST tracer. */
- try {
- System.loadLibrary(SHARED_OBJECT_NAME); // $NON-NLS-1$
- } catch (UnsatisfiedLinkError e) {
- throw new IOException(e);
- }
-
- /* Register to the relevant agent. */
- agent = LttngLog4jAgent.getInstance();
- agent.registerHandler(this);
- }
-
- @Override
- public synchronized void close() {
- agent.unregisterHandler(this);
- }
-
- /**
- * Get the number of events logged by this handler so far. This means the
- * number of events actually sent through JNI to UST.
- *
- * @return The number of events logged so far
- */
- @Override
- public long getEventCount() {
- return eventCount.get();
- }
-
- @Override
- public boolean requiresLayout() {
- return false;
- }
-
- @Override
- protected void append(LoggingEvent event) {
- /*
- * Check if the current message should be logged, according to the UST
- * session settings.
- */
- if (!agent.isEventEnabled(event.getLoggerName())) {
- return;
- }
-
- /*
- * The line number returned from LocationInformation is a string. At
- * least try to convert it to a proper int.
- */
- int line;
- try {
- String lineString = event.getLocationInformation().getLineNumber();
- line = Integer.parseInt(lineString);
- } catch (NumberFormatException n) {
- line = -1;
- }
-
- /* Retrieve all the requested context information we can find */
- Collection<Entry<String, Map<String, Integer>>> enabledContexts = agent.getEnabledAppContexts();
- ContextInfoSerializer.SerializedContexts contextInfo = ContextInfoSerializer.queryAndSerializeRequestedContexts(enabledContexts);
-
- eventCount.incrementAndGet();
-
- LttngLog4jApi.tracepointWithContext(event.getRenderedMessage(),
- event.getLoggerName(),
- event.getLocationInformation().getClassName(),
- event.getLocationInformation().getMethodName(),
- event.getLocationInformation().getFileName(),
- line,
- event.getTimeStamp(),
- event.getLevel().toInt(),
- event.getThreadName(),
- contextInfo.getEntriesArray(),
- contextInfo.getStringsArray());
- }
-
-}
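
The log4j 1.x equivalent, as a sketch under the same assumptions:

```java
import java.io.IOException;

import org.apache.log4j.Logger;
import org.lttng.ust.agent.log4j.LttngLogAppender;

public class Log4jSketch {
    public static void main(String[] args) throws IOException {
        Logger logger = Logger.getLogger("my.app.Logger"); /* placeholder name */

        LttngLogAppender appender = new LttngLogAppender(); /* loads the JNI library */
        logger.addAppender(appender);

        /* Emitted as an LTTng event if "my.app.Logger" is enabled in a session. */
        logger.info("Hello from log4j");

        logger.removeAppender(appender);
        appender.close(); /* unregisters the appender from the agent */
    }
}
```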
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-SUBDIRS = common
-
-if ENABLE_JAVA_AGENT_WITH_JUL
-SUBDIRS += jul
-endif
-
-if ENABLE_JAVA_AGENT_WITH_LOG4J
-SUBDIRS += log4j
-endif
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
-
-lib_LTLIBRARIES = liblttng-ust-context-jni.la
-liblttng_ust_context_jni_la_SOURCES = lttng_ust_context.c lttng_ust_context.h
-
-nodist_liblttng_ust_context_jni_la_SOURCES = org_lttng_ust_agent_context_LttngContextApi.h
-
-liblttng_ust_context_jni_la_LIBADD = -lc \
- $(top_builddir)/liblttng-ust/liblttng-ust.la
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include "org_lttng_ust_agent_context_LttngContextApi.h"
-
-#include <string.h>
-#include <inttypes.h>
-#include <lttng/ust-events.h>
-#include <lttng/ringbuffer-context.h>
-#include <ust-context-provider.h>
-
-#include "ust-helper.h"
-#include "lttng_ust_context.h"
-
-enum lttng_ust_jni_type {
- JNI_TYPE_NULL = 0,
- JNI_TYPE_INTEGER = 1,
- JNI_TYPE_LONG = 2,
- JNI_TYPE_DOUBLE = 3,
- JNI_TYPE_FLOAT = 4,
- JNI_TYPE_BYTE = 5,
- JNI_TYPE_SHORT = 6,
- JNI_TYPE_BOOLEAN = 7,
- JNI_TYPE_STRING = 8,
-};
-
-struct lttng_ust_jni_ctx_entry {
- int32_t context_name_offset;
- char type; /* enum lttng_ust_jni_type */
- union {
- int32_t _integer;
- int64_t _long;
- double _double;
- float _float;
- signed char _byte;
- int16_t _short;
- signed char _boolean;
- int32_t _string_offset;
- } value;
-} __attribute__((packed));
-
-struct lttng_ust_jni_provider {
- struct lttng_ust_registered_context_provider *reg_provider;
- char *name;
- struct lttng_ust_context_provider provider;
-};
-
-/* TLS passing context info from JNI to callbacks. */
-__thread struct lttng_ust_jni_tls lttng_ust_context_info_tls;
-
-static const char *get_ctx_string_at_offset(int32_t offset)
-{
- signed char *ctx_strings_array = lttng_ust_context_info_tls.ctx_strings;
-
- if (offset < 0 || offset >= lttng_ust_context_info_tls.ctx_strings_len) {
- return NULL;
- }
- return (const char *) (ctx_strings_array + offset);
-}
-
-static struct lttng_ust_jni_ctx_entry *lookup_ctx_by_name(const char *ctx_name)
-{
- struct lttng_ust_jni_ctx_entry *ctx_entries_array = lttng_ust_context_info_tls.ctx_entries;
- int i, len = lttng_ust_context_info_tls.ctx_entries_len / sizeof(struct lttng_ust_jni_ctx_entry);
-
- for (i = 0; i < len; i++) {
- int32_t offset = ctx_entries_array[i].context_name_offset;
- const char *string = get_ctx_string_at_offset(offset);
-
- if (string && strcmp(string, ctx_name) == 0) {
- return &ctx_entries_array[i];
- }
- }
- return NULL;
-}
-
-static size_t get_size_cb(void *priv, size_t offset)
-{
- struct lttng_ust_jni_ctx_entry *jctx;
- size_t size = 0;
- struct lttng_ust_jni_provider *jni_provider = (struct lttng_ust_jni_provider *) priv;
- const char *ctx_name = jni_provider->name;
- enum lttng_ust_jni_type jni_type;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
- size += sizeof(char); /* tag */
- jctx = lookup_ctx_by_name(ctx_name);
- if (!jctx) {
- jni_type = JNI_TYPE_NULL;
- } else {
- jni_type = jctx->type;
- }
- switch (jni_type) {
- case JNI_TYPE_NULL:
- break;
- case JNI_TYPE_INTEGER:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int32_t));
- size += sizeof(int32_t); /* variant */
- break;
- case JNI_TYPE_LONG:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int64_t));
- size += sizeof(int64_t); /* variant */
- break;
- case JNI_TYPE_DOUBLE:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(double));
- size += sizeof(double); /* variant */
- break;
- case JNI_TYPE_FLOAT:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(float));
- size += sizeof(float); /* variant */
- break;
- case JNI_TYPE_SHORT:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int16_t));
- size += sizeof(int16_t); /* variant */
- break;
- case JNI_TYPE_BYTE: /* Fall-through. */
- case JNI_TYPE_BOOLEAN:
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
- size += sizeof(char); /* variant */
- break;
- case JNI_TYPE_STRING:
- {
- /* The value is an offset, the string is in the "strings" array */
- int32_t string_offset = jctx->value._string_offset;
- const char *string = get_ctx_string_at_offset(string_offset);
-
- if (string) {
- size += strlen(string) + 1;
- }
- break;
- }
- default:
- abort();
- }
- return size;
-
-}
-
-static void record_cb(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *lttng_chan_buf)
-{
- struct lttng_ust_jni_ctx_entry *jctx;
- struct lttng_ust_jni_provider *jni_provider = (struct lttng_ust_jni_provider *) priv;
- const char *ctx_name = jni_provider->name;
- enum lttng_ust_jni_type jni_type;
- char sel_char;
-
- jctx = lookup_ctx_by_name(ctx_name);
- if (!jctx) {
- jni_type = JNI_TYPE_NULL;
- } else {
- jni_type = jctx->type;
- }
-
- switch (jni_type) {
- case JNI_TYPE_NULL:
- sel_char = LTTNG_UST_DYNAMIC_TYPE_NONE;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- break;
- case JNI_TYPE_INTEGER:
- {
- int32_t v = jctx->value._integer;
-
- sel_char = LTTNG_UST_DYNAMIC_TYPE_S32;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
- break;
- }
- case JNI_TYPE_LONG:
- {
- int64_t v = jctx->value._long;
-
- sel_char = LTTNG_UST_DYNAMIC_TYPE_S64;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
- break;
- }
- case JNI_TYPE_DOUBLE:
- {
- double v = jctx->value._double;
-
- sel_char = LTTNG_UST_DYNAMIC_TYPE_DOUBLE;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
- break;
- }
- case JNI_TYPE_FLOAT:
- {
- float v = jctx->value._float;
-
- sel_char = LTTNG_UST_DYNAMIC_TYPE_FLOAT;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
- break;
- }
- case JNI_TYPE_SHORT:
- {
- int16_t v = jctx->value._short;
-
- sel_char = LTTNG_UST_DYNAMIC_TYPE_S16;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
- break;
- }
- case JNI_TYPE_BYTE:
- {
- char v = jctx->value._byte;
-
- sel_char = LTTNG_UST_DYNAMIC_TYPE_S8;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
- break;
- }
- case JNI_TYPE_BOOLEAN:
- {
- char v = jctx->value._boolean;
-
- sel_char = LTTNG_UST_DYNAMIC_TYPE_S8;
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
- break;
- }
- case JNI_TYPE_STRING:
- {
- int32_t offset = jctx->value._string_offset;
- const char *str = get_ctx_string_at_offset(offset);
-
- if (str) {
- sel_char = LTTNG_UST_DYNAMIC_TYPE_STRING;
- } else {
- sel_char = LTTNG_UST_DYNAMIC_TYPE_NONE;
- }
- lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
- if (str) {
- lttng_chan_buf->ops->event_write(ctx, str, strlen(str) + 1, 1);
- }
- break;
- }
- default:
- abort();
- }
-}
-
-static void get_value_cb(void *priv, struct lttng_ust_ctx_value *value)
-{
- struct lttng_ust_jni_provider *jni_provider = (struct lttng_ust_jni_provider *) priv;
- struct lttng_ust_jni_ctx_entry *jctx;
- const char *ctx_name = jni_provider->name;
- enum lttng_ust_jni_type jni_type;
-
- jctx = lookup_ctx_by_name(ctx_name);
- if (!jctx) {
- jni_type = JNI_TYPE_NULL;
- } else {
- jni_type = jctx->type;
- }
-
- switch (jni_type) {
- case JNI_TYPE_NULL:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
- break;
- case JNI_TYPE_INTEGER:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
- value->u.s64 = (int64_t) jctx->value._integer;
- break;
- case JNI_TYPE_LONG:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
- value->u.s64 = jctx->value._long;
- break;
- case JNI_TYPE_DOUBLE:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_DOUBLE;
- value->u.d = jctx->value._double;
- break;
- case JNI_TYPE_FLOAT:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_DOUBLE;
- value->u.d = (double) jctx->value._float;
- break;
- case JNI_TYPE_SHORT:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
- value->u.s64 = (int64_t) jctx->value._short;
- break;
- case JNI_TYPE_BYTE:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
- value->u.s64 = (int64_t) jctx->value._byte;
- break;
- case JNI_TYPE_BOOLEAN:
- value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
- value->u.s64 = (int64_t) jctx->value._boolean;
- break;
- case JNI_TYPE_STRING:
- {
- int32_t offset = jctx->value._string_offset;
- const char *str = get_ctx_string_at_offset(offset);
-
- if (str) {
- value->sel = LTTNG_UST_DYNAMIC_TYPE_STRING;
- value->u.str = str;
- } else {
- value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
- }
- break;
- }
- default:
- abort();
- }
-}
-
-/*
- * Register a context provider to UST.
- *
- * Called from the Java side when an application registers a context retriever,
- * so we create and register a corresponding provider on the C side.
- */
-JNIEXPORT jlong JNICALL Java_org_lttng_ust_agent_context_LttngContextApi_registerProvider(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring provider_name)
-{
- jboolean iscopy;
- const char *provider_name_jstr;
- char *provider_name_cstr;
- struct lttng_ust_context_provider *provider;
- struct lttng_ust_jni_provider *jni_provider;
- /*
- * Note: a "jlong" is 8 bytes on all architectures, whereas a
- * C "long" varies.
- */
- jlong provider_ref;
-
- provider_name_jstr = (*env)->GetStringUTFChars(env, provider_name, &iscopy);
- if (!provider_name_jstr) {
- goto error_jstr;
- }
- /* Keep our own copy of the string so UST can use it. */
- provider_name_cstr = strdup(provider_name_jstr);
- (*env)->ReleaseStringUTFChars(env, provider_name, provider_name_jstr);
- if (!provider_name_cstr) {
- goto error_strdup;
- }
- jni_provider = zmalloc(sizeof(*jni_provider));
- if (!jni_provider) {
- goto error_provider;
- }
- provider = &jni_provider->provider;
- provider->struct_size = sizeof(*provider);
- jni_provider->name = provider_name_cstr;
- provider->name = jni_provider->name;
- provider->get_size = get_size_cb;
- provider->record = record_cb;
- provider->get_value = get_value_cb;
- provider->priv = jni_provider;
-
- jni_provider->reg_provider = lttng_ust_context_provider_register(provider);
- if (!jni_provider->reg_provider) {
- goto error_register;
- }
-
- provider_ref = (jlong) (long) jni_provider;
- return provider_ref;
-
- /* Error handling. */
-error_register:
- free(jni_provider);
-error_provider:
- free(provider_name_cstr);
-error_strdup:
-error_jstr:
- return 0;
-}
-
-/*
- * Unregister a previously-registered context provider.
- *
- * Called from the Java side when an application unregisters a context retriever,
- * so we unregister and delete the corresponding provider on the C side.
- */
-JNIEXPORT void JNICALL Java_org_lttng_ust_agent_context_LttngContextApi_unregisterProvider(JNIEnv *env __attribute__((unused)),
- jobject jobj __attribute__((unused)),
- jlong provider_ref)
-{
- struct lttng_ust_jni_provider *jni_provider =
- (struct lttng_ust_jni_provider *) (unsigned long) provider_ref;
-
- if (!jni_provider) {
- return;
- }
-
- lttng_ust_context_provider_unregister(jni_provider->reg_provider);
-
- free(jni_provider->name);
- free(jni_provider);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- */
-
-#ifndef LIBLTTNG_UST_JAVA_AGENT_JNI_COMMON_LTTNG_UST_CONTEXT_H_
-#define LIBLTTNG_UST_JAVA_AGENT_JNI_COMMON_LTTNG_UST_CONTEXT_H_
-
-struct lttng_ust_jni_ctx_entry;
-
-struct lttng_ust_jni_tls {
- struct lttng_ust_jni_ctx_entry *ctx_entries;
- int32_t ctx_entries_len;
- signed char *ctx_strings;
- int32_t ctx_strings_len;
-};
-
-extern __thread struct lttng_ust_jni_tls lttng_ust_context_info_tls;
-
-#endif /* LIBLTTNG_UST_JAVA_AGENT_JNI_COMMON_LTTNG_UST_CONTEXT_H_ */
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
-
-lib_LTLIBRARIES = liblttng-ust-jul-jni.la
-liblttng_ust_jul_jni_la_SOURCES = lttng_ust_jul.c \
- lttng_ust_jul.h
-
-nodist_liblttng_ust_jul_jni_la_SOURCES = org_lttng_ust_agent_jul_LttngJulApi.h
-
-liblttng_ust_jul_jni_la_LIBADD = -lc \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(top_builddir)/liblttng-ust-java-agent/jni/common/liblttng-ust-context-jni.la
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include "org_lttng_ust_agent_jul_LttngJulApi.h"
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#include "lttng_ust_jul.h"
-#include "../common/lttng_ust_context.h"
-
-/*
- * Deprecated function from before the context information was passed.
- */
-JNIEXPORT void JNICALL Java_org_lttng_ust_agent_jul_LttngJulApi_tracepoint(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring msg,
- jstring logger_name,
- jstring class_name,
- jstring method_name,
- jlong millis,
- jint log_level,
- jint thread_id)
-{
- jboolean iscopy;
- const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
- const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
- const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
- const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
-
- tracepoint(lttng_jul, event, msg_cstr, logger_name_cstr,
- class_name_cstr, method_name_cstr, millis, log_level, thread_id);
-
- (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
- (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
- (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
- (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
-}
-
-/*
- * Tracepoint used by Java applications using the JUL handler.
- */
-JNIEXPORT void JNICALL Java_org_lttng_ust_agent_jul_LttngJulApi_tracepointWithContext(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring msg,
- jstring logger_name,
- jstring class_name,
- jstring method_name,
- jlong millis,
- jint log_level,
- jint thread_id,
- jbyteArray context_info_entries,
- jbyteArray context_info_strings)
-{
- jboolean iscopy;
- const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
- const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
- const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
- const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
- signed char *context_info_entries_array;
- signed char *context_info_strings_array;
-
- /*
- * Write these to the TLS variables, so that the UST callbacks in
- * lttng_ust_context.c can access them.
- */
- context_info_entries_array = (*env)->GetByteArrayElements(env, context_info_entries, &iscopy);
- lttng_ust_context_info_tls.ctx_entries = (struct lttng_ust_jni_ctx_entry *) context_info_entries_array;
- lttng_ust_context_info_tls.ctx_entries_len = (*env)->GetArrayLength(env, context_info_entries);
- context_info_strings_array = (*env)->GetByteArrayElements(env, context_info_strings, &iscopy);
- lttng_ust_context_info_tls.ctx_strings = context_info_strings_array;
- lttng_ust_context_info_tls.ctx_strings_len = (*env)->GetArrayLength(env, context_info_strings);
-
- tracepoint(lttng_jul, event, msg_cstr, logger_name_cstr,
- class_name_cstr, method_name_cstr, millis, log_level, thread_id);
-
- lttng_ust_context_info_tls.ctx_entries = NULL;
- lttng_ust_context_info_tls.ctx_entries_len = 0;
- lttng_ust_context_info_tls.ctx_strings = NULL;
- lttng_ust_context_info_tls.ctx_strings_len = 0;
- (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
- (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
- (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
- (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
- (*env)->ReleaseByteArrayElements(env, context_info_entries, context_info_entries_array, 0);
- (*env)->ReleaseByteArrayElements(env, context_info_strings, context_info_strings_array, 0);
-}
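
The tracepointWithContext entry point above passes the context arrays to the
UST callbacks through a thread-local side channel: it stashes the pinned byte
arrays in lttng_ust_context_info_tls, fires the tracepoint (whose callbacks
run on the same thread), then clears the fields before releasing the arrays.
A stripped-down sketch of that hand-off pattern, using hypothetical names
rather than the agent's actual types:

    #include <stddef.h>

    struct side_channel {
        const char *data;
        int len;
    };

    static __thread struct side_channel tls_channel;

    /* A callback running on the same thread reads the TLS fields. */
    static int callback_reads_channel(void)
    {
        return tls_channel.data ? tls_channel.len : 0;
    }

    static int traced_call(const char *data, int len)
    {
        int ret;

        tls_channel.data = data;            /* publish, this thread only */
        tls_channel.len = len;
        ret = callback_reads_channel();     /* stands in for tracepoint() */
        tls_channel.data = NULL;            /* always clear on the way out */
        tls_channel.len = 0;
        return ret;
    }
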
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_jul
-
-#if !defined(_TRACEPOINT_LTTNG_UST_JUL_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_JUL_H
-
-#include <lttng/tracepoint.h>
-
-/*
- * Tracepoint used by Java applications using the JUL handler.
- */
-TRACEPOINT_EVENT(lttng_jul, event,
- TP_ARGS(
- const char *, msg,
- const char *, logger_name,
- const char *, class_name,
- const char *, method_name,
- long, millis,
- int, log_level,
- int, thread_id),
- TP_FIELDS(
- ctf_string(msg, msg)
- ctf_string(logger_name, logger_name)
- ctf_string(class_name, class_name)
- ctf_string(method_name, method_name)
- ctf_integer(long, long_millis, millis)
- ctf_integer(int, int_loglevel, log_level)
- ctf_integer(int, int_threadid, thread_id)
- )
-)
-
-#endif /* _TRACEPOINT_LTTNG_UST_JUL_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng_ust_jul.h"
-
-/* This part must be outside protection */
-#include <lttng/tracepoint-event.h>
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
-
-lib_LTLIBRARIES = liblttng-ust-log4j-jni.la
-liblttng_ust_log4j_jni_la_SOURCES = lttng_ust_log4j.c \
- lttng_ust_log4j.h
-
-nodist_liblttng_ust_log4j_jni_la_SOURCES = org_lttng_ust_agent_log4j_LttngLog4jApi.h
-
-liblttng_ust_log4j_jni_la_LIBADD = -lc \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(top_builddir)/liblttng-ust-java-agent/jni/common/liblttng-ust-context-jni.la
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 EfficiOS Inc.
- * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include "org_lttng_ust_agent_log4j_LttngLog4jApi.h"
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#include "lttng_ust_log4j.h"
-#include "../common/lttng_ust_context.h"
-
-/*
- * Deprecated function, predating the addition of context information.
- */
-JNIEXPORT void JNICALL Java_org_lttng_ust_agent_log4j_LttngLog4jApi_tracepoint(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring msg,
- jstring logger_name,
- jstring class_name,
- jstring method_name,
- jstring file_name,
- jint line_number,
- jlong timestamp,
- jint loglevel,
- jstring thread_name)
-{
- jboolean iscopy;
- const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
- const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
- const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
- const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
- const char *file_name_cstr = (*env)->GetStringUTFChars(env, file_name, &iscopy);
- const char *thread_name_cstr = (*env)->GetStringUTFChars(env, thread_name, &iscopy);
-
- tracepoint(lttng_log4j, event, msg_cstr, logger_name_cstr,
- class_name_cstr, method_name_cstr, file_name_cstr,
- line_number, timestamp, loglevel, thread_name_cstr);
-
- (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
- (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
- (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
- (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
- (*env)->ReleaseStringUTFChars(env, file_name, file_name_cstr);
- (*env)->ReleaseStringUTFChars(env, thread_name, thread_name_cstr);
-}
-
-/*
- * Tracepoint used by Java applications using the log4j handler.
- */
-JNIEXPORT void JNICALL Java_org_lttng_ust_agent_log4j_LttngLog4jApi_tracepointWithContext(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring msg,
- jstring logger_name,
- jstring class_name,
- jstring method_name,
- jstring file_name,
- jint line_number,
- jlong timestamp,
- jint loglevel,
- jstring thread_name,
- jbyteArray context_info_entries,
- jbyteArray context_info_strings)
-{
- jboolean iscopy;
- const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
- const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
- const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
- const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
- const char *file_name_cstr = (*env)->GetStringUTFChars(env, file_name, &iscopy);
- const char *thread_name_cstr = (*env)->GetStringUTFChars(env, thread_name, &iscopy);
- signed char *context_info_entries_array;
- signed char *context_info_strings_array;
-
- /*
- * Write these to the TLS variables, so that the UST callbacks in
- * lttng_ust_context.c can access them.
- */
- context_info_entries_array = (*env)->GetByteArrayElements(env, context_info_entries, &iscopy);
- lttng_ust_context_info_tls.ctx_entries = (struct lttng_ust_jni_ctx_entry *) context_info_entries_array;
- lttng_ust_context_info_tls.ctx_entries_len = (*env)->GetArrayLength(env, context_info_entries);
- context_info_strings_array = (*env)->GetByteArrayElements(env, context_info_strings, &iscopy);
- lttng_ust_context_info_tls.ctx_strings = context_info_strings_array;
- lttng_ust_context_info_tls.ctx_strings_len = (*env)->GetArrayLength(env, context_info_strings);
-
- tracepoint(lttng_log4j, event, msg_cstr, logger_name_cstr,
- class_name_cstr, method_name_cstr, file_name_cstr,
- line_number, timestamp, loglevel, thread_name_cstr);
-
- lttng_ust_context_info_tls.ctx_entries = NULL;
- lttng_ust_context_info_tls.ctx_entries_len = 0;
- lttng_ust_context_info_tls.ctx_strings = NULL;
- lttng_ust_context_info_tls.ctx_strings_len = 0;
- (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
- (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
- (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
- (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
- (*env)->ReleaseStringUTFChars(env, file_name, file_name_cstr);
- (*env)->ReleaseStringUTFChars(env, thread_name, thread_name_cstr);
- (*env)->ReleaseByteArrayElements(env, context_info_entries, context_info_entries_array, 0);
- (*env)->ReleaseByteArrayElements(env, context_info_strings, context_info_strings_array, 0);
-}
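
Both agents follow the standard JNI pinning protocol for primitive arrays:
every GetByteArrayElements() is paired with a ReleaseByteArrayElements(),
whose final mode argument decides what happens to a copied buffer (0 copies
the data back and frees the copy, JNI_ABORT frees it without copying back).
A self-contained sketch of that discipline, with a hypothetical function
name:

    #include <jni.h>

    static jint sum_bytes(JNIEnv *env, jbyteArray arr)
    {
        jint i, len, total = 0;
        /* Passing NULL for isCopy is allowed when we don't care. */
        jbyte *bytes = (*env)->GetByteArrayElements(env, arr, NULL);

        if (!bytes)
            return 0;       /* OutOfMemoryError is already pending */
        len = (*env)->GetArrayLength(env, arr);
        for (i = 0; i < len; i++)
            total += bytes[i];
        /* JNI_ABORT: read-only access, nothing to copy back. */
        (*env)->ReleaseByteArrayElements(env, arr, bytes, JNI_ABORT);
        return total;
    }
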
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_log4j
-
-#if !defined(_TRACEPOINT_LTTNG_UST_LOG4J_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_LOG4J_H
-
-#include <lttng/tracepoint.h>
-
-/*
- * Tracepoint used by Java applications using the log4j log appender.
- */
-TRACEPOINT_EVENT(lttng_log4j, event,
- TP_ARGS(
- const char *, msg,
- const char *, logger_name,
- const char *, class_name,
- const char *, method_name,
- const char *, file_name,
- int, line_number,
- long, timestamp,
- int, log_level,
- const char *, thread_name),
- TP_FIELDS(
- ctf_string(msg, msg)
- ctf_string(logger_name, logger_name)
- ctf_string(class_name, class_name)
- ctf_string(method_name, method_name)
- ctf_string(filename, file_name)
- ctf_integer(int, line_number, line_number)
- ctf_integer(long, timestamp, timestamp)
- ctf_integer(int, int_loglevel, log_level)
- ctf_string(thread_name, thread_name)
- )
-)
-
-#endif /* _TRACEPOINT_LTTNG_UST_LOG4J_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng_ust_log4j.h"
-
-/* This part must be outside protection */
-#include <lttng/tracepoint-event.h>
+++ /dev/null
-org_lttng_ust_LTTngUst.h
-org/
-liblttng-ust-java.jar
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include "org_lttng_ust_LTTngUst.h"
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#include "lttng_ust_java.h"
-
-JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointInt(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring ev_name,
- jint payload)
-{
- jboolean iscopy;
- const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
-
- tracepoint(lttng_ust_java, int_event, ev_name_cstr, payload);
-
- (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
-}
-
-JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointIntInt(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring ev_name,
- jint payload1,
- jint payload2)
-{
- jboolean iscopy;
- const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
-
- tracepoint(lttng_ust_java, int_int_event, ev_name_cstr, payload1, payload2);
-
- (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
-}
-
-JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointLong(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring ev_name,
- jlong payload)
-{
- jboolean iscopy;
- const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
-
- tracepoint(lttng_ust_java, long_event, ev_name_cstr, payload);
-
- (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
-}
-
-JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointLongLong(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring ev_name,
- jlong payload1,
- jlong payload2)
-{
- jboolean iscopy;
- const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
-
- tracepoint(lttng_ust_java, long_long_event, ev_name_cstr, payload1, payload2);
-
- (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
-}
-
-JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointString(JNIEnv *env,
- jobject jobj __attribute__((unused)),
- jstring ev_name,
- jstring payload)
-{
- jboolean iscopy;
- const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
- const char *payload_cstr = (*env)->GetStringUTFChars(env, payload, &iscopy);
-
- tracepoint(lttng_ust_java, string_event, ev_name_cstr, payload_cstr);
-
- (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
- (*env)->ReleaseStringUTFChars(env, payload, payload_cstr);
-}
-
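
Each of the entry points above applies the matching JNI rule for strings:
GetStringUTFChars() yields a modified UTF-8 C string that must be handed
back to ReleaseStringUTFChars() with the exact same pointer. A minimal
sketch, with a hypothetical function name:

    #include <jni.h>

    static void use_string(JNIEnv *env, jstring js)
    {
        /* May pin the string or return a copy; NULL isCopy is fine. */
        const char *cstr = (*env)->GetStringUTFChars(env, js, NULL);

        if (!cstr)
            return;         /* OutOfMemoryError is already pending */
        /* ... use cstr as an ordinary read-only C string ... */
        (*env)->ReleaseStringUTFChars(env, js, cstr);
    }
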
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-JAVAROOT = .
-jarfile = liblttng-ust-java.jar
-jardir = $(datadir)/java
-pkgpath = org/lttng/ust
-
-dist_noinst_JAVA = $(pkgpath)/LTTngUst.java
-jar_DATA = $(jarfile)
-BUILT_SOURCES = org_lttng_ust_LTTngUst.h
-
-AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
-lib_LTLIBRARIES = liblttng-ust-java.la
-liblttng_ust_java_la_SOURCES = LTTngUst.c lttng_ust_java.h
-nodist_liblttng_ust_java_la_SOURCES = org_lttng_ust_LTTngUst.h
-
-liblttng_ust_java_la_LIBADD = -lc \
- $(top_builddir)/liblttng-ust/liblttng-ust.la
-
-$(jarfile): classnoinst.stamp
- $(JAR) cf $(JARFLAGS) $@ $(pkgpath)/*.class
-
-if !HAVE_JAVAH
-# If we don't have javah, assume we are running openjdk >= 10 and use javac
-# to generate the jni header file.
-AM_JAVACFLAGS = -h .
-
-org_lttng_ust_LTTngUst.h: $(jarfile)
-else
-org_lttng_ust_LTTngUst.h: jni-header.stamp
-
-jni-header.stamp: $(dist_noinst_JAVA)
- $(JAVAH) -classpath $(srcdir) $(JAVAHFLAGS) org.lttng.ust.LTTngUst && \
- echo "JNI header generated" > jni-header.stamp
-endif
-
-all-local: org_lttng_ust_LTTngUst.h
-
-EXTRA_DIST = README
-
-CLEANFILES = $(jarfile) $(pkgpath)/*.class jni-header.stamp org_lttng_ust_LTTngUst.h
+++ /dev/null
-This directory contains a simple API for instrumenting Java applications.
-
-Example configuration to build this library:
-
-dependency: openjdk-7-jdk
-./configure --enable-jni-interface
-
-Note that OpenJDK 7 is used for development and continuous integration, so
-that is the version we directly support for this library. However, it has
-also been tested with OpenJDK 6. Please let us know if other Java versions
-(commercial or not) work with this library.
-
-After building, you can use the liblttng-ust-java.jar file in a Java project.
-It requires the liblttng-ust-java.so* files (which get installed when doing
-`make install'), so make sure those are in the linker's library path.
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_java
-
-#if !defined(_TRACEPOINT_LTTNG_UST_JAVA_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_JAVA_H
-
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_java, int_event,
- TP_ARGS(const char *, name, int, payload),
- TP_FIELDS(
- ctf_string(name, name)
- ctf_integer(int, int_payload, payload)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_java, int_int_event,
- TP_ARGS(const char *, name, int, payload1, int, payload2),
- TP_FIELDS(
- ctf_string(name, name)
- ctf_integer(int, int_payload1, payload1)
- ctf_integer(int, int_payload2, payload2)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_java, long_event,
- TP_ARGS(const char *, name, long, payload),
- TP_FIELDS(
- ctf_string(name, name)
- ctf_integer(long, long_payload, payload)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_java, long_long_event,
- TP_ARGS(const char *, name, long, payload1, long, payload2),
- TP_FIELDS(
- ctf_string(name, name)
- ctf_integer(long, long_payload1, payload1)
- ctf_integer(long, long_payload2, payload2)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_java, string_event,
- TP_ARGS(const char *, name, const char *, payload),
- TP_FIELDS(
- ctf_string(name, name)
- ctf_string(string_payload, payload)
- )
-)
-
-#endif /* _TRACEPOINT_LTTNG_UST_JAVA_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng_ust_java.h"
-
-/* This part must be outside protection */
-#include <lttng/tracepoint-event.h>
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2012 Alexandre Montplaisir <alexandre.montplaisir@polymtl.ca>
- */
-
-package org.lttng.ust;
-
-/**
- * This class implements the Java side of the LTTng-UST Java interface.
- *
- * First, make sure you have installed "liblttng-ust-java.so" where the linker
- * can find it. You can then call LTTngUst.init() from your Java program to
- * connect the methods exposed here to the native library.
- *
- * Because of limitations in the probe declaration, all trace events generated
- * by this library will have "lttng_ust_java" for domain, and "<type>_event" for
- * event name in the CTF trace files. The "name" parameter will instead appear
- * as the first element of the event's payload.
- *
- * @author Mathieu Desnoyers
- * @author Alexandre Montplaisir
- *
- */
-public abstract class LTTngUst {
-
- /**
- * Initialize the UST tracer. This should always be called first, before any
- * tracepoint* method.
- */
- public static void init() {
- System.loadLibrary("lttng-ust-java"); //$NON-NLS-1$
- }
-
- /**
- * Insert a tracepoint with a payload of type Integer.
- *
- * @param name
- * The name assigned to this event. For best performance, this
- * should be a statically-defined String, or a literal.
- * @param payload
- * The int payload
- */
- public static native void tracepointInt(String name, int payload);
-
- /**
- * Insert a tracepoint with a payload consisting of two integers.
- *
- * @param name
- * The name assigned to this event. For best performance, this
- * should be a statically-defined String, or a literal.
- * @param payload1
- * The first int payload
- * @param payload2
- * The second int payload
- */
- public static native void
- tracepointIntInt(String name, int payload1, int payload2);
-
- /**
- * Insert a tracepoint with a payload of type Long
- *
- * @param name
- * The name assigned to this event. For best performance, this
- * should be a statically-defined String, or a literal.
- * @param payload
- * The long payload
- */
- public static native void tracepointLong(String name, long payload);
-
- /**
- * Insert a tracepoint with a payload consisting of two longs.
- *
- * @param name
- * The name assigned to this event. For best performance, this
- * should be a statically-defined String, or a literal.
- * @param payload1
- * The first long payload
- * @param payload2
- * The second long payload
- */
- public static native void
- tracepointLongLong(String name, long payload1, long payload2);
-
- /**
- * Insert a tracepoint with a String payload.
- *
- * @param name
- * The name assigned to this event. For best performance, this
- * should be a statically-defined String, or a literal.
- * @param payload
- * The String payload
- */
- public static native void tracepointString(String name, String payload);
-
-}
-
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-libc-wrapper.la \
- liblttng-ust-pthread-wrapper.la
-
-liblttng_ust_libc_wrapper_la_SOURCES = \
- lttng-ust-malloc.c \
- ust_libc.h
-
-liblttng_ust_libc_wrapper_la_LIBADD = \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_libc_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-liblttng_ust_pthread_wrapper_la_SOURCES = \
- lttng-ust-pthread.c \
- ust_pthread.h
-
-liblttng_ust_pthread_wrapper_la_LIBADD = \
- $(top_builddir)/liblttng-ust/liblttng-ust.la \
- $(DL_LIBS)
-
-liblttng_ust_pthread_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-dist_noinst_SCRIPTS = run
-EXTRA_DIST = README
+++ /dev/null
-liblttng-ust-libc is used to instrument some libc calls in a program,
-without the need to recompile it.
-
-This library defines a malloc() function that is instrumented with a
-tracepoint. It also calls the libc malloc afterwards. When loaded with
-LD_PRELOAD, it replaces the libc malloc() function, in effect
-instrumenting all calls to malloc(). The same is performed for free().
-
-See the "run" script for a usage example.
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2009 Pierre-Marc Fournier
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-/*
- * Do _not_ define _LGPL_SOURCE because we don't want to create a
- * circular dependency loop between this malloc wrapper, liburcu and
- * libc.
- */
-#include <ust-dlfcn.h>
-#include <sys/types.h>
-#include <stdio.h>
-#include <assert.h>
-#include <malloc.h>
-
-#include <urcu/system.h>
-#include <urcu/uatomic.h>
-#include <urcu/compiler.h>
-#include <urcu/tls-compat.h>
-#include <urcu/arch.h>
-
-#include <lttng/ust-libc-wrapper.h>
-
-#include <ust-helper.h>
-#include "ust-compat.h"
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM ip
-#include "ust_libc.h"
-
-#define STATIC_CALLOC_LEN 4096
-static char static_calloc_buf[STATIC_CALLOC_LEN];
-static unsigned long static_calloc_buf_offset;
-
-struct alloc_functions {
- void *(*calloc)(size_t nmemb, size_t size);
- void *(*malloc)(size_t size);
- void (*free)(void *ptr);
- void *(*realloc)(void *ptr, size_t size);
- void *(*memalign)(size_t alignment, size_t size);
- int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
-};
-
-static
-struct alloc_functions cur_alloc;
-
-/*
- * Make sure our own use of the TLS compat layer will not cause infinite
- * recursion by calling calloc.
- */
-
-static
-void *static_calloc(size_t nmemb, size_t size);
-
-/*
- * pthread mutex replacement for URCU tls compat layer.
- */
-static int ust_malloc_lock;
-
-static
-void ust_malloc_spin_lock(pthread_mutex_t *lock)
- __attribute__((unused));
-static
-void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
-{
- /*
- * The memory barrier within cmpxchg takes care of ordering
- * memory accesses with respect to the start of the critical
- * section.
- */
- while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
- caa_cpu_relax();
-}
-
-static
-void ust_malloc_spin_unlock(pthread_mutex_t *lock)
- __attribute__((unused));
-static
-void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
-{
- /*
- * Ensure memory accesses within the critical section do not
- * leak outside.
- */
- cmm_smp_mb();
- uatomic_set(&ust_malloc_lock, 0);
-}
-
-#define calloc static_calloc
-#define pthread_mutex_lock ust_malloc_spin_lock
-#define pthread_mutex_unlock ust_malloc_spin_unlock
-static DEFINE_URCU_TLS(int, malloc_nesting);
-#undef pthread_mutex_unlock
-#undef pthread_mutex_lock
-#undef calloc
-
-/*
- * Static allocator to use when initially executing dlsym(). It stores
- * each object's size in a size_t value placed just before the object.
- */
-static
-void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
-{
- size_t prev_offset, new_offset, res_offset, aligned_offset;
-
- if (nmemb * size == 0) {
- return NULL;
- }
-
- /*
- * Protect static_calloc_buf_offset from concurrent updates
- * using a cmpxchg loop rather than a mutex to remove a
- * dependency on pthread. This will minimize the risk of bad
- * interaction between mutex and malloc instrumentation.
- */
- res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
- do {
- prev_offset = res_offset;
- aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
- new_offset = aligned_offset + nmemb * size;
- if (new_offset > sizeof(static_calloc_buf)) {
- abort();
- }
- } while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
- prev_offset, new_offset)) != prev_offset);
- *(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
- return &static_calloc_buf[aligned_offset];
-}
-
-static
-void *static_calloc(size_t nmemb, size_t size)
-{
- void *retval;
-
- retval = static_calloc_aligned(nmemb, size, 1);
- return retval;
-}
-
-static
-void *static_malloc(size_t size)
-{
- void *retval;
-
- retval = static_calloc_aligned(1, size, 1);
- return retval;
-}
-
-static
-void static_free(void *ptr __attribute__((unused)))
-{
- /* no-op. */
-}
-
-static
-void *static_realloc(void *ptr, size_t size)
-{
- size_t *old_size = NULL;
- void *retval;
-
- if (size == 0) {
- retval = NULL;
- goto end;
- }
-
- if (ptr) {
- old_size = (size_t *) ptr - 1;
- if (size <= *old_size) {
- /* We can re-use the old entry. */
- *old_size = size;
- retval = ptr;
- goto end;
- }
- }
- /* We need to expand. Don't free previous memory location. */
- retval = static_calloc_aligned(1, size, 1);
- assert(retval);
- if (ptr)
- memcpy(retval, ptr, *old_size);
-end:
- return retval;
-}
-
-static
-void *static_memalign(size_t alignment, size_t size)
-{
- void *retval;
-
- retval = static_calloc_aligned(1, size, alignment);
- return retval;
-}
-
-static
-int static_posix_memalign(void **memptr, size_t alignment, size_t size)
-{
- void *ptr;
-
-	/* Alignment must be a non-zero power of 2, at least sizeof(void *). */
- if (alignment & (alignment - 1)
- || alignment < sizeof(void *)
- || alignment == 0) {
- goto end;
- }
- ptr = static_calloc_aligned(1, size, alignment);
- *memptr = ptr;
-end:
- return 0;
-}
-
-static
-void setup_static_allocator(void)
-{
- assert(cur_alloc.calloc == NULL);
- cur_alloc.calloc = static_calloc;
- assert(cur_alloc.malloc == NULL);
- cur_alloc.malloc = static_malloc;
- assert(cur_alloc.free == NULL);
- cur_alloc.free = static_free;
- assert(cur_alloc.realloc == NULL);
- cur_alloc.realloc = static_realloc;
- assert(cur_alloc.memalign == NULL);
- cur_alloc.memalign = static_memalign;
- assert(cur_alloc.posix_memalign == NULL);
- cur_alloc.posix_memalign = static_posix_memalign;
-}
-
-static
-void lookup_all_symbols(void)
-{
- struct alloc_functions af;
-
- /*
- * Temporarily redirect allocation functions to
-	 * static_calloc_aligned, and the free function to static_free
- * (no-op), until the dlsym lookup has completed.
- */
- setup_static_allocator();
-
- /* Perform the actual lookups */
- af.calloc = dlsym(RTLD_NEXT, "calloc");
- af.malloc = dlsym(RTLD_NEXT, "malloc");
- af.free = dlsym(RTLD_NEXT, "free");
- af.realloc = dlsym(RTLD_NEXT, "realloc");
- af.memalign = dlsym(RTLD_NEXT, "memalign");
- af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");
-
- /* Populate the new allocator functions */
- memcpy(&cur_alloc, &af, sizeof(cur_alloc));
-}
-
-void *malloc(size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.malloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.malloc == NULL) {
- fprintf(stderr, "mallocwrap: unable to find malloc\n");
- abort();
- }
- }
- retval = cur_alloc.malloc(size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, malloc,
- size, retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-void free(void *ptr)
-{
- URCU_TLS(malloc_nesting)++;
- /*
- * Check whether the memory was allocated with
-	 * static_calloc_aligned, in which case there is nothing to free.
- */
- if (caa_unlikely((char *)ptr >= static_calloc_buf &&
- (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
- goto end;
- }
-
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, free,
- ptr, LTTNG_UST_CALLER_IP());
- }
-
- if (cur_alloc.free == NULL) {
- lookup_all_symbols();
- if (cur_alloc.free == NULL) {
- fprintf(stderr, "mallocwrap: unable to find free\n");
- abort();
- }
- }
- cur_alloc.free(ptr);
-end:
- URCU_TLS(malloc_nesting)--;
-}
-
-void *calloc(size_t nmemb, size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.calloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.calloc == NULL) {
- fprintf(stderr, "callocwrap: unable to find calloc\n");
- abort();
- }
- }
- retval = cur_alloc.calloc(nmemb, size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, calloc,
- nmemb, size, retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-void *realloc(void *ptr, size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- /*
- * Check whether the memory was allocated with
-	 * static_calloc_aligned, in which case there is nothing
- * to free, and we need to copy the old data.
- */
- if (caa_unlikely((char *)ptr >= static_calloc_buf &&
- (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
- size_t *old_size;
-
- old_size = (size_t *) ptr - 1;
- if (cur_alloc.calloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.calloc == NULL) {
- fprintf(stderr, "reallocwrap: unable to find calloc\n");
- abort();
- }
- }
- retval = cur_alloc.calloc(1, size);
- if (retval) {
- memcpy(retval, ptr, *old_size);
- }
- /*
-	 * Mimic reception of a NULL pointer, so that memory
-	 * allocation analysis based on the trace doesn't get
-	 * confused by the address from the static allocator.
- */
- ptr = NULL;
- goto end;
- }
-
- if (cur_alloc.realloc == NULL) {
- lookup_all_symbols();
- if (cur_alloc.realloc == NULL) {
- fprintf(stderr, "reallocwrap: unable to find realloc\n");
- abort();
- }
- }
- retval = cur_alloc.realloc(ptr, size);
-end:
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, realloc,
- ptr, size, retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-void *memalign(size_t alignment, size_t size)
-{
- void *retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.memalign == NULL) {
- lookup_all_symbols();
- if (cur_alloc.memalign == NULL) {
- fprintf(stderr, "memalignwrap: unable to find memalign\n");
- abort();
- }
- }
- retval = cur_alloc.memalign(alignment, size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, memalign,
- alignment, size, retval,
- LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-int posix_memalign(void **memptr, size_t alignment, size_t size)
-{
- int retval;
-
- URCU_TLS(malloc_nesting)++;
- if (cur_alloc.posix_memalign == NULL) {
- lookup_all_symbols();
- if (cur_alloc.posix_memalign == NULL) {
- fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
- abort();
- }
- }
- retval = cur_alloc.posix_memalign(memptr, alignment, size);
- if (URCU_TLS(malloc_nesting) == 1) {
- tracepoint(lttng_ust_libc, posix_memalign,
- *memptr, alignment, size,
- retval, LTTNG_UST_CALLER_IP());
- }
- URCU_TLS(malloc_nesting)--;
- return retval;
-}
-
-static
-void lttng_ust_fixup_malloc_nesting_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
-}
-
-void lttng_ust_libc_wrapper_malloc_init(void)
-{
- /* Initialization already done */
- if (cur_alloc.calloc) {
- return;
- }
- lttng_ust_fixup_malloc_nesting_tls();
- /*
- * Ensure the allocator is in place before the process becomes
- * multithreaded.
- */
- lookup_all_symbols();
-}
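
The malloc_nesting counter above is the wrapper's reentrancy guard: the
tracepoint machinery may itself allocate, so only the outermost call on a
given thread emits an event. Distilled to its core, with hypothetical
names:

    #include <stdlib.h>

    static __thread int nesting;

    static void emit_event(void *ptr, size_t size)
    {
        /* Stands in for tracepoint(); it may allocate, which is
         * exactly why the depth check below exists. */
        (void) ptr;
        (void) size;
    }

    void *traced_malloc(size_t size)
    {
        void *ret;

        nesting++;
        ret = malloc(size);
        if (nesting == 1)       /* outermost call only */
            emit_event(ret, size);
        nesting--;
        return ret;
    }
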
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Mentor Graphics
- */
-
-/*
- * Do _not_ define _LGPL_SOURCE because we don't want to create a
- * circular dependency loop between this malloc wrapper, liburcu and
- * libc.
- */
-#include <ust-dlfcn.h>
-#include <ust-helper.h>
-#include <pthread.h>
-
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM ip
-#include "ust_pthread.h"
-
-static __thread int thread_in_trace;
-
-int pthread_mutex_lock(pthread_mutex_t *mutex)
-{
- static int (*mutex_lock)(pthread_mutex_t *);
- int retval;
-
- if (!mutex_lock) {
- mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
- if (!mutex_lock) {
- if (thread_in_trace) {
- abort();
- }
- fprintf(stderr, "unable to initialize pthread wrapper library.\n");
- return EINVAL;
- }
- }
- if (thread_in_trace) {
- return mutex_lock(mutex);
- }
-
- thread_in_trace = 1;
- tracepoint(lttng_ust_pthread, pthread_mutex_lock_req, mutex,
- LTTNG_UST_CALLER_IP());
- retval = mutex_lock(mutex);
- tracepoint(lttng_ust_pthread, pthread_mutex_lock_acq, mutex,
- retval, LTTNG_UST_CALLER_IP());
- thread_in_trace = 0;
- return retval;
-}
-
-int pthread_mutex_trylock(pthread_mutex_t *mutex)
-{
- static int (*mutex_trylock)(pthread_mutex_t *);
- int retval;
-
- if (!mutex_trylock) {
- mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
- if (!mutex_trylock) {
- if (thread_in_trace) {
- abort();
- }
- fprintf(stderr, "unable to initialize pthread wrapper library.\n");
- return EINVAL;
- }
- }
- if (thread_in_trace) {
- return mutex_trylock(mutex);
- }
-
- thread_in_trace = 1;
- retval = mutex_trylock(mutex);
- tracepoint(lttng_ust_pthread, pthread_mutex_trylock, mutex,
- retval, LTTNG_UST_CALLER_IP());
- thread_in_trace = 0;
- return retval;
-}
-
-int pthread_mutex_unlock(pthread_mutex_t *mutex)
-{
- static int (*mutex_unlock)(pthread_mutex_t *);
- int retval;
-
- if (!mutex_unlock) {
- mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
- if (!mutex_unlock) {
- if (thread_in_trace) {
- abort();
- }
- fprintf(stderr, "unable to initialize pthread wrapper library.\n");
- return EINVAL;
- }
- }
- if (thread_in_trace) {
- return mutex_unlock(mutex);
- }
-
- thread_in_trace = 1;
- retval = mutex_unlock(mutex);
- tracepoint(lttng_ust_pthread, pthread_mutex_unlock, mutex,
- retval, LTTNG_UST_CALLER_IP());
- thread_in_trace = 0;
- return retval;
-}
+++ /dev/null
-#!/bin/sh
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-
-LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-libc-wrapper.so ${*}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_libc
-
-#if !defined(_TRACEPOINT_UST_LIBC_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_UST_LIBC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_libc, malloc,
- TP_ARGS(size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, free,
- TP_ARGS(void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, calloc,
- TP_ARGS(size_t, nmemb, size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer(size_t, nmemb, nmemb)
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, realloc,
- TP_ARGS(void *, in_ptr, size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, in_ptr, in_ptr)
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, memalign,
- TP_ARGS(size_t, alignment, size_t, size, void *, ptr, void *, ip),
- TP_FIELDS(
- ctf_integer(size_t, alignment, alignment)
- ctf_integer(size_t, size, size)
- ctf_integer_hex(void *, ptr, ptr)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_libc, posix_memalign,
- TP_ARGS(void *, out_ptr, size_t, alignment, size_t, size, int, result, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, out_ptr, out_ptr)
- ctf_integer(size_t, alignment, alignment)
- ctf_integer(size_t, size, size)
- ctf_integer(int, result, result)
- ctf_unused(ip)
- )
-)
-
-#endif /* _TRACEPOINT_UST_LIBC_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./ust_libc.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013 Mentor Graphics
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_pthread
-
-#if !defined(_TRACEPOINT_UST_PTHREAD_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_UST_PTHREAD_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_req,
- TP_ARGS(pthread_mutex_t *, mutex, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_acq,
- TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_integer(int, status, status)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_trylock,
- TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_integer(int, status, status)
- ctf_unused(ip)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_unlock,
- TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
- TP_FIELDS(
- ctf_integer_hex(void *, mutex, mutex)
- ctf_integer(int, status, status)
- ctf_unused(ip)
- )
-)
-
-#endif /* _TRACEPOINT_UST_PTHREAD_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./ust_pthread.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-# tracepoint provider: always built/installed (does not depend on Python per se)
-AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
-
-lib_LTLIBRARIES = liblttng-ust-python-agent.la
-
-liblttng_ust_python_agent_la_SOURCES = lttng_ust_python.c lttng_ust_python.h
-liblttng_ust_python_agent_la_LIBADD = -lc \
- $(top_builddir)/liblttng-ust/liblttng-ust.la
-
-# Follow the main library soname for co-installability
-liblttng_ust_python_agent_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#define TRACEPOINT_DEFINE
-#define TRACEPOINT_CREATE_PROBES
-#include "lttng_ust_python.h"
-
-/*
- * The tracepoint fired by the agent.
- */
-
-void py_tracepoint(const char *asctime, const char *msg,
- const char *logger_name, const char *funcName, unsigned int lineno,
- unsigned int int_loglevel, unsigned int thread, const char *threadName);
-void py_tracepoint(const char *asctime, const char *msg,
- const char *logger_name, const char *funcName, unsigned int lineno,
- unsigned int int_loglevel, unsigned int thread, const char *threadName)
-{
- tracepoint(lttng_python, event, asctime, msg, logger_name, funcName,
- lineno, int_loglevel, thread, threadName);
-}
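
The shim above exists so the Python agent can reach the tracepoint through
a plain C symbol via a foreign-function interface. Purely as an
illustration, the same symbol can be resolved from C with dlopen()/dlsym();
the library name matches the Makefile.am above, while the argument values
are made up:

    #include <dlfcn.h>

    typedef void (*py_tp_fn)(const char *, const char *, const char *,
            const char *, unsigned int, unsigned int, unsigned int,
            const char *);

    int main(void)
    {
        void *handle = dlopen("liblttng-ust-python-agent.so", RTLD_NOW);
        py_tp_fn fn;

        if (!handle)
            return 1;
        fn = (py_tp_fn) dlsym(handle, "py_tracepoint");
        if (fn)
            fn("12:00:00", "hello", "logger", "main",
                1, 6, 0, "MainThread");
        dlclose(handle);
        return 0;
    }
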
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2014 - David Goulet <dgoulet@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_python
-
-#if !defined(_TRACEPOINT_LTTNG_UST_PYTHON_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_PYTHON_H
-
-#include <lttng/tracepoint.h>
-#include <stdbool.h>
-
-TRACEPOINT_EVENT(lttng_python, event,
- TP_ARGS(
- const char *, asctime,
- const char *, msg,
- const char *, logger_name,
- const char *, funcName,
- int, lineno,
- int, int_loglevel,
- int, thread,
- const char *, threadName
- ),
- TP_FIELDS(
- ctf_string(asctime, asctime)
- ctf_string(msg, msg)
- ctf_string(logger_name, logger_name)
- ctf_string(funcName, funcName)
- ctf_integer(unsigned int, lineno, lineno)
- ctf_integer(unsigned int, int_loglevel, int_loglevel)
- ctf_integer(unsigned int, thread, thread)
- ctf_string(threadName, threadName)
- )
-)
-
-#endif /* _TRACEPOINT_LTTNG_UST_PYTHON_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng_ust_python.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
-
-noinst_LTLIBRARIES = liblttng-ust-runtime.la liblttng-ust-support.la
-
-lib_LTLIBRARIES = liblttng-ust-common.la liblttng-ust-tracepoint.la liblttng-ust.la
-
-liblttng_ust_common_la_SOURCES = \
- lttng-ust-urcu.c \
- lttng-ust-urcu-pointer.c
-
-liblttng_ust_common_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-liblttng_ust_tracepoint_la_SOURCES = \
- tracepoint.c \
- tracepoint-weak-test.c \
- tracepoint-internal.h \
- lttng-tracer-core.h \
- jhash.h \
- error.h
-
-liblttng_ust_tracepoint_la_LIBADD = \
- liblttng-ust-common.la \
- $(top_builddir)/snprintf/libustsnprintf.la \
- $(DL_LIBS)
-
-liblttng_ust_tracepoint_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
-liblttng_ust_tracepoint_la_CFLAGS = -DUST_COMPONENT="liblttng_ust_tracepoint" $(AM_CFLAGS)
-
-liblttng_ust_runtime_la_SOURCES = \
- bytecode.h \
- lttng-ust-comm.c \
- lttng-ust-abi.c \
- lttng-probes.c \
- lttng-bytecode.c \
- lttng-bytecode.h \
- lttng-bytecode-validator.c \
- lttng-bytecode-specialize.c \
- lttng-bytecode-interpreter.c \
- lttng-context-provider.c \
- lttng-context-vtid.c \
- lttng-context-vpid.c \
- lttng-context-pthread-id.c \
- lttng-context-procname.c \
- lttng-context-ip.c \
- lttng-context-cpu-id.c \
- lttng-context-cgroup-ns.c \
- lttng-context-ipc-ns.c \
- lttng-context-mnt-ns.c \
- lttng-context-net-ns.c \
- lttng-context-pid-ns.c \
- lttng-context-time-ns.c \
- lttng-context-user-ns.c \
- lttng-context-uts-ns.c \
- lttng-context-vuid.c \
- lttng-context-veuid.c \
- lttng-context-vsuid.c \
- lttng-context-vgid.c \
- lttng-context-vegid.c \
- lttng-context-vsgid.c \
- lttng-context.c \
- lttng-events.c \
- lttng-hash-helper.h \
- lttng-ust-elf.c \
- lttng-ust-elf.h \
- lttng-ust-statedump.c \
- lttng-ust-statedump.h \
- lttng-ust-statedump-provider.h \
- ust_lib.c \
- ust_lib.h \
- context-internal.h \
- context-provider-internal.h \
- tracepoint-internal.h \
- ust-events-internal.h \
- clock.h \
- compat.h \
- wait.h \
- jhash.h \
- lttng-ust-uuid.h \
- error.h \
- tracef.c \
- lttng-ust-tracef-provider.h \
- tracelog.c \
- lttng-ust-tracelog-provider.h \
- getenv.h \
- string-utils.c \
- string-utils.h \
- event-notifier-notification.c \
- ns.h \
- creds.h \
- rculfhash.c \
- rculfhash.h \
- rculfhash-internal.h \
- rculfhash-mm-chunk.c \
- rculfhash-mm-mmap.c \
- rculfhash-mm-order.c \
- compat_futex.c \
- futex.h
-
-if HAVE_PERF_EVENT
-liblttng_ust_runtime_la_SOURCES += \
- lttng-context-perf-counters.c \
- perf_event.h
-endif
-
-liblttng_ust_support_la_SOURCES = \
- lttng-tracer.h \
- lttng-tracer-core.h \
- ust-core.c \
- getenv.h \
- getenv.c \
- lttng-ust-dynamic-type.c \
- lttng-rb-clients.h \
- lttng-ring-buffer-client-template.h \
- lttng-ring-buffer-client-discard.c \
- lttng-ring-buffer-client-discard-rt.c \
- lttng-ring-buffer-client-overwrite.c \
- lttng-ring-buffer-client-overwrite-rt.c \
- lttng-ring-buffer-metadata-client-template.h \
- lttng-ring-buffer-metadata-client.c \
- lttng-counter-client.h \
- lttng-counter-client-percpu-32-modular.c \
- lttng-counter-client-percpu-64-modular.c \
- lttng-clock.c lttng-getcpu.c
-
-liblttng_ust_la_SOURCES =
-
-liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
-
-liblttng_ust_support_la_LIBADD = \
- $(top_builddir)/libringbuffer/libringbuffer.la \
- $(top_builddir)/libcounter/libcounter.la
-
-liblttng_ust_la_LIBADD = \
- -lrt \
- liblttng-ust-common.la \
- $(top_builddir)/snprintf/libustsnprintf.la \
- $(top_builddir)/liblttng-ust-comm/liblttng-ust-comm.la \
- liblttng-ust-tracepoint.la \
- liblttng-ust-runtime.la liblttng-ust-support.la \
- $(top_builddir)/libmsgpack/libmsgpack.la \
- $(DL_LIBS)
-
-liblttng_ust_la_CFLAGS = -DUST_COMPONENT="liblttng_ust" $(AM_CFLAGS)
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2012-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _BYTECODE_H
-#define _BYTECODE_H
-
-#include <stdint.h>
-#include <lttng/ust-abi.h>
-
-/*
- * offsets are absolute from start of bytecode.
- */
-
-struct field_ref {
- /* Initially, symbol offset. After link, field offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_symbol {
- /* Symbol offset. */
- uint16_t offset;
-} __attribute__((packed));
-
-struct get_index_u16 {
- uint16_t index;
-} __attribute__((packed));
-
-struct get_index_u64 {
- uint64_t index;
-} __attribute__((packed));
-
-struct literal_numeric {
- int64_t v;
-} __attribute__((packed));
-
-struct literal_double {
- double v;
-} __attribute__((packed));
-
-struct literal_string {
- char string[0];
-} __attribute__((packed));
-
-enum bytecode_op {
- BYTECODE_OP_UNKNOWN = 0,
-
- BYTECODE_OP_RETURN = 1,
-
- /* binary */
- BYTECODE_OP_MUL = 2,
- BYTECODE_OP_DIV = 3,
- BYTECODE_OP_MOD = 4,
- BYTECODE_OP_PLUS = 5,
- BYTECODE_OP_MINUS = 6,
- BYTECODE_OP_BIT_RSHIFT = 7,
- BYTECODE_OP_BIT_LSHIFT = 8,
- BYTECODE_OP_BIT_AND = 9,
- BYTECODE_OP_BIT_OR = 10,
- BYTECODE_OP_BIT_XOR = 11,
-
- /* binary comparators */
- BYTECODE_OP_EQ = 12,
- BYTECODE_OP_NE = 13,
- BYTECODE_OP_GT = 14,
- BYTECODE_OP_LT = 15,
- BYTECODE_OP_GE = 16,
- BYTECODE_OP_LE = 17,
-
-	/* string binary comparators */
- BYTECODE_OP_EQ_STRING = 18,
- BYTECODE_OP_NE_STRING = 19,
- BYTECODE_OP_GT_STRING = 20,
- BYTECODE_OP_LT_STRING = 21,
- BYTECODE_OP_GE_STRING = 22,
- BYTECODE_OP_LE_STRING = 23,
-
- /* s64 binary comparator */
- BYTECODE_OP_EQ_S64 = 24,
- BYTECODE_OP_NE_S64 = 25,
- BYTECODE_OP_GT_S64 = 26,
- BYTECODE_OP_LT_S64 = 27,
- BYTECODE_OP_GE_S64 = 28,
- BYTECODE_OP_LE_S64 = 29,
-
- /* double binary comparator */
- BYTECODE_OP_EQ_DOUBLE = 30,
- BYTECODE_OP_NE_DOUBLE = 31,
- BYTECODE_OP_GT_DOUBLE = 32,
- BYTECODE_OP_LT_DOUBLE = 33,
- BYTECODE_OP_GE_DOUBLE = 34,
- BYTECODE_OP_LE_DOUBLE = 35,
-
- /* Mixed S64-double binary comparators */
- BYTECODE_OP_EQ_DOUBLE_S64 = 36,
- BYTECODE_OP_NE_DOUBLE_S64 = 37,
- BYTECODE_OP_GT_DOUBLE_S64 = 38,
- BYTECODE_OP_LT_DOUBLE_S64 = 39,
- BYTECODE_OP_GE_DOUBLE_S64 = 40,
- BYTECODE_OP_LE_DOUBLE_S64 = 41,
-
- BYTECODE_OP_EQ_S64_DOUBLE = 42,
- BYTECODE_OP_NE_S64_DOUBLE = 43,
- BYTECODE_OP_GT_S64_DOUBLE = 44,
- BYTECODE_OP_LT_S64_DOUBLE = 45,
- BYTECODE_OP_GE_S64_DOUBLE = 46,
- BYTECODE_OP_LE_S64_DOUBLE = 47,
-
- /* unary */
- BYTECODE_OP_UNARY_PLUS = 48,
- BYTECODE_OP_UNARY_MINUS = 49,
- BYTECODE_OP_UNARY_NOT = 50,
- BYTECODE_OP_UNARY_PLUS_S64 = 51,
- BYTECODE_OP_UNARY_MINUS_S64 = 52,
- BYTECODE_OP_UNARY_NOT_S64 = 53,
- BYTECODE_OP_UNARY_PLUS_DOUBLE = 54,
- BYTECODE_OP_UNARY_MINUS_DOUBLE = 55,
- BYTECODE_OP_UNARY_NOT_DOUBLE = 56,
-
- /* logical */
- BYTECODE_OP_AND = 57,
- BYTECODE_OP_OR = 58,
-
- /* load field ref */
- BYTECODE_OP_LOAD_FIELD_REF = 59,
- BYTECODE_OP_LOAD_FIELD_REF_STRING = 60,
- BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61,
- BYTECODE_OP_LOAD_FIELD_REF_S64 = 62,
- BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63,
-
- /* load immediate from operand */
- BYTECODE_OP_LOAD_STRING = 64,
- BYTECODE_OP_LOAD_S64 = 65,
- BYTECODE_OP_LOAD_DOUBLE = 66,
-
- /* cast */
- BYTECODE_OP_CAST_TO_S64 = 67,
- BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68,
- BYTECODE_OP_CAST_NOP = 69,
-
- /* get context ref */
- BYTECODE_OP_GET_CONTEXT_REF = 70,
- BYTECODE_OP_GET_CONTEXT_REF_STRING = 71,
- BYTECODE_OP_GET_CONTEXT_REF_S64 = 72,
- BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73,
-
- /* load userspace field ref */
- BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74,
- BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
-
- /*
-	 * Load star globbing pattern (literal string) from immediate operand.
- */
- BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76,
-
-	/* globbing pattern binary operators */
- BYTECODE_OP_EQ_STAR_GLOB_STRING = 77,
- BYTECODE_OP_NE_STAR_GLOB_STRING = 78,
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- BYTECODE_OP_GET_CONTEXT_ROOT = 79,
- BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80,
- BYTECODE_OP_GET_PAYLOAD_ROOT = 81,
-
- BYTECODE_OP_GET_SYMBOL = 82,
- BYTECODE_OP_GET_SYMBOL_FIELD = 83,
- BYTECODE_OP_GET_INDEX_U16 = 84,
- BYTECODE_OP_GET_INDEX_U64 = 85,
-
- BYTECODE_OP_LOAD_FIELD = 86,
- BYTECODE_OP_LOAD_FIELD_S8 = 87,
- BYTECODE_OP_LOAD_FIELD_S16 = 88,
- BYTECODE_OP_LOAD_FIELD_S32 = 89,
- BYTECODE_OP_LOAD_FIELD_S64 = 90,
- BYTECODE_OP_LOAD_FIELD_U8 = 91,
- BYTECODE_OP_LOAD_FIELD_U16 = 92,
- BYTECODE_OP_LOAD_FIELD_U32 = 93,
- BYTECODE_OP_LOAD_FIELD_U64 = 94,
- BYTECODE_OP_LOAD_FIELD_STRING = 95,
- BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96,
- BYTECODE_OP_LOAD_FIELD_DOUBLE = 97,
-
- BYTECODE_OP_UNARY_BIT_NOT = 98,
-
- BYTECODE_OP_RETURN_S64 = 99,
-
- NR_BYTECODE_OPS,
-};
-
-typedef uint8_t bytecode_opcode_t;
-
-struct load_op {
- bytecode_opcode_t op;
- /*
-	 * Data to load. Size is known from enum bytecode_op and the
-	 * null-terminated string.
- */
- char data[0];
-} __attribute__((packed));
-
-struct binary_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-struct unary_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-/* skip_offset is absolute from start of bytecode */
-struct logical_op {
- bytecode_opcode_t op;
- uint16_t skip_offset; /* bytecode insn, if skip second test */
-} __attribute__((packed));
-
-struct cast_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-struct return_op {
- bytecode_opcode_t op;
-} __attribute__((packed));
-
-#endif /* _BYTECODE_H */
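
Since every instruction above is a packed struct stored inline in the
bytecode stream, an interpreter advances a byte pointer by the size of each
opcode plus any inline operand. A hedged decoding sketch covering two
opcodes, assuming the definitions from this header (this is not the
project's actual interpreter loop):

    #include <stdio.h>
    #include <string.h>

    static void walk(const char *pc, const char *end)
    {
        while (pc < end) {
            switch (*(const bytecode_opcode_t *) pc) {
            case BYTECODE_OP_LOAD_STRING: {
                const struct load_op *op = (const void *) pc;

                printf("load string \"%s\"\n", op->data);
                /* opcode byte + inline NUL-terminated operand */
                pc += sizeof(*op) + strlen(op->data) + 1;
                break;
            }
            case BYTECODE_OP_RETURN:
                return;
            default:
                return;     /* unknown opcode: bail out */
            }
        }
    }
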
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010 Pierre-Marc Fournier
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _UST_CLOCK_H
-#define _UST_CLOCK_H
-
-#include <time.h>
-#include <sys/time.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-#include <lttng/ust-clock.h>
-
-#include "lttng-ust-uuid.h"
-
-struct lttng_ust_trace_clock {
- uint64_t (*read64)(void);
- uint64_t (*freq)(void);
- int (*uuid)(char *uuid);
- const char *(*name)(void);
- const char *(*description)(void);
-};
-
-extern struct lttng_ust_trace_clock *lttng_ust_trace_clock
- __attribute__((visibility("hidden")));
-
-void lttng_ust_clock_init(void);
-
-/* Use the kernel MONOTONIC clock. */
-
-static __inline__
-uint64_t trace_clock_read64_monotonic(void)
-{
- struct timespec ts;
-
- if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts))) {
- ts.tv_sec = 0;
- ts.tv_nsec = 0;
- }
- return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
-}
-
-static __inline__
-uint64_t trace_clock_read64(void)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- return trace_clock_read64_monotonic();
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- return ltc->read64();
- }
-}
-
-#endif /* _UST_CLOCK_H */
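
The indirection above lets a plugin substitute its own trace clock: the
reader checks the shared pointer and falls back to CLOCK_MONOTONIC when it
is NULL. A hypothetical override, shown only to illustrate the publication
ordering; real plugins should register through the public lttng/ust-clock.h
API rather than storing to the internal pointer:

    #include <stdint.h>
    #include <urcu/arch.h>
    #include <urcu/system.h>
    /* Assumes struct lttng_ust_trace_clock and the lttng_ust_trace_clock
     * pointer declared in the header above. */

    static uint64_t my_read64(void) { return 42; }
    static uint64_t my_freq(void) { return 1000000000ULL; }
    static int my_uuid(char *uuid) { uuid[0] = '\0'; return 0; }
    static const char *my_name(void) { return "my_clock"; }
    static const char *my_desc(void) { return "constant test clock"; }

    static struct lttng_ust_trace_clock my_clock = {
        .read64 = my_read64,
        .freq = my_freq,
        .uuid = my_uuid,
        .name = my_name,
        .description = my_desc,
    };

    static void install_clock(void)
    {
        /* Publish the initialized struct before the pointer, pairing
         * with cmm_read_barrier_depends() on the reader side. */
        cmm_smp_wmb();
        CMM_STORE_SHARED(lttng_ust_trace_clock, &my_clock);
    }
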
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2016 Raphaël Beamonte <raphael.beamonte@gmail.com>
- * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _UST_COMPAT_H
-#define _UST_COMPAT_H
-
-#include <pthread.h>
-#include <errno.h>
-#include <string.h>
-
-#ifdef __FreeBSD__
-#include <pthread_np.h>
-#endif
-
-#include <lttng/ust-abi.h>
-
-#define LTTNG_UST_PROCNAME_SUFFIX "-ust"
-
-
-#if defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
-static inline
-int lttng_pthread_setname_np(const char *name)
-{
- /*
-	 * Some implementations don't error out; replicate this behavior for
- * consistency.
- */
- if (strnlen(name, LTTNG_UST_ABI_PROCNAME_LEN) >= LTTNG_UST_ABI_PROCNAME_LEN) {
- return ERANGE;
- }
-
- return pthread_setname_np(pthread_self(), name);
-}
-#elif defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
-static inline
-int lttng_pthread_setname_np(const char *name)
-{
- return pthread_setname_np(name);
-}
-#elif defined(HAVE_PTHREAD_SET_NAME_NP_WITH_TID)
-
-static inline
-int lttng_pthread_setname_np(const char *name)
-{
- /* Replicate pthread_setname_np's behavior */
- if (strnlen(name, LTTNG_UST_ABI_PROCNAME_LEN) >= LTTNG_UST_ABI_PROCNAME_LEN) {
- return ERANGE;
- }
-
- pthread_set_name_np(pthread_self(), name);
- return 0;
-}
-#elif defined(__linux__)
-
-/* Fall back on prctl on Linux */
-#include <sys/prctl.h>
-
-static inline
-int lttng_pthread_setname_np(const char *name)
-{
- /* Replicate pthread_setname_np's behavior */
- if (strnlen(name, LTTNG_UST_ABI_PROCNAME_LEN) >= LTTNG_UST_ABI_PROCNAME_LEN) {
- return ERANGE;
- }
- return prctl(PR_SET_NAME, name, 0, 0, 0);
-}
-#else
-#error "Please add pthread set name support for your OS."
-#endif
-
-
-#if defined(HAVE_PTHREAD_GETNAME_NP_WITH_TID)
-static inline
-int lttng_pthread_getname_np(char *name, size_t len)
-{
- return pthread_getname_np(pthread_self(), name, len);
-}
-#elif defined(HAVE_PTHREAD_GETNAME_NP_WITHOUT_TID)
-static inline
-int lttng_pthread_getname_np(char *name, size_t len)
-{
- return pthread_getname_np(name, len);
-}
-#elif defined(HAVE_PTHREAD_GET_NAME_NP_WITH_TID)
-
-static inline
-int lttng_pthread_getname_np(char *name, size_t len)
-{
- pthread_get_name_np(pthread_self(), name, len);
- return 0;
-}
-#elif defined(__linux__)
-
-/* Fall back on prctl on Linux */
-#include <sys/prctl.h>
-
-static inline
-int lttng_pthread_getname_np(char *name, size_t len)
-{
- return prctl(PR_GET_NAME, name, 0, 0, 0);
-}
-
-#else
-#error "Please add pthread get name support for your OS."
-#endif
-
-/*
- * If a pthread setname/set_name function is available, define
- * the lttng_ust_setustprocname() function, which appends '-ust' to the
- * end of the current process name, truncating it if needed.
- */
-static inline
-int lttng_ust_setustprocname(void)
-{
- int ret = 0, len;
- char name[LTTNG_UST_ABI_PROCNAME_LEN];
- int limit = LTTNG_UST_ABI_PROCNAME_LEN - strlen(LTTNG_UST_PROCNAME_SUFFIX) - 1;
-
- /*
- * Get the current thread name.
- */
- ret = lttng_pthread_getname_np(name, LTTNG_UST_ABI_PROCNAME_LEN);
- if (ret) {
- goto error;
- }
-
- len = strlen(name);
- if (len > limit) {
- len = limit;
- }
-
- ret = sprintf(name + len, LTTNG_UST_PROCNAME_SUFFIX);
- if (ret != strlen(LTTNG_UST_PROCNAME_SUFFIX)) {
- goto error;
- }
-
- ret = lttng_pthread_setname_np(name);
-
-error:
- return ret;
-}
-
-#include <errno.h>
-
-#ifndef ENODATA
-#define ENODATA ENOMSG
-#endif
-
-#endif /* _UST_COMPAT_H */
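
To make the suffix-truncation arithmetic in lttng_ust_setustprocname()
concrete: the limit reserves room for the suffix and the terminating NUL,
and a longer name is cut at that limit. A stand-alone version with an
arbitrary illustrative buffer length (the real code uses
LTTNG_UST_ABI_PROCNAME_LEN):

    #include <stdio.h>
    #include <string.h>

    #define NAME_LEN 17         /* illustrative, not the ABI value */
    #define SUFFIX "-ust"

    static void append_suffix(char *name)   /* room for NAME_LEN bytes */
    {
        size_t len = strlen(name);
        /* Keep room for the suffix and the terminating NUL. */
        size_t limit = NAME_LEN - strlen(SUFFIX) - 1;

        if (len > limit)
            len = limit;
        memcpy(name + len, SUFFIX, strlen(SUFFIX) + 1);
    }

    int main(void)
    {
        char name[NAME_LEN] = "averylongthread1";   /* 16 characters */

        append_suffix(name);
        printf("%s\n", name);   /* prints "averylongthr-ust" */
        return 0;
    }
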
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Userspace RCU library - sys_futex compatibility code
- */
-
-#include <stdio.h>
-#include <pthread.h>
-#include <signal.h>
-#include <assert.h>
-#include <errno.h>
-#include <poll.h>
-#include <stdint.h>
-
-#include <urcu/arch.h>
-#include <urcu/system.h>
-#include "futex.h"
-
-/*
- * Using attribute "weak" for __lttng_ust_compat_futex_lock and
- * __lttng_ust_compat_futex_cond. Those are globally visible by the entire
- * program, even though many shared objects may have their own version.
- * The first version that gets loaded will be used by the entire program
- * (executable and all shared objects).
- */
-
-__attribute__((weak))
-pthread_mutex_t __lttng_ust_compat_futex_lock = PTHREAD_MUTEX_INITIALIZER;
-__attribute__((weak))
-pthread_cond_t __lttng_ust_compat_futex_cond = PTHREAD_COND_INITIALIZER;
-
-/*
- * _NOT SIGNAL-SAFE_. pthread_cond is not signal-safe anyway.
- * For now, timeout, uaddr2 and val3 are unused.
- * Waiter will relinquish the CPU until woken up.
- */
-
-int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret = 0, lockret;
-
- /*
-	 * Ensure that timeout, uaddr2 and val3 are NULL; don't let users
-	 * expect them to be taken into account.
- */
- assert(!timeout);
- assert(!uaddr2);
- assert(!val3);
-
- /*
-	 * Memory barrier to serialize with the previous uaddr modification.
- */
- cmm_smp_mb();
-
- lockret = pthread_mutex_lock(&__lttng_ust_compat_futex_lock);
- if (lockret) {
- errno = lockret;
- ret = -1;
- goto end;
- }
- switch (op) {
- case FUTEX_WAIT:
- /*
-		 * Wait until *uaddr is changed to something other than "val".
-		 * Comparing *uaddr content against val determines which
-		 * thread has been awakened.
- */
- while (CMM_LOAD_SHARED(*uaddr) == val)
- pthread_cond_wait(&__lttng_ust_compat_futex_cond,
- &__lttng_ust_compat_futex_lock);
- break;
- case FUTEX_WAKE:
- /*
-		 * Each wake sends a broadcast, attempting to wake up all
-		 * waiting threads, independently of their respective
-		 * uaddr.
- */
- pthread_cond_broadcast(&__lttng_ust_compat_futex_cond);
- break;
- default:
- errno = EINVAL;
- ret = -1;
- }
- lockret = pthread_mutex_unlock(&__lttng_ust_compat_futex_lock);
- if (lockret) {
- errno = lockret;
- ret = -1;
- }
-end:
- return ret;
-}
-
-/*
- * _ASYNC SIGNAL-SAFE_.
- * For now, timeout, uaddr2 and val3 are unused.
- * Waiter will busy-loop trying to read the condition.
- * It is OK to use compat_futex_async() on a futex address on which
- * futex() WAKE operations are also performed.
- */
-
-int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret = 0;
-
- /*
-	 * Ensure that timeout, uaddr2 and val3 are NULL; don't let users
-	 * expect them to be taken into account.
- */
- assert(!timeout);
- assert(!uaddr2);
- assert(!val3);
-
- /*
- * Ensure previous memory operations on uaddr have completed.
- */
- cmm_smp_mb();
-
- switch (op) {
- case FUTEX_WAIT:
- while (CMM_LOAD_SHARED(*uaddr) == val) {
- if (poll(NULL, 0, 10) < 0) {
- ret = -1;
- /* Keep poll errno. Caller handles EINTR. */
- goto end;
- }
- }
- break;
- case FUTEX_WAKE:
- break;
- default:
- errno = EINVAL;
- ret = -1;
- }
-end:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2020 (C) Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_CONTEXT_INTERNAL_H
-#define _LTTNG_UST_CONTEXT_INTERNAL_H
-
-#include <lttng/ust-events.h>
-#include "ust-events-internal.h"
-#include "ust-context-provider.h"
-
-int lttng_context_init_all(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_attach_context(struct lttng_ust_abi_context *context_param,
- union lttng_ust_abi_args *uargs,
- struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
- __attribute__((visibility("hidden")));
-
-int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_destroy_context(struct lttng_ust_ctx *ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
- __attribute__((visibility("hidden")));
-
-int lttng_context_is_app(const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vtid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vpid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_cgroup_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_ipc_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_mnt_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_net_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_pid_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_user_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_uts_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_time_ns_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vuid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_veuid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vsuid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vgid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vegid_reset(void)
- __attribute__((visibility("hidden")));
-
-void lttng_context_vsgid_reset(void)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_dyntest_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_CONTEXT_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2019 Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
-#define _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
-
-#include <stddef.h>
-#include <lttng/ust-events.h>
-
-void lttng_ust_context_set_event_notifier_group_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv,
- struct lttng_ust_ctx_value *value),
- void *priv)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _LTTNG_CREDS_H
-#define _LTTNG_CREDS_H
-
-/*
- * This is used in the kernel as an invalid value.
- */
-
-#define INVALID_UID (uid_t) -1
-#define INVALID_GID (gid_t) -1
-
-#endif /* _LTTNG_CREDS_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_ERROR_H
-#define _LTTNG_ERROR_H
-
-#include <urcu/compiler.h>
-#include <unistd.h>
-
-#define MAX_ERRNO 4095
-
-static inline
-int IS_ERR_VALUE(long value)
-{
- if (caa_unlikely((unsigned long) value >= (unsigned long) -MAX_ERRNO))
- return 1;
- else
- return 0;
-}
-
-static inline
-void *ERR_PTR(long error)
-{
- return (void *) error;
-}
-
-static inline
-long PTR_ERR(const void *ptr)
-{
- return (long) ptr;
-}
-
-static inline
-int IS_ERR(const void *ptr)
-{
- return IS_ERR_VALUE((long) ptr);
-}
-
-#endif /* _LTTNG_ERROR_H */
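
A hedged usage sketch of the kernel-style error-pointer convention defined above (alloc_session and use_session are hypothetical): a single return value carries either a valid pointer or a negated errno.

#include <errno.h>
#include <stdlib.h>

static void *alloc_session(size_t len)
{
	void *p;

	if (len > 4096)
		return ERR_PTR(-EINVAL);	/* error encoded in the pointer */
	p = malloc(len);
	if (!p)
		return ERR_PTR(-ENOMEM);
	return p;
}

static int use_session(void)
{
	void *s = alloc_session(128);

	if (IS_ERR(s))
		return (int) PTR_ERR(s);	/* recovers -EINVAL or -ENOMEM */
	free(s);
	return 0;
}
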
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-
-#include <lttng/ust-endian.h>
-#include <usterr-signal-safe.h>
-#include <urcu/rculist.h>
-
-#include "lttng-tracer-core.h"
-#include "ust-events-internal.h"
-#include "../libmsgpack/msgpack.h"
-#include "lttng-bytecode.h"
-#include "ust-share.h"
-
-/*
- * We want this write to be atomic AND non-blocking, meaning that we
- * want to write either everything OR nothing.
- * According to `pipe(7)`, writes that are less than `PIPE_BUF` bytes must be
- * atomic, so we bound the capture buffer size to the `PIPE_BUF` minus the size
- * of the notification struct we are sending alongside the capture buffer.
- */
-#define CAPTURE_BUFFER_SIZE \
- (PIPE_BUF - sizeof(struct lttng_ust_abi_event_notifier_notification) - 1)
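
A hedged compile-time check making the comment's arithmetic explicit: the notification header plus a full capture buffer totals PIPE_BUF - 1 bytes, so a single writev() of both always stays within the atomic bound.

_Static_assert(sizeof(struct lttng_ust_abi_event_notifier_notification)
		+ CAPTURE_BUFFER_SIZE < PIPE_BUF,
	"notification header + capture buffer must fit in one atomic pipe write");
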
-
-struct lttng_event_notifier_notification {
- int notification_fd;
- uint64_t event_notifier_token;
- uint8_t capture_buf[CAPTURE_BUFFER_SIZE];
- struct lttng_msgpack_writer writer;
- bool has_captures;
-};
-
-static
-void capture_enum(struct lttng_msgpack_writer *writer,
- struct lttng_interpreter_output *output)
-{
- lttng_msgpack_begin_map(writer, 2);
- lttng_msgpack_write_str(writer, "type");
- lttng_msgpack_write_str(writer, "enum");
-
- lttng_msgpack_write_str(writer, "value");
-
- switch (output->type) {
- case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
- lttng_msgpack_write_signed_integer(writer, output->u.s);
- break;
- case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
-		lttng_msgpack_write_unsigned_integer(writer, output->u.u);
- break;
- default:
- abort();
- }
-
- lttng_msgpack_end_map(writer);
-}
-
-static
-int64_t capture_sequence_element_signed(uint8_t *ptr,
- const struct lttng_ust_type_integer *integer_type)
-{
- int64_t value;
- unsigned int size = integer_type->size;
- bool byte_order_reversed = integer_type->reverse_byte_order;
-
- switch (size) {
- case 8:
-		value = *(int8_t *) ptr;	/* sign-extend the 8-bit element */
- break;
- case 16:
- {
- int16_t tmp;
- tmp = *(int16_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_16(tmp);
-
- value = tmp;
- break;
- }
- case 32:
- {
- int32_t tmp;
- tmp = *(int32_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_32(tmp);
-
- value = tmp;
- break;
- }
- case 64:
- {
- int64_t tmp;
- tmp = *(int64_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_64(tmp);
-
- value = tmp;
- break;
- }
- default:
- abort();
- }
-
- return value;
-}
-
-static
-uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
- const struct lttng_ust_type_integer *integer_type)
-{
- uint64_t value;
- unsigned int size = integer_type->size;
- bool byte_order_reversed = integer_type->reverse_byte_order;
-
- switch (size) {
- case 8:
- value = *ptr;
- break;
- case 16:
- {
- uint16_t tmp;
- tmp = *(uint16_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_16(tmp);
-
- value = tmp;
- break;
- }
- case 32:
- {
- uint32_t tmp;
- tmp = *(uint32_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_32(tmp);
-
- value = tmp;
- break;
- }
- case 64:
- {
- uint64_t tmp;
- tmp = *(uint64_t *) ptr;
- if (byte_order_reversed)
- tmp = bswap_64(tmp);
-
- value = tmp;
- break;
- }
- default:
- abort();
- }
-
- return value;
-}
-
-static
-void capture_sequence(struct lttng_msgpack_writer *writer,
- struct lttng_interpreter_output *output)
-{
- const struct lttng_ust_type_integer *integer_type;
- const struct lttng_ust_type_common *nested_type;
- uint8_t *ptr;
- bool signedness;
- int i;
-
- lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
-
- ptr = (uint8_t *) output->u.sequence.ptr;
- nested_type = output->u.sequence.nested_type;
- switch (nested_type->type) {
- case lttng_ust_type_integer:
- integer_type = lttng_ust_get_type_integer(nested_type);
- break;
- case lttng_ust_type_enum:
- /* Treat enumeration as an integer. */
- integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_enum(nested_type)->container_type);
- break;
- default:
-		/* Captures of arrays of non-integer types are not supported. */
- abort();
- }
- signedness = integer_type->signedness;
- for (i = 0; i < output->u.sequence.nr_elem; i++) {
- if (signedness) {
- lttng_msgpack_write_signed_integer(writer,
- capture_sequence_element_signed(ptr, integer_type));
- } else {
- lttng_msgpack_write_unsigned_integer(writer,
- capture_sequence_element_unsigned(ptr, integer_type));
- }
-
- /*
- * We assume that alignment is smaller or equal to the size.
- * This currently holds true but if it changes in the future,
- * we will want to change the pointer arithmetics below to
- * take into account that the next element might be further
- * away.
- */
- assert(integer_type->alignment <= integer_type->size);
-
- /* Size is in number of bits. */
-		ptr += integer_type->size / CHAR_BIT;
- }
-
- lttng_msgpack_end_array(writer);
-}
-
-static
-void notification_init(struct lttng_event_notifier_notification *notif,
- struct lttng_ust_event_notifier *event_notifier)
-{
-	struct lttng_msgpack_writer *writer = &notif->writer;
-
- notif->event_notifier_token = event_notifier->priv->parent.user_token;
- notif->notification_fd = event_notifier->priv->group->notification_fd;
- notif->has_captures = false;
-
- if (event_notifier->priv->num_captures > 0) {
- lttng_msgpack_writer_init(writer, notif->capture_buf,
- CAPTURE_BUFFER_SIZE);
-
- lttng_msgpack_begin_array(writer, event_notifier->priv->num_captures);
- notif->has_captures = true;
- }
-}
-
-static
-void notification_append_capture(
- struct lttng_event_notifier_notification *notif,
- struct lttng_interpreter_output *output)
-{
-	struct lttng_msgpack_writer *writer = &notif->writer;
-
- switch (output->type) {
- case LTTNG_INTERPRETER_TYPE_S64:
- lttng_msgpack_write_signed_integer(writer, output->u.s);
- break;
- case LTTNG_INTERPRETER_TYPE_U64:
- lttng_msgpack_write_unsigned_integer(writer, output->u.u);
- break;
- case LTTNG_INTERPRETER_TYPE_DOUBLE:
- lttng_msgpack_write_double(writer, output->u.d);
- break;
- case LTTNG_INTERPRETER_TYPE_STRING:
- lttng_msgpack_write_str(writer, output->u.str.str);
- break;
- case LTTNG_INTERPRETER_TYPE_SEQUENCE:
- capture_sequence(writer, output);
- break;
- case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
- case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
- capture_enum(writer, output);
- break;
- default:
- abort();
- }
-}
-
-static
-void notification_append_empty_capture(
- struct lttng_event_notifier_notification *notif)
-{
-	lttng_msgpack_write_nil(&notif->writer);
-}
-
-static void record_error(struct lttng_ust_event_notifier *event_notifier)
-{
- struct lttng_event_notifier_group *event_notifier_group =
- event_notifier->priv->group;
- struct lttng_counter *error_counter;
- size_t dimension_index[1];
- int ret;
-
- error_counter = CMM_LOAD_SHARED(event_notifier_group->error_counter);
- /*
- * load-acquire paired with store-release orders creation of the
- * error counter and setting error_counter_len before the
- * error_counter is used.
- * Currently a full memory barrier is used, which could be
- * turned into acquire-release barriers.
- */
- cmm_smp_mb();
- /* This group may not have an error counter attached to it. */
- if (!error_counter)
- return;
-
- dimension_index[0] = event_notifier->priv->error_counter_index;
- ret = event_notifier_group->error_counter->ops->counter_add(
- error_counter->counter, dimension_index, 1);
- if (ret)
- WARN_ON_ONCE(1);
-}
-
-static
-void notification_send(struct lttng_event_notifier_notification *notif,
- struct lttng_ust_event_notifier *event_notifier)
-{
- ssize_t ret;
- size_t content_len;
- int iovec_count = 1;
- struct lttng_ust_abi_event_notifier_notification ust_notif = {0};
- struct iovec iov[2];
-
- assert(notif);
-
- ust_notif.token = event_notifier->priv->parent.user_token;
-
- /*
- * Prepare sending the notification from multiple buffers using an
-	 * array of `struct iovec`. The first buffer of the vector is the
-	 * notification structure itself and is always present.
- */
- iov[0].iov_base = &ust_notif;
- iov[0].iov_len = sizeof(ust_notif);
-
- if (notif->has_captures) {
- /*
- * If captures were requested, the second buffer of the array
- * is the capture buffer.
- */
- assert(notif->writer.buffer);
- content_len = notif->writer.write_pos - notif->writer.buffer;
-
- assert(content_len > 0 && content_len <= CAPTURE_BUFFER_SIZE);
-
- iov[1].iov_base = notif->capture_buf;
- iov[1].iov_len = content_len;
-
- iovec_count++;
- } else {
- content_len = 0;
- }
-
- /*
- * Update the capture buffer size so that receiver of the buffer will
- * know how much to expect.
- */
- ust_notif.capture_buf_size = content_len;
-
- /* Send all the buffers. */
- ret = ust_patient_writev(notif->notification_fd, iov, iovec_count);
- if (ret == -1) {
- if (errno == EAGAIN) {
- record_error(event_notifier);
- DBG("Cannot send event_notifier notification without blocking: %s",
- strerror(errno));
- } else {
- DBG("Error to sending event notifier notification: %s",
- strerror(errno));
- abort();
- }
- }
-}
-
-void lttng_event_notifier_notification_send(
- struct lttng_ust_event_notifier *event_notifier,
- const char *stack_data,
- struct lttng_ust_notification_ctx *notif_ctx)
-{
-	/*
-	 * This function is called from the probe, so we must not do
-	 * dynamic allocation in this context.
-	 */
- struct lttng_event_notifier_notification notif = {0};
-
-	notification_init(&notif, event_notifier);
-
- if (caa_unlikely(notif_ctx->eval_capture)) {
- struct lttng_ust_bytecode_runtime *capture_bc_runtime;
-
- /*
- * Iterate over all the capture bytecodes. If the interpreter
- * functions returns successfully, append the value of the
- * `output` parameter to the capture buffer. If the interpreter
- * fails, append an empty capture to the buffer.
- */
- cds_list_for_each_entry_rcu(capture_bc_runtime,
- &event_notifier->priv->capture_bytecode_runtime_head, node) {
- struct lttng_interpreter_output output;
-
- if (capture_bc_runtime->interpreter_func(capture_bc_runtime,
- stack_data, &output) == LTTNG_UST_BYTECODE_INTERPRETER_OK)
-				notification_append_capture(&notif, &output);
-			else
-				notification_append_empty_capture(&notif);
- }
- }
-
- /*
- * Send the notification (including the capture buffer) to the
- * sessiond.
- */
-	notification_send(&notif, event_notifier);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Userspace RCU - sys_futex/compat_futex header.
- */
-
-#ifndef _LTTNG_UST_FUTEX_H
-#define _LTTNG_UST_FUTEX_H
-
-#include <errno.h>
-#include <stdint.h>
-#include <time.h>
-#include <sys/syscall.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define FUTEX_WAIT 0
-#define FUTEX_WAKE 1
-
-/*
- * sys_futex compatibility header.
- * Use *only* *either of* futex_noasync OR futex_async on a given address.
- *
- * futex_noasync cannot be executed in signal handlers, but ensures that
- * it will be put in a wait queue even in compatibility mode.
- *
- * futex_async is signal-handler safe for the wakeup. It uses polling
- * on the wait-side in compatibility mode.
- *
- * BEWARE: sys_futex() FUTEX_WAIT may return early if interrupted
- * (returns EINTR).
- */
-
-extern int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
- __attribute__((visibility("hidden")));
-
-extern int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
- __attribute__((visibility("hidden")));
-
-#if (defined(__linux__) && defined(__NR_futex))
-
-#include <unistd.h>
-#include <errno.h>
-#include <urcu/compiler.h>
-#include <urcu/arch.h>
-
-static inline int lttng_ust_futex(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return syscall(__NR_futex, uaddr, op, val, timeout,
- uaddr2, val3);
-}
-
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret;
-
- ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
- if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
- /*
- * The fallback on ENOSYS is the async-safe version of
- * the compat futex implementation, because the
- * async-safe compat implementation allows being used
- * concurrently with calls to futex(). Indeed, sys_futex
- * FUTEX_WAIT, on some architectures (mips and parisc),
- * within a given process, spuriously return ENOSYS due
- * to signal restart bugs on some kernel versions.
- */
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
- uaddr2, val3);
- }
-	return ret;
-}
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int ret;
-
- ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
- if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
- uaddr2, val3);
- }
- return ret;
-}
-
-#elif defined(__FreeBSD__)
-
-#include <sys/types.h>
-#include <sys/umtx.h>
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- int umtx_op;
- void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
- struct _umtx_time umtx_timeout = {
- ._flags = UMTX_ABSTIME,
- ._clockid = CLOCK_MONOTONIC,
- };
-
- switch (op) {
- case FUTEX_WAIT:
- /* On FreeBSD, a "u_int" is a 32-bit integer. */
- umtx_op = UMTX_OP_WAIT_UINT;
- if (timeout != NULL) {
- umtx_timeout._timeout = *timeout;
- umtx_uaddr = (void *) sizeof(umtx_timeout);
- umtx_uaddr2 = (void *) &umtx_timeout;
- }
- break;
- case FUTEX_WAKE:
- umtx_op = UMTX_OP_WAKE;
- break;
- default:
- errno = EINVAL;
- return -1;
- }
-
- return _umtx_op(uaddr, umtx_op, (uint32_t) val, umtx_uaddr,
- umtx_uaddr2);
-}
-
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-#elif defined(__CYGWIN__)
-
-/*
- * The futex_noasync compat code uses a weak symbol to share state across
- * different shared objects, which is not possible on Windows with the
- * Portable Executable format. Use the async compat code for both cases.
- */
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-#else
-
-static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_noasync(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
- const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
-{
- return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
-}
-
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _LTTNG_UST_FUTEX_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <usterr-signal-safe.h>
-#include <ust-helper.h>
-#include "getenv.h"
-
-enum lttng_env_secure {
- LTTNG_ENV_SECURE,
- LTTNG_ENV_NOT_SECURE,
-};
-
-struct lttng_env {
- const char *key;
- enum lttng_env_secure secure;
- char *value;
-};
-
-static struct lttng_env lttng_env[] = {
- /*
- * LTTNG_UST_DEBUG is used directly by snprintf, because it
- * needs to be already set for ERR() used in
- * lttng_ust_getenv_init().
- */
- { "LTTNG_UST_DEBUG", LTTNG_ENV_NOT_SECURE, NULL, },
-
- /* Env. var. which can be used in setuid/setgid executables. */
- { "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
- { "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
-
- /* Env. var. which are not fetched in setuid/setgid executables. */
- { "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
- { "LTTNG_UST_GETCPU_PLUGIN", LTTNG_ENV_SECURE, NULL, },
- { "LTTNG_UST_ALLOW_BLOCKING", LTTNG_ENV_SECURE, NULL, },
- { "HOME", LTTNG_ENV_SECURE, NULL, },
- { "LTTNG_HOME", LTTNG_ENV_SECURE, NULL, },
-};
-
-static
-int lttng_is_setuid_setgid(void)
-{
- return geteuid() != getuid() || getegid() != getgid();
-}
-
-char *lttng_ust_getenv(const char *name)
-{
- size_t i;
- struct lttng_env *e;
- bool found = false;
-
- for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
-		e = &lttng_env[i];
-
- if (strcmp(e->key, name) == 0) {
- found = true;
- break;
- }
- }
- if (!found) {
- return NULL;
- }
- return e->value;
-}
-
-void lttng_ust_getenv_init(void)
-{
- size_t i;
-
- for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
-		struct lttng_env *e = &lttng_env[i];
-
- if (e->secure == LTTNG_ENV_SECURE && lttng_is_setuid_setgid()) {
- ERR("Getting environment variable '%s' from setuid/setgid binary refused for security reasons.",
- e->key);
- continue;
- }
- e->value = getenv(e->key);
- }
-}
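
A hedged usage sketch (example_init is hypothetical): lttng_ust_getenv_init() must run first, early and while the process is still effectively single-threaded, because lookups only return the values it snapshotted.

#include <stdio.h>

static void example_init(void)
{
	const char *dbg;

	lttng_ust_getenv_init();	/* snapshot the allow-listed variables */
	dbg = lttng_ust_getenv("LTTNG_UST_DEBUG");
	if (dbg)
		fprintf(stderr, "UST debugging enabled\n");
}
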
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _COMPAT_GETENV_H
-#define _COMPAT_GETENV_H
-
-/*
- * Always add the lttng-ust environment variables using the lttng_ust_getenv()
- * infrastructure rather than using getenv() directly. This ensures that we
- * don't trigger races between getenv() invoked by lttng-ust listener threads
- * and setenv() called concurrently by an otherwise single-threaded
- * application thread (the application is not aware that it runs with
- * lttng-ust).
- */
-
-char *lttng_ust_getenv(const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_getenv_init(void)
- __attribute__((visibility("hidden")));
-
-#endif /* _COMPAT_GETENV_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <stddef.h>
-#include <stdint.h>
-#include <urcu/compiler.h>
-#include <lttng/ust-endian.h>
-
-/*
- * Hash function
- * Source: http://burtleburtle.net/bob/c/lookup3.c
- * Originally Public Domain
- */
-
-#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
-
-#define mix(a, b, c) \
-do { \
- a -= c; a ^= rot(c, 4); c += b; \
- b -= a; b ^= rot(a, 6); a += c; \
- c -= b; c ^= rot(b, 8); b += a; \
- a -= c; a ^= rot(c, 16); c += b; \
- b -= a; b ^= rot(a, 19); a += c; \
- c -= b; c ^= rot(b, 4); b += a; \
-} while (0)
-
-#define final(a, b, c) \
-{ \
- c ^= b; c -= rot(b, 14); \
- a ^= c; a -= rot(c, 11); \
- b ^= a; b -= rot(a, 25); \
- c ^= b; c -= rot(b, 16); \
- a ^= c; a -= rot(c, 4);\
- b ^= a; b -= rot(a, 14); \
- c ^= b; c -= rot(b, 24); \
-}
-
-#if (BYTE_ORDER == LITTLE_ENDIAN)
-#define HASH_LITTLE_ENDIAN 1
-#else
-#define HASH_LITTLE_ENDIAN 0
-#endif
-
-/*
- * hashlittle() -- hash a variable-length key into a 32-bit value
- * k : the key (the unaligned variable-length array of bytes)
- * length : the length of the key, counting by bytes
- * initval : can be any 4-byte value
- * Returns a 32-bit value. Every bit of the key affects every bit of
- * the return value. Two keys differing by one or two bits will have
- * totally different hash values.
- *
- * The best hash table sizes are powers of 2. There is no need to do
- * mod a prime (mod is sooo slow!). If you need less than 32 bits,
- * use a bitmask. For example, if you need only 10 bits, do
- * h = (h & hashmask(10));
- * In which case, the hash table should have hashsize(10) elements.
- *
- * If you are hashing n strings (uint8_t **)k, do it like this:
- * for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
- *
- * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
- * code any way you wish, private, educational, or commercial. It's free.
- *
- * Use for hash table lookup, or anything where one collision in 2^^32 is
- * acceptable. Do NOT use for cryptographic purposes.
- */
-static
-uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
-{
- uint32_t a, b, c; /* internal state */
- union {
- const void *ptr;
- size_t i;
- } u;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
-
- u.ptr = key;
- if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
- const uint32_t *k = (const uint32_t *) key; /* read 32-bit chunks */
-
- /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
- while (length > 12) {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a, b, c);
- length -= 12;
- k += 3;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- /*
- * The original jhash.h reads beyond the end of string, and implements
- * a special code path for VALGRIND. It seems to make ASan unhappy too
- * though, so considering that hashing event names is not a fast-path
- * in lttng-ust, remove the "fast" code entirely and use the slower
- * but verifiable VALGRIND version of the code which does not issue
- * out-of-bound reads.
- */
- {
- const uint8_t *k8;
-
- k8 = (const uint8_t *) k;
- switch (length) {
- case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
- case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
- case 10: c+=((uint32_t) k8[9])<<8; /* fall through */
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[1]; a+=k[0]; break;
- case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
- case 6 : b+=((uint32_t) k8[5])<<8; /* fall through */
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]; break;
- case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
- case 2 : a+=((uint32_t) k8[1])<<8; /* fall through */
- case 1 : a+=k8[0]; break;
- case 0 : return c;
- }
- }
-
- } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
- const uint16_t *k = (const uint16_t *) key; /* read 16-bit chunks */
- const uint8_t *k8;
-
- /*--------------- all but last block: aligned reads and different mixing */
- while (length > 12)
- {
- a += k[0] + (((uint32_t) k[1])<<16);
- b += k[2] + (((uint32_t) k[3])<<16);
- c += k[4] + (((uint32_t) k[5])<<16);
- mix(a, b, c);
- length -= 12;
- k += 6;
- }
-
- /*----------------------------- handle the last (probably partial) block */
- k8 = (const uint8_t *) k;
- switch(length)
- {
- case 12: c+=k[4]+(((uint32_t) k[5])<<16);
- b+=k[2]+(((uint32_t) k[3])<<16);
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
- case 10: c+=k[4];
- b+=k[2]+(((uint32_t) k[3])<<16);
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 9 : c+=k8[8]; /* fall through */
- case 8 : b+=k[2]+(((uint32_t) k[3])<<16);
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
- case 6 : b+=k[2];
- a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 5 : b+=k8[4]; /* fall through */
- case 4 : a+=k[0]+(((uint32_t) k[1])<<16);
- break;
- case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
- case 2 : a+=k[0];
- break;
- case 1 : a+=k8[0];
- break;
- case 0 : return c; /* zero length requires no mixing */
- }
-
- } else { /* need to read the key one byte at a time */
- const uint8_t *k = (const uint8_t *)key;
-
- /*--------------- all but the last block: affect some 32 bits of (a, b, c) */
- while (length > 12) {
- a += k[0];
- a += ((uint32_t) k[1])<<8;
- a += ((uint32_t) k[2])<<16;
- a += ((uint32_t) k[3])<<24;
- b += k[4];
- b += ((uint32_t) k[5])<<8;
- b += ((uint32_t) k[6])<<16;
- b += ((uint32_t) k[7])<<24;
- c += k[8];
- c += ((uint32_t) k[9])<<8;
- c += ((uint32_t) k[10])<<16;
- c += ((uint32_t) k[11])<<24;
- mix(a,b,c);
- length -= 12;
- k += 12;
- }
-
- /*-------------------------------- last block: affect all 32 bits of (c) */
- switch (length) { /* all the case statements fall through */
- case 12: c+=((uint32_t) k[11])<<24; /* fall through */
- case 11: c+=((uint32_t) k[10])<<16; /* fall through */
- case 10: c+=((uint32_t) k[9])<<8; /* fall through */
- case 9 : c+=k[8]; /* fall through */
- case 8 : b+=((uint32_t) k[7])<<24; /* fall through */
- case 7 : b+=((uint32_t) k[6])<<16; /* fall through */
- case 6 : b+=((uint32_t) k[5])<<8; /* fall through */
- case 5 : b+=k[4]; /* fall through */
- case 4 : a+=((uint32_t) k[3])<<24; /* fall through */
- case 3 : a+=((uint32_t) k[2])<<16; /* fall through */
- case 2 : a+=((uint32_t) k[1])<<8; /* fall through */
- case 1 : a+=k[0];
- break;
- case 0 : return c;
- }
- }
-
- final(a, b, c);
- return c;
-}
-
-static inline
-uint32_t jhash(const void *key, size_t length, uint32_t seed)
-{
- return hashlittle(key, length, seed);
-}
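
A hedged usage sketch following the guidance in the comment above (event_name_bucket and the seed are illustrative): hash a string, then reduce it with a bitmask into a power-of-two table.

#include <string.h>

#define HASH_BITS	10
#define HASH_SIZE	(1U << HASH_BITS)		/* hashsize(10) */

static uint32_t event_name_bucket(const char *name)
{
	uint32_t h = jhash(name, strlen(name), 0xdeadbeef);	/* arbitrary seed */

	return h & (HASH_SIZE - 1);			/* hashmask(10) */
}
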
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode interpreter.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-
-#include <lttng/urcu/pointer.h>
-#include <urcu/rculist.h>
-#include <lttng/ust-endian.h>
-#include <lttng/ust-events.h>
-#include "ust-events-internal.h"
-
-#include "lttng-bytecode.h"
-#include "string-utils.h"
-
-
-/*
- * -1: wildcard found.
- * -2: unknown escape char.
- * 0: normal char.
- */
-
-static
-int parse_char(const char **p)
-{
- switch (**p) {
- case '\\':
- (*p)++;
- switch (**p) {
- case '\\':
- case '*':
- return 0;
- default:
- return -2;
- }
- case '*':
- return -1;
- default:
- return 0;
- }
-}
-
-/*
- * Returns SIZE_MAX if the string is null-terminated, or the number of
- * characters if not.
- */
-static
-size_t get_str_or_seq_len(const struct estack_entry *entry)
-{
- return entry->u.s.seq_len;
-}
-
-static
-int stack_star_glob_match(struct estack *stack, int top,
- const char *cmp_type __attribute__((unused)))
-{
- const char *pattern;
- const char *candidate;
- size_t pattern_len;
- size_t candidate_len;
-
- /* Find out which side is the pattern vs. the candidate. */
- if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
- pattern = estack_ax(stack, top)->u.s.str;
- pattern_len = get_str_or_seq_len(estack_ax(stack, top));
- candidate = estack_bx(stack, top)->u.s.str;
- candidate_len = get_str_or_seq_len(estack_bx(stack, top));
- } else {
- pattern = estack_bx(stack, top)->u.s.str;
- pattern_len = get_str_or_seq_len(estack_bx(stack, top));
- candidate = estack_ax(stack, top)->u.s.str;
- candidate_len = get_str_or_seq_len(estack_ax(stack, top));
- }
-
- /* Perform the match. Returns 0 when the result is true. */
- return !strutils_star_glob_match(pattern, pattern_len, candidate,
- candidate_len);
-}
-
-static
-int stack_strcmp(struct estack *stack, int top, const char *cmp_type __attribute__((unused)))
-{
- const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str;
- int ret;
- int diff;
-
- for (;;) {
- int escaped_r0 = 0;
-
- if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) {
- if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') {
- return 0;
- } else {
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&q);
- if (ret == -1)
- return 0;
- }
- return -1;
- }
- }
- if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) {
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&p);
- if (ret == -1)
- return 0;
- }
- return 1;
- }
- if (estack_bx(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&p);
- if (ret == -1) {
- return 0;
- } else if (ret == -2) {
- escaped_r0 = 1;
- }
- /* else compare both char */
- }
- if (estack_ax(stack, top)->u.s.literal_type ==
- ESTACK_STRING_LITERAL_TYPE_PLAIN) {
- ret = parse_char(&q);
- if (ret == -1) {
- return 0;
- } else if (ret == -2) {
- if (!escaped_r0)
- return -1;
- } else {
- if (escaped_r0)
- return 1;
- }
- } else {
- if (escaped_r0)
- return 1;
- }
- diff = *p - *q;
- if (diff != 0)
- break;
- p++;
- q++;
- }
- return diff;
-}
-
-int lttng_bytecode_interpret_error(
- struct lttng_ust_bytecode_runtime *bytecode_runtime __attribute__((unused)),
- const char *stack_data __attribute__((unused)),
- void *ctx __attribute__((unused)))
-{
- return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
-}
-
-#ifdef INTERPRETER_USE_SWITCH
-
-/*
- * Fallback for compilers that do not support taking address of labels.
- */
-
-#define START_OP \
-	start_pc = &bytecode->code[0]; \
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
- pc = next_pc) { \
- dbg_printf("Executing op %s (%u)\n", \
- lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc), \
- (unsigned int) *(bytecode_opcode_t *) pc); \
- switch (*(bytecode_opcode_t *) pc) {
-
-#define OP(name) jump_target_##name: __attribute__((unused)); \
- case name
-
-#define PO break
-
-#define END_OP } \
- }
-
-#define JUMP_TO(name) \
- goto jump_target_##name
-
-#else
-
-/*
- * Dispatch-table based interpreter.
- */
-
-#define START_OP \
- start_pc = &bytecode->code[0]; \
- pc = next_pc = start_pc; \
- if (unlikely(pc - start_pc >= bytecode->len)) \
- goto end; \
- goto *dispatch[*(bytecode_opcode_t *) pc];
-
-#define OP(name) \
-LABEL_##name
-
-#define PO \
- pc = next_pc; \
- goto *dispatch[*(bytecode_opcode_t *) pc];
-
-#define END_OP
-
-#define JUMP_TO(name) \
- goto LABEL_##name
-
-#endif
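
To make the two dispatch strategies concrete, here is a minimal standalone sketch of the computed-goto technique (the GCC/Clang "labels as values" extension) that the non-switch build uses; the opcodes and program below are invented for illustration, not lttng bytecode.

#include <stdint.h>
#include <stdio.h>

static int run(const uint8_t *code)
{
	/* One label address per opcode; dispatch is a single indirect jump. */
	static void *dispatch[] = {
		[0] = &&op_halt,
		[1] = &&op_inc,
		[2] = &&op_dec,
	};
	const uint8_t *pc = code;
	int acc = 0;

	goto *dispatch[*pc];
op_inc:
	acc++;
	goto *dispatch[*++pc];
op_dec:
	acc--;
	goto *dispatch[*++pc];
op_halt:
	return acc;
}

int main(void)
{
	const uint8_t prog[] = { 1, 1, 2, 0 };	/* inc, inc, dec, halt */

	printf("%d\n", run(prog));		/* prints 1 */
	return 0;
}
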
-
-#define IS_INTEGER_REGISTER(reg_type) \
- (reg_type == REG_U64 || reg_type == REG_S64)
-
-static int context_get_index(struct lttng_ust_ctx *ctx,
- struct load_ptr *ptr,
- uint32_t idx)
-{
- const struct lttng_ust_ctx_field *ctx_field;
- const struct lttng_ust_event_field *field;
- struct lttng_ust_ctx_value v;
-
- ctx_field = &ctx->fields[idx];
- field = ctx_field->event_field;
- ptr->type = LOAD_OBJECT;
- ptr->field = field;
-
- switch (field->type->type) {
- case lttng_ust_type_integer:
- ctx_field->get_value(ctx_field->priv, &v);
- if (lttng_ust_get_type_integer(field->type)->signedness) {
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.u.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- case lttng_ust_type_enum:
- {
- const struct lttng_ust_type_integer *itype;
-
- itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
- ctx_field->get_value(ctx_field->priv, &v);
- if (itype->signedness) {
- ptr->object_type = OBJECT_TYPE_SIGNED_ENUM;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- } else {
- ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
- ptr->u.u64 = v.u.s64; /* Cast. */
- ptr->ptr = &ptr->u.u64;
- }
- break;
- }
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
- ERR("Only string arrays are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->ptr = v.u.str;
- break;
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
- ERR("Only string sequences are supported for contexts.");
- return -EINVAL;
- }
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->ptr = v.u.str;
- break;
- case lttng_ust_type_string:
- ptr->object_type = OBJECT_TYPE_STRING;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->ptr = v.u.str;
- break;
- case lttng_ust_type_float:
- ptr->object_type = OBJECT_TYPE_DOUBLE;
- ctx_field->get_value(ctx_field->priv, &v);
- ptr->u.d = v.u.d;
- ptr->ptr = &ptr->u.d;
- break;
- case lttng_ust_type_dynamic:
- ctx_field->get_value(ctx_field->priv, &v);
- switch (v.sel) {
- case LTTNG_UST_DYNAMIC_TYPE_NONE:
- return -EINVAL;
- case LTTNG_UST_DYNAMIC_TYPE_U8:
- case LTTNG_UST_DYNAMIC_TYPE_U16:
- case LTTNG_UST_DYNAMIC_TYPE_U32:
- case LTTNG_UST_DYNAMIC_TYPE_U64:
- ptr->object_type = OBJECT_TYPE_U64;
- ptr->u.u64 = v.u.u64;
- ptr->ptr = &ptr->u.u64;
- dbg_printf("context get index dynamic u64 %" PRIi64 "\n", ptr->u.u64);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_S8:
- case LTTNG_UST_DYNAMIC_TYPE_S16:
- case LTTNG_UST_DYNAMIC_TYPE_S32:
- case LTTNG_UST_DYNAMIC_TYPE_S64:
- ptr->object_type = OBJECT_TYPE_S64;
- ptr->u.s64 = v.u.s64;
- ptr->ptr = &ptr->u.s64;
- dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
- case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
- ptr->object_type = OBJECT_TYPE_DOUBLE;
- ptr->u.d = v.u.d;
- ptr->ptr = &ptr->u.d;
- dbg_printf("context get index dynamic double %g\n", ptr->u.d);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_STRING:
- ptr->object_type = OBJECT_TYPE_STRING;
- ptr->ptr = v.u.str;
- dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr);
- break;
- default:
- dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
- return -EINVAL;
- }
- break;
- default:
- ERR("Unknown type: %d", (int) field->type->type);
- return -EINVAL;
- }
- return 0;
-}
-
-static int dynamic_get_index(struct lttng_ust_ctx *ctx,
- struct bytecode_runtime *runtime,
- uint64_t index, struct estack_entry *stack_top)
-{
- int ret;
- const struct bytecode_get_index_data *gid;
-
- gid = (const struct bytecode_get_index_data *) &runtime->data[index];
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const char *ptr;
-
- assert(gid->offset < gid->array_len);
- /* Skip count (unsigned long) */
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- assert(stack_top->u.ptr.field->type->type == lttng_ust_type_array);
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const char *ptr;
- size_t ptr_seq_len;
-
- ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
- ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
- if (gid->offset >= gid->elem.len * ptr_seq_len) {
- ret = -EINVAL;
- goto end;
- }
- ptr = ptr + gid->offset;
- stack_top->u.ptr.ptr = ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- assert(stack_top->u.ptr.field->type->type == lttng_ust_type_sequence);
- stack_top->u.ptr.field = NULL;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- ERR("Nested structures are not supported yet.");
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_VARIANT:
- default:
- ERR("Unexpected get index type %d",
- (int) stack_top->u.ptr.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
- {
- ret = context_get_index(ctx,
- &stack_top->u.ptr,
- gid->ctx_index);
- if (ret) {
- goto end;
- }
- break;
- }
- case LOAD_ROOT_PAYLOAD:
- stack_top->u.ptr.ptr += gid->offset;
- if (gid->elem.type == OBJECT_TYPE_STRING)
- stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
- stack_top->u.ptr.object_type = gid->elem.type;
- stack_top->u.ptr.type = LOAD_OBJECT;
- stack_top->u.ptr.field = gid->field;
- stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
- break;
- }
-
- stack_top->type = REG_PTR;
-
- return 0;
-
-end:
- return ret;
-}
-
-static int dynamic_load_field(struct estack_entry *stack_top)
-{
- int ret;
-
- switch (stack_top->u.ptr.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printf("Interpreter warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printf("op load field s8\n");
- stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_S64;
- break;
- case OBJECT_TYPE_S16:
- {
- int16_t tmp;
-
- dbg_printf("op load field s16\n");
- tmp = *(int16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_16(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S32:
- {
- int32_t tmp;
-
- dbg_printf("op load field s32\n");
- tmp = *(int32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_32(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_S64:
- {
- int64_t tmp;
-
- dbg_printf("op load field s64\n");
- tmp = *(int64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_SIGNED_ENUM:
- {
- int64_t tmp;
-
- dbg_printf("op load field signed enumeration\n");
- tmp = *(int64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_S64;
- break;
- }
- case OBJECT_TYPE_U8:
- dbg_printf("op load field u8\n");
- stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
- stack_top->type = REG_U64;
- break;
- case OBJECT_TYPE_U16:
- {
- uint16_t tmp;
-
- dbg_printf("op load field u16\n");
- tmp = *(uint16_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_16(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_U32:
- {
- uint32_t tmp;
-
- dbg_printf("op load field u32\n");
- tmp = *(uint32_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_32(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_U64:
- {
- uint64_t tmp;
-
- dbg_printf("op load field u64\n");
- tmp = *(uint64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_UNSIGNED_ENUM:
- {
- uint64_t tmp;
-
- dbg_printf("op load field unsigned enumeration\n");
- tmp = *(uint64_t *) stack_top->u.ptr.ptr;
- if (stack_top->u.ptr.rev_bo)
- tmp = bswap_64(tmp);
- stack_top->u.v = tmp;
- stack_top->type = REG_U64;
- break;
- }
- case OBJECT_TYPE_DOUBLE:
- memcpy(&stack_top->u.d,
- stack_top->u.ptr.ptr,
- sizeof(struct literal_double));
- stack_top->type = REG_DOUBLE;
- break;
- case OBJECT_TYPE_STRING:
- {
- const char *str;
-
- dbg_printf("op load field string\n");
- str = (const char *) stack_top->u.ptr.ptr;
- stack_top->u.s.str = str;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.seq_len = SIZE_MAX;
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- stack_top->type = REG_STRING;
- break;
- }
- case OBJECT_TYPE_STRING_SEQUENCE:
- {
- const char *ptr;
-
- dbg_printf("op load field string sequence\n");
- ptr = stack_top->u.ptr.ptr;
- stack_top->u.s.seq_len = *(unsigned long *) ptr;
- stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- stack_top->type = REG_STRING;
- if (unlikely(!stack_top->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- stack_top->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- break;
- }
- case OBJECT_TYPE_DYNAMIC:
- /*
- * Dynamic types in context are looked up
- * by context get index.
- */
- ret = -EINVAL;
- goto end;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static
-int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
- struct lttng_interpreter_output *output)
-{
- int ret;
-
-again:
- switch (ax->type) {
- case REG_S64:
- output->type = LTTNG_INTERPRETER_TYPE_S64;
- output->u.s = ax->u.v;
- break;
- case REG_U64:
- output->type = LTTNG_INTERPRETER_TYPE_U64;
- output->u.u = (uint64_t) ax->u.v;
- break;
- case REG_DOUBLE:
- output->type = LTTNG_INTERPRETER_TYPE_DOUBLE;
- output->u.d = ax->u.d;
- break;
- case REG_STRING:
- output->type = LTTNG_INTERPRETER_TYPE_STRING;
- output->u.str.str = ax->u.s.str;
- output->u.str.len = ax->u.s.seq_len;
- break;
- case REG_PTR:
- switch (ax->u.ptr.object_type) {
- case OBJECT_TYPE_S8:
- case OBJECT_TYPE_S16:
- case OBJECT_TYPE_S32:
- case OBJECT_TYPE_S64:
- case OBJECT_TYPE_U8:
- case OBJECT_TYPE_U16:
- case OBJECT_TYPE_U32:
- case OBJECT_TYPE_U64:
- case OBJECT_TYPE_DOUBLE:
- case OBJECT_TYPE_STRING:
- case OBJECT_TYPE_STRING_SEQUENCE:
- ret = dynamic_load_field(ax);
- if (ret)
- return ret;
- /* Retry after loading ptr into stack top. */
- goto again;
- case OBJECT_TYPE_SEQUENCE:
- output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
- output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
- output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
- output->u.sequence.nested_type = lttng_ust_get_type_sequence(ax->u.ptr.field->type)->elem_type;
- break;
- case OBJECT_TYPE_ARRAY:
- /* Skip count (unsigned long) */
- output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
- output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
- output->u.sequence.nr_elem = lttng_ust_get_type_array(ax->u.ptr.field->type)->length;
- output->u.sequence.nested_type = lttng_ust_get_type_array(ax->u.ptr.field->type)->elem_type;
- break;
- case OBJECT_TYPE_SIGNED_ENUM:
- ret = dynamic_load_field(ax);
- if (ret)
- return ret;
- output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM;
- output->u.s = ax->u.v;
- break;
- case OBJECT_TYPE_UNSIGNED_ENUM:
- ret = dynamic_load_field(ax);
- if (ret)
- return ret;
- output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM;
- output->u.u = ax->u.v;
- break;
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- default:
- return -EINVAL;
- }
-
- break;
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
-/*
- * Return LTTNG_UST_BYTECODE_INTERPRETER_OK on success.
- * Return LTTNG_UST_BYTECODE_INTERPRETER_ERROR on error.
- *
- * For FILTER bytecode: expect a struct lttng_ust_bytecode_filter_ctx *
- * as @ctx argument.
- * For CAPTURE bytecode: expect a struct lttng_interpreter_output *
- * as @ctx argument.
- */
-int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *ust_bytecode,
- const char *interpreter_stack_data,
- void *caller_ctx)
-{
- struct bytecode_runtime *bytecode = caa_container_of(ust_bytecode, struct bytecode_runtime, p);
- struct lttng_ust_ctx *ctx = lttng_ust_rcu_dereference(*ust_bytecode->pctx);
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL, retval = 0;
- struct estack _stack;
- struct estack *stack = &_stack;
- register int64_t ax = 0, bx = 0;
- register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN;
- register int top = INTERPRETER_STACK_EMPTY;
-#ifndef INTERPRETER_USE_SWITCH
- static void *dispatch[NR_BYTECODE_OPS] = {
- [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
-
- [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
-
- /* binary */
- [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
- [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
- [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
- [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
- [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
- [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
- [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
- [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
- [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
- [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
-
- /* binary comparators */
- [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
- [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
- [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
- [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
- [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
- [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
-
- /* string binary comparator */
- [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
- [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
- [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
- [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
- [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
- [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
-
- /* globbing pattern binary comparator */
- [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
- [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
-
- /* s64 binary comparator */
- [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
- [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
- [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
- [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
- [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
- [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
-
- /* double binary comparator */
- [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
- [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
- [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
- [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
- [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
- [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
-
- /* Mixed S64-double binary comparators */
- [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
- [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
- [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
- [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
- [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
- [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
-
- [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
- [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
- [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
- [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
- [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
- [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
-
- /* unary */
- [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
- [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
- [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
- [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
- [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
- [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
- [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
- [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
- [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
-
- /* logical */
- [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
- [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
-
- /* load field ref */
- [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
- [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
- [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
- [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
- [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
-
- /* load from immediate operand */
- [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
- [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
- [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
- [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
-
- /* cast */
- [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
- [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
- [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
-
- /* get context ref */
- [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
- [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
- [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
- [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
-
- /* Instructions for recursive traversal through composed types. */
- [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
- [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
- [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
-
- [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
- [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
- [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
- [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
-
- [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
- [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
- [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
- [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
- [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
- [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
- [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
- [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
- [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
- [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
- [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
- [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
-
- [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
-
- [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
- };
-#endif /* #ifndef INTERPRETER_USE_SWITCH */
-
- START_OP
-
- OP(BYTECODE_OP_UNKNOWN):
- OP(BYTECODE_OP_LOAD_FIELD_REF):
-#ifdef INTERPRETER_USE_SWITCH
- default:
-#endif /* INTERPRETER_USE_SWITCH */
- ERR("unknown bytecode op %u",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(BYTECODE_OP_RETURN):
- /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
- /* Handle dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- case REG_U64:
- retval = !!estack_ax_v;
- break;
- case REG_DOUBLE:
- case REG_STRING:
- case REG_PTR:
- if (ust_bytecode->type != LTTNG_UST_BYTECODE_TYPE_CAPTURE) {
- ret = -EINVAL;
- goto end;
- }
- retval = 0;
- break;
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- default:
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
-
- OP(BYTECODE_OP_RETURN_S64):
- /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
- retval = !!estack_ax_v;
- ret = 0;
- goto end;
-
- /* binary */
- OP(BYTECODE_OP_MUL):
- OP(BYTECODE_OP_DIV):
- OP(BYTECODE_OP_MOD):
- OP(BYTECODE_OP_PLUS):
- OP(BYTECODE_OP_MINUS):
- ERR("unsupported bytecode op %u",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- OP(BYTECODE_OP_EQ):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_EQ_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_EQ_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_EQ_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_EQ_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_EQ_STRING);
- case REG_STAR_GLOB_STRING:
- JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
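
The generic comparison opcodes above never compute anything themselves: they read the dynamic type tags of the two stack registers and JUMP_TO the statically typed opcode, so the typed fast paths further down skip all type checks. A minimal sketch of that dispatch idea in plain C follows; all names here are hypothetical illustrations, not the interpreter's API.

#include <stdio.h>

enum reg_type { REG_INT, REG_FLT };

struct reg { enum reg_type t; union { long i; double d; } u; };

static int eq_int(struct reg a, struct reg b) { return a.u.i == b.u.i; }
static int eq_flt(struct reg a, struct reg b) { return a.u.d == b.u.d; }
static int eq_int_flt(struct reg a, struct reg b) { return (double) a.u.i == b.u.d; }

/* Generic EQ: inspect both type tags once, then run the typed path. */
static int eq_dispatch(struct reg a, struct reg b)
{
	if (a.t == REG_INT && b.t == REG_INT)
		return eq_int(a, b);
	if (a.t == REG_FLT && b.t == REG_FLT)
		return eq_flt(a, b);
	if (a.t == REG_INT)
		return eq_int_flt(a, b);
	return eq_int_flt(b, a);
}

int main(void)
{
	struct reg a = { REG_INT, { .i = 2 } };
	struct reg b = { REG_FLT, { .d = 2.0 } };

	printf("%d\n", eq_dispatch(a, b));	/* prints 1 */
	return 0;
}

The payoff is the same as in the interpreter: the type decision is made once per operation, and each typed path is branch-free with respect to types.
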
- OP(BYTECODE_OP_NE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_NE_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_NE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_NE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_NE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_NE_STRING);
- case REG_STAR_GLOB_STRING:
- JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_GT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GT_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GT_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GT_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_GT_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_LT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LT_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LT_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LT_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_LT_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_GE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GE_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_GE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_GE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_GE_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_LE):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LE_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LE_DOUBLE_S64);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_DOUBLE:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_LE_S64_DOUBLE);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_LE_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- case REG_STRING:
- switch (estack_bx_t) {
- case REG_S64: /* Fall-through */
- case REG_U64: /* Fall-through */
- case REG_DOUBLE: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- JUMP_TO(BYTECODE_OP_LE_STRING);
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_bx_t);
- ret = -EINVAL;
- goto end;
- }
- break;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
-
- OP(BYTECODE_OP_EQ_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">") > 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<") < 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, ">=") >= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_STRING):
- {
- int res;
-
- res = (stack_strcmp(stack, top, "<=") <= 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
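
Each typed string comparator above follows the same pop-two/push-one idiom: compare, pop one operand, and overwrite AX with the boolean result tagged REG_S64. A standalone sketch of the idiom, with a hypothetical stack and plain strcmp standing in for stack_strcmp:

#include <stdio.h>
#include <string.h>

static long stack_v[16];
static int top = 1;	/* stack_v[top] is AX, stack_v[top - 1] is BX */

/* Pop BX and replace the operand pair with the boolean result. */
static void combine(int res)
{
	top--;
	stack_v[top] = res;
}

int main(void)
{
	const char *bx = "abc", *ax = "abd";

	combine(strcmp(bx, ax) < 0);	/* the "<" comparator */
	printf("%ld\n", stack_v[top]);	/* prints 1 */
	return 0;
}
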
-
- OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "==") == 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
- {
- int res;
-
- res = (stack_star_glob_match(stack, top, "!=") != 0);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(BYTECODE_OP_EQ_S64):
- {
- int res;
-
- res = (estack_bx_v == estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_S64):
- {
- int res;
-
- res = (estack_bx_v != estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_S64):
- {
- int res;
-
- res = (estack_bx_v > estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_S64):
- {
- int res;
-
- res = (estack_bx_v < estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_S64):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_S64):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(BYTECODE_OP_EQ_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_DOUBLE):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* Mixed S64-double binary comparators */
- OP(BYTECODE_OP_EQ_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d == estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d != estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d > estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d < estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d >= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_DOUBLE_S64):
- {
- int res;
-
- res = (estack_bx(stack, top)->u.d <= estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- OP(BYTECODE_OP_EQ_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v == estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_NE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v != estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GT_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v > estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LT_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v < estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_GE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v >= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_LE_S64_DOUBLE):
- {
- int res;
-
- res = (estack_bx_v <= estack_ax(stack, top)->u.d);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
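
The mixed-operand comparators above lean on C's usual arithmetic conversions: comparing an int64_t with a double promotes the integer side, so each opcode body reduces to a single expression. A self-contained illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t i = 3;
	double d = 3.0;

	/* i is converted to double before each comparison. */
	printf("%d\n", i == d);		/* prints 1 */
	printf("%d\n", i < 3.5);	/* prints 1 */
	return 0;
}
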
- OP(BYTECODE_OP_BIT_RSHIFT):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- /* Catch undefined behavior. */
- if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_LSHIFT):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- /* Catch undefined behavior. */
- if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
- ret = -EINVAL;
- goto end;
- }
- res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
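
The `< 0 || >= 64` guard in the two shift opcodes exists because C leaves shifts by a negative count, or by the operand's width or more, undefined. A sketch of the same checked-shift pattern as a helper; checked_shl is a hypothetical name, not part of this file:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

static int checked_shl(uint64_t v, int64_t count, uint64_t *res)
{
	if (count < 0 || count >= 64)
		return -EINVAL;	/* would be undefined behavior in C */
	*res = v << (uint32_t) count;
	return 0;
}

int main(void)
{
	uint64_t r;

	printf("%d\n", checked_shl(1, 3, &r));	/* 0, r == 8 */
	printf("%d\n", checked_shl(1, 64, &r));	/* -EINVAL */
	return 0;
}
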
- OP(BYTECODE_OP_BIT_AND):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_OR):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
- OP(BYTECODE_OP_BIT_XOR):
- {
- int64_t res;
-
- if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = res;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct binary_op);
- PO;
- }
-
- /* unary */
- OP(BYTECODE_OP_UNARY_PLUS):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through. */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_UNARY_PLUS_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_UNARY_PLUS_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_UNARY_MINUS):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through. */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_UNARY_MINUS_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_UNARY_MINUS_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
- OP(BYTECODE_OP_UNARY_NOT):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64: /* Fall-through. */
- case REG_U64:
- JUMP_TO(BYTECODE_OP_UNARY_NOT_S64);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_UNARY_NOT_DOUBLE);
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(BYTECODE_OP_UNARY_BIT_NOT):
- {
- /* Dynamic typing. */
- if (!IS_INTEGER_REGISTER(estack_ax_t)) {
- ret = -EINVAL;
- goto end;
- }
-
- estack_ax_v = ~(uint64_t) estack_ax_v;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- OP(BYTECODE_OP_UNARY_PLUS_S64):
- OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
- {
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_MINUS_S64):
- {
- estack_ax_v = -estack_ax_v;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
- {
- estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_NOT_S64):
- {
- estack_ax_v = !estack_ax_v;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
- OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
- {
- estack_ax_v = !estack_ax(stack, top)->u.d;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct unary_op);
- PO;
- }
-
- /* logical */
- OP(BYTECODE_OP_AND):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
- ret = -EINVAL;
- goto end;
- }
- /* If AX is 0, skip and evaluate to 0 */
- if (unlikely(estack_ax_v == 0)) {
- dbg_printf("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
- OP(BYTECODE_OP_OR):
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
- ret = -EINVAL;
- goto end;
- }
- /* If AX is nonzero, skip and evaluate to 1 */
- if (unlikely(estack_ax_v != 0)) {
- estack_ax_v = 1;
- dbg_printf("Jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- next_pc = start_pc + insn->skip_offset;
- } else {
- /* Pop 1 when jump not taken */
- estack_pop(stack, top, ax, bx, ax_t, bx_t);
- next_pc += sizeof(struct logical_op);
- }
- PO;
- }
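
AND and OR implement short-circuit evaluation with a skip_offset baked into the instruction at compile time: when the left operand already decides the result, the interpreter jumps past the right operand's bytecode instead of executing it. A toy model of that control flow, with hypothetical names:

#include <stdio.h>

struct logical_insn { int skip_offset; };

/* Returns the next pc: either fall through or jump past the RHS. */
static int and_next_pc(int pc, struct logical_insn insn, long ax)
{
	if (ax == 0)
		return insn.skip_offset;	/* RHS skipped, result stays 0 */
	return pc + 1;				/* evaluate RHS */
}

int main(void)
{
	struct logical_insn insn = { .skip_offset = 7 };

	printf("%d\n", and_next_pc(3, insn, 0));	/* 7: jump */
	printf("%d\n", and_next_pc(3, insn, 1));	/* 4: fall through */
	return 0;
}
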
-
- /* load field ref */
- OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type string\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str =
- *(const char * const *) &interpreter_stack_data[ref->offset];
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax_t = REG_STRING;
- dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type sequence\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.seq_len =
- *(unsigned long *) &interpreter_stack_data[ref->offset];
- estack_ax(stack, top)->u.s.str =
- *(const char **) (&interpreter_stack_data[ref->offset
- + sizeof(unsigned long)]);
- estack_ax_t = REG_STRING;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type s64\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v =
- ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
- estack_ax_t = REG_S64;
- dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("load field ref offset %u type double\n",
- ref->offset);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset],
- sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- /* load from immediate operand */
- OP(BYTECODE_OP_LOAD_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("load string %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_PLAIN;
- estack_ax_t = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("load globbing pattern %s\n", insn->data);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = insn->data;
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
- estack_ax_t = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = ((struct literal_numeric *) insn->data)->v;
- estack_ax_t = REG_S64;
- dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
-
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, insn->data,
- sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("load double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- PO;
- }
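
The load-immediate opcodes show that this is a variable-length instruction encoding: the operand lives inline after the opcode, and next_pc advances by the opcode size plus the payload size (strlen + 1 for strings, sizeof the literal otherwise). A simplified decoding sketch under that assumption, with a one-byte opcode for brevity:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* opcode 0x01, then a NUL-terminated string operand, then 0x02 */
	const char code[] = { 0x01, 'h', 'i', '\0', 0x02 };
	const char *pc = code;

	uint8_t op = (uint8_t) *pc;
	const char *str = pc + 1;	/* operand starts after the opcode */

	/* advance by opcode size + strlen + 1, like LOAD_STRING does */
	pc += 1 + strlen(str) + 1;
	printf("op=%u str=%s next_op=%u\n", op, str, (uint8_t) *pc);
	return 0;
}
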
-
- /* cast */
- OP(BYTECODE_OP_CAST_TO_S64):
- {
- /* Dynamic typing. */
- switch (estack_ax_t) {
- case REG_S64:
- JUMP_TO(BYTECODE_OP_CAST_NOP);
- case REG_DOUBLE:
- JUMP_TO(BYTECODE_OP_CAST_DOUBLE_TO_S64);
- case REG_U64:
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct cast_op);
- PO;
- case REG_STRING: /* Fall-through */
- case REG_STAR_GLOB_STRING:
- ret = -EINVAL;
- goto end;
- default:
- ERR("Unknown interpreter register type (%d)",
- (int) estack_ax_t);
- ret = -EINVAL;
- goto end;
- }
- }
-
- OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
- {
- estack_ax_v = (int64_t) estack_ax(stack, top)->u.d;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- OP(BYTECODE_OP_CAST_NOP):
- {
- next_pc += sizeof(struct cast_op);
- PO;
- }
-
- /* get context ref */
- OP(BYTECODE_OP_GET_CONTEXT_REF):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type dynamic\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- switch (v.sel) {
- case LTTNG_UST_DYNAMIC_TYPE_NONE:
- ret = -EINVAL;
- goto end;
- case LTTNG_UST_DYNAMIC_TYPE_S64:
- estack_ax_v = v.u.s64;
- estack_ax_t = REG_S64;
- dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
- estack_ax(stack, top)->u.d = v.u.d;
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d);
- break;
- case LTTNG_UST_DYNAMIC_TYPE_STRING:
- estack_ax(stack, top)->u.s.str = v.u.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str);
- estack_ax_t = REG_STRING;
- break;
- default:
- dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type string\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.s.str = v.u.str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax_t = REG_STRING;
- dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type s64\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax_v = v.u.s64;
- estack_ax_t = REG_S64;
- dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
- const struct lttng_ust_ctx_field *ctx_field;
- struct lttng_ust_ctx_value v;
-
- dbg_printf("get context ref offset %u type double\n",
- ref->offset);
- ctx_field = &ctx->fields[ref->offset];
- ctx_field->get_value(ctx_field->priv, &v);
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double));
- estack_ax_t = REG_DOUBLE;
- dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d);
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- PO;
- }
-
- OP(BYTECODE_OP_GET_CONTEXT_ROOT):
- {
- dbg_printf("op get context root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
- {
- dbg_printf("op get app context root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
- {
- dbg_printf("op get app payload root\n");
- estack_push(stack, top, ax, bx, ax_t, bx_t);
- estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
- estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
- /* "field" only needed for variants. */
- estack_ax(stack, top)->u.ptr.field = NULL;
- estack_ax_t = REG_PTR;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_GET_SYMBOL):
- {
- dbg_printf("op get symbol\n");
- switch (estack_ax(stack, top)->u.ptr.type) {
- case LOAD_OBJECT:
- ERR("Nested fields not implemented yet.");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- /*
- * symbol lookup is performed by
- * specialization.
- */
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- PO;
- }
-
- OP(BYTECODE_OP_GET_SYMBOL_FIELD):
- {
- /*
- * Used for first variant encountered in a
- * traversal. Variants are not implemented yet.
- */
- ret = -EINVAL;
- goto end;
- }
-
- OP(BYTECODE_OP_GET_INDEX_U16):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("op get index u16\n");
- ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- PO;
- }
-
- OP(BYTECODE_OP_GET_INDEX_U64):
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("op get index u64\n");
- ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD):
- {
- dbg_printf("op load field\n");
- ret = dynamic_load_field(estack_ax(stack, top));
- if (ret)
- goto end;
- estack_ax_v = estack_ax(stack, top)->u.v;
- estack_ax_t = estack_ax(stack, top)->type;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_S8):
- {
- dbg_printf("op load field s8\n");
-
- estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_S16):
- {
- dbg_printf("op load field s16\n");
-
- estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_S32):
- {
- dbg_printf("op load field s32\n");
-
- estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_S64):
- {
- dbg_printf("op load field s64\n");
-
- estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_S64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U8):
- {
- dbg_printf("op load field u8\n");
-
- estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U16):
- {
- dbg_printf("op load field u16\n");
-
- estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U32):
- {
- dbg_printf("op load field u32\n");
-
- estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_U64):
- {
- dbg_printf("op load field u64\n");
-
- estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax_t = REG_U64;
- next_pc += sizeof(struct load_op);
- PO;
- }
- OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
- {
- dbg_printf("op load field double\n");
-
- memcpy(&estack_ax(stack, top)->u.d,
- estack_ax(stack, top)->u.ptr.ptr,
- sizeof(struct literal_double));
- estack_ax(stack, top)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_STRING):
- {
- const char *str;
-
- dbg_printf("op load field string\n");
- str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.str = str;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL string.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- estack_ax(stack, top)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
- {
- const char *ptr;
-
- dbg_printf("op load field string sequence\n");
- ptr = estack_ax(stack, top)->u.ptr.ptr;
- estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
- estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
- estack_ax(stack, top)->type = REG_STRING;
- if (unlikely(!estack_ax(stack, top)->u.s.str)) {
- dbg_printf("Interpreter warning: loading a NULL sequence.\n");
- ret = -EINVAL;
- goto end;
- }
- estack_ax(stack, top)->u.s.literal_type =
- ESTACK_STRING_LITERAL_TYPE_NONE;
- next_pc += sizeof(struct load_op);
- PO;
- }
-
- END_OP
-end:
- /* No need to prepare output if an error occurred. */
- if (ret)
- return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
-
- /* Prepare output. */
- switch (ust_bytecode->type) {
- case LTTNG_UST_BYTECODE_TYPE_FILTER:
- {
- struct lttng_ust_bytecode_filter_ctx *filter_ctx =
- (struct lttng_ust_bytecode_filter_ctx *) caller_ctx;
- if (retval)
- filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_ACCEPT;
- else
- filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_REJECT;
- break;
- }
- case LTTNG_UST_BYTECODE_TYPE_CAPTURE:
- ret = lttng_bytecode_interpret_format_output(estack_ax(stack, top),
- (struct lttng_interpreter_output *) caller_ctx);
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret)
- return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
- else
- return LTTNG_UST_BYTECODE_INTERPRETER_OK;
-}
-
-/*
- * Return LTTNG_UST_EVENT_FILTER_ACCEPT or LTTNG_UST_EVENT_FILTER_REJECT.
- */
-int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
- const char *interpreter_stack_data,
- void *event_filter_ctx __attribute__((unused)))
-{
- struct lttng_ust_bytecode_runtime *filter_bc_runtime;
- struct cds_list_head *filter_bytecode_runtime_head = &event->priv->filter_bytecode_runtime_head;
- struct lttng_ust_bytecode_filter_ctx bytecode_filter_ctx;
- bool filter_record = false;
-
- cds_list_for_each_entry_rcu(filter_bc_runtime, filter_bytecode_runtime_head, node) {
- if (caa_likely(filter_bc_runtime->interpreter_func(filter_bc_runtime,
- interpreter_stack_data, &bytecode_filter_ctx) == LTTNG_UST_BYTECODE_INTERPRETER_OK)) {
- if (caa_unlikely(bytecode_filter_ctx.result == LTTNG_UST_BYTECODE_FILTER_ACCEPT)) {
- filter_record = true;
- break;
- }
- }
- }
- if (filter_record)
- return LTTNG_UST_EVENT_FILTER_ACCEPT;
- else
- return LTTNG_UST_EVENT_FILTER_REJECT;
-}
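
An event can carry several filter bytecodes; the loop above records the event as soon as one interpreter run both succeeds and accepts, i.e. an any-accept fold with early exit. A reduced model of that policy, with hypothetical function-pointer filters standing in for the RCU runtime list:

#include <stdbool.h>
#include <stdio.h>

typedef int (*filter_fn)(const char *data);	/* 1 = accept */

static bool any_accepts(filter_fn *filters, int n, const char *data)
{
	for (int i = 0; i < n; i++)
		if (filters[i](data))
			return true;	/* short-circuit like the RCU loop */
	return false;
}

static int reject_all(const char *d) { (void) d; return 0; }
static int accept_all(const char *d) { (void) d; return 1; }

int main(void)
{
	filter_fn f[] = { reject_all, accept_all };

	printf("%d\n", any_accepts(f, 2, "payload"));	/* prints 1 */
	return 0;
}
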
-
-#undef START_OP
-#undef OP
-#undef PO
-#undef END_OP
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode specializer.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <lttng/ust-utils.h>
-
-#include "context-internal.h"
-#include "lttng-bytecode.h"
-#include "ust-events-internal.h"
-#include "ust-helper.h"
-
-static int lttng_fls(int val)
-{
- int r = 32;
- unsigned int x = (unsigned int) val;
-
- if (!x)
- return 0;
- if (!(x & 0xFFFF0000U)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF000000U)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF0000000U)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC0000000U)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000U)) {
- r -= 1;
- }
- return r;
-}
-
-static int get_count_order(unsigned int count)
-{
- int order;
-
- order = lttng_fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
-}
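
get_count_order computes ceil(log2(count)), the exponent that rounds count up to the next power of two; bytecode_reserve_data below uses it to grow the data area geometrically. A standalone version with a portable (if naive) fls:

#include <stdio.h>

static int fls32(unsigned int x)
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

static int count_order(unsigned int count)
{
	int order = fls32(count) - 1;

	if (count & (count - 1))	/* not already a power of two */
		order++;
	return order;
}

int main(void)
{
	printf("%d %d %d\n", count_order(1), count_order(8), count_order(9));
	/* prints: 0 3 4 -> 1U << order rounds up to 1, 8, 16 */
	return 0;
}
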
-
-static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
- size_t align, size_t len)
-{
- ssize_t ret;
- size_t padding = lttng_ust_offset_align(runtime->data_len, align);
- size_t new_len = runtime->data_len + padding + len;
- size_t new_alloc_len = new_len;
- size_t old_alloc_len = runtime->data_alloc_len;
-
- if (new_len > BYTECODE_MAX_DATA_LEN)
- return -EINVAL;
-
- if (new_alloc_len > old_alloc_len) {
- char *newptr;
-
- new_alloc_len =
- max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
- newptr = realloc(runtime->data, new_alloc_len);
- if (!newptr)
- return -ENOMEM;
- runtime->data = newptr;
- /* Zero only the newly allocated region. */
- memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
- runtime->data_alloc_len = new_alloc_len;
- }
- runtime->data_len += padding;
- ret = runtime->data_len;
- runtime->data_len += len;
- return ret;
-}
-
-static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
- const void *p, size_t align, size_t len)
-{
- ssize_t offset;
-
- offset = bytecode_reserve_data(runtime, align, len);
- if (offset < 0)
- return -ENOMEM;
- memcpy(&runtime->data[offset], p, len);
- return offset;
-}
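
bytecode_reserve_data first pads data_len up to the requested alignment, then hands back the aligned offset and bumps the length; the backing allocation only grows, to the larger of the next power of two and double the old size. A sketch of the align-then-append arithmetic, with the buffer management elided and assuming align is a power of two:

#include <stddef.h>
#include <stdio.h>

/* Padding needed so that len becomes a multiple of align. */
static size_t offset_align(size_t len, size_t align)
{
	return (align - len) & (align - 1);
}

int main(void)
{
	size_t data_len = 5;		/* bytes already used */
	size_t align = 8, len = 16;	/* new record: 16 bytes, 8-aligned */

	size_t padding = offset_align(data_len, align);
	size_t offset = data_len + padding;	/* record starts here */

	data_len = offset + len;
	printf("padding=%zu offset=%zu new_len=%zu\n",
	       padding, offset, data_len);	/* 3 8 24 */
	return 0;
}
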
-
-static int specialize_load_field(struct vstack_entry *stack_top,
- struct load_op *insn)
-{
- int ret;
-
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- default:
- dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
- ret = -EINVAL;
- goto end;
- }
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_S8:
- dbg_printf("op load field s8\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S8;
- break;
- case OBJECT_TYPE_S16:
- dbg_printf("op load field s16\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S16;
- break;
- case OBJECT_TYPE_S32:
- dbg_printf("op load field s32\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S32;
- break;
- case OBJECT_TYPE_S64:
- dbg_printf("op load field s64\n");
- stack_top->type = REG_S64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_S64;
- break;
- case OBJECT_TYPE_SIGNED_ENUM:
- dbg_printf("op load field signed enumeration\n");
- stack_top->type = REG_PTR;
- break;
- case OBJECT_TYPE_U8:
- dbg_printf("op load field u8\n");
- stack_top->type = REG_U64;
- insn->op = BYTECODE_OP_LOAD_FIELD_U8;
- break;
- case OBJECT_TYPE_U16:
- dbg_printf("op load field u16\n");
- stack_top->type = REG_U64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_U16;
- break;
- case OBJECT_TYPE_U32:
- dbg_printf("op load field u32\n");
- stack_top->type = REG_U64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_U32;
- break;
- case OBJECT_TYPE_U64:
- dbg_printf("op load field u64\n");
- stack_top->type = REG_U64;
- if (!stack_top->load.rev_bo)
- insn->op = BYTECODE_OP_LOAD_FIELD_U64;
- break;
- case OBJECT_TYPE_UNSIGNED_ENUM:
- dbg_printf("op load field unsigned enumeration\n");
- stack_top->type = REG_PTR;
- break;
- case OBJECT_TYPE_DOUBLE:
- stack_top->type = REG_DOUBLE;
- insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
- break;
- case OBJECT_TYPE_STRING:
- dbg_printf("op load field string\n");
- stack_top->type = REG_STRING;
- insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
- break;
- case OBJECT_TYPE_STRING_SEQUENCE:
- dbg_printf("op load field string sequence\n");
- stack_top->type = REG_STRING;
- insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
- break;
- case OBJECT_TYPE_DYNAMIC:
- dbg_printf("op load field dynamic\n");
- stack_top->type = REG_UNKNOWN;
- /* Don't specialize load op. */
- break;
- case OBJECT_TYPE_SEQUENCE:
- case OBJECT_TYPE_ARRAY:
- case OBJECT_TYPE_STRUCT:
- case OBJECT_TYPE_VARIANT:
- ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
- ret = -EINVAL;
- goto end;
- }
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_get_index_object_type(enum object_type *otype,
- int signedness, uint32_t elem_len)
-{
- switch (elem_len) {
- case 8:
- if (signedness)
- *otype = OBJECT_TYPE_S8;
- else
- *otype = OBJECT_TYPE_U8;
- break;
- case 16:
- if (signedness)
- *otype = OBJECT_TYPE_S16;
- else
- *otype = OBJECT_TYPE_U16;
- break;
- case 32:
- if (signedness)
- *otype = OBJECT_TYPE_S32;
- else
- *otype = OBJECT_TYPE_U32;
- break;
- case 64:
- if (signedness)
- *otype = OBJECT_TYPE_S64;
- else
- *otype = OBJECT_TYPE_U64;
- break;
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_get_index(struct bytecode_runtime *runtime,
- struct load_op *insn, uint64_t index,
- struct vstack_entry *stack_top,
- int idx_len)
-{
- int ret;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- memset(&gid, 0, sizeof(gid));
- switch (stack_top->load.type) {
- case LOAD_OBJECT:
- switch (stack_top->load.object_type) {
- case OBJECT_TYPE_ARRAY:
- {
- const struct lttng_ust_type_integer *integer_type;
- const struct lttng_ust_event_field *field;
- uint32_t elem_len, num_elems;
- int signedness;
-
- field = stack_top->load.field;
- switch (field->type->type) {
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_array(field->type)->elem_type);
- num_elems = lttng_ust_get_type_array(field->type)->length;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- if (index >= num_elems) {
- ret = -EINVAL;
- goto end;
- }
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.array_len = num_elems * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_SEQUENCE:
- {
- const struct lttng_ust_type_integer *integer_type;
- const struct lttng_ust_event_field *field;
- uint32_t elem_len;
- int signedness;
-
- field = stack_top->load.field;
- switch (field->type->type) {
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
- ret = -EINVAL;
- goto end;
- }
- integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_sequence(field->type)->elem_type);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- elem_len = integer_type->size;
- signedness = integer_type->signedness;
- ret = specialize_get_index_object_type(&stack_top->load.object_type,
- signedness, elem_len);
- if (ret)
- goto end;
- gid.offset = index * (elem_len / CHAR_BIT);
- gid.elem.type = stack_top->load.object_type;
- gid.elem.len = elem_len;
- if (integer_type->reverse_byte_order)
- gid.elem.rev_bo = true;
- stack_top->load.rev_bo = gid.elem.rev_bo;
- break;
- }
- case OBJECT_TYPE_STRUCT:
- /* Only generated by the specialize phase. */
- case OBJECT_TYPE_VARIANT: /* Fall-through */
- default:
- ERR("Unexpected get index type %d",
- (int) stack_top->load.object_type);
- ret = -EINVAL;
- goto end;
- }
- break;
- case LOAD_ROOT_CONTEXT:
- case LOAD_ROOT_APP_CONTEXT:
- case LOAD_ROOT_PAYLOAD:
- ERR("Index lookup for root field not implemented yet.");
- ret = -EINVAL;
- goto end;
- }
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- switch (idx_len) {
- case 2:
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- break;
- case 8:
- ((struct get_index_u64 *) insn->data)->index = data_offset;
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
-
- return 0;
-
-end:
- return ret;
-}
-
-static int specialize_context_lookup_name(struct lttng_ust_ctx *ctx,
- struct bytecode_runtime *bytecode,
- struct load_op *insn)
-{
- uint16_t offset;
- const char *name;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
- return lttng_get_context_index(ctx, name);
-}
-
-static int specialize_load_object(const struct lttng_ust_event_field *field,
- struct vstack_load *load, bool is_context)
-{
- load->type = LOAD_OBJECT;
-
- switch (field->type->type) {
- case lttng_ust_type_integer:
- if (lttng_ust_get_type_integer(field->type)->signedness)
- load->object_type = OBJECT_TYPE_S64;
- else
- load->object_type = OBJECT_TYPE_U64;
- load->rev_bo = false;
- break;
- case lttng_ust_type_enum:
- {
- const struct lttng_ust_type_integer *itype;
-
- itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
- if (itype->signedness)
- load->object_type = OBJECT_TYPE_SIGNED_ENUM;
- else
- load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
- load->rev_bo = false;
- break;
- }
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Array nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
- load->object_type = OBJECT_TYPE_ARRAY;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
- ERR("Sequence nesting only supports integer types.");
- return -EINVAL;
- }
- if (is_context) {
- load->object_type = OBJECT_TYPE_STRING;
- } else {
- if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
- load->object_type = OBJECT_TYPE_SEQUENCE;
- load->field = field;
- } else {
- load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
- }
- }
- break;
-
- case lttng_ust_type_string:
- load->object_type = OBJECT_TYPE_STRING;
- break;
- case lttng_ust_type_float:
- load->object_type = OBJECT_TYPE_DOUBLE;
- break;
- case lttng_ust_type_dynamic:
- load->object_type = OBJECT_TYPE_DYNAMIC;
- break;
- default:
- ERR("Unknown type: %d", (int) field->type->type);
- return -EINVAL;
- }
- return 0;
-}
-
-static int specialize_context_lookup(struct lttng_ust_ctx *ctx,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- int idx, ret;
- const struct lttng_ust_ctx_field *ctx_field;
- const struct lttng_ust_event_field *field;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- idx = specialize_context_lookup_name(ctx, runtime, insn);
- if (idx < 0) {
- return -ENOENT;
- }
- ctx_field = &ctx->fields[idx];
- field = ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- return ret;
- /* Specialize each get_symbol into a get_index. */
- insn->op = BYTECODE_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- return -EINVAL;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- return 0;
-}
-
-static int specialize_app_context_lookup(struct lttng_ust_ctx **pctx,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- uint16_t offset;
- const char *orig_name;
- char *name = NULL;
- int idx, ret;
- const struct lttng_ust_ctx_field *ctx_field;
- const struct lttng_ust_event_field *field;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- offset = ((struct get_symbol *) insn->data)->offset;
- orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
- if (!name) {
- ret = -ENOMEM;
- goto end;
- }
- strcpy(name, "$app.");
- strcat(name, orig_name);
- idx = lttng_get_context_index(*pctx, name);
- if (idx < 0) {
- assert(lttng_context_is_app(name));
- ret = lttng_ust_add_app_context_to_ctx_rcu(name,
- pctx);
- if (ret)
- goto end;	/* free(name) before returning */
- idx = lttng_get_context_index(*pctx, name);
- if (idx < 0) {
- ret = -ENOENT;
- goto end;
- }
- }
- ctx_field = &(*pctx)->fields[idx];
- field = ctx_field->event_field;
- ret = specialize_load_object(field, load, true);
- if (ret)
- goto end;
- /* Specialize each get_symbol into a get_index. */
- insn->op = BYTECODE_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.ctx_index = idx;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- free(name);
- return ret;
-}
-
-static int specialize_payload_lookup(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- struct load_op *insn,
- struct vstack_load *load)
-{
- const char *name;
- uint16_t offset;
- unsigned int i, nr_fields;
- bool found = false;
- uint32_t field_offset = 0;
- const struct lttng_ust_event_field *field;
- int ret;
- struct bytecode_get_index_data gid;
- ssize_t data_offset;
-
- nr_fields = event_desc->nr_fields;
- offset = ((struct get_symbol *) insn->data)->offset;
- name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
- for (i = 0; i < nr_fields; i++) {
- field = event_desc->fields[i];
- if (field->nofilter) {
- continue;
- }
- if (!strcmp(field->name, name)) {
- found = true;
- break;
- }
- /* compute field offset on stack */
- switch (field->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- field_offset += sizeof(int64_t);
- break;
- case lttng_ust_type_array:
- case lttng_ust_type_sequence:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_string:
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_float:
- field_offset += sizeof(double);
- break;
- default:
- ret = -EINVAL;
- goto end;
- }
- }
- if (!found) {
- ret = -EINVAL;
- goto end;
- }
-
- ret = specialize_load_object(field, load, false);
- if (ret)
- goto end;
-
- /* Specialize each get_symbol into a get_index. */
- insn->op = BYTECODE_OP_GET_INDEX_U16;
- memset(&gid, 0, sizeof(gid));
- gid.offset = field_offset;
- gid.elem.type = load->object_type;
- gid.elem.rev_bo = load->rev_bo;
- gid.field = field;
- data_offset = bytecode_push_data(runtime, &gid,
- __alignof__(gid), sizeof(gid));
- if (data_offset < 0) {
- ret = -EINVAL;
- goto end;
- }
- ((struct get_index_u16 *) insn->data)->index = data_offset;
- ret = 0;
-end:
- return ret;
-}
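-
-/*
- * Worked example (hypothetical event, assuming an LP64 target): for
- * payload fields { int64_t a; string b; float c; }, the loop above
- * computes field_offset 0 for "a", 8 for "b" (one int64_t slot) and
- * 16 for "c" (8 plus sizeof(void *) for the string pointer); each
- * float then occupies sizeof(double) in the interpreter stack frame.
- */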
-
-int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *bytecode)
-{
- void *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack _stack;
- struct vstack *stack = &_stack;
- struct lttng_ust_ctx **pctx = bytecode->p.pctx;
-
- vstack_init(stack);
-
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- switch (*(bytecode_opcode_t *) pc) {
- case BYTECODE_OP_UNKNOWN:
- default:
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case BYTECODE_OP_RETURN:
- if (vstack_ax(stack)->type == REG_S64 ||
- vstack_ax(stack)->type == REG_U64)
- *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
- ret = 0;
- goto end;
-
- case BYTECODE_OP_RETURN_S64:
- if (vstack_ax(stack)->type != REG_S64 &&
- vstack_ax(stack)->type != REG_U64) {
- ERR("Unexpected register type\n");
- ret = -EINVAL;
- goto end;
- }
- ret = 0;
- goto end;
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
-
- case BYTECODE_OP_EQ:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
- else
- insn->op = BYTECODE_OP_EQ_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_EQ_S64;
- else
- insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_EQ_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_NE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
- insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
- else
- insn->op = BYTECODE_OP_NE_STRING;
- break;
- case REG_STAR_GLOB_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_NE_S64;
- else
- insn->op = BYTECODE_OP_NE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_NE_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_NE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_GT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for > binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_GT_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GT_S64;
- else
- insn->op = BYTECODE_OP_GT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GT_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_GT_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_LT:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for < binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_LT_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LT_S64;
- else
- insn->op = BYTECODE_OP_LT_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LT_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_LT_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_GE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for >= binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_GE_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GE_S64;
- else
- insn->op = BYTECODE_OP_GE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_GE_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_GE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
- case BYTECODE_OP_LE:
- {
- struct binary_op *insn = (struct binary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STAR_GLOB_STRING:
- ERR("invalid register type for <= binary operator\n");
- ret = -EINVAL;
- goto end;
- case REG_STRING:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- insn->op = BYTECODE_OP_LE_STRING;
- break;
- case REG_S64:
- case REG_U64:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LE_S64;
- else
- insn->op = BYTECODE_OP_LE_DOUBLE_S64;
- break;
- case REG_DOUBLE:
- if (vstack_bx(stack)->type == REG_UNKNOWN)
- break;
- if (vstack_bx(stack)->type == REG_S64 ||
- vstack_bx(stack)->type == REG_U64)
- insn->op = BYTECODE_OP_LE_S64_DOUBLE;
- else
- insn->op = BYTECODE_OP_LE_DOUBLE;
- break;
- case REG_UNKNOWN:
- break; /* Dynamic typing. */
- }
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_BIT_RSHIFT:
- case BYTECODE_OP_BIT_LSHIFT:
- case BYTECODE_OP_BIT_AND:
- case BYTECODE_OP_BIT_OR:
- case BYTECODE_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- case REG_U64:
- insn->op = BYTECODE_OP_UNARY_PLUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_MINUS:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- case REG_U64:
- insn->op = BYTECODE_OP_UNARY_MINUS_S64;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_NOT:
- {
- struct unary_op *insn = (struct unary_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_S64:
- case REG_U64:
- insn->op = BYTECODE_OP_UNARY_NOT_S64;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
- break;
- case REG_UNKNOWN: /* Dynamic typing. */
- break;
- }
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- /* cast */
- case BYTECODE_OP_CAST_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- insn->op = BYTECODE_OP_CAST_NOP;
- break;
- case REG_DOUBLE:
- insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
- break;
- case REG_UNKNOWN:
- case REG_U64:
- break;
- }
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case BYTECODE_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
- next_pc += sizeof(struct load_op);
- break;
- }
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- assert(vstack_ax(stack)->type == REG_PTR);
- /* Pop 1, push 1 */
- ret = specialize_load_field(vstack_ax(stack), insn);
- if (ret)
- goto end;
-
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_S8:
- case BYTECODE_OP_LOAD_FIELD_S16:
- case BYTECODE_OP_LOAD_FIELD_S32:
- case BYTECODE_OP_LOAD_FIELD_S64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_U8:
- case BYTECODE_OP_LOAD_FIELD_U16:
- case BYTECODE_OP_LOAD_FIELD_U32:
- case BYTECODE_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_STRING:
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- dbg_printf("op get symbol\n");
- switch (vstack_ax(stack)->load.type) {
- case LOAD_OBJECT:
- ERR("Nested fields not implemented yet.");
- ret = -EINVAL;
- goto end;
- case LOAD_ROOT_CONTEXT:
- /* Lookup context field. */
- ret = specialize_context_lookup(*pctx,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_APP_CONTEXT:
- /* Lookup app context field. */
- ret = specialize_app_context_lookup(pctx,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- case LOAD_ROOT_PAYLOAD:
- /* Lookup event payload field. */
- ret = specialize_payload_lookup(event_desc,
- bytecode, insn,
- &vstack_ax(stack)->load);
- if (ret)
- goto end;
- break;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- {
- /* Always generated by specialize phase. */
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("op get index u16\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("op get index u64\n");
- /* Pop 1, push 1 */
- ret = specialize_get_index(bytecode, insn, index->index,
- vstack_ax(stack), sizeof(*index));
- if (ret)
- goto end;
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
- }
-end:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode validator.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <time.h>
-
-#include "rculfhash.h"
-
-#include "lttng-bytecode.h"
-#include "lttng-hash-helper.h"
-#include "string-utils.h"
-#include "ust-events-internal.h"
-#include "ust-helper.h"
-
-/*
- * Number of merge points used to size the hash table. The hash table
- * is initialized to that size and never resized, because resizing
- * would trigger RCU worker thread execution: fall back on linear
- * traversal if the number of merge points exceeds this value.
- */
-#define DEFAULT_NR_MERGE_POINTS 128
-#define MIN_NR_BUCKETS 128
-#define MAX_NR_BUCKETS 128
-
-/* merge point table node */
-struct lfht_mp_node {
- struct lttng_ust_lfht_node node;
-
- /* Context at merge point */
- struct vstack stack;
- unsigned long target_pc;
-};
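-
-/*
- * A merge point is the target of a logical and/or jump: both the
- * fall-through path and the jump-taken path reach that offset, so the
- * validator records the virtual stack expected there and later checks
- * that the stack produced by linear traversal agrees with it.
- */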
-
-static unsigned long lttng_hash_seed;
-static unsigned int lttng_hash_seed_ready;
-
-static
-int lttng_hash_match(struct lttng_ust_lfht_node *node, const void *key)
-{
- struct lfht_mp_node *mp_node =
- caa_container_of(node, struct lfht_mp_node, node);
- unsigned long key_pc = (unsigned long) key;
-
- if (mp_node->target_pc == key_pc)
- return 1;
- else
- return 0;
-}
-
-static
-int merge_points_compare(const struct vstack *stacka,
- const struct vstack *stackb)
-{
- int i, len;
-
- if (stacka->top != stackb->top)
- return 1;
- len = stacka->top + 1;
- assert(len >= 0);
- for (i = 0; i < len; i++) {
- if (stacka->e[i].type != REG_UNKNOWN
- && stackb->e[i].type != REG_UNKNOWN
- && stacka->e[i].type != stackb->e[i].type)
- return 1;
- }
- return 0;
-}
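-
-/*
- * REG_UNKNOWN acts as a wildcard in the comparison above: stacks
- * [REG_S64] and [REG_UNKNOWN] are considered compatible (dynamic
- * typing is resolved at runtime), whereas [REG_S64] and [REG_STRING]
- * differ. Only stacks of equal depth can match.
- */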
-
-static
-int merge_point_add_check(struct lttng_ust_lfht *ht, unsigned long target_pc,
- const struct vstack *stack)
-{
- struct lfht_mp_node *node;
- unsigned long hash = lttng_hash_mix((const char *) target_pc,
- sizeof(target_pc),
- lttng_hash_seed);
- struct lttng_ust_lfht_node *ret;
-
- dbg_printf("Bytecode: adding merge point at offset %lu, hash %lu\n",
- target_pc, hash);
- node = zmalloc(sizeof(struct lfht_mp_node));
- if (!node)
- return -ENOMEM;
- node->target_pc = target_pc;
- memcpy(&node->stack, stack, sizeof(node->stack));
- ret = lttng_ust_lfht_add_unique(ht, hash, lttng_hash_match,
- (const char *) target_pc, &node->node);
- if (ret != &node->node) {
- struct lfht_mp_node *ret_mp =
- caa_container_of(ret, struct lfht_mp_node, node);
-
- /* Key already present */
- dbg_printf("Bytecode: compare merge points for offset %lu, hash %lu\n",
- target_pc, hash);
- free(node);
- if (merge_points_compare(stack, &ret_mp->stack)) {
- ERR("Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- }
- return 0;
-}
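-
-/*
- * lttng_ust_lfht_add_unique() returns the pre-existing node when the
- * target pc is already in the table, in which case the recorded and
- * candidate stacks are compared instead of inserting a duplicate;
- * this presumably happens when several logical operators share the
- * same jump target.
- */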
-
-/*
- * Binary comparators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_compare_check(struct vstack *stack, bytecode_opcode_t opcode,
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- break;
- case REG_STAR_GLOB_STRING:
- if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
- goto error_mismatch;
- }
- break;
- case REG_STAR_GLOB_STRING:
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- goto error_mismatch;
- }
- break;
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- goto error_mismatch;
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_mismatch:
- ERR("type mismatch for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_empty:
- ERR("empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- ERR("unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
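-
-/*
- * Compatibility summary for the checks above: string compares with
- * string under every comparator, and with a star-glob pattern only
- * under == and !=; the numeric registers (REG_S64, REG_U64,
- * REG_DOUBLE) all compare with one another; REG_UNKNOWN on either
- * side defers the check to runtime (return value 1).
- */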
-
-/*
- * Binary bitwise operators use top of stack and top of stack -1.
- * Return 0 if typing is known to match, 1 if typing is dynamic
- * (unknown), negative error value on error.
- */
-static
-int bin_op_bitwise_check(struct vstack *stack,
- bytecode_opcode_t opcode __attribute__((unused)),
- const char *str)
-{
- if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
- goto error_empty;
-
- switch (vstack_ax(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_S64:
- case REG_U64:
- switch (vstack_bx(stack)->type) {
- default:
- goto error_type;
-
- case REG_UNKNOWN:
- goto unknown;
- case REG_S64:
- case REG_U64:
- break;
- }
- break;
- }
- return 0;
-
-unknown:
- return 1;
-
-error_empty:
- ERR("empty stack for '%s' binary operator\n", str);
- return -EINVAL;
-
-error_type:
- ERR("unknown type for '%s' binary operator\n", str);
- return -EINVAL;
-}
-
-static
-int validate_get_symbol(struct bytecode_runtime *bytecode,
- const struct get_symbol *sym)
-{
- const char *str, *str_limit;
- size_t len_limit;
-
- if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
- return -EINVAL;
-
- str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
- str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
- len_limit = str_limit - str;
- if (strnlen(str, len_limit) == len_limit)
- return -EINVAL;
- return 0;
-}
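-
-/*
- * The two checks above ensure that the symbol offset falls within the
- * string table located after reloc_offset, and that a terminating NUL
- * is present before the end of the bytecode buffer: strnlen()
- * returning len_limit means no NUL was found in the remaining bytes.
- */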
-
-/*
- * Validate bytecode range overflow within the validation pass.
- * Called for each instruction encountered.
- */
-static
-int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
- char *start_pc, char *pc)
-{
- int ret = 0;
-
- switch (*(bytecode_opcode_t *) pc) {
- case BYTECODE_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case BYTECODE_OP_RETURN:
- case BYTECODE_OP_RETURN_S64:
- {
- if (unlikely(pc + sizeof(struct return_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- break;
- }
-
- case BYTECODE_OP_EQ:
- case BYTECODE_OP_NE:
- case BYTECODE_OP_GT:
- case BYTECODE_OP_LT:
- case BYTECODE_OP_GE:
- case BYTECODE_OP_LE:
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- case BYTECODE_OP_BIT_RSHIFT:
- case BYTECODE_OP_BIT_LSHIFT:
- case BYTECODE_OP_BIT_AND:
- case BYTECODE_OP_BIT_OR:
- case BYTECODE_OP_BIT_XOR:
- {
- if (unlikely(pc + sizeof(struct binary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- case BYTECODE_OP_UNARY_MINUS:
- case BYTECODE_OP_UNARY_NOT:
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- if (unlikely(pc + sizeof(struct unary_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- if (unlikely(pc + sizeof(struct logical_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- break;
- }
-
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- uint32_t str_len, maxlen;
-
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
-
- maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
- str_len = strnlen(insn->data, maxlen);
- if (unlikely(str_len >= maxlen)) {
- /* Final '\0' not found within range */
- ret = -ERANGE;
- }
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- case BYTECODE_OP_CAST_TO_S64:
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- case BYTECODE_OP_CAST_NOP:
- {
- if (unlikely(pc + sizeof(struct cast_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- case BYTECODE_OP_LOAD_FIELD:
- case BYTECODE_OP_LOAD_FIELD_S8:
- case BYTECODE_OP_LOAD_FIELD_S16:
- case BYTECODE_OP_LOAD_FIELD_S32:
- case BYTECODE_OP_LOAD_FIELD_S64:
- case BYTECODE_OP_LOAD_FIELD_U8:
- case BYTECODE_OP_LOAD_FIELD_U16:
- case BYTECODE_OP_LOAD_FIELD_U32:
- case BYTECODE_OP_LOAD_FIELD_U64:
- case BYTECODE_OP_LOAD_FIELD_STRING:
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- if (unlikely(pc + sizeof(struct load_op)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case BYTECODE_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- break;
- }
- ret = validate_get_symbol(bytecode, sym);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- ERR("Unexpected get symbol field");
- ret = -EINVAL;
- break;
-
- case BYTECODE_OP_GET_INDEX_U16:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
-
- case BYTECODE_OP_GET_INDEX_U64:
- if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
- > start_pc + bytecode->len)) {
- ret = -ERANGE;
- }
- break;
- }
-
- return ret;
-}
-
-static
-unsigned long delete_all_nodes(struct lttng_ust_lfht *ht)
-{
- struct lttng_ust_lfht_iter iter;
- struct lfht_mp_node *node;
- unsigned long nr_nodes = 0;
-
- lttng_ust_lfht_for_each_entry(ht, &iter, node, node) {
- int ret;
-
- ret = lttng_ust_lfht_del(ht, lttng_ust_lfht_iter_get_node(&iter));
- assert(!ret);
- /* note: this hash table is never used concurrently */
- free(node);
- nr_nodes++;
- }
- return nr_nodes;
-}
-
-/*
- * Return value:
- * >=0: success
- * <0: error
- */
-static
-int validate_instruction_context(
- struct bytecode_runtime *bytecode __attribute__((unused)),
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret = 0;
- const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
-
- switch (opcode) {
- case BYTECODE_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_RETURN:
- case BYTECODE_OP_RETURN_S64:
- {
- goto end;
- }
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) opcode);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_EQ:
- {
- ret = bin_op_compare_check(stack, opcode, "==");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_NE:
- {
- ret = bin_op_compare_check(stack, opcode, "!=");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_GT:
- {
- ret = bin_op_compare_check(stack, opcode, ">");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_LT:
- {
- ret = bin_op_compare_check(stack, opcode, "<");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_GE:
- {
- ret = bin_op_compare_check(stack, opcode, ">=");
- if (ret < 0)
- goto end;
- break;
- }
- case BYTECODE_OP_LE:
- {
- ret = bin_op_compare_check(stack, opcode, "<=");
- if (ret < 0)
- goto end;
- break;
- }
-
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STRING
- || vstack_bx(stack)->type != REG_STRING) {
- ERR("Unexpected register type for string comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
- && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
- ERR("Unexpected register type for globbing pattern comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_bx(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type for s64 comparator\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
- ERR("Double operator should have two double registers\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Double-S64 operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_bx(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Double-S64 operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- {
- if (!vstack_ax(stack) || !vstack_bx(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("S64-Double operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_bx(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("S64-Double operator has unexpected register types\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_BIT_RSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, ">>");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_LSHIFT:
- ret = bin_op_bitwise_check(stack, opcode, "<<");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_AND:
- ret = bin_op_bitwise_check(stack, opcode, "&");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_OR:
- ret = bin_op_bitwise_check(stack, opcode, "|");
- if (ret < 0)
- goto end;
- break;
- case BYTECODE_OP_BIT_XOR:
- ret = bin_op_bitwise_check(stack, opcode, "^");
- if (ret < 0)
- goto end;
- break;
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- case BYTECODE_OP_UNARY_MINUS:
- case BYTECODE_OP_UNARY_NOT:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Unary op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_U64:
- break;
- case REG_DOUBLE:
- break;
- case REG_UNKNOWN:
- break;
- }
- break;
- }
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_DOUBLE:
- ERR("Unary bitwise op can only be applied to numeric registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_U64:
- break;
- case REG_UNKNOWN:
- break;
- }
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64 &&
- vstack_ax(stack)->type != REG_U64) {
- ERR("Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- ERR("Invalid register type\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_S64
- && vstack_ax(stack)->type != REG_U64
- && vstack_ax(stack)->type != REG_UNKNOWN) {
- ERR("Logical comparator expects S64, U64 or dynamic register\n");
- ret = -EINVAL;
- goto end;
- }
-
- dbg_printf("Validate jumping to bytecode offset %u\n",
- (unsigned int) insn->skip_offset);
- if (unlikely(start_pc + insn->skip_offset <= pc)) {
- ERR("Loops are not allowed in bytecode\n");
- ret = -EINVAL;
- goto end;
- }
- break;
- }
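-
- /*
- * Because skip_offset must land strictly after the current
- * instruction, all jumps go forward and the bytecode is acyclic,
- * which guarantees that validation and interpretation terminate.
- * For a hypothetical filter "a && b", the and instruction's skip
- * target points past the evaluation of "b".
- */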
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type string\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type s64\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate load field ref offset %u type double\n",
- ref->offset);
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- break;
- }
-
- case BYTECODE_OP_CAST_TO_S64:
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- {
- struct cast_op *insn = (struct cast_op *) pc;
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- default:
- ERR("unknown register type\n");
- ret = -EINVAL;
- goto end;
-
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- ERR("Cast op can only be applied to numeric or floating point registers\n");
- ret = -EINVAL;
- goto end;
- case REG_S64:
- break;
- case REG_U64:
- break;
- case REG_DOUBLE:
- break;
- case REG_UNKNOWN:
- break;
- }
- if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
- if (vstack_ax(stack)->type != REG_DOUBLE) {
- ERR("Cast expects double\n");
- ret = -EINVAL;
- goto end;
- }
- }
- break;
- }
- case BYTECODE_OP_CAST_NOP:
- {
- break;
- }
-
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type dynamic\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type string\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type s64\n",
- ref->offset);
- break;
- }
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct field_ref *ref = (struct field_ref *) insn->data;
-
- dbg_printf("Validate get context ref offset %u type double\n",
- ref->offset);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- {
- dbg_printf("Validate get context root\n");
- break;
- }
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- {
- dbg_printf("Validate get app context root\n");
- break;
- }
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- {
- dbg_printf("Validate get payload root\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD:
- {
- /*
- * We tolerate an unknown field type at validation time,
- * because load specialization is performed in a later
- * phase, after validation.
- */
- dbg_printf("Validate load field\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S8:
- {
- dbg_printf("Validate load field s8\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S16:
- {
- dbg_printf("Validate load field s16\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S32:
- {
- dbg_printf("Validate load field s32\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_S64:
- {
- dbg_printf("Validate load field s64\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U8:
- {
- dbg_printf("Validate load field u8\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U16:
- {
- dbg_printf("Validate load field u16\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U32:
- {
- dbg_printf("Validate load field u32\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_U64:
- {
- dbg_printf("Validate load field u64\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_STRING:
- {
- dbg_printf("Validate load field string\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- {
- dbg_printf("Validate load field sequence\n");
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- {
- dbg_printf("Validate load field double\n");
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printf("Validate get symbol offset %u\n", sym->offset);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_symbol *sym = (struct get_symbol *) insn->data;
-
- dbg_printf("Validate get symbol field offset %u\n", sym->offset);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U16:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
-
- dbg_printf("Validate get index u16 index %u\n", get_index->index);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U64:
- {
- struct load_op *insn = (struct load_op *) pc;
- struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
-
- dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
- break;
- }
- }
-end:
- return ret;
-}
-
-/*
- * Return value:
- * 0: success
- * <0: error
- */
-static
-int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
- struct lttng_ust_lfht *merge_points,
- struct vstack *stack,
- char *start_pc,
- char *pc)
-{
- int ret;
- unsigned long target_pc = pc - start_pc;
- struct lttng_ust_lfht_iter iter;
- struct lttng_ust_lfht_node *node;
- struct lfht_mp_node *mp_node;
- unsigned long hash;
-
- /* Validate the context resulting from the previous instruction */
- ret = validate_instruction_context(bytecode, stack, start_pc, pc);
- if (ret < 0)
- return ret;
-
- /* Validate merge points */
- hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
- lttng_hash_seed);
- lttng_ust_lfht_lookup(merge_points, hash, lttng_hash_match,
- (const char *) target_pc, &iter);
- node = lttng_ust_lfht_iter_get_node(&iter);
- if (node) {
- mp_node = caa_container_of(node, struct lfht_mp_node, node);
-
- dbg_printf("Bytecode: validate merge point at offset %lu\n",
- target_pc);
- if (merge_points_compare(stack, &mp_node->stack)) {
- ERR("Merge points differ for offset %lu\n",
- target_pc);
- return -EINVAL;
- }
- /* Once validated, we can remove the merge point */
- dbg_printf("Bytecode: remove merge point at offset %lu\n",
- target_pc);
- ret = lttng_ust_lfht_del(merge_points, node);
- assert(!ret);
- }
- return 0;
-}
-
-/*
- * Return value:
- * >0: going to next insn.
- * 0: success, stop iteration.
- * <0: error
- */
-static
-int exec_insn(struct bytecode_runtime *bytecode __attribute__((unused)),
- struct lttng_ust_lfht *merge_points,
- struct vstack *stack,
- char **_next_pc,
- char *pc)
-{
- int ret = 1;
- char *next_pc = *_next_pc;
-
- switch (*(bytecode_opcode_t *) pc) {
- case BYTECODE_OP_UNKNOWN:
- default:
- {
- ERR("unknown bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_RETURN:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_PTR:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
- case BYTECODE_OP_RETURN_S64:
- {
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- case REG_UNKNOWN:
- ERR("Unexpected register type %d at end of bytecode\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- ret = 0;
- goto end;
- }
-
- /* binary */
- case BYTECODE_OP_MUL:
- case BYTECODE_OP_DIV:
- case BYTECODE_OP_MOD:
- case BYTECODE_OP_PLUS:
- case BYTECODE_OP_MINUS:
- {
- ERR("unsupported bytecode op %u\n",
- (unsigned int) *(bytecode_opcode_t *) pc);
- ret = -EINVAL;
- goto end;
- }
-
- case BYTECODE_OP_EQ:
- case BYTECODE_OP_NE:
- case BYTECODE_OP_GT:
- case BYTECODE_OP_LT:
- case BYTECODE_OP_GE:
- case BYTECODE_OP_LE:
- case BYTECODE_OP_EQ_STRING:
- case BYTECODE_OP_NE_STRING:
- case BYTECODE_OP_GT_STRING:
- case BYTECODE_OP_LT_STRING:
- case BYTECODE_OP_GE_STRING:
- case BYTECODE_OP_LE_STRING:
- case BYTECODE_OP_EQ_STAR_GLOB_STRING:
- case BYTECODE_OP_NE_STAR_GLOB_STRING:
- case BYTECODE_OP_EQ_S64:
- case BYTECODE_OP_NE_S64:
- case BYTECODE_OP_GT_S64:
- case BYTECODE_OP_LT_S64:
- case BYTECODE_OP_GE_S64:
- case BYTECODE_OP_LE_S64:
- case BYTECODE_OP_EQ_DOUBLE:
- case BYTECODE_OP_NE_DOUBLE:
- case BYTECODE_OP_GT_DOUBLE:
- case BYTECODE_OP_LT_DOUBLE:
- case BYTECODE_OP_GE_DOUBLE:
- case BYTECODE_OP_LE_DOUBLE:
- case BYTECODE_OP_EQ_DOUBLE_S64:
- case BYTECODE_OP_NE_DOUBLE_S64:
- case BYTECODE_OP_GT_DOUBLE_S64:
- case BYTECODE_OP_LT_DOUBLE_S64:
- case BYTECODE_OP_GE_DOUBLE_S64:
- case BYTECODE_OP_LE_DOUBLE_S64:
- case BYTECODE_OP_EQ_S64_DOUBLE:
- case BYTECODE_OP_NE_S64_DOUBLE:
- case BYTECODE_OP_GT_S64_DOUBLE:
- case BYTECODE_OP_LT_S64_DOUBLE:
- case BYTECODE_OP_GE_S64_DOUBLE:
- case BYTECODE_OP_LE_S64_DOUBLE:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- case BYTECODE_OP_BIT_RSHIFT:
- case BYTECODE_OP_BIT_LSHIFT:
- case BYTECODE_OP_BIT_AND:
- case BYTECODE_OP_BIT_OR:
- case BYTECODE_OP_BIT_XOR:
- {
- /* Pop 2, push 1 */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_STRING:
- case REG_STAR_GLOB_STRING:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct binary_op);
- break;
- }
-
- /* unary */
- case BYTECODE_OP_UNARY_PLUS:
- case BYTECODE_OP_UNARY_MINUS:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_DOUBLE:
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_S64:
- case BYTECODE_OP_UNARY_MINUS_S64:
- case BYTECODE_OP_UNARY_NOT_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_DOUBLE:
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_BIT_NOT:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_UNKNOWN:
- case REG_S64:
- case REG_U64:
- break;
- case REG_DOUBLE:
- default:
- ERR("Unexpected register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_NOT_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- case BYTECODE_OP_UNARY_PLUS_DOUBLE:
- case BYTECODE_OP_UNARY_MINUS_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_DOUBLE:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct unary_op);
- break;
- }
-
- /* logical */
- case BYTECODE_OP_AND:
- case BYTECODE_OP_OR:
- {
- struct logical_op *insn = (struct logical_op *) pc;
- int merge_ret;
-
- /* Add merge point to table */
- merge_ret = merge_point_add_check(merge_points,
- insn->skip_offset, stack);
- if (merge_ret) {
- ret = merge_ret;
- goto end;
- }
-
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- /* There is always a cast-to-s64 operation before an or/and op. */
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- break;
- default:
- ERR("Incorrect register type %d for operation\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
-
- /* Continue to next instruction */
- /* Pop 1 when jump not taken */
- if (vstack_pop(stack)) {
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct logical_op);
- break;
- }
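-
- /*
- * Note the ordering above: the merge point records the stack before
- * the pop, because on the jump-taken path the and/or operand stays
- * on the stack as the result, while the fall-through path pops it
- * before evaluating the right-hand side.
- */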
-
- /* load field ref */
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- ERR("Unknown field ref type\n");
- ret = -EINVAL;
- goto end;
- }
- /* get context ref */
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_STRING:
- case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
- case BYTECODE_OP_GET_CONTEXT_REF_STRING:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_S64:
- case BYTECODE_OP_GET_CONTEXT_REF_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
- case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
- case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
- break;
- }
-
- /* load from immediate operand */
- case BYTECODE_OP_LOAD_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
- {
- struct load_op *insn = (struct load_op *) pc;
-
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
- next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
- break;
- }
-
- case BYTECODE_OP_LOAD_S64:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_numeric);
- break;
- }
-
- case BYTECODE_OP_LOAD_DOUBLE:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op)
- + sizeof(struct literal_double);
- break;
- }
-
- case BYTECODE_OP_CAST_TO_S64:
- case BYTECODE_OP_CAST_DOUBLE_TO_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- switch (vstack_ax(stack)->type) {
- case REG_S64:
- case REG_U64:
- case REG_DOUBLE:
- case REG_UNKNOWN:
- break;
- default:
- ERR("Incorrect register type %d for cast\n",
- (int) vstack_ax(stack)->type);
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct cast_op);
- break;
- }
- case BYTECODE_OP_CAST_NOP:
- {
- next_pc += sizeof(struct cast_op);
- break;
- }
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- case BYTECODE_OP_GET_CONTEXT_ROOT:
- case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
- case BYTECODE_OP_GET_PAYLOAD_ROOT:
- {
- if (vstack_push(stack)) {
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_PTR;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_UNKNOWN;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_S8:
- case BYTECODE_OP_LOAD_FIELD_S16:
- case BYTECODE_OP_LOAD_FIELD_S32:
- case BYTECODE_OP_LOAD_FIELD_S64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_S64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_U8:
- case BYTECODE_OP_LOAD_FIELD_U16:
- case BYTECODE_OP_LOAD_FIELD_U32:
- case BYTECODE_OP_LOAD_FIELD_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_U64;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_STRING:
- case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_STRING;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_LOAD_FIELD_DOUBLE:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- vstack_ax(stack)->type = REG_DOUBLE;
- next_pc += sizeof(struct load_op);
- break;
- }
-
- case BYTECODE_OP_GET_SYMBOL:
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U16:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
- break;
- }
-
- case BYTECODE_OP_GET_INDEX_U64:
- {
- /* Pop 1, push 1 */
- if (!vstack_ax(stack)) {
- ERR("Empty stack\n");
- ret = -EINVAL;
- goto end;
- }
- if (vstack_ax(stack)->type != REG_PTR) {
- ERR("Expecting pointer on top of stack\n");
- ret = -EINVAL;
- goto end;
- }
- next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
- break;
- }
-
- }
-end:
- *_next_pc = next_pc;
- return ret;
-}
-
-/*
- * Never called concurrently (hash seed is shared).
- */
-int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
-{
- struct lttng_ust_lfht *merge_points;
- char *pc, *next_pc, *start_pc;
- int ret = -EINVAL;
- struct vstack stack;
-
- vstack_init(&stack);
-
- if (!lttng_hash_seed_ready) {
- lttng_hash_seed = time(NULL);
- lttng_hash_seed_ready = 1;
- }
- /*
- * Note: merge_points hash table used by single thread, and
- * never concurrently resized. Therefore, we can use it without
- * holding RCU read-side lock and free nodes without using
- * call_rcu.
- */
- merge_points = lttng_ust_lfht_new(DEFAULT_NR_MERGE_POINTS,
- MIN_NR_BUCKETS, MAX_NR_BUCKETS,
- 0, NULL);
- if (!merge_points) {
- ERR("Error allocating hash table for bytecode validation\n");
- return -ENOMEM;
- }
- start_pc = &bytecode->code[0];
- for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
- pc = next_pc) {
- ret = bytecode_validate_overflow(bytecode, start_pc, pc);
- if (ret != 0) {
- if (ret == -ERANGE)
- ERR("Bytecode overflow\n");
- goto end;
- }
- dbg_printf("Validating op %s (%u)\n",
- lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
- (unsigned int) *(bytecode_opcode_t *) pc);
-
- /*
- * For each instruction, validate the current context
- * (traversal of entire execution flow), and validate
- * all merge points targeting this instruction.
- */
- ret = validate_instruction_all_contexts(bytecode, merge_points,
- &stack, start_pc, pc);
- if (ret)
- goto end;
- ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
- if (ret <= 0)
- goto end;
- }
-end:
- if (delete_all_nodes(merge_points)) {
- if (!ret) {
- ERR("Unexpected merge points\n");
- ret = -EINVAL;
- }
- }
- if (lttng_ust_lfht_destroy(merge_points)) {
- ERR("Error destroying hash table\n");
- }
- return ret;
-}
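
Editor's note: a minimal, self-contained sketch of the technique implemented by the validator above: simulate a typed stack over the bytecode before it is ever interpreted, so type errors are rejected at link time instead of in the tracing fast path. All toy_* names are hypothetical and not part of lttng-ust; merge points and branches are omitted for brevity.

#include <stdio.h>

enum toy_type { TOY_S64, TOY_DOUBLE };
enum toy_op { TOY_LOAD_S64, TOY_LOAD_DOUBLE, TOY_ADD_S64, TOY_RET };

static int toy_validate(const enum toy_op *code, int len)
{
	enum toy_type stack[16];
	int top = -1, pc;

	for (pc = 0; pc < len; pc++) {
		switch (code[pc]) {
		case TOY_LOAD_S64:
		case TOY_LOAD_DOUBLE:
			if (top >= 15)
				return -1;	/* stack overflow */
			stack[++top] = (code[pc] == TOY_LOAD_S64) ?
					TOY_S64 : TOY_DOUBLE;
			break;
		case TOY_ADD_S64:	/* pop 2, push 1 */
			if (top < 1 || stack[top] != TOY_S64
					|| stack[top - 1] != TOY_S64)
				return -1;	/* underflow or type error */
			top--;		/* result replaces the two operands */
			break;
		case TOY_RET:
			return top == 0 ? 0 : -1;	/* exactly one result */
		}
	}
	return -1;	/* fell off the end without TOY_RET */
}

int main(void)
{
	enum toy_op ok[] = { TOY_LOAD_S64, TOY_LOAD_S64, TOY_ADD_S64, TOY_RET };
	enum toy_op bad[] = { TOY_LOAD_S64, TOY_LOAD_DOUBLE, TOY_ADD_S64, TOY_RET };

	printf("ok: %d bad: %d\n", toy_validate(ok, 4), toy_validate(bad, 4));
	return 0;
}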
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode code.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-
-#include <urcu/rculist.h>
-
-#include "context-internal.h"
-#include "lttng-bytecode.h"
-#include "ust-events-internal.h"
-#include "ust-helper.h"
-
-static const char *opnames[] = {
- [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
-
- [ BYTECODE_OP_RETURN ] = "RETURN",
-
- /* binary */
- [ BYTECODE_OP_MUL ] = "MUL",
- [ BYTECODE_OP_DIV ] = "DIV",
- [ BYTECODE_OP_MOD ] = "MOD",
- [ BYTECODE_OP_PLUS ] = "PLUS",
- [ BYTECODE_OP_MINUS ] = "MINUS",
- [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
- [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
- [ BYTECODE_OP_BIT_AND ] = "BIT_AND",
- [ BYTECODE_OP_BIT_OR ] = "BIT_OR",
- [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",
-
- /* binary comparators */
- [ BYTECODE_OP_EQ ] = "EQ",
- [ BYTECODE_OP_NE ] = "NE",
- [ BYTECODE_OP_GT ] = "GT",
- [ BYTECODE_OP_LT ] = "LT",
- [ BYTECODE_OP_GE ] = "GE",
- [ BYTECODE_OP_LE ] = "LE",
-
- /* string binary comparators */
- [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
- [ BYTECODE_OP_NE_STRING ] = "NE_STRING",
- [ BYTECODE_OP_GT_STRING ] = "GT_STRING",
- [ BYTECODE_OP_LT_STRING ] = "LT_STRING",
- [ BYTECODE_OP_GE_STRING ] = "GE_STRING",
- [ BYTECODE_OP_LE_STRING ] = "LE_STRING",
-
- /* s64 binary comparators */
- [ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
- [ BYTECODE_OP_NE_S64 ] = "NE_S64",
- [ BYTECODE_OP_GT_S64 ] = "GT_S64",
- [ BYTECODE_OP_LT_S64 ] = "LT_S64",
- [ BYTECODE_OP_GE_S64 ] = "GE_S64",
- [ BYTECODE_OP_LE_S64 ] = "LE_S64",
-
- /* double binary comparators */
- [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
- [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
- [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
- [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
- [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
- [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",
-
- /* Mixed S64-double binary comparators */
- [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
- [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
- [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
- [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
- [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
- [ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
-
- [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
- [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
- [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
- [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
- [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
- [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
-
- /* unary */
- [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
- [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
- [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
- [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
- [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
- [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
- [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
- [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
- [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
-
- /* logical */
- [ BYTECODE_OP_AND ] = "AND",
- [ BYTECODE_OP_OR ] = "OR",
-
- /* load field ref */
- [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
- [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
- [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
- [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
- [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
-
- /* load from immediate operand */
- [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
- [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
- [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
-
- /* cast */
- [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
- [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
- [ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",
-
- /* get context ref */
- [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
- [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
- [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
- [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
-
- /* load userspace field ref */
- [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
- [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
-
- /*
- * load immediate star globbing pattern (literal string)
- * from immediate.
- */
- [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
-
- /* globbing pattern binary operator: apply to */
- [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
- [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
-
- /*
- * Instructions for recursive traversal through composed types.
- */
- [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
- [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
- [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
-
- [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
- [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
- [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
- [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
-
- [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
- [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
- [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
- [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
- [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
- [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
- [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
- [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
- [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
- [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
- [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
- [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
-
- [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
-
- [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
-};
-
-const char *lttng_bytecode_print_op(enum bytecode_op op)
-{
- if (op >= NR_BYTECODE_OPS)
- return "UNKNOWN";
- else
- return opnames[op];
-}
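
Editor's note: the opnames table relies on C99 designated initializers, which leave unlisted slots zero-initialized (NULL) and keep the table correct if opcodes are renumbered. A tiny standalone illustration (toy_* names hypothetical); the NULL guard here is extra belt-and-braces that the table above achieves by listing every opcode.

#include <stdio.h>

enum toy_opcode { TOY_OP_NOP = 0, TOY_OP_ADD = 3, TOY_OP_RET = 7, TOY_NR_OPS };

static const char *toy_opnames[] = {
	[TOY_OP_NOP] = "NOP",
	[TOY_OP_ADD] = "ADD",	/* slots 1-2 and 4-6 stay NULL */
	[TOY_OP_RET] = "RET",
};

static const char *toy_print_op(enum toy_opcode op)
{
	if (op >= TOY_NR_OPS || !toy_opnames[op])
		return "UNKNOWN";
	return toy_opnames[op];
}

int main(void)
{
	printf("%s %s\n", toy_print_op(TOY_OP_ADD),
		toy_print_op((enum toy_opcode) 5));	/* -> ADD UNKNOWN */
	return 0;
}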
-
-static
-int apply_field_reloc(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len __attribute__((unused)),
- uint32_t reloc_offset,
- const char *field_name,
- enum bytecode_op bytecode_op)
-{
- const struct lttng_ust_event_field **fields, *field = NULL;
- unsigned int nr_fields, i;
- struct load_op *op;
- uint32_t field_offset = 0;
-
- dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);
-
- /* Lookup event by name */
- if (!event_desc)
- return -EINVAL;
- fields = event_desc->fields;
- if (!fields)
- return -EINVAL;
- nr_fields = event_desc->nr_fields;
- for (i = 0; i < nr_fields; i++) {
- if (fields[i]->nofilter) {
- continue;
- }
- if (!strcmp(fields[i]->name, field_name)) {
- field = fields[i];
- break;
- }
- /* compute field offset */
- switch (fields[i]->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- field_offset += sizeof(int64_t);
- break;
- case lttng_ust_type_array:
- case lttng_ust_type_sequence:
- field_offset += sizeof(unsigned long);
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_string:
- field_offset += sizeof(void *);
- break;
- case lttng_ust_type_float:
- field_offset += sizeof(double);
- break;
- default:
- return -EINVAL;
- }
- }
- if (!field)
- return -EINVAL;
-
- /* Check if field offset is too large for 16-bit offset */
- if (field_offset > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* set type */
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (bytecode_op) {
- case BYTECODE_OP_LOAD_FIELD_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (field->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
- break;
- case lttng_ust_type_array:
- {
- struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) field->type;
-
- if (array->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- }
- case lttng_ust_type_sequence:
- {
- struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) field->type;
-
- if (sequence->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
- break;
- }
- case lttng_ust_type_string:
- op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
- break;
- case lttng_ust_type_float:
- op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE;
- break;
- default:
- return -EINVAL;
- }
- /* set offset */
- field_ref->offset = (uint16_t) field_offset;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_context_reloc(struct bytecode_runtime *runtime,
- uint32_t runtime_len __attribute__((unused)),
- uint32_t reloc_offset,
- const char *context_name,
- enum bytecode_op bytecode_op)
-{
- struct load_op *op;
- const struct lttng_ust_ctx_field *ctx_field;
- int idx;
- struct lttng_ust_ctx **pctx = runtime->p.pctx;
-
- dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
-
- /* Get context index */
- idx = lttng_get_context_index(*pctx, context_name);
- if (idx < 0) {
- if (lttng_context_is_app(context_name)) {
- int ret;
-
- ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
- pctx);
- if (ret)
- return ret;
- idx = lttng_get_context_index(*pctx, context_name);
- if (idx < 0)
- return -ENOENT;
- } else {
- return -ENOENT;
- }
- }
- /* Check if idx is too large for 16-bit offset */
- if (idx > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
- return -EINVAL;
-
- /* Get context return type */
- ctx_field = &(*pctx)->fields[idx];
- op = (struct load_op *) &runtime->code[reloc_offset];
-
- switch (bytecode_op) {
- case BYTECODE_OP_GET_CONTEXT_REF:
- {
- struct field_ref *field_ref;
-
- field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field->type->type) {
- case lttng_ust_type_integer:
- case lttng_ust_type_enum:
- op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
- break;
- /* Sequence and array supported only as string */
- case lttng_ust_type_array:
- {
- struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) ctx_field->event_field->type;
-
- if (array->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
- break;
- }
- case lttng_ust_type_sequence:
- {
- struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) ctx_field->event_field->type;
-
- if (sequence->encoding == lttng_ust_string_encoding_none)
- return -EINVAL;
- op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
- break;
- }
- case lttng_ust_type_string:
- op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
- break;
- case lttng_ust_type_float:
- op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE;
- break;
- case lttng_ust_type_dynamic:
- op->op = BYTECODE_OP_GET_CONTEXT_REF;
- break;
- default:
- return -EINVAL;
- }
- /* set offset to context index within channel contexts */
- field_ref->offset = (uint16_t) idx;
- break;
- }
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int apply_reloc(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *runtime,
- uint32_t runtime_len,
- uint32_t reloc_offset,
- const char *name)
-{
- struct load_op *op;
-
- dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);
-
- /* Ensure that the reloc is within the code */
- if (runtime_len - reloc_offset < sizeof(uint16_t))
- return -EINVAL;
-
- op = (struct load_op *) &runtime->code[reloc_offset];
- switch (op->op) {
- case BYTECODE_OP_LOAD_FIELD_REF:
- return apply_field_reloc(event_desc, runtime, runtime_len,
- reloc_offset, name, op->op);
- case BYTECODE_OP_GET_CONTEXT_REF:
- return apply_context_reloc(runtime, runtime_len,
- reloc_offset, name, op->op);
- case BYTECODE_OP_GET_SYMBOL:
- case BYTECODE_OP_GET_SYMBOL_FIELD:
- /*
- * Will be handled by load specialize phase or
- * dynamically by interpreter.
- */
- return 0;
- default:
- ERR("Unknown reloc op type %u\n", op->op);
- return -EINVAL;
- }
- return 0;
-}
-
-static
-int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
- struct cds_list_head *bytecode_runtime_head)
-{
- struct lttng_ust_bytecode_runtime *bc_runtime;
-
- cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
- if (bc_runtime->bc == bytecode)
- return 1;
- }
- return 0;
-}
-
-/*
- * Take a bytecode with reloc table and link it to an event to create a
- * bytecode runtime.
- */
-static
-int link_bytecode(const struct lttng_ust_event_desc *event_desc,
- struct lttng_ust_ctx **ctx,
- struct lttng_ust_bytecode_node *bytecode,
- struct cds_list_head *bytecode_runtime_head,
- struct cds_list_head *insert_loc)
-{
- int ret, offset, next_offset;
- struct bytecode_runtime *runtime = NULL;
- size_t runtime_alloc_len;
-
- if (!bytecode)
- return 0;
- /* Bytecode already linked */
- if (bytecode_is_linked(bytecode, bytecode_runtime_head))
- return 0;
-
- dbg_printf("Linking...\n");
-
- /* We don't need the reloc table in the runtime */
- runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
- runtime = zmalloc(runtime_alloc_len);
- if (!runtime) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- runtime->p.type = bytecode->type;
- runtime->p.bc = bytecode;
- runtime->p.pctx = ctx;
- runtime->len = bytecode->bc.reloc_offset;
- /* copy original bytecode */
- memcpy(runtime->code, bytecode->bc.data, runtime->len);
-	/*
-	 * Apply relocs. Each reloc entry is a uint16_t (offset within
-	 * the bytecode) followed by a NUL-terminated string (the field
-	 * name).
-	 */
- for (offset = bytecode->bc.reloc_offset;
- offset < bytecode->bc.len;
- offset = next_offset) {
- uint16_t reloc_offset =
- *(uint16_t *) &bytecode->bc.data[offset];
- const char *name =
- (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
-
- ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
- if (ret) {
- goto link_error;
- }
- next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
- }
- /* Validate bytecode */
- ret = lttng_bytecode_validate(runtime);
- if (ret) {
- goto link_error;
- }
- /* Specialize bytecode */
- ret = lttng_bytecode_specialize(event_desc, runtime);
- if (ret) {
- goto link_error;
- }
-
- runtime->p.interpreter_func = lttng_bytecode_interpret;
- runtime->p.link_failed = 0;
- cds_list_add_rcu(&runtime->p.node, insert_loc);
- dbg_printf("Linking successful.\n");
- return 0;
-
-link_error:
- runtime->p.interpreter_func = lttng_bytecode_interpret_error;
- runtime->p.link_failed = 1;
- cds_list_add_rcu(&runtime->p.node, insert_loc);
-alloc_error:
- dbg_printf("Linking failed.\n");
- return ret;
-}
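
Editor's note: a sketch of the reloc table layout consumed by the loop in link_bytecode() above: after bc.reloc_offset, the blob is a sequence of { uint16_t offset; char name[]; } records with NUL-terminated names. The parser below is illustrative only (toy_* names hypothetical); the printed values assume a little-endian host, matching how the runtime reads the u16 in place.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void toy_walk_relocs(const char *data, size_t reloc_offset, size_t len)
{
	size_t offset = reloc_offset;

	while (offset + sizeof(uint16_t) < len) {
		uint16_t target;
		const char *name = data + offset + sizeof(uint16_t);

		memcpy(&target, data + offset, sizeof(target));	/* may be unaligned */
		printf("reloc: patch offset %u, symbol \"%s\"\n", target, name);
		offset += sizeof(uint16_t) + strlen(name) + 1;
	}
}

int main(void)
{
	/* Two records: (0x0004, "ctx") and (0x0010, "field"). */
	const char blob[] = {
		0x04, 0x00, 'c', 't', 'x', '\0',
		0x10, 0x00, 'f', 'i', 'e', 'l', 'd', '\0',
	};

	toy_walk_relocs(blob, 0, sizeof(blob));
	return 0;
}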
-
-void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
-{
- struct lttng_ust_bytecode_node *bc = runtime->bc;
-
- if (!bc->enabler->enabled || runtime->link_failed)
- runtime->interpreter_func = lttng_bytecode_interpret_error;
- else
- runtime->interpreter_func = lttng_bytecode_interpret;
-}
-
-/*
- * Given the lists of bytecode programs of an instance (trigger or event) and
- * of a matching enabler, try to link all the enabler's bytecode programs with
- * the instance.
- *
- * This function is called after we have confirmed that the enabler and
- * the instance match by name (or by glob pattern).
- */
-void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
- struct lttng_ust_ctx **ctx,
- struct cds_list_head *instance_bytecode_head,
- struct cds_list_head *enabler_bytecode_head)
-{
- struct lttng_ust_bytecode_node *enabler_bc;
- struct lttng_ust_bytecode_runtime *runtime;
-
- assert(event_desc);
-
- /* Go over all the bytecode programs of the enabler. */
- cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
- int found = 0, ret;
- struct cds_list_head *insert_loc;
-
- /*
- * Check if the current enabler bytecode program is already
- * linked with the instance.
- */
- cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
- if (runtime->bc == enabler_bc) {
- found = 1;
- break;
- }
- }
-
- /*
- * Skip bytecode already linked, go to the next enabler
- * bytecode program.
- */
- if (found)
- continue;
-
- /*
- * Insert at specified priority (seqnum) in increasing
- * order. If there already is a bytecode of the same priority,
- * insert the new bytecode right after it.
- */
- cds_list_for_each_entry_reverse(runtime,
- instance_bytecode_head, node) {
- if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
- /* insert here */
- insert_loc = &runtime->node;
- goto add_within;
- }
- }
-
-		/* Add to head of list */
- insert_loc = instance_bytecode_head;
- add_within:
- dbg_printf("linking bytecode\n");
- ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
- if (ret) {
- dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
- }
- }
-}
-
-static
-void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
-{
- struct bytecode_runtime *runtime, *tmp;
-
- cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
- p.node) {
- free(runtime->data);
- free(runtime);
- }
-}
-
-void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
-{
- free_filter_runtime(&event->priv->filter_bytecode_runtime_head);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST bytecode header.
- */
-
-#ifndef _LTTNG_BYTECODE_H
-#define _LTTNG_BYTECODE_H
-
-#include <errno.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <ust-helper.h>
-#include <lttng/ust-events.h>
-#include <ust-context-provider.h>
-#include <stdint.h>
-#include <assert.h>
-#include <string.h>
-#include <inttypes.h>
-#include <limits.h>
-#include <usterr-signal-safe.h>
-#include "bytecode.h"
-#include "ust-events-internal.h"
-
-/* Interpreter stack length, in number of entries */
-#define INTERPRETER_STACK_LEN	10	/* includes 2 dummy entries */
-#define INTERPRETER_STACK_EMPTY 1
-
-#define BYTECODE_MAX_DATA_LEN 65536
-
-#ifndef min_t
-#define min_t(type, a, b) \
- ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
-#endif
-
-#ifndef likely
-#define likely(x) __builtin_expect(!!(x), 1)
-#endif
-
-#ifndef unlikely
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#endif
-
-#ifdef DEBUG
-#define dbg_printf(fmt, args...) \
- printf("[debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args)
-#else
-#define dbg_printf(fmt, args...) \
-do { \
- /* do nothing but check printf format */ \
- if (0) \
- printf("[debug bytecode in %s:%s@%u] " fmt, \
- __FILE__, __func__, __LINE__, ## args); \
-} while (0)
-#endif
-
-/* Linked bytecode. Child of struct lttng_ust_bytecode_runtime. */
-struct bytecode_runtime {
- struct lttng_ust_bytecode_runtime p;
- size_t data_len;
- size_t data_alloc_len;
- char *data;
- uint16_t len;
-	char code[];	/* flexible array member */
-};
-
-enum entry_type {
- REG_S64,
- REG_U64,
- REG_DOUBLE,
- REG_STRING,
- REG_STAR_GLOB_STRING,
- REG_UNKNOWN,
- REG_PTR,
-};
-
-enum load_type {
- LOAD_ROOT_CONTEXT,
- LOAD_ROOT_APP_CONTEXT,
- LOAD_ROOT_PAYLOAD,
- LOAD_OBJECT,
-};
-
-enum object_type {
- OBJECT_TYPE_S8,
- OBJECT_TYPE_S16,
- OBJECT_TYPE_S32,
- OBJECT_TYPE_S64,
- OBJECT_TYPE_U8,
- OBJECT_TYPE_U16,
- OBJECT_TYPE_U32,
- OBJECT_TYPE_U64,
-
- OBJECT_TYPE_SIGNED_ENUM,
- OBJECT_TYPE_UNSIGNED_ENUM,
-
- OBJECT_TYPE_DOUBLE,
- OBJECT_TYPE_STRING,
- OBJECT_TYPE_STRING_SEQUENCE,
-
- OBJECT_TYPE_SEQUENCE,
- OBJECT_TYPE_ARRAY,
- OBJECT_TYPE_STRUCT,
- OBJECT_TYPE_VARIANT,
-
- OBJECT_TYPE_DYNAMIC,
-};
-
-struct bytecode_get_index_data {
- uint64_t offset; /* in bytes */
- size_t ctx_index;
- size_t array_len;
- /*
- * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
- * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
- * interpreter needs to find it from the event fields and types to
- * support variants.
- */
- const struct lttng_ust_event_field *field;
- struct {
- size_t len;
- enum object_type type;
- bool rev_bo; /* reverse byte order */
- } elem;
-};
-
-/* Validation stack */
-struct vstack_load {
- enum load_type type;
- enum object_type object_type;
- const struct lttng_ust_event_field *field;
- bool rev_bo; /* reverse byte order */
-};
-
-struct vstack_entry {
- enum entry_type type;
- struct vstack_load load;
-};
-
-struct vstack {
- int top; /* top of stack */
- struct vstack_entry e[INTERPRETER_STACK_LEN];
-};
-
-static inline
-void vstack_init(struct vstack *stack)
-{
- stack->top = -1;
-}
-
-static inline
-struct vstack_entry *vstack_ax(struct vstack *stack)
-{
- if (unlikely(stack->top < 0))
- return NULL;
- return &stack->e[stack->top];
-}
-
-static inline
-struct vstack_entry *vstack_bx(struct vstack *stack)
-{
- if (unlikely(stack->top < 1))
- return NULL;
- return &stack->e[stack->top - 1];
-}
-
-static inline
-int vstack_push(struct vstack *stack)
-{
- if (stack->top >= INTERPRETER_STACK_LEN - 1) {
- ERR("Stack full\n");
- return -EINVAL;
- }
- ++stack->top;
- return 0;
-}
-
-static inline
-int vstack_pop(struct vstack *stack)
-{
- if (unlikely(stack->top < 0)) {
- ERR("Stack empty\n");
- return -EINVAL;
- }
- stack->top--;
- return 0;
-}
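
Editor's usage sketch for the validation-stack helpers above, showing the pop-2/push-1 shape that a binary s64 operation check takes in the validator. Assumes this header is included; the toy_ name is hypothetical.

static int toy_check_binary_s64(struct vstack *stack)
{
	/* A binary s64 operation consumes ax and bx, produces one s64. */
	if (!vstack_ax(stack) || !vstack_bx(stack))
		return -EINVAL;			/* need two operands */
	if (vstack_ax(stack)->type != REG_S64
			|| vstack_bx(stack)->type != REG_S64)
		return -EINVAL;			/* wrong operand types */
	if (vstack_pop(stack))
		return -EINVAL;
	vstack_ax(stack)->type = REG_S64;	/* result replaces the top */
	return 0;
}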
-
-/* Execution stack */
-enum estack_string_literal_type {
- ESTACK_STRING_LITERAL_TYPE_NONE,
- ESTACK_STRING_LITERAL_TYPE_PLAIN,
- ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
-};
-
-struct load_ptr {
- enum load_type type;
- enum object_type object_type;
- const void *ptr;
- size_t nr_elem;
- bool rev_bo;
- /* Temporary place-holders for contexts. */
- union {
- int64_t s64;
- uint64_t u64;
- double d;
- } u;
- const struct lttng_ust_event_field *field;
-};
-
-struct estack_entry {
- enum entry_type type; /* For dynamic typing. */
- union {
- int64_t v;
- double d;
-
- struct {
- const char *str;
- size_t seq_len;
- enum estack_string_literal_type literal_type;
- } s;
- struct load_ptr ptr;
- } u;
-};
-
-struct estack {
- int top; /* top of stack */
- struct estack_entry e[INTERPRETER_STACK_LEN];
-};
-
-/*
- * Always use aliased type for ax/bx (top of stack).
- * When ax/bx are S64, use aliased value.
- */
-#define estack_ax_v ax
-#define estack_bx_v bx
-#define estack_ax_t ax_t
-#define estack_bx_t bx_t
-
-/*
- * ax and bx registers can hold either integer, double or string.
- */
-#define estack_ax(stack, top) \
- ({ \
- assert((top) > INTERPRETER_STACK_EMPTY); \
- &(stack)->e[top]; \
- })
-
-#define estack_bx(stack, top) \
- ({ \
- assert((top) > INTERPRETER_STACK_EMPTY + 1); \
- &(stack)->e[(top) - 1]; \
- })
-
-/*
- * Currently, only integers (REG_S64) can be pushed onto the stack.
- */
-#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- assert((top) < INTERPRETER_STACK_LEN - 1); \
- (stack)->e[(top) - 1].u.v = (bx); \
- (stack)->e[(top) - 1].type = (bx_t); \
- (bx) = (ax); \
- (bx_t) = (ax_t); \
- ++(top); \
- } while (0)
-
-#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
- do { \
- assert((top) > INTERPRETER_STACK_EMPTY); \
- (ax) = (bx); \
- (ax_t) = (bx_t); \
- (bx) = (stack)->e[(top) - 2].u.v; \
- (bx_t) = (stack)->e[(top) - 2].type; \
- (top)--; \
- } while (0)
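
Editor's sketch of the register-caching idea behind the estack macros above: the interpreter keeps the top two stack entries in local variables (the ax/bx aliases), so hot-path operations only touch memory when a push spills the old second entry or a pop refills it. Toy integer-only version without the dummy bottom entries (toy_* names hypothetical).

#include <assert.h>
#include <stdio.h>

#define TOY_STACK_LEN 16

static long toy_mem[TOY_STACK_LEN];	/* entries below the cached pair */
static long toy_ax, toy_bx;		/* top and second-from-top */
static int toy_top = -1;		/* index of toy_ax */

static void toy_push(long v)
{
	assert(toy_top < TOY_STACK_LEN - 1);
	if (toy_top >= 1)
		toy_mem[toy_top - 1] = toy_bx;	/* spill old second entry */
	toy_bx = toy_ax;
	toy_ax = v;
	toy_top++;
}

static long toy_pop(void)
{
	long v = toy_ax;

	assert(toy_top >= 0);
	toy_ax = toy_bx;
	if (toy_top >= 2)
		toy_bx = toy_mem[toy_top - 2];	/* refill from memory */
	toy_top--;
	return v;
}

int main(void)
{
	long a, b, c;

	toy_push(1);
	toy_push(2);
	toy_push(3);
	a = toy_pop();
	b = toy_pop();
	c = toy_pop();
	printf("%ld %ld %ld\n", a, b, c);	/* 3 2 1 */
	return 0;
}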
-
-enum lttng_interpreter_type {
- LTTNG_INTERPRETER_TYPE_S64,
- LTTNG_INTERPRETER_TYPE_U64,
- LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
- LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
- LTTNG_INTERPRETER_TYPE_DOUBLE,
- LTTNG_INTERPRETER_TYPE_STRING,
- LTTNG_INTERPRETER_TYPE_SEQUENCE,
-};
-
-/*
- * Represents the output parameter of the lttng interpreter.
- * Currently capturable field classes are integer, double, string and
- * sequence of integers.
- */
-struct lttng_interpreter_output {
- enum lttng_interpreter_type type;
- union {
- int64_t s;
- uint64_t u;
- double d;
-
- struct {
- const char *str;
- size_t len;
- } str;
- struct {
- const void *ptr;
- size_t nr_elem;
-
- /* Inner type. */
- const struct lttng_ust_type_common *nested_type;
- } sequence;
- } u;
-};
-
-const char *lttng_bytecode_print_op(enum bytecode_op op)
- __attribute__((visibility("hidden")));
-
-void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
- struct bytecode_runtime *bytecode)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_interpret_error(struct lttng_ust_bytecode_runtime *bytecode_runtime,
- const char *stack_data,
- void *ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *bytecode_runtime,
- const char *stack_data,
- void *ctx)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_BYTECODE_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <error.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <stdio.h>
-#include <usterr-signal-safe.h>
-#include <lttng/ust-clock.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-
-#include "clock.h"
-#include "getenv.h"
-
-struct lttng_ust_trace_clock *lttng_ust_trace_clock;
-
-static
-struct lttng_ust_trace_clock user_tc;
-
-static
-void *clock_handle;
-
-static
-uint64_t trace_clock_freq_monotonic(void)
-{
- return 1000000000ULL;
-}
-
-static
-int trace_clock_uuid_monotonic(char *uuid)
-{
- int ret = 0;
- size_t len;
- FILE *fp;
-
- /*
- * boot_id needs to be read once before being used concurrently
- * to deal with a Linux kernel race. A fix is proposed for
- * upstream, but the work-around is needed for older kernels.
- */
- fp = fopen("/proc/sys/kernel/random/boot_id", "r");
- if (!fp) {
- return -ENOENT;
- }
- len = fread(uuid, 1, LTTNG_UST_UUID_STR_LEN - 1, fp);
- if (len < LTTNG_UST_UUID_STR_LEN - 1) {
- ret = -EINVAL;
- goto end;
- }
- uuid[LTTNG_UST_UUID_STR_LEN - 1] = '\0';
-end:
- fclose(fp);
- return ret;
-}
-
-static
-const char *trace_clock_name_monotonic(void)
-{
- return "monotonic";
-}
-
-static
-const char *trace_clock_description_monotonic(void)
-{
- return "Monotonic Clock";
-}
-
-int lttng_ust_trace_clock_set_read64_cb(lttng_ust_clock_read64_function read64_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.read64 = read64_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_read64_cb(lttng_ust_clock_read64_function *read64_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *read64_cb = &trace_clock_read64_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *read64_cb = ltc->read64;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_freq_cb(lttng_ust_clock_freq_function freq_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.freq = freq_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_freq_cb(lttng_ust_clock_freq_function *freq_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *freq_cb = &trace_clock_freq_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *freq_cb = ltc->freq;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_uuid_cb(lttng_ust_clock_uuid_function uuid_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.uuid = uuid_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_uuid_cb(lttng_ust_clock_uuid_function *uuid_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *uuid_cb = &trace_clock_uuid_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *uuid_cb = ltc->uuid;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_name_cb(lttng_ust_clock_name_function name_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.name = name_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_name_cb(lttng_ust_clock_name_function *name_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *name_cb = &trace_clock_name_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *name_cb = ltc->name;
- }
- return 0;
-}
-
-int lttng_ust_trace_clock_set_description_cb(lttng_ust_clock_description_function description_cb)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- user_tc.description = description_cb;
- return 0;
-}
-
-int lttng_ust_trace_clock_get_description_cb(lttng_ust_clock_description_function *description_cb)
-{
- struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
-
- if (caa_likely(!ltc)) {
- *description_cb = &trace_clock_description_monotonic;
- } else {
- cmm_read_barrier_depends(); /* load ltc before content */
- *description_cb = ltc->description;
- }
- return 0;
-}
-
-int lttng_ust_enable_trace_clock_override(void)
-{
- if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
- return -EBUSY;
- if (!user_tc.read64)
- return -EINVAL;
- if (!user_tc.freq)
- return -EINVAL;
- if (!user_tc.name)
- return -EINVAL;
- if (!user_tc.description)
- return -EINVAL;
- /* Use default uuid cb when NULL */
- cmm_smp_mb(); /* Store callbacks before trace clock */
- CMM_STORE_SHARED(lttng_ust_trace_clock, &user_tc);
- return 0;
-}
-
-void lttng_ust_clock_init(void)
-{
- const char *libname;
- void (*libinit)(void);
-
- if (clock_handle)
- return;
- libname = lttng_ust_getenv("LTTNG_UST_CLOCK_PLUGIN");
- if (!libname)
- return;
- clock_handle = dlopen(libname, RTLD_NOW);
- if (!clock_handle) {
- PERROR("Cannot load LTTng UST clock override library %s",
- libname);
- return;
- }
- dlerror();
- libinit = (void (*)(void)) dlsym(clock_handle,
- "lttng_ust_clock_plugin_init");
- if (!libinit) {
- PERROR("Cannot find LTTng UST clock override library %s initialization function lttng_ust_clock_plugin_init()",
- libname);
- return;
- }
- libinit();
-}
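
Editor's note: the plugin loaded through LTTNG_UST_CLOCK_PLUGIN above is expected to export lttng_ust_clock_plugin_init() and to register its callbacks through the setter API defined in this file before enabling the override. A minimal sketch follows; the clock source and the names are illustrative, and error handling is reduced to early returns.

#include <lttng/ust-clock.h>
#include <stdint.h>
#include <time.h>

static uint64_t plugin_read64(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
	return (uint64_t) ts.tv_sec * 1000000000ULL + (uint64_t) ts.tv_nsec;
}

static uint64_t plugin_freq(void)
{
	return 1000000000ULL;	/* nanosecond resolution */
}

static const char *plugin_name(void)
{
	return "toy_monotonic_raw";
}

static const char *plugin_description(void)
{
	return "Example monotonic raw clock override";
}

void lttng_ust_clock_plugin_init(void)
{
	/* Register callbacks, then flip the override switch. */
	if (lttng_ust_trace_clock_set_read64_cb(plugin_read64))
		return;
	if (lttng_ust_trace_clock_set_freq_cb(plugin_freq))
		return;
	if (lttng_ust_trace_clock_set_name_cb(plugin_name))
		return;
	if (lttng_ust_trace_clock_set_description_cb(plugin_description))
		return;
	lttng_ust_enable_trace_clock_override();	/* uuid cb is optional */
}

Built as a shared object, such a plugin would be activated with something like LTTNG_UST_CLOCK_PLUGIN=./libtoyclock.so in the traced application's environment.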
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST cgroup namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <ust-tid.h>
-#include <urcu/tls-compat.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_cgroup_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_cgroup_ns(void)
-{
- struct stat sb;
- ino_t cgroup_ns;
-
- cgroup_ns = CMM_LOAD_SHARED(URCU_TLS(cached_cgroup_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(cgroup_ns != NS_INO_UNINITIALIZED))
- return cgroup_ns;
-
-	/*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
-	 * number from the proc filesystem, this is the value we will
-	 * cache.
-	 */
- cgroup_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/cgroup", &sb) == 0) {
- cgroup_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/cgroup",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- cgroup_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), cgroup_ns);
-
- return cgroup_ns;
-}
-
-/*
- * The cgroup namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWCGROUP
- * * setns(2) called with the fd of a different cgroup ns
- * * unshare(2) called with CLONE_NEWCGROUP
- */
-void lttng_context_cgroup_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), NS_INO_UNINITIALIZED);
-}
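
Editor's sketch of the caching pattern above, reduced to its essentials: a sentinel "uninitialized" value, a one-shot stat(2) with an "unavailable" fallback, and an explicit reset whenever the namespace may have changed. Toy single-threaded version without the URCU TLS and visibility machinery; the toy_* names and sentinel values are hypothetical.

#include <sys/stat.h>
#include <stdio.h>

#define TOY_INO_UNINITIALIZED	((ino_t) -1)
#define TOY_INO_UNAVAILABLE	((ino_t) 0)

static ino_t toy_cached_ns = TOY_INO_UNINITIALIZED;

static ino_t toy_get_ns(const char *path)
{
	struct stat sb;

	if (toy_cached_ns != TOY_INO_UNINITIALIZED)
		return toy_cached_ns;		/* fast path: cached */
	toy_cached_ns = TOY_INO_UNAVAILABLE;	/* default if stat fails */
	if (stat(path, &sb) == 0)
		toy_cached_ns = sb.st_ino;
	return toy_cached_ns;
}

static void toy_reset_ns(void)	/* call after clone/setns/unshare */
{
	toy_cached_ns = TOY_INO_UNINITIALIZED;
}

int main(void)
{
	printf("cgroup ns inode: %lu\n",
		(unsigned long) toy_get_ns("/proc/self/ns/cgroup"));
	toy_reset_ns();
	return 0;
}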
-
-static
-size_t cgroup_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void cgroup_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t cgroup_ns;
-
- cgroup_ns = get_cgroup_ns();
- chan->ops->event_write(ctx, &cgroup_ns, sizeof(cgroup_ns),
- lttng_ust_rb_alignof(cgroup_ns));
-}
-
-static
-void cgroup_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_cgroup_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("cgroup_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- cgroup_ns_get_size,
- cgroup_ns_record,
- cgroup_ns_get_value,
- NULL, NULL);
-
-int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_cgroup_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_cgroup_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST CPU id context.
- *
- * Note: threads can be migrated at any point while executing the
- * tracepoint probe. This means the CPU id field (and filter) is only
- * statistical. For instance, even though a user might select a
- * cpu_id==1 filter, there may be few events recorded into the channel
- * appearing from other CPUs, due to migration.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <limits.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include "../libringbuffer/getcpu.h"
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-static
-size_t cpu_id_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int));
- size += sizeof(int);
- return size;
-}
-
-static
-void cpu_id_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- int cpu;
-
- cpu = lttng_ust_get_cpu();
- chan->ops->event_write(ctx, &cpu, sizeof(cpu), lttng_ust_rb_alignof(cpu));
-}
-
-static
-void cpu_id_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = lttng_ust_get_cpu();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("cpu_id",
- lttng_ust_static_type_integer(sizeof(int) * CHAR_BIT,
- lttng_ust_rb_alignof(int) * CHAR_BIT,
- lttng_ust_is_signed_type(int),
- BYTE_ORDER, 10),
- false, false),
- cpu_id_get_size,
- cpu_id_record,
- cpu_id_get_value,
- NULL, NULL);
-
-int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST Instruction Pointer Context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-static
-size_t ip_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(void *));
- size += sizeof(void *);
- return size;
-}
-
-static
-void ip_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- void *ip;
-
- ip = ctx->ip;
- chan->ops->event_write(ctx, &ip, sizeof(ip), lttng_ust_rb_alignof(ip));
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("ip",
- lttng_ust_static_type_integer(sizeof(void *) * CHAR_BIT,
- lttng_ust_rb_alignof(void *) * CHAR_BIT,
- lttng_ust_is_signed_type(void *),
- BYTE_ORDER, 10),
- false, false),
- ip_get_size,
- ip_record,
- NULL, NULL, NULL);
-
-int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST ipc namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <ust-tid.h>
-#include <urcu/tls-compat.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_ipc_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_ipc_ns(void)
-{
- struct stat sb;
- ino_t ipc_ns;
-
- ipc_ns = CMM_LOAD_SHARED(URCU_TLS(cached_ipc_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(ipc_ns != NS_INO_UNINITIALIZED))
- return ipc_ns;
-
-	/*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
-	 * number from the proc filesystem, this is the value we will
-	 * cache.
-	 */
- ipc_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/ipc", &sb) == 0) {
- ipc_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/ipc",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- ipc_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), ipc_ns);
-
- return ipc_ns;
-}
-
-/*
- * The ipc namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWIPC
- * * setns(2) called with the fd of a different ipc ns
- * * unshare(2) called with CLONE_NEWIPC
- */
-void lttng_context_ipc_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t ipc_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void ipc_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t ipc_ns;
-
- ipc_ns = get_ipc_ns();
- chan->ops->event_write(ctx, &ipc_ns, sizeof(ipc_ns), lttng_ust_rb_alignof(ipc_ns));
-}
-
-static
-void ipc_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_ipc_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("ipc_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- ipc_ns_get_size,
- ipc_ns_record,
- ipc_ns_get_value,
- NULL, NULL);
-
-int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_ipc_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_ipc_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST mnt namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event. The mount namespace is global to the process.
- */
-static ino_t cached_mnt_ns = NS_INO_UNINITIALIZED;
-
-static
-ino_t get_mnt_ns(void)
-{
- struct stat sb;
- ino_t mnt_ns;
-
- mnt_ns = CMM_LOAD_SHARED(cached_mnt_ns);
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(mnt_ns != NS_INO_UNINITIALIZED))
- return mnt_ns;
-
-	/*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
-	 * number from the proc filesystem, this is the value we will
-	 * cache.
-	 */
- mnt_ns = NS_INO_UNAVAILABLE;
-
- if (stat("/proc/self/ns/mnt", &sb) == 0) {
- mnt_ns = sb.st_ino;
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(cached_mnt_ns, mnt_ns);
-
- return mnt_ns;
-}
-
-/*
- * The mnt namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWNS
- * * setns(2) called with the fd of a different mnt ns
- * * unshare(2) called with CLONE_NEWNS
- */
-void lttng_context_mnt_ns_reset(void)
-{
- CMM_STORE_SHARED(cached_mnt_ns, NS_INO_UNINITIALIZED);
-}
-
-static
-size_t mnt_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void mnt_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t mnt_ns;
-
- mnt_ns = get_mnt_ns();
- chan->ops->event_write(ctx, &mnt_ns, sizeof(mnt_ns), lttng_ust_rb_alignof(mnt_ns));
-}
-
-static
-void mnt_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_mnt_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("mnt_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- mnt_ns_get_size,
- mnt_ns_record,
- mnt_ns_get_value,
- NULL, NULL);
-
-int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST net namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <ust-tid.h>
-#include <urcu/tls-compat.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_net_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_net_ns(void)
-{
- struct stat sb;
- ino_t net_ns;
-
- net_ns = CMM_LOAD_SHARED(URCU_TLS(cached_net_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(net_ns != NS_INO_UNINITIALIZED))
- return net_ns;
-
-	/*
-	 * At this point we have to populate the cache. Set the initial
-	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
-	 * number from the proc filesystem, this is the value we will
-	 * cache.
-	 */
- net_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/net", &sb) == 0) {
- net_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/net",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- net_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_net_ns), net_ns);
-
- return net_ns;
-}
-
-/*
- * The net namespace can change for 3 reasons
- * * clone(2) called with CLONE_NEWNET
- * * setns(2) called with the fd of a different net ns
- * * unshare(2) called with CLONE_NEWNET
- */
-void lttng_context_net_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_net_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t net_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void net_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t net_ns;
-
- net_ns = get_net_ns();
- chan->ops->event_write(ctx, &net_ns, sizeof(net_ns), lttng_ust_rb_alignof(net_ns));
-}
-
-static
-void net_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_net_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("net_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- net_ns_get_size,
- net_ns_record,
- net_ns_get_value,
- NULL, NULL);
-
-int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_net_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_net_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST performance monitoring counters (perf-counters) integration.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <string.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <stdbool.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/mman.h>
-#include <sys/syscall.h>
-#include <lttng/ust-arch.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-#include <urcu/rculist.h>
-#include <ust-helper.h>
-#include <urcu/ref.h>
-#include <usterr-signal-safe.h>
-#include <signal.h>
-#include <urcu/tls-compat.h>
-#include "perf_event.h"
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ust-events-internal.h"
-
-/*
- * We use a global perf counter key and iterate on per-thread RCU lists
- * of fields in the fast path, even though this is not strictly speaking
- * what would provide the best fast-path complexity, to ensure teardown
- * of sessions vs thread exit is handled racelessly.
- *
- * Updates and traversals of thread_list are protected by UST lock.
- * Updates to rcu_field_list are protected by UST lock.
- */
-
-struct lttng_perf_counter_thread_field {
- struct lttng_perf_counter_field *field; /* Back reference */
- struct perf_event_mmap_page *pc;
- struct cds_list_head thread_field_node; /* Per-field list of thread fields (node) */
- struct cds_list_head rcu_field_node; /* RCU per-thread list of fields (node) */
- int fd; /* Perf FD */
-};
-
-struct lttng_perf_counter_thread {
- struct cds_list_head rcu_field_list; /* RCU per-thread list of fields */
-};
-
-struct lttng_perf_counter_field {
- struct perf_event_attr attr;
- struct cds_list_head thread_field_list; /* Per-field list of thread fields */
- char *name;
-};
-
-static pthread_key_t perf_counter_key;
-
-/*
- * lttng_perf_lock - Protect lttng-ust perf counter data structures
- *
- * Nests within the ust_lock, and therefore within the libc dl lock.
- * Therefore, we need to fix up the TLS before nesting into this lock.
- * Nests inside RCU bp read-side lock. Protects against concurrent
- * fork.
- */
-static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
- * restored on unlock. Protected by ust_perf_mutex.
- */
-static int ust_perf_saved_cancelstate;
-
-/*
- * Track whether we are tracing from a signal handler nested on an
- * application thread.
- */
-static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_ust_fixup_perf_counter_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
-}
-
-void lttng_perf_lock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_perf_mutex_nest)++) {
- /*
-	 * Ensure the compiler doesn't move the store after the close()
-	 * call, in case close() is marked as a leaf function.
- */
- cmm_barrier();
- pthread_mutex_lock(&ust_perf_mutex);
- ust_perf_saved_cancelstate = oldstate;
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-}
-
-void lttng_perf_unlock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, newstate, oldstate;
- bool restore_cancel = false;
-
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- /*
- * Ensure the compiler doesn't move the store before the close()
- * call, in case close() is marked as a leaf function.
- */
- cmm_barrier();
- if (!--URCU_TLS(ust_perf_mutex_nest)) {
- newstate = ust_perf_saved_cancelstate;
- restore_cancel = true;
- pthread_mutex_unlock(&ust_perf_mutex);
- }
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (restore_cancel) {
- ret = pthread_setcancelstate(newstate, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- }
-}
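Taken together, lttng_perf_lock()/lttng_perf_unlock() implement a signal-aware nesting lock: all signals are blocked while the per-thread nesting counter changes, and the mutex is acquired or released only at the outermost level, so a probe fired from a signal handler can nest without deadlocking. A minimal sketch of the same pattern, with hypothetical nested_lock()/nested_unlock() helpers and without the cancel-state and compiler-barrier details of the real code:

```c
#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_nest;	/* per-thread nesting count */

static void nested_lock(void)
{
	sigset_t all, orig;

	/* Block signals so a handler cannot observe a half-taken lock. */
	sigfillset(&all);
	pthread_sigmask(SIG_SETMASK, &all, &orig);
	if (!lock_nest++)
		pthread_mutex_lock(&lock);	/* outermost level only */
	pthread_sigmask(SIG_SETMASK, &orig, NULL);
}

static void nested_unlock(void)
{
	sigset_t all, orig;

	sigfillset(&all);
	pthread_sigmask(SIG_SETMASK, &all, &orig);
	if (!--lock_nest)
		pthread_mutex_unlock(&lock);	/* leaving outermost level */
	pthread_sigmask(SIG_SETMASK, &orig, NULL);
}

int main(void)
{
	nested_lock();
	nested_lock();	/* re-entry from the same thread does not deadlock */
	puts("in critical section");
	nested_unlock();
	nested_unlock();
	return 0;
}
```

Blocking signals before touching the counter is what keeps the counter and the mutex state consistent from the point of view of any handler that interrupts this thread.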
-
-static
-size_t perf_counter_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- size += sizeof(uint64_t);
- return size;
-}
-
-static
-uint64_t read_perf_counter_syscall(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- uint64_t count;
-
- if (caa_unlikely(thread_field->fd < 0))
- return 0;
-
- if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
- < sizeof(count)))
- return 0;
-
- return count;
-}
-
-#if defined(LTTNG_UST_ARCH_X86)
-
-static
-uint64_t rdpmc(unsigned int counter)
-{
- unsigned int low, high;
-
- asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
-
- return low | ((uint64_t) high) << 32;
-}
-
-static
-bool has_rdpmc(struct perf_event_mmap_page *pc)
-{
- if (caa_unlikely(!pc->cap_bit0_is_deprecated))
- return false;
- /* Since Linux kernel 3.12. */
- return pc->cap_user_rdpmc;
-}
-
-static
-uint64_t arch_read_perf_counter(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- uint32_t seq, idx;
- uint64_t count;
- struct perf_event_mmap_page *pc = thread_field->pc;
-
- if (caa_unlikely(!pc))
- return 0;
-
- do {
- seq = CMM_LOAD_SHARED(pc->lock);
- cmm_barrier();
-
- idx = pc->index;
- if (caa_likely(has_rdpmc(pc) && idx)) {
- int64_t pmcval;
-
- pmcval = rdpmc(idx - 1);
- /* Sign-extend the pmc register result. */
- pmcval <<= 64 - pc->pmc_width;
- pmcval >>= 64 - pc->pmc_width;
- count = pc->offset + pmcval;
- } else {
- /* Fall back on the system call if rdpmc cannot be used. */
- return read_perf_counter_syscall(thread_field);
- }
- cmm_barrier();
- } while (CMM_LOAD_SHARED(pc->lock) != seq);
-
- return count;
-}
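The two shifts above sign-extend the pmc_width-bit register sample to 64 bits before it is added to pc->offset. A self-contained illustration of the trick (sign_extend() is a hypothetical helper; it assumes the usual arithmetic right shift on signed integers, which mainstream compilers implement):

```c
#include <stdint.h>
#include <stdio.h>

/* Sign-extend a raw 'width'-bit counter sample to 64 bits. */
static int64_t sign_extend(uint64_t raw, unsigned int width)
{
	/* The left shift on the unsigned value is well defined; the
	 * arithmetic right shift on the signed result propagates the
	 * counter's sign bit back down from bit 63. */
	return ((int64_t) (raw << (64 - width))) >> (64 - width);
}

int main(void)
{
	/* A 48-bit counter whose value wrapped into negative territory. */
	printf("%lld\n", (long long) sign_extend(0xFFFFFFFFFFFFULL, 48)); /* -1 */
	printf("%lld\n", (long long) sign_extend(0x000000000001ULL, 48)); /*  1 */
	return 0;
}
```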
-
-static
-int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
-{
- struct perf_event_mmap_page *pc = thread_field->pc;
-
- if (!pc)
- return 0;
- return !has_rdpmc(pc);
-}
-
-#else
-
-/* Generic (slow) implementation using a read system call. */
-static
-uint64_t arch_read_perf_counter(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- return read_perf_counter_syscall(thread_field);
-}
-
-static
-int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
-{
- return 1;
-}
-
-#endif
-
-static
-int sys_perf_event_open(struct perf_event_attr *attr,
- pid_t pid, int cpu, int group_fd,
- unsigned long flags)
-{
- return syscall(SYS_perf_event_open, attr, pid, cpu,
- group_fd, flags);
-}
-
-static
-int open_perf_fd(struct perf_event_attr *attr)
-{
- int fd;
-
- fd = sys_perf_event_open(attr, 0, -1, -1, 0);
- if (fd < 0)
- return -1;
-
- return fd;
-}
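perf_event_open(2) has no libc wrapper, hence the raw syscall(2) above. As a standalone illustration (a hypothetical demo program, assuming a kernel with perf events enabled and a permissive perf_event_paranoid setting), the same call can open a per-thread hardware counter whose value is then fetched with read(2), the slow path used by read_perf_counter_syscall():

```c
#define _GNU_SOURCE
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.exclude_kernel = 1;	/* count user space only */

	/* pid = 0, cpu = -1: this thread, on any CPU, as in open_perf_fd(). */
	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	if (read(fd, &count, sizeof(count)) == (ssize_t) sizeof(count))
		printf("instructions retired so far: %llu\n",
			(unsigned long long) count);
	close(fd);
	return 0;
}
```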
-
-static
-void close_perf_fd(int fd)
-{
- int ret;
-
- if (fd < 0)
- return;
-
- ret = close(fd);
- if (ret) {
- perror("Error closing LTTng-UST perf memory mapping FD");
- }
-}
-
-static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
-{
- void *perf_addr;
-
- perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
- PROT_READ, MAP_SHARED, thread_field->fd, 0);
- if (perf_addr == MAP_FAILED)
- perf_addr = NULL;
- thread_field->pc = perf_addr;
-
- if (!arch_perf_keep_fd(thread_field)) {
- close_perf_fd(thread_field->fd);
- thread_field->fd = -1;
- }
-}
-
-static
-void unmap_perf_page(struct perf_event_mmap_page *pc)
-{
- int ret;
-
- if (!pc)
- return;
- ret = munmap(pc, sizeof(struct perf_event_mmap_page));
- if (ret < 0) {
- PERROR("Error in munmap");
- abort();
- }
-}
-
-static
-struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
-{
- struct lttng_perf_counter_thread *perf_thread;
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
- /* Check again with signals disabled */
- perf_thread = pthread_getspecific(perf_counter_key);
- if (perf_thread)
- goto skip;
- perf_thread = zmalloc(sizeof(*perf_thread));
- if (!perf_thread)
- abort();
- CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
- ret = pthread_setspecific(perf_counter_key, perf_thread);
- if (ret)
- abort();
-skip:
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
- return perf_thread;
-}
-
-static
-struct lttng_perf_counter_thread_field *
- add_thread_field(struct lttng_perf_counter_field *perf_field,
- struct lttng_perf_counter_thread *perf_thread)
-{
- struct lttng_perf_counter_thread_field *thread_field;
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
- /* Check again with signals disabled */
- cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
- rcu_field_node) {
- if (thread_field->field == perf_field)
- goto skip;
- }
- thread_field = zmalloc(sizeof(*thread_field));
- if (!thread_field)
- abort();
- thread_field->field = perf_field;
- thread_field->fd = open_perf_fd(&perf_field->attr);
- if (thread_field->fd >= 0)
- setup_perf(thread_field);
- /*
- * Note: thread_field->pc can be NULL if setup_perf() fails.
- * Also, thread_field->fd can be -1 if open_perf_fd() fails.
- */
- lttng_perf_lock();
- cds_list_add_rcu(&thread_field->rcu_field_node,
- &perf_thread->rcu_field_list);
- cds_list_add(&thread_field->thread_field_node,
- &perf_field->thread_field_list);
- lttng_perf_unlock();
-skip:
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
- return thread_field;
-}
-
-static
-struct lttng_perf_counter_thread_field *
- get_thread_field(struct lttng_perf_counter_field *field)
-{
- struct lttng_perf_counter_thread *perf_thread;
- struct lttng_perf_counter_thread_field *thread_field;
-
- perf_thread = pthread_getspecific(perf_counter_key);
- if (!perf_thread)
- perf_thread = alloc_perf_counter_thread();
- cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
- rcu_field_node) {
- if (thread_field->field == field)
- return thread_field;
- }
- /* perf_counter_thread_field not found, need to add one */
- return add_thread_field(field, perf_thread);
-}
-
-static
-uint64_t wrapper_perf_counter_read(void *priv)
-{
- struct lttng_perf_counter_field *perf_field;
- struct lttng_perf_counter_thread_field *perf_thread_field;
-
- perf_field = (struct lttng_perf_counter_field *) priv;
- perf_thread_field = get_thread_field(perf_field);
- return arch_read_perf_counter(perf_thread_field);
-}
-
-static
-void perf_counter_record(void *priv,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uint64_t value;
-
- value = wrapper_perf_counter_read(priv);
- chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
-}
-
-static
-void perf_counter_get_value(void *priv,
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = wrapper_perf_counter_read(priv);
-}
-
-/* Called with perf lock held */
-static
-void lttng_destroy_perf_thread_field(
- struct lttng_perf_counter_thread_field *thread_field)
-{
- close_perf_fd(thread_field->fd);
- unmap_perf_page(thread_field->pc);
- cds_list_del_rcu(&thread_field->rcu_field_node);
- cds_list_del(&thread_field->thread_field_node);
- free(thread_field);
-}
-
-static
-void lttng_destroy_perf_thread_key(void *_key)
-{
- struct lttng_perf_counter_thread *perf_thread = _key;
- struct lttng_perf_counter_thread_field *pos, *p;
-
- lttng_perf_lock();
- cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
- rcu_field_node)
- lttng_destroy_perf_thread_field(pos);
- lttng_perf_unlock();
- free(perf_thread);
-}
-
-/* Called with UST lock held */
-static
-void lttng_destroy_perf_counter_ctx_field(void *priv)
-{
- struct lttng_perf_counter_field *perf_field;
- struct lttng_perf_counter_thread_field *pos, *p;
-
- perf_field = (struct lttng_perf_counter_field *) priv;
- free(perf_field->name);
- /*
- * This put is performed when no thread can concurrently
- * perform a "get", thanks to the urcu-bp grace
- * period. Holding the lttng perf lock protects against
- * concurrent modification of the per-thread thread field
- * list.
- */
- lttng_perf_lock();
- cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
- thread_field_node)
- lttng_destroy_perf_thread_field(pos);
- lttng_perf_unlock();
- free(perf_field);
-}
-
-#ifdef LTTNG_UST_ARCH_ARMV7
-
-static
-int perf_get_exclude_kernel(void)
-{
- return 0;
-}
-
-#else /* LTTNG_UST_ARCH_ARMV7 */
-
-static
-int perf_get_exclude_kernel(void)
-{
- return 1;
-}
-
-#endif /* LTTNG_UST_ARCH_ARMV7 */
-
-static const struct lttng_ust_type_common *ust_type =
- lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uint64_t),
- BYTE_ORDER, 10);
-
-/* Called with UST lock held */
-int lttng_add_perf_counter_to_ctx(uint32_t type,
- uint64_t config,
- const char *name,
- struct lttng_ust_ctx **ctx)
-{
- struct lttng_ust_ctx_field ctx_field;
- struct lttng_ust_event_field *event_field;
- struct lttng_perf_counter_field *perf_field;
- char *name_alloc;
- int ret;
-
- if (lttng_find_context(*ctx, name)) {
- ret = -EEXIST;
- goto find_error;
- }
- name_alloc = strdup(name);
- if (!name_alloc) {
- ret = -ENOMEM;
- goto name_alloc_error;
- }
- event_field = zmalloc(sizeof(*event_field));
- if (!event_field) {
- ret = -ENOMEM;
- goto event_field_alloc_error;
- }
- event_field->name = name_alloc;
- event_field->type = ust_type;
-
- perf_field = zmalloc(sizeof(*perf_field));
- if (!perf_field) {
- ret = -ENOMEM;
- goto perf_field_alloc_error;
- }
- perf_field->attr.type = type;
- perf_field->attr.config = config;
- perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
- CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
- perf_field->name = name_alloc;
-
- /* Ensure that this perf counter can be used in this process. */
- ret = open_perf_fd(&perf_field->attr);
- if (ret < 0) {
- ret = -ENODEV;
- goto setup_error;
- }
- close_perf_fd(ret);
-
- ctx_field.event_field = event_field;
- ctx_field.get_size = perf_counter_get_size;
- ctx_field.record = perf_counter_record;
- ctx_field.get_value = perf_counter_get_value;
- ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
- ctx_field.priv = perf_field;
-
- ret = lttng_ust_context_append(ctx, &ctx_field);
- if (ret) {
- ret = -ENOMEM;
- goto append_context_error;
- }
- return 0;
-
-append_context_error:
-setup_error:
- free(perf_field);
-perf_field_alloc_error:
- free(event_field);
-event_field_alloc_error:
- free(name_alloc);
-name_alloc_error:
-find_error:
- return ret;
-}
-
-int lttng_perf_counter_init(void)
-{
- int ret;
-
- ret = pthread_key_create(&perf_counter_key,
- lttng_destroy_perf_thread_key);
- if (ret)
- ret = -ret;
- return ret;
-}
-
-void lttng_perf_counter_exit(void)
-{
- int ret;
-
- ret = pthread_key_delete(perf_counter_key);
- if (ret) {
- errno = ret;
- PERROR("Error in pthread_key_delete");
- }
-}
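The destructor passed to pthread_key_create() is the whole teardown story on the thread-exit side: lttng_destroy_perf_thread_key() runs automatically, with the stored pointer as argument, for every thread that ever called pthread_setspecific() on perf_counter_key. A minimal sketch of that mechanism with hypothetical names:

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_key_t key;

/* Runs automatically when a thread that set the key exits. */
static void destroy_value(void *value)
{
	printf("freeing per-thread state %p\n", value);
	free(value);
}

static void *worker(void *arg __attribute__((unused)))
{
	pthread_setspecific(key, malloc(64));	/* lazily allocated state */
	return NULL;	/* destructor fires after this returns */
}

int main(void)
{
	pthread_t t;

	pthread_key_create(&key, destroy_value);
	pthread_create(&t, NULL, worker, NULL);
	pthread_join(t, NULL);	/* destroy_value() already ran in 't' */
	pthread_key_delete(&key);
	return 0;
}
```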
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST pid namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event. The PID namespace is global to the process.
- */
-static ino_t cached_pid_ns = NS_INO_UNINITIALIZED;
-
-static
-ino_t get_pid_ns(void)
-{
- struct stat sb;
- ino_t pid_ns;
-
- pid_ns = CMM_LOAD_SHARED(cached_pid_ns);
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(pid_ns != NS_INO_UNINITIALIZED))
- return pid_ns;
-
- /*
- * At this point we have to populate the cache. Set the initial
- * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- pid_ns = NS_INO_UNAVAILABLE;
-
- if (stat("/proc/self/ns/pid", &sb) == 0) {
- pid_ns = sb.st_ino;
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(cached_pid_ns, pid_ns);
-
- return pid_ns;
-}
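A namespace's identity is simply the inode number of its /proc magic symlink; two processes are in the same PID namespace exactly when those inodes match. A tiny standalone reader (assumes procfs is mounted and a kernel recent enough to expose the ns links):

```c
#include <sys/stat.h>
#include <stdio.h>

int main(void)
{
	struct stat sb;

	/* The inode of the magic link identifies the namespace. */
	if (stat("/proc/self/ns/pid", &sb) != 0) {
		perror("stat");
		return 1;
	}
	printf("pid namespace inode: %lu\n", (unsigned long) sb.st_ino);
	return 0;
}
```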
-
-/*
- * A process's PID namespace membership is determined when the process is
- * created and cannot be changed thereafter.
- *
- * The pid namespace can change only on clone(2) / fork(2):
- * - clone(2) with the CLONE_NEWPID flag
- * - clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWPID flag
- * - clone(2) / fork(2) after a call to setns(2) with a PID namespace fd
- */
-void lttng_context_pid_ns_reset(void)
-{
- CMM_STORE_SHARED(cached_pid_ns, NS_INO_UNINITIALIZED);
-}
-
-static
-size_t pid_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void pid_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t pid_ns;
-
- pid_ns = get_pid_ns();
- chan->ops->event_write(ctx, &pid_ns, sizeof(pid_ns), lttng_ust_rb_alignof(pid_ns));
-}
-
-static
-void pid_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_pid_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("pid_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- pid_ns_get_size,
- pid_ns_record,
- pid_ns_get_value,
- NULL, NULL);
-
-int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST procname context.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <urcu/tls-compat.h>
-#include <assert.h>
-#include "compat.h"
-#include "lttng-tracer-core.h"
-
-#include "context-internal.h"
-
-/* Maximum number of nesting levels for the procname cache. */
-#define PROCNAME_NESTING_MAX 2
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event.
- * Upon exec, procname changes, but exec takes care of throwing away
- * this cached version.
- * The procname can also change by calling prctl(). The procname should
- * be set for a thread before the first event is logged within this
- * thread.
- */
-typedef char procname_array[PROCNAME_NESTING_MAX][17];
-
-static DEFINE_URCU_TLS(procname_array, cached_procname);
-
-static DEFINE_URCU_TLS(int, procname_nesting);
-
-static inline
-const char *wrapper_getprocname(void)
-{
- int nesting = CMM_LOAD_SHARED(URCU_TLS(procname_nesting));
-
- if (caa_unlikely(nesting >= PROCNAME_NESTING_MAX))
- return "<unknown>";
- if (caa_unlikely(!URCU_TLS(cached_procname)[nesting][0])) {
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting + 1);
- /* Increment nesting before updating cache. */
- cmm_barrier();
- lttng_pthread_getname_np(URCU_TLS(cached_procname)[nesting], LTTNG_UST_ABI_PROCNAME_LEN);
- URCU_TLS(cached_procname)[nesting][LTTNG_UST_ABI_PROCNAME_LEN - 1] = '\0';
- /* Decrement nesting after updating cache. */
- cmm_barrier();
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting);
- }
- return URCU_TLS(cached_procname)[nesting];
-}
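lttng_pthread_getname_np() is an internal compat wrapper; on glibc it presumably maps to pthread_getname_np(3). A standalone sketch using the glibc API directly (requires _GNU_SOURCE; Linux thread names hold at most 15 characters plus the NUL terminator, so a 16-byte buffer is the minimum the API accepts):

```c
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	char name[16];	/* 15 characters + NUL is the Linux limit */

	if (pthread_getname_np(pthread_self(), name, sizeof(name)) == 0)
		printf("procname before: %s\n", name);
	/* "demo-thread" is a hypothetical name for this sketch. */
	pthread_setname_np(pthread_self(), "demo-thread");
	if (pthread_getname_np(pthread_self(), name, sizeof(name)) == 0)
		printf("procname after:  %s\n", name);
	return 0;
}
```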
-
-/* Reset should not be called from a signal handler. */
-void lttng_ust_context_procname_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_procname)[1][0], '\0');
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), 1);
- CMM_STORE_SHARED(URCU_TLS(cached_procname)[0][0], '\0');
- CMM_STORE_SHARED(URCU_TLS(procname_nesting), 0);
-}
-
-static
-size_t procname_get_size(void *priv __attribute__((unused)),
- size_t offset __attribute__((unused)))
-{
- return LTTNG_UST_ABI_PROCNAME_LEN;
-}
-
-static
-void procname_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- const char *procname;
-
- procname = wrapper_getprocname();
- chan->ops->event_write(ctx, procname, LTTNG_UST_ABI_PROCNAME_LEN, 1);
-}
-
-static
-void procname_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.str = wrapper_getprocname();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("procname",
- lttng_ust_static_type_array_text(LTTNG_UST_ABI_PROCNAME_LEN),
- false, false),
- procname_get_size,
- procname_record,
- procname_get_value,
- NULL, NULL);
-
-int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_procname_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_procname)[0]));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST application context provider.
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <ust-context-provider.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "jhash.h"
-#include "context-provider-internal.h"
-#include <ust-helper.h>
-
-struct lttng_ust_registered_context_provider {
- const struct lttng_ust_context_provider *provider;
-
- struct cds_hlist_node node;
-};
-
-struct lttng_ust_app_ctx {
- char *name;
- struct lttng_ust_event_field *event_field;
- struct lttng_ust_type_common *type;
-};
-
-#define CONTEXT_PROVIDER_HT_BITS 12
-#define CONTEXT_PROVIDER_HT_SIZE (1U << CONTEXT_PROVIDER_HT_BITS)
-struct context_provider_ht {
- struct cds_hlist_head table[CONTEXT_PROVIDER_HT_SIZE];
-};
-
-static struct context_provider_ht context_provider_ht;
-
-static const struct lttng_ust_context_provider *
- lookup_provider_by_name(const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct lttng_ust_registered_context_provider *reg_provider;
- uint32_t hash;
- const char *end;
- size_t len;
-
- /* Look up using everything before the first ':' as the key. */
- end = strchr(name, ':');
- if (end)
- len = end - name;
- else
- len = strlen(name);
- hash = jhash(name, len, 0);
- head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
- cds_hlist_for_each_entry(reg_provider, node, head, node) {
- if (!strncmp(reg_provider->provider->name, name, len))
- return reg_provider->provider;
- }
- return NULL;
-}
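The table keys on everything before the first ':', so a lookup for "$app.myprovider:field" lands in the same bucket as the bare provider name registered earlier. A sketch of that bucketing with a stand-in string hash (jhash() comes from an internal header; toy_hash() and bucket_of() are hypothetical):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define HT_BITS	12
#define HT_SIZE	(1U << HT_BITS)	/* 4096 buckets, a power of two */

/* Stand-in for the internal jhash(): any decent string hash works here. */
static uint32_t toy_hash(const char *key, size_t len)
{
	uint32_t h = 5381;

	while (len--)
		h = h * 33 + (unsigned char) *key++;
	return h;
}

static unsigned int bucket_of(const char *name)
{
	const char *end = strchr(name, ':');
	size_t len = end ? (size_t) (end - name) : strlen(name);

	/* Key on everything before the first ':', as in the lookup above. */
	return toy_hash(name, len) & (HT_SIZE - 1);
}

int main(void)
{
	/* Both print the same bucket: the ':'-suffix is not part of the key. */
	printf("%u\n", bucket_of("$app.myprovider"));
	printf("%u\n", bucket_of("$app.myprovider:field"));
	return 0;
}
```

Masking with HT_SIZE - 1 is why the table size must stay a power of two.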
-
-struct lttng_ust_registered_context_provider *lttng_ust_context_provider_register(struct lttng_ust_context_provider *provider)
-{
- struct lttng_ust_registered_context_provider *reg_provider = NULL;
- struct cds_hlist_head *head;
- size_t name_len = strlen(provider->name);
- uint32_t hash;
-
- lttng_ust_fixup_tls();
-
- /* Provider name starts with "$app.". */
- if (strncmp("$app.", provider->name, strlen("$app.")) != 0)
- return NULL;
- /* Provider name cannot contain a colon character. */
- if (strchr(provider->name, ':'))
- return NULL;
- if (ust_lock())
- goto end;
- if (lookup_provider_by_name(provider->name))
- goto end;
- reg_provider = zmalloc(sizeof(struct lttng_ust_registered_context_provider));
- if (!reg_provider)
- goto end;
- reg_provider->provider = provider;
- hash = jhash(provider->name, name_len, 0);
- head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
- cds_hlist_add_head(&reg_provider->node, head);
-
- lttng_ust_context_set_session_provider(provider->name,
- provider->get_size, provider->record,
- provider->get_value, provider->priv);
-
- lttng_ust_context_set_event_notifier_group_provider(provider->name,
- provider->get_size, provider->record,
- provider->get_value, provider->priv);
-end:
- ust_unlock();
- return reg_provider;
-}
-
-void lttng_ust_context_provider_unregister(struct lttng_ust_registered_context_provider *reg_provider)
-{
- lttng_ust_fixup_tls();
-
- if (ust_lock())
- goto end;
- lttng_ust_context_set_session_provider(reg_provider->provider->name,
- lttng_ust_dummy_get_size, lttng_ust_dummy_record,
- lttng_ust_dummy_get_value, NULL);
-
- lttng_ust_context_set_event_notifier_group_provider(reg_provider->provider->name,
- lttng_ust_dummy_get_size, lttng_ust_dummy_record,
- lttng_ust_dummy_get_value, NULL);
-
- cds_hlist_del(&reg_provider->node);
-end:
- ust_unlock();
- free(reg_provider);
-}
-
-static void destroy_app_ctx(void *priv)
-{
- struct lttng_ust_app_ctx *app_ctx = (struct lttng_ust_app_ctx *) priv;
-
- free(app_ctx->name);
- free(app_ctx->event_field);
- free(app_ctx->type);
- free(app_ctx);
-}
-
-/*
- * Called with ust mutex held.
- * Add the application context to the array of contexts, even if the
- * application context is not currently loaded. It will then use the
- * dummy callbacks in that case.
- * Always performed before tracing is started, since it modifies
- * metadata describing the context.
- */
-int lttng_ust_add_app_context_to_ctx_rcu(const char *name,
- struct lttng_ust_ctx **ctx)
-{
- const struct lttng_ust_context_provider *provider;
- struct lttng_ust_ctx_field new_field = { 0 };
- struct lttng_ust_event_field *event_field = NULL;
- struct lttng_ust_type_common *type = NULL;
- struct lttng_ust_app_ctx *app_ctx = NULL;
- char *ctx_name;
- int ret;
-
- if (*ctx && lttng_find_context(*ctx, name))
- return -EEXIST;
- event_field = zmalloc(sizeof(struct lttng_ust_event_field));
- if (!event_field) {
- ret = -ENOMEM;
- goto error_event_field_alloc;
- }
- ctx_name = strdup(name);
- if (!ctx_name) {
- ret = -ENOMEM;
- goto error_field_name_alloc;
- }
- type = zmalloc(sizeof(struct lttng_ust_type_common));
- if (!type) {
- ret = -ENOMEM;
- goto error_field_type_alloc;
- }
- app_ctx = zmalloc(sizeof(struct lttng_ust_app_ctx));
- if (!app_ctx) {
- ret = -ENOMEM;
- goto error_app_ctx_alloc;
- }
- event_field->name = ctx_name;
- type->type = lttng_ust_type_dynamic;
- event_field->type = type;
- new_field.event_field = event_field;
- /*
- * If provider is not found, we add the context anyway, but
- * it will provide a dummy context.
- */
- provider = lookup_provider_by_name(name);
- if (provider) {
- new_field.get_size = provider->get_size;
- new_field.record = provider->record;
- new_field.get_value = provider->get_value;
- } else {
- new_field.get_size = lttng_ust_dummy_get_size;
- new_field.record = lttng_ust_dummy_record;
- new_field.get_value = lttng_ust_dummy_get_value;
- }
- new_field.destroy = destroy_app_ctx;
- new_field.priv = app_ctx;
- /*
- * For application context, add it by expanding
- * ctx array.
- */
- ret = lttng_ust_context_append_rcu(ctx, &new_field);
- if (ret) {
- destroy_app_ctx(app_ctx);
- return ret;
- }
- return 0;
-
-error_app_ctx_alloc:
- free(type);
-error_field_type_alloc:
- free(ctx_name);
-error_field_name_alloc:
- free(event_field);
-error_event_field_alloc:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST pthread_id context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <pthread.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-static
-size_t pthread_id_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(unsigned long));
- size += sizeof(unsigned long);
- return size;
-}
-
-static
-void pthread_id_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- unsigned long pthread_id;
-
- pthread_id = (unsigned long) pthread_self();
- chan->ops->event_write(ctx, &pthread_id, sizeof(pthread_id), lttng_ust_rb_alignof(pthread_id));
-}
-
-static
-void pthread_id_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = (unsigned long) pthread_self();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("pthread_id",
- lttng_ust_static_type_integer(sizeof(unsigned long) * CHAR_BIT,
- lttng_ust_rb_alignof(unsigned long) * CHAR_BIT,
- lttng_ust_is_signed_type(unsigned long),
- BYTE_ORDER, 10),
- false, false),
- pthread_id_get_size,
- pthread_id_record,
- pthread_id_get_value,
- NULL, NULL);
-
-int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST time namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <ust-tid.h>
-#include <urcu/tls-compat.h>
-#include "lttng-tracer-core.h"
-#include "ns.h"
-#include "context-internal.h"
-
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_time_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_time_ns(void)
-{
- struct stat sb;
- ino_t time_ns;
-
- time_ns = CMM_LOAD_SHARED(URCU_TLS(cached_time_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(time_ns != NS_INO_UNINITIALIZED))
- return time_ns;
-
- /*
- * At this point we have to populate the cache. Set the initial
- * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- time_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/time", &sb) == 0) {
- time_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/time",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- time_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_time_ns), time_ns);
-
- return time_ns;
-}
-
-/*
- * The time namespace can change for 2 reasons:
- * * setns(2) called with the fd of a different time ns
- * * clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWTIME flag
- */
-void lttng_context_time_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_time_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t time_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void time_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t time_ns;
-
- time_ns = get_time_ns();
- chan->ops->event_write(ctx, &time_ns, sizeof(time_ns), lttng_ust_rb_alignof(time_ns));
-}
-
-static
-void time_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_time_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("time_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- time_ns_get_size,
- time_ns_record,
- time_ns_get_value,
- NULL, NULL);
-
-int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_time_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_time_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST user namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "ns.h"
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event. The user namespace is global to the process.
- */
-static ino_t cached_user_ns = NS_INO_UNINITIALIZED;
-
-static
-ino_t get_user_ns(void)
-{
- struct stat sb;
- ino_t user_ns;
-
- user_ns = CMM_LOAD_SHARED(cached_user_ns);
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(user_ns != NS_INO_UNINITIALIZED))
- return user_ns;
-
- /*
- * At this point we have to populate the cache. Set the initial
- * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- user_ns = NS_INO_UNAVAILABLE;
-
- if (stat("/proc/self/ns/user", &sb) == 0) {
- user_ns = sb.st_ino;
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(cached_user_ns, user_ns);
-
- return user_ns;
-}
-
-/*
- * The user namespace can change for 3 reasons:
- * * clone(2) called with CLONE_NEWUSER
- * * setns(2) called with the fd of a different user ns
- * * unshare(2) called with CLONE_NEWUSER
- */
-void lttng_context_user_ns_reset(void)
-{
- CMM_STORE_SHARED(cached_user_ns, NS_INO_UNINITIALIZED);
-}
-
-static
-size_t user_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void user_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t user_ns;
-
- user_ns = get_user_ns();
- chan->ops->event_write(ctx, &user_ns, sizeof(user_ns), lttng_ust_rb_alignof(user_ns));
-}
-
-static
-void user_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_user_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("user_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- user_ns_get_size,
- user_ns_record,
- user_ns_get_value,
- NULL, NULL);
-
-int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST uts namespace context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <ust-tid.h>
-#include <urcu/tls-compat.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-#include "ns.h"
-
-
-/*
- * We cache the result to ensure we don't stat(2) the proc filesystem on
- * each event.
- */
-static DEFINE_URCU_TLS_INIT(ino_t, cached_uts_ns, NS_INO_UNINITIALIZED);
-
-static
-ino_t get_uts_ns(void)
-{
- struct stat sb;
- ino_t uts_ns;
-
- uts_ns = CMM_LOAD_SHARED(URCU_TLS(cached_uts_ns));
-
- /*
- * If the cache is populated, do nothing and return the
- * cached inode number.
- */
- if (caa_likely(uts_ns != NS_INO_UNINITIALIZED))
- return uts_ns;
-
- /*
- * At this point we have to populate the cache. Set the initial
- * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
- * number from the proc filesystem, this is the value we will
- * cache.
- */
- uts_ns = NS_INO_UNAVAILABLE;
-
- /*
- * /proc/thread-self was introduced in kernel v3.17
- */
- if (stat("/proc/thread-self/ns/uts", &sb) == 0) {
- uts_ns = sb.st_ino;
- } else {
- char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
-
- if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
- "/proc/self/task/%d/ns/uts",
- lttng_gettid()) >= 0) {
-
- if (stat(proc_ns_path, &sb) == 0) {
- uts_ns = sb.st_ino;
- }
- }
- }
-
- /*
- * And finally, store the inode number in the cache.
- */
- CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), uts_ns);
-
- return uts_ns;
-}
-
-/*
- * The uts namespace can change for 3 reasons:
- * * clone(2) called with CLONE_NEWUTS
- * * setns(2) called with the fd of a different uts ns
- * * unshare(2) called with CLONE_NEWUTS
- */
-void lttng_context_uts_ns_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), NS_INO_UNINITIALIZED);
-}
-
-static
-size_t uts_ns_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
- size += sizeof(ino_t);
- return size;
-}
-
-static
-void uts_ns_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- ino_t uts_ns;
-
- uts_ns = get_uts_ns();
- chan->ops->event_write(ctx, &uts_ns, sizeof(uts_ns), lttng_ust_rb_alignof(uts_ns));
-}
-
-static
-void uts_ns_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_uts_ns();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("uts_ns",
- lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
- lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
- lttng_ust_is_signed_type(ino_t),
- BYTE_ORDER, 10),
- false, false),
- uts_ns_get_size,
- uts_ns_record,
- uts_ns_get_value,
- NULL, NULL);
-
-int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_uts_ns_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_uts_ns)));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced effective group ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static gid_t cached_vegid = INVALID_GID;
-
-static
-gid_t get_vegid(void)
-{
- gid_t vegid;
-
- vegid = CMM_LOAD_SHARED(cached_vegid);
-
- if (caa_unlikely(vegid == INVALID_GID)) {
- vegid = getegid();
- CMM_STORE_SHARED(cached_vegid, vegid);
- }
-
- return vegid;
-}
-
-/*
- * The vegid can change on setgid, setregid, setresgid and setegid.
- */
-void lttng_context_vegid_reset(void)
-{
- CMM_STORE_SHARED(cached_vegid, INVALID_GID);
-}
-
-static
-size_t vegid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
- size += sizeof(gid_t);
- return size;
-}
-
-static
-void vegid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- gid_t vegid;
-
- vegid = get_vegid();
- chan->ops->event_write(ctx, &vegid, sizeof(vegid), lttng_ust_rb_alignof(vegid));
-}
-
-static
-void vegid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vegid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vegid",
- lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(gid_t),
- BYTE_ORDER, 10),
- false, false),
- vegid_get_size,
- vegid_record,
- vegid_get_value,
- NULL, NULL);
-
-int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced effective user ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static uid_t cached_veuid = INVALID_UID;
-
-static
-uid_t get_veuid(void)
-{
- uid_t veuid;
-
- veuid = CMM_LOAD_SHARED(cached_veuid);
-
- if (caa_unlikely(veuid == INVALID_UID)) {
- veuid = geteuid();
- CMM_STORE_SHARED(cached_veuid, veuid);
- }
-
- return veuid;
-}
-
-/*
- * The veuid can change on setuid, setreuid, setresuid and seteuid.
- */
-void lttng_context_veuid_reset(void)
-{
- CMM_STORE_SHARED(cached_veuid, INVALID_UID);
-}
-
-static
-size_t veuid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
- size += sizeof(uid_t);
- return size;
-}
-
-static
-void veuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uid_t veuid;
-
- veuid = get_veuid();
- chan->ops->event_write(ctx, &veuid, sizeof(veuid), lttng_ust_rb_alignof(veuid));
-}
-
-static
-void veuid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_veuid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("veuid",
- lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uid_t),
- BYTE_ORDER, 10),
- false, false),
- veuid_get_size,
- veuid_record,
- veuid_get_value,
- NULL, NULL);
-
-int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced real group ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static gid_t cached_vgid = INVALID_GID;
-
-static
-gid_t get_vgid(void)
-{
- gid_t vgid;
-
- vgid = CMM_LOAD_SHARED(cached_vgid);
-
- if (caa_unlikely(vgid == INVALID_GID)) {
- vgid = getgid();
- CMM_STORE_SHARED(cached_vgid, vgid);
- }
-
- return vgid;
-}
-
-/*
- * The vgid can change on setgid, setregid and setresgid.
- */
-void lttng_context_vgid_reset(void)
-{
- CMM_STORE_SHARED(cached_vgid, INVALID_GID);
-}
-
-static
-size_t vgid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
- size += sizeof(gid_t);
- return size;
-}
-
-static
-void vgid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- gid_t vgid;
-
- vgid = get_vgid();
- chan->ops->event_write(ctx, &vgid, sizeof(vgid), lttng_ust_rb_alignof(vgid));
-}
-
-static
-void vgid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vgid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vgid",
- lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(gid_t),
- BYTE_ORDER, 10),
- false, false),
- vgid_get_size,
- vgid_record,
- vgid_get_value,
- NULL, NULL);
-
-int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST vpid context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event.
- */
-static pid_t cached_vpid;
-
-static inline
-pid_t wrapper_getvpid(void)
-{
- pid_t vpid;
-
- vpid = CMM_LOAD_SHARED(cached_vpid);
- if (caa_unlikely(!vpid)) {
- vpid = getpid();
- CMM_STORE_SHARED(cached_vpid, vpid);
- }
- return vpid;
-}
-
-/*
- * Upon fork or clone, the PID assigned to our thread is not the same as
- * the one we kept in cache.
- */
-void lttng_context_vpid_reset(void)
-{
- CMM_STORE_SHARED(cached_vpid, 0);
-}
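LTTng-UST wires this reset into its own fork handling; as an illustration of the same cache-and-invalidate idea in plain POSIX (a hypothetical sketch, not the LTTng mechanism), pthread_atfork(3) can clear a cached PID in the child:

```c
#include <pthread.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t cached_pid;	/* 0 means "not cached yet" */

static pid_t get_cached_pid(void)
{
	if (!cached_pid)
		cached_pid = getpid();	/* one syscall, then cached */
	return cached_pid;
}

/* Runs in the child after fork(), like lttng_context_vpid_reset(). */
static void reset_in_child(void)
{
	cached_pid = 0;
}

int main(void)
{
	pthread_atfork(NULL, NULL, reset_in_child);
	printf("parent pid: %d\n", (int) get_cached_pid());
	if (fork() == 0) {
		/* The cache was invalidated, so this re-reads the new PID. */
		printf("child pid:  %d\n", (int) get_cached_pid());
		_exit(0);
	}
	wait(NULL);
	return 0;
}
```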
-
-static
-size_t vpid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
-}
-
-static
-void vpid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- pid_t vpid = wrapper_getvpid();
-
- chan->ops->event_write(ctx, &vpid, sizeof(vpid), lttng_ust_rb_alignof(vpid));
-}
-
-static
-void vpid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = wrapper_getvpid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vpid",
- lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(pid_t),
- BYTE_ORDER, 10),
- false, false),
- vpid_get_size,
- vpid_record,
- vpid_get_value,
- NULL, NULL);
-
-int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced saved set-group ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static gid_t cached_vsgid = INVALID_GID;
-
-static
-gid_t get_vsgid(void)
-{
- gid_t vsgid;
-
- vsgid = CMM_LOAD_SHARED(cached_vsgid);
-
- if (caa_unlikely(vsgid == INVALID_GID)) {
- gid_t gid, egid, sgid;
-
- if (getresgid(&gid, &egid, &sgid) == 0) {
- vsgid = sgid;
- CMM_STORE_SHARED(cached_vsgid, vsgid);
- }
- }
-
- return vsgid;
-}
-
-/*
- * The vsgid can change on setgid, setregid and setresgid.
- */
-void lttng_context_vsgid_reset(void)
-{
- CMM_STORE_SHARED(cached_vsgid, INVALID_GID);
-}
-
-static
-size_t vsgid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
- size += sizeof(gid_t);
- return size;
-}
-
-static
-void vsgid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- gid_t vsgid;
-
- vsgid = get_vsgid();
- chan->ops->event_write(ctx, &vsgid, sizeof(vsgid), lttng_ust_rb_alignof(vsgid));
-}
-
-static
-void vsgid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vsgid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vsgid",
- lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(gid_t),
- BYTE_ORDER, 10),
- false, false),
- vsgid_get_size,
- vsgid_record,
- vsgid_get_value,
- NULL, NULL);
-
-int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced saved set-user ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static uid_t cached_vsuid = INVALID_UID;
-
-static
-uid_t get_vsuid(void)
-{
- uid_t vsuid;
-
- vsuid = CMM_LOAD_SHARED(cached_vsuid);
-
- if (caa_unlikely(vsuid == INVALID_UID)) {
- uid_t uid, euid, suid;
-
- if (getresuid(&uid, &euid, &suid) == 0) {
- vsuid = suid;
- CMM_STORE_SHARED(cached_vsuid, vsuid);
- }
- }
-
- return vsuid;
-}
-
-/*
- * The vsuid can change on setuid, setreuid and setresuid.
- */
-void lttng_context_vsuid_reset(void)
-{
- CMM_STORE_SHARED(cached_vsuid, INVALID_UID);
-}
-
-static
-size_t vsuid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
- size += sizeof(uid_t);
- return size;
-}
-
-static
-void vsuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uid_t vsuid;
-
- vsuid = get_vsuid();
- chan->ops->event_write(ctx, &vsuid, sizeof(vsuid), lttng_ust_rb_alignof(vsuid));
-}
-
-static
-void vsuid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vsuid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vsuid",
- lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uid_t),
- BYTE_ORDER, 10),
- false, false),
- vsuid_get_size,
- vsuid_record,
- vsuid_get_value,
- NULL, NULL);
-
-int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST vtid context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <ust-tid.h>
-#include <urcu/tls-compat.h>
-
-#include "context-internal.h"
-#include "lttng-tracer-core.h"
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event.
- */
-static DEFINE_URCU_TLS(pid_t, cached_vtid);
-
-/*
- * Upon fork or clone, the TID assigned to our thread is not the same as
- * the one we kept in cache. Luckily, we are the only thread surviving in the
- * child process, so we can simply clear our cached version.
- */
-void lttng_context_vtid_reset(void)
-{
- CMM_STORE_SHARED(URCU_TLS(cached_vtid), 0);
-}
-
-static
-size_t vtid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
- size += sizeof(pid_t);
- return size;
-}
-
-static inline
-pid_t wrapper_getvtid(void)
-{
- pid_t vtid;
-
- vtid = CMM_LOAD_SHARED(URCU_TLS(cached_vtid));
- if (caa_unlikely(!vtid)) {
- vtid = lttng_gettid();
- CMM_STORE_SHARED(URCU_TLS(cached_vtid), vtid);
- }
- return vtid;
-}
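lttng_gettid() is an internal wrapper; on Linux it presumably reduces to gettid(2), which glibc only exposes as a function since version 2.30. A standalone sketch using the raw syscall, which also works on older glibc:

```c
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	/* The kernel thread ID; equal to getpid() for the main thread. */
	pid_t tid = (pid_t) syscall(SYS_gettid);

	printf("pid=%d tid=%d\n", (int) getpid(), (int) tid);
	return 0;
}
```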
-
-static
-void vtid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- pid_t vtid = wrapper_getvtid();
-
- chan->ops->event_write(ctx, &vtid, sizeof(vtid), lttng_ust_rb_alignof(vtid));
-}
-
-static
-void vtid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = wrapper_getvtid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vtid",
- lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(pid_t),
- BYTE_ORDER, 10),
- false, false),
- vtid_get_size,
- vtid_record,
- vtid_get_value,
- NULL, NULL);
-
-int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
-
-/*
- * Force a read (implying a TLS fixup for dlopen) of TLS variables.
- */
-void lttng_fixup_vtid_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(cached_vtid)));
-}
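
Unlike the process-global UID caches, the TID cache above is per-thread TLS, and the fork comment explains why a plain reset works: only one thread survives in the child. A minimal sketch of the same idea on Linux, using __thread and pthread_atfork() instead of lttng-ust's URCU TLS-compat macros and internal fork handling (all names illustrative):

    #define _GNU_SOURCE	/* for syscall() */
    #include <pthread.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    static __thread pid_t cached_tid;	/* 0 means "not cached yet" */

    static pid_t cached_gettid(void)
    {
    	if (!cached_tid)
    		cached_tid = (pid_t) syscall(SYS_gettid);
    	return cached_tid;
    }

    /* After fork(), only the forking thread survives in the child, so
     * clearing that single thread's cache is sufficient. */
    static void tid_cache_reset_in_child(void)
    {
    	cached_tid = 0;
    }

    static void tid_cache_init(void)
    {
    	pthread_atfork(NULL, NULL, tid_cache_reset_in_child);
    }
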
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- *
- * LTTng UST namespaced real user ID context.
- */
-
-#define _LGPL_SOURCE
-#include <limits.h>
-#include <stddef.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "context-internal.h"
-#include "creds.h"
-
-
-/*
- * At the kernel level, user IDs and group IDs are a per-thread attribute.
- * However, POSIX requires that all threads in a process share the same
- * credentials. The NPTL threading implementation handles the POSIX
- * requirements by providing wrapper functions for the various system calls
- * that change process UIDs and GIDs. These wrapper functions (including those
- * for setreuid() and setregid()) employ a signal-based technique to ensure
- * that when one thread changes credentials, all of the other threads in the
- * process also change their credentials.
- */
-
-/*
- * We cache the result to ensure we don't trigger a system call for
- * each event. User / group IDs are global to the process.
- */
-static uid_t cached_vuid = INVALID_UID;
-
-static
-uid_t get_vuid(void)
-{
- uid_t vuid;
-
- vuid = CMM_LOAD_SHARED(cached_vuid);
-
- if (caa_unlikely(vuid == INVALID_UID)) {
- vuid = getuid();
- CMM_STORE_SHARED(cached_vuid, vuid);
- }
-
- return vuid;
-}
-
-/*
- * The vuid can change on setuid, setreuid and setresuid.
- */
-void lttng_context_vuid_reset(void)
-{
- CMM_STORE_SHARED(cached_vuid, INVALID_UID);
-}
-
-static
-size_t vuid_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
- size += sizeof(uid_t);
- return size;
-}
-
-static
-void vuid_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- uid_t vuid;
-
- vuid = get_vuid();
- chan->ops->event_write(ctx, &vuid, sizeof(vuid), lttng_ust_rb_alignof(vuid));
-}
-
-static
-void vuid_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->u.s64 = get_vuid();
-}
-
-static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
- lttng_ust_static_event_field("vuid",
- lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
- lttng_ust_is_signed_type(uid_t),
- BYTE_ORDER, 10),
- false, false),
- vuid_get_size,
- vuid_record,
- vuid_get_value,
- NULL, NULL);
-
-int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
- ret = -EEXIST;
- goto error_find_context;
- }
- ret = lttng_ust_context_append(ctx, ctx_field);
- if (ret)
- return ret;
- return 0;
-
-error_find_context:
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST trace/channel/event context management.
- */
-
-#define _LGPL_SOURCE
-#include <lttng/ust-events.h>
-#include <lttng/ust-tracer.h>
-#include <ust-context-provider.h>
-#include <lttng/urcu/pointer.h>
-#include <lttng/urcu/urcu-ust.h>
-#include <usterr-signal-safe.h>
-#include <ust-helper.h>
-#include <stddef.h>
-#include <string.h>
-#include <assert.h>
-#include <limits.h>
-#include "tracepoint-internal.h"
-
-#include "context-internal.h"
-
-/*
- * The filter implementation requires that two consecutive "get" operations
- * for the same context field, performed by the same thread, return the
- * same result.
- */
-
-int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
-{
- unsigned int i;
- const char *subname;
-
- if (!ctx)
- return 0;
- if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
- subname = name + strlen("$ctx.");
- } else {
- subname = name;
- }
- for (i = 0; i < ctx->nr_fields; i++) {
- /* Skip allocated (but non-initialized) contexts */
- if (!ctx->fields[i].event_field->name)
- continue;
- if (!strcmp(ctx->fields[i].event_field->name, subname))
- return 1;
- }
- return 0;
-}
-
-int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
-{
- unsigned int i;
- const char *subname;
-
- if (!ctx)
- return -1;
- if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
- subname = name + strlen("$ctx.");
- } else {
- subname = name;
- }
- for (i = 0; i < ctx->nr_fields; i++) {
- /* Skip allocated (but non-initialized) contexts */
- if (!ctx->fields[i].event_field->name)
- continue;
- if (!strcmp(ctx->fields[i].event_field->name, subname))
- return i;
- }
- return -1;
-}
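
Both lookups accept either a bare field name or the "$ctx."-prefixed form used by the tracing control interface, stripping the prefix before comparing, so "vtid" and "$ctx.vtid" name the same field. The normalization, restated on its own (illustrative helper, not lttng-ust API):

    #include <string.h>

    /* Strip an optional "$ctx." prefix, as the lookups above do. */
    static const char *ctx_subname(const char *name)
    {
    	static const char prefix[] = "$ctx.";

    	if (strncmp(name, prefix, strlen(prefix)) == 0)
    		return name + strlen(prefix);
    	return name;
    }
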
-
-static int lttng_find_context_provider(struct lttng_ust_ctx *ctx, const char *name)
-{
- unsigned int i;
-
- for (i = 0; i < ctx->nr_fields; i++) {
- /* Skip allocated (but non-initialized) contexts */
- if (!ctx->fields[i].event_field->name)
- continue;
- if (!strncmp(ctx->fields[i].event_field->name, name,
- strlen(name)))
- return 1;
- }
- return 0;
-}
-
-/*
- * Note: as we append context information, the pointer location may change.
- * lttng_ust_context_add_field leaves the newly appended last context field
- * zero-initialized.
- */
-static
-int lttng_ust_context_add_field(struct lttng_ust_ctx **ctx_p)
-{
- struct lttng_ust_ctx *ctx;
-
- if (!*ctx_p) {
- *ctx_p = zmalloc(sizeof(struct lttng_ust_ctx));
- if (!*ctx_p)
- return -ENOMEM;
- (*ctx_p)->largest_align = 1;
- }
- ctx = *ctx_p;
- if (ctx->nr_fields + 1 > ctx->allocated_fields) {
- struct lttng_ust_ctx_field *new_fields;
-
- ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
- new_fields = zmalloc(ctx->allocated_fields * sizeof(*new_fields));
- if (!new_fields)
- return -ENOMEM;
- /* Copy elements */
- if (ctx->fields)
- memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
- free(ctx->fields);
- ctx->fields = new_fields;
- }
- ctx->nr_fields++;
- return 0;
-}
-
-static size_t get_type_max_align(const struct lttng_ust_type_common *type)
-{
- switch (type->type) {
- case lttng_ust_type_integer:
- return lttng_ust_get_type_integer(type)->alignment;
- case lttng_ust_type_string:
- return CHAR_BIT;
- case lttng_ust_type_dynamic:
- return 0;
- case lttng_ust_type_enum:
- return get_type_max_align(lttng_ust_get_type_enum(type)->container_type);
- case lttng_ust_type_array:
- return max_t(size_t, get_type_max_align(lttng_ust_get_type_array(type)->elem_type),
- lttng_ust_get_type_array(type)->alignment);
- case lttng_ust_type_sequence:
- return max_t(size_t, get_type_max_align(lttng_ust_get_type_sequence(type)->elem_type),
- lttng_ust_get_type_sequence(type)->alignment);
- case lttng_ust_type_struct:
- {
- unsigned int i;
- size_t field_align = 0;
- const struct lttng_ust_type_struct *struct_type = lttng_ust_get_type_struct(type);
-
- for (i = 0; i < struct_type->nr_fields; i++) {
- field_align = max_t(size_t,
- get_type_max_align(struct_type->fields[i]->type),
- field_align);
- }
- return field_align;
- }
- default:
- WARN_ON_ONCE(1);
- return 0;
- }
-}
-
-/*
- * lttng_context_update() should be called at least once between context
- * modification and trace start.
- */
-static
-void lttng_context_update(struct lttng_ust_ctx *ctx)
-{
- int i;
- size_t largest_align = 8; /* in bits */
-
- for (i = 0; i < ctx->nr_fields; i++) {
- size_t field_align = 8;
-
- field_align = get_type_max_align(ctx->fields[i].event_field->type);
- largest_align = max_t(size_t, largest_align, field_align);
- }
- ctx->largest_align = largest_align >> 3; /* bits to bytes */
-}
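
Alignments are tracked in bits throughout the type system and only converted to bytes when cached in largest_align; the floor of 8 bits keeps contexts at least byte-aligned. A small self-contained restatement of the computation (illustrative):

    #include <stdio.h>

    /* Mirrors the bits-to-bytes conversion in lttng_context_update(). */
    static size_t largest_align_bytes(const size_t *field_align_bits, size_t n)
    {
    	size_t largest = 8, i;	/* at least byte-aligned, in bits */

    	for (i = 0; i < n; i++)
    		if (field_align_bits[i] > largest)
    			largest = field_align_bits[i];
    	return largest >> 3;	/* bits to bytes */
    }

    int main(void)
    {
    	size_t aligns[] = { 8, 64 };	/* e.g. a string and a uint64_t */

    	printf("%zu\n", largest_align_bytes(aligns, 2));	/* prints 8 */
    	return 0;
    }
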
-
-int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
-{
- struct lttng_ust_ctx *old_ctx = *ctx_p, *new_ctx = NULL;
- struct lttng_ust_ctx_field *new_fields = NULL;
- int ret;
-
- if (old_ctx) {
- new_ctx = zmalloc(sizeof(struct lttng_ust_ctx));
- if (!new_ctx)
- return -ENOMEM;
- *new_ctx = *old_ctx;
- new_fields = zmalloc(new_ctx->allocated_fields * sizeof(*new_fields));
- if (!new_fields) {
- free(new_ctx);
- return -ENOMEM;
- }
- /* Copy elements */
- memcpy(new_fields, old_ctx->fields,
- sizeof(*old_ctx->fields) * old_ctx->nr_fields);
- new_ctx->fields = new_fields;
- }
- ret = lttng_ust_context_add_field(&new_ctx);
- if (ret) {
- free(new_fields);
- free(new_ctx);
- return ret;
- }
- new_ctx->fields[new_ctx->nr_fields - 1] = *f;
- lttng_context_update(new_ctx);
- lttng_ust_rcu_assign_pointer(*ctx_p, new_ctx);
- lttng_ust_urcu_synchronize_rcu();
- if (old_ctx) {
- free(old_ctx->fields);
- free(old_ctx);
- }
- return 0;
-}
-
-int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
- const struct lttng_ust_ctx_field *f)
-{
- int ret;
-
- ret = lttng_ust_context_add_field(ctx_p);
- if (ret)
- return ret;
- (*ctx_p)->fields[(*ctx_p)->nr_fields - 1] = *f;
- lttng_context_update(*ctx_p);
- return 0;
-}
-
-void lttng_destroy_context(struct lttng_ust_ctx *ctx)
-{
- int i;
-
- if (!ctx)
- return;
- for (i = 0; i < ctx->nr_fields; i++) {
- if (ctx->fields[i].destroy)
- ctx->fields[i].destroy(ctx->fields[i].priv);
- }
- free(ctx->fields);
- free(ctx);
-}
-
-/*
- * Can be safely performed concurrently with tracing using the struct
- * lttng_ctx, through an RCU update. Needs to match the RCU read-side
- * handling of contexts.
- *
- * This does not allow adding, removing, or changing the typing of the
- * contexts, since this needs to stay invariant for metadata. However,
- * it allows updating the handlers associated with all contexts matching
- * a provider (by name) while tracing is using it, in a way that ensures
- * a single RCU read-side critical section sees either all old, or all
- * new handlers.
- */
-int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
- const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
-{
- int i, ret;
- struct lttng_ust_ctx *ctx = *_ctx, *new_ctx;
- struct lttng_ust_ctx_field *new_fields;
-
- if (!ctx || !lttng_find_context_provider(ctx, name))
- return 0;
- /*
- * We have at least one instance of context for the provider.
- */
- new_ctx = zmalloc(sizeof(*new_ctx));
- if (!new_ctx)
- return -ENOMEM;
- *new_ctx = *ctx;
- new_fields = zmalloc(sizeof(*new_fields) * ctx->allocated_fields);
- if (!new_fields) {
- ret = -ENOMEM;
- goto field_error;
- }
- /* Copy elements */
- memcpy(new_fields, ctx->fields,
- sizeof(*new_fields) * ctx->allocated_fields);
- for (i = 0; i < ctx->nr_fields; i++) {
- if (strncmp(new_fields[i].event_field->name,
- name, strlen(name)) != 0)
- continue;
- new_fields[i].get_size = get_size;
- new_fields[i].record = record;
- new_fields[i].get_value = get_value;
- new_fields[i].priv = priv;
- }
- new_ctx->fields = new_fields;
- lttng_ust_rcu_assign_pointer(*_ctx, new_ctx);
- lttng_ust_urcu_synchronize_rcu();
- free(ctx->fields);
- free(ctx);
- return 0;
-
-field_error:
- free(new_ctx);
- return ret;
-}
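
Both lttng_ust_context_append_rcu() and lttng_ust_context_set_provider_rcu() follow the classic RCU copy-update sequence: copy, modify the copy, publish it with an assign-pointer, wait out a grace period, then free the old version. A generic sketch of that sequence against stock liburcu (illustrative types and names; lttng-ust uses its own UST RCU flavour):

    #include <stdlib.h>
    #include <string.h>
    #include <urcu.h>	/* liburcu */

    struct table {
    	size_t len;
    	int *vals;
    };

    static struct table *live;	/* dereferenced under rcu_read_lock() */

    /* Copy, modify, publish, wait for a grace period, then reclaim. */
    static int table_update_rcu(size_t idx, int newval)
    {
    	struct table *old = live, *new;

    	new = malloc(sizeof(*new));
    	if (!new)
    		return -1;
    	*new = *old;
    	new->vals = malloc(old->len * sizeof(*new->vals));
    	if (!new->vals) {
    		free(new);
    		return -1;
    	}
    	memcpy(new->vals, old->vals, old->len * sizeof(*new->vals));
    	new->vals[idx] = newval;
    	rcu_assign_pointer(live, new);	/* readers now see the new copy */
    	synchronize_rcu();		/* wait out pre-existing readers */
    	free(old->vals);
    	free(old);
    	return 0;
    }
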
-
-int lttng_context_init_all(struct lttng_ust_ctx **ctx)
-{
- int ret;
-
- ret = lttng_add_pthread_id_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_pthread_id_to_ctx");
- goto error;
- }
- ret = lttng_add_vtid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vtid_to_ctx");
- goto error;
- }
- ret = lttng_add_vpid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vpid_to_ctx");
- goto error;
- }
- ret = lttng_add_procname_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_procname_to_ctx");
- goto error;
- }
- ret = lttng_add_cpu_id_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_cpu_id_to_ctx");
- goto error;
- }
- ret = lttng_add_cgroup_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_cgroup_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_ipc_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_ipc_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_mnt_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_mnt_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_net_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_net_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_pid_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_pid_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_time_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_time_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_user_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_user_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_uts_ns_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_uts_ns_to_ctx");
- goto error;
- }
- ret = lttng_add_vuid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vuid_to_ctx");
- goto error;
- }
- ret = lttng_add_veuid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_veuid_to_ctx");
- goto error;
- }
- ret = lttng_add_vsuid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vsuid_to_ctx");
- goto error;
- }
- ret = lttng_add_vgid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vgid_to_ctx");
- goto error;
- }
- ret = lttng_add_vegid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vegid_to_ctx");
- goto error;
- }
- ret = lttng_add_vsgid_to_ctx(ctx);
- if (ret) {
- WARN("Cannot add context lttng_add_vsgid_to_ctx");
- goto error;
- }
- lttng_context_update(*ctx);
- return 0;
-
-error:
- lttng_destroy_context(*ctx);
- return ret;
-}
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-counter-client-percpu-32-modular.c
- *
- * LTTng lib counter client. Per-cpu 32-bit counters in modular
- * arithmetic.
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include "ust-events-internal.h"
-#include "../libcounter/counter.h"
-#include "../libcounter/counter-api.h"
-#include "lttng-tracer-core.h"
-#include "lttng-counter-client.h"
-
-static const struct lib_counter_config client_config = {
- .alloc = COUNTER_ALLOC_PER_CPU,
- .sync = COUNTER_SYNC_PER_CPU,
- .arithmetic = COUNTER_ARITHMETIC_MODULAR,
- .counter_size = COUNTER_SIZE_32_BIT,
-};
-
-static struct lib_counter *counter_create(size_t nr_dimensions,
- const struct lttng_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
-{
- size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
-
- if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
- return NULL;
- for (i = 0; i < nr_dimensions; i++) {
- if (dimensions[i].has_underflow || dimensions[i].has_overflow)
- return NULL;
- max_nr_elem[i] = dimensions[i].size;
- }
- return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
- global_sum_step, global_counter_fd, nr_counter_cpu_fds,
- counter_cpu_fds, is_daemon);
-}
-
-static void counter_destroy(struct lib_counter *counter)
-{
- lttng_counter_destroy(counter);
-}
-
-static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
-{
- return lttng_counter_add(&client_config, counter, dimension_indexes, v);
-}
-
-static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
- overflow, underflow);
-}
-
-static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
- overflow, underflow);
-}
-
-static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
-{
- return lttng_counter_clear(&client_config, counter, dimension_indexes);
-}
-
-static struct lttng_counter_transport lttng_counter_transport = {
- .name = "counter-per-cpu-32-modular",
- .ops = {
- .counter_create = counter_create,
- .counter_destroy = counter_destroy,
- .counter_add = counter_add,
- .counter_read = counter_read,
- .counter_aggregate = counter_aggregate,
- .counter_clear = counter_clear,
- },
- .client_config = &client_config,
-};
-
-void lttng_counter_client_percpu_32_modular_init(void)
-{
- lttng_counter_transport_register(&lttng_counter_transport);
-}
-
-void lttng_counter_client_percpu_32_modular_exit(void)
-{
- lttng_counter_transport_unregister(&lttng_counter_transport);
-}
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-counter-client-percpu-64-modular.c
- *
- * LTTng lib counter client. Per-cpu 64-bit counters in modular
- * arithmetic.
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include "ust-events-internal.h"
-#include "../libcounter/counter.h"
-#include "../libcounter/counter-api.h"
-#include "lttng-tracer-core.h"
-#include "lttng-counter-client.h"
-
-static const struct lib_counter_config client_config = {
- .alloc = COUNTER_ALLOC_PER_CPU,
- .sync = COUNTER_SYNC_PER_CPU,
- .arithmetic = COUNTER_ARITHMETIC_MODULAR,
- .counter_size = COUNTER_SIZE_64_BIT,
-};
-
-static struct lib_counter *counter_create(size_t nr_dimensions,
- const struct lttng_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon)
-{
- size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
-
- if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
- return NULL;
- for (i = 0; i < nr_dimensions; i++) {
- if (dimensions[i].has_underflow || dimensions[i].has_overflow)
- return NULL;
- max_nr_elem[i] = dimensions[i].size;
- }
- return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
- global_sum_step, global_counter_fd, nr_counter_cpu_fds,
- counter_cpu_fds, is_daemon);
-}
-
-static void counter_destroy(struct lib_counter *counter)
-{
- lttng_counter_destroy(counter);
-}
-
-static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
-{
- return lttng_counter_add(&client_config, counter, dimension_indexes, v);
-}
-
-static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
- overflow, underflow);
-}
-
-static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
- int64_t *value, bool *overflow, bool *underflow)
-{
- return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
- overflow, underflow);
-}
-
-static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
-{
- return lttng_counter_clear(&client_config, counter, dimension_indexes);
-}
-
-static struct lttng_counter_transport lttng_counter_transport = {
- .name = "counter-per-cpu-64-modular",
- .ops = {
- .counter_create = counter_create,
- .counter_destroy = counter_destroy,
- .counter_add = counter_add,
- .counter_read = counter_read,
- .counter_aggregate = counter_aggregate,
- .counter_clear = counter_clear,
- },
- .client_config = &client_config,
-};
-
-void lttng_counter_client_percpu_64_modular_init(void)
-{
- lttng_counter_transport_register(&lttng_counter_transport);
-}
-
-void lttng_counter_client_percpu_64_modular_exit(void)
-{
- lttng_counter_transport_unregister(&lttng_counter_transport);
-}
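
The 64-bit client above differs from the 32-bit one only in .counter_size. Purely as an illustration (not how the tree is actually organized), a macro could stamp out both configurations:

    /* Hypothetical macro generating the two configs; illustrative only. */
    #define DEFINE_MODULAR_PERCPU_CONFIG(bits)				  \
    	static const struct lib_counter_config client_config_##bits = {  \
    		.alloc = COUNTER_ALLOC_PER_CPU,				  \
    		.sync = COUNTER_SYNC_PER_CPU,				  \
    		.arithmetic = COUNTER_ARITHMETIC_MODULAR,		  \
    		.counter_size = COUNTER_SIZE_##bits##_BIT,		  \
    	}

    DEFINE_MODULAR_PERCPU_CONFIG(32);
    DEFINE_MODULAR_PERCPU_CONFIG(64);
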
+++ /dev/null
-/*
- * SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib counter client.
- */
-
-#ifndef _LTTNG_UST_COUNTER_CLIENT_H
-#define _LTTNG_UST_COUNTER_CLIENT_H
-
-/*
- * The counter clients init/exit symbols are private ABI for
- * liblttng-ust-ctl, which is why they are not hidden.
- */
-
-void lttng_ust_counter_clients_init(void);
-void lttng_ust_counter_clients_exit(void);
-
-void lttng_counter_client_percpu_32_modular_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_client_percpu_32_modular_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_client_percpu_64_modular_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_client_percpu_64_modular_exit(void)
- __attribute__((visibility("hidden")));
-
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Holds LTTng per-session event registry.
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <assert.h>
-#include <errno.h>
-#include <limits.h>
-#include <pthread.h>
-#include <sys/shm.h>
-#include <sys/ipc.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <inttypes.h>
-#include <time.h>
-#include <stdbool.h>
-#include <unistd.h>
-#include <dlfcn.h>
-#include <lttng/ust-endian.h>
-
-#include <urcu/arch.h>
-#include <urcu/compiler.h>
-#include <urcu/hlist.h>
-#include <urcu/list.h>
-#include <urcu/uatomic.h>
-
-#include <lttng/tracepoint.h>
-#include <lttng/ust-events.h>
-
-#include <usterr-signal-safe.h>
-#include <ust-helper.h>
-#include <lttng/ust-ctl.h>
-#include <ust-comm.h>
-#include <ust-fd.h>
-#include <ust-dynamic-type.h>
-#include <ust-context-provider.h>
-#include "error.h"
-#include "compat.h"
-#include "lttng-ust-uuid.h"
-
-#include "tracepoint-internal.h"
-#include "string-utils.h"
-#include "lttng-bytecode.h"
-#include "lttng-tracer.h"
-#include "lttng-tracer-core.h"
-#include "lttng-ust-statedump.h"
-#include "context-internal.h"
-#include "ust-events-internal.h"
-#include "wait.h"
-#include "../libringbuffer/shm.h"
-#include "../libringbuffer/frontend_types.h"
-#include "../libringbuffer/frontend.h"
-#include "../libcounter/counter.h"
-#include "jhash.h"
-#include <lttng/ust-abi.h>
-#include "context-provider-internal.h"
-
-/*
- * All operations within this file are called by the communication
- * thread, under ust_lock protection.
- */
-
-static CDS_LIST_HEAD(sessions);
-static CDS_LIST_HEAD(event_notifier_groups);
-
-struct cds_list_head *lttng_get_sessions(void)
-{
- return &sessions;
-}
-
-static void _lttng_event_destroy(struct lttng_ust_event_common *event);
-static void _lttng_enum_destroy(struct lttng_enum *_enum);
-
-static
-void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session);
-static
-void lttng_session_sync_event_enablers(struct lttng_ust_session *session);
-static
-void lttng_event_notifier_group_sync_enablers(
- struct lttng_event_notifier_group *event_notifier_group);
-static
-void lttng_enabler_destroy(struct lttng_enabler *enabler);
-
-bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
-{
- if (strlen(desc->probe_desc->provider_name) + 1 +
- strlen(desc->event_name) >= LTTNG_UST_ABI_SYM_NAME_LEN)
- return false;
- return true;
-}
-
-void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
- char *name)
-{
- strcpy(name, desc->probe_desc->provider_name);
- strcat(name, ":");
- strcat(name, desc->event_name);
-}
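
The length check in lttng_ust_validate_event_name() reserves one byte for the ':' separator, and the use of >= (rather than >) leaves room for the terminating NUL once lttng_ust_format_event_name() assembles the full "provider:event" string. An equivalent standalone check, with the length constant's value assumed for illustration:

    #include <stdbool.h>
    #include <string.h>

    #define SYM_NAME_LEN 256	/* assumed; mirrors LTTNG_UST_ABI_SYM_NAME_LEN */

    /* "provider" + ':' + "event" + '\0' must fit in SYM_NAME_LEN bytes. */
    static bool event_name_fits(const char *provider, const char *event)
    {
    	return strlen(provider) + 1 + strlen(event) < SYM_NAME_LEN;
    }
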
-
-/*
- * Called with ust lock held.
- */
-int lttng_session_active(void)
-{
- struct lttng_ust_session_private *iter;
-
- cds_list_for_each_entry(iter, &sessions, node) {
- if (iter->pub->active)
- return 1;
- }
- return 0;
-}
-
-static
-int lttng_loglevel_match(int loglevel,
- unsigned int has_loglevel,
- enum lttng_ust_abi_loglevel_type req_type,
- int req_loglevel)
-{
- if (!has_loglevel)
- loglevel = TRACE_DEFAULT;
- switch (req_type) {
- case LTTNG_UST_ABI_LOGLEVEL_RANGE:
- if (loglevel <= req_loglevel
- || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
- return 1;
- else
- return 0;
- case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
- if (loglevel == req_loglevel
- || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
- return 1;
- else
- return 0;
- case LTTNG_UST_ABI_LOGLEVEL_ALL:
- default:
- if (loglevel <= TRACE_DEBUG)
- return 1;
- else
- return 0;
- }
-}
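
For reference, the matching rules above restated compactly (lower numeric loglevels are more severe; the TRACE_DEBUG value of 14 is assumed here for illustration):

    #include <stdbool.h>

    enum req_type { REQ_RANGE, REQ_SINGLE, REQ_ALL };

    /* Minimal restatement of lttng_loglevel_match(); illustrative only. */
    static bool loglevel_matches(int loglevel, enum req_type type, int req)
    {
    	const int trace_debug = 14;	/* assumed numeric value */

    	if (req == -1)			/* -1 acts as "match everything" */
    		return loglevel <= trace_debug;
    	switch (type) {
    	case REQ_RANGE:
    		return loglevel <= req;	/* req or anything more severe */
    	case REQ_SINGLE:
    		return loglevel == req;	/* exactly req */
    	case REQ_ALL:
    	default:
    		return loglevel <= trace_debug;
    	}
    }
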
-
-struct lttng_ust_session *lttng_session_create(void)
-{
- struct lttng_ust_session *session;
- struct lttng_ust_session_private *session_priv;
- int i;
-
- session = zmalloc(sizeof(struct lttng_ust_session));
- if (!session)
- return NULL;
- session->struct_size = sizeof(struct lttng_ust_session);
- session_priv = zmalloc(sizeof(struct lttng_ust_session_private));
- if (!session_priv) {
- free(session);
- return NULL;
- }
- session->priv = session_priv;
- session_priv->pub = session;
- if (lttng_context_init_all(&session->priv->ctx)) {
- free(session_priv);
- free(session);
- return NULL;
- }
- CDS_INIT_LIST_HEAD(&session->priv->chan_head);
- CDS_INIT_LIST_HEAD(&session->priv->events_head);
- CDS_INIT_LIST_HEAD(&session->priv->enums_head);
- CDS_INIT_LIST_HEAD(&session->priv->enablers_head);
- for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
- CDS_INIT_HLIST_HEAD(&session->priv->events_ht.table[i]);
- for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
- CDS_INIT_HLIST_HEAD(&session->priv->enums_ht.table[i]);
- cds_list_add(&session->priv->node, &sessions);
- return session;
-}
-
-struct lttng_counter *lttng_ust_counter_create(
- const char *counter_transport_name,
- size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
-{
- struct lttng_counter_transport *counter_transport = NULL;
- struct lttng_counter *counter = NULL;
-
- counter_transport = lttng_counter_transport_find(counter_transport_name);
- if (!counter_transport)
- goto notransport;
- counter = zmalloc(sizeof(struct lttng_counter));
- if (!counter)
- goto nomem;
-
- counter->ops = &counter_transport->ops;
- counter->transport = counter_transport;
-
- counter->counter = counter->ops->counter_create(
- number_dimensions, dimensions, 0,
- -1, 0, NULL, false);
- if (!counter->counter) {
- goto create_error;
- }
-
- return counter;
-
-create_error:
- free(counter);
-nomem:
-notransport:
- return NULL;
-}
-
-static
-void lttng_ust_counter_destroy(struct lttng_counter *counter)
-{
- counter->ops->counter_destroy(counter->counter);
- free(counter);
-}
-
-struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
-{
- struct lttng_event_notifier_group *event_notifier_group;
- int i;
-
- event_notifier_group = zmalloc(sizeof(struct lttng_event_notifier_group));
- if (!event_notifier_group)
- return NULL;
-
- /* Add all contexts. */
- if (lttng_context_init_all(&event_notifier_group->ctx)) {
- free(event_notifier_group);
- return NULL;
- }
-
- CDS_INIT_LIST_HEAD(&event_notifier_group->enablers_head);
- CDS_INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
- for (i = 0; i < LTTNG_UST_EVENT_NOTIFIER_HT_SIZE; i++)
- CDS_INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
-
- cds_list_add(&event_notifier_group->node, &event_notifier_groups);
-
- return event_notifier_group;
-}
-
-/*
- * Only used internally at session destruction.
- */
-static
-void _lttng_channel_unmap(struct lttng_ust_channel_buffer *lttng_chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_shm_handle *handle;
-
- cds_list_del(&lttng_chan->priv->node);
- lttng_destroy_context(lttng_chan->priv->ctx);
- chan = lttng_chan->priv->rb_chan;
- handle = chan->handle;
- channel_destroy(chan, handle, 0);
- free(lttng_chan->parent);
- free(lttng_chan->priv);
- free(lttng_chan);
-}
-
-static
-void register_event(struct lttng_ust_event_common *event)
-{
- int ret;
- const struct lttng_ust_event_desc *desc;
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
-
- assert(event->priv->registered == 0);
- desc = event->priv->desc;
- lttng_ust_format_event_name(desc, name);
- ret = lttng_ust_tp_probe_register_queue_release(name,
- desc->probe_callback,
- event, desc->signature);
- WARN_ON_ONCE(ret);
- if (!ret)
- event->priv->registered = 1;
-}
-
-static
-void unregister_event(struct lttng_ust_event_common *event)
-{
- int ret;
- const struct lttng_ust_event_desc *desc;
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
-
- assert(event->priv->registered == 1);
- desc = event->priv->desc;
- lttng_ust_format_event_name(desc, name);
- ret = lttng_ust_tp_probe_unregister_queue_release(name,
- desc->probe_callback,
- event);
- WARN_ON_ONCE(ret);
- if (!ret)
- event->priv->registered = 0;
-}
-
-static
-void _lttng_event_unregister(struct lttng_ust_event_common *event)
-{
- if (event->priv->registered)
- unregister_event(event);
-}
-
-void lttng_session_destroy(struct lttng_ust_session *session)
-{
- struct lttng_ust_channel_buffer_private *chan, *tmpchan;
- struct lttng_ust_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
- struct lttng_enum *_enum, *tmp_enum;
- struct lttng_event_enabler *event_enabler, *event_tmpenabler;
-
- CMM_ACCESS_ONCE(session->active) = 0;
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- _lttng_event_unregister(event_recorder_priv->parent.pub);
- }
- lttng_ust_urcu_synchronize_rcu(); /* Wait for in-flight events to complete */
- lttng_ust_tp_probe_prune_release_queue();
- cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
- &session->priv->enablers_head, node)
- lttng_event_enabler_destroy(event_enabler);
- cds_list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv,
- &session->priv->events_head, node)
- _lttng_event_destroy(event_recorder_priv->parent.pub);
- cds_list_for_each_entry_safe(_enum, tmp_enum,
- &session->priv->enums_head, node)
- _lttng_enum_destroy(_enum);
- cds_list_for_each_entry_safe(chan, tmpchan, &session->priv->chan_head, node)
- _lttng_channel_unmap(chan->pub);
- cds_list_del(&session->priv->node);
- lttng_destroy_context(session->priv->ctx);
- free(session->priv);
- free(session);
-}
-
-void lttng_event_notifier_group_destroy(
- struct lttng_event_notifier_group *event_notifier_group)
-{
- int close_ret;
- struct lttng_event_notifier_enabler *notifier_enabler, *tmpnotifier_enabler;
- struct lttng_ust_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
-
- if (!event_notifier_group) {
- return;
- }
-
- cds_list_for_each_entry(event_notifier_priv,
- &event_notifier_group->event_notifiers_head, node)
- _lttng_event_unregister(event_notifier_priv->parent.pub);
-
- lttng_ust_urcu_synchronize_rcu();
-
- cds_list_for_each_entry_safe(notifier_enabler, tmpnotifier_enabler,
- &event_notifier_group->enablers_head, node)
- lttng_event_notifier_enabler_destroy(notifier_enabler);
-
- cds_list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
- &event_notifier_group->event_notifiers_head, node)
- _lttng_event_destroy(event_notifier_priv->parent.pub);
-
- if (event_notifier_group->error_counter)
- lttng_ust_counter_destroy(event_notifier_group->error_counter);
-
- /* Close the notification fd to the listener of event_notifiers. */
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(event_notifier_group->notification_fd);
- if (!close_ret) {
- lttng_ust_delete_fd_from_tracker(
- event_notifier_group->notification_fd);
- } else {
- PERROR("close");
- abort();
- }
- lttng_ust_unlock_fd_tracker();
-
- cds_list_del(&event_notifier_group->node);
-
- free(event_notifier_group);
-}
-
-static
-void lttng_enabler_destroy(struct lttng_enabler *enabler)
-{
- struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
- struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
-
- if (!enabler) {
- return;
- }
-
- /* Destroy filter bytecode */
- cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
- &enabler->filter_bytecode_head, node) {
- free(filter_node);
- }
-
- /* Destroy excluders */
- cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
- &enabler->excluder_head, node) {
- free(excluder_node);
- }
-}
-
-void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- if (!event_notifier_enabler) {
- return;
- }
-
- cds_list_del(&event_notifier_enabler->node);
-
- lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
-
- free(event_notifier_enabler);
-}
-
-static
-int lttng_enum_create(const struct lttng_ust_enum_desc *desc,
- struct lttng_ust_session *session)
-{
- const char *enum_name = desc->name;
- struct lttng_enum *_enum;
- struct cds_hlist_head *head;
- int ret = 0;
- size_t name_len = strlen(enum_name);
- uint32_t hash;
- int notify_socket;
-
- /* Check if this enum is already registered for this session. */
- hash = jhash(enum_name, name_len, 0);
- head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
-
- _enum = lttng_ust_enum_get_from_desc(session, desc);
- if (_enum) {
- ret = -EEXIST;
- goto exist;
- }
-
- notify_socket = lttng_get_notify_socket(session->priv->owner);
- if (notify_socket < 0) {
- ret = notify_socket;
- goto socket_error;
- }
-
- _enum = zmalloc(sizeof(*_enum));
- if (!_enum) {
- ret = -ENOMEM;
- goto cache_error;
- }
- _enum->session = session;
- _enum->desc = desc;
-
- ret = ustcomm_register_enum(notify_socket,
- session->priv->objd,
- enum_name,
- desc->nr_entries,
- desc->entries,
- &_enum->id);
- if (ret < 0) {
- DBG("Error (%d) registering enumeration to sessiond", ret);
- goto sessiond_register_error;
- }
- cds_list_add(&_enum->node, &session->priv->enums_head);
- cds_hlist_add_head(&_enum->hlist, head);
- return 0;
-
-sessiond_register_error:
- free(_enum);
-cache_error:
-socket_error:
-exist:
- return ret;
-}
-
-static
-int lttng_create_enum_check(const struct lttng_ust_type_common *type,
- struct lttng_ust_session *session)
-{
- switch (type->type) {
- case lttng_ust_type_enum:
- {
- const struct lttng_ust_enum_desc *enum_desc;
- int ret;
-
- enum_desc = lttng_ust_get_type_enum(type)->desc;
- ret = lttng_enum_create(enum_desc, session);
- if (ret && ret != -EEXIST) {
- DBG("Unable to create enum error: (%d)", ret);
- return ret;
- }
- break;
- }
- case lttng_ust_type_dynamic:
- {
- const struct lttng_ust_event_field *tag_field_generic;
- const struct lttng_ust_enum_desc *enum_desc;
- int ret;
-
- tag_field_generic = lttng_ust_dynamic_type_tag_field();
- enum_desc = lttng_ust_get_type_enum(tag_field_generic->type)->desc;
- ret = lttng_enum_create(enum_desc, session);
- if (ret && ret != -EEXIST) {
- DBG("Unable to create enum error: (%d)", ret);
- return ret;
- }
- break;
- }
- default:
- /* TODO: nested types when they become supported. */
- break;
- }
- return 0;
-}
-
-static
-int lttng_create_all_event_enums(size_t nr_fields,
- const struct lttng_ust_event_field **event_fields,
- struct lttng_ust_session *session)
-{
- size_t i;
- int ret;
-
- /* For each field, ensure enum is part of the session. */
- for (i = 0; i < nr_fields; i++) {
- const struct lttng_ust_type_common *type = event_fields[i]->type;
-
- ret = lttng_create_enum_check(type, session);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-static
-int lttng_create_all_ctx_enums(size_t nr_fields,
- struct lttng_ust_ctx_field *ctx_fields,
- struct lttng_ust_session *session)
-{
- size_t i;
- int ret;
-
- /* For each field, ensure enum is part of the session. */
- for (i = 0; i < nr_fields; i++) {
- const struct lttng_ust_type_common *type = ctx_fields[i].event_field->type;
-
- ret = lttng_create_enum_check(type, session);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-/*
- * Ensure that a state-dump will be performed for this session at the end
- * of the current handle_message().
- */
-int lttng_session_statedump(struct lttng_ust_session *session)
-{
- session->priv->statedump_pending = 1;
- lttng_ust_sockinfo_session_enabled(session->priv->owner);
- return 0;
-}
-
-int lttng_session_enable(struct lttng_ust_session *session)
-{
- int ret = 0;
- struct lttng_ust_channel_buffer_private *chan;
- int notify_socket;
-
- if (session->active) {
- ret = -EBUSY;
- goto end;
- }
-
- notify_socket = lttng_get_notify_socket(session->priv->owner);
- if (notify_socket < 0)
- return notify_socket;
-
- /* Set transient enabler state to "enabled" */
- session->priv->tstate = 1;
-
- /* We need to sync enablers with session before activation. */
- lttng_session_sync_event_enablers(session);
-
- /*
- * Snapshot the number of events per channel to know the type of header
- * we need to use.
- */
- cds_list_for_each_entry(chan, &session->priv->chan_head, node) {
- struct lttng_ust_ctx *ctx;
- struct lttng_ust_ctx_field *fields = NULL;
- size_t nr_fields = 0;
- uint32_t chan_id;
-
- /* Don't change it on session stop/restart. */
- if (chan->header_type)
- continue;
- ctx = chan->ctx;
- if (ctx) {
- nr_fields = ctx->nr_fields;
- fields = ctx->fields;
- ret = lttng_create_all_ctx_enums(nr_fields, fields,
- session);
- if (ret < 0) {
- DBG("Error (%d) adding enum to session", ret);
- return ret;
- }
- }
- ret = ustcomm_register_channel(notify_socket,
- session,
- session->priv->objd,
- chan->parent.objd,
- nr_fields,
- fields,
- &chan_id,
- &chan->header_type);
- if (ret) {
- DBG("Error (%d) registering channel to sessiond", ret);
- return ret;
- }
- if (chan_id != chan->id) {
- DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
- chan_id, chan->id);
- return -EINVAL;
- }
- }
-
- /* Set atomically the state to "active" */
- CMM_ACCESS_ONCE(session->active) = 1;
- CMM_ACCESS_ONCE(session->priv->been_active) = 1;
-
- ret = lttng_session_statedump(session);
- if (ret)
- return ret;
-end:
- return ret;
-}
-
-int lttng_session_disable(struct lttng_ust_session *session)
-{
- int ret = 0;
-
- if (!session->active) {
- ret = -EBUSY;
- goto end;
- }
- /* Set atomically the state to "inactive" */
- CMM_ACCESS_ONCE(session->active) = 0;
-
- /* Set transient enabler state to "disabled" */
- session->priv->tstate = 0;
- lttng_session_sync_event_enablers(session);
-end:
- return ret;
-}
-
-int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
-{
- int ret = 0;
-
- if (lttng_channel->enabled) {
- ret = -EBUSY;
- goto end;
- }
- /* Set transient enabler state to "enabled" */
- lttng_channel->priv->tstate = 1;
- lttng_session_sync_event_enablers(lttng_channel->session);
- /* Set atomically the state to "enabled" */
- CMM_ACCESS_ONCE(lttng_channel->enabled) = 1;
-end:
- return ret;
-}
-
-int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
-{
- int ret = 0;
-
- if (!lttng_channel->enabled) {
- ret = -EBUSY;
- goto end;
- }
- /* Set atomically the state to "disabled" */
- CMM_ACCESS_ONCE(lttng_channel->enabled) = 0;
- /* Set transient enabler state to "enabled" */
- lttng_channel->priv->tstate = 0;
- lttng_session_sync_event_enablers(lttng_channel->session);
-end:
- return ret;
-}
-
-static inline
-struct cds_hlist_head *borrow_hash_table_bucket(
- struct cds_hlist_head *hash_table,
- unsigned int hash_table_size,
- const struct lttng_ust_event_desc *desc)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- size_t name_len;
- uint32_t hash;
-
- lttng_ust_format_event_name(desc, name);
- name_len = strlen(name);
-
- hash = jhash(name, name_len, 0);
- return &hash_table[hash & (hash_table_size - 1)];
-}
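
The returned bucket is hash & (size - 1), which distributes uniformly only because LTTNG_UST_EVENT_HT_SIZE and the other *_HT_SIZE constants are powers of two. Stated on its own (illustrative):

    #include <stdint.h>

    /* Valid only when table_size is a power of two; otherwise some buckets
     * would never be selected and others would be overloaded. */
    static inline uint32_t bucket_index(uint32_t hash, uint32_t table_size)
    {
    	return hash & (table_size - 1);
    }
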
-
-/*
- * Supports event creation while tracing session is active.
- */
-static
-int lttng_event_recorder_create(const struct lttng_ust_event_desc *desc,
- struct lttng_ust_channel_buffer *chan)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- struct lttng_ust_event_recorder *event_recorder;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- struct lttng_ust_session *session = chan->parent->session;
- struct cds_hlist_head *head;
- int ret = 0;
- int notify_socket, loglevel;
- const char *uri;
-
- head = borrow_hash_table_bucket(chan->parent->session->priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, desc);
-
- notify_socket = lttng_get_notify_socket(session->priv->owner);
- if (notify_socket < 0) {
- ret = notify_socket;
- goto socket_error;
- }
-
- ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
- session);
- if (ret < 0) {
- DBG("Error (%d) adding enum to session", ret);
- goto create_enum_error;
- }
-
- /*
- * Check if the loglevel matches. Refuse to connect the event if not.
- */
- event_recorder = zmalloc(sizeof(struct lttng_ust_event_recorder));
- if (!event_recorder) {
- ret = -ENOMEM;
- goto cache_error;
- }
- event_recorder->struct_size = sizeof(struct lttng_ust_event_recorder);
-
- event_recorder->parent = zmalloc(sizeof(struct lttng_ust_event_common));
- if (!event_recorder->parent) {
- ret = -ENOMEM;
- goto parent_error;
- }
- event_recorder->parent->struct_size = sizeof(struct lttng_ust_event_common);
- event_recorder->parent->type = LTTNG_UST_EVENT_TYPE_RECORDER;
- event_recorder->parent->child = event_recorder;
-
- event_recorder_priv = zmalloc(sizeof(struct lttng_ust_event_recorder_private));
- if (!event_recorder_priv) {
- ret = -ENOMEM;
- goto priv_error;
- }
- event_recorder->priv = event_recorder_priv;
- event_recorder_priv->pub = event_recorder;
- event_recorder->parent->priv = &event_recorder_priv->parent;
- event_recorder_priv->parent.pub = event_recorder->parent;
-
- event_recorder->chan = chan;
-
- /* Event will be enabled by enabler sync. */
- event_recorder->parent->run_filter = lttng_ust_interpret_event_filter;
- event_recorder->parent->enabled = 0;
- event_recorder->parent->priv->registered = 0;
- CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->filter_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->enablers_ref_head);
- event_recorder->parent->priv->desc = desc;
-
- if (desc->loglevel)
- loglevel = *(*event_recorder->parent->priv->desc->loglevel);
- else
- loglevel = TRACE_DEFAULT;
- if (desc->model_emf_uri)
- uri = *(desc->model_emf_uri);
- else
- uri = NULL;
-
- lttng_ust_format_event_name(desc, name);
-
- /* Fetch event ID from sessiond */
- ret = ustcomm_register_event(notify_socket,
- session,
- session->priv->objd,
- chan->priv->parent.objd,
- name,
- loglevel,
- desc->signature,
- desc->nr_fields,
- desc->fields,
- uri,
- &event_recorder->priv->id);
- if (ret < 0) {
- DBG("Error (%d) registering event to sessiond", ret);
- goto sessiond_register_error;
- }
-
- cds_list_add(&event_recorder_priv->node, &chan->parent->session->priv->events_head);
- cds_hlist_add_head(&event_recorder_priv->hlist, head);
- return 0;
-
-sessiond_register_error:
- free(event_recorder_priv);
-priv_error:
- free(event_recorder->parent);
-parent_error:
- free(event_recorder);
-cache_error:
-create_enum_error:
-socket_error:
- return ret;
-}
-
-static
-int lttng_event_notifier_create(const struct lttng_ust_event_desc *desc,
- uint64_t token, uint64_t error_counter_index,
- struct lttng_event_notifier_group *event_notifier_group)
-{
- struct lttng_ust_event_notifier *event_notifier;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct cds_hlist_head *head;
- int ret = 0;
-
- /*
- * Get the hashtable bucket into which the created
- * lttng_event_notifier object should be inserted.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
-
- event_notifier = zmalloc(sizeof(struct lttng_ust_event_notifier));
- if (!event_notifier) {
- ret = -ENOMEM;
- goto error;
- }
- event_notifier->struct_size = sizeof(struct lttng_ust_event_notifier);
-
- event_notifier->parent = zmalloc(sizeof(struct lttng_ust_event_common));
- if (!event_notifier->parent) {
- ret = -ENOMEM;
- goto parent_error;
- }
- event_notifier->parent->struct_size = sizeof(struct lttng_ust_event_common);
- event_notifier->parent->type = LTTNG_UST_EVENT_TYPE_NOTIFIER;
- event_notifier->parent->child = event_notifier;
-
- event_notifier_priv = zmalloc(sizeof(struct lttng_ust_event_notifier_private));
- if (!event_notifier_priv) {
- ret = -ENOMEM;
- goto priv_error;
- }
- event_notifier->priv = event_notifier_priv;
- event_notifier_priv->pub = event_notifier;
- event_notifier->parent->priv = &event_notifier_priv->parent;
- event_notifier_priv->parent.pub = event_notifier->parent;
-
- event_notifier_priv->group = event_notifier_group;
- event_notifier_priv->parent.user_token = token;
- event_notifier_priv->error_counter_index = error_counter_index;
-
- /* Event notifier will be enabled by enabler sync. */
- event_notifier->parent->run_filter = lttng_ust_interpret_event_filter;
- event_notifier->parent->enabled = 0;
- event_notifier_priv->parent.registered = 0;
-
- CDS_INIT_LIST_HEAD(&event_notifier->parent->priv->filter_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
- CDS_INIT_LIST_HEAD(&event_notifier_priv->parent.enablers_ref_head);
- event_notifier_priv->parent.desc = desc;
- event_notifier->notification_send = lttng_event_notifier_notification_send;
-
- cds_list_add(&event_notifier_priv->node,
- &event_notifier_group->event_notifiers_head);
- cds_hlist_add_head(&event_notifier_priv->hlist, head);
-
- return 0;
-
-priv_error:
- free(event_notifier->parent);
-parent_error:
- free(event_notifier);
-error:
- return ret;
-}
-
-static
-int lttng_desc_match_star_glob_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- int loglevel = 0;
- unsigned int has_loglevel = 0;
-
- lttng_ust_format_event_name(desc, name);
- assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
- if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
- name, SIZE_MAX))
- return 0;
- if (desc->loglevel) {
- loglevel = *(*desc->loglevel);
- has_loglevel = 1;
- }
- if (!lttng_loglevel_match(loglevel,
- has_loglevel,
- enabler->event_param.loglevel_type,
- enabler->event_param.loglevel))
- return 0;
- return 1;
-}
-
-static
-int lttng_desc_match_event_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
-{
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
- int loglevel = 0;
- unsigned int has_loglevel = 0;
-
- lttng_ust_format_event_name(desc, name);
- assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
- if (strcmp(name, enabler->event_param.name))
- return 0;
- if (desc->loglevel) {
- loglevel = *(*desc->loglevel);
- has_loglevel = 1;
- }
- if (!lttng_loglevel_match(loglevel,
- has_loglevel,
- enabler->event_param.loglevel_type,
- enabler->event_param.loglevel))
- return 0;
- return 1;
-}
-
-static
-int lttng_desc_match_enabler(const struct lttng_ust_event_desc *desc,
- struct lttng_enabler *enabler)
-{
- switch (enabler->format_type) {
- case LTTNG_ENABLER_FORMAT_STAR_GLOB:
- {
- struct lttng_ust_excluder_node *excluder;
-
- if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
- return 0;
- }
-
- /*
- * If the matching event matches with an excluder,
- * return 'does not match'
- */
- cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
- int count;
-
- for (count = 0; count < excluder->excluder.count; count++) {
- int len;
- char *excluder_name;
-
- excluder_name = (char *) (excluder->excluder.names)
- + count * LTTNG_UST_ABI_SYM_NAME_LEN;
- len = strnlen(excluder_name, LTTNG_UST_ABI_SYM_NAME_LEN);
- if (len > 0) {
- char name[LTTNG_UST_ABI_SYM_NAME_LEN];
-
- lttng_ust_format_event_name(desc, name);
- if (strutils_star_glob_match(excluder_name, len, name, SIZE_MAX)) {
- return 0;
- }
- }
- }
- }
- return 1;
- }
- case LTTNG_ENABLER_FORMAT_EVENT:
- return lttng_desc_match_event_enabler(desc, enabler);
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_event_recorder *event_recorder)
-{
- if (lttng_desc_match_enabler(event_recorder->parent->priv->desc,
- lttng_event_enabler_as_enabler(event_enabler))
- && event_recorder->chan == event_enabler->chan)
- return 1;
- else
- return 0;
-}
-
-static
-int lttng_event_notifier_enabler_match_event_notifier(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_event_notifier *event_notifier)
-{
- int desc_matches = lttng_desc_match_enabler(event_notifier->priv->parent.desc,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
-
- if (desc_matches && event_notifier->priv->group == event_notifier_enabler->group &&
- event_notifier->priv->parent.user_token == event_notifier_enabler->user_token)
- return 1;
- else
- return 0;
-}
-
-static
-struct lttng_enabler_ref *lttng_enabler_ref(
- struct cds_list_head *enabler_ref_list,
- struct lttng_enabler *enabler)
-{
- struct lttng_enabler_ref *enabler_ref;
-
- cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
- if (enabler_ref->ref == enabler)
- return enabler_ref;
- }
- return NULL;
-}
-
-/*
- * Create a struct lttng_event if it is missing and its descriptor is
- * present in the list of registered tracepoint probes.
- */
-static
-void lttng_create_event_recorder_if_missing(struct lttng_event_enabler *event_enabler)
-{
- struct lttng_ust_session *session = event_enabler->chan->parent->session;
- struct lttng_ust_registered_probe *reg_probe;
- const struct lttng_ust_event_desc *desc;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- int i;
- struct cds_list_head *probe_list;
-
- probe_list = lttng_get_probe_list_head();
- /*
- * For each probe event matching our enabler, create an associated
- * lttng_event if not already present.
- */
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- int ret;
- bool found = false;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
-
- desc = probe_desc->event_desc[i];
- if (!lttng_desc_match_enabler(desc,
- lttng_event_enabler_as_enabler(event_enabler)))
- continue;
-
- head = borrow_hash_table_bucket(
- session->priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, desc);
-
- cds_hlist_for_each_entry(event_recorder_priv, node, head, hlist) {
- if (event_recorder_priv->parent.desc == desc
- && event_recorder_priv->pub->chan == event_enabler->chan) {
- found = true;
- break;
- }
- }
- if (found)
- continue;
-
- /*
- * We need to create an event for this
- * event probe.
- */
- ret = lttng_event_recorder_create(probe_desc->event_desc[i],
- event_enabler->chan);
- if (ret) {
- DBG("Unable to create event \"%s:%s\", error %d\n",
- probe_desc->provider_name,
- probe_desc->event_desc[i]->event_name, ret);
- }
- }
- }
-}
-
-static
-void probe_provider_event_for_each(const struct lttng_ust_probe_desc *provider_desc,
- void (*event_func)(struct lttng_ust_event_common *event))
-{
- struct cds_hlist_node *node, *tmp_node;
- struct cds_list_head *sessionsp;
- unsigned int i;
-
- /* Get handle on list of sessions. */
- sessionsp = lttng_get_sessions();
-
- /*
- * Iterate over all events in the probe provider description, and over
- * all sessions and event notifier groups, to apply event_func to each
- * matching event.
- */
- for (i = 0; i < provider_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc;
- struct lttng_event_notifier_group *event_notifier_group;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct lttng_ust_session_private *session_priv;
- struct cds_hlist_head *head;
-
- event_desc = provider_desc->event_desc[i];
-
- /*
- * Iterate over all sessions to find the current event
- * description.
- */
- cds_list_for_each_entry(session_priv, sessionsp, node) {
- /*
- * Get the list of events in the hashtable bucket and
- * iterate to find the event matching this descriptor.
- */
- head = borrow_hash_table_bucket(
- session_priv->events_ht.table,
- LTTNG_UST_EVENT_HT_SIZE, event_desc);
-
- cds_hlist_for_each_entry_safe(event_recorder_priv, node, tmp_node, head, hlist) {
- if (event_desc == event_recorder_priv->parent.desc) {
- event_func(event_recorder_priv->parent.pub);
- break;
- }
- }
- }
-
- /*
- * Iterate over all event_notifier groups to find the current event
- * description.
- */
- cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
- /*
- * Get the list of event_notifiers in the hashtable bucket and
- * iterate to find the event_notifier matching this
- * descriptor.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, event_desc);
-
- cds_hlist_for_each_entry_safe(event_notifier_priv, node, tmp_node, head, hlist) {
- if (event_desc == event_notifier_priv->parent.desc) {
- event_func(event_notifier_priv->parent.pub);
- break;
- }
- }
- }
- }
-}
-
-static
-void _event_enum_destroy(struct lttng_ust_event_common *event)
-{
- switch (event->type) {
- case LTTNG_UST_EVENT_TYPE_RECORDER:
- {
- struct lttng_ust_event_recorder *event_recorder = event->child;
- struct lttng_ust_session *session = event_recorder->chan->parent->session;
- unsigned int i;
-
- /* Destroy enums of the current event. */
- for (i = 0; i < event_recorder->parent->priv->desc->nr_fields; i++) {
- const struct lttng_ust_enum_desc *enum_desc;
- const struct lttng_ust_event_field *field;
- struct lttng_enum *curr_enum;
-
- field = event_recorder->parent->priv->desc->fields[i];
- switch (field->type->type) {
- case lttng_ust_type_enum:
- enum_desc = lttng_ust_get_type_enum(field->type)->desc;
- break;
- default:
- continue;
- }
-
- curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
- if (curr_enum) {
- _lttng_enum_destroy(curr_enum);
- }
- }
- break;
- }
- case LTTNG_UST_EVENT_TYPE_NOTIFIER:
- break;
- default:
- abort();
- }
- /* Destroy event. */
- _lttng_event_destroy(event);
-}
-
-/*
- * Iterate over all the UST sessions to unregister and destroy all probes from
- * the probe provider descriptor received as argument. Must be called with the
- * ust_lock held.
- */
-void lttng_probe_provider_unregister_events(
- const struct lttng_ust_probe_desc *provider_desc)
-{
- /*
- * Iterate over all events in the probe provider description and sessions
- * to queue the unregistration of the events.
- */
- probe_provider_event_for_each(provider_desc, _lttng_event_unregister);
-
- /* Wait for grace period. */
- lttng_ust_urcu_synchronize_rcu();
- /* Prune the unregistration queue. */
- lttng_ust_tp_probe_prune_release_queue();
-
- /*
- * It is now safe to destroy the events and remove them from the event list
- * and hashtables.
- */
- probe_provider_event_for_each(provider_desc, _event_enum_destroy);
-}
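
The teardown above is deliberately two-phase: every matching event is first unregistered (so no new probe callers can reach it), a grace period is awaited, and only then is memory reclaimed. A generic sketch of that shape against stock liburcu (illustrative list type and callbacks; lttng-ust uses its UST-specific RCU flavour and cds_ lists):

    #include <urcu.h>

    struct obj { struct obj *next; };

    /* Illustrative two-phase teardown: unpublish all, wait, then free. */
    static void teardown_all(struct obj *head,
    		void (*unpublish)(struct obj *),
    		void (*destroy)(struct obj *))
    {
    	struct obj *o, *next;

    	for (o = head; o; o = o->next)
    		unpublish(o);	/* no new RCU readers can reach o */
    	synchronize_rcu();	/* wait out readers already in flight */
    	for (o = head; o; o = next) {
    		next = o->next;	/* save link before freeing */
    		destroy(o);
    	}
    }
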
-
-/*
- * Create events associated with an event enabler (if not already present),
- * and add backward reference from the event to the enabler.
- */
-static
-int lttng_event_enabler_ref_event_recorders(struct lttng_event_enabler *event_enabler)
-{
- struct lttng_ust_session *session = event_enabler->chan->parent->session;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
-
- if (!lttng_event_enabler_as_enabler(event_enabler)->enabled)
- goto end;
-
- /* First ensure that probe events are created for this enabler. */
- lttng_create_event_recorder_if_missing(event_enabler);
-
- /* For each event matching enabler in session event list. */
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- struct lttng_enabler_ref *enabler_ref;
-
- if (!lttng_event_enabler_match_event(event_enabler, event_recorder_priv->pub))
- continue;
-
- enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
- lttng_event_enabler_as_enabler(event_enabler));
- if (!enabler_ref) {
- /*
- * If no backward ref, create it.
- * Add backward ref from event to enabler.
- */
- enabler_ref = zmalloc(sizeof(*enabler_ref));
- if (!enabler_ref)
- return -ENOMEM;
- enabler_ref->ref = lttng_event_enabler_as_enabler(
- event_enabler);
- cds_list_add(&enabler_ref->node,
- &event_recorder_priv->parent.enablers_ref_head);
- }
-
- /*
- * Link filter bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
- &session->priv->ctx,
- &event_recorder_priv->parent.filter_bytecode_runtime_head,
- &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
-
- /* TODO: merge event context. */
- }
-end:
- return 0;
-}
-
-/*
- * Called at library load: connect the probe on all enablers matching
- * this event.
- * Called with session mutex held.
- */
-int lttng_fix_pending_events(void)
-{
- struct lttng_ust_session_private *session_priv;
-
- cds_list_for_each_entry(session_priv, &sessions, node) {
- lttng_session_lazy_sync_event_enablers(session_priv->pub);
- }
- return 0;
-}
-
-int lttng_fix_pending_event_notifiers(void)
-{
- struct lttng_event_notifier_group *event_notifier_group;
-
- cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
- lttng_event_notifier_group_sync_enablers(event_notifier_group);
- }
- return 0;
-}
-
-/*
- * For each session of the owner thread, execute pending statedump.
- * Only dump state for the sessions owned by the caller thread, because
- * we don't keep ust_lock across the entire iteration.
- */
-void lttng_handle_pending_statedump(void *owner)
-{
- struct lttng_ust_session_private *session_priv;
-
- /* Execute state dump */
- do_lttng_ust_statedump(owner);
-
- /* Clear pending state dump */
- if (ust_lock()) {
- goto end;
- }
- cds_list_for_each_entry(session_priv, &sessions, node) {
- if (session_priv->owner != owner)
- continue;
- if (!session_priv->statedump_pending)
- continue;
- session_priv->statedump_pending = 0;
- }
-end:
- ust_unlock();
- return;
-}
-
-static
-void _lttng_event_destroy(struct lttng_ust_event_common *event)
-{
- struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
-
- lttng_free_event_filter_runtime(event);
- /* Free event enabler refs */
- cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
- &event->priv->enablers_ref_head, node)
- free(enabler_ref);
-
- switch (event->type) {
- case LTTNG_UST_EVENT_TYPE_RECORDER:
- {
- struct lttng_ust_event_recorder *event_recorder = event->child;
-
- /* Remove from event list. */
- cds_list_del(&event_recorder->priv->node);
- /* Remove from event hash table. */
- cds_hlist_del(&event_recorder->priv->hlist);
-
- lttng_destroy_context(event_recorder->priv->ctx);
- free(event_recorder->parent);
- free(event_recorder->priv);
- free(event_recorder);
- break;
- }
- case LTTNG_UST_EVENT_TYPE_NOTIFIER:
- {
- struct lttng_ust_event_notifier *event_notifier = event->child;
-
- /* Remove from event list. */
- cds_list_del(&event_notifier->priv->node);
- /* Remove from event hash table. */
- cds_hlist_del(&event_notifier->priv->hlist);
-
- free(event_notifier->priv);
- free(event_notifier->parent);
- free(event_notifier);
- break;
- }
- default:
- abort();
- }
-}
-
-static
-void _lttng_enum_destroy(struct lttng_enum *_enum)
-{
- cds_list_del(&_enum->node);
- cds_hlist_del(&_enum->hlist);
- free(_enum);
-}
-
-void lttng_ust_abi_events_exit(void)
-{
- struct lttng_ust_session_private *session_priv, *tmpsession_priv;
-
- cds_list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, node)
- lttng_session_destroy(session_priv->pub);
-}
-
-/*
- * Enabler management.
- */
-struct lttng_event_enabler *lttng_event_enabler_create(
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event *event_param,
- struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_event_enabler *event_enabler;
-
- event_enabler = zmalloc(sizeof(*event_enabler));
- if (!event_enabler)
- return NULL;
- event_enabler->base.format_type = format_type;
- CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
- memcpy(&event_enabler->base.event_param, event_param,
- sizeof(event_enabler->base.event_param));
- event_enabler->chan = chan;
- /* ctx left NULL */
- event_enabler->base.enabled = 0;
- cds_list_add(&event_enabler->node, &event_enabler->chan->parent->session->priv->enablers_head);
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
-
- return event_enabler;
-}
-
-struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
- struct lttng_event_notifier_group *event_notifier_group,
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event_notifier *event_notifier_param)
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler;
-
- event_notifier_enabler = zmalloc(sizeof(*event_notifier_enabler));
- if (!event_notifier_enabler)
- return NULL;
- event_notifier_enabler->base.format_type = format_type;
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
- CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.excluder_head);
-
- event_notifier_enabler->user_token = event_notifier_param->event.token;
- event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
- event_notifier_enabler->num_captures = 0;
-
- memcpy(&event_notifier_enabler->base.event_param.name,
- event_notifier_param->event.name,
- sizeof(event_notifier_enabler->base.event_param.name));
- event_notifier_enabler->base.event_param.instrumentation =
- event_notifier_param->event.instrumentation;
- event_notifier_enabler->base.event_param.loglevel =
- event_notifier_param->event.loglevel;
- event_notifier_enabler->base.event_param.loglevel_type =
- event_notifier_param->event.loglevel_type;
-
- event_notifier_enabler->base.enabled = 0;
- event_notifier_enabler->group = event_notifier_group;
-
- cds_list_add(&event_notifier_enabler->node,
- &event_notifier_group->enablers_head);
-
- lttng_event_notifier_group_sync_enablers(event_notifier_group);
-
- return event_notifier_enabler;
-}
-
-int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
-{
- lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
-
- return 0;
-}
-
-int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
-{
- lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
-
- return 0;
-}
-
-static
-void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- (*bytecode)->enabler = enabler;
- cds_list_add_tail(&(*bytecode)->node, &enabler->filter_bytecode_head);
- /* Take ownership of bytecode */
- *bytecode = NULL;
-}
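/*
 * Editor's note: the "(*p)->... ; *p = NULL;" sequence above is a
 * pointer ownership transfer. Minimal sketch with hypothetical names
 * (struct node, attach, list_head):
 */
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static struct node *list_head;

static void attach(struct node **node)
{
	(*node)->next = list_head;
	list_head = *node;
	*node = NULL;	/* list owns it now; caller must not free() it */
}

/*
 * After attach(&n), n is NULL, so an erroneous later free(n) is a
 * harmless no-op instead of a double free.
 */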
-
-int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- _lttng_enabler_attach_filter_bytecode(
- lttng_event_enabler_as_enabler(event_enabler), bytecode);
-
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
- return 0;
-}
-
-static
-void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- (*excluder)->enabler = enabler;
- cds_list_add_tail(&(*excluder)->node, &enabler->excluder_head);
- /* Take ownership of excluder */
- *excluder = NULL;
-}
-
-int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- _lttng_enabler_attach_exclusion(
- lttng_event_enabler_as_enabler(event_enabler), excluder);
-
- lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
- return 0;
-}
-
-int lttng_event_notifier_enabler_enable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
-
- return 0;
-}
-
-int lttng_event_notifier_enabler_disable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
-
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_filter_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- _lttng_enabler_attach_filter_bytecode(
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
- bytecode);
-
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_capture_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
-{
- (*bytecode)->enabler = lttng_event_notifier_enabler_as_enabler(
- event_notifier_enabler);
- cds_list_add_tail(&(*bytecode)->node,
- &event_notifier_enabler->capture_bytecode_head);
- /* Take ownership of bytecode */
- *bytecode = NULL;
- event_notifier_enabler->num_captures++;
-
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
- return 0;
-}
-
-int lttng_event_notifier_enabler_attach_exclusion(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_excluder_node **excluder)
-{
- _lttng_enabler_attach_exclusion(
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
- excluder);
-
- lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
- return 0;
-}
-
-int lttng_attach_context(struct lttng_ust_abi_context *context_param,
- union lttng_ust_abi_args *uargs,
- struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
-{
- /*
- * We cannot attach a context after trace has been started for a
- * session because the metadata does not allow expressing this
- * information outside of the original channel scope.
- */
- if (session->priv->been_active)
- return -EPERM;
-
- switch (context_param->ctx) {
- case LTTNG_UST_ABI_CONTEXT_PTHREAD_ID:
- return lttng_add_pthread_id_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
- {
- struct lttng_ust_abi_perf_counter_ctx *perf_ctx_param;
-
- perf_ctx_param = &context_param->u.perf_counter;
- return lttng_add_perf_counter_to_ctx(
- perf_ctx_param->type,
- perf_ctx_param->config,
- perf_ctx_param->name,
- ctx);
- }
- case LTTNG_UST_ABI_CONTEXT_VTID:
- return lttng_add_vtid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VPID:
- return lttng_add_vpid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_PROCNAME:
- return lttng_add_procname_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_IP:
- return lttng_add_ip_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_CPU_ID:
- return lttng_add_cpu_id_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
- return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
- ctx);
- case LTTNG_UST_ABI_CONTEXT_CGROUP_NS:
- return lttng_add_cgroup_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_IPC_NS:
- return lttng_add_ipc_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_MNT_NS:
- return lttng_add_mnt_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_NET_NS:
- return lttng_add_net_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_PID_NS:
- return lttng_add_pid_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_TIME_NS:
- return lttng_add_time_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_USER_NS:
- return lttng_add_user_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_UTS_NS:
- return lttng_add_uts_ns_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VUID:
- return lttng_add_vuid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VEUID:
- return lttng_add_veuid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VSUID:
- return lttng_add_vsuid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VGID:
- return lttng_add_vgid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VEGID:
- return lttng_add_vegid_to_ctx(ctx);
- case LTTNG_UST_ABI_CONTEXT_VSGID:
- return lttng_add_vsgid_to_ctx(ctx);
- default:
- return -EINVAL;
- }
-}
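/*
 * Editor's note: hedged usage sketch for the dispatcher above. Only
 * the fields the switch actually reads are populated; the full
 * struct lttng_ust_abi_context layout lives in the ABI headers.
 * Passing NULL for uargs is acceptable here because uargs is only
 * dereferenced for LTTNG_UST_ABI_CONTEXT_APP_CONTEXT.
 */
static int example_attach_vpid(struct lttng_ust_ctx **ctx,
		struct lttng_ust_session *session)
{
	struct lttng_ust_abi_context context_param = {
		.ctx = LTTNG_UST_ABI_CONTEXT_VPID,
	};

	return lttng_attach_context(&context_param, NULL, ctx, session);
}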
-
-int lttng_event_enabler_attach_context(
- struct lttng_event_enabler *enabler __attribute__((unused)),
- struct lttng_ust_abi_context *context_param __attribute__((unused)))
-{
- return -ENOSYS;
-}
-
-void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
-{
- if (!event_enabler) {
- return;
- }
- cds_list_del(&event_enabler->node);
-
- lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
-
- lttng_destroy_context(event_enabler->ctx);
- free(event_enabler);
-}
-
-/*
- * lttng_session_sync_event_enablers should be called just before starting a
- * session.
- */
-static
-void lttng_session_sync_event_enablers(struct lttng_ust_session *session)
-{
- struct lttng_event_enabler *event_enabler;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
-
- cds_list_for_each_entry(event_enabler, &session->priv->enablers_head, node)
- lttng_event_enabler_ref_event_recorders(event_enabler);
- /*
- * For each event, if at least one of its enablers is enabled,
- * and its channel and session transient states are enabled, we
- * enable the event, else we disable it.
- */
- cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
- struct lttng_enabler_ref *enabler_ref;
- struct lttng_ust_bytecode_runtime *runtime;
- int enabled = 0, has_enablers_without_filter_bytecode = 0;
- int nr_filters = 0;
-
- /* Enable events */
- cds_list_for_each_entry(enabler_ref,
- &event_recorder_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled) {
- enabled = 1;
- break;
- }
- }
- /*
- * Enabled state is based on union of enablers, with
- * intersection of session and channel transient enable
- * states.
- */
- enabled = enabled && session->priv->tstate && event_recorder_priv->pub->chan->priv->parent.tstate;
-
- CMM_STORE_SHARED(event_recorder_priv->pub->parent->enabled, enabled);
- /*
- * Sync tracepoint registration with event enabled
- * state.
- */
- if (enabled) {
- if (!event_recorder_priv->parent.registered)
- register_event(event_recorder_priv->parent.pub);
- } else {
- if (event_recorder_priv->parent.registered)
- unregister_event(event_recorder_priv->parent.pub);
- }
-
- /* Check if has enablers without bytecode enabled */
- cds_list_for_each_entry(enabler_ref,
- &event_recorder_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled
- && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
- has_enablers_without_filter_bytecode = 1;
- break;
- }
- }
- event_recorder_priv->parent.has_enablers_without_filter_bytecode =
- has_enablers_without_filter_bytecode;
-
- /* Enable filters */
- cds_list_for_each_entry(runtime,
- &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_filters++;
- }
- CMM_STORE_SHARED(event_recorder_priv->parent.pub->eval_filter,
- !(has_enablers_without_filter_bytecode || !nr_filters));
- }
- lttng_ust_tp_probe_prune_release_queue();
-}
-
-/* Support for event notifier is introduced by probe provider major version 2. */
-static
-bool lttng_ust_probe_supports_event_notifier(const struct lttng_ust_probe_desc *probe_desc)
-{
- return probe_desc->major >= 2;
-}
-
-static
-void lttng_create_event_notifier_if_missing(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
- struct lttng_ust_registered_probe *reg_probe;
- struct cds_list_head *probe_list;
- int i;
-
- probe_list = lttng_get_probe_list_head();
-
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- int ret;
- bool found = false;
- const struct lttng_ust_event_desc *desc;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
-
- desc = probe_desc->event_desc[i];
-
- if (!lttng_desc_match_enabler(desc,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
- continue;
-
- /*
- * Given the current event_notifier group, get the bucket that
- * the target event_notifier would be if it was already
- * created.
- */
- head = borrow_hash_table_bucket(
- event_notifier_group->event_notifiers_ht.table,
- LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
-
- cds_hlist_for_each_entry(event_notifier_priv, node, head, hlist) {
- /*
- * Check if event_notifier already exists by checking
- * if the event_notifier and enabler share the same
- * description and id.
- */
- if (event_notifier_priv->parent.desc == desc &&
- event_notifier_priv->parent.user_token == event_notifier_enabler->user_token) {
- found = true;
- break;
- }
- }
-
- if (found)
- continue;
-
- /* Check that the probe supports event notifiers, else report the error. */
- if (!lttng_ust_probe_supports_event_notifier(probe_desc)) {
- ERR("Probe \"%s\" contains event \"%s:%s\" which matches an enabled event notifier, "
- "but its version (%u.%u) is too old and does not implement event notifiers. "
- "It needs to be recompiled against a newer version of LTTng-UST, otherwise "
- "this event will not generate any notification.",
- probe_desc->provider_name,
- probe_desc->provider_name, desc->event_name,
- probe_desc->major,
- probe_desc->minor);
- continue;
- }
- /*
- * We need to create an event_notifier for this event probe.
- */
- ret = lttng_event_notifier_create(desc,
- event_notifier_enabler->user_token,
- event_notifier_enabler->error_counter_index,
- event_notifier_group);
- if (ret) {
- DBG("Unable to create event_notifier \"%s:%s\", error %d\n",
- probe_desc->provider_name,
- probe_desc->event_desc[i]->event_name, ret);
- }
- }
- }
-}
-
-/*
- * Create event_notifiers associated with an event_notifier enabler (if not already present).
- */
-static
-int lttng_event_notifier_enabler_ref_event_notifiers(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
-
- /*
- * Only try to create event_notifiers for enablers that are enabled, the user
- * might still be attaching filter or exclusion to the
- * event_notifier_enabler.
- */
- if (!lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled)
- goto end;
-
- /* First, ensure that probe event_notifiers are created for this enabler. */
- lttng_create_event_notifier_if_missing(event_notifier_enabler);
-
- /* Link the created event_notifier with its associated enabler. */
- cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
- struct lttng_enabler_ref *enabler_ref;
-
- if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier_priv->pub))
- continue;
-
- enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
- lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
- if (!enabler_ref) {
- /*
- * If no backward ref, create it.
- * Add backward ref from event_notifier to enabler.
- */
- enabler_ref = zmalloc(sizeof(*enabler_ref));
- if (!enabler_ref)
- return -ENOMEM;
-
- enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
- event_notifier_enabler);
- cds_list_add(&enabler_ref->node,
- &event_notifier_priv->parent.enablers_ref_head);
- }
-
- /*
- * Link filter bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
- &event_notifier_group->ctx,
- &event_notifier_priv->parent.filter_bytecode_runtime_head,
- &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
-
- /*
- * Link capture bytecodes if not linked yet.
- */
- lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
- &event_notifier_group->ctx, &event_notifier_priv->capture_bytecode_runtime_head,
- &event_notifier_enabler->capture_bytecode_head);
-
- event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
- }
-end:
- return 0;
-}
-
-static
-void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler;
- struct lttng_ust_event_notifier_private *event_notifier_priv;
-
- cds_list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
- lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
-
- /*
- * For each event_notifier, if at least one of its enablers is enabled,
- * we enable the event_notifier, else we disable it.
- */
- cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
- struct lttng_enabler_ref *enabler_ref;
- struct lttng_ust_bytecode_runtime *runtime;
- int enabled = 0, has_enablers_without_filter_bytecode = 0;
- int nr_filters = 0, nr_captures = 0;
-
- /* Enable event_notifiers */
- cds_list_for_each_entry(enabler_ref,
- &event_notifier_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled) {
- enabled = 1;
- break;
- }
- }
-
- CMM_STORE_SHARED(event_notifier_priv->pub->parent->enabled, enabled);
- /*
- * Sync tracepoint registration with event_notifier enabled
- * state.
- */
- if (enabled) {
- if (!event_notifier_priv->parent.registered)
- register_event(event_notifier_priv->parent.pub);
- } else {
- if (event_notifier_priv->parent.registered)
- unregister_event(event_notifier_priv->parent.pub);
- }
-
- /* Check if has enablers without bytecode enabled */
- cds_list_for_each_entry(enabler_ref,
- &event_notifier_priv->parent.enablers_ref_head, node) {
- if (enabler_ref->ref->enabled
- && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
- has_enablers_without_filter_bytecode = 1;
- break;
- }
- }
- event_notifier_priv->parent.has_enablers_without_filter_bytecode =
- has_enablers_without_filter_bytecode;
-
- /* Enable filters */
- cds_list_for_each_entry(runtime,
- &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_filters++;
- }
- CMM_STORE_SHARED(event_notifier_priv->parent.pub->eval_filter,
- !(has_enablers_without_filter_bytecode || !nr_filters));
-
- /* Enable captures. */
- cds_list_for_each_entry(runtime,
- &event_notifier_priv->capture_bytecode_runtime_head, node) {
- lttng_bytecode_sync_state(runtime);
- nr_captures++;
- }
- CMM_STORE_SHARED(event_notifier_priv->pub->eval_capture,
- !!nr_captures);
- }
- lttng_ust_tp_probe_prune_release_queue();
-}
-
-/*
- * Apply enablers to session events, adding events to session if need
- * be. It is required after each modification applied to an active
- * session, and right before session "start".
- * "lazy" sync means we only sync if required.
- */
-static
-void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session)
-{
- /* We can skip if session is not active */
- if (!session->active)
- return;
- lttng_session_sync_event_enablers(session);
-}
-
-/*
- * Update all sessions with the given app context.
- * Called with ust lock held.
- * This is invoked when an application context gets loaded/unloaded. It
- * ensures the context callbacks are in sync with the application
- * context (either app context callbacks, or dummy callbacks).
- */
-void lttng_ust_context_set_session_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
-{
- struct lttng_ust_session_private *session_priv;
-
- cds_list_for_each_entry(session_priv, &sessions, node) {
- struct lttng_ust_channel_buffer_private *chan;
- struct lttng_ust_event_recorder_private *event_recorder_priv;
- int ret;
-
- ret = lttng_ust_context_set_provider_rcu(&session_priv->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- cds_list_for_each_entry(chan, &session_priv->chan_head, node) {
- ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- }
- cds_list_for_each_entry(event_recorder_priv, &session_priv->events_head, node) {
- ret = lttng_ust_context_set_provider_rcu(&event_recorder_priv->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- }
- }
-}
-
-/*
- * Update all event_notifier groups with the given app context.
- * Called with ust lock held.
- * This is invoked when an application context gets loaded/unloaded. It
- * ensures the context callbacks are in sync with the application
- * context (either app context callbacks, or dummy callbacks).
- */
-void lttng_ust_context_set_event_notifier_group_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
-{
- struct lttng_event_notifier_group *event_notifier_group;
-
- cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
- int ret;
-
- ret = lttng_ust_context_set_provider_rcu(
- &event_notifier_group->ctx,
- name, get_size, record, get_value, priv);
- if (ret)
- abort();
- }
-}
-
-int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
- unsigned char *uuid)
-{
- if (!session)
- return 0;
- /* Compare UUID with session. */
- if (session->priv->uuid_set) {
- if (memcmp(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN)) {
- return -1;
- }
- } else {
- memcpy(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
- session->priv->uuid_set = true;
- }
- return 0;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <error.h>
-#include <dlfcn.h>
-#include <stdlib.h>
-#include <usterr-signal-safe.h>
-#include <lttng/ust-getcpu.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-
-#include "getenv.h"
-#include "../libringbuffer/getcpu.h"
-
-int (*lttng_get_cpu)(void);
-
-static
-void *getcpu_handle;
-
-int lttng_ust_getcpu_override(int (*getcpu)(void))
-{
- CMM_STORE_SHARED(lttng_get_cpu, getcpu);
- return 0;
-}
-
-void lttng_ust_getcpu_init(void)
-{
- const char *libname;
- void (*libinit)(void);
-
- if (getcpu_handle)
- return;
- libname = lttng_ust_getenv("LTTNG_UST_GETCPU_PLUGIN");
- if (!libname)
- return;
- getcpu_handle = dlopen(libname, RTLD_NOW);
- if (!getcpu_handle) {
- PERROR("Cannot load LTTng UST getcpu override library %s",
- libname);
- return;
- }
- dlerror();
- libinit = (void (*)(void)) dlsym(getcpu_handle,
- "lttng_ust_getcpu_plugin_init");
- if (!libinit) {
- PERROR("Cannot find LTTng UST getcpu override library %s initialization function lttng_ust_getcpu_plugin_init()",
- libname);
- return;
- }
- libinit();
-}
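/*
 * Editor's note: sketch of a getcpu override plugin as loaded by
 * lttng_ust_getcpu_init() above: build it as a shared object and
 * point LTTNG_UST_GETCPU_PLUGIN at it. Returning a constant 0 is
 * purely illustrative; a real plugin must return the current CPU
 * number within the valid range.
 */
#include <stdlib.h>
#include <lttng/ust-getcpu.h>

static int plugin_getcpu(void)
{
	return 0;	/* illustrative only */
}

void lttng_ust_getcpu_plugin_init(void)
{
	if (lttng_ust_getcpu_override(plugin_getcpu))
		abort();
}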
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng hash table helpers.
- */
-
-#ifndef _LTTNG_HASH_HELPER_H
-#define _LTTNG_HASH_HELPER_H
-
-#include <assert.h>
-#include <stddef.h>
-#include <stdint.h>
-#include <urcu/compiler.h>
-
-/*
- * Hash function
- * Source: http://burtleburtle.net/bob/c/lookup3.c
- * Originally Public Domain
- */
-
-#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
-
-#define mix(a, b, c) \
-do { \
- a -= c; a ^= rot(c, 4); c += b; \
- b -= a; b ^= rot(a, 6); a += c; \
- c -= b; c ^= rot(b, 8); b += a; \
- a -= c; a ^= rot(c, 16); c += b; \
- b -= a; b ^= rot(a, 19); a += c; \
- c -= b; c ^= rot(b, 4); b += a; \
-} while (0)
-
-#define final(a, b, c) \
-{ \
- c ^= b; c -= rot(b, 14); \
- a ^= c; a -= rot(c, 11); \
- b ^= a; b -= rot(a, 25); \
- c ^= b; c -= rot(b, 16); \
- a ^= c; a -= rot(c, 4);\
- b ^= a; b -= rot(a, 14); \
- c ^= b; c -= rot(b, 24); \
-}
-
-static inline
-uint32_t lttng_hash_u32(const uint32_t *k, size_t length, uint32_t initval)
- __attribute__((unused));
-static inline
-uint32_t lttng_hash_u32(
- const uint32_t *k, /* the key, an array of uint32_t values */
- size_t length, /* the length of the key, in uint32_ts */
- uint32_t initval) /* the previous hash, or an arbitrary value */
-{
- uint32_t a, b, c;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;
-
- /*----------------------------------------- handle most of the key */
- while (length > 3) {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a, b, c);
- length -= 3;
- k += 3;
- }
-
- /*----------------------------------- handle the last 3 uint32_t's */
- switch (length) { /* all the case statements fall through */
- case 3: c += k[2];
- case 2: b += k[1];
- case 1: a += k[0];
- final(a, b, c);
- case 0: /* case 0: nothing left to add */
- break;
- }
- /*---------------------------------------------- report the result */
- return c;
-}
-
-static inline
-void lttng_hashword2(
- const uint32_t *k, /* the key, an array of uint32_t values */
- size_t length, /* the length of the key, in uint32_ts */
- uint32_t *pc, /* IN: seed OUT: primary hash value */
- uint32_t *pb) /* IN: more seed OUT: secondary hash value */
-{
- uint32_t a, b, c;
-
- /* Set up the internal state */
- a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
- c += *pb;
-
- /*----------------------------------------- handle most of the key */
- while (length > 3) {
- a += k[0];
- b += k[1];
- c += k[2];
- mix(a, b, c);
- length -= 3;
- k += 3;
- }
-
- /*----------------------------------- handle the last 3 uint32_t's */
- switch (length) { /* all the case statements fall through */
- case 3: c += k[2]; /* fall through */
- case 2: b += k[1]; /* fall through */
- case 1: a += k[0];
- final(a, b, c); /* fall through */
- case 0: /* case 0: nothing left to add */
- break;
- }
- /*---------------------------------------------- report the result */
- *pc = c;
- *pb = b;
-}
-
-#if (CAA_BITS_PER_LONG == 32)
-static inline
-unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
-{
- unsigned int key = (unsigned int) _key;
-
- assert(length == sizeof(unsigned int));
- return lttng_hash_u32(&key, 1, seed);
-}
-#else
-static inline
-unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
-{
- union {
- uint64_t v64;
- uint32_t v32[2];
- } v;
- union {
- uint64_t v64;
- uint32_t v32[2];
- } key;
-
- assert(length == sizeof(unsigned long));
- v.v64 = (uint64_t) seed;
- key.v64 = (uint64_t) _key;
- lttng_hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
- return v.v64;
-}
-#endif
-
-#endif /* _LTTNG_HASH_HELPER_H */
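/*
 * Editor's note: usage sketch for the lookup3 helpers above, in a
 * translation unit that includes this header. The only property the
 * probe/event hash tables rely on is that equal keys hashed with
 * equal seeds land in the same bucket.
 */
#include <stdio.h>

static void hash_demo(void)
{
	uint32_t key[2] = { 0xdead, 0xbeef };
	uint32_t hash = lttng_hash_u32(key, 2, 0x42 /* seed */);

	/* Power-of-2 table: a mask selects the bucket. */
	printf("bucket = %u\n", hash & (128 - 1));
}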
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright 2010-2012 (C) Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Holds LTTng probes registry.
- */
-
-#define _LGPL_SOURCE
-#include <string.h>
-#include <errno.h>
-#include <urcu/list.h>
-#include <urcu/hlist.h>
-#include <lttng/ust-events.h>
-#include <lttng/tracepoint.h>
-#include "tracepoint-internal.h"
-#include <assert.h>
-#include <ust-helper.h>
-#include <ctype.h>
-
-#include "lttng-tracer-core.h"
-#include "jhash.h"
-#include "error.h"
-#include "ust-events-internal.h"
-
-/*
- * probe list is protected by ust_lock()/ust_unlock().
- */
-static CDS_LIST_HEAD(_probe_list);
-
-/*
- * List of probes registered by not yet processed.
- */
-static CDS_LIST_HEAD(lazy_probe_init);
-
-/*
- * lazy_nesting counter ensures we don't trigger lazy probe registration
- * fixup while we are performing the fixup. It is protected by the ust
- * mutex.
- */
-static int lazy_nesting;
-
-/*
- * Validate that each event within the probe provider refers to the
- * right probe, and that the resulting name is not too long.
- */
-static
-bool check_event_provider(const struct lttng_ust_probe_desc *probe_desc)
-{
- int i;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc = probe_desc->event_desc[i];
-
- if (event_desc->probe_desc != probe_desc) {
- ERR("Error registering probe provider '%s'. Event '%s:%s' refers to the wrong provider descriptor.",
- probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
- return false; /* provider mismatch */
- }
- if (!lttng_ust_validate_event_name(event_desc)) {
- ERR("Error registering probe provider '%s'. Event '%s:%s' name is too long.",
- probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
- return false; /* event name too long */
- }
- }
- return true;
-}
-
-/*
- * Called under ust lock.
- */
-static
-void lttng_lazy_probe_register(struct lttng_ust_registered_probe *reg_probe)
-{
- struct lttng_ust_registered_probe *iter;
- struct cds_list_head *probe_list;
-
- /*
- * The provider ensures there are no duplicate event names.
- * Duplicated TRACEPOINT_EVENT event names would generate a
- * compile-time error due to duplicated symbol names.
- */
-
- /*
- * We sort the providers by struct lttng_ust_probe_desc pointer
- * address.
- */
- probe_list = &_probe_list;
- cds_list_for_each_entry_reverse(iter, probe_list, head) {
- BUG_ON(iter == reg_probe); /* Should never be in the list twice */
- if (iter < reg_probe) {
- /* We belong to the location right after iter. */
- cds_list_add(&reg_probe->head, &iter->head);
- goto probe_added;
- }
- }
- /* We should be added at the head of the list */
- cds_list_add(&reg_probe->head, probe_list);
-probe_added:
- DBG("just registered probe %s containing %u events",
- reg_probe->desc->provider_name, reg_probe->desc->nr_events);
-}
-
-/*
- * Called under ust lock.
- */
-static
-void fixup_lazy_probes(void)
-{
- struct lttng_ust_registered_probe *iter, *tmp;
- int ret;
-
- lazy_nesting++;
- cds_list_for_each_entry_safe(iter, tmp,
- &lazy_probe_init, lazy_init_head) {
- lttng_lazy_probe_register(iter);
- iter->lazy = 0;
- cds_list_del(&iter->lazy_init_head);
- }
- ret = lttng_fix_pending_events();
- assert(!ret);
- lazy_nesting--;
-}
-
-/*
- * Called under ust lock.
- */
-struct cds_list_head *lttng_get_probe_list_head(void)
-{
- if (!lazy_nesting && !cds_list_empty(&lazy_probe_init))
- fixup_lazy_probes();
- return &_probe_list;
-}
-
-static
-int check_provider_version(const struct lttng_ust_probe_desc *desc)
-{
- /*
- * Check tracepoint provider version compatibility.
- */
- if (desc->major <= LTTNG_UST_PROVIDER_MAJOR) {
- DBG("Provider \"%s\" accepted, version %u.%u is compatible "
- "with LTTng UST provider version %u.%u.",
- desc->provider_name, desc->major, desc->minor,
- LTTNG_UST_PROVIDER_MAJOR,
- LTTNG_UST_PROVIDER_MINOR);
- if (desc->major < LTTNG_UST_PROVIDER_MAJOR) {
- DBG("However, some LTTng UST features might not be "
- "available for this provider unless it is "
- "recompiled against a more recent LTTng UST.");
- }
- return 1; /* accept */
- } else {
- ERR("Provider \"%s\" rejected, version %u.%u is incompatible "
- "with LTTng UST provider version %u.%u. Please upgrade "
- "LTTng UST.",
- desc->provider_name, desc->major, desc->minor,
- LTTNG_UST_PROVIDER_MAJOR,
- LTTNG_UST_PROVIDER_MINOR);
- return 0; /* reject */
- }
-}
-
-struct lttng_ust_registered_probe *lttng_ust_probe_register(const struct lttng_ust_probe_desc *desc)
-{
- struct lttng_ust_registered_probe *reg_probe = NULL;
-
- lttng_ust_fixup_tls();
-
- /*
- * If version mismatch, don't register, but don't trigger assert
- * on caller. The version check just prints an error.
- */
- if (!check_provider_version(desc))
- return NULL;
- if (!check_event_provider(desc))
- return NULL;
-
- ust_lock_nocheck();
-
- reg_probe = zmalloc(sizeof(struct lttng_ust_registered_probe));
- if (!reg_probe)
- goto end;
- reg_probe->desc = desc;
- cds_list_add(&reg_probe->lazy_init_head, &lazy_probe_init);
- reg_probe->lazy = 1;
-
- DBG("adding probe %s containing %u events to lazy registration list",
- desc->provider_name, desc->nr_events);
- /*
- * If there is at least one active session, we need to register
- * the probe immediately, since we cannot delay event
- * registration because they are needed ASAP.
- */
- if (lttng_session_active())
- fixup_lazy_probes();
-
- lttng_fix_pending_event_notifiers();
-end:
- ust_unlock();
- return reg_probe;
-}
-
-void lttng_ust_probe_unregister(struct lttng_ust_registered_probe *reg_probe)
-{
- lttng_ust_fixup_tls();
-
- if (!reg_probe)
- return;
- if (!check_provider_version(reg_probe->desc))
- return;
-
- ust_lock_nocheck();
- if (!reg_probe->lazy)
- cds_list_del(&reg_probe->head);
- else
- cds_list_del(&reg_probe->lazy_init_head);
-
- lttng_probe_provider_unregister_events(reg_probe->desc);
- DBG("just unregistered probes of provider %s", reg_probe->desc->provider_name);
- ust_unlock();
- free(reg_probe);
-}
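/*
 * Editor's note: registration life-cycle sketch for the two
 * functions above. In practice the descriptor is generated by the
 * TRACEPOINT_EVENT() machinery; this hand-rolled one is illustrative
 * and abbreviates the descriptor fields (see lttng/ust-events.h for
 * the full definition).
 */
static const struct lttng_ust_probe_desc example_desc = {
	.provider_name = "example_provider",
	.event_desc = NULL,	/* would point to generated event descriptors */
	.nr_events = 0,
	.major = LTTNG_UST_PROVIDER_MAJOR,
	.minor = LTTNG_UST_PROVIDER_MINOR,
};

static struct lttng_ust_registered_probe *example_reg;

static void example_provider_load(void)
{
	/* NULL means the version was rejected or allocation failed. */
	example_reg = lttng_ust_probe_register(&example_desc);
}

static void example_provider_unload(void)
{
	lttng_ust_probe_unregister(example_reg);	/* NULL-safe */
	example_reg = NULL;
}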
-
-void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
-{
- struct tp_list_entry *list_entry, *tmp;
-
- cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
- cds_list_del(&list_entry->head);
- free(list_entry);
- }
-}
-
-/*
- * called with UST lock held.
- */
-int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
-{
- struct lttng_ust_registered_probe *reg_probe;
- struct cds_list_head *probe_list;
- int i;
-
- probe_list = lttng_get_probe_list_head();
- CDS_INIT_LIST_HEAD(&list->head);
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc =
- probe_desc->event_desc[i];
- struct tp_list_entry *list_entry;
-
- /* Skip event if name is too long. */
- if (!lttng_ust_validate_event_name(event_desc))
- continue;
- list_entry = zmalloc(sizeof(*list_entry));
- if (!list_entry)
- goto err_nomem;
- cds_list_add(&list_entry->head, &list->head);
- lttng_ust_format_event_name(event_desc, list_entry->tp.name);
- if (!event_desc->loglevel) {
- list_entry->tp.loglevel = TRACE_DEFAULT;
- } else {
- list_entry->tp.loglevel = *(*event_desc->loglevel);
- }
- }
- }
- if (cds_list_empty(&list->head))
- list->iter = NULL;
- else
- list->iter =
- cds_list_first_entry(&list->head, struct tp_list_entry, head);
- return 0;
-
-err_nomem:
- lttng_probes_prune_event_list(list);
- return -ENOMEM;
-}
-
-/*
- * Return current iteration position, advance internal iterator to next.
- * Return NULL if end of list.
- */
-struct lttng_ust_abi_tracepoint_iter *
- lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
-{
- struct tp_list_entry *entry;
-
- if (!list->iter)
- return NULL;
- entry = list->iter;
- if (entry->head.next == &list->head)
- list->iter = NULL;
- else
- list->iter = cds_list_entry(entry->head.next,
- struct tp_list_entry, head);
- return &entry->tp;
-}
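/*
 * Editor's note: consumer-side sketch of the iterator pair above,
 * driven with the UST lock held, as lttng_probes_get_event_list()
 * requires. Error handling abbreviated.
 */
static void example_dump_tracepoints(void)
{
	struct lttng_ust_tracepoint_list tp_list;
	struct lttng_ust_abi_tracepoint_iter *iter;

	if (lttng_probes_get_event_list(&tp_list))
		return;		/* -ENOMEM */
	while ((iter = lttng_ust_tracepoint_list_get_iter_next(&tp_list)) != NULL)
		DBG("tracepoint %s (loglevel %d)", iter->name, iter->loglevel);
	lttng_probes_prune_event_list(&tp_list);
}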
-
-void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
-{
- struct tp_field_list_entry *list_entry, *tmp;
-
- cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
- cds_list_del(&list_entry->head);
- free(list_entry);
- }
-}
-
-/*
- * called with UST lock held.
- */
-int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
-{
- struct lttng_ust_registered_probe *reg_probe;
- struct cds_list_head *probe_list;
- int i;
-
- probe_list = lttng_get_probe_list_head();
- CDS_INIT_LIST_HEAD(&list->head);
- cds_list_for_each_entry(reg_probe, probe_list, head) {
- const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
-
- for (i = 0; i < probe_desc->nr_events; i++) {
- const struct lttng_ust_event_desc *event_desc =
- probe_desc->event_desc[i];
- int j;
-
- if (event_desc->nr_fields == 0) {
- /* Events without fields. */
- struct tp_field_list_entry *list_entry;
-
- /* Skip event if name is too long. */
- if (!lttng_ust_validate_event_name(event_desc))
- continue;
- list_entry = zmalloc(sizeof(*list_entry));
- if (!list_entry)
- goto err_nomem;
- cds_list_add(&list_entry->head, &list->head);
- lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
- list_entry->field.field_name[0] = '\0';
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- if (!event_desc->loglevel) {
- list_entry->field.loglevel = TRACE_DEFAULT;
- } else {
- list_entry->field.loglevel = *(*event_desc->loglevel);
- }
- list_entry->field.nowrite = 1;
- }
-
- for (j = 0; j < event_desc->nr_fields; j++) {
- const struct lttng_ust_event_field *event_field =
- event_desc->fields[j];
- struct tp_field_list_entry *list_entry;
-
- /* Skip event if name is too long. */
- if (!lttng_ust_validate_event_name(event_desc))
- continue;
- list_entry = zmalloc(sizeof(*list_entry));
- if (!list_entry)
- goto err_nomem;
- cds_list_add(&list_entry->head, &list->head);
- lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
- strncpy(list_entry->field.field_name,
- event_field->name,
- LTTNG_UST_ABI_SYM_NAME_LEN);
- list_entry->field.field_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- switch (event_field->type->type) {
- case lttng_ust_type_integer:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_INTEGER;
- break;
- case lttng_ust_type_string:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
- break;
- case lttng_ust_type_array:
- if (lttng_ust_get_type_array(event_field->type)->encoding == lttng_ust_string_encoding_none)
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- else
- list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
- break;
- case lttng_ust_type_sequence:
- if (lttng_ust_get_type_sequence(event_field->type)->encoding == lttng_ust_string_encoding_none)
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- else
- list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
- break;
- case lttng_ust_type_float:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_FLOAT;
- break;
- case lttng_ust_type_enum:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_ENUM;
- break;
- default:
- list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
- }
- if (!event_desc->loglevel) {
- list_entry->field.loglevel = TRACE_DEFAULT;
- } else {
- list_entry->field.loglevel = *(*event_desc->loglevel);
- }
- list_entry->field.nowrite = event_field->nowrite;
- }
- }
- }
- if (cds_list_empty(&list->head))
- list->iter = NULL;
- else
- list->iter =
- cds_list_first_entry(&list->head,
- struct tp_field_list_entry, head);
- return 0;
-
-err_nomem:
- lttng_probes_prune_field_list(list);
- return -ENOMEM;
-}
-
-/*
- * Return current iteration position, advance internal iterator to next.
- * Return NULL if end of list.
- */
-struct lttng_ust_abi_field_iter *
- lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
-{
- struct tp_field_list_entry *entry;
-
- if (!list->iter)
- return NULL;
- entry = list->iter;
- if (entry->head.next == &list->head)
- list->iter = NULL;
- else
- list->iter = cds_list_entry(entry->head.next,
- struct tp_field_list_entry, head);
- return &entry->field;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_RB_CLIENT_H
-#define _LTTNG_RB_CLIENT_H
-
-#include <stdint.h>
-#include "../libringbuffer/ringbuffer-config.h"
-
-struct lttng_ust_client_lib_ring_buffer_client_cb {
- struct lttng_ust_lib_ring_buffer_client_cb parent;
-
- int (*timestamp_begin) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_begin);
- int (*timestamp_end) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_end);
- int (*events_discarded) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *events_discarded);
- int (*content_size) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *content_size);
- int (*packet_size) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *packet_size);
- int (*stream_id) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *stream_id);
- int (*current_timestamp) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *ts);
- int (*sequence_number) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *seq);
- int (*instance_id) (struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *id);
-};
-
-/*
- * The ring buffer clients init/exit symbols are private ABI for
- * liblttng-ust-ctl, which is why they are not hidden.
- */
-void lttng_ust_ring_buffer_clients_init(void);
-void lttng_ust_ring_buffer_clients_exit(void);
-
-void lttng_ring_buffer_client_overwrite_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_overwrite_rt_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_rt_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_metadata_client_init(void)
- __attribute__((visibility("hidden")));
-
-
-void lttng_ring_buffer_client_overwrite_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_overwrite_rt_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_client_discard_rt_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ring_buffer_metadata_client_exit(void)
- __attribute__((visibility("hidden")));
-
-
-void lttng_ust_fixup_ring_buffer_client_overwrite_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_ring_buffer_client_discard_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_ring_buffer_client_discard_rt_tls(void)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_RB_CLIENT_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (discard mode) for RT.
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-rt"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_discard_rt_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_discard_rt_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_discard_rt_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD_RT
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (discard mode).
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_discard_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_discard_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_discard_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (overwrite mode) for RT.
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-rt"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_overwrite_rt_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_overwrite_rt_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE_RT
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client (overwrite mode).
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-#include "lttng-rb-clients.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
-#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
-#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
- lttng_ust_fixup_ring_buffer_client_overwrite_tls
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_client_overwrite_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_client_overwrite_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE
-#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
-#include "lttng-ring-buffer-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client template.
- */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <ust-events-internal.h>
-#include <lttng/urcu/pointer.h>
-#include "ust-bitfield.h"
-#include "ust-compat.h"
-#include "clock.h"
-#include "context-internal.h"
-#include "lttng-tracer.h"
-#include "../libringbuffer/frontend_types.h"
-#include <urcu/tls-compat.h>
-
-#define LTTNG_COMPACT_EVENT_BITS 5
-#define LTTNG_COMPACT_TSC_BITS 27
-
-/*
- * Keep the natural field alignment for _each field_ within this structure if
- * you ever add/remove a field from this header. Packed attribute is not used
- * because gcc generates poor code on at least powerpc and mips. Don't ever
- * let gcc add padding between the structure elements.
- */
-
-struct packet_header {
- /* Trace packet header */
- uint32_t magic; /*
- * Trace magic number.
- * contains endianness information.
- */
- uint8_t uuid[LTTNG_UST_UUID_LEN];
- uint32_t stream_id;
- uint64_t stream_instance_id;
-
- struct {
- /* Stream packet context */
- uint64_t timestamp_begin; /* Cycle count at subbuffer start */
- uint64_t timestamp_end; /* Cycle count at subbuffer end */
- uint64_t content_size; /* Size of data in subbuffer */
- uint64_t packet_size; /* Subbuffer size (include padding) */
- uint64_t packet_seq_num; /* Packet sequence number */
- unsigned long events_discarded; /*
- * Events lost in this subbuffer since
- * the beginning of the trace.
- * (may overflow)
- */
- uint32_t cpu_id; /* CPU id associated with stream */
- uint8_t header_end; /* End of header */
- } ctx;
-};
-
-struct lttng_client_ctx {
- size_t packet_context_len;
- size_t event_context_len;
- struct lttng_ust_ctx *chan_ctx;
- struct lttng_ust_ctx *event_ctx;
-};
-
-/*
- * Indexed by lib_ring_buffer_nesting_count().
- */
-typedef struct lttng_ust_lib_ring_buffer_ctx_private private_ctx_stack_t[LIB_RING_BUFFER_MAX_NESTING];
-static DEFINE_URCU_TLS(private_ctx_stack_t, private_ctx_stack);
-
-/*
- * Force a read (imply TLS fixup for dlopen) of TLS variables.
- */
-void RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(private_ctx_stack)));
-}
-
-static inline uint64_t lib_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
-{
- return trace_clock_read64();
-}
-
-static inline
-size_t ctx_get_aligned_size(size_t offset, struct lttng_ust_ctx *ctx,
- size_t ctx_len)
-{
- size_t orig_offset = offset;
-
- if (caa_likely(!ctx))
- return 0;
- offset += lttng_ust_lib_ring_buffer_align(offset, ctx->largest_align);
- offset += ctx_len;
- return offset - orig_offset;
-}
-
-static inline
-void ctx_get_struct_size(struct lttng_ust_ctx *ctx, size_t *ctx_len)
-{
- int i;
- size_t offset = 0;
-
- if (caa_likely(!ctx)) {
- *ctx_len = 0;
- return;
- }
- for (i = 0; i < ctx->nr_fields; i++)
- offset += ctx->fields[i].get_size(ctx->fields[i].priv, offset);
- *ctx_len = offset;
-}
-
-static inline
-void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
- struct lttng_ust_channel_buffer *chan,
- struct lttng_ust_ctx *ctx)
-{
- int i;
-
- if (caa_likely(!ctx))
- return;
- lttng_ust_lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
- for (i = 0; i < ctx->nr_fields; i++)
- ctx->fields[i].record(ctx->fields[i].priv, bufctx, chan);
-}
-
-/*
- * record_header_size - Calculate the header size and padding necessary.
- * @config: ring buffer instance configuration
- * @chan: channel
- * @offset: offset in the write buffer
- * @pre_header_padding: padding to add before the header (output)
- * @ctx: reservation context
- *
- * Returns the event header size (including padding).
- *
- * The payload must itself determine its own alignment from the biggest type it
- * contains.
- */
-static __inline__
-size_t record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
- size_t offset,
- size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx)
-{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
- size_t orig_offset = offset;
- size_t padding;
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
- offset += padding;
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- offset += sizeof(uint32_t); /* id and timestamp */
- } else {
- /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
- offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
- /* Align extended struct on largest member */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint32_t); /* id */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint64_t); /* timestamp */
- }
- break;
- case 2: /* large */
- padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
- offset += padding;
- offset += sizeof(uint16_t);
- if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
- offset += sizeof(uint32_t); /* timestamp */
- } else {
- /* Align extended struct on largest member */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint32_t); /* id */
- offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
- offset += sizeof(uint64_t); /* timestamp */
- }
- break;
- default:
- padding = 0;
- WARN_ON_ONCE(1);
- }
- offset += ctx_get_aligned_size(offset, client_ctx->chan_ctx,
- client_ctx->packet_context_len);
- offset += ctx_get_aligned_size(offset, client_ctx->event_ctx,
- client_ctx->event_context_len);
- *pre_header_padding = padding;
- return offset - orig_offset;
-}
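/*
 * Editor's note: the alignment helpers used throughout
 * record_header_size() compute the padding that brings an offset up
 * to a power-of-2 alignment. Minimal sketch of the arithmetic:
 */
#include <stddef.h>

static inline size_t align_padding(size_t offset, size_t align)
{
	/* align must be a power of 2 */
	return (align - (offset & (align - 1))) & (align - 1);
}

/*
 * Example: offset 13, alignment 8 -> padding 3, so the next field
 * starts at offset 16. record_header_size() accumulates exactly such
 * "offset += padding; offset += sizeof(field);" steps per member.
 */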
-
-#include "../libringbuffer/api.h"
-#include "lttng-rb-clients.h"
-
-static
-void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx,
- uint32_t event_id);
-
-/*
- * lttng_write_event_header
- *
- * Writes the event header to the offset (already aligned on 32-bits).
- *
- * @config: ring buffer instance configuration
- * @ctx: reservation context
- * @event_id: event ID
- */
-static __inline__
-void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx,
- uint32_t event_id)
-{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
-
- if (caa_unlikely(ctx->priv->rflags))
- goto slow_path;
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- {
- uint32_t id_time = 0;
-
- bt_bitfield_write(&id_time, uint32_t,
- 0,
- LTTNG_COMPACT_EVENT_BITS,
- event_id);
- bt_bitfield_write(&id_time, uint32_t,
- LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx->priv->tsc);
- lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
- break;
- }
- case 2: /* large */
- {
- uint32_t timestamp = (uint32_t) ctx->priv->tsc;
- uint16_t id = event_id;
-
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- break;
- }
- default:
- WARN_ON_ONCE(1);
- }
-
- ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
- lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-
- return;
-
-slow_path:
- lttng_write_event_header_slow(config, ctx, client_ctx, event_id);
-}
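/*
 * Editor's note: in the compact fast path above, a 5-bit event id
 * and the low 27 bits of the timestamp share a single uint32_t.
 * Equivalent shift/mask sketch for the little-endian bitfield
 * convention (bt_bitfield_write() additionally handles byte order
 * and writes spanning units, elided here):
 */
#include <stdint.h>

static inline uint32_t pack_compact(uint32_t event_id, uint64_t tsc)
{
	uint32_t id_time = 0;

	id_time |= event_id & 0x1f;			/* bits 0..4: id */
	id_time |= ((uint32_t) tsc & 0x07ffffff) << 5;	/* bits 5..31: tsc */
	return id_time;
}

/*
 * Event id 31 (all ones in 5 bits) is reserved: the slow path writes
 * it to flag an extended header carrying the full 32-bit id and
 * 64-bit timestamp.
 */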
-
-static
-void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_client_ctx *client_ctx,
- uint32_t event_id)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t id_time = 0;
-
- bt_bitfield_write(&id_time, uint32_t,
- 0,
- LTTNG_COMPACT_EVENT_BITS,
- event_id);
- bt_bitfield_write(&id_time, uint32_t,
- LTTNG_COMPACT_EVENT_BITS,
- LTTNG_COMPACT_TSC_BITS,
- ctx_private->tsc);
- lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
- } else {
- uint8_t id = 0;
- uint64_t timestamp = ctx_private->tsc;
-
- bt_bitfield_write(&id, uint8_t,
- 0,
- LTTNG_COMPACT_EVENT_BITS,
- 31);
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- /* Align extended struct on largest member */
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- }
- break;
- case 2: /* large */
- {
- if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
- uint32_t timestamp = (uint32_t) ctx_private->tsc;
- uint16_t id = event_id;
-
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- } else {
- uint16_t id = 65535;
- uint64_t timestamp = ctx_private->tsc;
-
- lib_ring_buffer_write(config, ctx, &id, sizeof(id));
- /* Align extended struct on largest member */
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
- lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
- lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
- }
- break;
- }
- default:
- WARN_ON_ONCE(1);
- }
- ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
- lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
-}
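-
-/*
- * Example of the extended compact layout: an event ID that does not fit in
- * the LTTNG_COMPACT_EVENT_BITS field is signalled by writing the escape
- * value 31 in the compact ID bits, followed by the full 32-bit event ID and
- * the full 64-bit timestamp, each aligned on 64 bits, exactly as sized by
- * record_header_size().
- */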
-
-static const struct lttng_ust_lib_ring_buffer_config client_config;
-
-static uint64_t client_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return lib_ring_buffer_clock_read(chan);
-}
-
-static
-size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- size_t offset,
- size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx)
-{
- return record_header_size(config, chan, offset,
- pre_header_padding, ctx, client_ctx);
-}
-
-/**
- * client_packet_header_size - called on buffer-switch to a new sub-buffer
- *
- * Return the header size without padding after the structure. Don't use a
- * packed structure, because gcc generates inefficient code on some
- * architectures (powerpc, mips, ...).
- */
-static size_t client_packet_header_size(void)
-{
- return offsetof(struct packet_header, ctx.header_end);
-}
-
-static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
- unsigned int subbuf_idx,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct packet_header *header =
- (struct packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
- uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;
-
- assert(header);
- if (!header)
- return;
- header->magic = CTF_MAGIC_NUMBER;
- memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
- header->stream_id = lttng_chan->priv->id;
- header->stream_instance_id = buf->backend.cpu;
- header->ctx.timestamp_begin = tsc;
- header->ctx.timestamp_end = 0;
- header->ctx.content_size = ~0ULL; /* for debugging */
- header->ctx.packet_size = ~0ULL;
- header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
- header->ctx.events_discarded = 0;
- header->ctx.cpu_id = buf->backend.cpu;
-}
-
-/*
- * offset is assumed to never be 0 here: we never deliver a completely
- * empty subbuffer. data_size is between 1 and subbuf_size.
- */
-static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
- unsigned int subbuf_idx, unsigned long data_size,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct packet_header *header =
- (struct packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- unsigned long records_lost = 0;
-
- assert(header);
- if (!header)
- return;
- header->ctx.timestamp_end = tsc;
- header->ctx.content_size =
- (uint64_t) data_size * CHAR_BIT; /* in bits */
- header->ctx.packet_size =
- (uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
-
- records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
- header->ctx.events_discarded = records_lost;
-}
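-
-/*
- * content_size counts the payload actually written, while packet_size
- * counts the page-aligned footprint handed to the consumer, both in bits.
- * Assuming a 4 kiB page size, a sub-buffer holding 100 bytes of data ends
- * with content_size = 800 and packet_size = 32768.
- */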
-
-static int client_buffer_create(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- const char *name __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
- return 0;
-}
-
-static void client_buffer_finalize(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
-}
-
-static void client_content_size_field(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- size_t *offset, size_t *length)
-{
- *offset = offsetof(struct packet_header, ctx.content_size);
- *length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
-}
-
-static void client_packet_size_field(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- size_t *offset, size_t *length)
-{
- *offset = offsetof(struct packet_header, ctx.packet_size);
- *length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
-}
-
-static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
-}
-
-static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_begin)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *timestamp_begin = header->ctx.timestamp_begin;
- return 0;
-}
-
-static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *timestamp_end)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *timestamp_end = header->ctx.timestamp_end;
- return 0;
-}
-
-static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *events_discarded)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *events_discarded = header->ctx.events_discarded;
- return 0;
-}
-
-static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *content_size)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *content_size = header->ctx.content_size;
- return 0;
-}
-
-static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *packet_size)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *packet_size = header->ctx.packet_size;
- return 0;
-}
-
-static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *stream_id)
-{
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
-
- *stream_id = lttng_chan->priv->id;
-
- return 0;
-}
-
-static int client_current_timestamp(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *ts)
-{
- *ts = client_ring_buffer_clock_read(chan);
-
- return 0;
-}
-
-static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- uint64_t *seq)
-{
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct packet_header *header;
-
- header = client_packet_header(buf, handle);
- if (!header)
- return -1;
- *seq = header->ctx.packet_seq_num;
- return 0;
-}
-
-static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- uint64_t *id)
-{
- *id = buf->backend.cpu;
-
- return 0;
-}
-
-static const
-struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
- .parent = {
- .ring_buffer_clock_read = client_ring_buffer_clock_read,
- .record_header_size = client_record_header_size,
- .subbuffer_header_size = client_packet_header_size,
- .buffer_begin = client_buffer_begin,
- .buffer_end = client_buffer_end,
- .buffer_create = client_buffer_create,
- .buffer_finalize = client_buffer_finalize,
- .content_size_field = client_content_size_field,
- .packet_size_field = client_packet_size_field,
- },
- .timestamp_begin = client_timestamp_begin,
- .timestamp_end = client_timestamp_end,
- .events_discarded = client_events_discarded,
- .content_size = client_content_size,
- .packet_size = client_packet_size,
- .stream_id = client_stream_id,
- .current_timestamp = client_current_timestamp,
- .sequence_number = client_sequence_number,
- .instance_id = client_instance_id,
-};
-
-static const struct lttng_ust_lib_ring_buffer_config client_config = {
- .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
- .cb.record_header_size = client_record_header_size,
- .cb.subbuffer_header_size = client_packet_header_size,
- .cb.buffer_begin = client_buffer_begin,
- .cb.buffer_end = client_buffer_end,
- .cb.buffer_create = client_buffer_create,
- .cb.buffer_finalize = client_buffer_finalize,
- .cb.content_size_field = client_content_size_field,
- .cb.packet_size_field = client_packet_size_field,
-
- .tsc_bits = LTTNG_COMPACT_TSC_BITS,
- .alloc = RING_BUFFER_ALLOC_PER_CPU,
- .sync = RING_BUFFER_SYNC_GLOBAL,
- .mode = RING_BUFFER_MODE_TEMPLATE,
- .backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_MMAP,
- .oops = RING_BUFFER_OOPS_CONSISTENCY,
- .ipi = RING_BUFFER_NO_IPI_BARRIER,
- .wakeup = LTTNG_CLIENT_WAKEUP,
- .client_type = LTTNG_CLIENT_TYPE,
-
- .cb_ptr = &client_cb.parent,
-};
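-
-/*
- * tsc_bits is the number of low-order timestamp bits carried by the
- * compact event header. When the free-running clock overflows that window
- * between two events, the ring buffer sets RING_BUFFER_RFLAG_FULL_TSC and
- * the slow path records the full 64-bit timestamp instead.
- */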
-
-static
-struct lttng_ust_channel_buffer *_channel_create(const char *name,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- unsigned char *uuid,
- uint32_t chan_id,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout)
-{
- struct lttng_ust_abi_channel_config chan_priv_init;
- struct lttng_ust_shm_handle *handle;
- struct lttng_ust_channel_buffer *lttng_chan_buf;
-
- lttng_chan_buf = lttng_ust_alloc_channel_buffer();
- if (!lttng_chan_buf)
- return NULL;
- memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
- lttng_chan_buf->priv->id = chan_id;
-
- memset(&chan_priv_init, 0, sizeof(chan_priv_init));
- memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
- chan_priv_init.id = chan_id;
-
- handle = channel_create(&client_config, name,
- __alignof__(struct lttng_ust_abi_channel_config),
- sizeof(struct lttng_ust_abi_channel_config),
- &chan_priv_init,
- lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
- switch_timer_interval, read_timer_interval,
- stream_fds, nr_stream_fds, blocking_timeout);
- if (!handle)
- goto error;
- lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
- return lttng_chan_buf;
-
-error:
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
- return NULL;
-}
-
-static
-void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
-{
- channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
-}
-
-static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- struct lttng_ust_event_recorder *event_recorder = ctx->client_priv;
- struct lttng_ust_channel_buffer *lttng_chan = event_recorder->chan;
- struct lttng_client_ctx client_ctx;
- int ret, nesting;
- struct lttng_ust_lib_ring_buffer_ctx_private *private_ctx;
- uint32_t event_id;
-
- event_id = event_recorder->priv->id;
- client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
- client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
- /* Compute internal size of context structures. */
- ctx_get_struct_size(client_ctx.chan_ctx, &client_ctx.packet_context_len);
- ctx_get_struct_size(client_ctx.event_ctx, &client_ctx.event_context_len);
-
- nesting = lib_ring_buffer_nesting_inc(&client_config);
- if (nesting < 0)
- return -EPERM;
-
- private_ctx = &URCU_TLS(private_ctx_stack)[nesting];
- memset(private_ctx, 0, sizeof(*private_ctx));
- private_ctx->pub = ctx;
- private_ctx->chan = lttng_chan->priv->rb_chan;
-
- ctx->priv = private_ctx;
-
- switch (lttng_chan->priv->header_type) {
- case 1: /* compact */
- if (event_id > 30)
- private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
- break;
- case 2: /* large */
- if (event_id > 65534)
- private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
- break;
- default:
- WARN_ON_ONCE(1);
- }
-
- ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
- if (caa_unlikely(ret))
- goto put;
- if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &private_ctx->backend_pages)) {
- ret = -EPERM;
- goto put;
- }
- lttng_write_event_header(&client_config, ctx, &client_ctx, event_id);
- return 0;
-put:
- lib_ring_buffer_nesting_dec(&client_config);
- return ret;
-}
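-
-/*
- * The thresholds above mirror the header formats: a compact header encodes
- * IDs 0..30 directly (31 is the escape value written by the slow path) and
- * a large header encodes IDs 0..65534 (65535 is its escape value), so any
- * larger ID requires the LTTNG_RFLAG_EXTENDED layout.
- */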
-
-static
-void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- lib_ring_buffer_commit(&client_config, ctx);
- lib_ring_buffer_nesting_dec(&client_config);
-}
-
-static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const void *src, size_t len, size_t alignment)
-{
- lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
- lib_ring_buffer_write(&client_config, ctx, src, len);
-}
-
-static
-void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len)
-{
- lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
-}
-
-static
-void lttng_event_pstrcpy_pad(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len)
-{
- lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
-}
-
-static
-int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_finalized(rb_chan);
-}
-
-static
-int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_disabled(rb_chan);
-}
-
-static
-int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
- int cpu;
-
- for_each_channel_cpu(cpu, rb_chan) {
- int shm_fd, wait_fd, wakeup_fd;
- uint64_t memory_map_size;
-
- buf = channel_get_ring_buffer(&client_config, rb_chan,
- cpu, rb_chan->handle, &shm_fd, &wait_fd,
- &wakeup_fd, &memory_map_size);
- lib_ring_buffer_switch(&client_config, buf,
- SWITCH_ACTIVE, rb_chan->handle);
- }
- return 0;
-}
-
-static struct lttng_transport lttng_relay_transport = {
- .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
- .ops = {
- .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
- .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
- .pub = &lttng_relay_transport.ops,
- .channel_create = _channel_create,
- .channel_destroy = lttng_channel_destroy,
- .packet_avail_size = NULL, /* Would be racy anyway */
- .is_finalized = lttng_is_finalized,
- .is_disabled = lttng_is_disabled,
- .flush_buffer = lttng_flush_buffer,
- }),
- .event_reserve = lttng_event_reserve,
- .event_commit = lttng_event_commit,
- .event_write = lttng_event_write,
- .event_strcpy = lttng_event_strcpy,
- .event_pstrcpy_pad = lttng_event_pstrcpy_pad,
- },
- .client_config = &client_config,
-};
-
-void RING_BUFFER_MODE_TEMPLATE_INIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" init\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_register(&lttng_relay_transport);
-}
-
-void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" exit\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_unregister(&lttng_relay_transport);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer client template.
- */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdint.h>
-
-#include <ust-events-internal.h>
-#include "ust-bitfield.h"
-#include "ust-compat.h"
-#include "lttng-tracer.h"
-#include "../libringbuffer/frontend_types.h"
-#include <urcu/tls-compat.h>
-
-struct metadata_packet_header {
- uint32_t magic; /* 0x75D11D57 */
- uint8_t uuid[LTTNG_UST_UUID_LEN]; /* Unique Universal Identifier */
- uint32_t checksum; /* 0 if unused */
- uint32_t content_size; /* in bits */
- uint32_t packet_size; /* in bits */
- uint8_t compression_scheme; /* 0 if unused */
- uint8_t encryption_scheme; /* 0 if unused */
- uint8_t checksum_scheme; /* 0 if unused */
- uint8_t major; /* CTF spec major version number */
- uint8_t minor; /* CTF spec minor version number */
- uint8_t header_end[0];
-};
-
-struct metadata_record_header {
- uint8_t header_end[0]; /* End of header */
-};
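-
-/*
- * The zero-length header_end[] members mark the end of the meaningful
- * header: offsetof(struct metadata_packet_header, header_end) yields the
- * header size without trailing padding, which is what
- * client_packet_header_size() below returns.
- */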
-
-static const struct lttng_ust_lib_ring_buffer_config client_config;
-
-/* No nested use supported for metadata ring buffer. */
-static DEFINE_URCU_TLS(struct lttng_ust_lib_ring_buffer_ctx_private, private_ctx);
-
-static inline uint64_t lib_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
-{
- return 0;
-}
-
-static inline
-size_t record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- size_t offset __attribute__((unused)),
- size_t *pre_header_padding __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
- void *client_ctx __attribute__((unused)))
-{
- return 0;
-}
-
-#include "../libringbuffer/api.h"
-#include "lttng-rb-clients.h"
-
-static uint64_t client_ring_buffer_clock_read(
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
-{
- return 0;
-}
-
-static
-size_t client_record_header_size(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- size_t offset __attribute__((unused)),
- size_t *pre_header_padding __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
- void *client_ctx __attribute__((unused)))
-{
- return 0;
-}
-
-/**
- * client_packet_header_size - called on buffer-switch to a new sub-buffer
- *
- * Return the header size without padding after the structure. Don't use a
- * packed structure, because gcc generates inefficient code on some
- * architectures (powerpc, mips, ...).
- */
-static size_t client_packet_header_size(void)
-{
- return offsetof(struct metadata_packet_header, header_end);
-}
-
-static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
- unsigned int subbuf_idx,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct metadata_packet_header *header =
- (struct metadata_packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
-
- assert(header);
- if (!header)
- return;
- header->magic = TSDL_MAGIC_NUMBER;
- memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
- header->checksum = 0; /* 0 if unused */
- header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
- header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
- header->compression_scheme = 0; /* 0 if unused */
- header->encryption_scheme = 0; /* 0 if unused */
- header->checksum_scheme = 0; /* 0 if unused */
- header->major = CTF_SPEC_MAJOR;
- header->minor = CTF_SPEC_MINOR;
-}
-
-/*
- * offset is assumed to never be 0 here: we never deliver a completely
- * empty subbuffer. data_size is between 1 and subbuf_size.
- */
-static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf,
- uint64_t tsc __attribute__((unused)),
- unsigned int subbuf_idx, unsigned long data_size,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
- struct metadata_packet_header *header =
- (struct metadata_packet_header *)
- lib_ring_buffer_offset_address(&buf->backend,
- subbuf_idx * chan->backend.subbuf_size,
- handle);
- unsigned long records_lost = 0;
-
- assert(header);
- if (!header)
- return;
- header->content_size = data_size * CHAR_BIT; /* in bits */
- header->packet_size = LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
- /*
- * We do not care about the records lost count, because the metadata
- * channel waits and retries.
- */
- (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
- records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
- WARN_ON_ONCE(records_lost != 0);
-}
-
-static int client_buffer_create(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- const char *name __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
- return 0;
-}
-
-static void client_buffer_finalize(
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- void *priv __attribute__((unused)),
- int cpu __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
-}
-
-static const
-struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
- .parent = {
- .ring_buffer_clock_read = client_ring_buffer_clock_read,
- .record_header_size = client_record_header_size,
- .subbuffer_header_size = client_packet_header_size,
- .buffer_begin = client_buffer_begin,
- .buffer_end = client_buffer_end,
- .buffer_create = client_buffer_create,
- .buffer_finalize = client_buffer_finalize,
- },
-};
-
-static const struct lttng_ust_lib_ring_buffer_config client_config = {
- .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
- .cb.record_header_size = client_record_header_size,
- .cb.subbuffer_header_size = client_packet_header_size,
- .cb.buffer_begin = client_buffer_begin,
- .cb.buffer_end = client_buffer_end,
- .cb.buffer_create = client_buffer_create,
- .cb.buffer_finalize = client_buffer_finalize,
-
- .tsc_bits = 0,
- .alloc = RING_BUFFER_ALLOC_GLOBAL,
- .sync = RING_BUFFER_SYNC_GLOBAL,
- .mode = RING_BUFFER_MODE_TEMPLATE,
- .backend = RING_BUFFER_PAGE,
- .output = RING_BUFFER_MMAP,
- .oops = RING_BUFFER_OOPS_CONSISTENCY,
- .ipi = RING_BUFFER_NO_IPI_BARRIER,
- .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
- .client_type = LTTNG_CLIENT_TYPE,
-
- .cb_ptr = &client_cb.parent,
-};
-
-static
-struct lttng_ust_channel_buffer *_channel_create(const char *name,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- unsigned char *uuid,
- uint32_t chan_id,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout)
-{
- struct lttng_ust_abi_channel_config chan_priv_init;
- struct lttng_ust_shm_handle *handle;
- struct lttng_ust_channel_buffer *lttng_chan_buf;
-
- lttng_chan_buf = lttng_ust_alloc_channel_buffer();
- if (!lttng_chan_buf)
- return NULL;
- memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
- lttng_chan_buf->priv->id = chan_id;
-
- memset(&chan_priv_init, 0, sizeof(chan_priv_init));
- memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
- chan_priv_init.id = chan_id;
-
- handle = channel_create(&client_config, name,
- __alignof__(struct lttng_ust_channel_buffer),
- sizeof(struct lttng_ust_channel_buffer),
- &chan_priv_init,
- lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
- switch_timer_interval, read_timer_interval,
- stream_fds, nr_stream_fds, blocking_timeout);
- if (!handle)
- goto error;
- lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
- return lttng_chan_buf;
-
-error:
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
- return NULL;
-}
-
-static
-void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
-{
- channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
-}
-
-static
-int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- int ret;
-
- memset(&URCU_TLS(private_ctx), 0, sizeof(struct lttng_ust_lib_ring_buffer_ctx_private));
- URCU_TLS(private_ctx).pub = ctx;
- URCU_TLS(private_ctx).chan = ctx->client_priv;
- ctx->priv = &URCU_TLS(private_ctx);
- ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
- if (ret)
- return ret;
- if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
- &ctx->priv->backend_pages))
- return -EPERM;
- return 0;
-}
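-
-/*
- * Unlike the per-CPU client above, which keeps a per-nesting-level stack of
- * private contexts, the metadata client uses a single TLS context: as noted
- * above, nested use is not supported on the metadata ring buffer.
- */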
-
-static
-void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- lib_ring_buffer_commit(&client_config, ctx);
-}
-
-static
-void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const void *src, size_t len, size_t alignment)
-{
- lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
- lib_ring_buffer_write(&client_config, ctx, src, len);
-}
-
-static
-size_t lttng_packet_avail_size(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- unsigned long o_begin;
- struct lttng_ust_lib_ring_buffer *buf;
-
- buf = shmp(rb_chan->handle, rb_chan->backend.buf[0].shmp); /* Only for global buffer! */
- o_begin = v_read(&client_config, &buf->offset);
- if (subbuf_offset(o_begin, rb_chan) != 0) {
- return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan);
- } else {
- return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan)
- - sizeof(struct metadata_packet_header);
- }
-}
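-
-/*
- * When the write position sits on a sub-buffer boundary
- * (subbuf_offset() == 0), the next reservation will first emit a fresh
- * metadata_packet_header, so its size is subtracted from the space
- * reported to the metadata writer.
- */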
-
-static
-int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_finalized(rb_chan);
-}
-
-static
-int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
-
- return lib_ring_buffer_channel_is_disabled(rb_chan);
-}
-
-static
-int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
- struct lttng_ust_lib_ring_buffer *buf;
- int shm_fd, wait_fd, wakeup_fd;
- uint64_t memory_map_size;
-
- buf = channel_get_ring_buffer(&client_config, rb_chan,
- 0, rb_chan->handle, &shm_fd, &wait_fd, &wakeup_fd,
- &memory_map_size);
- lib_ring_buffer_switch(&client_config, buf,
- SWITCH_ACTIVE, rb_chan->handle);
- return 0;
-}
-
-static struct lttng_transport lttng_relay_transport = {
- .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
- .ops = {
- .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
-
- .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
- .pub = &lttng_relay_transport.ops,
- .channel_create = _channel_create,
- .channel_destroy = lttng_channel_destroy,
- .packet_avail_size = lttng_packet_avail_size,
- .is_finalized = lttng_is_finalized,
- .is_disabled = lttng_is_disabled,
- .flush_buffer = lttng_flush_buffer,
- }),
- .event_reserve = lttng_event_reserve,
- .event_commit = lttng_event_commit,
- .event_write = lttng_event_write,
- },
- .client_config = &client_config,
-};
-
-void RING_BUFFER_MODE_TEMPLATE_INIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" init\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_register(&lttng_relay_transport);
-}
-
-void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
-{
- DBG("LTT : ltt ring buffer client \"%s\" exit\n",
- "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
- lttng_transport_unregister(&lttng_relay_transport);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng lib ring buffer metadata client.
- */
-
-#define _LGPL_SOURCE
-#include "lttng-tracer.h"
-
-#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
-#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
-#define RING_BUFFER_MODE_TEMPLATE_INIT \
- lttng_ring_buffer_metadata_client_init
-#define RING_BUFFER_MODE_TEMPLATE_EXIT \
- lttng_ring_buffer_metadata_client_exit
-#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_METADATA
-#include "lttng-ring-buffer-metadata-client-template.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This contains the core definitions for the Linux Trace Toolkit.
- */
-
-#ifndef _LTTNG_TRACER_CORE_H
-#define _LTTNG_TRACER_CORE_H
-
-#include <stddef.h>
-#include <urcu/arch.h>
-#include <urcu/list.h>
-#include <lttng/ust-tracer.h>
-#include <lttng/ringbuffer-context.h>
-#include <usterr-signal-safe.h>
-
-/*
- * The longest possible namespace proc path is with the cgroup ns
- * and the maximum theoretical Linux PID of 536870912:
- *
- * /proc/self/task/536870912/ns/cgroup
- *
- * That is 35 characters plus the terminating NUL, within the 40 bytes
- * reserved below.
- */
-#define LTTNG_PROC_NS_PATH_MAX 40
-
-struct lttng_ust_session;
-struct lttng_ust_channel_buffer;
-struct lttng_ust_ctx_field;
-struct lttng_ust_lib_ring_buffer_ctx;
-struct lttng_ust_ctx_value;
-struct lttng_ust_event_recorder;
-struct lttng_ust_event_notifier;
-struct lttng_ust_notification_ctx;
-
-int ust_lock(void) __attribute__ ((warn_unused_result))
- __attribute__((visibility("hidden")));
-
-void ust_lock_nocheck(void)
- __attribute__((visibility("hidden")));
-
-void ust_unlock(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_event_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_vtid_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_procname_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_cgroup_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_ipc_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_net_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_time_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_fixup_uts_ns_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_fixup_fd_tracker_tls(void)
- __attribute__((visibility("hidden")));
-
-const char *lttng_ust_obj_get_name(int id)
- __attribute__((visibility("hidden")));
-
-int lttng_get_notify_socket(void *owner)
- __attribute__((visibility("hidden")));
-
-char* lttng_ust_sockinfo_get_procname(void *owner)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_sockinfo_session_enabled(void *owner)
- __attribute__((visibility("hidden")));
-
-ssize_t lttng_ust_read(int fd, void *buf, size_t len)
- __attribute__((visibility("hidden")));
-
-size_t lttng_ust_dummy_get_size(void *priv, size_t offset)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_dummy_record(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_dummy_get_value(void *priv, struct lttng_ust_ctx_value *value)
- __attribute__((visibility("hidden")));
-
-void lttng_event_notifier_notification_send(
- struct lttng_ust_event_notifier *event_notifier,
- const char *stack_data,
- struct lttng_ust_notification_ctx *notif_ctx)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_transport_register(struct lttng_counter_transport *transport)
- __attribute__((visibility("hidden")));
-
-void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
- __attribute__((visibility("hidden")));
-
-#ifdef HAVE_LINUX_PERF_EVENT_H
-void lttng_ust_fixup_perf_counter_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_perf_lock(void)
- __attribute__((visibility("hidden")));
-
-void lttng_perf_unlock(void)
- __attribute__((visibility("hidden")));
-#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
-static inline
-void lttng_ust_fixup_perf_counter_tls(void)
-{
-}
-static inline
-void lttng_perf_lock(void)
-{
-}
-static inline
-void lttng_perf_unlock(void)
-{
-}
-#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
-
-#endif /* _LTTNG_TRACER_CORE_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This contains the definitions for the Linux Trace Toolkit tracer.
- *
- * Ported to userspace by Pierre-Marc Fournier.
- */
-
-#ifndef _LTTNG_TRACER_H
-#define _LTTNG_TRACER_H
-
-#include <stdarg.h>
-#include <stdint.h>
-#include <lttng/ust-events.h>
-#include "lttng-tracer-core.h"
-#include "compat.h"
-
-/* Tracer properties */
-#define CTF_MAGIC_NUMBER 0xC1FC1FC1
-#define TSDL_MAGIC_NUMBER 0x75D11D57
-
-/* CTF specification version followed */
-#define CTF_SPEC_MAJOR 1
-#define CTF_SPEC_MINOR 8
-
-/*
- * Number of milliseconds to retry before failing metadata writes on
- * buffer-full condition (10 seconds).
- */
-#define LTTNG_METADATA_TIMEOUT_MSEC 10000
-
-#define LTTNG_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
-#define LTTNG_RFLAG_END (LTTNG_RFLAG_EXTENDED << 1)
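-
-/*
- * Client-specific reservation flags extend the ring buffer core flags:
- * LTTNG_RFLAG_EXTENDED takes the first bit past RING_BUFFER_RFLAG_END and
- * requests the extended event header (full-width event ID and timestamp).
- */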
-
-/*
- * LTTng client type enumeration. Used by the consumer to map the
- * callbacks from its own address space.
- */
-enum lttng_client_types {
- LTTNG_CLIENT_METADATA = 0,
- LTTNG_CLIENT_DISCARD = 1,
- LTTNG_CLIENT_OVERWRITE = 2,
- LTTNG_CLIENT_DISCARD_RT = 3,
- LTTNG_CLIENT_OVERWRITE_RT = 4,
- LTTNG_NR_CLIENT_TYPES,
-};
-
-#endif /* _LTTNG_TRACER_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * LTTng UST ABI
- *
- * Mimic system calls for:
- * - session creation, returns an object descriptor or failure.
- * - channel creation, returns an object descriptor or failure.
- * - Operates on a session object descriptor
- * - Takes all channel options as parameters.
- * - stream get, returns an object descriptor or failure.
- * - Operates on a channel object descriptor.
- * - stream notifier get, returns an object descriptor or failure.
- * - Operates on a channel object descriptor.
- * - event creation, returns an object descriptor or failure.
- * - Operates on a channel object descriptor
- * - Takes an event name as parameter
- * - Takes an instrumentation source as parameter
- * - e.g. tracepoints, dynamic_probes...
- * - Takes instrumentation-source-specific arguments.
- */
-
-#define _LGPL_SOURCE
-#include <fcntl.h>
-#include <stdint.h>
-#include <unistd.h>
-
-#include <urcu/compiler.h>
-#include <urcu/list.h>
-
-#include <lttng/tracepoint.h>
-#include <lttng/ust-abi.h>
-#include <lttng/ust-error.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-version.h>
-#include <ust-fd.h>
-#include <usterr-signal-safe.h>
-
-#include "../libringbuffer/frontend_types.h"
-#include "../libringbuffer/frontend.h"
-#include "../libringbuffer/shm.h"
-#include "../libcounter/counter.h"
-#include "tracepoint-internal.h"
-#include "lttng-tracer.h"
-#include "string-utils.h"
-#include "ust-events-internal.h"
-#include "context-internal.h"
-#include "ust-helper.h"
-
-#define OBJ_NAME_LEN 16
-
-static int lttng_ust_abi_close_in_progress;
-
-static
-int lttng_abi_tracepoint_list(void *owner);
-static
-int lttng_abi_tracepoint_field_list(void *owner);
-
-/*
- * Object descriptor table. Should be protected from concurrent access
- * by the caller.
- */
-
-struct lttng_ust_abi_obj {
- union {
- struct {
- void *private_data;
- const struct lttng_ust_abi_objd_ops *ops;
- int f_count;
- int owner_ref; /* has ref from owner */
- void *owner;
- char name[OBJ_NAME_LEN];
- } s;
- int freelist_next; /* offset freelist. end is -1. */
- } u;
-};
-
-struct lttng_ust_abi_objd_table {
- struct lttng_ust_abi_obj *array;
- unsigned int len, allocated_len;
- int freelist_head; /* offset freelist head. end is -1 */
-};
-
-static struct lttng_ust_abi_objd_table objd_table = {
- .freelist_head = -1,
-};
-
-static
-int objd_alloc(void *private_data, const struct lttng_ust_abi_objd_ops *ops,
- void *owner, const char *name)
-{
- struct lttng_ust_abi_obj *obj;
-
- if (objd_table.freelist_head != -1) {
- obj = &objd_table.array[objd_table.freelist_head];
- objd_table.freelist_head = obj->u.freelist_next;
- goto end;
- }
-
- if (objd_table.len >= objd_table.allocated_len) {
- unsigned int new_allocated_len, old_allocated_len;
- struct lttng_ust_abi_obj *new_table, *old_table;
-
- old_allocated_len = objd_table.allocated_len;
- old_table = objd_table.array;
- if (!old_allocated_len)
- new_allocated_len = 1;
- else
- new_allocated_len = old_allocated_len << 1;
- new_table = zmalloc(sizeof(struct lttng_ust_abi_obj) * new_allocated_len);
- if (!new_table)
- return -ENOMEM;
- memcpy(new_table, old_table,
- sizeof(struct lttng_ust_abi_obj) * old_allocated_len);
- free(old_table);
- objd_table.array = new_table;
- objd_table.allocated_len = new_allocated_len;
- }
- obj = &objd_table.array[objd_table.len];
- objd_table.len++;
-end:
- obj->u.s.private_data = private_data;
- obj->u.s.ops = ops;
- obj->u.s.f_count = 2; /* count == 1 : object is allocated */
- /* count == 2 : allocated + hold ref */
- obj->u.s.owner_ref = 1; /* One owner reference */
- obj->u.s.owner = owner;
- strncpy(obj->u.s.name, name, OBJ_NAME_LEN);
- obj->u.s.name[OBJ_NAME_LEN - 1] = '\0';
- return obj - objd_table.array;
-}
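-
-/*
- * Free slots are chained through the table itself: u.freelist_next holds
- * the index of the next free slot and -1 terminates the chain. Allocation
- * pops the freelist head first and only grows the array (doubling
- * allocated_len) when the freelist is empty.
- */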
-
-static
-struct lttng_ust_abi_obj *_objd_get(int id)
-{
- if (id >= objd_table.len)
- return NULL;
- if (!objd_table.array[id].u.s.f_count)
- return NULL;
- return &objd_table.array[id];
-}
-
-static
-void *objd_private(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
- assert(obj);
- return obj->u.s.private_data;
-}
-
-static
-void objd_set_private(int id, void *private_data)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
- assert(obj);
- obj->u.s.private_data = private_data;
-}
-
-const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- if (!obj)
- return NULL;
- return obj->u.s.ops;
-}
-
-static
-void objd_free(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- assert(obj);
- obj->u.freelist_next = objd_table.freelist_head;
- objd_table.freelist_head = obj - objd_table.array;
- assert(obj->u.s.f_count == 1);
- obj->u.s.f_count = 0; /* deallocated */
-}
-
-static
-void objd_ref(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
- assert(obj != NULL);
- obj->u.s.f_count++;
-}
-
-int lttng_ust_abi_objd_unref(int id, int is_owner)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- if (!obj)
- return -EINVAL;
- if (obj->u.s.f_count == 1) {
- ERR("Reference counting error\n");
- return -EINVAL;
- }
- if (is_owner) {
- if (!obj->u.s.owner_ref) {
- ERR("Error decrementing owner reference");
- return -EINVAL;
- }
- obj->u.s.owner_ref--;
- }
- if ((--obj->u.s.f_count) == 1) {
- const struct lttng_ust_abi_objd_ops *ops = lttng_ust_abi_objd_ops(id);
-
- if (ops->release)
- ops->release(id);
- objd_free(id);
- }
- return 0;
-}
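-
-/*
- * Reference counting convention: f_count == 2 right after objd_alloc() (one
- * count for the allocation, one for the caller's reference), f_count == 1
- * means allocated but no longer referenced, and f_count == 0 marks a free
- * slot. This is why the release callback fires when the count drops back
- * to 1.
- */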
-
-static
-void objd_table_destroy(void)
-{
- int i;
-
- for (i = 0; i < objd_table.allocated_len; i++) {
- struct lttng_ust_abi_obj *obj;
-
- obj = _objd_get(i);
- if (!obj)
- continue;
- if (!obj->u.s.owner_ref)
- continue; /* only unref owner ref. */
- (void) lttng_ust_abi_objd_unref(i, 1);
- }
- free(objd_table.array);
- objd_table.array = NULL;
- objd_table.len = 0;
- objd_table.allocated_len = 0;
- objd_table.freelist_head = -1;
-}
-
-const char *lttng_ust_obj_get_name(int id)
-{
- struct lttng_ust_abi_obj *obj = _objd_get(id);
-
- if (!obj)
- return NULL;
- return obj->u.s.name;
-}
-
-void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
-{
- int i;
-
- for (i = 0; i < objd_table.allocated_len; i++) {
- struct lttng_ust_abi_obj *obj;
-
- obj = _objd_get(i);
- if (!obj)
- continue;
- if (!obj->u.s.owner)
- continue; /* skip root handles */
- if (!obj->u.s.owner_ref)
- continue; /* only unref owner ref. */
- if (obj->u.s.owner == owner)
- (void) lttng_ust_abi_objd_unref(i, 1);
- }
-}
-
-/*
- * This is LTTng's own personal way to create an ABI for sessiond.
- * We send commands over a socket.
- */
-
-static const struct lttng_ust_abi_objd_ops lttng_ops;
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops;
-static const struct lttng_ust_abi_objd_ops lttng_session_ops;
-static const struct lttng_ust_abi_objd_ops lttng_channel_ops;
-static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops;
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops;
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops;
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops;
-
-int lttng_abi_create_root_handle(void)
-{
- int root_handle;
-
- /* root handles have NULL owners */
- root_handle = objd_alloc(NULL, &lttng_ops, NULL, "root");
- return root_handle;
-}
-
-static
-int lttng_is_channel_ready(struct lttng_ust_channel_buffer *lttng_chan)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- unsigned int nr_streams, exp_streams;
-
- chan = lttng_chan->priv->rb_chan;
- nr_streams = channel_handle_get_nr_streams(lttng_chan->priv->rb_chan->handle);
- exp_streams = chan->nr_streams;
- return nr_streams == exp_streams;
-}
-
-static
-int lttng_abi_create_session(void *owner)
-{
- struct lttng_ust_session *session;
- int session_objd, ret;
-
- session = lttng_session_create();
- if (!session)
- return -ENOMEM;
- session_objd = objd_alloc(session, &lttng_session_ops, owner, "session");
- if (session_objd < 0) {
- ret = session_objd;
- goto objd_error;
- }
- session->priv->objd = session_objd;
- session->priv->owner = owner;
- return session_objd;
-
-objd_error:
- lttng_session_destroy(session);
- return ret;
-}
-
-static
-long lttng_abi_tracer_version(int objd __attribute__((unused)),
- struct lttng_ust_abi_tracer_version *v)
-{
- v->major = LTTNG_UST_MAJOR_VERSION;
- v->minor = LTTNG_UST_MINOR_VERSION;
- v->patchlevel = LTTNG_UST_PATCHLEVEL_VERSION;
- return 0;
-}
-
-static
-int lttng_abi_event_notifier_send_fd(void *owner, int *event_notifier_notif_fd)
-{
- struct lttng_event_notifier_group *event_notifier_group;
- int event_notifier_group_objd, ret, fd_flag;
-
- event_notifier_group = lttng_event_notifier_group_create();
- if (!event_notifier_group)
- return -ENOMEM;
-
- /*
- * Set this file descriptor as NON-BLOCKING.
- */
- fd_flag = fcntl(*event_notifier_notif_fd, F_GETFL);
- if (fd_flag < 0) {
- ret = -errno;
- goto fd_error;
- }
- fd_flag |= O_NONBLOCK;
-
- ret = fcntl(*event_notifier_notif_fd, F_SETFL, fd_flag);
- if (ret) {
- ret = -errno;
- goto fd_error;
- }
-
- event_notifier_group_objd = objd_alloc(event_notifier_group,
- &lttng_event_notifier_group_ops, owner, "event_notifier_group");
- if (event_notifier_group_objd < 0) {
- ret = event_notifier_group_objd;
- goto objd_error;
- }
-
- event_notifier_group->objd = event_notifier_group_objd;
- event_notifier_group->owner = owner;
- event_notifier_group->notification_fd = *event_notifier_notif_fd;
- /* Object descriptor takes ownership of notification fd. */
- *event_notifier_notif_fd = -1;
-
- return event_notifier_group_objd;
-
-objd_error:
- lttng_event_notifier_group_destroy(event_notifier_group);
-fd_error:
- return ret;
-}
-
-static
-long lttng_abi_add_context(int objd __attribute__((unused)),
- struct lttng_ust_abi_context *context_param,
- union lttng_ust_abi_args *uargs,
- struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
-{
- return lttng_attach_context(context_param, uargs, ctx, session);
-}
-
-/**
- * lttng_cmd - lttng control through socket commands
- *
- * @objd: the object descriptor
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This descriptor implements lttng commands:
- * LTTNG_UST_ABI_SESSION
- * Returns a LTTng trace session object descriptor
- * LTTNG_UST_ABI_TRACER_VERSION
- * Returns the LTTng UST tracer version
- * LTTNG_UST_ABI_TRACEPOINT_LIST
- * Returns a file descriptor listing available tracepoints
- * LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST
- * Returns a file descriptor listing available tracepoint fields
- * LTTNG_UST_ABI_WAIT_QUIESCENT
- * Returns after all previously running probes have completed
- *
- * The returned session will be deleted when its file descriptor is closed.
- */
-static
-long lttng_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- switch (cmd) {
- case LTTNG_UST_ABI_SESSION:
- return lttng_abi_create_session(owner);
- case LTTNG_UST_ABI_TRACER_VERSION:
- return lttng_abi_tracer_version(objd,
- (struct lttng_ust_abi_tracer_version *) arg);
- case LTTNG_UST_ABI_TRACEPOINT_LIST:
- return lttng_abi_tracepoint_list(owner);
- case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST:
- return lttng_abi_tracepoint_field_list(owner);
- case LTTNG_UST_ABI_WAIT_QUIESCENT:
- lttng_ust_urcu_synchronize_rcu();
- return 0;
- case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
- return lttng_abi_event_notifier_send_fd(owner,
- &uargs->event_notifier_handle.event_notifier_notif_fd);
- default:
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_ops = {
- .cmd = lttng_cmd,
-};
-
-static
-int lttng_abi_map_channel(int session_objd,
- struct lttng_ust_abi_channel *ust_chan,
- union lttng_ust_abi_args *uargs,
- void *owner)
-{
- struct lttng_ust_session *session = objd_private(session_objd);
- const char *transport_name;
- struct lttng_transport *transport;
- const char *chan_name;
- int chan_objd;
- struct lttng_ust_shm_handle *channel_handle;
- struct lttng_ust_abi_channel_config *lttng_chan_config;
- struct lttng_ust_channel_buffer *lttng_chan_buf;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer_config *config;
- void *chan_data;
- int wakeup_fd;
- uint64_t len;
- int ret;
- enum lttng_ust_abi_chan_type type;
-
- chan_data = uargs->channel.chan_data;
- wakeup_fd = uargs->channel.wakeup_fd;
- len = ust_chan->len;
- type = ust_chan->type;
-
- switch (type) {
- case LTTNG_UST_ABI_CHAN_PER_CPU:
- break;
- default:
- ret = -EINVAL;
- goto invalid;
- }
-
- if (session->priv->been_active) {
- ret = -EBUSY;
- goto active; /* Refuse to add channel to active session */
- }
-
- lttng_chan_buf = lttng_ust_alloc_channel_buffer();
- if (!lttng_chan_buf) {
- ret = -ENOMEM;
- goto lttng_chan_buf_error;
- }
-
- channel_handle = channel_handle_create(chan_data, len, wakeup_fd);
- if (!channel_handle) {
- ret = -EINVAL;
- goto handle_error;
- }
-
- /* Ownership of chan_data and wakeup_fd taken by channel handle. */
- uargs->channel.chan_data = NULL;
- uargs->channel.wakeup_fd = -1;
-
- chan = shmp(channel_handle, channel_handle->chan);
- assert(chan);
- chan->handle = channel_handle;
- config = &chan->backend.config;
- lttng_chan_config = channel_get_private_config(chan);
- if (!lttng_chan_config) {
- ret = -EINVAL;
- goto alloc_error;
- }
-
- if (lttng_ust_session_uuid_validate(session, lttng_chan_config->uuid)) {
- ret = -EINVAL;
- goto uuid_error;
- }
-
- /* Lookup transport name */
- switch (type) {
- case LTTNG_UST_ABI_CHAN_PER_CPU:
- if (config->output == RING_BUFFER_MMAP) {
- if (config->mode == RING_BUFFER_OVERWRITE) {
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
- transport_name = "relay-overwrite-mmap";
- } else {
- transport_name = "relay-overwrite-rt-mmap";
- }
- } else {
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
- transport_name = "relay-discard-mmap";
- } else {
- transport_name = "relay-discard-rt-mmap";
- }
- }
- } else {
- ret = -EINVAL;
- goto notransport;
- }
- chan_name = "channel";
- break;
- default:
- ret = -EINVAL;
- goto notransport;
- }
- transport = lttng_ust_transport_find(transport_name);
- if (!transport) {
- DBG("LTTng transport %s not found\n",
- transport_name);
- ret = -EINVAL;
- goto notransport;
- }
-
- chan_objd = objd_alloc(NULL, &lttng_channel_ops, owner, chan_name);
- if (chan_objd < 0) {
- ret = chan_objd;
- goto objd_error;
- }
-
- /* Initialize our lttng chan */
- lttng_chan_buf->parent->enabled = 1;
- lttng_chan_buf->parent->session = session;
-
- lttng_chan_buf->priv->parent.tstate = 1;
- lttng_chan_buf->priv->ctx = NULL;
- lttng_chan_buf->priv->rb_chan = chan;
-
- lttng_chan_buf->ops = &transport->ops;
-
- memcpy(&chan->backend.config,
- transport->client_config,
- sizeof(chan->backend.config));
- cds_list_add(&lttng_chan_buf->priv->node, &session->priv->chan_head);
- lttng_chan_buf->priv->header_type = 0;
- lttng_chan_buf->priv->type = type;
- /* Copy fields from lttng ust chan config. */
- lttng_chan_buf->priv->id = lttng_chan_config->id;
- memcpy(lttng_chan_buf->priv->uuid, lttng_chan_config->uuid, LTTNG_UST_UUID_LEN);
- channel_set_private(chan, lttng_chan_buf);
-
- /*
- * We tolerate no failure path after channel creation. It will stay
- * invariant for the rest of the session.
- */
- objd_set_private(chan_objd, lttng_chan_buf);
- lttng_chan_buf->priv->parent.objd = chan_objd;
- /* The channel created holds a reference on the session */
- objd_ref(session_objd);
- return chan_objd;
-
- /* error path after channel was created */
-objd_error:
-notransport:
-uuid_error:
-alloc_error:
- channel_destroy(chan, channel_handle, 0);
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
- return ret;
-
-handle_error:
- lttng_ust_free_channel_common(lttng_chan_buf->parent);
-lttng_chan_buf_error:
-active:
-invalid:
- return ret;
-}
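-
-/*
- * The transport name is derived from the consumer-provided ring buffer
- * configuration as "relay-<overwrite|discard>[-rt]-mmap"; for instance, an
- * overwrite-mode channel with writer-driven wakeup maps to
- * "relay-overwrite-mmap", one of the names registered by the ring buffer
- * client templates.
- */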
-
-/**
- * lttng_session_cmd - lttng session object command
- *
- * @obj: the object
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This descriptor implements lttng commands:
- * LTTNG_UST_ABI_CHANNEL
- * Returns a LTTng channel object descriptor
- * LTTNG_UST_ABI_ENABLE
- * Enables tracing for a session (weak enable)
- * LTTNG_UST_ABI_DISABLE
- * Disables tracing for a session (strong disable)
- *
- * The returned channel will be deleted when its file descriptor is closed.
- */
-static
-long lttng_session_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- struct lttng_ust_session *session = objd_private(objd);
-
- switch (cmd) {
- case LTTNG_UST_ABI_CHANNEL:
- return lttng_abi_map_channel(objd,
- (struct lttng_ust_abi_channel *) arg,
- uargs, owner);
- case LTTNG_UST_ABI_SESSION_START:
- case LTTNG_UST_ABI_ENABLE:
- return lttng_session_enable(session);
- case LTTNG_UST_ABI_SESSION_STOP:
- case LTTNG_UST_ABI_DISABLE:
- return lttng_session_disable(session);
- case LTTNG_UST_ABI_SESSION_STATEDUMP:
- return lttng_session_statedump(session);
- case LTTNG_UST_ABI_COUNTER:
- case LTTNG_UST_ABI_COUNTER_GLOBAL:
- case LTTNG_UST_ABI_COUNTER_CPU:
- /* Not implemented yet. */
- return -EINVAL;
- default:
- return -EINVAL;
- }
-}
-
-/*
- * Called when the last file reference is dropped.
- *
- * Big fat note: channels and events are invariant for the whole session after
- * their creation. So this session destruction also destroys all channel and
- * event structures specific to this session (they are not destroyed when their
- * individual file is released).
- */
-static
-int lttng_release_session(int objd)
-{
- struct lttng_ust_session *session = objd_private(objd);
-
- if (session) {
- lttng_session_destroy(session);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_session_ops = {
- .release = lttng_release_session,
- .cmd = lttng_session_cmd,
-};
-
-static int lttng_ust_event_notifier_enabler_create(int event_notifier_group_obj,
- void *owner, struct lttng_ust_abi_event_notifier *event_notifier_param,
- enum lttng_enabler_format_type type)
-{
- struct lttng_event_notifier_group *event_notifier_group =
- objd_private(event_notifier_group_obj);
- struct lttng_event_notifier_enabler *event_notifier_enabler;
- int event_notifier_objd, ret;
-
- event_notifier_param->event.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- event_notifier_objd = objd_alloc(NULL, &lttng_event_notifier_enabler_ops, owner,
- "event_notifier enabler");
- if (event_notifier_objd < 0) {
- ret = event_notifier_objd;
- goto objd_error;
- }
-
- event_notifier_enabler = lttng_event_notifier_enabler_create(
- event_notifier_group, type, event_notifier_param);
- if (!event_notifier_enabler) {
- ret = -ENOMEM;
- goto event_notifier_error;
- }
-
- objd_set_private(event_notifier_objd, event_notifier_enabler);
- /* The event_notifier holds a reference on the event_notifier group. */
- objd_ref(event_notifier_enabler->group->objd);
-
- return event_notifier_objd;
-
-event_notifier_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(event_notifier_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-static
-long lttng_event_notifier_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs __attribute__((unused)),
- void *owner __attribute__((unused)))
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
- switch (cmd) {
- case LTTNG_UST_ABI_FILTER:
- return lttng_event_notifier_enabler_attach_filter_bytecode(
- event_notifier_enabler,
- (struct lttng_ust_bytecode_node **) arg);
- case LTTNG_UST_ABI_EXCLUSION:
- return lttng_event_notifier_enabler_attach_exclusion(event_notifier_enabler,
- (struct lttng_ust_excluder_node **) arg);
- case LTTNG_UST_ABI_CAPTURE:
- return lttng_event_notifier_enabler_attach_capture_bytecode(
- event_notifier_enabler,
- (struct lttng_ust_bytecode_node **) arg);
- case LTTNG_UST_ABI_ENABLE:
- return lttng_event_notifier_enabler_enable(event_notifier_enabler);
- case LTTNG_UST_ABI_DISABLE:
- return lttng_event_notifier_enabler_disable(event_notifier_enabler);
- default:
- return -EINVAL;
- }
-}
-
-/**
- * lttng_event_notifier_group_error_counter_cmd - lttng event_notifier group error counter object command
- *
- * @obj: the object
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This descriptor implements lttng commands:
- * LTTNG_UST_ABI_COUNTER_GLOBAL
- * Return negative error code on error, 0 on success.
- * LTTNG_UST_ABI_COUNTER_CPU
- * Return negative error code on error, 0 on success.
- */
-static
-long lttng_event_notifier_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
-{
- int ret;
- struct lttng_counter *counter = objd_private(objd);
-
- switch (cmd) {
- case LTTNG_UST_ABI_COUNTER_GLOBAL:
- ret = -EINVAL; /* Unimplemented. */
- break;
- case LTTNG_UST_ABI_COUNTER_CPU:
- {
- struct lttng_ust_abi_counter_cpu *counter_cpu =
- (struct lttng_ust_abi_counter_cpu *)arg;
-
- ret = lttng_counter_set_cpu_shm(counter->counter,
- counter_cpu->cpu_nr, uargs->counter_shm.shm_fd);
- if (!ret) {
- /* Take ownership of the shm_fd. */
- uargs->counter_shm.shm_fd = -1;
- }
- break;
- }
- default:
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-int lttng_release_event_notifier_group_error_counter(int objd)
- __attribute__((visibility("hidden")));
-int lttng_release_event_notifier_group_error_counter(int objd)
-{
- struct lttng_counter *counter = objd_private(objd);
-
- if (counter) {
- return lttng_ust_abi_objd_unref(counter->event_notifier_group->objd, 0);
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_error_counter_ops = {
- .release = lttng_release_event_notifier_group_error_counter,
- .cmd = lttng_event_notifier_group_error_counter_cmd,
-};
-
-static
-int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd, void *owner,
- struct lttng_ust_abi_counter_conf *error_counter_conf)
-{
- const char *counter_transport_name;
- struct lttng_event_notifier_group *event_notifier_group =
- objd_private(event_notifier_group_objd);
- struct lttng_counter *counter;
- int counter_objd, ret;
- struct lttng_counter_dimension dimensions[1];
- size_t counter_len;
-
- if (event_notifier_group->error_counter)
- return -EBUSY;
-
- if (error_counter_conf->arithmetic != LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR)
- return -EINVAL;
-
- if (error_counter_conf->number_dimensions != 1)
- return -EINVAL;
-
- switch (error_counter_conf->bitness) {
- case LTTNG_UST_ABI_COUNTER_BITNESS_64:
- counter_transport_name = "counter-per-cpu-64-modular";
- break;
- case LTTNG_UST_ABI_COUNTER_BITNESS_32:
- counter_transport_name = "counter-per-cpu-32-modular";
- break;
- default:
- return -EINVAL;
- }
-
- counter_objd = objd_alloc(NULL, &lttng_event_notifier_group_error_counter_ops, owner,
- "event_notifier group error counter");
- if (counter_objd < 0) {
- ret = counter_objd;
- goto objd_error;
- }
-
- counter_len = error_counter_conf->dimensions[0].size;
- dimensions[0].size = counter_len;
- dimensions[0].underflow_index = 0;
- dimensions[0].overflow_index = 0;
- dimensions[0].has_underflow = 0;
- dimensions[0].has_overflow = 0;
-
- counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions);
- if (!counter) {
- ret = -EINVAL;
- goto create_error;
- }
-
- event_notifier_group->error_counter_len = counter_len;
- /*
- * The store-release publishing the error counter pairs with the
- * load-acquire in record_error. It ensures the counter is created
- * and error_counter_len is set before either is used.
- * A full memory barrier is currently used; it could be relaxed
- * into acquire-release barriers.
- */
- cmm_smp_mb();
- CMM_STORE_SHARED(event_notifier_group->error_counter, counter);
-
- counter->objd = counter_objd;
- counter->event_notifier_group = event_notifier_group; /* owner */
-
- objd_set_private(counter_objd, counter);
- /* The error counter holds a reference on the event_notifier group. */
- objd_ref(event_notifier_group->objd);
-
- return counter_objd;
-
-create_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(counter_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
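The barrier comment above describes a publish/consume pairing. The following is a minimal sketch of the consuming side it refers to, using the same liburcu primitives; record_error() itself is not part of this hunk, so the body here is illustrative only.

static
void record_error_sketch(struct lttng_event_notifier_group *group,
		size_t index)
{
	struct lttng_counter *counter;

	counter = CMM_LOAD_SHARED(group->error_counter);
	if (!counter)
		return;
	cmm_smp_mb();	/* Pairs with the barrier in the creation path. */
	if (index >= group->error_counter_len)
		return;
	/* ... increment the error counter at `index` ... */
}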
-
-static
-long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- switch (cmd) {
- case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
- {
- struct lttng_ust_abi_event_notifier *event_notifier_param =
- (struct lttng_ust_abi_event_notifier *) arg;
- if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
- /*
- * If the event name is a star globbing pattern,
- * we create the special star globbing enabler.
- */
- return lttng_ust_event_notifier_enabler_create(objd,
- owner, event_notifier_param,
- LTTNG_ENABLER_FORMAT_STAR_GLOB);
- } else {
- return lttng_ust_event_notifier_enabler_create(objd,
- owner, event_notifier_param,
- LTTNG_ENABLER_FORMAT_EVENT);
- }
- }
- case LTTNG_UST_ABI_COUNTER:
- {
- struct lttng_ust_abi_counter_conf *counter_conf =
- (struct lttng_ust_abi_counter_conf *) uargs->counter.counter_data;
- return lttng_ust_event_notifier_group_create_error_counter(
- objd, owner, counter_conf);
- }
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_event_notifier_enabler_release(int objd)
-{
- struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
-
- if (event_notifier_enabler)
- return lttng_ust_abi_objd_unref(event_notifier_enabler->group->objd, 0);
- return 0;
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops = {
- .release = lttng_event_notifier_enabler_release,
- .cmd = lttng_event_notifier_enabler_cmd,
-};
-
-static
-int lttng_release_event_notifier_group(int objd)
-{
- struct lttng_event_notifier_group *event_notifier_group = objd_private(objd);
-
- if (event_notifier_group) {
- lttng_event_notifier_group_destroy(event_notifier_group);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops = {
- .release = lttng_release_event_notifier_group,
- .cmd = lttng_event_notifier_group_cmd,
-};
-
-static
-long lttng_tracepoint_list_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs __attribute__((unused)),
- void *owner __attribute__((unused)))
-{
- struct lttng_ust_tracepoint_list *list = objd_private(objd);
- struct lttng_ust_abi_tracepoint_iter *tp =
- (struct lttng_ust_abi_tracepoint_iter *) arg;
- struct lttng_ust_abi_tracepoint_iter *iter;
-
- switch (cmd) {
- case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
- {
- iter = lttng_ust_tracepoint_list_get_iter_next(list);
- if (!iter)
- return -LTTNG_UST_ERR_NOENT;
- memcpy(tp, iter, sizeof(*tp));
- return 0;
- }
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_abi_tracepoint_list(void *owner)
-{
- int list_objd, ret;
- struct lttng_ust_tracepoint_list *list;
-
- list_objd = objd_alloc(NULL, &lttng_tracepoint_list_ops, owner, "tp_list");
- if (list_objd < 0) {
- ret = list_objd;
- goto objd_error;
- }
- list = zmalloc(sizeof(*list));
- if (!list) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- objd_set_private(list_objd, list);
-
- /* Populate the list by walking over all registered probes. */
- ret = lttng_probes_get_event_list(list);
- if (ret) {
- goto list_error;
- }
- return list_objd;
-
-list_error:
- free(list);
-alloc_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(list_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-static
-int lttng_release_tracepoint_list(int objd)
-{
- struct lttng_ust_tracepoint_list *list = objd_private(objd);
-
- if (list) {
- lttng_probes_prune_event_list(list);
- free(list);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops = {
- .release = lttng_release_tracepoint_list,
- .cmd = lttng_tracepoint_list_cmd,
-};
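For context, the GET command above implements a pull-style iterator: the peer issues LTTNG_UST_ABI_TRACEPOINT_LIST_GET repeatedly until the command returns -LTTNG_UST_ERR_NOENT. A sketch of such a drain loop (hypothetical caller: the real one dispatches through the objd command path rather than calling the static function directly):

for (;;) {
	struct lttng_ust_abi_tracepoint_iter entry;
	long ret;

	ret = lttng_tracepoint_list_cmd(list_objd,
			LTTNG_UST_ABI_TRACEPOINT_LIST_GET,
			(unsigned long) &entry, NULL, NULL);
	if (ret == -LTTNG_UST_ERR_NOENT)
		break;		/* Iterator exhausted. */
	if (ret)
		return ret;	/* Unexpected failure. */
	/* ... consume the tracepoint entry ... */
}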
-
-static
-long lttng_tracepoint_field_list_cmd(int objd, unsigned int cmd,
- unsigned long arg __attribute__((unused)), union lttng_ust_abi_args *uargs,
- void *owner __attribute__((unused)))
-{
- struct lttng_ust_field_list *list = objd_private(objd);
- struct lttng_ust_abi_field_iter *tp = &uargs->field_list.entry;
- struct lttng_ust_abi_field_iter *iter;
-
- switch (cmd) {
- case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
- {
- iter = lttng_ust_field_list_get_iter_next(list);
- if (!iter)
- return -LTTNG_UST_ERR_NOENT;
- memcpy(tp, iter, sizeof(*tp));
- return 0;
- }
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_abi_tracepoint_field_list(void *owner)
-{
- int list_objd, ret;
- struct lttng_ust_field_list *list;
-
- list_objd = objd_alloc(NULL, &lttng_tracepoint_field_list_ops, owner,
- "tp_field_list");
- if (list_objd < 0) {
- ret = list_objd;
- goto objd_error;
- }
- list = zmalloc(sizeof(*list));
- if (!list) {
- ret = -ENOMEM;
- goto alloc_error;
- }
- objd_set_private(list_objd, list);
-
- /* Populate the list by walking over all registered probes. */
- ret = lttng_probes_get_field_list(list);
- if (ret) {
- goto list_error;
- }
- return list_objd;
-
-list_error:
- free(list);
-alloc_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(list_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-static
-int lttng_release_tracepoint_field_list(int objd)
-{
- struct lttng_ust_field_list *list = objd_private(objd);
-
- if (list) {
- lttng_probes_prune_field_list(list);
- free(list);
- return 0;
- } else {
- return -EINVAL;
- }
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops = {
- .release = lttng_release_tracepoint_field_list,
- .cmd = lttng_tracepoint_field_list_cmd,
-};
-
-static
-int lttng_abi_map_stream(int channel_objd, struct lttng_ust_abi_stream *info,
- union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(channel_objd);
- int ret;
-
- ret = channel_handle_add_stream(lttng_chan_buf->priv->rb_chan->handle,
- uargs->stream.shm_fd, uargs->stream.wakeup_fd,
- info->stream_nr, info->len);
- if (ret)
- goto error_add_stream;
- /* Take ownership of shm_fd and wakeup_fd. */
- uargs->stream.shm_fd = -1;
- uargs->stream.wakeup_fd = -1;
-
- return 0;
-
-error_add_stream:
- return ret;
-}
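The -1 stores above are the fd ownership hand-off idiom used throughout this file: a descriptor still holding a valid value after the call was not consumed and remains the caller's to close (see the LTTNG_UST_ABI_STREAM handling in handle_message() further down in this diff, which additionally wraps close() in the fd tracker lock). Caller-side shape, as an illustrative sketch:

ret = lttng_abi_map_stream(channel_objd, &info, &args, owner);
if (args.stream.shm_fd >= 0)
	close(args.stream.shm_fd);	/* Not consumed: still ours. */
if (args.stream.wakeup_fd >= 0)
	close(args.stream.wakeup_fd);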
-
-static
-int lttng_abi_create_event_enabler(int channel_objd,
- struct lttng_ust_abi_event *event_param,
- void *owner,
- enum lttng_enabler_format_type format_type)
-{
- struct lttng_ust_channel_buffer *channel = objd_private(channel_objd);
- struct lttng_event_enabler *enabler;
- int event_objd, ret;
-
- event_param->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
- event_objd = objd_alloc(NULL, &lttng_event_enabler_ops, owner,
- "event enabler");
- if (event_objd < 0) {
- ret = event_objd;
- goto objd_error;
- }
- /*
- * No failure path is tolerated after the enabler is created: it
- * stays in place, unchanged, for the rest of the session.
- */
- enabler = lttng_event_enabler_create(format_type, event_param, channel);
- if (!enabler) {
- ret = -ENOMEM;
- goto event_error;
- }
- objd_set_private(event_objd, enabler);
- /* The event enabler holds a reference on the channel. */
- objd_ref(channel_objd);
- return event_objd;
-
-event_error:
- {
- int err;
-
- err = lttng_ust_abi_objd_unref(event_objd, 1);
- assert(!err);
- }
-objd_error:
- return ret;
-}
-
-/**
- * lttng_channel_cmd - lttng control through object descriptors
- *
- * @objd: the object descriptor
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This object descriptor implements lttng commands:
- * LTTNG_UST_ABI_STREAM
- * Returns an event stream object descriptor or failure.
- * (typically, one event stream records events from one CPU)
- * LTTNG_UST_ABI_EVENT
- * Returns an event object descriptor or failure.
- * LTTNG_UST_ABI_CONTEXT
- * Prepend a context field to each event in the channel
- * LTTNG_UST_ABI_ENABLE
- * Enable recording for events in this channel (weak enable)
- * LTTNG_UST_ABI_DISABLE
- * Disable recording for events in this channel (strong disable)
- * LTTNG_UST_ABI_FLUSH_BUFFER
- * Flush this channel's ring buffers
- *
- * Channel and event file descriptors also hold a reference on the session.
- */
-static
-long lttng_channel_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs, void *owner)
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
-
- if (cmd != LTTNG_UST_ABI_STREAM) {
- /*
- * Check if channel received all streams.
- */
- if (!lttng_is_channel_ready(lttng_chan_buf))
- return -EPERM;
- }
-
- switch (cmd) {
- case LTTNG_UST_ABI_STREAM:
- {
- struct lttng_ust_abi_stream *stream;
-
- stream = (struct lttng_ust_abi_stream *) arg;
- /* stream used as output */
- return lttng_abi_map_stream(objd, stream, uargs, owner);
- }
- case LTTNG_UST_ABI_EVENT:
- {
- struct lttng_ust_abi_event *event_param =
- (struct lttng_ust_abi_event *) arg;
-
- if (strutils_is_star_glob_pattern(event_param->name)) {
- /*
- * If the event name is a star globbing pattern,
- * we create the special star globbing enabler.
- */
- return lttng_abi_create_event_enabler(objd, event_param,
- owner, LTTNG_ENABLER_FORMAT_STAR_GLOB);
- } else {
- return lttng_abi_create_event_enabler(objd, event_param,
- owner, LTTNG_ENABLER_FORMAT_EVENT);
- }
- }
- case LTTNG_UST_ABI_CONTEXT:
- return lttng_abi_add_context(objd,
- (struct lttng_ust_abi_context *) arg, uargs,
- &lttng_chan_buf->priv->ctx,
- lttng_chan_buf->parent->session);
- case LTTNG_UST_ABI_ENABLE:
- return lttng_channel_enable(lttng_chan_buf->parent);
- case LTTNG_UST_ABI_DISABLE:
- return lttng_channel_disable(lttng_chan_buf->parent);
- case LTTNG_UST_ABI_FLUSH_BUFFER:
- return lttng_chan_buf->ops->priv->flush_buffer(lttng_chan_buf);
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_channel_release(int objd)
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
-
- if (lttng_chan_buf)
- return lttng_ust_abi_objd_unref(lttng_chan_buf->parent->session->priv->objd, 0);
- return 0;
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_channel_ops = {
- .release = lttng_channel_release,
- .cmd = lttng_channel_cmd,
-};
-
-/**
- * lttng_enabler_cmd - lttng control through object descriptors
- *
- * @objd: the object descriptor
- * @cmd: the command
- * @arg: command arg
- * @uargs: UST arguments (internal)
- * @owner: objd owner
- *
- * This object descriptor implements lttng commands:
- * LTTNG_UST_ABI_CONTEXT
- * Prepend a context field to each record of events of this
- * enabler.
- * LTTNG_UST_ABI_ENABLE
- * Enable recording for this enabler
- * LTTNG_UST_ABI_DISABLE
- * Disable recording for this enabler
- * LTTNG_UST_ABI_FILTER
- * Attach a filter to an enabler.
- * LTTNG_UST_ABI_EXCLUSION
- * Attach exclusions to an enabler.
- */
-static
-long lttng_event_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *uargs __attribute__((unused)),
- void *owner __attribute__((unused)))
-{
- struct lttng_event_enabler *enabler = objd_private(objd);
-
- switch (cmd) {
- case LTTNG_UST_ABI_CONTEXT:
- return lttng_event_enabler_attach_context(enabler,
- (struct lttng_ust_abi_context *) arg);
- case LTTNG_UST_ABI_ENABLE:
- return lttng_event_enabler_enable(enabler);
- case LTTNG_UST_ABI_DISABLE:
- return lttng_event_enabler_disable(enabler);
- case LTTNG_UST_ABI_FILTER:
- return lttng_event_enabler_attach_filter_bytecode(enabler,
- (struct lttng_ust_bytecode_node **) arg);
- case LTTNG_UST_ABI_EXCLUSION:
- return lttng_event_enabler_attach_exclusion(enabler,
- (struct lttng_ust_excluder_node **) arg);
- default:
- return -EINVAL;
- }
-}
-
-static
-int lttng_event_enabler_release(int objd)
-{
- struct lttng_event_enabler *event_enabler = objd_private(objd);
-
- if (event_enabler)
- return lttng_ust_abi_objd_unref(event_enabler->chan->priv->parent.objd, 0);
-
- return 0;
-}
-
-static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops = {
- .release = lttng_event_enabler_release,
- .cmd = lttng_event_enabler_cmd,
-};
-
-void lttng_ust_abi_exit(void)
-{
- lttng_ust_abi_close_in_progress = 1;
- ust_lock_nocheck();
- objd_table_destroy();
- ust_unlock();
- lttng_ust_abi_close_in_progress = 0;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <sys/socket.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <sys/wait.h>
-#include <dlfcn.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <errno.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <time.h>
-#include <assert.h>
-#include <signal.h>
-#include <limits.h>
-#include <urcu/uatomic.h>
-#include "futex.h"
-#include <urcu/compiler.h>
-#include <lttng/urcu/urcu-ust.h>
-
-#include <lttng/ust-utils.h>
-#include <lttng/ust-events.h>
-#include <lttng/ust-abi.h>
-#include <lttng/ust-fork.h>
-#include <lttng/ust-error.h>
-#include <lttng/ust-ctl.h>
-#include <lttng/ust-libc-wrapper.h>
-#include <lttng/ust-thread.h>
-#include <lttng/ust-tracer.h>
-#include <urcu/tls-compat.h>
-#include <ust-comm.h>
-#include <ust-fd.h>
-#include <usterr-signal-safe.h>
-#include <ust-helper.h>
-#include "tracepoint-internal.h"
-#include "lttng-tracer-core.h"
-#include "compat.h"
-#include "../libringbuffer/rb-init.h"
-#include "lttng-ust-statedump.h"
-#include "clock.h"
-#include "../libringbuffer/getcpu.h"
-#include "getenv.h"
-#include "ust-events-internal.h"
-#include "context-internal.h"
-#include "ust-compat.h"
-#include "lttng-counter-client.h"
-#include "lttng-rb-clients.h"
-
-/*
- * Has the lttng-ust comm constructor been called?
- */
-static int initialized;
-
-/*
- * The ust_lock/ust_unlock lock is used as a communication thread mutex.
- * Held when handling a command, also held by fork() to deal with
- * removal of threads, and by exit path.
- *
- * The UST lock is the centralized mutex across UST tracing control and
- * probe registration.
- *
- * ust_exit_mutex must never nest in ust_mutex.
- *
- * ust_fork_mutex must never nest in ust_mutex.
- *
- * ust_mutex_nest is a per-thread nesting counter, allowing the perf
- * counter lazy initialization called by events within the statedump,
- * which traces while the ust_mutex is held.
- *
- * ust_lock nests within the dynamic loader lock (within glibc) because
- * it is taken within the library constructor.
- *
- * The ust fd tracker lock nests within the ust_mutex.
- */
-static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Allow nesting the ust_mutex within the same thread. */
-static DEFINE_URCU_TLS(int, ust_mutex_nest);
-
-/*
- * ust_exit_mutex protects thread_active variable wrt thread exit. It
- * cannot be done by ust_mutex because pthread_cancel(), which takes an
- * internal libc lock, cannot nest within ust_mutex.
- *
- * It never nests within a ust_mutex.
- */
-static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * ust_fork_mutex protects base address statedump tracing against forks. It
- * prevents the dynamic loader lock to be taken (by base address statedump
- * tracing) while a fork is happening, thus preventing deadlock issues with
- * the dynamic loader lock.
- */
-static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/* Should the ust comm thread quit? */
-static int lttng_ust_comm_should_quit;
-
-/*
- * This variable can be tested by applications to check whether
- * lttng-ust is loaded. They simply have to define their own
- * "lttng_ust_loaded" weak symbol, and test it. It is set to 1 by the
- * library constructor.
- */
-int lttng_ust_loaded __attribute__((weak));
-
-/*
- * Return 0 on success, -1 if should quit.
- * The lock is taken in both cases.
- * Signal-safe.
- */
-int ust_lock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_ENABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_mutex_nest)++)
- pthread_mutex_lock(&ust_mutex);
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (lttng_ust_comm_should_quit) {
- return -1;
- } else {
- return 0;
- }
-}
-
-/*
- * ust_lock_nocheck() can be used in constructors/destructors, because
- * they are already nested within the dynamic loader lock, and therefore
- * have exclusive access against execution of liblttng-ust destructor.
- * Signal-safe.
- */
-void ust_lock_nocheck(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_ENABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
- }
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!URCU_TLS(ust_mutex_nest)++)
- pthread_mutex_lock(&ust_mutex);
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-}
-
-/*
- * Signal-safe.
- */
-void ust_unlock(void)
-{
- sigset_t sig_all_blocked, orig_mask;
- int ret, oldstate;
-
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- if (!--URCU_TLS(ust_mutex_nest))
- pthread_mutex_unlock(&ust_mutex);
- ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
- ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
- if (ret) {
- ERR("pthread_setcancelstate: %s", strerror(ret));
- }
- if (oldstate != PTHREAD_CANCEL_DISABLE) {
- ERR("pthread_setcancelstate: unexpected oldstate");
- }
-}
-
-/*
- * Wait for one of the following before continuing to the main
- * program:
- * - the register_done message from the sessiond daemon
- * (lets the sessiond enable sessions before main starts),
- * - the sessiond daemon being unreachable,
- * - a timeout (keeping applications resilient to session
- * daemon problems).
- */
-static sem_t constructor_wait;
-/*
- * Account for both the global and local sessiond: each contributes
- * two decrements (registration done, then initial statedump done),
- * hence the initial value of 4.
- */
-enum {
- sem_count_initial_value = 4,
-};
-
-static int sem_count = sem_count_initial_value;
-
-/*
- * Counting nesting within lttng-ust. Used to ensure that calling fork()
- * from liblttng-ust does not execute the pre/post fork handlers.
- */
-static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
-
-/*
- * Info about socket and associated listener thread.
- */
-struct sock_info {
- const char *name;
- pthread_t ust_listener; /* listener thread */
- int root_handle;
- int registration_done;
- int allowed;
- int global;
- int thread_active;
-
- char sock_path[PATH_MAX];
- int socket;
- int notify_socket;
-
- char wait_shm_path[PATH_MAX];
- char *wait_shm_mmap;
- /* Keep track of lazy state dump not performed yet. */
- int statedump_pending;
- int initial_statedump_done;
- /* Keep procname for statedump */
- char procname[LTTNG_UST_ABI_PROCNAME_LEN];
-};
-
-/* Socket from app (connect) to session daemon (listen) for communication */
-struct sock_info global_apps = {
- .name = "global",
- .global = 1,
-
- .root_handle = -1,
- .registration_done = 0,
- .allowed = 0,
- .thread_active = 0,
-
- .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
- .socket = -1,
- .notify_socket = -1,
-
- .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
-
- .statedump_pending = 0,
- .initial_statedump_done = 0,
- .procname[0] = '\0'
-};
-
-/* TODO: allow global_apps_sock_path override */
-
-struct sock_info local_apps = {
- .name = "local",
- .global = 0,
- .root_handle = -1,
- .registration_done = 0,
- .allowed = 0, /* Check setuid bit first */
- .thread_active = 0,
-
- .socket = -1,
- .notify_socket = -1,
-
- .statedump_pending = 0,
- .initial_statedump_done = 0,
- .procname[0] = '\0'
-};
-
-static int wait_poll_fallback;
-
-static const char *cmd_name_mapping[] = {
- [ LTTNG_UST_ABI_RELEASE ] = "Release",
- [ LTTNG_UST_ABI_SESSION ] = "Create Session",
- [ LTTNG_UST_ABI_TRACER_VERSION ] = "Get Tracer Version",
-
- [ LTTNG_UST_ABI_TRACEPOINT_LIST ] = "Create Tracepoint List",
- [ LTTNG_UST_ABI_WAIT_QUIESCENT ] = "Wait for Quiescent State",
- [ LTTNG_UST_ABI_REGISTER_DONE ] = "Registration Done",
- [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
-
- [ LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE ] = "Create event notifier group",
-
- /* Session FD commands */
- [ LTTNG_UST_ABI_CHANNEL ] = "Create Channel",
- [ LTTNG_UST_ABI_SESSION_START ] = "Start Session",
- [ LTTNG_UST_ABI_SESSION_STOP ] = "Stop Session",
-
- /* Channel FD commands */
- [ LTTNG_UST_ABI_STREAM ] = "Create Stream",
- [ LTTNG_UST_ABI_EVENT ] = "Create Event",
-
- /* Event and Channel FD commands */
- [ LTTNG_UST_ABI_CONTEXT ] = "Create Context",
- [ LTTNG_UST_ABI_FLUSH_BUFFER ] = "Flush Buffer",
-
- /* Event, Channel and Session commands */
- [ LTTNG_UST_ABI_ENABLE ] = "Enable",
- [ LTTNG_UST_ABI_DISABLE ] = "Disable",
-
- /* Tracepoint list commands */
- [ LTTNG_UST_ABI_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
- [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
-
- /* Event FD commands */
- [ LTTNG_UST_ABI_FILTER ] = "Create Filter",
- [ LTTNG_UST_ABI_EXCLUSION ] = "Add exclusions to event",
-
- /* Event notifier group commands */
- [ LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE ] = "Create event notifier",
-
- /* Session and event notifier group commands */
- [ LTTNG_UST_ABI_COUNTER ] = "Create Counter",
-
- /* Counter commands */
- [ LTTNG_UST_ABI_COUNTER_GLOBAL ] = "Create Counter Global",
- [ LTTNG_UST_ABI_COUNTER_CPU ] = "Create Counter CPU",
-};
-
-static const char *str_timeout;
-static int got_timeout_env;
-
-static char *get_map_shm(struct sock_info *sock_info);
-
-ssize_t lttng_ust_read(int fd, void *buf, size_t len)
-{
- ssize_t ret;
- size_t copied = 0, to_copy = len;
-
- do {
- ret = read(fd, buf + copied, to_copy);
- if (ret > 0) {
- copied += ret;
- to_copy -= ret;
- }
- } while ((ret > 0 && to_copy > 0)
- || (ret < 0 && errno == EINTR));
- if (ret > 0) {
- ret = copied;
- }
- return ret;
-}
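Usage sketch for the restartable read helper above: it loops over EINTR and short reads, so a caller may treat any return other than the full length as failure (sock and the surrounding function are hypothetical).

struct ustcomm_ust_msg lum;
ssize_t len;

len = lttng_ust_read(sock, &lum, sizeof(lum));
if (len != (ssize_t) sizeof(lum))
	return -EINVAL;	/* EOF, short read or error. */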
-/*
- * Returns the HOME directory path. Caller MUST NOT free(3) the returned
- * pointer.
- */
-static
-const char *get_lttng_home_dir(void)
-{
- const char *val;
-
- val = (const char *) lttng_ust_getenv("LTTNG_HOME");
- if (val != NULL) {
- return val;
- }
- return (const char *) lttng_ust_getenv("HOME");
-}
-
-/*
- * Force a read of TLS variables, triggering their allocation
- * ("fixup"), as needed when loaded through dlopen.
- */
-static
-void lttng_fixup_nest_count_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
-}
-
-static
-void lttng_fixup_ust_mutex_nest_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
-}
-
-/*
- * Fixup lttng-ust urcu TLS.
- */
-static
-void lttng_fixup_lttng_ust_urcu_tls(void)
-{
- (void) lttng_ust_urcu_read_ongoing();
-}
-
-void lttng_ust_fixup_tls(void)
-{
- lttng_fixup_lttng_ust_urcu_tls();
- lttng_fixup_ringbuffer_tls();
- lttng_fixup_vtid_tls();
- lttng_fixup_nest_count_tls();
- lttng_fixup_procname_tls();
- lttng_fixup_ust_mutex_nest_tls();
- lttng_ust_fixup_perf_counter_tls();
- lttng_ust_fixup_fd_tracker_tls();
- lttng_fixup_cgroup_ns_tls();
- lttng_fixup_ipc_ns_tls();
- lttng_fixup_net_ns_tls();
- lttng_fixup_time_ns_tls();
- lttng_fixup_uts_ns_tls();
- lttng_ust_fixup_ring_buffer_client_discard_tls();
- lttng_ust_fixup_ring_buffer_client_discard_rt_tls();
- lttng_ust_fixup_ring_buffer_client_overwrite_tls();
- lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls();
-}
-
-/*
- * LTTng-UST uses Global Dynamic model TLS variables rather than IE
- * model because many versions of glibc don't preallocate a pool large
- * enough for TLS variables IE model defined in other shared libraries,
- * and causes issues when using LTTng-UST for Java tracing.
- *
- * Because of this use of Global Dynamic TLS variables, users wishing to
- * trace from signal handlers need to explicitly trigger the lazy
- * allocation of those variables for each thread before using them.
- * This can be triggered by calling lttng_ust_init_thread().
- */
-void lttng_ust_init_thread(void)
-{
- /*
- * Because those TLS variables are global dynamic, we need to
- * ensure those are initialized before a signal handler nesting over
- * this thread attempts to use them.
- */
- lttng_ust_fixup_tls();
-}
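Application-side sketch of the requirement described above (worker() and its handlers are made up for illustration; the declaration of lttng_ust_init_thread() is assumed to come from <lttng/ust-thread.h>, included earlier in this file): each thread that may trace from a signal handler warms up its TLS first.

static void *worker(void *data __attribute__((unused)))
{
	/* Allocate Global Dynamic TLS before a signal handler can nest. */
	lttng_ust_init_thread();
	/* ... install signal handlers, then do traced work ... */
	return NULL;
}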
-
-int lttng_get_notify_socket(void *owner)
-{
- struct sock_info *info = owner;
-
- return info->notify_socket;
-}
-
-char *lttng_ust_sockinfo_get_procname(void *owner)
-{
- struct sock_info *info = owner;
-
- return info->procname;
-}
-
-static
-void print_cmd(int cmd, int handle)
-{
- const char *cmd_name = "Unknown";
-
- if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
- && cmd_name_mapping[cmd]) {
- cmd_name = cmd_name_mapping[cmd];
- }
- DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
- cmd_name, cmd,
- lttng_ust_obj_get_name(handle), handle);
-}
-
-static
-int setup_global_apps(void)
-{
- int ret = 0;
- assert(!global_apps.wait_shm_mmap);
-
- global_apps.wait_shm_mmap = get_map_shm(&global_apps);
- if (!global_apps.wait_shm_mmap) {
- WARN("Unable to get map shm for global apps. Disabling LTTng-UST global tracing.");
- global_apps.allowed = 0;
- ret = -EIO;
- goto error;
- }
-
- global_apps.allowed = 1;
- lttng_pthread_getname_np(global_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
-error:
- return ret;
-}
-
-static
-int setup_local_apps(void)
-{
- int ret = 0;
- const char *home_dir;
- uid_t uid;
-
- assert(!local_apps.wait_shm_mmap);
-
- uid = getuid();
- /*
- * Disallow per-user tracing for setuid binaries.
- */
- if (uid != geteuid()) {
- assert(local_apps.allowed == 0);
- ret = 0;
- goto end;
- }
- home_dir = get_lttng_home_dir();
- if (!home_dir) {
- WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
- assert(local_apps.allowed == 0);
- ret = -ENOENT;
- goto end;
- }
- local_apps.allowed = 1;
- snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
- home_dir,
- LTTNG_DEFAULT_HOME_RUNDIR,
- LTTNG_UST_SOCK_FILENAME);
- snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
- LTTNG_UST_WAIT_FILENAME,
- uid);
-
- local_apps.wait_shm_mmap = get_map_shm(&local_apps);
- if (!local_apps.wait_shm_mmap) {
- WARN("Unable to get map shm for local apps. Disabling LTTng-UST per-user tracing.");
- local_apps.allowed = 0;
- ret = -EIO;
- goto end;
- }
-
- lttng_pthread_getname_np(local_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
-end:
- return ret;
-}
-
-/*
- * Get socket timeout, in ms.
- * -1: wait forever. 0: don't wait. >0: timeout, in ms.
- */
-static
-long get_timeout(void)
-{
- long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
-
- if (!got_timeout_env) {
- str_timeout = lttng_ust_getenv("LTTNG_UST_REGISTER_TIMEOUT");
- got_timeout_env = 1;
- }
- if (str_timeout)
- constructor_delay_ms = strtol(str_timeout, NULL, 10);
- /* All negative values are considered as "-1". */
- if (constructor_delay_ms < -1)
- constructor_delay_ms = -1;
- return constructor_delay_ms;
-}
-
-/* Timeout for notify socket send and recv. */
-static
-long get_notify_sock_timeout(void)
-{
- return get_timeout();
-}
-
-/* Timeout for connecting to cmd and notify sockets. */
-static
-long get_connect_sock_timeout(void)
-{
- return get_timeout();
-}
-
-/*
- * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
- */
-static
-int get_constructor_timeout(struct timespec *constructor_timeout)
-{
- long constructor_delay_ms;
- int ret;
-
- constructor_delay_ms = get_timeout();
-
- switch (constructor_delay_ms) {
- case -1:/* fall-through */
- case 0:
- return constructor_delay_ms;
- default:
- break;
- }
-
- /*
- * If we are unable to find the current time, don't wait.
- */
- ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
- if (ret) {
- /* Don't wait. */
- return 0;
- }
- constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
- constructor_timeout->tv_nsec +=
- (constructor_delay_ms % 1000UL) * 1000000UL;
- if (constructor_timeout->tv_nsec >= 1000000000UL) {
- constructor_timeout->tv_sec++;
- constructor_timeout->tv_nsec -= 1000000000UL;
- }
- /* Timeout wait (constructor_delay_ms). */
- return 1;
-}
-
-static
-void get_allow_blocking(void)
-{
- const char *str_allow_blocking =
- lttng_ust_getenv("LTTNG_UST_ALLOW_BLOCKING");
-
- if (str_allow_blocking) {
- DBG("%s environment variable is set",
- "LTTNG_UST_ALLOW_BLOCKING");
- lttng_ust_ringbuffer_set_allow_blocking();
- }
-}
-
-static
-int register_to_sessiond(int socket, enum ustctl_socket_type type)
-{
- return ustcomm_send_reg_msg(socket,
- type,
- CAA_BITS_PER_LONG,
- lttng_ust_rb_alignof(uint8_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint16_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint32_t) * CHAR_BIT,
- lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
- lttng_ust_rb_alignof(unsigned long) * CHAR_BIT);
-}
-
-static
-int send_reply(int sock, struct ustcomm_ust_reply *lur)
-{
- ssize_t len;
-
- len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
- switch (len) {
- case sizeof(*lur):
- DBG("message successfully sent");
- return 0;
- default:
- if (len == -ECONNRESET) {
- DBG("remote end closed connection");
- return 0;
- }
- if (len < 0)
- return len;
- DBG("incorrect message size: %zd", len);
- return -EINVAL;
- }
-}
-
-static
-void decrement_sem_count(unsigned int count)
-{
- int ret;
-
- assert(uatomic_read(&sem_count) >= count);
-
- if (uatomic_read(&sem_count) <= 0) {
- return;
- }
-
- ret = uatomic_add_return(&sem_count, -count);
- if (ret == 0) {
- ret = sem_post(&constructor_wait);
- assert(!ret);
- }
-}
-
-static
-int handle_register_done(struct sock_info *sock_info)
-{
- if (sock_info->registration_done)
- return 0;
- sock_info->registration_done = 1;
-
- decrement_sem_count(1);
- if (!sock_info->statedump_pending) {
- sock_info->initial_statedump_done = 1;
- decrement_sem_count(1);
- }
-
- return 0;
-}
-
-static
-int handle_register_failed(struct sock_info *sock_info)
-{
- if (sock_info->registration_done)
- return 0;
- sock_info->registration_done = 1;
- sock_info->initial_statedump_done = 1;
-
- decrement_sem_count(2);
-
- return 0;
-}
-
-/*
- * Only execute pending statedump after the constructor semaphore has
- * been posted by the current listener thread. This means statedump will
- * only be performed after the "registration done" command is received
- * from this thread's session daemon.
- *
- * This ensures we don't run into deadlock issues with the dynamic
- * loader mutex, which is held while the constructor is called and
- * waiting on the constructor semaphore. All operations requiring this
- * dynamic loader lock need to be postponed using this mechanism.
- *
- * In a scenario with two session daemons connected to the application,
- * it is possible that the first listener thread which receives the
- * registration done command issues its statedump while the dynamic
- * loader lock is still held by the application constructor waiting on
- * the semaphore. It will however be allowed to proceed when the
- * second session daemon sends the registration done command to the
- * second listener thread. This situation therefore does not produce
- * a deadlock.
- */
-static
-void handle_pending_statedump(struct sock_info *sock_info)
-{
- if (sock_info->registration_done && sock_info->statedump_pending) {
- sock_info->statedump_pending = 0;
- pthread_mutex_lock(&ust_fork_mutex);
- lttng_handle_pending_statedump(sock_info);
- pthread_mutex_unlock(&ust_fork_mutex);
-
- if (!sock_info->initial_statedump_done) {
- sock_info->initial_statedump_done = 1;
- decrement_sem_count(1);
- }
- }
-}
-
-static inline
-const char *bytecode_type_str(uint32_t cmd)
-{
- switch (cmd) {
- case LTTNG_UST_ABI_CAPTURE:
- return "capture";
- case LTTNG_UST_ABI_FILTER:
- return "filter";
- default:
- abort();
- }
-}
-
-static
-int handle_bytecode_recv(struct sock_info *sock_info,
- int sock, struct ustcomm_ust_msg *lum)
-{
- struct lttng_ust_bytecode_node *bytecode = NULL;
- enum lttng_ust_bytecode_type type;
- const struct lttng_ust_abi_objd_ops *ops;
- uint32_t data_size, data_size_max, reloc_offset;
- uint64_t seqnum;
- ssize_t len;
- int ret = 0;
-
- switch (lum->cmd) {
- case LTTNG_UST_ABI_FILTER:
- type = LTTNG_UST_BYTECODE_TYPE_FILTER;
- data_size = lum->u.filter.data_size;
- data_size_max = LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN;
- reloc_offset = lum->u.filter.reloc_offset;
- seqnum = lum->u.filter.seqnum;
- break;
- case LTTNG_UST_ABI_CAPTURE:
- type = LTTNG_UST_BYTECODE_TYPE_CAPTURE;
- data_size = lum->u.capture.data_size;
- data_size_max = LTTNG_UST_ABI_CAPTURE_BYTECODE_MAX_LEN;
- reloc_offset = lum->u.capture.reloc_offset;
- seqnum = lum->u.capture.seqnum;
- break;
- default:
- abort();
- }
-
- if (data_size > data_size_max) {
- ERR("Bytecode %s data size is too large: %u bytes",
- bytecode_type_str(lum->cmd), data_size);
- ret = -EINVAL;
- goto end;
- }
-
- if (reloc_offset > data_size) {
- ERR("Bytecode %s reloc offset %u is not within data",
- bytecode_type_str(lum->cmd), reloc_offset);
- ret = -EINVAL;
- goto end;
- }
-
- /* Allocate the structure AND the `data[]` field. */
- bytecode = zmalloc(sizeof(*bytecode) + data_size);
- if (!bytecode) {
- ret = -ENOMEM;
- goto end;
- }
-
- bytecode->bc.len = data_size;
- bytecode->bc.reloc_offset = reloc_offset;
- bytecode->bc.seqnum = seqnum;
- bytecode->type = type;
-
- len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, bytecode->bc.len);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto end;
- default:
- if (len == bytecode->bc.len) {
- DBG("Bytecode %s data received",
- bytecode_type_str(lum->cmd));
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d",
- (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection",
- sock_info->name);
- ret = len;
- goto end;
- }
- ret = len;
- goto end;
- } else {
- DBG("Incorrect %s bytecode data message size: %zd",
- bytecode_type_str(lum->cmd), len);
- ret = -EINVAL;
- goto end;
- }
- }
-
- ops = lttng_ust_abi_objd_ops(lum->handle);
- if (!ops) {
- ret = -ENOENT;
- goto end;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &bytecode,
- NULL, sock_info);
- else
- ret = -ENOSYS;
-
-end:
- free(bytecode);
- return ret;
-}
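The allocation above covers both the fixed header and the flexible `data[]` tail with a single zmalloc(), so every exit path can release it with one free(). The general shape of the pattern, with a made-up struct name for illustration:

struct msg_node {
	size_t len;
	char data[];	/* Flexible array member. */
};

struct msg_node *node;

node = zmalloc(sizeof(*node) + data_size);
if (!node)
	return -ENOMEM;
node->len = data_size;
/* ... receive exactly data_size bytes into node->data ... */
free(node);	/* Single release, whatever path was taken. */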
-
-static
-int handle_message(struct sock_info *sock_info,
- int sock, struct ustcomm_ust_msg *lum)
-{
- int ret = 0;
- const struct lttng_ust_abi_objd_ops *ops;
- struct ustcomm_ust_reply lur;
- union lttng_ust_abi_args args;
- char ctxstr[LTTNG_UST_ABI_SYM_NAME_LEN]; /* App context string. */
- ssize_t len;
-
- memset(&lur, 0, sizeof(lur));
-
- if (ust_lock()) {
- ret = -LTTNG_UST_ERR_EXITING;
- goto error;
- }
-
- ops = lttng_ust_abi_objd_ops(lum->handle);
- if (!ops) {
- ret = -ENOENT;
- goto error;
- }
-
- switch (lum->cmd) {
- case LTTNG_UST_ABI_REGISTER_DONE:
- if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
- ret = handle_register_done(sock_info);
- else
- ret = -EINVAL;
- break;
- case LTTNG_UST_ABI_RELEASE:
- if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
- ret = -EPERM;
- else
- ret = lttng_ust_abi_objd_unref(lum->handle, 1);
- break;
- case LTTNG_UST_ABI_CAPTURE:
- case LTTNG_UST_ABI_FILTER:
- ret = handle_bytecode_recv(sock_info, sock, lum);
- if (ret)
- goto error;
- break;
- case LTTNG_UST_ABI_EXCLUSION:
- {
- /* Receive exclusion names */
- struct lttng_ust_excluder_node *node;
- unsigned int count;
-
- count = lum->u.exclusion.count;
- if (count == 0) {
- /* There are no names to read */
- ret = 0;
- goto error;
- }
- node = zmalloc(sizeof(*node) +
- count * LTTNG_UST_ABI_SYM_NAME_LEN);
- if (!node) {
- ret = -ENOMEM;
- goto error;
- }
- node->excluder.count = count;
- len = ustcomm_recv_unix_sock(sock, node->excluder.names,
- count * LTTNG_UST_ABI_SYM_NAME_LEN);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- free(node);
- goto error;
- default:
- if (len == count * LTTNG_UST_ABI_SYM_NAME_LEN) {
- DBG("Exclusion data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- free(node);
- goto error;
- }
- ret = len;
- free(node);
- goto error;
- } else {
- DBG("Incorrect exclusion data message size: %zd", len);
- ret = -EINVAL;
- free(node);
- goto error;
- }
- }
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &node,
- &args, sock_info);
- else
- ret = -ENOSYS;
- free(node);
- break;
- }
- case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
- {
- int event_notifier_notif_fd, close_ret;
-
- len = ustcomm_recv_event_notifier_notif_fd_from_sessiond(sock,
- &event_notifier_notif_fd);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- case 1:
- break;
- default:
- if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d",
- (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection",
- sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("Incorrect event notifier fd message size: %zd",
- len);
- ret = -EINVAL;
- goto error;
- }
- }
- args.event_notifier_handle.event_notifier_notif_fd =
- event_notifier_notif_fd;
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.event_notifier_handle.event_notifier_notif_fd >= 0) {
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.event_notifier_handle.event_notifier_notif_fd);
- lttng_ust_unlock_fd_tracker();
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_CHANNEL:
- {
- void *chan_data;
- int wakeup_fd;
-
- len = ustcomm_recv_channel_from_sessiond(sock,
- &chan_data, lum->u.channel.len,
- &wakeup_fd);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == lum->u.channel.len) {
- DBG("channel data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect channel data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- args.channel.chan_data = chan_data;
- args.channel.wakeup_fd = wakeup_fd;
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.channel.wakeup_fd >= 0) {
- int close_ret;
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.channel.wakeup_fd);
- lttng_ust_unlock_fd_tracker();
- args.channel.wakeup_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- free(args.channel.chan_data);
- break;
- }
- case LTTNG_UST_ABI_STREAM:
- {
- int close_ret;
-
- /* Receive shm_fd, wakeup_fd */
- ret = ustcomm_recv_stream_from_sessiond(sock,
- NULL,
- &args.stream.shm_fd,
- &args.stream.wakeup_fd);
- if (ret) {
- goto error;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.stream.shm_fd >= 0) {
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.stream.shm_fd);
- lttng_ust_unlock_fd_tracker();
- args.stream.shm_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- if (args.stream.wakeup_fd >= 0) {
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.stream.wakeup_fd);
- lttng_ust_unlock_fd_tracker();
- args.stream.wakeup_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_CONTEXT:
- switch (lum->u.context.ctx) {
- case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
- {
- char *p;
- size_t ctxlen, recvlen;
-
- ctxlen = strlen("$app.") + lum->u.context.u.app_ctx.provider_name_len - 1
- + strlen(":") + lum->u.context.u.app_ctx.ctx_name_len;
- if (ctxlen >= LTTNG_UST_ABI_SYM_NAME_LEN) {
- ERR("Application context string length size is too large: %zu bytes",
- ctxlen);
- ret = -EINVAL;
- goto error;
- }
- strcpy(ctxstr, "$app.");
- p = &ctxstr[strlen("$app.")];
- recvlen = ctxlen - strlen("$app.");
- len = ustcomm_recv_unix_sock(sock, p, recvlen);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == recvlen) {
- DBG("app context data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect app context data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- /* Put : between provider and ctxname. */
- p[lum->u.context.u.app_ctx.provider_name_len - 1] = ':';
- args.app_context.ctxname = ctxstr;
- break;
- }
- default:
- break;
- }
- if (ops->cmd) {
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- } else {
- ret = -ENOSYS;
- }
- break;
- case LTTNG_UST_ABI_COUNTER:
- {
- void *counter_data;
-
- len = ustcomm_recv_counter_from_sessiond(sock,
- &counter_data, lum->u.counter.len);
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == lum->u.counter.len) {
- DBG("counter data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect counter data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- args.counter.counter_data = counter_data;
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- free(args.counter.counter_data);
- break;
- }
- case LTTNG_UST_ABI_COUNTER_GLOBAL:
- {
- /* Receive shm_fd */
- ret = ustcomm_recv_counter_shm_from_sessiond(sock,
- &args.counter_shm.shm_fd);
- if (ret) {
- goto error;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.counter_shm.shm_fd >= 0) {
- int close_ret;
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.counter_shm.shm_fd);
- lttng_ust_unlock_fd_tracker();
- args.counter_shm.shm_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_COUNTER_CPU:
- {
- /* Receive shm_fd */
- ret = ustcomm_recv_counter_shm_from_sessiond(sock,
- &args.counter_shm.shm_fd);
- if (ret) {
- goto error;
- }
-
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- if (args.counter_shm.shm_fd >= 0) {
- int close_ret;
-
- lttng_ust_lock_fd_tracker();
- close_ret = close(args.counter_shm.shm_fd);
- lttng_ust_unlock_fd_tracker();
- args.counter_shm.shm_fd = -1;
- if (close_ret)
- PERROR("close");
- }
- break;
- }
- case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
- {
- /* Receive struct lttng_ust_event_notifier */
- struct lttng_ust_abi_event_notifier event_notifier;
-
- if (sizeof(event_notifier) != lum->u.event_notifier.len) {
- DBG("incorrect event notifier data message size: %u", lum->u.event_notifier.len);
- ret = -EINVAL;
- goto error;
- }
- len = ustcomm_recv_unix_sock(sock, &event_notifier, sizeof(event_notifier));
- switch (len) {
- case 0: /* orderly shutdown */
- ret = 0;
- goto error;
- default:
- if (len == sizeof(event_notifier)) {
- DBG("event notifier data received");
- break;
- } else if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- if (len == -ECONNRESET) {
- ERR("%s remote end closed connection", sock_info->name);
- ret = len;
- goto error;
- }
- ret = len;
- goto error;
- } else {
- DBG("incorrect event notifier data message size: %zd", len);
- ret = -EINVAL;
- goto error;
- }
- }
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &event_notifier,
- &args, sock_info);
- else
- ret = -ENOSYS;
- break;
- }
-
- default:
- if (ops->cmd)
- ret = ops->cmd(lum->handle, lum->cmd,
- (unsigned long) &lum->u,
- &args, sock_info);
- else
- ret = -ENOSYS;
- break;
- }
-
- lur.handle = lum->handle;
- lur.cmd = lum->cmd;
- lur.ret_val = ret;
- if (ret >= 0) {
- lur.ret_code = LTTNG_UST_OK;
- } else {
- /*
- * Use -LTTNG_UST_ERR as wildcard for UST internal
- * errors that are not caused by the transport, except if
- * we already have a more precise error message to
- * report.
- */
- if (ret > -LTTNG_UST_ERR) {
- /* Translate code to UST error. */
- switch (ret) {
- case -EEXIST:
- lur.ret_code = -LTTNG_UST_ERR_EXIST;
- break;
- case -EINVAL:
- lur.ret_code = -LTTNG_UST_ERR_INVAL;
- break;
- case -ENOENT:
- lur.ret_code = -LTTNG_UST_ERR_NOENT;
- break;
- case -EPERM:
- lur.ret_code = -LTTNG_UST_ERR_PERM;
- break;
- case -ENOSYS:
- lur.ret_code = -LTTNG_UST_ERR_NOSYS;
- break;
- default:
- lur.ret_code = -LTTNG_UST_ERR;
- break;
- }
- } else {
- lur.ret_code = ret;
- }
- }
- if (ret >= 0) {
- switch (lum->cmd) {
- case LTTNG_UST_ABI_TRACER_VERSION:
- lur.u.version = lum->u.version;
- break;
- case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
- memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
- break;
- }
- }
- DBG("Return value: %d", lur.ret_val);
-
- ust_unlock();
-
- /*
- * Perform delayed statedump operations outside of the UST
- * lock. We need to take the dynamic loader lock before we take
- * the UST lock internally within handle_pending_statedump().
- */
- handle_pending_statedump(sock_info);
-
- if (ust_lock()) {
- ret = -LTTNG_UST_ERR_EXITING;
- goto error;
- }
-
- ret = send_reply(sock, &lur);
- if (ret < 0) {
- DBG("error sending reply");
- goto error;
- }
-
- /*
- * LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET needs to send the field
- * after the reply.
- */
- if (lur.ret_code == LTTNG_UST_OK) {
- switch (lum->cmd) {
- case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
- len = ustcomm_send_unix_sock(sock,
- &args.field_list.entry,
- sizeof(args.field_list.entry));
- if (len < 0) {
- ret = len;
- goto error;
- }
- if (len != sizeof(args.field_list.entry)) {
- ret = -EINVAL;
- goto error;
- }
- }
- }
-
-error:
- ust_unlock();
-
- return ret;
-}
-
-static
-void cleanup_sock_info(struct sock_info *sock_info, int exiting)
-{
- int ret;
-
- if (sock_info->root_handle != -1) {
- ret = lttng_ust_abi_objd_unref(sock_info->root_handle, 1);
- if (ret) {
- ERR("Error unref root handle");
- }
- sock_info->root_handle = -1;
- }
- sock_info->registration_done = 0;
- sock_info->initial_statedump_done = 0;
-
- /*
- * wait_shm_mmap, socket and notify socket are used by listener
- * threads outside of the ust lock, so we cannot tear them down
- * ourselves, because we cannot join on these threads. Leave
- * responsibility of cleaning up these resources to the OS
- * process exit.
- */
- if (exiting)
- return;
-
- if (sock_info->socket != -1) {
- ret = ustcomm_close_unix_sock(sock_info->socket);
- if (ret) {
- ERR("Error closing ust cmd socket");
- }
- sock_info->socket = -1;
- }
- if (sock_info->notify_socket != -1) {
- ret = ustcomm_close_unix_sock(sock_info->notify_socket);
- if (ret) {
- ERR("Error closing ust notify socket");
- }
- sock_info->notify_socket = -1;
- }
- if (sock_info->wait_shm_mmap) {
- long page_size;
-
- page_size = LTTNG_UST_PAGE_SIZE;
- if (page_size <= 0) {
- if (!page_size) {
- errno = EINVAL;
- }
- PERROR("Error in sysconf(_SC_PAGE_SIZE)");
- } else {
- ret = munmap(sock_info->wait_shm_mmap, page_size);
- if (ret) {
- ERR("Error unmapping wait shm");
- }
- }
- sock_info->wait_shm_mmap = NULL;
- }
-}
-
-/*
- * Use fork to set the umask in a child process, since umask is
- * process-wide (hence not multi-thread safe).
- * We deal with the shm_open vs ftruncate race (happening when the
- * sessiond owns the shm and does not let everybody modify it, to ensure
- * safety against shm_unlink) by simply letting the mmap fail and
- * retrying after a few seconds.
- * For global shm, everybody has rw access to it until the sessiond
- * starts.
- */
-static
-int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
-{
- int wait_shm_fd, ret;
- pid_t pid;
-
- /*
- * Try to open read-only.
- */
- wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
- if (wait_shm_fd >= 0) {
- int32_t tmp_read;
- ssize_t len;
- size_t bytes_read = 0;
-
- /*
- * Try to read the fd. If unable to do so, try opening
- * it in write mode.
- */
- do {
- len = read(wait_shm_fd,
- &((char *) &tmp_read)[bytes_read],
- sizeof(tmp_read) - bytes_read);
- if (len > 0) {
- bytes_read += len;
- }
- } while ((len < 0 && errno == EINTR)
- || (len > 0 && bytes_read < sizeof(tmp_read)));
- if (bytes_read != sizeof(tmp_read)) {
- ret = close(wait_shm_fd);
- if (ret) {
- ERR("close wait_shm_fd");
- }
- goto open_write;
- }
- goto end;
- } else if (wait_shm_fd < 0 && errno != ENOENT) {
- /*
- * Read-only open did not work, and it's not because the
- * entry was not present. It's a failure that prohibits
- * using shm.
- */
- ERR("Error opening shm %s", sock_info->wait_shm_path);
- goto end;
- }
-
-open_write:
- /*
- * If the open failed because the file did not exist, or because
- * the file was not truncated yet, try creating it ourself.
- */
- URCU_TLS(lttng_ust_nest_count)++;
- pid = fork();
- URCU_TLS(lttng_ust_nest_count)--;
- if (pid > 0) {
- int status;
-
- /*
- * Parent: wait for child to return, in which case the
- * shared memory map will have been created.
- */
- pid = wait(&status);
- if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
- wait_shm_fd = -1;
- goto end;
- }
- /*
- * Try to open read-only again after creation.
- */
- wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
- if (wait_shm_fd < 0) {
- /*
- * Read-only open did not work. It's a failure
- * that prohibits using shm.
- */
- ERR("Error opening shm %s", sock_info->wait_shm_path);
- goto end;
- }
- goto end;
- } else if (pid == 0) {
- int create_mode;
-
- /* Child */
- create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
- if (sock_info->global)
- create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
- /*
- * We're alone in a child process, so we can modify the
- * process-wide umask.
- */
- umask(~create_mode);
- /*
- * Try creating shm (or get rw access).
- * We don't do an exclusive open, because we allow other
- * processes to create+ftruncate it concurrently.
- */
- wait_shm_fd = shm_open(sock_info->wait_shm_path,
- O_RDWR | O_CREAT, create_mode);
- if (wait_shm_fd >= 0) {
- ret = ftruncate(wait_shm_fd, mmap_size);
- if (ret) {
- PERROR("ftruncate");
- _exit(EXIT_FAILURE);
- }
- _exit(EXIT_SUCCESS);
- }
- /*
- * For local shm, we need to have rw access to accept
- * opening it: this means the local sessiond will be
- * able to wake us up. For global shm, we open it even
- * if rw access is not granted, because the root.root
- * sessiond will be able to override all rights and wake
- * us up.
- */
- if (!sock_info->global && errno != EACCES) {
- ERR("Error opening shm %s", sock_info->wait_shm_path);
- _exit(EXIT_FAILURE);
- }
- /*
- * The shm exists, but we cannot open it RW. Report
- * success.
- */
- _exit(EXIT_SUCCESS);
- } else {
- return -1;
- }
-end:
- if (wait_shm_fd >= 0 && !sock_info->global) {
- struct stat statbuf;
-
- /*
- * Ensure that our user is the owner of the shm file for
- * local shm. If we do not own the file, it means our
- * sessiond will not have access to wake us up (there is
- * probably a rogue process trying to fake our
- * sessiond). Fallback to polling method in this case.
- */
- ret = fstat(wait_shm_fd, &statbuf);
- if (ret) {
- PERROR("fstat");
- goto error_close;
- }
- if (statbuf.st_uid != getuid())
- goto error_close;
- }
- return wait_shm_fd;
-
-error_close:
- ret = close(wait_shm_fd);
- if (ret) {
- PERROR("Error closing fd");
- }
- return -1;
-}
-
-static
-char *get_map_shm(struct sock_info *sock_info)
-{
- long page_size;
- int wait_shm_fd, ret;
- char *wait_shm_mmap;
-
- page_size = sysconf(_SC_PAGE_SIZE);
- if (page_size <= 0) {
- if (!page_size) {
- errno = EINVAL;
- }
- PERROR("Error in sysconf(_SC_PAGE_SIZE)");
- goto error;
- }
-
- lttng_ust_lock_fd_tracker();
- wait_shm_fd = get_wait_shm(sock_info, page_size);
- if (wait_shm_fd < 0) {
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
-
- ret = lttng_ust_add_fd_to_tracker(wait_shm_fd);
- if (ret < 0) {
- ret = close(wait_shm_fd);
- if (ret) {
- PERROR("Error closing fd");
- }
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
-
- wait_shm_fd = ret;
- lttng_ust_unlock_fd_tracker();
-
- wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
- MAP_SHARED, wait_shm_fd, 0);
-
- /* close shm fd immediately after taking the mmap reference */
- lttng_ust_lock_fd_tracker();
- ret = close(wait_shm_fd);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(wait_shm_fd);
- } else {
- PERROR("Error closing fd");
- }
- lttng_ust_unlock_fd_tracker();
-
- if (wait_shm_mmap == MAP_FAILED) {
- DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
- goto error;
- }
- return wait_shm_mmap;
-
-error:
- return NULL;
-}
-
-static
-void wait_for_sessiond(struct sock_info *sock_info)
-{
- /* Use ust_lock to check if we should quit. */
- if (ust_lock()) {
- goto quit;
- }
- if (wait_poll_fallback) {
- goto error;
- }
- ust_unlock();
-
- assert(sock_info->wait_shm_mmap);
-
- DBG("Waiting for %s apps sessiond", sock_info->name);
- /* Wait for futex wakeup */
- if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
- goto end_wait;
-
- while (lttng_ust_futex_async((int32_t *) sock_info->wait_shm_mmap,
- FUTEX_WAIT, 0, NULL, NULL, 0)) {
- switch (errno) {
- case EWOULDBLOCK:
- /* Value already changed. */
- goto end_wait;
- case EINTR:
- /* Retry if interrupted by signal. */
- break; /* Get out of switch. */
- case EFAULT:
- wait_poll_fallback = 1;
- DBG(
-"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
-"do not support FUTEX_WAKE on read-only memory mappings correctly. "
-"Please upgrade your kernel "
-"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
-"mainline). LTTng-UST will use polling mode fallback.");
- if (ust_err_debug_enabled())
- PERROR("futex");
- goto end_wait;
- }
- }
-end_wait:
- return;
-
-quit:
- ust_unlock();
- return;
-
-error:
- ust_unlock();
- return;
-}
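The wake-up counterpart of this wait loop lives in the session daemon, outside this file. Under the protocol implied here, the waker stores a nonzero value in the 32-bit word at the start of the shared page and then issues FUTEX_WAKE; a hedged sketch of what that could look like (path argument and function name are hypothetical):

#include <fcntl.h>
#include <limits.h>
#include <linux/futex.h>
#include <stdint.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical wake-side sketch: publish "sessiond ready" and wake
 * every application blocked in FUTEX_WAIT on the shared wait page. */
static int wake_wait_shm(const char *path, size_t page_size)
{
	int32_t *wait_flag;
	int fd;

	fd = shm_open(path, O_RDWR, 0);
	if (fd < 0)
		return -1;
	wait_flag = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
	(void) close(fd);	/* the mapping keeps its own reference */
	if (wait_flag == MAP_FAILED)
		return -1;
	__atomic_store_n(wait_flag, 1, __ATOMIC_SEQ_CST);
	(void) syscall(SYS_futex, wait_flag, FUTEX_WAKE, INT_MAX,
			NULL, NULL, 0);
	return munmap(wait_flag, page_size);
}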
-
-/*
- * This thread does not allocate any resource, except within
- * handle_message(), under mutex protection. This mutex protects
- * against fork and exit.
- * The only other place it allocates resources is at socket connection,
- * which is also protected by the mutex.
- */
-static
-void *ust_listener_thread(void *arg)
-{
- struct sock_info *sock_info = arg;
- int sock, ret, prev_connect_failed = 0, has_waited = 0, fd;
- long timeout;
-
- lttng_ust_fixup_tls();
- /*
- * If available, add '-ust' to the end of this thread's
- * process name
- */
- ret = lttng_ust_setustprocname();
- if (ret) {
- ERR("Unable to set UST process name");
- }
-
- /* Restart trying to connect to the session daemon */
-restart:
- if (prev_connect_failed) {
- /* Wait for sessiond availability with pipe */
- wait_for_sessiond(sock_info);
- if (has_waited) {
- has_waited = 0;
- /*
- * Sleep for 5 seconds before retrying after a
- * sequence of failure / wait / failure. This
- * deals with a killed or broken session daemon.
- */
- sleep(5);
- } else {
- has_waited = 1;
- }
- prev_connect_failed = 0;
- }
-
- if (ust_lock()) {
- goto quit;
- }
-
- if (sock_info->socket != -1) {
- /* FD tracker is updated by ustcomm_close_unix_sock() */
- ret = ustcomm_close_unix_sock(sock_info->socket);
- if (ret) {
- ERR("Error closing %s ust cmd socket",
- sock_info->name);
- }
- sock_info->socket = -1;
- }
- if (sock_info->notify_socket != -1) {
- /* FD tracker is updated by ustcomm_close_unix_sock() */
- ret = ustcomm_close_unix_sock(sock_info->notify_socket);
- if (ret) {
- ERR("Error closing %s ust notify socket",
- sock_info->name);
- }
- sock_info->notify_socket = -1;
- }
-
- /*
- * Register. We need to perform both connect and sending
- * registration message before doing the next connect otherwise
- * we may reach unix socket connect queue max limits and block
- * on the 2nd connect while the session daemon is awaiting the
- * first connect registration message.
- */
- /* Connect cmd socket */
- lttng_ust_lock_fd_tracker();
- ret = ustcomm_connect_unix_sock(sock_info->sock_path,
- get_connect_sock_timeout());
- if (ret < 0) {
- lttng_ust_unlock_fd_tracker();
- DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
- prev_connect_failed = 1;
-
- /*
- * If we cannot find the sessiond daemon, don't delay
- * constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
- fd = ret;
- ret = lttng_ust_add_fd_to_tracker(fd);
- if (ret < 0) {
- ret = close(fd);
- if (ret) {
- PERROR("close on sock_info->socket");
- }
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- ust_unlock();
- goto quit;
- }
-
- sock_info->socket = ret;
- lttng_ust_unlock_fd_tracker();
-
- ust_unlock();
- /*
- * Unlock/relock ust lock because connect is blocking (with
- * timeout). Don't delay constructors on the ust lock for too
- * long.
- */
- if (ust_lock()) {
- goto quit;
- }
-
-	/*
-	 * Create only one root handle per listener thread for the whole
-	 * process lifetime, so we ensure we get the ID which is statically
-	 * assigned to the root handle.
-	 */
- if (sock_info->root_handle == -1) {
- ret = lttng_abi_create_root_handle();
- if (ret < 0) {
- ERR("Error creating root handle");
- goto quit;
- }
- sock_info->root_handle = ret;
- }
-
- ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
- if (ret < 0) {
- ERR("Error registering to %s ust cmd socket",
- sock_info->name);
- prev_connect_failed = 1;
- /*
- * If we cannot register to the sessiond daemon, don't
- * delay constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
-
- ust_unlock();
- /*
- * Unlock/relock ust lock because connect is blocking (with
- * timeout). Don't delay constructors on the ust lock for too
- * long.
- */
- if (ust_lock()) {
- goto quit;
- }
-
- /* Connect notify socket */
- lttng_ust_lock_fd_tracker();
- ret = ustcomm_connect_unix_sock(sock_info->sock_path,
- get_connect_sock_timeout());
- if (ret < 0) {
- lttng_ust_unlock_fd_tracker();
- DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
- prev_connect_failed = 1;
-
- /*
- * If we cannot find the sessiond daemon, don't delay
- * constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
-
- fd = ret;
- ret = lttng_ust_add_fd_to_tracker(fd);
- if (ret < 0) {
- ret = close(fd);
- if (ret) {
- PERROR("close on sock_info->notify_socket");
- }
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- ust_unlock();
- goto quit;
- }
-
- sock_info->notify_socket = ret;
- lttng_ust_unlock_fd_tracker();
-
- ust_unlock();
- /*
- * Unlock/relock ust lock because connect is blocking (with
- * timeout). Don't delay constructors on the ust lock for too
- * long.
- */
- if (ust_lock()) {
- goto quit;
- }
-
- timeout = get_notify_sock_timeout();
- if (timeout >= 0) {
- /*
- * Give at least 10ms to sessiond to reply to
- * notifications.
- */
- if (timeout < 10)
- timeout = 10;
- ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
- timeout);
- if (ret < 0) {
- WARN("Error setting socket receive timeout");
- }
- ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
- timeout);
- if (ret < 0) {
- WARN("Error setting socket send timeout");
- }
- } else if (timeout < -1) {
- WARN("Unsupported timeout value %ld", timeout);
- }
-
- ret = register_to_sessiond(sock_info->notify_socket,
- USTCTL_SOCKET_NOTIFY);
- if (ret < 0) {
- ERR("Error registering to %s ust notify socket",
- sock_info->name);
- prev_connect_failed = 1;
- /*
- * If we cannot register to the sessiond daemon, don't
- * delay constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto restart;
- }
- sock = sock_info->socket;
-
- ust_unlock();
-
- for (;;) {
- ssize_t len;
- struct ustcomm_ust_msg lum;
-
- len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
- switch (len) {
- case 0: /* orderly shutdown */
- DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
- if (ust_lock()) {
- goto quit;
- }
-			/*
-			 * Either sessiond has shut down or refused us by closing the socket.
-			 * In either case, we don't want to delay constructor execution,
-			 * and we need to wait before retrying.
-			 */
- prev_connect_failed = 1;
- /*
- * If we cannot register to the sessiond daemon, don't
- * delay constructor execution.
- */
- ret = handle_register_failed(sock_info);
- assert(!ret);
- ust_unlock();
- goto end;
- case sizeof(lum):
- print_cmd(lum.cmd, lum.handle);
- ret = handle_message(sock_info, sock, &lum);
- if (ret) {
- ERR("Error handling message for %s socket",
- sock_info->name);
- /*
- * Close socket if protocol error is
- * detected.
- */
- goto end;
- }
- continue;
- default:
- if (len < 0) {
- DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
- } else {
- DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
- }
- if (len == -ECONNRESET) {
- DBG("%s remote end closed connection", sock_info->name);
- goto end;
- }
- goto end;
- }
-
- }
-end:
- if (ust_lock()) {
- goto quit;
- }
- /* Cleanup socket handles before trying to reconnect */
- lttng_ust_abi_objd_table_owner_cleanup(sock_info);
- ust_unlock();
- goto restart; /* try to reconnect */
-
-quit:
- ust_unlock();
-
- pthread_mutex_lock(&ust_exit_mutex);
- sock_info->thread_active = 0;
- pthread_mutex_unlock(&ust_exit_mutex);
- return NULL;
-}
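The notify-socket timeout setup above goes through ustcomm_setsockopt_rcv_timeout() and ustcomm_setsockopt_snd_timeout(), whose bodies are not shown here. Assuming they are thin wrappers over setsockopt(2) taking a millisecond count, the receive side plausibly reduces to this sketch (helper name is hypothetical):

#include <sys/socket.h>
#include <sys/time.h>

/* Hypothetical sketch of a millisecond receive-timeout helper built on
 * SO_RCVTIMEO; the send side would be identical with SO_SNDTIMEO. */
static int set_rcv_timeout_ms(int sock, long msec)
{
	struct timeval tv = {
		.tv_sec = msec / 1000,
		.tv_usec = (msec % 1000) * 1000,
	};

	return setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
}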
-
-/*
- * Weak symbol to call when the ust malloc wrapper is not loaded.
- */
-__attribute__((weak))
-void lttng_ust_libc_wrapper_malloc_init(void)
-{
-}
-
-/*
- * sessiond monitoring thread: monitor presence of global and per-user
- * sessiond by polling the application common named pipe.
- */
-static
-void lttng_ust_init(void)
- __attribute__((constructor));
-static
-void lttng_ust_init(void)
-{
- struct timespec constructor_timeout;
- sigset_t sig_all_blocked, orig_parent_mask;
- pthread_attr_t thread_attr;
- int timeout_mode;
- int ret;
- void *handle;
-
- if (uatomic_xchg(&initialized, 1) == 1)
- return;
-
-	/*
-	 * Fixup the TLS before anything else: the TLS fixup mutex
-	 * (which happens to be the dynamic linker mutex) may otherwise
-	 * be taken while the ust lock is held, creating a lock
-	 * interdependency.
-	 */
- lttng_ust_fixup_tls();
-
- lttng_ust_loaded = 1;
-
-	/*
-	 * We need to ensure that the liblttng-ust library is not unloaded to avoid
-	 * the unloading of code used by the ust_listener_threads as we can not
-	 * reliably know when they exited. To do that, manually load
-	 * liblttng-ust.so to increment the dynamic loader's internal refcount for
-	 * this library so it never becomes zero, thus never gets unloaded from the
-	 * address space of the process. Since we are already running in the
-	 * constructor of the LTTNG_UST_LIB_SONAME library, calling dlopen will
-	 * simply increment the refcount and no additional work is needed by the
-	 * dynamic loader as the shared library is already loaded in the address
-	 * space. As a safeguard, we use the RTLD_NODELETE flag to prevent
-	 * unloading of the UST library if its refcount becomes zero (which should
-	 * never happen). Do the return value check but discard the handle at the
-	 * end of the function as it's not needed.
-	 */
- handle = dlopen(LTTNG_UST_LIB_SONAME, RTLD_LAZY | RTLD_NODELETE);
- if (!handle) {
-		ERR("dlopen of liblttng-ust shared library (%s) failed.", LTTNG_UST_LIB_SONAME);
- }
-
-	/*
-	 * We want precise control over the order in which we construct
-	 * our sub-libraries vs starting to receive commands from
-	 * sessiond (otherwise leading to errors when trying to create
-	 * sessions before the init functions are completed).
-	 */
- ust_err_init();
- lttng_ust_getenv_init(); /* Needs ust_err_init() to be completed. */
- lttng_ust_tp_init();
- lttng_ust_init_fd_tracker();
- lttng_ust_clock_init();
- lttng_ust_getcpu_init();
- lttng_ust_statedump_init();
- lttng_ust_ring_buffer_clients_init();
- lttng_ust_counter_clients_init();
- lttng_perf_counter_init();
- /*
- * Invoke ust malloc wrapper init before starting other threads.
- */
- lttng_ust_libc_wrapper_malloc_init();
-
- timeout_mode = get_constructor_timeout(&constructor_timeout);
-
- get_allow_blocking();
-
- ret = sem_init(&constructor_wait, 0, 0);
- if (ret) {
- PERROR("sem_init");
- }
-
- ret = setup_global_apps();
- if (ret) {
- assert(global_apps.allowed == 0);
- DBG("global apps setup returned %d", ret);
- }
-
- ret = setup_local_apps();
- if (ret) {
- assert(local_apps.allowed == 0);
- DBG("local apps setup returned %d", ret);
- }
-
-	/*
-	 * A new thread created by pthread_create inherits the signal mask
-	 * from the parent. To avoid any signal being received by the
-	 * listener thread, we block all signals temporarily in the parent,
-	 * while we create the listener thread.
-	 */
- sigfillset(&sig_all_blocked);
- ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-
- ret = pthread_attr_init(&thread_attr);
- if (ret) {
- ERR("pthread_attr_init: %s", strerror(ret));
- }
- ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
- if (ret) {
- ERR("pthread_attr_setdetachstate: %s", strerror(ret));
- }
-
- if (global_apps.allowed) {
- pthread_mutex_lock(&ust_exit_mutex);
- ret = pthread_create(&global_apps.ust_listener, &thread_attr,
- ust_listener_thread, &global_apps);
- if (ret) {
- ERR("pthread_create global: %s", strerror(ret));
- }
- global_apps.thread_active = 1;
- pthread_mutex_unlock(&ust_exit_mutex);
- } else {
- handle_register_done(&global_apps);
- }
-
- if (local_apps.allowed) {
- pthread_mutex_lock(&ust_exit_mutex);
- ret = pthread_create(&local_apps.ust_listener, &thread_attr,
- ust_listener_thread, &local_apps);
- if (ret) {
- ERR("pthread_create local: %s", strerror(ret));
- }
- local_apps.thread_active = 1;
- pthread_mutex_unlock(&ust_exit_mutex);
- } else {
- handle_register_done(&local_apps);
- }
- ret = pthread_attr_destroy(&thread_attr);
- if (ret) {
- ERR("pthread_attr_destroy: %s", strerror(ret));
- }
-
- /* Restore original signal mask in parent */
- ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
- if (ret) {
- ERR("pthread_sigmask: %s", strerror(ret));
- }
-
- switch (timeout_mode) {
- case 1: /* timeout wait */
- do {
- ret = sem_timedwait(&constructor_wait,
- &constructor_timeout);
- } while (ret < 0 && errno == EINTR);
- if (ret < 0) {
- switch (errno) {
- case ETIMEDOUT:
- ERR("Timed out waiting for lttng-sessiond");
- break;
- case EINVAL:
- PERROR("sem_timedwait");
- break;
- default:
- ERR("Unexpected error \"%s\" returned by sem_timedwait",
- strerror(errno));
- }
- }
- break;
- case -1:/* wait forever */
- do {
- ret = sem_wait(&constructor_wait);
- } while (ret < 0 && errno == EINTR);
- if (ret < 0) {
- switch (errno) {
- case EINVAL:
- PERROR("sem_wait");
- break;
- default:
- ERR("Unexpected error \"%s\" returned by sem_wait",
- strerror(errno));
- }
- }
- break;
- case 0: /* no timeout */
- break;
- }
-}
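The self-dlopen trick near the top of this constructor is reusable by any library that starts detached threads from a constructor. A minimal standalone sketch, assuming a hypothetical soname:

#include <dlfcn.h>
#include <stdio.h>

/* Hypothetical sketch: pin a shared library into the process image
 * from its own constructor, so detached worker threads never execute
 * unmapped code. dlopen() on an already-loaded library only bumps the
 * loader refcount; RTLD_NODELETE additionally forbids unmapping even
 * if that refcount later drops to zero. */
__attribute__((constructor))
static void pin_self(void)
{
	if (!dlopen("libexample.so.0", RTLD_LAZY | RTLD_NODELETE))
		fprintf(stderr, "warning: failed to pin libexample.so.0\n");
}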
-
-static
-void lttng_ust_cleanup(int exiting)
-{
- cleanup_sock_info(&global_apps, exiting);
- cleanup_sock_info(&local_apps, exiting);
- local_apps.allowed = 0;
- global_apps.allowed = 0;
-	/*
-	 * The teardown calls in this function all affect data structures
-	 * accessed under the UST lock by the listener thread. This
-	 * lock, along with the lttng_ust_comm_should_quit flag, ensures
-	 * that none of these threads are accessing this data at this
-	 * point.
-	 */
- lttng_ust_abi_exit();
- lttng_ust_abi_events_exit();
- lttng_perf_counter_exit();
- lttng_ust_ring_buffer_clients_exit();
- lttng_ust_counter_clients_exit();
- lttng_ust_statedump_destroy();
- lttng_ust_tp_exit();
- if (!exiting) {
- /* Reinitialize values for fork */
- sem_count = sem_count_initial_value;
- lttng_ust_comm_should_quit = 0;
- initialized = 0;
- }
-}
-
-static
-void lttng_ust_exit(void)
- __attribute__((destructor));
-static
-void lttng_ust_exit(void)
-{
- int ret;
-
- /*
- * Using pthread_cancel here because:
- * A) we don't want to hang application teardown.
- * B) the thread is not allocating any resource.
- */
-
- /*
- * Require the communication thread to quit. Synchronize with
- * mutexes to ensure it is not in a mutex critical section when
- * pthread_cancel is later called.
- */
- ust_lock_nocheck();
- lttng_ust_comm_should_quit = 1;
- ust_unlock();
-
- pthread_mutex_lock(&ust_exit_mutex);
- /* cancel threads */
- if (global_apps.thread_active) {
- ret = pthread_cancel(global_apps.ust_listener);
- if (ret) {
- ERR("Error cancelling global ust listener thread: %s",
- strerror(ret));
- } else {
- global_apps.thread_active = 0;
- }
- }
- if (local_apps.thread_active) {
- ret = pthread_cancel(local_apps.ust_listener);
- if (ret) {
- ERR("Error cancelling local ust listener thread: %s",
- strerror(ret));
- } else {
- local_apps.thread_active = 0;
- }
- }
- pthread_mutex_unlock(&ust_exit_mutex);
-
-	/*
-	 * Do NOT join threads: use of sys_futex makes it impossible to
-	 * join the threads without using async-cancel, but async-cancel
-	 * is delivered by a signal, which could hit the target thread
-	 * anywhere in its code path, including while the ust_lock() is
-	 * held, causing a deadlock for the other thread. Let the OS
-	 * clean up the threads if they are stalled in a syscall.
-	 */
- lttng_ust_cleanup(1);
-}
-
-static
-void ust_context_ns_reset(void)
-{
- lttng_context_pid_ns_reset();
- lttng_context_cgroup_ns_reset();
- lttng_context_ipc_ns_reset();
- lttng_context_mnt_ns_reset();
- lttng_context_net_ns_reset();
- lttng_context_user_ns_reset();
- lttng_context_time_ns_reset();
- lttng_context_uts_ns_reset();
-}
-
-static
-void ust_context_vuids_reset(void)
-{
- lttng_context_vuid_reset();
- lttng_context_veuid_reset();
- lttng_context_vsuid_reset();
-}
-
-static
-void ust_context_vgids_reset(void)
-{
- lttng_context_vgid_reset();
- lttng_context_vegid_reset();
- lttng_context_vsgid_reset();
-}
-
-/*
- * We exclude the worker threads across fork and clone (except
- * CLONE_VM), because these system calls only keep the forking thread
- * running in the child. Therefore, we don't want to call fork or clone
- * in the middle of a tracepoint or ust tracing state modification.
- * Holding this mutex protects these structures across fork and clone.
- */
-void lttng_ust_before_fork(sigset_t *save_sigset)
-{
-	/*
-	 * Disable signals. This prevents the child from intervening
-	 * before it is properly set up for tracing. It is safer to
-	 * disable all signals, because then we know we are not breaking
-	 * anything by restoring the original mask.
-	 */
- sigset_t all_sigs;
- int ret;
-
- /* Fixup lttng-ust TLS. */
- lttng_ust_fixup_tls();
-
- if (URCU_TLS(lttng_ust_nest_count))
- return;
- /* Disable signals */
- sigfillset(&all_sigs);
- ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
- if (ret == -1) {
- PERROR("sigprocmask");
- }
-
- pthread_mutex_lock(&ust_fork_mutex);
-
- ust_lock_nocheck();
- lttng_ust_urcu_before_fork();
- lttng_ust_lock_fd_tracker();
- lttng_perf_lock();
-}
-
-static void ust_after_fork_common(sigset_t *restore_sigset)
-{
- int ret;
-
- DBG("process %d", getpid());
- lttng_perf_unlock();
- lttng_ust_unlock_fd_tracker();
- ust_unlock();
-
- pthread_mutex_unlock(&ust_fork_mutex);
-
- /* Restore signals */
- ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
- if (ret == -1) {
- PERROR("sigprocmask");
- }
-}
-
-void lttng_ust_after_fork_parent(sigset_t *restore_sigset)
-{
- if (URCU_TLS(lttng_ust_nest_count))
- return;
- DBG("process %d", getpid());
- lttng_ust_urcu_after_fork_parent();
- /* Release mutexes and reenable signals */
- ust_after_fork_common(restore_sigset);
-}
-
-/*
- * After fork, in the child, we need to clean up all the leftover state,
- * except the worker thread which already magically disappeared thanks
- * to the weird Linux fork semantics. After tidying up, we call
- * lttng_ust_init() again to start over as a new PID.
- *
- * This is meant for fork() calls that have tracing in the child between
- * the fork and the following exec call (if there is any).
- */
-void lttng_ust_after_fork_child(sigset_t *restore_sigset)
-{
- if (URCU_TLS(lttng_ust_nest_count))
- return;
- lttng_context_vpid_reset();
- lttng_context_vtid_reset();
- lttng_ust_context_procname_reset();
- ust_context_ns_reset();
- ust_context_vuids_reset();
- ust_context_vgids_reset();
- DBG("process %d", getpid());
- /* Release urcu mutexes */
- lttng_ust_urcu_after_fork_child();
- lttng_ust_cleanup(0);
- /* Release mutexes and reenable signals */
- ust_after_fork_common(restore_sigset);
- lttng_ust_init();
-}
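The before/after fork hooks above are meant to bracket the fork call itself, typically from wrapper glue shipped separately. A usage sketch showing the intended call order (the wrapper name is hypothetical; the lttng_ust_* calls are the functions defined above):

#include <signal.h>
#include <unistd.h>

/* Hypothetical wrapper: quiesce tracing and block signals, fork, then
 * run the matching per-side hook before returning. */
static pid_t traced_fork(void)
{
	sigset_t sigset;
	pid_t pid;

	lttng_ust_before_fork(&sigset);
	pid = fork();
	if (pid == 0)
		lttng_ust_after_fork_child(&sigset);
	else
		lttng_ust_after_fork_parent(&sigset);	/* also on fork error */
	return pid;
}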
-
-void lttng_ust_after_setns(void)
-{
- ust_context_ns_reset();
- ust_context_vuids_reset();
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_unshare(void)
-{
- ust_context_ns_reset();
- ust_context_vuids_reset();
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_seteuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_setreuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_setresuid(void)
-{
- ust_context_vuids_reset();
-}
-
-void lttng_ust_after_setgid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setegid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setregid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_after_setresgid(void)
-{
- ust_context_vgids_reset();
-}
-
-void lttng_ust_sockinfo_session_enabled(void *owner)
-{
- struct sock_info *sock_info = owner;
- sock_info->statedump_pending = 1;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * UST dynamic type implementation.
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <inttypes.h>
-
-#include <ust-helper.h>
-#include <ust-dynamic-type.h>
-
-#define ctf_enum_value(_string, _value) \
- __LTTNG_COMPOUND_LITERAL(struct lttng_ust_enum_entry, { \
- .struct_size = sizeof(struct lttng_ust_enum_entry), \
- .start = { \
- .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
- .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
- (long long) (_value) : (_value), \
- }, \
- .end = { \
- .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
- .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
- (long long) (_value) : (_value), \
- }, \
- .string = (_string), \
- }),
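__LTTNG_COMPOUND_LITERAL() is defined elsewhere; assuming it follows the usual C99 pattern for materializing the address of an unnamed object inside an initializer, it reduces to roughly this sketch:

/* Hypothetical sketch: a one-element array compound literal decays to
 * a pointer, so each ctf_enum_value() expansion yields the address of
 * an anonymous lttng_ust_enum_entry. */
#define __LTTNG_COMPOUND_LITERAL(type, ...)	(type[]) { __VA_ARGS__ }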
-
-static const struct lttng_ust_enum_entry *dt_enum[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
- [LTTNG_UST_DYNAMIC_TYPE_NONE] = ctf_enum_value("_none", 0)
- [LTTNG_UST_DYNAMIC_TYPE_S8] = ctf_enum_value("_int8", 1)
- [LTTNG_UST_DYNAMIC_TYPE_S16] = ctf_enum_value("_int16", 2)
- [LTTNG_UST_DYNAMIC_TYPE_S32] = ctf_enum_value("_int32", 3)
- [LTTNG_UST_DYNAMIC_TYPE_S64] = ctf_enum_value("_int64", 4)
- [LTTNG_UST_DYNAMIC_TYPE_U8] = ctf_enum_value("_uint8", 5)
- [LTTNG_UST_DYNAMIC_TYPE_U16] = ctf_enum_value("_uint16", 6)
- [LTTNG_UST_DYNAMIC_TYPE_U32] = ctf_enum_value("_uint32", 7)
- [LTTNG_UST_DYNAMIC_TYPE_U64] = ctf_enum_value("_uint64", 8)
- [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = ctf_enum_value("_float", 9)
- [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = ctf_enum_value("_double", 10)
- [LTTNG_UST_DYNAMIC_TYPE_STRING] = ctf_enum_value("_string", 11)
-};
-
-static struct lttng_ust_enum_desc dt_enum_desc = {
- .name = "dynamic_type_enum",
- .entries = dt_enum,
- .nr_entries = LTTNG_ARRAY_SIZE(dt_enum),
-};
-
-const struct lttng_ust_event_field *dt_var_fields[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
- [LTTNG_UST_DYNAMIC_TYPE_NONE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "none",
- .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_struct, {
- .parent = {
- .type = lttng_ust_type_struct,
- },
- .struct_size = sizeof(struct lttng_ust_type_struct),
- .nr_fields = 0, /* empty struct */
- .alignment = 0,
- }),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int8",
- .type = lttng_ust_type_integer_define(int8_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int16",
- .type = lttng_ust_type_integer_define(int16_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int32",
- .type = lttng_ust_type_integer_define(int32_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_S64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "int64",
- .type = lttng_ust_type_integer_define(int64_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint8",
- .type = lttng_ust_type_integer_define(uint8_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint16",
- .type = lttng_ust_type_integer_define(uint16_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint32",
- .type = lttng_ust_type_integer_define(uint32_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_U64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "uint64",
- .type = lttng_ust_type_integer_define(uint64_t, BYTE_ORDER, 10),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "float",
- .type = lttng_ust_type_float_define(float),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "double",
- .type = lttng_ust_type_float_define(double),
- .nowrite = 0,
- }),
- [LTTNG_UST_DYNAMIC_TYPE_STRING] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = "string",
- .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_string, {
- .parent = {
- .type = lttng_ust_type_string,
- },
- .struct_size = sizeof(struct lttng_ust_type_string),
- .encoding = lttng_ust_string_encoding_UTF8,
- }),
- .nowrite = 0,
- }),
-};
-
-static const struct lttng_ust_event_field dt_enum_field = {
- .struct_size = sizeof(struct lttng_ust_event_field),
- .name = NULL,
- .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_enum, {
- .parent = {
- .type = lttng_ust_type_enum,
- },
- .struct_size = sizeof(struct lttng_ust_type_enum),
- .desc = &dt_enum_desc,
- .container_type = lttng_ust_type_integer_define(char, BYTE_ORDER, 10),
- }),
- .nowrite = 0,
-};
-
-const struct lttng_ust_event_field *lttng_ust_dynamic_type_field(int64_t value)
-{
- if (value >= _NR_LTTNG_UST_DYNAMIC_TYPES || value < 0)
- return NULL;
- return dt_var_fields[value];
-}
-
-int lttng_ust_dynamic_type_choices(size_t *nr_choices, const struct lttng_ust_event_field ***choices)
-{
- *nr_choices = _NR_LTTNG_UST_DYNAMIC_TYPES;
- *choices = dt_var_fields;
- return 0;
-}
-
-const struct lttng_ust_event_field *lttng_ust_dynamic_type_tag_field(void)
-{
- return &dt_enum_field;
-}
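A brief usage sketch for the accessors above (caller code hypothetical; the printed name comes from the dt_var_fields table):

#include <stdio.h>

/* Hypothetical caller: resolve the variant field backing one
 * dynamic-type value and print its event field name. */
static void show_dynamic_type_field(void)
{
	const struct lttng_ust_event_field *f;

	f = lttng_ust_dynamic_type_field(LTTNG_UST_DYNAMIC_TYPE_U32);
	if (f)
		printf("variant field: %s\n", f->name);	/* prints "uint32" */
}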
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <fcntl.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <lttng/ust-utils.h>
-
-#include <ust-elf.h>
-#include <ust-fd.h>
-
-#include "lttng-tracer-core.h"
-#include "lttng-ust-elf.h"
-#include "ust-helper.h"
-
-#define BUF_LEN 4096
-
-#ifndef NT_GNU_BUILD_ID
-# define NT_GNU_BUILD_ID 3
-#endif
-
-/*
- * Retrieve the nth (where n is the `index` argument) phdr (program
- * header) from the given elf instance.
- *
- * A pointer to the phdr is returned on success, NULL on failure.
- */
-static
-struct lttng_ust_elf_phdr *lttng_ust_elf_get_phdr(struct lttng_ust_elf *elf,
- uint16_t index)
-{
- struct lttng_ust_elf_phdr *phdr = NULL;
- off_t offset;
-
- if (!elf) {
- goto error;
- }
-
- if (index >= elf->ehdr->e_phnum) {
- goto error;
- }
-
- phdr = zmalloc(sizeof(struct lttng_ust_elf_phdr));
- if (!phdr) {
- goto error;
- }
-
- offset = (off_t) elf->ehdr->e_phoff
- + (off_t) index * elf->ehdr->e_phentsize;
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
-
- if (is_elf_32_bit(elf)) {
- Elf32_Phdr elf_phdr;
-
- if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
- < sizeof(elf_phdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_phdr(elf_phdr);
- }
- copy_phdr(elf_phdr, *phdr);
- } else {
- Elf64_Phdr elf_phdr;
-
- if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
- < sizeof(elf_phdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_phdr(elf_phdr);
- }
- copy_phdr(elf_phdr, *phdr);
- }
-
- return phdr;
-
-error:
- free(phdr);
- return NULL;
-}
-
-/*
- * Retrieve the nth (where n is the `index` argument) shdr (section
- * header) from the given elf instance.
- *
- * A pointer to the shdr is returned on success, NULL on failure.
- */
-static
-struct lttng_ust_elf_shdr *lttng_ust_elf_get_shdr(struct lttng_ust_elf *elf,
- uint16_t index)
-{
- struct lttng_ust_elf_shdr *shdr = NULL;
- off_t offset;
-
- if (!elf) {
- goto error;
- }
-
- if (index >= elf->ehdr->e_shnum) {
- goto error;
- }
-
- shdr = zmalloc(sizeof(struct lttng_ust_elf_shdr));
- if (!shdr) {
- goto error;
- }
-
- offset = (off_t) elf->ehdr->e_shoff
- + (off_t) index * elf->ehdr->e_shentsize;
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
-
- if (is_elf_32_bit(elf)) {
- Elf32_Shdr elf_shdr;
-
- if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
- < sizeof(elf_shdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_shdr(elf_shdr);
- }
- copy_shdr(elf_shdr, *shdr);
- } else {
- Elf64_Shdr elf_shdr;
-
- if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
- < sizeof(elf_shdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_shdr(elf_shdr);
- }
- copy_shdr(elf_shdr, *shdr);
- }
-
- return shdr;
-
-error:
- free(shdr);
- return NULL;
-}
-
-/*
- * Lookup a section's name from a given offset (usually from an shdr's
- * sh_name value) in bytes relative to the beginning of the section
- * names string table.
- *
- * If no name is found, NULL is returned.
- */
-static
-char *lttng_ust_elf_get_section_name(struct lttng_ust_elf *elf, off_t offset)
-{
- char *name = NULL;
- size_t len = 0, to_read; /* len does not include \0 */
-
- if (!elf) {
- goto error;
- }
-
- if (offset >= elf->section_names_size) {
- goto error;
- }
-
- if (lseek(elf->fd, elf->section_names_offset + offset, SEEK_SET) < 0) {
- goto error;
- }
-
- to_read = elf->section_names_size - offset;
-
- /* Find first \0 after or at current location, remember len. */
- for (;;) {
- char buf[BUF_LEN];
- ssize_t read_len;
- size_t i;
-
- if (!to_read) {
- goto error;
- }
- read_len = lttng_ust_read(elf->fd, buf,
- min_t(size_t, BUF_LEN, to_read));
- if (read_len <= 0) {
- goto error;
- }
- for (i = 0; i < read_len; i++) {
- if (buf[i] == '\0') {
- len += i;
- goto end;
- }
- }
- len += read_len;
- to_read -= read_len;
- }
-end:
- name = zmalloc(sizeof(char) * (len + 1)); /* + 1 for \0 */
- if (!name) {
- goto error;
- }
- if (lseek(elf->fd, elf->section_names_offset + offset,
- SEEK_SET) < 0) {
- goto error;
- }
- if (lttng_ust_read(elf->fd, name, len + 1) < len + 1) {
- goto error;
- }
-
- return name;
-
-error:
- free(name);
- return NULL;
-}
-
-/*
- * Create an instance of lttng_ust_elf for the ELF file located at
- * `path`.
- *
- * Return a pointer to the instance on success, NULL on failure.
- */
-struct lttng_ust_elf *lttng_ust_elf_create(const char *path)
-{
- uint8_t e_ident[EI_NIDENT];
- struct lttng_ust_elf_shdr *section_names_shdr;
- struct lttng_ust_elf *elf = NULL;
- int ret, fd;
-
- elf = zmalloc(sizeof(struct lttng_ust_elf));
- if (!elf) {
- goto error;
- }
-
- /* Initialize fd field to -1. 0 is a valid fd number */
- elf->fd = -1;
-
- elf->path = strdup(path);
- if (!elf->path) {
- goto error;
- }
-
- lttng_ust_lock_fd_tracker();
- fd = open(elf->path, O_RDONLY | O_CLOEXEC);
- if (fd < 0) {
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
-
- ret = lttng_ust_add_fd_to_tracker(fd);
- if (ret < 0) {
- ret = close(fd);
- if (ret) {
- PERROR("close on elf->fd");
- }
- ret = -1;
- lttng_ust_unlock_fd_tracker();
- goto error;
- }
- elf->fd = ret;
- lttng_ust_unlock_fd_tracker();
-
- if (lttng_ust_read(elf->fd, e_ident, EI_NIDENT) < EI_NIDENT) {
- goto error;
- }
- elf->bitness = e_ident[EI_CLASS];
- elf->endianness = e_ident[EI_DATA];
-
- if (lseek(elf->fd, 0, SEEK_SET) < 0) {
- goto error;
- }
-
- elf->ehdr = zmalloc(sizeof(struct lttng_ust_elf_ehdr));
- if (!elf->ehdr) {
- goto error;
- }
-
- if (is_elf_32_bit(elf)) {
- Elf32_Ehdr elf_ehdr;
-
- if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
- < sizeof(elf_ehdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_ehdr(elf_ehdr);
- }
- copy_ehdr(elf_ehdr, *(elf->ehdr));
- } else {
- Elf64_Ehdr elf_ehdr;
-
- if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
- < sizeof(elf_ehdr)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- bswap_ehdr(elf_ehdr);
- }
- copy_ehdr(elf_ehdr, *(elf->ehdr));
- }
-
- section_names_shdr = lttng_ust_elf_get_shdr(elf, elf->ehdr->e_shstrndx);
- if (!section_names_shdr) {
- goto error;
- }
-
- elf->section_names_offset = section_names_shdr->sh_offset;
- elf->section_names_size = section_names_shdr->sh_size;
-
- free(section_names_shdr);
- return elf;
-
-error:
- lttng_ust_elf_destroy(elf);
- return NULL;
-}
-
-/*
- * Test whether the ELF file is position independent code (PIC)
- */
-uint8_t lttng_ust_elf_is_pic(struct lttng_ust_elf *elf)
-{
-	/*
-	 * PIC has an e_type value of ET_DYN, see ELF specification
-	 * version 1.1 p. 1-3.
-	 */
- return elf->ehdr->e_type == ET_DYN;
-}
-
-/*
- * Destroy the given lttng_ust_elf instance.
- */
-void lttng_ust_elf_destroy(struct lttng_ust_elf *elf)
-{
- int ret;
-
- if (!elf) {
- return;
- }
-
- if (elf->fd >= 0) {
- lttng_ust_lock_fd_tracker();
- ret = close(elf->fd);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(elf->fd);
- } else {
- PERROR("close");
- abort();
- }
- lttng_ust_unlock_fd_tracker();
- }
-
- free(elf->ehdr);
- free(elf->path);
- free(elf);
-}
-
-/*
- * Compute the total in-memory size of the ELF file, in bytes.
- *
- * Returns 0 if successful, -1 if not. On success, the memory size is
- * returned through the out parameter `memsz`.
- */
-int lttng_ust_elf_get_memsz(struct lttng_ust_elf *elf, uint64_t *memsz)
-{
- uint16_t i;
- uint64_t low_addr = UINT64_MAX, high_addr = 0;
-
- if (!elf || !memsz) {
- goto error;
- }
-
- for (i = 0; i < elf->ehdr->e_phnum; ++i) {
- struct lttng_ust_elf_phdr *phdr;
-
- phdr = lttng_ust_elf_get_phdr(elf, i);
- if (!phdr) {
- goto error;
- }
-
- /*
- * Only PT_LOAD segments contribute to memsz. Skip
- * other segments.
- */
- if (phdr->p_type != PT_LOAD) {
- goto next_loop;
- }
-
- low_addr = min_t(uint64_t, low_addr, phdr->p_vaddr);
- high_addr = max_t(uint64_t, high_addr,
- phdr->p_vaddr + phdr->p_memsz);
- next_loop:
- free(phdr);
- }
-
- if (high_addr < low_addr) {
- /* No PT_LOAD segments or corrupted data. */
- goto error;
- }
-
- *memsz = high_addr - low_addr;
- return 0;
-error:
- return -1;
-}
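A worked example of this computation (segment values hypothetical):

/* With two PT_LOAD segments
 *   { p_vaddr = 0x0000, p_memsz = 0x0800 } and
 *   { p_vaddr = 0x1000, p_memsz = 0x0234 },
 * low_addr = 0x0000 and high_addr = 0x1000 + 0x0234 = 0x1234, so
 * *memsz = 0x1234: the full span the object occupies once mapped,
 * including the gap between the two segments. */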
-
-/*
- * Internal method used to try and get the build_id from a PT_NOTE
- * segment ranging from `offset` to `segment_end`.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the build id information was present in the
- * segment or not. If `found` is not 0, the out parameters `build_id`
- * and `length` will both have been set with the retrieved
- * information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-static
-int lttng_ust_elf_get_build_id_from_segment(
- struct lttng_ust_elf *elf, uint8_t **build_id, size_t *length,
- off_t offset, off_t segment_end)
-{
- uint8_t *_build_id = NULL; /* Silence old gcc warning. */
- size_t _length = 0; /* Silence old gcc warning. */
-
- while (offset < segment_end) {
- struct lttng_ust_elf_nhdr nhdr;
- size_t read_len;
-
- /* Align start of note entry */
- offset += lttng_ust_offset_align(offset, ELF_NOTE_ENTRY_ALIGN);
- if (offset >= segment_end) {
- break;
- }
-		/*
-		 * We seek manually because, if the note isn't the
-		 * build id, the data following the header will not
-		 * have been read.
-		 */
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
- if (lttng_ust_read(elf->fd, &nhdr, sizeof(nhdr))
- < sizeof(nhdr)) {
- goto error;
- }
-
- if (!is_elf_native_endian(elf)) {
- nhdr.n_namesz = bswap_32(nhdr.n_namesz);
- nhdr.n_descsz = bswap_32(nhdr.n_descsz);
- nhdr.n_type = bswap_32(nhdr.n_type);
- }
-
- offset += sizeof(nhdr) + nhdr.n_namesz;
- /* Align start of desc entry */
- offset += lttng_ust_offset_align(offset, ELF_NOTE_DESC_ALIGN);
-
- if (nhdr.n_type != NT_GNU_BUILD_ID) {
-			/*
-			 * Ignore non-build-id notes, but still
-			 * advance the offset.
-			 */
- offset += nhdr.n_descsz;
- continue;
- }
-
- _length = nhdr.n_descsz;
- _build_id = zmalloc(sizeof(uint8_t) * _length);
- if (!_build_id) {
- goto error;
- }
-
- if (lseek(elf->fd, offset, SEEK_SET) < 0) {
- goto error;
- }
- read_len = sizeof(*_build_id) * _length;
- if (lttng_ust_read(elf->fd, _build_id, read_len) < read_len) {
- goto error;
- }
-
- break;
- }
-
- if (_build_id) {
- *build_id = _build_id;
- *length = _length;
- }
-
- return 0;
-error:
- free(_build_id);
- return -1;
-}
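Both lttng_ust_offset_align() calls above implement the standard 4-byte ELF note padding. Assuming the helper returns the number of padding bytes needed to reach the next boundary, it reduces to this sketch:

#include <stddef.h>

/* Hypothetical sketch of the alignment helper: padding bytes needed
 * to round `offset` up to `align`, where `align` is a power of two.
 * E.g. offset_align(5, 4) == 3, offset_align(8, 4) == 0. */
static inline size_t offset_align(size_t offset, size_t align)
{
	return (align - (offset & (align - 1))) & (align - 1);
}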
-
-/*
- * Retrieve a build ID (an array of bytes) from the corresponding
- * section in the ELF file. The length of the build ID can be either
- * 16 or 20 bytes depending on the method used to generate it, hence
- * the length out parameter.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the build id information was present in the ELF
- * file or not. If `found` is not 0, the out parameters `build_id` and
- * `length` will both have been set with the retrieved information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-int lttng_ust_elf_get_build_id(struct lttng_ust_elf *elf, uint8_t **build_id,
- size_t *length, int *found)
-{
- uint16_t i;
- uint8_t *_build_id = NULL; /* Silence old gcc warning. */
- size_t _length = 0; /* Silence old gcc warning. */
-
- if (!elf || !build_id || !length || !found) {
- goto error;
- }
-
- for (i = 0; i < elf->ehdr->e_phnum; ++i) {
- off_t offset, segment_end;
- struct lttng_ust_elf_phdr *phdr;
- int ret = 0;
-
- phdr = lttng_ust_elf_get_phdr(elf, i);
- if (!phdr) {
- goto error;
- }
-
- /* Build ID will be contained in a PT_NOTE segment. */
- if (phdr->p_type != PT_NOTE) {
- goto next_loop;
- }
-
- offset = phdr->p_offset;
- segment_end = offset + phdr->p_filesz;
- ret = lttng_ust_elf_get_build_id_from_segment(
- elf, &_build_id, &_length, offset, segment_end);
- next_loop:
- free(phdr);
- if (ret) {
- goto error;
- }
- if (_build_id) {
- break;
- }
- }
-
- if (_build_id) {
- *build_id = _build_id;
- *length = _length;
- *found = 1;
- } else {
- *found = 0;
- }
-
- return 0;
-error:
- free(_build_id);
- return -1;
-}
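Putting the ELF accessors together, a hedged end-to-end usage sketch (binary path and caller are hypothetical):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical caller: print the GNU build ID of a binary as hex. */
static void print_build_id(const char *path)
{
	struct lttng_ust_elf *elf;
	uint8_t *build_id = NULL;
	size_t length, i;
	int found = 0;

	elf = lttng_ust_elf_create(path);
	if (!elf)
		return;
	if (!lttng_ust_elf_get_build_id(elf, &build_id, &length, &found)
			&& found) {
		for (i = 0; i < length; i++)
			printf("%02x", build_id[i]);
		printf("\n");
		free(build_id);
	}
	lttng_ust_elf_destroy(elf);
}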
-
-/*
- * Try to retrieve filename and CRC from given ELF section `shdr`.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the debug link information was present in the ELF
- * section or not. If `found` is not 0, the out parameters `filename` and
- * `crc` will both have been set with the retrieved information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-static
-int lttng_ust_elf_get_debug_link_from_section(struct lttng_ust_elf *elf,
- char **filename, uint32_t *crc,
- struct lttng_ust_elf_shdr *shdr)
-{
- char *_filename = NULL; /* Silence old gcc warning. */
- size_t filename_len;
- char *section_name = NULL;
- uint32_t _crc = 0; /* Silence old gcc warning. */
-
- if (!elf || !filename || !crc || !shdr) {
- goto error;
- }
-
-	/*
-	 * The .gnu_debuglink section is of type SHT_PROGBITS;
-	 * skip the other sections.
-	 */
- if (shdr->sh_type != SHT_PROGBITS) {
- goto end;
- }
-
- section_name = lttng_ust_elf_get_section_name(elf,
- shdr->sh_name);
- if (!section_name) {
- goto end;
- }
- if (strcmp(section_name, ".gnu_debuglink")) {
- goto end;
- }
-
- /*
- * The length of the filename is the sh_size excluding the CRC
- * which comes after it in the section.
- */
- _filename = zmalloc(sizeof(char) * (shdr->sh_size - ELF_CRC_SIZE));
- if (!_filename) {
- goto error;
- }
- if (lseek(elf->fd, shdr->sh_offset, SEEK_SET) < 0) {
- goto error;
- }
- filename_len = sizeof(*_filename) * (shdr->sh_size - ELF_CRC_SIZE);
- if (lttng_ust_read(elf->fd, _filename, filename_len) < filename_len) {
- goto error;
- }
- if (lttng_ust_read(elf->fd, &_crc, sizeof(_crc)) < sizeof(_crc)) {
- goto error;
- }
- if (!is_elf_native_endian(elf)) {
- _crc = bswap_32(_crc);
- }
-
-end:
- free(section_name);
- if (_filename) {
- *filename = _filename;
- *crc = _crc;
- }
-
- return 0;
-
-error:
- free(_filename);
- free(section_name);
- return -1;
-}
-
-/*
- * Retrieve filename and CRC from ELF's .gnu_debuglink section, if any.
- *
- * If the function returns successfully, the out parameter `found`
- * indicates whether the debug link information was present in the ELF
- * file or not. If `found` is not 0, the out parameters `filename` and
- * `crc` will both have been set with the retrieved information.
- *
- * Returns 0 on success, -1 if an error occurred.
- */
-int lttng_ust_elf_get_debug_link(struct lttng_ust_elf *elf, char **filename,
- uint32_t *crc, int *found)
-{
- int ret;
- uint16_t i;
- char *_filename = NULL; /* Silence old gcc warning. */
- uint32_t _crc = 0; /* Silence old gcc warning. */
-
- if (!elf || !filename || !crc || !found) {
- goto error;
- }
-
- for (i = 0; i < elf->ehdr->e_shnum; ++i) {
- struct lttng_ust_elf_shdr *shdr = NULL;
-
- shdr = lttng_ust_elf_get_shdr(elf, i);
- if (!shdr) {
- goto error;
- }
-
- ret = lttng_ust_elf_get_debug_link_from_section(
- elf, &_filename, &_crc, shdr);
- free(shdr);
-
- if (ret) {
- goto error;
- }
- if (_filename) {
- break;
- }
- }
-
- if (_filename) {
- *filename = _filename;
- *crc = _crc;
- *found = 1;
- } else {
- *found = 0;
- }
-
- return 0;
-
-error:
- free(_filename);
- return -1;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#ifndef _LIB_LTTNG_UST_ELF_H
-#define _LIB_LTTNG_UST_ELF_H
-
-#include <elf.h>
-#include <lttng/ust-endian.h>
-
-/*
- * Determine native endianness in order to convert when reading an ELF
- * file if there is a mismatch.
- */
-#if BYTE_ORDER == LITTLE_ENDIAN
-#define NATIVE_ELF_ENDIANNESS ELFDATA2LSB
-#else
-#define NATIVE_ELF_ENDIANNESS ELFDATA2MSB
-#endif
-
-/*
- * The size in bytes of the debug link CRC as contained in an ELF
- * section.
- */
-#define ELF_CRC_SIZE 4
-/*
- * ELF notes are aligned on 4 bytes. ref: ELF specification version
- * 1.1 p. 2-5.
- */
-#define ELF_NOTE_ENTRY_ALIGN 4
-/*
- * Within an ELF note, the `desc` field is also aligned on 4
- * bytes. ref: ELF specification version 1.1 p. 2-5.
- */
-#define ELF_NOTE_DESC_ALIGN 4
-
-#define bswap(x) \
- do { \
- switch (sizeof(x)) { \
- case 8: \
- x = bswap_64(x); \
- break; \
- case 4: \
- x = bswap_32(x); \
- break; \
- case 2: \
- x = bswap_16(x); \
- break; \
- case 1: \
- break; \
- default: \
- abort(); \
- } \
- } while (0)
-
-#define bswap_phdr(phdr) \
- do { \
- bswap((phdr).p_type); \
- bswap((phdr).p_offset); \
- bswap((phdr).p_filesz); \
- bswap((phdr).p_memsz); \
- bswap((phdr).p_align); \
- bswap((phdr).p_vaddr); \
- } while (0)
-
-#define bswap_shdr(shdr) \
- do { \
- bswap((shdr).sh_name); \
- bswap((shdr).sh_type); \
- bswap((shdr).sh_flags); \
- bswap((shdr).sh_addr); \
- bswap((shdr).sh_offset); \
- bswap((shdr).sh_size); \
- bswap((shdr).sh_link); \
- bswap((shdr).sh_info); \
- bswap((shdr).sh_addralign); \
- bswap((shdr).sh_entsize); \
- } while (0)
-
-#define bswap_ehdr(ehdr) \
- do { \
- bswap((ehdr).e_type); \
- bswap((ehdr).e_machine); \
- bswap((ehdr).e_version); \
- bswap((ehdr).e_entry); \
- bswap((ehdr).e_phoff); \
- bswap((ehdr).e_shoff); \
- bswap((ehdr).e_flags); \
- bswap((ehdr).e_ehsize); \
- bswap((ehdr).e_phentsize); \
- bswap((ehdr).e_phnum); \
- bswap((ehdr).e_shentsize); \
- bswap((ehdr).e_shnum); \
- bswap((ehdr).e_shstrndx); \
- } while (0)
-
-#define copy_phdr(src_phdr, dst_phdr) \
- do { \
- (dst_phdr).p_type = (src_phdr).p_type; \
- (dst_phdr).p_offset = (src_phdr).p_offset; \
- (dst_phdr).p_filesz = (src_phdr).p_filesz; \
- (dst_phdr).p_memsz = (src_phdr).p_memsz; \
- (dst_phdr).p_align = (src_phdr).p_align; \
- (dst_phdr).p_vaddr = (src_phdr).p_vaddr; \
- } while (0)
-
-#define copy_shdr(src_shdr, dst_shdr) \
- do { \
- (dst_shdr).sh_name = (src_shdr).sh_name; \
- (dst_shdr).sh_type = (src_shdr).sh_type; \
- (dst_shdr).sh_flags = (src_shdr).sh_flags; \
- (dst_shdr).sh_addr = (src_shdr).sh_addr; \
- (dst_shdr).sh_offset = (src_shdr).sh_offset; \
- (dst_shdr).sh_size = (src_shdr).sh_size; \
- (dst_shdr).sh_link = (src_shdr).sh_link; \
- (dst_shdr).sh_info = (src_shdr).sh_info; \
- (dst_shdr).sh_addralign = (src_shdr).sh_addralign; \
- (dst_shdr).sh_entsize = (src_shdr).sh_entsize; \
- } while (0)
-
-#define copy_ehdr(src_ehdr, dst_ehdr) \
- do { \
- (dst_ehdr).e_type = (src_ehdr).e_type; \
- (dst_ehdr).e_machine = (src_ehdr).e_machine; \
- (dst_ehdr).e_version = (src_ehdr).e_version; \
- (dst_ehdr).e_entry = (src_ehdr).e_entry; \
- (dst_ehdr).e_phoff = (src_ehdr).e_phoff; \
- (dst_ehdr).e_shoff = (src_ehdr).e_shoff; \
- (dst_ehdr).e_flags = (src_ehdr).e_flags; \
- (dst_ehdr).e_ehsize = (src_ehdr).e_ehsize; \
- (dst_ehdr).e_phentsize = (src_ehdr).e_phentsize; \
- (dst_ehdr).e_phnum = (src_ehdr).e_phnum; \
- (dst_ehdr).e_shentsize = (src_ehdr).e_shentsize; \
- (dst_ehdr).e_shnum = (src_ehdr).e_shnum; \
- (dst_ehdr).e_shstrndx = (src_ehdr).e_shstrndx; \
- } while (0)
-
-static inline
-int is_elf_32_bit(struct lttng_ust_elf *elf)
-{
- return elf->bitness == ELFCLASS32;
-}
-
-static inline
-int is_elf_native_endian(struct lttng_ust_elf *elf)
-{
- return elf->endianness == NATIVE_ELF_ENDIANNESS;
-}
-
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_statedump
-
-#if !defined(_TRACEPOINT_LTTNG_UST_STATEDUMP_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_STATEDUMP_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <lttng/ust-events.h>
-#include "compat.h"
-
-#define LTTNG_UST_STATEDUMP_PROVIDER
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_statedump, start,
- TP_ARGS(struct lttng_ust_session *, session),
- TP_FIELDS(
- ctf_unused(session)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, bin_info,
- TP_ARGS(
- struct lttng_ust_session *, session,
- void *, baddr,
- const char*, path,
- uint64_t, memsz,
- uint8_t, is_pic,
- uint8_t, has_build_id,
- uint8_t, has_debug_link
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint64_t, memsz, memsz)
- ctf_string(path, path)
- ctf_integer(uint8_t, is_pic, is_pic)
- ctf_integer(uint8_t, has_build_id, has_build_id)
- ctf_integer(uint8_t, has_debug_link, has_debug_link)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, build_id,
- TP_ARGS(
- struct lttng_ust_session *, session,
- void *, baddr,
- uint8_t *, build_id,
- size_t, build_id_len
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_sequence_hex(uint8_t, build_id, build_id,
- size_t, build_id_len)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, debug_link,
- TP_ARGS(
- struct lttng_ust_session *, session,
- void *, baddr,
- char *, filename,
- uint32_t, crc
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint32_t, crc, crc)
- ctf_string(filename, filename)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, procname,
- TP_ARGS(
- struct lttng_ust_session *, session,
- char *, name
- ),
- TP_FIELDS(
- ctf_unused(session)
- ctf_array_text(char, procname, name, LTTNG_UST_ABI_PROCNAME_LEN)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_statedump, end,
- TP_ARGS(struct lttng_ust_session *, session),
- TP_FIELDS(
- ctf_unused(session)
- )
-)
-
-#endif /* _TRACEPOINT_LTTNG_UST_STATEDUMP_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./lttng-ust-statedump-provider.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <link.h>
-#include <limits.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <stdbool.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-#include <ust-elf.h>
-#include <ust-helper.h>
-#include "lttng-tracer-core.h"
-#include "lttng-ust-statedump.h"
-#include "jhash.h"
-#include "getenv.h"
-#include "compat.h"
-#include "ust-events-internal.h"
-
-#define TRACEPOINT_DEFINE
-#include "ust_lib.h" /* Only define. */
-
-#define TRACEPOINT_CREATE_PROBES
-#define TP_SESSION_CHECK
-#include "lttng-ust-statedump-provider.h" /* Define and create probes. */
-
-struct dl_iterate_data {
- int exec_found;
- bool first;
- bool cancel;
-};
-
-struct bin_info_data {
- void *base_addr_ptr;
- char resolved_path[PATH_MAX];
- char *dbg_file;
- uint8_t *build_id;
- uint64_t memsz;
- size_t build_id_len;
- int vdso;
- uint32_t crc;
- uint8_t is_pic;
- uint8_t has_build_id;
- uint8_t has_debug_link;
-};
-
-struct lttng_ust_dl_node {
- struct bin_info_data bin_data;
- struct cds_hlist_node node;
- bool traced;
- bool marked;
-};
-
-#define UST_DL_STATE_HASH_BITS 8
-#define UST_DL_STATE_TABLE_SIZE (1 << UST_DL_STATE_HASH_BITS)
-struct cds_hlist_head dl_state_table[UST_DL_STATE_TABLE_SIZE];
-
-typedef void (*tracepoint_cb)(struct lttng_ust_session *session, void *priv);
-
-static
-struct lttng_ust_dl_node *alloc_dl_node(const struct bin_info_data *bin_data)
-{
- struct lttng_ust_dl_node *e;
-
- e = zmalloc(sizeof(struct lttng_ust_dl_node));
- if (!e)
- return NULL;
- if (bin_data->dbg_file) {
- e->bin_data.dbg_file = strdup(bin_data->dbg_file);
- if (!e->bin_data.dbg_file)
- goto error;
- }
- if (bin_data->build_id) {
- e->bin_data.build_id = zmalloc(bin_data->build_id_len);
- if (!e->bin_data.build_id)
- goto error;
- memcpy(e->bin_data.build_id, bin_data->build_id,
- bin_data->build_id_len);
- }
- e->bin_data.base_addr_ptr = bin_data->base_addr_ptr;
- memcpy(e->bin_data.resolved_path, bin_data->resolved_path, PATH_MAX);
- e->bin_data.memsz = bin_data->memsz;
- e->bin_data.build_id_len = bin_data->build_id_len;
- e->bin_data.vdso = bin_data->vdso;
- e->bin_data.crc = bin_data->crc;
- e->bin_data.is_pic = bin_data->is_pic;
- e->bin_data.has_build_id = bin_data->has_build_id;
- e->bin_data.has_debug_link = bin_data->has_debug_link;
- return e;
-
-error:
- free(e->bin_data.build_id);
- free(e->bin_data.dbg_file);
- free(e);
- return NULL;
-}
-
-static
-void free_dl_node(struct lttng_ust_dl_node *e)
-{
- free(e->bin_data.build_id);
- free(e->bin_data.dbg_file);
- free(e);
-}
-
-/* Return 0 if same, nonzero if not. */
-static
-int compare_bin_data(const struct bin_info_data *a,
- const struct bin_info_data *b)
-{
- if (a->base_addr_ptr != b->base_addr_ptr)
- return -1;
- if (strcmp(a->resolved_path, b->resolved_path) != 0)
- return -1;
- if (a->dbg_file && !b->dbg_file)
- return -1;
- if (!a->dbg_file && b->dbg_file)
- return -1;
- if (a->dbg_file && strcmp(a->dbg_file, b->dbg_file) != 0)
- return -1;
- if (a->build_id && !b->build_id)
- return -1;
- if (!a->build_id && b->build_id)
- return -1;
- if (a->build_id_len != b->build_id_len)
- return -1;
- if (a->build_id &&
- memcmp(a->build_id, b->build_id, a->build_id_len) != 0)
- return -1;
- if (a->memsz != b->memsz)
- return -1;
- if (a->vdso != b->vdso)
- return -1;
- if (a->crc != b->crc)
- return -1;
- if (a->is_pic != b->is_pic)
- return -1;
- if (a->has_build_id != b->has_build_id)
- return -1;
- if (a->has_debug_link != b->has_debug_link)
- return -1;
- return 0;
-}
-
-static
-struct lttng_ust_dl_node *find_or_create_dl_node(struct bin_info_data *bin_data)
-{
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
- unsigned int hash;
- bool found = false;
-
- hash = jhash(&bin_data->base_addr_ptr,
- sizeof(bin_data->base_addr_ptr), 0);
- head = &dl_state_table[hash & (UST_DL_STATE_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry_2(e, head, node) {
- if (compare_bin_data(&e->bin_data, bin_data) != 0)
- continue;
- found = true;
- break;
- }
- if (!found) {
- /* Create */
- e = alloc_dl_node(bin_data);
- if (!e)
- return NULL;
- cds_hlist_add_head(&e->node, head);
- }
- return e;
-}
-
-static
-void remove_dl_node(struct lttng_ust_dl_node *e)
-{
- cds_hlist_del(&e->node);
-}
-
-/*
- * Trace statedump event into all sessions owned by the caller thread
- * for which statedump is pending.
- */
-static
-void trace_statedump_event(tracepoint_cb tp_cb, void *owner, void *priv)
-{
- struct cds_list_head *sessionsp;
- struct lttng_ust_session_private *session_priv;
-
- sessionsp = lttng_get_sessions();
- cds_list_for_each_entry(session_priv, sessionsp, node) {
- if (session_priv->owner != owner)
- continue;
- if (!session_priv->statedump_pending)
- continue;
- tp_cb(session_priv->pub, priv);
- }
-}
-
-static
-void trace_bin_info_cb(struct lttng_ust_session *session, void *priv)
-{
- struct bin_info_data *bin_data = (struct bin_info_data *) priv;
-
- tracepoint(lttng_ust_statedump, bin_info,
- session, bin_data->base_addr_ptr,
- bin_data->resolved_path, bin_data->memsz,
- bin_data->is_pic, bin_data->has_build_id,
- bin_data->has_debug_link);
-}
-
-static
-void trace_build_id_cb(struct lttng_ust_session *session, void *priv)
-{
- struct bin_info_data *bin_data = (struct bin_info_data *) priv;
-
- tracepoint(lttng_ust_statedump, build_id,
- session, bin_data->base_addr_ptr,
- bin_data->build_id, bin_data->build_id_len);
-}
-
-static
-void trace_debug_link_cb(struct lttng_ust_session *session, void *priv)
-{
- struct bin_info_data *bin_data = (struct bin_info_data *) priv;
-
- tracepoint(lttng_ust_statedump, debug_link,
- session, bin_data->base_addr_ptr,
- bin_data->dbg_file, bin_data->crc);
-}
-
-static
-void procname_cb(struct lttng_ust_session *session, void *priv)
-{
- char *procname = (char *) priv;
- tracepoint(lttng_ust_statedump, procname, session, procname);
-}
-
-static
-void trace_start_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
-{
- tracepoint(lttng_ust_statedump, start, session);
-}
-
-static
-void trace_end_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
-{
- tracepoint(lttng_ust_statedump, end, session);
-}
-
-static
-int get_elf_info(struct bin_info_data *bin_data)
-{
- struct lttng_ust_elf *elf;
- int ret = 0, found;
-
- elf = lttng_ust_elf_create(bin_data->resolved_path);
- if (!elf) {
- ret = -1;
- goto end;
- }
-
- ret = lttng_ust_elf_get_memsz(elf, &bin_data->memsz);
- if (ret) {
- goto end;
- }
-
- found = 0;
- ret = lttng_ust_elf_get_build_id(elf, &bin_data->build_id,
- &bin_data->build_id_len,
- &found);
- if (ret) {
- goto end;
- }
- bin_data->has_build_id = !!found;
- found = 0;
- ret = lttng_ust_elf_get_debug_link(elf, &bin_data->dbg_file,
- &bin_data->crc,
- &found);
- if (ret) {
- goto end;
- }
- bin_data->has_debug_link = !!found;
-
- bin_data->is_pic = lttng_ust_elf_is_pic(elf);
-
-end:
- lttng_ust_elf_destroy(elf);
- return ret;
-}
-
-static
-void trace_baddr(struct bin_info_data *bin_data, void *owner)
-{
- trace_statedump_event(trace_bin_info_cb, owner, bin_data);
-
- if (bin_data->has_build_id)
- trace_statedump_event(trace_build_id_cb, owner, bin_data);
-
- if (bin_data->has_debug_link)
- trace_statedump_event(trace_debug_link_cb, owner, bin_data);
-}
-
-static
-int extract_baddr(struct bin_info_data *bin_data)
-{
- int ret = 0;
- struct lttng_ust_dl_node *e;
-
- if (!bin_data->vdso) {
- ret = get_elf_info(bin_data);
- if (ret) {
- goto end;
- }
- } else {
- bin_data->memsz = 0;
- bin_data->has_build_id = 0;
- bin_data->has_debug_link = 0;
- }
-
- e = find_or_create_dl_node(bin_data);
- if (!e) {
- ret = -1;
- goto end;
- }
- e->marked = true;
-end:
- free(bin_data->build_id);
- bin_data->build_id = NULL;
- free(bin_data->dbg_file);
- bin_data->dbg_file = NULL;
- return ret;
-}
-
-static
-void trace_statedump_start(void *owner)
-{
- trace_statedump_event(trace_start_cb, owner, NULL);
-}
-
-static
-void trace_statedump_end(void *owner)
-{
- trace_statedump_event(trace_end_cb, owner, NULL);
-}
-
-static
-void iter_begin(struct dl_iterate_data *data)
-{
- unsigned int i;
-
- /*
- * UST lock nests within dynamic loader lock.
- *
- * Hold this lock across handling of the module listing to
- * protect memory allocation at early process start, due to
- * interactions with libc-wrapper lttng malloc instrumentation.
- */
- if (ust_lock()) {
- data->cancel = true;
- return;
- }
-
- /* Ensure all entries are unmarked. */
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_2(e, head, node)
- assert(!e->marked);
- }
-}
-
-static
-void trace_lib_load(const struct bin_info_data *bin_data, void *ip)
-{
- tracepoint(lttng_ust_lib, load,
- ip, bin_data->base_addr_ptr, bin_data->resolved_path,
- bin_data->memsz, bin_data->has_build_id,
- bin_data->has_debug_link);
-
- if (bin_data->has_build_id) {
- tracepoint(lttng_ust_lib, build_id,
- ip, bin_data->base_addr_ptr, bin_data->build_id,
- bin_data->build_id_len);
- }
-
- if (bin_data->has_debug_link) {
- tracepoint(lttng_ust_lib, debug_link,
- ip, bin_data->base_addr_ptr, bin_data->dbg_file,
- bin_data->crc);
- }
-}
-
-static
-void trace_lib_unload(const struct bin_info_data *bin_data, void *ip)
-{
- tracepoint(lttng_ust_lib, unload, ip, bin_data->base_addr_ptr);
-}
-
-static
-void iter_end(struct dl_iterate_data *data, void *ip)
-{
- unsigned int i;
-
- if (data->cancel)
- goto end;
-	/*
-	 * Iterate over the hash table:
-	 * - marked and traced: do nothing.
-	 * - marked, not traced: trace lib load event, set traced = true.
-	 * - unmarked, traced: trace lib unload event, then remove node.
-	 * - unmarked, not traced: remove node.
-	 */
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_2(e, head, node) {
- if (e->marked) {
- if (!e->traced) {
- trace_lib_load(&e->bin_data, ip);
- e->traced = true;
- }
- e->marked = false;
- } else {
- if (e->traced)
- trace_lib_unload(&e->bin_data, ip);
- remove_dl_node(e);
- free_dl_node(e);
- }
- }
- }
-end:
- ust_unlock();
-}
-
-static
-int extract_bin_info_events(struct dl_phdr_info *info, size_t size __attribute__((unused)), void *_data)
-{
- int j, ret = 0;
- struct dl_iterate_data *data = _data;
-
- if (data->first) {
- iter_begin(data);
- data->first = false;
- }
-
- if (data->cancel)
- goto end;
-
- for (j = 0; j < info->dlpi_phnum; j++) {
- struct bin_info_data bin_data;
-
- if (info->dlpi_phdr[j].p_type != PT_LOAD)
- continue;
-
- memset(&bin_data, 0, sizeof(bin_data));
-
-		/* Calculate the virtual memory address of the loadable segment. */
-		bin_data.base_addr_ptr = (void *) info->dlpi_addr +
-			info->dlpi_phdr[j].p_vaddr;
-
-		if (info->dlpi_name == NULL || info->dlpi_name[0] == 0) {
-			/*
-			 * Only the first phdr encountered without a
-			 * dlpi_name is considered to be the program
-			 * executable. The rest are vdsos.
-			 */
- if (!data->exec_found) {
- ssize_t path_len;
- data->exec_found = 1;
-
- /*
- * Use /proc/self/exe to resolve the
- * executable's full path.
- */
- path_len = readlink("/proc/self/exe",
- bin_data.resolved_path,
- PATH_MAX - 1);
- if (path_len <= 0)
- break;
-
- bin_data.resolved_path[path_len] = '\0';
- bin_data.vdso = 0;
- } else {
- snprintf(bin_data.resolved_path,
- PATH_MAX - 1, "[vdso]");
- bin_data.vdso = 1;
- }
-		} else {
-			/*
-			 * For regular dl_phdr_info entries, check
-			 * whether the path to the binary really exists.
-			 * If not, treat it as a vdso and use dlpi_name
-			 * as the 'path'.
-			 */
- if (!realpath(info->dlpi_name,
- bin_data.resolved_path)) {
- snprintf(bin_data.resolved_path,
- PATH_MAX - 1, "[%s]",
- info->dlpi_name);
- bin_data.vdso = 1;
- } else {
- bin_data.vdso = 0;
- }
- }
-
- ret = extract_baddr(&bin_data);
- break;
- }
-end:
- return ret;
-}
-
-static
-void ust_dl_table_statedump(void *owner)
-{
- unsigned int i;
-
- if (ust_lock())
- goto end;
-
- /* Statedump each traced table entry into session for owner. */
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_2(e, head, node) {
- if (e->traced)
- trace_baddr(&e->bin_data, owner);
- }
- }
-
-end:
- ust_unlock();
-}
-
-void lttng_ust_dl_update(void *ip)
-{
- struct dl_iterate_data data;
-
- if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
- return;
-
- /*
- * Fixup lttng-ust TLS when called from dlopen/dlclose
- * instrumentation.
- */
- lttng_ust_fixup_tls();
-
- data.exec_found = 0;
- data.first = true;
- data.cancel = false;
- /*
- * Iterate through the list of currently loaded shared objects and
-	 * generate table entries for loadable segments using
- * extract_bin_info_events.
- * Removed libraries are detected by mark-and-sweep: marking is
- * done in the iteration over libraries, and sweeping is
- * performed by iter_end().
- */
- dl_iterate_phdr(extract_bin_info_events, &data);
- if (data.first)
- iter_begin(&data);
- iter_end(&data, ip);
-}
-
-/*
- * Generate a statedump of base addresses of all shared objects loaded
- * by the traced application, as well as for the application's
- * executable itself.
- */
-static
-int do_baddr_statedump(void *owner)
-{
- if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
- return 0;
- lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
- ust_dl_table_statedump(owner);
- return 0;
-}
-
-static
-int do_procname_statedump(void *owner)
-{
- if (lttng_ust_getenv("LTTNG_UST_WITHOUT_PROCNAME_STATEDUMP"))
- return 0;
-
- trace_statedump_event(procname_cb, owner, lttng_ust_sockinfo_get_procname(owner));
- return 0;
-}
-
-/*
- * Generate a statedump of a given traced application. A statedump is
- * delimited by start and end events. For a given (process, session)
- * pair, begin/end events are serialized and will match. However, in a
- * session, statedumps from different processes may be
- * interleaved. The vpid context should be used to identify which
- * events belong to which process.
- *
- * Grab the ust_lock outside of the RCU read-side lock because we
- * perform synchronize_rcu with the ust_lock held, which can trigger
- * deadlocks otherwise.
- */
-int do_lttng_ust_statedump(void *owner)
-{
- ust_lock_nocheck();
- trace_statedump_start(owner);
- ust_unlock();
-
- do_procname_statedump(owner);
- do_baddr_statedump(owner);
-
- ust_lock_nocheck();
- trace_statedump_end(owner);
- ust_unlock();
-
- return 0;
-}
-
-void lttng_ust_statedump_init(void)
-{
- __tracepoints__init();
- __tracepoints__ptrs_init();
- __lttng_events_init__lttng_ust_statedump();
- lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
-}
-
-static
-void ust_dl_state_destroy(void)
-{
- unsigned int i;
-
- for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
- struct cds_hlist_head *head;
- struct lttng_ust_dl_node *e, *tmp;
-
- head = &dl_state_table[i];
- cds_hlist_for_each_entry_safe_2(e, tmp, head, node)
- free_dl_node(e);
- CDS_INIT_HLIST_HEAD(head);
- }
-}
-
-void lttng_ust_statedump_destroy(void)
-{
- __lttng_events_exit__lttng_ust_statedump();
- __tracepoints__ptrs_destroy();
- __tracepoints__destroy();
- ust_dl_state_destroy();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- */
-
-#ifndef LTTNG_UST_STATEDUMP_H
-#define LTTNG_UST_STATEDUMP_H
-
-#include <lttng/ust-events.h>
-
-void lttng_ust_statedump_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_statedump_destroy(void)
- __attribute__((visibility("hidden")));
-
-int do_lttng_ust_statedump(void *owner)
- __attribute__((visibility("hidden")));
-
-#endif /* LTTNG_UST_STATEDUMP_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_tracef
-
-#if !defined(_TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H
-
-#include <lttng/tp/lttng-ust-tracef.h>
-
-#endif /* _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H */
-
-#define TP_IP_PARAM ip /* IP context received as parameter */
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracef.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
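/*
 * A minimal application-side sketch of the tracef() frontend that this
 * provider backs; the program below is illustrative, not part of the
 * tree. Build with -llttng-ust.
 */
#include <lttng/tracef.h>

int main(void)
{
	int answer = 42;

	/* Emits a lttng_ust_tracef:event event when tracing is enabled. */
	tracef("the answer is %d", answer);
	return 0;
}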
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_tracelog
-
-#if !defined(_TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H
-
-#include <lttng/tp/lttng-ust-tracelog.h>
-
-#endif /* _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H */
-
-#define TP_IP_PARAM ip /* IP context received as parameter */
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracelog.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
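/*
 * A minimal sketch of the tracelog() frontend that this provider backs;
 * illustrative only. TRACE_WARNING is one of the standard tracelog
 * levels. Build with -llttng-ust.
 */
#include <lttng/tracelog.h>

int main(void)
{
	/* Emits a lttng_ust_tracelog:TRACE_WARNING event when enabled. */
	tracelog(TRACE_WARNING, "disk usage above %d%%", 95);
	return 0;
}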
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * library wrappers to be used by non-LGPL compatible source code.
- */
-
-#include <urcu/uatomic.h>
-
-#include <lttng/urcu/static/pointer.h>
-/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-#include <lttng/urcu/pointer.h>
-
-void *lttng_ust_rcu_dereference_sym(void *p)
-{
- return _lttng_ust_rcu_dereference(p);
-}
-
-void *lttng_ust_rcu_set_pointer_sym(void **p, void *v)
-{
- cmm_wmb();
- uatomic_set(p, v);
- return v;
-}
-
-void *lttng_ust_rcu_xchg_pointer_sym(void **p, void *v)
-{
- cmm_wmb();
- return uatomic_xchg(p, v);
-}
-
-void *lttng_ust_rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
-{
- cmm_wmb();
- return uatomic_cmpxchg(p, old, _new);
-}
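/*
 * A minimal publish/read sketch using the wrapper symbols defined
 * above, assuming <lttng/urcu/pointer.h> declares them; the config
 * structure is illustrative.
 */
#include <stdlib.h>
#include <lttng/urcu/pointer.h>

struct config { int verbose; };

static struct config *current_config;

static void publish_config(int verbose)
{
	struct config *c = malloc(sizeof(*c));

	if (!c)
		abort();
	c->verbose = verbose;
	/* The cmm_wmb() in the setter orders *c stores before publication. */
	lttng_ust_rcu_set_pointer_sym((void **) &current_config, c);
}

static int read_verbose(void)
{
	struct config *c = lttng_ust_rcu_dereference_sym(current_config);

	return c ? c->verbose : 0;
}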
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
- *
- * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <pthread.h>
-#include <signal.h>
-#include <assert.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include <poll.h>
-#include <unistd.h>
-#include <stdbool.h>
-#include <sys/mman.h>
-
-#include <urcu/arch.h>
-#include <urcu/wfcqueue.h>
-#include <lttng/urcu/static/urcu-ust.h>
-#include <lttng/urcu/pointer.h>
-#include <urcu/tls-compat.h>
-
-/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
-#undef _LGPL_SOURCE
-#include <lttng/urcu/urcu-ust.h>
-#define _LGPL_SOURCE
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-#ifdef __linux__
-static
-void *mremap_wrapper(void *old_address, size_t old_size,
- size_t new_size, int flags)
-{
- return mremap(old_address, old_size, new_size, flags);
-}
-#else
-
-#define MREMAP_MAYMOVE 1
-#define MREMAP_FIXED 2
-
-/*
- * mremap wrapper for non-Linux systems that do not allow MAYMOVE.
- * This is not a generic implementation.
- */
-static
-void *mremap_wrapper(void *old_address, size_t old_size,
- size_t new_size, int flags)
-{
- assert(!(flags & MREMAP_MAYMOVE));
-
- return MAP_FAILED;
-}
-#endif
-
-/* Sleep delay in ms */
-#define RCU_SLEEP_DELAY_MS 10
-#define INIT_NR_THREADS 8
-#define ARENA_INIT_ALLOC \
- sizeof(struct registry_chunk) \
- + INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
-
-/*
- * Active attempts to check for reader Q.S. before calling sleep().
- */
-#define RCU_QS_ACTIVE_ATTEMPTS 100
-
-static
-int lttng_ust_urcu_refcount;
-
-/* If the headers do not support the membarrier system call, fall back on smp_mb. */
-#ifdef __NR_membarrier
-# define membarrier(...) syscall(__NR_membarrier, __VA_ARGS__)
-#else
-# define membarrier(...) -ENOSYS
-#endif
-
-enum membarrier_cmd {
- MEMBARRIER_CMD_QUERY = 0,
- MEMBARRIER_CMD_SHARED = (1 << 0),
- /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
- /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
- MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
- MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
-};
-
-static
-void _lttng_ust_urcu_init(void)
- __attribute__((constructor));
-static
-void lttng_ust_urcu_exit(void)
- __attribute__((destructor));
-
-#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
-int lttng_ust_urcu_has_sys_membarrier;
-#endif
-
-/*
- * rcu_gp_lock ensures mutual exclusion between threads calling
- * synchronize_rcu().
- */
-static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
-/*
- * rcu_registry_lock ensures mutual exclusion between threads
- * registering and unregistering themselves to/from the registry, and
- * with threads reading that registry from synchronize_rcu(). However,
- * this lock is not held all the way through the completion of awaiting
- * for the grace period. It is sporadically released between iterations
- * on the registry.
- * rcu_registry_lock may nest inside rcu_gp_lock.
- */
-static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
-
-static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
-static int initialized;
-
-static pthread_key_t lttng_ust_urcu_key;
-
-struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
-
-/*
- * Pointer to registry elements. Written to only by each individual reader. Read
- * by both the reader and the writers.
- */
-DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);
-
-static CDS_LIST_HEAD(registry);
-
-struct registry_chunk {
- size_t data_len; /* data length */
- size_t used; /* amount of data used */
- struct cds_list_head node; /* chunk_list node */
- char data[];
-};
-
-struct registry_arena {
- struct cds_list_head chunk_list;
-};
-
-static struct registry_arena registry_arena = {
- .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
-};
-
-/* Saved fork signal mask, protected by rcu_gp_lock */
-static sigset_t saved_fork_signal_mask;
-
-static void mutex_lock(pthread_mutex_t *mutex)
-{
- int ret;
-
-#ifndef DISTRUST_SIGNALS_EXTREME
- ret = pthread_mutex_lock(mutex);
- if (ret)
- abort();
-#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
- while ((ret = pthread_mutex_trylock(mutex)) != 0) {
- if (ret != EBUSY && ret != EINTR)
- abort();
- poll(NULL,0,10);
- }
-#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
-}
-
-static void mutex_unlock(pthread_mutex_t *mutex)
-{
- int ret;
-
- ret = pthread_mutex_unlock(mutex);
- if (ret)
- abort();
-}
-
-static void smp_mb_master(void)
-{
- if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
- if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
- abort();
- } else {
- cmm_smp_mb();
- }
-}
-
-/*
- * Always called with rcu_registry lock held. Releases this lock between
- * iterations and grabs it again. Holds the lock when it returns.
- */
-static void wait_for_readers(struct cds_list_head *input_readers,
- struct cds_list_head *cur_snap_readers,
- struct cds_list_head *qsreaders)
-{
- unsigned int wait_loops = 0;
- struct lttng_ust_urcu_reader *index, *tmp;
-
- /*
- * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
- * indicate quiescence (not nested), or observe the current
- * rcu_gp.ctr value.
- */
- for (;;) {
- if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
- wait_loops++;
-
- cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
- switch (lttng_ust_urcu_reader_state(&index->ctr)) {
- case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
- if (cur_snap_readers) {
- cds_list_move(&index->node,
- cur_snap_readers);
- break;
- }
- /* Fall-through */
- case LTTNG_UST_URCU_READER_INACTIVE:
- cds_list_move(&index->node, qsreaders);
- break;
- case LTTNG_UST_URCU_READER_ACTIVE_OLD:
- /*
- * Old snapshot. Leaving node in
- * input_readers will make us busy-loop
- * until the snapshot becomes current or
- * the reader becomes inactive.
- */
- break;
- }
- }
-
- if (cds_list_empty(input_readers)) {
- break;
- } else {
- /* Temporarily unlock the registry lock. */
- mutex_unlock(&rcu_registry_lock);
- if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
- (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
- else
- caa_cpu_relax();
- /* Re-lock the registry lock before the next loop. */
- mutex_lock(&rcu_registry_lock);
- }
- }
-}
-
-void lttng_ust_urcu_synchronize_rcu(void)
-{
- CDS_LIST_HEAD(cur_snap_readers);
- CDS_LIST_HEAD(qsreaders);
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- assert(!ret);
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- assert(!ret);
-
- mutex_lock(&rcu_gp_lock);
-
- mutex_lock(&rcu_registry_lock);
-
-	if (cds_list_empty(&registry))
- goto out;
-
-	/* All threads should read qparity before accessing the data
-	 * structure the new ptr points to. */
-	/* Write the new ptr before changing the qparity. */
- smp_mb_master();
-
-	/*
-	 * Wait for readers to observe original parity or be quiescent.
-	 * wait_for_readers() can release and re-acquire rcu_registry_lock
-	 * internally.
-	 */
-	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
-
- /*
- * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- cmm_smp_mb();
-
- /* Switch parity: 0 -> 1, 1 -> 0 */
- CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);
-
- /*
- * Must commit qparity update to memory before waiting for other parity
- * quiescent state. Failure to do so could result in the writer waiting
- * forever while new readers are always accessing data (no progress).
- * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
- */
-
- /*
- * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
- * model easier to understand. It does not have a big performance impact
- * anyway, given this is the write-side.
- */
- cmm_smp_mb();
-
-	/*
-	 * Wait for readers to observe new parity or be quiescent.
-	 * wait_for_readers() can release and re-acquire rcu_registry_lock
-	 * internally.
-	 */
- wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
-
- /*
- * Put quiescent reader list back into registry.
- */
-	cds_list_splice(&qsreaders, &registry);
-
-	/*
-	 * Finish waiting for reader threads before letting the old ptr
-	 * be freed.
-	 */
- smp_mb_master();
-out:
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
-
-/*
- * library wrappers to be used by non-LGPL compatible source code.
- */
-
-void lttng_ust_urcu_read_lock(void)
-{
- _lttng_ust_urcu_read_lock();
-}
-
-void lttng_ust_urcu_read_unlock(void)
-{
- _lttng_ust_urcu_read_unlock();
-}
-
-int lttng_ust_urcu_read_ongoing(void)
-{
- return _lttng_ust_urcu_read_ongoing();
-}
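/*
 * A sketch of the read/update/reclaim pattern these wrappers enable;
 * illustrative only. A production updater would publish through the
 * lttng_ust_rcu_*_pointer_sym() helpers rather than a plain store.
 */
#include <stdlib.h>
#include <lttng/urcu/urcu-ust.h>

static int *shared;

static int reader(void)
{
	int v;

	lttng_ust_urcu_register_thread();	/* no-op if already registered */
	lttng_ust_urcu_read_lock();
	v = shared ? *shared : -1;
	lttng_ust_urcu_read_unlock();
	return v;
}

static void updater(int *newp)
{
	int *old = shared;

	shared = newp;				/* publish new version */
	lttng_ust_urcu_synchronize_rcu();	/* wait out current readers */
	free(old);				/* now safe to reclaim */
}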
-
-/*
- * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
- * Else, try expanding the last chunk. If this fails, allocate a new
- * chunk twice as big as the last chunk.
- * Memory used by chunks _never_ moves. A chunk could theoretically be
- * freed when all "used" slots are released, but we don't do it at this
- * point.
- */
-static
-void expand_arena(struct registry_arena *arena)
-{
- struct registry_chunk *new_chunk, *last_chunk;
- size_t old_chunk_len, new_chunk_len;
-
- /* No chunk. */
- if (cds_list_empty(&arena->chunk_list)) {
- assert(ARENA_INIT_ALLOC >=
- sizeof(struct registry_chunk)
- + sizeof(struct lttng_ust_urcu_reader));
- new_chunk_len = ARENA_INIT_ALLOC;
- new_chunk = (struct registry_chunk *) mmap(NULL,
- new_chunk_len,
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE,
- -1, 0);
- if (new_chunk == MAP_FAILED)
- abort();
- memset(new_chunk, 0, new_chunk_len);
- new_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
- return; /* We're done. */
- }
-
- /* Try expanding last chunk. */
- last_chunk = cds_list_entry(arena->chunk_list.prev,
- struct registry_chunk, node);
- old_chunk_len =
- last_chunk->data_len + sizeof(struct registry_chunk);
- new_chunk_len = old_chunk_len << 1;
-
- /* Don't allow memory mapping to move, just expand. */
- new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
- new_chunk_len, 0);
- if (new_chunk != MAP_FAILED) {
- /* Should not have moved. */
- assert(new_chunk == last_chunk);
- memset((char *) last_chunk + old_chunk_len, 0,
- new_chunk_len - old_chunk_len);
- last_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- return; /* We're done. */
- }
-
- /* Remap did not succeed, we need to add a new chunk. */
- new_chunk = (struct registry_chunk *) mmap(NULL,
- new_chunk_len,
- PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE,
- -1, 0);
- if (new_chunk == MAP_FAILED)
- abort();
- memset(new_chunk, 0, new_chunk_len);
- new_chunk->data_len =
- new_chunk_len - sizeof(struct registry_chunk);
- cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
-}
-
-static
-struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
-{
- struct registry_chunk *chunk;
- struct lttng_ust_urcu_reader *rcu_reader_reg;
- int expand_done = 0; /* Only allow to expand once per alloc */
- size_t len = sizeof(struct lttng_ust_urcu_reader);
-
-retry:
- cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
- if (chunk->data_len - chunk->used < len)
- continue;
- /* Find spot */
- for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
- rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
- rcu_reader_reg++) {
- if (!rcu_reader_reg->alloc) {
- rcu_reader_reg->alloc = 1;
- chunk->used += len;
- return rcu_reader_reg;
- }
- }
- }
-
- if (!expand_done) {
- expand_arena(arena);
- expand_done = 1;
- goto retry;
- }
-
- return NULL;
-}
-
-/* Called with signals off and mutex locked */
-static
-void add_thread(void)
-{
- struct lttng_ust_urcu_reader *rcu_reader_reg;
- int ret;
-
-	rcu_reader_reg = arena_alloc(&registry_arena);
- if (!rcu_reader_reg)
- abort();
- ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
- if (ret)
- abort();
-
- /* Add to registry */
- rcu_reader_reg->tid = pthread_self();
- assert(rcu_reader_reg->ctr == 0);
-	cds_list_add(&rcu_reader_reg->node, &registry);
- /*
- * Reader threads are pointing to the reader registry. This is
- * why its memory should never be relocated.
- */
- URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
-}
-
-/* Called with mutex locked */
-static
-void cleanup_thread(struct registry_chunk *chunk,
- struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- rcu_reader_reg->ctr = 0;
- cds_list_del(&rcu_reader_reg->node);
- rcu_reader_reg->tid = 0;
- rcu_reader_reg->alloc = 0;
- chunk->used -= sizeof(struct lttng_ust_urcu_reader);
-}
-
-static
-struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- struct registry_chunk *chunk;
-
-	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
- if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
- continue;
- if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
- continue;
- return chunk;
- }
- return NULL;
-}
-
-/* Called with signals off and mutex locked */
-static
-void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
- URCU_TLS(lttng_ust_urcu_reader) = NULL;
-}
-
-/* Disable signals, take mutex, add to registry */
-void lttng_ust_urcu_register(void)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
-
-	/*
-	 * Check if a signal handler concurrently registered our thread
-	 * since the check in rcu_read_lock().
-	 */
- if (URCU_TLS(lttng_ust_urcu_reader))
- goto end;
-
- /*
- * Take care of early registration before lttng_ust_urcu constructor.
- */
- _lttng_ust_urcu_init();
-
- mutex_lock(&rcu_registry_lock);
- add_thread();
- mutex_unlock(&rcu_registry_lock);
-end:
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
-}
-
-void lttng_ust_urcu_register_thread(void)
-{
- if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
- lttng_ust_urcu_register(); /* If not yet registered. */
-}
-
-/* Disable signals, take mutex, remove from registry */
-static
-void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- if (ret)
- abort();
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- if (ret)
- abort();
-
- mutex_lock(&rcu_registry_lock);
- remove_thread(rcu_reader_reg);
- mutex_unlock(&rcu_registry_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- if (ret)
- abort();
- lttng_ust_urcu_exit();
-}
-
-/*
- * Remove thread from the registry when it exits, and flag it as
- * destroyed so garbage collection can take care of it.
- */
-static
-void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
-{
- lttng_ust_urcu_unregister(rcu_key);
-}
-
-#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
-static
-void lttng_ust_urcu_sys_membarrier_status(bool available)
-{
- if (!available)
- abort();
-}
-#else
-static
-void lttng_ust_urcu_sys_membarrier_status(bool available)
-{
- if (!available)
- return;
- lttng_ust_urcu_has_sys_membarrier = 1;
-}
-#endif
-
-static
-void lttng_ust_urcu_sys_membarrier_init(void)
-{
- bool available = false;
- int mask;
-
- mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
- if (mask >= 0) {
- if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
- if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
- abort();
- available = true;
- }
- }
- lttng_ust_urcu_sys_membarrier_status(available);
-}
-
-static
-void _lttng_ust_urcu_init(void)
-{
- mutex_lock(&init_lock);
- if (!lttng_ust_urcu_refcount++) {
- int ret;
-
-		ret = pthread_key_create(&lttng_ust_urcu_key,
- lttng_ust_urcu_thread_exit_notifier);
- if (ret)
- abort();
- lttng_ust_urcu_sys_membarrier_init();
- initialized = 1;
- }
- mutex_unlock(&init_lock);
-}
-
-static
-void lttng_ust_urcu_exit(void)
-{
- mutex_lock(&init_lock);
- if (!--lttng_ust_urcu_refcount) {
- struct registry_chunk *chunk, *tmp;
- int ret;
-
- cds_list_for_each_entry_safe(chunk, tmp,
-				&registry_arena.chunk_list, node) {
- munmap((void *) chunk, chunk->data_len
- + sizeof(struct registry_chunk));
- }
-		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
- ret = pthread_key_delete(lttng_ust_urcu_key);
- if (ret)
- abort();
- }
- mutex_unlock(&init_lock);
-}
-
-/*
- * Holding the rcu_gp_lock and rcu_registry_lock across fork makes sure
- * that fork() does not race with a concurrent thread executing with
- * any of those locks held. This ensures that the registry and data
- * protected by rcu_gp_lock are in a coherent state in the child.
- */
-void lttng_ust_urcu_before_fork(void)
-{
- sigset_t newmask, oldmask;
- int ret;
-
- ret = sigfillset(&newmask);
- assert(!ret);
- ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
- assert(!ret);
- mutex_lock(&rcu_gp_lock);
- mutex_lock(&rcu_registry_lock);
- saved_fork_signal_mask = oldmask;
-}
-
-void lttng_ust_urcu_after_fork_parent(void)
-{
- sigset_t oldmask;
- int ret;
-
- oldmask = saved_fork_signal_mask;
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
-
-/*
- * Prune all entries from registry except our own thread. Fits the Linux
- * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
- */
-static
-void lttng_ust_urcu_prune_registry(void)
-{
- struct registry_chunk *chunk;
- struct lttng_ust_urcu_reader *rcu_reader_reg;
-
-	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
- for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
- rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
- rcu_reader_reg++) {
- if (!rcu_reader_reg->alloc)
- continue;
- if (rcu_reader_reg->tid == pthread_self())
- continue;
- cleanup_thread(chunk, rcu_reader_reg);
- }
- }
-}
-
-void lttng_ust_urcu_after_fork_child(void)
-{
- sigset_t oldmask;
- int ret;
-
- lttng_ust_urcu_prune_registry();
- oldmask = saved_fork_signal_mask;
- mutex_unlock(&rcu_registry_lock);
- mutex_unlock(&rcu_gp_lock);
- ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
- assert(!ret);
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_UUID_H
-#define _LTTNG_UST_UUID_H
-
-#include <lttng/ust-events.h> /* For LTTNG_UST_UUID_LEN */
-#include <lttng/ust-clock.h>
-
-#endif /* _LTTNG_UST_UUID_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#ifndef _LTTNG_NS_H
-#define _LTTNG_NS_H
-
-/*
- * The lowest valid inode number that can be allocated in the proc filesystem
- * is 0xF0000000. Any number below it can be used internally as an error code.
- *
- * Zero is used in the kernel as an error code; it's the value we return
- * when we fail to read the proper inode number.
- *
- * One is used internally to identify an uninitialized cache entry; it should
- * never be returned.
- */
-
-enum ns_ino_state {
- NS_INO_UNAVAILABLE = 0x0,
- NS_INO_UNINITIALIZED = 0x1,
- NS_INO_MIN = 0xF0000000,
-};
-
-#endif /* _LTTNG_NS_H */
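/*
 * A sketch of how a caller might map a proc namespace inode onto
 * enum ns_ino_state; the stat(2) on /proc/self/ns/net is illustrative
 * of how a namespace context probe could read these inodes.
 */
#include <sys/stat.h>
#include <sys/types.h>

static ino_t get_net_ns_ino(void)
{
	struct stat sb;

	if (stat("/proc/self/ns/net", &sb) < 0)
		return NS_INO_UNAVAILABLE;
	/* Inodes below NS_INO_MIN cannot be valid proc ns inodes. */
	if (sb.st_ino < NS_INO_MIN)
		return NS_INO_UNAVAILABLE;
	return sb.st_ino;
}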
+++ /dev/null
-/*
- * SPDX-License-Identifier: GPL-2.0-only
- *
- * Performance events:
- *
- * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
- * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
- * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
- *
- * Data type definitions, declarations, prototypes.
- *
- * Started by: Thomas Gleixner and Ingo Molnar
- *
- * Header copied from Linux kernel v4.7 installed headers.
- */
-
-#ifndef _UAPI_LINUX_PERF_EVENT_H
-#define _UAPI_LINUX_PERF_EVENT_H
-
-#include <linux/types.h>
-#include <linux/ioctl.h>
-#include <asm/byteorder.h>
-
-/*
- * User-space ABI bits:
- */
-
-/*
- * attr.type
- */
-enum perf_type_id {
- PERF_TYPE_HARDWARE = 0,
- PERF_TYPE_SOFTWARE = 1,
- PERF_TYPE_TRACEPOINT = 2,
- PERF_TYPE_HW_CACHE = 3,
- PERF_TYPE_RAW = 4,
- PERF_TYPE_BREAKPOINT = 5,
-
- PERF_TYPE_MAX, /* non-ABI */
-};
-
-/*
- * Generalized performance event event_id types, used by the
- * attr.event_id parameter of the sys_perf_event_open()
- * syscall:
- */
-enum perf_hw_id {
- /*
- * Common hardware events, generalized by the kernel:
- */
- PERF_COUNT_HW_CPU_CYCLES = 0,
- PERF_COUNT_HW_INSTRUCTIONS = 1,
- PERF_COUNT_HW_CACHE_REFERENCES = 2,
- PERF_COUNT_HW_CACHE_MISSES = 3,
- PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
- PERF_COUNT_HW_BRANCH_MISSES = 5,
- PERF_COUNT_HW_BUS_CYCLES = 6,
- PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
- PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
- PERF_COUNT_HW_REF_CPU_CYCLES = 9,
-
- PERF_COUNT_HW_MAX, /* non-ABI */
-};
-
-/*
- * Generalized hardware cache events:
- *
- * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
- * { read, write, prefetch } x
- * { accesses, misses }
- */
-enum perf_hw_cache_id {
- PERF_COUNT_HW_CACHE_L1D = 0,
- PERF_COUNT_HW_CACHE_L1I = 1,
- PERF_COUNT_HW_CACHE_LL = 2,
- PERF_COUNT_HW_CACHE_DTLB = 3,
- PERF_COUNT_HW_CACHE_ITLB = 4,
- PERF_COUNT_HW_CACHE_BPU = 5,
- PERF_COUNT_HW_CACHE_NODE = 6,
-
- PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_id {
- PERF_COUNT_HW_CACHE_OP_READ = 0,
- PERF_COUNT_HW_CACHE_OP_WRITE = 1,
- PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
-
- PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
-};
-
-enum perf_hw_cache_op_result_id {
- PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
- PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
-
- PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
-};
-
-/*
- * Special "software" events provided by the kernel, even if the hardware
- * does not support performance events. These events measure various
- * physical and sw events of the kernel (and allow the profiling of them as
- * well):
- */
-enum perf_sw_ids {
- PERF_COUNT_SW_CPU_CLOCK = 0,
- PERF_COUNT_SW_TASK_CLOCK = 1,
- PERF_COUNT_SW_PAGE_FAULTS = 2,
- PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
- PERF_COUNT_SW_CPU_MIGRATIONS = 4,
- PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
- PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
- PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
- PERF_COUNT_SW_EMULATION_FAULTS = 8,
- PERF_COUNT_SW_DUMMY = 9,
- PERF_COUNT_SW_BPF_OUTPUT = 10,
-
- PERF_COUNT_SW_MAX, /* non-ABI */
-};
-
-/*
- * Bits that can be set in attr.sample_type to request information
- * in the overflow packets.
- */
-enum perf_event_sample_format {
- PERF_SAMPLE_IP = 1U << 0,
- PERF_SAMPLE_TID = 1U << 1,
- PERF_SAMPLE_TIME = 1U << 2,
- PERF_SAMPLE_ADDR = 1U << 3,
- PERF_SAMPLE_READ = 1U << 4,
- PERF_SAMPLE_CALLCHAIN = 1U << 5,
- PERF_SAMPLE_ID = 1U << 6,
- PERF_SAMPLE_CPU = 1U << 7,
- PERF_SAMPLE_PERIOD = 1U << 8,
- PERF_SAMPLE_STREAM_ID = 1U << 9,
- PERF_SAMPLE_RAW = 1U << 10,
- PERF_SAMPLE_BRANCH_STACK = 1U << 11,
- PERF_SAMPLE_REGS_USER = 1U << 12,
- PERF_SAMPLE_STACK_USER = 1U << 13,
- PERF_SAMPLE_WEIGHT = 1U << 14,
- PERF_SAMPLE_DATA_SRC = 1U << 15,
- PERF_SAMPLE_IDENTIFIER = 1U << 16,
- PERF_SAMPLE_TRANSACTION = 1U << 17,
- PERF_SAMPLE_REGS_INTR = 1U << 18,
-
- PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
-};
-
-/*
- * values to program into branch_sample_type when PERF_SAMPLE_BRANCH is set
- *
- * If the user does not pass priv level information via branch_sample_type,
- * the kernel uses the event's priv level. Branch and event priv levels do
- * not have to match. Branch priv level is checked for permissions.
- *
- * The branch types can be combined, however BRANCH_ANY covers all types
- * of branches and therefore it supersedes all the other types.
- */
-enum perf_branch_sample_type_shift {
- PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
- PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
- PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
-
- PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
- PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
- PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
- PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
- PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
- PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
- PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
- PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
-
- PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
- PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
- PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */
-
- PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
- PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */
-
- PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
-};
-
-enum perf_branch_sample_type {
- PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
- PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
- PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
-
- PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
- PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
- PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
- PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
- PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
- PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
- PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
- PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
-
- PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
- PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
- PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
-
- PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
- PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
-
- PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
-};
-
-#define PERF_SAMPLE_BRANCH_PLM_ALL \
- (PERF_SAMPLE_BRANCH_USER|\
- PERF_SAMPLE_BRANCH_KERNEL|\
- PERF_SAMPLE_BRANCH_HV)
-
-/*
- * Values to determine ABI of the registers dump.
- */
-enum perf_sample_regs_abi {
- PERF_SAMPLE_REGS_ABI_NONE = 0,
- PERF_SAMPLE_REGS_ABI_32 = 1,
- PERF_SAMPLE_REGS_ABI_64 = 2,
-};
-
-/*
- * Values for the memory transaction event qualifier, mostly for
- * abort events. Multiple bits can be set.
- */
-enum {
- PERF_TXN_ELISION = (1 << 0), /* From elision */
- PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
- PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
- PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
- PERF_TXN_RETRY = (1 << 4), /* Retry possible */
- PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
- PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
- PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
-
- PERF_TXN_MAX = (1 << 8), /* non-ABI */
-
- /* bits 32..63 are reserved for the abort code */
-
- PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
- PERF_TXN_ABORT_SHIFT = 32,
-};
-
-/*
- * The format of the data returned by read() on a perf event fd,
- * as specified by attr.read_format:
- *
- * struct read_format {
- * { u64 value;
- * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
- * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
- * { u64 id; } && PERF_FORMAT_ID
- * } && !PERF_FORMAT_GROUP
- *
- * { u64 nr;
- * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
- * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
- * { u64 value;
- * { u64 id; } && PERF_FORMAT_ID
- * } cntr[nr];
- * } && PERF_FORMAT_GROUP
- * };
- */
-enum perf_event_read_format {
- PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
- PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
- PERF_FORMAT_ID = 1U << 2,
- PERF_FORMAT_GROUP = 1U << 3,
-
- PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
-};
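/*
 * A sketch decoding a non-group read() according to the layout above,
 * assuming the event was opened with PERF_FORMAT_TOTAL_TIME_ENABLED |
 * PERF_FORMAT_TOTAL_TIME_RUNNING; the struct and helper names are
 * illustrative.
 */
#include <stdint.h>
#include <unistd.h>

struct read_buf {			/* !PERF_FORMAT_GROUP layout */
	uint64_t value;
	uint64_t time_enabled;		/* PERF_FORMAT_TOTAL_TIME_ENABLED */
	uint64_t time_running;		/* PERF_FORMAT_TOTAL_TIME_RUNNING */
};

static int read_scaled_count(int fd, uint64_t *count)
{
	struct read_buf rb;

	if (read(fd, &rb, sizeof(rb)) != sizeof(rb))
		return -1;
	/* Compensate for multiplexing: value * enabled / running. */
	if (rb.time_running)
		*count = rb.value * rb.time_enabled / rb.time_running;
	else
		*count = rb.value;
	return 0;
}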
-
-#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
-#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
-#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
-#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
- /* add: sample_stack_user */
-#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
-#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
-
-/*
- * Hardware event_id to monitor via a performance monitoring event:
- */
-struct perf_event_attr {
-
- /*
- * Major type: hardware/software/tracepoint/etc.
- */
- __u32 type;
-
- /*
- * Size of the attr structure, for fwd/bwd compat.
- */
- __u32 size;
-
- /*
- * Type specific configuration information.
- */
- __u64 config;
-
- union {
- __u64 sample_period;
- __u64 sample_freq;
- };
-
- __u64 sample_type;
- __u64 read_format;
-
- __u64 disabled : 1, /* off by default */
- inherit : 1, /* children inherit it */
- pinned : 1, /* must always be on PMU */
- exclusive : 1, /* only group on PMU */
- exclude_user : 1, /* don't count user */
- exclude_kernel : 1, /* ditto kernel */
- exclude_hv : 1, /* ditto hypervisor */
- exclude_idle : 1, /* don't count when idle */
- mmap : 1, /* include mmap data */
- comm : 1, /* include comm data */
- freq : 1, /* use freq, not period */
- inherit_stat : 1, /* per task counts */
- enable_on_exec : 1, /* next exec enables */
- task : 1, /* trace fork/exit */
- watermark : 1, /* wakeup_watermark */
- /*
- * precise_ip:
- *
- * 0 - SAMPLE_IP can have arbitrary skid
- * 1 - SAMPLE_IP must have constant skid
- * 2 - SAMPLE_IP requested to have 0 skid
- * 3 - SAMPLE_IP must have 0 skid
- *
- * See also PERF_RECORD_MISC_EXACT_IP
- */
- precise_ip : 2, /* skid constraint */
- mmap_data : 1, /* non-exec mmap data */
- sample_id_all : 1, /* sample_type all events */
-
- exclude_host : 1, /* don't count in host */
- exclude_guest : 1, /* don't count in guest */
-
- exclude_callchain_kernel : 1, /* exclude kernel callchains */
- exclude_callchain_user : 1, /* exclude user callchains */
- mmap2 : 1, /* include mmap with inode data */
- comm_exec : 1, /* flag comm events that are due to an exec */
- use_clockid : 1, /* use @clockid for time fields */
- context_switch : 1, /* context switch data */
- write_backward : 1, /* Write ring buffer from end to beginning */
- __reserved_1 : 36;
-
- union {
- __u32 wakeup_events; /* wakeup every n events */
- __u32 wakeup_watermark; /* bytes before wakeup */
- };
-
- __u32 bp_type;
- union {
- __u64 bp_addr;
- __u64 config1; /* extension of config */
- };
- union {
- __u64 bp_len;
- __u64 config2; /* extension of config1 */
- };
- __u64 branch_sample_type; /* enum perf_branch_sample_type */
-
- /*
- * Defines set of user regs to dump on samples.
- * See asm/perf_regs.h for details.
- */
- __u64 sample_regs_user;
-
- /*
- * Defines size of the user stack to dump on samples.
- */
- __u32 sample_stack_user;
-
- __s32 clockid;
- /*
- * Defines set of regs to dump for each sample
- * state captured on:
- * - precise = 0: PMU interrupt
- * - precise > 0: sampled instruction
- *
- * See asm/perf_regs.h for details.
- */
- __u64 sample_regs_intr;
-
- /*
- * Wakeup watermark for AUX area
- */
- __u32 aux_watermark;
- __u32 __reserved_2; /* align to __u64 */
-};
-
-#define perf_flags(attr) (*(&(attr)->read_format + 1))
-
-/*
- * Ioctls that can be done on a perf event fd:
- */
-#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
-#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
-#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
-#define PERF_EVENT_IOC_RESET _IO ('$', 3)
-#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
-#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
-#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
-#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
-#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
-#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
-
-enum perf_event_ioc_flags {
- PERF_IOC_FLAG_GROUP = 1U << 0,
-};
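/*
 * A sketch of opening a counter described by struct perf_event_attr
 * and driving it with the ioctls above; perf_event_open() has no libc
 * wrapper, hence the raw syscall. Illustrative, not part of the header.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_cycles_counter(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.disabled = 1;		/* start stopped */
	attr.exclude_kernel = 1;

	/* pid = 0, cpu = -1: this process, any CPU. */
	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}

/*
 * Typical usage around a measured region:
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... workload ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 */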
-
-/*
- * Structure of the page that can be mapped via mmap
- */
-struct perf_event_mmap_page {
- __u32 version; /* version number of this structure */
- __u32 compat_version; /* lowest version this is compat with */
-
- /*
- * Bits needed to read the hw events in user-space.
- *
- * u32 seq, time_mult, time_shift, index, width;
- * u64 count, enabled, running;
- * u64 cyc, time_offset;
- * s64 pmc = 0;
- *
- * do {
- * seq = pc->lock;
- * barrier()
- *
- * enabled = pc->time_enabled;
- * running = pc->time_running;
- *
- * if (pc->cap_usr_time && enabled != running) {
- * cyc = rdtsc();
- * time_offset = pc->time_offset;
- * time_mult = pc->time_mult;
- * time_shift = pc->time_shift;
- * }
- *
- * index = pc->index;
- * count = pc->offset;
- * if (pc->cap_user_rdpmc && index) {
- * width = pc->pmc_width;
- * pmc = rdpmc(index - 1);
- * }
- *
- * barrier();
- * } while (pc->lock != seq);
- *
-	 * NOTE: for obvious reasons this only works on self-monitoring
-	 *       processes.
- */
- __u32 lock; /* seqlock for synchronization */
- __u32 index; /* hardware event identifier */
- __s64 offset; /* add to hardware event value */
- __u64 time_enabled; /* time event active */
- __u64 time_running; /* time event on cpu */
- union {
- __u64 capabilities;
- struct {
- __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
- cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
-
- cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
- cap_user_time : 1, /* The time_* fields are used */
- cap_user_time_zero : 1, /* The time_zero field is used */
- cap_____res : 59;
- };
- };
-
- /*
- * If cap_user_rdpmc this field provides the bit-width of the value
- * read using the rdpmc() or equivalent instruction. This can be used
- * to sign extend the result like:
- *
- * pmc <<= 64 - width;
- * pmc >>= 64 - width; // signed shift right
- * count += pmc;
- */
- __u16 pmc_width;
-
- /*
- * If cap_usr_time the below fields can be used to compute the time
- * delta since time_enabled (in ns) using rdtsc or similar.
- *
- * u64 quot, rem;
- * u64 delta;
- *
- * quot = (cyc >> time_shift);
- * rem = cyc & (((u64)1 << time_shift) - 1);
- * delta = time_offset + quot * time_mult +
- * ((rem * time_mult) >> time_shift);
- *
- * Where time_offset,time_mult,time_shift and cyc are read in the
- * seqcount loop described above. This delta can then be added to
- * enabled and possible running (if index), improving the scaling:
- *
- * enabled += delta;
- * if (index)
- * running += delta;
- *
- * quot = count / running;
- * rem = count % running;
- * count = quot * enabled + (rem * enabled) / running;
- */
- __u16 time_shift;
- __u32 time_mult;
- __u64 time_offset;
- /*
- * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
- * from sample timestamps.
- *
- * time = timestamp - time_zero;
- * quot = time / time_mult;
- * rem = time % time_mult;
- * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
- *
- * And vice versa:
- *
- * quot = cyc >> time_shift;
- * rem = cyc & (((u64)1 << time_shift) - 1);
- * timestamp = time_zero + quot * time_mult +
- * ((rem * time_mult) >> time_shift);
- */
- __u64 time_zero;
- __u32 size; /* Header size up to __reserved[] fields. */
-
- /*
- * Hole for extension of the self monitor capabilities
- */
-
- __u8 __reserved[118*8+4]; /* align to 1k. */
-
- /*
- * Control data for the mmap() data buffer.
- *
- * User-space reading the @data_head value should issue an smp_rmb(),
- * after reading this value.
- *
- * When the mapping is PROT_WRITE the @data_tail value should be
-	 * written by userspace to reflect the last read data, after issuing
- * an smp_mb() to separate the data read from the ->data_tail store.
- * In this case the kernel will not over-write unread data.
- *
- * See perf_output_put_handle() for the data ordering.
- *
- * data_{offset,size} indicate the location and size of the perf record
- * buffer within the mmapped area.
- */
- __u64 data_head; /* head in the data section */
- __u64 data_tail; /* user-space written tail */
- __u64 data_offset; /* where the buffer starts */
- __u64 data_size; /* data buffer size */
-
- /*
- * AUX area is defined by aux_{offset,size} fields that should be set
- * by the userspace, so that
- *
- * aux_offset >= data_offset + data_size
- *
- * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
- *
- * Ring buffer pointers aux_{head,tail} have the same semantics as
- * data_{head,tail} and same ordering rules apply.
- */
- __u64 aux_head;
- __u64 aux_tail;
- __u64 aux_offset;
- __u64 aux_size;
-};
-
-#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
-#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
-#define PERF_RECORD_MISC_KERNEL (1 << 0)
-#define PERF_RECORD_MISC_USER (2 << 0)
-#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
-#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
-#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
-
-/*
- * Indicates that /proc/PID/maps parsing was truncated by a timeout.
- */
-#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
-/*
- * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
- * different events so can reuse the same bit position.
- * Ditto PERF_RECORD_MISC_SWITCH_OUT.
- */
-#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
-#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
-#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
-/*
- * Indicates that the content of PERF_SAMPLE_IP points to
- * the actual instruction that triggered the event. See also
- * perf_event_attr::precise_ip.
- */
-#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
-/*
- * Reserve the last bit to indicate some extended misc field
- */
-#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
-
-struct perf_event_header {
- __u32 type;
- __u16 misc;
- __u16 size;
-};
-
-enum perf_event_type {
-
- /*
- * If perf_event_attr.sample_id_all is set then all event types will
- * have the sample_type selected fields related to where/when
- * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
- * IDENTIFIER) described in PERF_RECORD_SAMPLE below, it will be stashed
- * just after the perf_event_header and the fields already present for
- * the existing fields, i.e. at the end of the payload. That way a newer
- * perf.data file will be supported by older perf tools, with these new
- * optional fields being ignored.
- *
- * struct sample_id {
- * { u32 pid, tid; } && PERF_SAMPLE_TID
- * { u64 time; } && PERF_SAMPLE_TIME
- * { u64 id; } && PERF_SAMPLE_ID
- * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
- * { u32 cpu, res; } && PERF_SAMPLE_CPU
- * { u64 id; } && PERF_SAMPLE_IDENTIFIER
- * } && perf_event_attr::sample_id_all
- *
- * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
- * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
- * relative to header.size.
- */
-
- /*
- * The MMAP events record the PROT_EXEC mappings so that we can
- * correlate userspace IPs to code. They have the following structure:
- *
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * u64 addr;
- * u64 len;
- * u64 pgoff;
- * char filename[];
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_MMAP = 1,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 id;
- * u64 lost;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_LOST = 2,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * char comm[];
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_COMM = 3,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_EXIT = 4,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u64 time;
- * u64 id;
- * u64 stream_id;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_THROTTLE = 5,
- PERF_RECORD_UNTHROTTLE = 6,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
- * u32 tid, ptid;
- * u64 time;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_FORK = 7,
-
- /*
- * struct {
- * struct perf_event_header header;
- * u32 pid, tid;
- *
- * struct read_format values;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_READ = 8,
-
- /*
- * struct {
- * struct perf_event_header header;
- *
- * #
- * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
- * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
- * # is fixed relative to header.
- * #
- *
- * { u64 id; } && PERF_SAMPLE_IDENTIFIER
- * { u64 ip; } && PERF_SAMPLE_IP
- * { u32 pid, tid; } && PERF_SAMPLE_TID
- * { u64 time; } && PERF_SAMPLE_TIME
- * { u64 addr; } && PERF_SAMPLE_ADDR
- * { u64 id; } && PERF_SAMPLE_ID
- * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
- * { u32 cpu, res; } && PERF_SAMPLE_CPU
- * { u64 period; } && PERF_SAMPLE_PERIOD
- *
- * { struct read_format values; } && PERF_SAMPLE_READ
- *
- * { u64 nr,
- * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
- *
- * #
- * # The RAW record below is opaque data wrt the ABI
- * #
- * # That is, the ABI doesn't make any promises wrt to
- * # the stability of its content, it may vary depending
- * # on event, hardware, kernel version and phase of
- * # the moon.
- * #
- * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
- * #
- *
- * { u32 size;
- * char data[size];}&& PERF_SAMPLE_RAW
- *
- * { u64 nr;
- * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
- *
- * { u64 abi; # enum perf_sample_regs_abi
- * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
- *
- * { u64 size;
- * char data[size];
- * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
- *
- * { u64 weight; } && PERF_SAMPLE_WEIGHT
- * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
- * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
- * { u64 abi; # enum perf_sample_regs_abi
- * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
- * };
- */
- PERF_RECORD_SAMPLE = 9,
-
- /*
- * The MMAP2 records are an augmented version of MMAP, they add
- * maj, min, ino numbers to be used to uniquely identify each mapping
- *
- * struct {
- * struct perf_event_header header;
- *
- * u32 pid, tid;
- * u64 addr;
- * u64 len;
- * u64 pgoff;
- * u32 maj;
- * u32 min;
- * u64 ino;
- * u64 ino_generation;
- * u32 prot, flags;
- * char filename[];
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_MMAP2 = 10,
-
- /*
- * Records that new data landed in the AUX buffer part.
- *
- * struct {
- * struct perf_event_header header;
- *
- * u64 aux_offset;
- * u64 aux_size;
- * u64 flags;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_AUX = 11,
-
- /*
- * Indicates that instruction trace has started
- *
- * struct {
- * struct perf_event_header header;
- * u32 pid;
- * u32 tid;
- * };
- */
- PERF_RECORD_ITRACE_START = 12,
-
- /*
- * Records the dropped/lost sample number.
- *
- * struct {
- * struct perf_event_header header;
- *
- * u64 lost;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_LOST_SAMPLES = 13,
-
- /*
- * Records a context switch in or out (flagged by
- * PERF_RECORD_MISC_SWITCH_OUT). See also
- * PERF_RECORD_SWITCH_CPU_WIDE.
- *
- * struct {
- * struct perf_event_header header;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_SWITCH = 14,
-
- /*
- * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
- * next_prev_tid that are the next (switching out) or previous
- * (switching in) pid/tid.
- *
- * struct {
- * struct perf_event_header header;
- * u32 next_prev_pid;
- * u32 next_prev_tid;
- * struct sample_id sample_id;
- * };
- */
- PERF_RECORD_SWITCH_CPU_WIDE = 15,
-
- PERF_RECORD_MAX, /* non-ABI */
-};
-
-#define PERF_MAX_STACK_DEPTH 127
-#define PERF_MAX_CONTEXTS_PER_STACK 8
-
-enum perf_callchain_context {
- PERF_CONTEXT_HV = (__u64)-32,
- PERF_CONTEXT_KERNEL = (__u64)-128,
- PERF_CONTEXT_USER = (__u64)-512,
-
- PERF_CONTEXT_GUEST = (__u64)-2048,
- PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
- PERF_CONTEXT_GUEST_USER = (__u64)-2560,
-
- PERF_CONTEXT_MAX = (__u64)-4095,
-};
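/*
 * A sketch walking a PERF_SAMPLE_CALLCHAIN payload, assuming the usual
 * perf-tools convention that entries at or above PERF_CONTEXT_MAX (as
 * unsigned values) are context markers rather than instruction
 * pointers. Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

static void walk_callchain(uint64_t nr, const uint64_t *ips)
{
	uint64_t i;

	for (i = 0; i < nr; i++) {
		if (ips[i] >= PERF_CONTEXT_MAX) {
			/* e.g. PERF_CONTEXT_KERNEL or PERF_CONTEXT_USER */
			printf("context marker: %#llx\n",
				(unsigned long long) ips[i]);
			continue;
		}
		printf("  ip: %#llx\n", (unsigned long long) ips[i]);
	}
}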
-
-/**
- * PERF_RECORD_AUX::flags bits
- */
-#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
-#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
-
-#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
-#define PERF_FLAG_FD_OUTPUT (1UL << 1)
-#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
-#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
-
-union perf_mem_data_src {
- __u64 val;
- struct {
- __u64 mem_op:5, /* type of opcode */
- mem_lvl:14, /* memory hierarchy level */
- mem_snoop:5, /* snoop mode */
- mem_lock:2, /* lock instr */
- mem_dtlb:7, /* tlb access */
- mem_rsvd:31;
- };
-};
-
-/* type of opcode (load/store/prefetch,code) */
-#define PERF_MEM_OP_NA 0x01 /* not available */
-#define PERF_MEM_OP_LOAD 0x02 /* load instruction */
-#define PERF_MEM_OP_STORE 0x04 /* store instruction */
-#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
-#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
-#define PERF_MEM_OP_SHIFT 0
-
-/* memory hierarchy (memory level, hit or miss) */
-#define PERF_MEM_LVL_NA 0x01 /* not available */
-#define PERF_MEM_LVL_HIT 0x02 /* hit level */
-#define PERF_MEM_LVL_MISS 0x04 /* miss level */
-#define PERF_MEM_LVL_L1 0x08 /* L1 */
-#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */
-#define PERF_MEM_LVL_L2 0x20 /* L2 */
-#define PERF_MEM_LVL_L3 0x40 /* L3 */
-#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */
-#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */
-#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */
-#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */
-#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */
-#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
-#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
-#define PERF_MEM_LVL_SHIFT 5
-
-/* snoop mode */
-#define PERF_MEM_SNOOP_NA 0x01 /* not available */
-#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
-#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */
-#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */
-#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
-#define PERF_MEM_SNOOP_SHIFT 19
-
-/* locked instruction */
-#define PERF_MEM_LOCK_NA 0x01 /* not available */
-#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
-#define PERF_MEM_LOCK_SHIFT 24
-
-/* TLB access */
-#define PERF_MEM_TLB_NA 0x01 /* not available */
-#define PERF_MEM_TLB_HIT 0x02 /* hit level */
-#define PERF_MEM_TLB_MISS 0x04 /* miss level */
-#define PERF_MEM_TLB_L1 0x08 /* L1 */
-#define PERF_MEM_TLB_L2 0x10 /* L2 */
-#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
-#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
-#define PERF_MEM_TLB_SHIFT 26
-
-#define PERF_MEM_S(a, s) \
- (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
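/*
 * Composition example for the PERF_MEM_S() helper: encode a load that
 * missed in L1. Illustrative only.
 */
static const union perf_mem_data_src example_data_src = {
	.val = PERF_MEM_S(OP, LOAD)
	     | PERF_MEM_S(LVL, MISS)
	     | PERF_MEM_S(LVL, L1),
};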
-
-/*
- * single taken branch record layout:
- *
- * from: source instruction (may not always be a branch insn)
- * to: branch target
- * mispred: branch target was mispredicted
- * predicted: branch target was predicted
- *
- * support for mispred, predicted is optional. In case it
- * is not supported mispred = predicted = 0.
- *
- * in_tx: running in a hardware transaction
- * abort: aborting a hardware transaction
- * cycles: cycles from last branch (or 0 if not supported)
- */
-struct perf_branch_entry {
- __u64 from;
- __u64 to;
- __u64 mispred:1, /* target mispredicted */
- predicted:1,/* target predicted */
- in_tx:1, /* in transaction */
- abort:1, /* transaction abort */
- cycles:16, /* cycle count to last branch */
- reserved:44;
-};
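/*
 * A hedged decoding sketch, not part of the original header: printing
 * one entry of a PERF_SAMPLE_BRANCH_STACK sample body (assumes
 * <stdio.h>; the function name is hypothetical).
 */
static inline void print_branch_entry(const struct perf_branch_entry *e)
{
	printf("0x%llx -> 0x%llx%s%s\n",
		(unsigned long long) e->from,
		(unsigned long long) e->to,
		e->mispred ? " [mispredicted]" : "",
		e->abort ? " [tx abort]" : "");
}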
-
-#endif /* _UAPI_LINUX_PERF_EVENT_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Internal header for Lock-Free RCU Hash Table
- */
-
-#ifndef _LTTNG_UST_RCULFHASH_INTERNAL_H
-#define _LTTNG_UST_RCULFHASH_INTERNAL_H
-
-#include "rculfhash.h"
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-
-#ifdef DEBUG
-#define dbg_printf(fmt, args...) printf("[debug lttng-ust rculfhash] " fmt, ## args)
-#else
-#define dbg_printf(fmt, args...) \
-do { \
- /* do nothing but check printf format */ \
- if (0) \
- printf("[debug lttng-ust rculfhash] " fmt, ## args); \
-} while (0)
-#endif
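/*
 * The "if (0)" branch above is a common trick: the call is never
 * executed, but the compiler still type-checks the printf format
 * string in non-DEBUG builds. Illustrative use (hypothetical message):
 *
 *	dbg_printf("bucket index %lu order %lu\n", index, order);
 */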
-
-#if (CAA_BITS_PER_LONG == 32)
-#define MAX_TABLE_ORDER 32
-#else
-#define MAX_TABLE_ORDER 64
-#endif
-
-#define MAX_CHUNK_TABLE (1UL << 10)
-
-#ifndef min
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#endif
-
-#ifndef max
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
-/*
- * lttng_ust_lfht: Top-level data structure representing a lock-free hash
- * table. Defined in the implementation file to make it an opaque
- * cookie to users.
- *
- * The fields used in fast-paths are placed near the end of the
- * structure, because we need to have a variable-sized union to contain
- * the mm plugin fields, which are used in the fast path.
- */
-struct lttng_ust_lfht {
- /* Initial configuration items */
- unsigned long max_nr_buckets;
- const struct lttng_ust_lfht_mm_type *mm; /* memory management plugin */
- const struct rcu_flavor_struct *flavor; /* RCU flavor */
-
- /*
- * We need to put the work threads offline (QSBR) when taking this
- * mutex, because we use synchronize_rcu within this mutex critical
- * section, which waits on read-side critical sections, and could
- * therefore cause grace-period deadlock if we hold off RCU G.P.
- * completion.
- */
- pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
- unsigned int in_progress_destroy;
- unsigned long resize_target;
- int resize_initiated;
-
- /*
- * Variables needed for add and remove fast-paths.
- */
- int flags;
- unsigned long min_alloc_buckets_order;
- unsigned long min_nr_alloc_buckets;
-
- /*
- * Variables needed for the lookup, add and remove fast-paths.
- */
- unsigned long size; /* always a power of 2, shared (RCU) */
- /*
- * bucket_at pointer is kept here to skip the extra level of
- * dereference needed to get to "mm" (this is a fast-path).
- */
- struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
- unsigned long index);
- /*
- * Dynamic length "tbl_chunk" needs to be at the end of
- * lttng_ust_lfht.
- */
- union {
- /*
- * Contains the per order-index-level bucket node table.
- * The size of each bucket node table is half the number
- * of hashes contained in this order (except for order 0).
- * The minimum allocation buckets size parameter allows
- * combining the bucket node arrays of the lowermost
- * levels to improve cache locality for small index orders.
- */
- struct lttng_ust_lfht_node *tbl_order[MAX_TABLE_ORDER];
-
- /*
- * Contains the bucket node chunks. The size of each
- * bucket node chunk is ->min_alloc_size (we avoid
- * allocating chunks of different sizes). Chunks improve
- * cache locality for small index orders, and are more
- * friendly with environments where allocation of large
- * contiguous memory areas is challenging due to memory
- * fragmentation concerns or inability to use virtual
- * memory addressing.
- */
- struct lttng_ust_lfht_node *tbl_chunk[0];
-
- /*
- * Memory mapping with room for all possible buckets.
- * Their memory is allocated when needed.
- */
- struct lttng_ust_lfht_node *tbl_mmap;
- };
- /*
- * End of variables needed for the lookup, add and remove
- * fast-paths.
- */
-};
-
-extern unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
- __attribute__((visibility("hidden")));
-
-extern int lttng_ust_lfht_get_count_order_u32(uint32_t x)
- __attribute__((visibility("hidden")));
-
-extern int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
- __attribute__((visibility("hidden")));
-
-#ifdef POISON_FREE
-#define poison_free(ptr) \
- do { \
- if (ptr) { \
- memset(ptr, 0x42, sizeof(*(ptr))); \
- free(ptr); \
- } \
- } while (0)
-#else
-#define poison_free(ptr) free(ptr)
-#endif
-
-static inline
-struct lttng_ust_lfht *__default_alloc_lttng_ust_lfht(
- const struct lttng_ust_lfht_mm_type *mm,
- unsigned long lttng_ust_lfht_size,
- unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- struct lttng_ust_lfht *ht;
-
- ht = calloc(1, lttng_ust_lfht_size);
- assert(ht);
-
- ht->mm = mm;
- ht->bucket_at = mm->bucket_at;
- ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
- ht->min_alloc_buckets_order =
- lttng_ust_lfht_get_count_order_ulong(min_nr_alloc_buckets);
- ht->max_nr_buckets = max_nr_buckets;
-
- return ht;
-}
-
-#endif /* _LTTNG_UST_RCULFHASH_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Chunk based memory management for Lock-Free RCU Hash Table
- */
-
-#include <stddef.h>
-#include "rculfhash-internal.h"
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- ht->tbl_chunk[0] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_chunk[0]);
- } else if (order > ht->min_alloc_buckets_order) {
- unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
-
- for (i = len; i < 2 * len; i++) {
- ht->tbl_chunk[i] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_chunk[i]);
- }
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0)
- poison_free(ht->tbl_chunk[0]);
- else if (order > ht->min_alloc_buckets_order) {
- unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
-
- for (i = len; i < 2 * len; i++)
- poison_free(ht->tbl_chunk[i]);
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-static
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- unsigned long chunk, offset;
-
- chunk = index >> ht->min_alloc_buckets_order;
- offset = index & (ht->min_nr_alloc_buckets - 1);
- return &ht->tbl_chunk[chunk][offset];
-}
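/*
 * Worked example with illustrative values: assuming
 * min_nr_alloc_buckets = 8 (min_alloc_buckets_order = 3), bucket
 * index 21 decomposes into chunk = 21 >> 3 = 2 and
 * offset = 21 & 7 = 5, i.e. &ht->tbl_chunk[2][5].
 */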
-
-static
-struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- unsigned long nr_chunks, lttng_ust_lfht_size;
-
- min_nr_alloc_buckets = max(min_nr_alloc_buckets,
- max_nr_buckets / MAX_CHUNK_TABLE);
- nr_chunks = max_nr_buckets / min_nr_alloc_buckets;
- lttng_ust_lfht_size = offsetof(struct lttng_ust_lfht, tbl_chunk) +
- sizeof(struct lttng_ust_lfht_node *) * nr_chunks;
- lttng_ust_lfht_size = max(lttng_ust_lfht_size, sizeof(struct lttng_ust_lfht));
-
- return __default_alloc_lttng_ust_lfht(
- <tng_ust_lfht_mm_chunk, lttng_ust_lfht_size,
- min_nr_alloc_buckets, max_nr_buckets);
-}
-
-const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk = {
- .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
- .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
- .free_bucket_table = lttng_ust_lfht_free_bucket_table,
- .bucket_at = bucket_at,
-};
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * mmap/reservation based memory management for Lock-Free RCU Hash Table
- */
-
-#include <unistd.h>
-#include <stdio.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <sys/mman.h>
-#include "rculfhash-internal.h"
-
-#ifndef MAP_ANONYMOUS
-#define MAP_ANONYMOUS MAP_ANON
-#endif
-
-/*
- * The allocation scheme used by the mmap based RCU hash table is to make a
- * large inaccessible mapping to reserve memory without allocating it.
- * Then smaller chunks are allocated by overlapping read/write mappings which
- * do allocate memory. Deallocation is done by an overlapping inaccessible
- * mapping.
- *
- * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
- * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
- * support overlapping mappings.
- *
- * An alternative to the overlapping mappings is to use mprotect to change the
- * protection on chunks of the large mapping, read/write to allocate and none
- * to deallocate. This works perfectly on Cygwin and Solaris, but on Linux an
- * additional call to madvise is required to deallocate, and it simply does
- * not work on macOS.
- *
- * For this reason, we keep the original scheme on all platforms except Cygwin.
- */
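/*
 * The scheme in a nutshell, as a sketch (illustrative sizes, error
 * handling elided):
 *
 *	char *base = mmap(NULL, 1UL << 30, PROT_NONE,
 *		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);		// reserve
 *	mmap(base, 4096, PROT_READ | PROT_WRITE,
 *		MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);	// allocate
 *	mmap(base, 4096, PROT_NONE,
 *		MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);	// deallocate
 */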
-
-
-/* Reserve inaccessible memory space without allocating it */
-static
-void *memory_map(size_t length)
-{
- void *ret;
-
- ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
- if (ret == MAP_FAILED) {
- perror("mmap");
- abort();
- }
- return ret;
-}
-
-static
-void memory_unmap(void *ptr, size_t length)
-{
- if (munmap(ptr, length)) {
- perror("munmap");
- abort();
- }
-}
-
-#ifdef __CYGWIN__
-/* Set protection to read/write to allocate a memory chunk */
-static
-void memory_populate(void *ptr, size_t length)
-{
- if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
- perror("mprotect");
- abort();
- }
-}
-
-/* Set protection to none to deallocate a memory chunk */
-static
-void memory_discard(void *ptr, size_t length)
-{
- if (mprotect(ptr, length, PROT_NONE)) {
- perror("mprotect");
- abort();
- }
-}
-
-#else /* __CYGWIN__ */
-
-static
-void memory_populate(void *ptr, size_t length)
-{
- if (mmap(ptr, length, PROT_READ | PROT_WRITE,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0) != ptr) {
- perror("mmap");
- abort();
- }
-}
-
-/*
- * Discard the garbage memory so the system does not save it when trying to
- * swap it out, while keeping the range reserved and inaccessible.
- */
-static
-void memory_discard(void *ptr, size_t length)
-{
- if (mmap(ptr, length, PROT_NONE,
- MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
- -1, 0) != ptr) {
- perror("mmap");
- abort();
- }
-}
-#endif /* __CYGWIN__ */
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
- /* small table */
- ht->tbl_mmap = calloc(ht->max_nr_buckets,
- sizeof(*ht->tbl_mmap));
- assert(ht->tbl_mmap);
- return;
- }
- /* large table */
- ht->tbl_mmap = memory_map(ht->max_nr_buckets
- * sizeof(*ht->tbl_mmap));
- memory_populate(ht->tbl_mmap,
- ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
- } else if (order > ht->min_alloc_buckets_order) {
- /* large table */
- unsigned long len = 1UL << (order - 1);
-
- assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
- memory_populate(ht->tbl_mmap + len,
- len * sizeof(*ht->tbl_mmap));
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
- /* small table */
- poison_free(ht->tbl_mmap);
- return;
- }
- /* large table */
- memory_unmap(ht->tbl_mmap,
- ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
- } else if (order > ht->min_alloc_buckets_order) {
- /* large table */
- unsigned long len = 1UL << (order - 1);
-
- assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
- memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-static
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- return &ht->tbl_mmap[index];
-}
-
-static
-struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- unsigned long page_bucket_size;
-
- page_bucket_size = getpagesize() / sizeof(struct lttng_ust_lfht_node);
- if (max_nr_buckets <= page_bucket_size) {
- /* small table */
- min_nr_alloc_buckets = max_nr_buckets;
- } else {
- /* large table */
- min_nr_alloc_buckets = max(min_nr_alloc_buckets,
- page_bucket_size);
- }
-
- return __default_alloc_lttng_ust_lfht(
- <tng_ust_lfht_mm_mmap, sizeof(struct lttng_ust_lfht),
- min_nr_alloc_buckets, max_nr_buckets);
-}
-
-const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap = {
- .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
- .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
- .free_bucket_table = lttng_ust_lfht_free_bucket_table,
- .bucket_at = bucket_at,
-};
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Order based memory management for Lock-Free RCU Hash Table
- */
-
-#include <rculfhash-internal.h>
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0) {
- ht->tbl_order[0] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_order[0]);
- } else if (order > ht->min_alloc_buckets_order) {
- ht->tbl_order[order] = calloc(1UL << (order - 1),
- sizeof(struct lttng_ust_lfht_node));
- assert(ht->tbl_order[order]);
- }
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- if (order == 0)
- poison_free(ht->tbl_order[0]);
- else if (order > ht->min_alloc_buckets_order)
- poison_free(ht->tbl_order[order]);
- /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
-}
-
-static
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- unsigned long order;
-
- if (index < ht->min_nr_alloc_buckets) {
- dbg_printf("bucket index %lu order 0 aridx 0\n", index);
- return &ht->tbl_order[0][index];
- }
- /*
- * equivalent to lttng_ust_lfht_get_count_order_ulong(index + 1), but
- * optimizes away the non-existing 0 special-case for
- * lttng_ust_lfht_get_count_order_ulong.
- */
- order = lttng_ust_lfht_fls_ulong(index);
- dbg_printf("bucket index %lu order %lu aridx %lu\n",
- index, order, index & ((1UL << (order - 1)) - 1));
- return &ht->tbl_order[order][index & ((1UL << (order - 1)) - 1)];
-}
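/*
 * Worked example (assuming min_nr_alloc_buckets = 1): index 5 yields
 * order = fls(5) = 3 and aridx = 5 & ((1 << 2) - 1) = 1, so the bucket
 * lives at tbl_order[3][1]; the order-3 table holds hashes 4..7.
 */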
-
-static
-struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
-{
- return __default_alloc_lttng_ust_lfht(
- <tng_ust_lfht_mm_order, sizeof(struct lttng_ust_lfht),
- min_nr_alloc_buckets, max_nr_buckets);
-}
-
-const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order = {
- .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
- .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
- .free_bucket_table = lttng_ust_lfht_free_bucket_table,
- .bucket_at = bucket_at,
-};
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Userspace RCU library - Lock-Free Resizable RCU Hash Table
- */
-
-/*
- * Based on the following articles:
- * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
- * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
- * - Michael, M. M. High performance dynamic lock-free hash tables
- * and list-based sets. In Proceedings of the fourteenth annual ACM
- * symposium on Parallel algorithms and architectures, ACM Press,
- * (2002), 73-82.
- *
- * Some specificities of this Lock-Free Resizable RCU Hash Table
- * implementation:
- *
- * - RCU read-side critical section allows readers to perform hash
- * table lookups, as well as traversals, and use the returned objects
- * safely by allowing memory reclaim to take place only after a grace
- * period.
- * - Add and remove operations are lock-free, and do not need to
- * allocate memory. They need to be executed within RCU read-side
- * critical section to ensure the objects they read are valid and to
- * deal with the cmpxchg ABA problem.
- * - add and add_unique operations are supported. add_unique checks if
- * the node key already exists in the hash table. It ensures not to
- * populate a duplicate key if the node key already exists in the hash
- * table.
- * - The resize operation executes concurrently with
- * add/add_unique/add_replace/remove/lookup/traversal.
- * - Hash table nodes are contained within a split-ordered list. This
- * list is ordered by incrementing reversed-bits-hash value.
- * - An index of bucket nodes is kept. These bucket nodes are the hash
- * table "buckets". These buckets are internal nodes that allow to
- * perform a fast hash lookup, similarly to a skip list. These
- * buckets are chained together in the split-ordered list, which
- * allows recursive expansion by inserting new buckets between the
- * existing buckets. The split-ordered list allows adding new buckets
- * between existing buckets as the table needs to grow.
- * - The resize operation for small tables only allows expanding the
- * hash table. It is triggered automatically by detecting long chains
- * in the add operation.
- * - The resize operation for larger tables (also available through an
- * API) allows both expanding and shrinking the hash table.
- * - Split-counters are used to keep track of the number of
- * nodes within the hash table for automatic resize triggering.
- * - Resize operation initiated by long chain detection is executed by a
- * worker thread, which keeps lock-freedom of add and remove.
- * - Resize operations are protected by a mutex.
- * - The removal operation is split in two parts: first, a "removed"
- * flag is set in the next pointer within the node to remove. Then,
- * a "garbage collection" is performed in the bucket containing the
- * removed node (from the start of the bucket up to the removed node).
- * All encountered nodes with "removed" flag set in their next
- * pointers are removed from the linked-list. If the cmpxchg used for
- * removal fails (due to concurrent garbage-collection or concurrent
- * add), we retry from the beginning of the bucket. This ensures that
- * the node with "removed" flag set is removed from the hash table
- * (not visible to lookups anymore) before the RCU read-side critical
- * section held across removal ends. Furthermore, this ensures that
- * the node with "removed" flag set is removed from the linked-list
- * before its memory is reclaimed. After setting the "removal" flag,
- * only the thread whose removal operation is the first to set the
- * "removal owner" flag (with an xchg) in the node's next pointer is
- * considered to have succeeded in its removal (and thus owns the node
- * to reclaim).
- * Because we garbage-collect starting from an invariant node (the
- * start-of-bucket bucket node) up to the "removed" node (or find a
- * reverse-hash that is higher), we are sure that a successful
- * traversal of the chain leads to a chain that is present in the
- * linked-list (the start node is never removed) and that it does not
- * contain the "removed" node anymore, even if concurrent delete/add
- * operations are changing the structure of the list concurrently.
- * - The add operations perform garbage collection of buckets if they
- * encounter nodes with removed flag set in the bucket where they want
- * to add their new node. This ensures lock-freedom of the add operation
- * by helping the remover unlink nodes from the list rather than waiting
- * for it to do so.
- * - There are three memory backends for the hash table buckets: the
- * "order table", the "chunks", and the "mmap".
- * - These bucket containers contain a compact version of the hash table
- * nodes.
- * - The RCU "order table":
- * - has a first level table indexed by log2(hash index) which is
- * copied and expanded by the resize operation. This order table
- * allows finding the "bucket node" tables.
- * - There is one bucket node table per hash index order. The size of
- * each bucket node table is half the number of hashes contained in
- * this order (except for order 0).
- * - The RCU "chunks" is best suited for close interaction with a page
- * allocator. It uses a linear array as index to "chunks" containing
- * each the same number of buckets.
- * - The RCU "mmap" memory backend uses a single memory map to hold
- * all buckets.
- * - synchronize_rcu is used to garbage-collect the old bucket node table.
- *
- * Ordering Guarantees:
- *
- * To discuss these guarantees, we first define "read" operation as any
- * of the basic lttng_ust_lfht_lookup, lttng_ust_lfht_next_duplicate,
- * lttng_ust_lfht_first, lttng_ust_lfht_next operation, as well as
- * lttng_ust_lfht_add_unique (failure).
- *
- * We define "read traversal" operation as any of the following
- * group of operations
- * - lttng_ust_lfht_lookup followed by iteration with lttng_ust_lfht_next_duplicate
- * (and/or lttng_ust_lfht_next, although less common).
- * - lttng_ust_lfht_add_unique (failure) followed by iteration with
- * lttng_ust_lfht_next_duplicate (and/or lttng_ust_lfht_next, although less
- * common).
- * - lttng_ust_lfht_first followed by iteration with lttng_ust_lfht_next (and/or
- * lttng_ust_lfht_next_duplicate, although less common).
- *
- * We define "write" operations as any of lttng_ust_lfht_add, lttng_ust_lfht_replace,
- * lttng_ust_lfht_add_unique (success), lttng_ust_lfht_add_replace, lttng_ust_lfht_del.
- *
- * When lttng_ust_lfht_add_unique succeeds (returns the node passed as
- * parameter), it acts as a "write" operation. When lttng_ust_lfht_add_unique
- * fails (returns a node different from the one passed as parameter), it
- * acts as a "read" operation. A lttng_ust_lfht_add_unique failure is a
- * lttng_ust_lfht_lookup "read" operation, therefore, any ordering guarantee
- * referring to "lookup" imply any of "lookup" or lttng_ust_lfht_add_unique
- * (failure).
- *
- * We define "prior" and "later" node as nodes observable by reads and
- * read traversals respectively before and after a write or sequence of
- * write operations.
- *
- * Hash-table operations are often cascaded, for example, the pointer
- * returned by a lttng_ust_lfht_lookup() might be passed to a lttng_ust_lfht_next(),
- * whose return value might in turn be passed to another hash-table
- * operation. This entire cascaded series of operations must be enclosed
- * by a pair of matching rcu_read_lock() and rcu_read_unlock()
- * operations.
- *
- * The following ordering guarantees are offered by this hash table:
- *
- * A.1) "read" after "write": if there is ordering between a write and a
- * later read, then the read is guaranteed to see the write or some
- * later write.
- * A.2) "read traversal" after "write": given that there is dependency
- * ordering between reads in a "read traversal", if there is
- * ordering between a write and the first read of the traversal,
- * then the "read traversal" is guaranteed to see the write or
- * some later write.
- * B.1) "write" after "read": if there is ordering between a read and a
- * later write, then the read will never see the write.
- * B.2) "write" after "read traversal": given that there is dependency
- * ordering between reads in a "read traversal", if there is
- * ordering between the last read of the traversal and a later
- * write, then the "read traversal" will never see the write.
- * C) "write" while "read traversal": if a write occurs during a "read
- * traversal", the traversal may, or may not, see the write.
- * D.1) "write" after "write": if there is ordering between a write and
- * a later write, then the later write is guaranteed to see the
- * effects of the first write.
- * D.2) Concurrent "write" pairs: The system will assign an arbitrary
- * order to any pair of concurrent conflicting writes.
- * Non-conflicting writes (for example, to different keys) are
- * unordered.
- * E) If a grace period separates a "del" or "replace" operation
- * and a subsequent operation, then that subsequent operation is
- * guaranteed not to see the removed item.
- * F) Uniqueness guarantee: given a hash table that does not contain
- * duplicate items for a given key, there will only be one item in
- * the hash table after an arbitrary sequence of add_unique and/or
- * add_replace operations. Note, however, that a pair of
- * concurrent read operations might well access two different items
- * with that key.
- * G.1) If a pair of lookups for a given key are ordered (e.g. by a
- * memory barrier), then the second lookup will return the same
- * node as the previous lookup, or some later node.
- * G.2) A "read traversal" that starts after the end of a prior "read
- * traversal" (ordered by memory barriers) is guaranteed to see the
- * same nodes as the previous traversal, or some later nodes.
- * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
- * example, if a pair of reads to the same key run concurrently
- * with an insertion of that same key, the reads remain unordered
- * regardless of their return values. In other words, you cannot
- * rely on the values returned by the reads to deduce ordering.
- *
- * Progress guarantees:
- *
- * * Reads are wait-free. These operations always move forward in the
- * hash table linked list, and this list has no loop.
- * * Writes are lock-free. Any retry loop performed by a write operation
- * is triggered by progress made within another update operation.
- *
- * Bucket node tables:
- *
- * hash table hash table the last all bucket node tables
- * order size bucket node 0 1 2 3 4 5 6(index)
- * table size
- * 0 1 1 1
- * 1 2 1 1 1
- * 2 4 2 1 1 2
- * 3 8 4 1 1 2 4
- * 4 16 8 1 1 2 4 8
- * 5 32 16 1 1 2 4 8 16
- * 6 64 32 1 1 2 4 8 16 32
- *
- * When growing/shrinking, we only focus on the last bucket node table,
- * whose size is (!order ? 1 : (1 << (order - 1))).
- *
- * Example for growing/shrinking:
- * grow hash table from order 5 to 6: init the index=6 bucket node table
- * shrink hash table from order 6 to 5: fini the index=6 bucket node table
- *
- * A bit of ascii art explanation:
- *
- * The order index is the off-by-one compared to the actual power of 2
- * because we use index 0 to deal with the 0 special-case.
- *
- * This shows the nodes for a small table ordered by reversed bits:
- *
- * bits reverse
- * 0 000 000
- * 4 100 001
- * 2 010 010
- * 6 110 011
- * 1 001 100
- * 5 101 101
- * 3 011 110
- * 7 111 111
- *
- * This shows the nodes in order of non-reversed bits, linked by
- * reversed-bit order.
- *
- * order bits reverse
- * 0 0 000 000
- * 1 | 1 001 100 <-
- * 2 | | 2 010 010 <- |
- * | | | 3 011 110 | <- |
- * 3 -> | | | 4 100 001 | |
- * -> | | 5 101 101 |
- * -> | 6 110 011
- * -> 7 111 111
- */
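/*
 * A small numeric illustration of the invariant above (values chosen
 * for this note): growing from size 4 to size 8 only links buckets
 * 4..7 into the existing chain. Bucket 5 (101b, reversed 101b) slots
 * between bucket 1 (001b, reversed 100b) and bucket 3 (011b, reversed
 * 110b), so no existing node ever moves.
 */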
-
-/*
- * Note on port to lttng-ust: auto-resize and accounting features are
- * removed.
- */
-
-#define _LGPL_SOURCE
-#include <stdlib.h>
-#include <errno.h>
-#include <assert.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <string.h>
-#include <sched.h>
-#include <unistd.h>
-
-#include <lttng/ust-arch.h>
-#include <lttng/urcu/pointer.h>
-#include <urcu/arch.h>
-#include <urcu/uatomic.h>
-#include <urcu/compiler.h>
-#include "rculfhash.h"
-#include "rculfhash-internal.h"
-#include <stdio.h>
-#include <pthread.h>
-#include <signal.h>
-
-/*
- * Split-counters lazily update the global counter every 1024
- * additions/removals. They automatically keep track of whether a
- * resize is required. We use the bucket length as an indicator of the
- * need to expand, for small tables and for machines lacking per-cpu
- * data support.
- */
-#define COUNT_COMMIT_ORDER 10
-
-/*
- * Define the minimum table size.
- */
-#define MIN_TABLE_ORDER 0
-#define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
-
-/*
- * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
- */
-#define MIN_PARTITION_PER_THREAD_ORDER 12
-#define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
-
-/*
- * The removed flag needs to be updated atomically with the pointer.
- * It indicates that no node must attach to the node scheduled for
- * removal, and that node garbage collection must be performed.
- * The bucket flag does not need to be updated atomically with the
- * pointer, but it is added as a pointer low bit flag to save space.
- * The "removal owner" flag is used to detect which of the "del"
- * operations that set the "removed" flag gets to return the removed
- * node to its caller. Note that the replace operation does not need to
- * interact with the "removal owner" flag, because it validates that
- * the "removed" flag is not set before performing its cmpxchg.
- */
-#define REMOVED_FLAG (1UL << 0)
-#define BUCKET_FLAG (1UL << 1)
-#define REMOVAL_OWNER_FLAG (1UL << 2)
-#define FLAGS_MASK ((1UL << 3) - 1)
-
-/* Value of the end pointer. Should not interact with flags. */
-#define END_VALUE NULL
-
-/*
- * ht_items_count: Split-counters counting the number of node additions
- * and removals in the table. Only used if the LTTNG_UST_LFHT_ACCOUNTING flag
- * is set at hash table creation.
- *
- * These are free-running counters, never reset to zero. They count the
- * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
- * operations to update the global counter. We choose a power-of-2 value
- * for the trigger to deal with 32 or 64-bit overflow of the counter.
- */
-struct ht_items_count {
- unsigned long add, del;
-} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-
-#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
-
-static
-void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
-{
- iter->lfht = ht;
-}
-
-#define lttng_ust_lfht_iter_debug_assert(...) assert(__VA_ARGS__)
-
-#else
-
-static
-void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht __attribute__((unused)),
- struct lttng_ust_lfht_iter *iter __attribute__((unused)))
-{
-}
-
-#define lttng_ust_lfht_iter_debug_assert(...)
-
-#endif
-
-/*
- * Algorithm to reverse bits in a word by lookup table, extended to
- * 64-bit words.
- * Source:
- * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
- * Originally from Public Domain.
- */
-
-static const uint8_t BitReverseTable256[256] =
-{
-#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
-#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
-#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
- R6(0), R6(2), R6(1), R6(3)
-};
-#undef R2
-#undef R4
-#undef R6
-
-static
-uint8_t bit_reverse_u8(uint8_t v)
-{
- return BitReverseTable256[v];
-}
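/*
 * Sanity examples: bit_reverse_u8(0x01) == 0x80, and
 * bit_reverse_u8(0xA0) == 0x05 (10100000b -> 00000101b).
 */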
-
-#if (CAA_BITS_PER_LONG == 32)
-static
-uint32_t bit_reverse_u32(uint32_t v)
-{
- return ((uint32_t) bit_reverse_u8(v) << 24) |
- ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
- ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
- ((uint32_t) bit_reverse_u8(v >> 24));
-}
-#else
-static
-uint64_t bit_reverse_u64(uint64_t v)
-{
- return ((uint64_t) bit_reverse_u8(v) << 56) |
- ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
- ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
- ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
- ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
- ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
- ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
- ((uint64_t) bit_reverse_u8(v >> 56));
-}
-#endif
-
-static
-unsigned long bit_reverse_ulong(unsigned long v)
-{
-#if (CAA_BITS_PER_LONG == 32)
- return bit_reverse_u32(v);
-#else
- return bit_reverse_u64(v);
-#endif
-}
-
-/*
- * fls: returns the position of the most significant bit.
- * Returns 0 if no bit is set, else returns the position of the most
- * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
- */
-#if defined(LTTNG_UST_ARCH_X86)
-static inline
-unsigned int fls_u32(uint32_t x)
-{
- int r;
-
- __asm__ ("bsrl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n\t"
- "1:\n\t"
- : "=r" (r) : "rm" (x));
- return r + 1;
-}
-#define HAS_FLS_U32
-#endif
-
-#if defined(LTTNG_UST_ARCH_AMD64)
-static inline
-unsigned int fls_u64(uint64_t x)
-{
- long r;
-
- __asm__ ("bsrq %1,%0\n\t"
- "jnz 1f\n\t"
- "movq $-1,%0\n\t"
- "1:\n\t"
- : "=r" (r) : "rm" (x));
- return r + 1;
-}
-#define HAS_FLS_U64
-#endif
-
-#ifndef HAS_FLS_U64
-static
-unsigned int fls_u64(uint64_t x)
- __attribute__((unused));
-static
-unsigned int fls_u64(uint64_t x)
-{
- unsigned int r = 64;
-
- if (!x)
- return 0;
-
- if (!(x & 0xFFFFFFFF00000000ULL)) {
- x <<= 32;
- r -= 32;
- }
- if (!(x & 0xFFFF000000000000ULL)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF00000000000000ULL)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF000000000000000ULL)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC000000000000000ULL)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x8000000000000000ULL)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-#endif
-
-#ifndef HAS_FLS_U32
-static
-unsigned int fls_u32(uint32_t x)
- __attribute__((unused));
-static
-unsigned int fls_u32(uint32_t x)
-{
- unsigned int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xFFFF0000U)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF000000U)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF0000000U)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC0000000U)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000U)) {
- x <<= 1;
- r -= 1;
- }
- return r;
-}
-#endif
-
-unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
-{
-#if (CAA_BITS_PER_LONG == 32)
- return fls_u32(x);
-#else
- return fls_u64(x);
-#endif
-}
-
-/*
- * Return the minimum order for which x <= (1UL << order).
- * Return -1 if x is 0.
- */
-int lttng_ust_lfht_get_count_order_u32(uint32_t x)
-{
- if (!x)
- return -1;
-
- return fls_u32(x - 1);
-}
-
-/*
- * Return the minimum order for which x <= (1UL << order).
- * Return -1 if x is 0.
- */
-int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
-{
- if (!x)
- return -1;
-
- return lttng_ust_lfht_fls_ulong(x - 1);
-}
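/*
 * Worked examples: lttng_ust_lfht_get_count_order_u32(1) == fls_u32(0)
 * == 0 (since 1 <= 1 << 0), and lttng_ust_lfht_get_count_order_u32(5)
 * == fls_u32(4) == 3 (since 5 <= 1 << 3).
 */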
-
-static
-struct lttng_ust_lfht_node *clear_flag(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
-}
-
-static
-int is_removed(const struct lttng_ust_lfht_node *node)
-{
- return ((unsigned long) node) & REMOVED_FLAG;
-}
-
-static
-int is_bucket(struct lttng_ust_lfht_node *node)
-{
- return ((unsigned long) node) & BUCKET_FLAG;
-}
-
-static
-struct lttng_ust_lfht_node *flag_bucket(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
-}
-
-static
-int is_removal_owner(struct lttng_ust_lfht_node *node)
-{
- return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
-}
-
-static
-struct lttng_ust_lfht_node *flag_removal_owner(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
-}
-
-static
-struct lttng_ust_lfht_node *flag_removed_or_removal_owner(struct lttng_ust_lfht_node *node)
-{
- return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
-}
-
-static
-struct lttng_ust_lfht_node *get_end(void)
-{
- return (struct lttng_ust_lfht_node *) END_VALUE;
-}
-
-static
-int is_end(struct lttng_ust_lfht_node *node)
-{
- return clear_flag(node) == (struct lttng_ust_lfht_node *) END_VALUE;
-}
-
-static
-void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- return ht->mm->alloc_bucket_table(ht, order);
-}
-
-/*
- * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
- * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
- * lfht is destroyed.
- */
-static
-void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
-{
- return ht->mm->free_bucket_table(ht, order);
-}
-
-static inline
-struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
-{
- return ht->bucket_at(ht, index);
-}
-
-static inline
-struct lttng_ust_lfht_node *lookup_bucket(struct lttng_ust_lfht *ht, unsigned long size,
- unsigned long hash)
-{
- assert(size > 0);
- return bucket_at(ht, hash & (size - 1));
-}
-
-/*
- * Remove all logically deleted nodes from a bucket up to a certain node key.
- */
-static
-void _lttng_ust_lfht_gc_bucket(struct lttng_ust_lfht_node *bucket, struct lttng_ust_lfht_node *node)
-{
- struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_next;
-
- assert(!is_bucket(bucket));
- assert(!is_removed(bucket));
- assert(!is_removal_owner(bucket));
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
- for (;;) {
- iter_prev = bucket;
- /* We can always skip the bucket node initially */
- iter = lttng_ust_rcu_dereference(iter_prev->next);
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- assert(iter_prev->reverse_hash <= node->reverse_hash);
- /*
- * We should never be called with bucket (start of chain)
- * and logically removed node (end of path compression
- * marker) being the actual same node. This would be a
- * bug in the algorithm implementation.
- */
- assert(bucket != node);
- for (;;) {
- if (caa_unlikely(is_end(iter)))
- return;
- if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
- return;
- next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
- if (caa_likely(is_removed(next)))
- break;
- iter_prev = clear_flag(iter);
- iter = next;
- }
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- if (is_bucket(iter))
- new_next = flag_bucket(clear_flag(next));
- else
- new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
- }
-}
-
-static
-int _lttng_ust_lfht_replace(struct lttng_ust_lfht *ht, unsigned long size,
- struct lttng_ust_lfht_node *old_node,
- struct lttng_ust_lfht_node *old_next,
- struct lttng_ust_lfht_node *new_node)
-{
- struct lttng_ust_lfht_node *bucket, *ret_next;
-
- if (!old_node) /* Return -ENOENT if asked to replace NULL node */
- return -ENOENT;
-
- assert(!is_removed(old_node));
- assert(!is_removal_owner(old_node));
- assert(!is_bucket(old_node));
- assert(!is_removed(new_node));
- assert(!is_removal_owner(new_node));
- assert(!is_bucket(new_node));
- assert(new_node != old_node);
- for (;;) {
- /* Insert after node to be replaced */
- if (is_removed(old_next)) {
- /*
- * Too late, the old node has been removed under us
- * between lookup and replace. Fail.
- */
- return -ENOENT;
- }
- assert(old_next == clear_flag(old_next));
- assert(new_node != old_next);
- /*
- * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
- * flag. It is either set atomically at the same time
- * (replace) or after (del).
- */
- assert(!is_removal_owner(old_next));
- new_node->next = old_next;
- /*
- * Here is the whole trick for lock-free replace: we add
- * the replacement node _after_ the node we want to
- * replace by atomically setting its next pointer at the
- * same time we set its removal flag. Given that
- * the lookups/get next use an iterator aware of the
- * next pointer, they will either skip the old node due
- * to the removal flag and see the new node, or use
- * the old node, but will not see the new one.
- * This is a replacement of a node with another node
- * that has the same value: we are therefore not
- * removing a value from the hash table. We set both the
- * REMOVED and REMOVAL_OWNER flags atomically so we own
- * the node after successful cmpxchg.
- */
- ret_next = uatomic_cmpxchg(&old_node->next,
- old_next, flag_removed_or_removal_owner(new_node));
- if (ret_next == old_next)
- break; /* We performed the replacement. */
- old_next = ret_next;
- }
-
- /*
- * Ensure that the old node is not visible to readers anymore:
- * lookup for the node, and remove it (along with any other
- * logically removed node) if found.
- */
- bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
- _lttng_ust_lfht_gc_bucket(bucket, new_node);
-
- assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
- return 0;
-}
-
-/*
- * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
- * mode. A NULL unique_ret allows creation of duplicate keys.
- */
-static
-void _lttng_ust_lfht_add(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- unsigned long size,
- struct lttng_ust_lfht_node *node,
- struct lttng_ust_lfht_iter *unique_ret,
- int bucket_flag)
-{
- struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
- *return_node;
- struct lttng_ust_lfht_node *bucket;
-
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
- bucket = lookup_bucket(ht, size, hash);
- for (;;) {
- /*
- * iter_prev points to the non-removed node prior to the
- * insert location.
- */
- iter_prev = bucket;
- /* We can always skip the bucket node initially */
- iter = lttng_ust_rcu_dereference(iter_prev->next);
- assert(iter_prev->reverse_hash <= node->reverse_hash);
- for (;;) {
- if (caa_unlikely(is_end(iter)))
- goto insert;
- if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
- goto insert;
-
- /* bucket node is the first node of the identical-hash-value chain */
- if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
- goto insert;
-
- next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
- if (caa_unlikely(is_removed(next)))
- goto gc_node;
-
- /* uniquely add */
- if (unique_ret
- && !is_bucket(next)
- && clear_flag(iter)->reverse_hash == node->reverse_hash) {
- struct lttng_ust_lfht_iter d_iter = {
- .node = node,
- .next = iter,
-#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
- .lfht = ht,
-#endif
- };
-
- /*
- * uniquely adding inserts the node as the first
- * node of the identical-hash-value node chain.
- *
- * This semantic ensures that no duplicated
- * keys should ever be observable in the table
- * (including when traversing the table node
- * by node with forward iterations).
- */
- lttng_ust_lfht_next_duplicate(ht, match, key, &d_iter);
- if (!d_iter.node)
- goto insert;
-
- *unique_ret = d_iter;
- return;
- }
-
- iter_prev = clear_flag(iter);
- iter = next;
- }
-
- insert:
- assert(node != clear_flag(iter));
- assert(!is_removed(iter_prev));
- assert(!is_removal_owner(iter_prev));
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- assert(iter_prev != node);
- if (!bucket_flag)
- node->next = clear_flag(iter);
- else
- node->next = flag_bucket(clear_flag(iter));
- if (is_bucket(iter))
- new_node = flag_bucket(node);
- else
- new_node = node;
- if (uatomic_cmpxchg(&iter_prev->next, iter,
- new_node) != iter) {
- continue; /* retry */
- } else {
- return_node = node;
- goto end;
- }
-
- gc_node:
- assert(!is_removed(iter));
- assert(!is_removal_owner(iter));
- if (is_bucket(iter))
- new_next = flag_bucket(clear_flag(next));
- else
- new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
- /* retry */
- }
-end:
- if (unique_ret) {
- unique_ret->node = return_node;
- /* unique_ret->next left unset, never used. */
- }
-}
-
-static
-int _lttng_ust_lfht_del(struct lttng_ust_lfht *ht, unsigned long size,
- struct lttng_ust_lfht_node *node)
-{
- struct lttng_ust_lfht_node *bucket, *next;
-
- if (!node) /* Return -ENOENT if asked to delete NULL node */
- return -ENOENT;
-
- /* logically delete the node */
- assert(!is_bucket(node));
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
-
- /*
- * We are first checking if the node had previously been
- * logically removed (this check is not atomic with setting the
- * logical removal flag). Return -ENOENT if the node had
- * previously been removed.
- */
- next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
- if (caa_unlikely(is_removed(next)))
- return -ENOENT;
- assert(!is_bucket(next));
- /*
- * The del operation semantic guarantees a full memory barrier
- * before the uatomic_or atomic commit of the deletion flag.
- */
- cmm_smp_mb__before_uatomic_or();
- /*
- * We set the REMOVED_FLAG unconditionally. Note that there may
- * be more than one concurrent thread setting this flag.
- * Which one wins the race will only be known after the garbage
- * collection phase, stay tuned!
- */
- uatomic_or(&node->next, REMOVED_FLAG);
- /* We performed the (logical) deletion. */
-
- /*
- * Ensure that the node is not visible to readers anymore: lookup for
- * the node, and remove it (along with any other logically removed node)
- * if found.
- */
- bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
- _lttng_ust_lfht_gc_bucket(bucket, node);
-
- assert(is_removed(CMM_LOAD_SHARED(node->next)));
- /*
- * Last phase: atomically exchange node->next with a version
- * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
- * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
- * the node and win the removal race.
- * It is interesting to note that all "add" paths are forbidden
- * to change the next pointer starting from the point where the
- * REMOVED_FLAG is set, so here a read followed by an
- * xchg() suffices to guarantee that the xchg() will only ever
- * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
- * was already set).
- */
- if (!is_removal_owner(uatomic_xchg(&node->next,
- flag_removal_owner(node->next))))
- return 0;
- else
- return -ENOENT;
-}
-
-/*
- * Never called with size < 1.
- */
-static
-void lttng_ust_lfht_create_bucket(struct lttng_ust_lfht *ht, unsigned long size)
-{
- struct lttng_ust_lfht_node *prev, *node;
- unsigned long order, len, i;
- int bucket_order;
-
- lttng_ust_lfht_alloc_bucket_table(ht, 0);
-
- dbg_printf("create bucket: order 0 index 0 hash 0\n");
- node = bucket_at(ht, 0);
- node->next = flag_bucket(get_end());
- node->reverse_hash = 0;
-
- bucket_order = lttng_ust_lfht_get_count_order_ulong(size);
- assert(bucket_order >= 0);
-
- for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
- len = 1UL << (order - 1);
- lttng_ust_lfht_alloc_bucket_table(ht, order);
-
- for (i = 0; i < len; i++) {
- /*
- * We are initializing the node with hash=(len+i)
- * (which is also the bucket with index=(len+i)) and
- * inserting it into the hash table, so this node has
- * to be inserted after the bucket with
- * index=(len+i)&(len-1)=i. Because no other non-bucket
- * node or bucket node with a larger index/hash has
- * been inserted yet, the bucket node being inserted
- * is linked directly after the bucket node with
- * index=i.
- */
- prev = bucket_at(ht, i);
- node = bucket_at(ht, len + i);
-
- dbg_printf("create bucket: order %lu index %lu hash %lu\n",
- order, len + i, len + i);
- node->reverse_hash = bit_reverse_ulong(len + i);
-
- /* insert after prev */
- assert(is_bucket(prev->next));
- node->next = prev->next;
- prev->next = flag_bucket(node);
- }
- }
-}
-
-#if (CAA_BITS_PER_LONG > 32)
-/*
- * For 64-bit architectures, with max number of buckets small enough not to
- * use the entire 64-bit memory mapping space (and allowing a fair number of
- * hash table instances), use the mmap allocator, which is faster. Otherwise,
- * fall back to the order allocator.
- */
-static
-const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
-{
- if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
- return <tng_ust_lfht_mm_mmap;
- else
- return <tng_ust_lfht_mm_order;
-}
-#else
-/*
- * For 32-bit architectures, use the order allocator.
- */
-static
-const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
-{
- return <tng_ust_lfht_mm_order;
-}
-#endif
-
-struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
- unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets,
- int flags,
- const struct lttng_ust_lfht_mm_type *mm)
-{
- struct lttng_ust_lfht *ht;
- unsigned long order;
-
- /* min_nr_alloc_buckets must be power of two */
- if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
- return NULL;
-
- /* init_size must be power of two */
- if (!init_size || (init_size & (init_size - 1)))
- return NULL;
-
- /*
- * Memory management plugin default.
- */
- if (!mm)
- mm = get_mm_type(max_nr_buckets);
-
- /* max_nr_buckets == 0 for order based mm means infinite */
- if (mm == <tng_ust_lfht_mm_order && !max_nr_buckets)
- max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
-
- /* max_nr_buckets must be power of two */
- if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
- return NULL;
-
- if (flags & LTTNG_UST_LFHT_AUTO_RESIZE)
- return NULL;
-
- min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
- init_size = max(init_size, MIN_TABLE_SIZE);
- max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
- init_size = min(init_size, max_nr_buckets);
-
- ht = mm->alloc_lttng_ust_lfht(min_nr_alloc_buckets, max_nr_buckets);
- assert(ht);
- assert(ht->mm == mm);
- assert(ht->bucket_at == mm->bucket_at);
-
- ht->flags = flags;
- /* this mutex should not nest in read-side C.S. */
- pthread_mutex_init(&ht->resize_mutex, NULL);
- order = lttng_ust_lfht_get_count_order_ulong(init_size);
- ht->resize_target = 1UL << order;
- lttng_ust_lfht_create_bucket(ht, 1UL << order);
- ht->size = 1UL << order;
- return ht;
-}
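/*
 * A hedged usage sketch, not from the original source; the embedding
 * structure, match function and identity hash are hypothetical. The
 * calling thread must be a registered RCU reader, and lookups must run
 * inside a read-side critical section (see rculfhash.h).
 */
struct my_entry {
	unsigned long key;
	struct lttng_ust_lfht_node node;
};

static int my_match(struct lttng_ust_lfht_node *node, const void *key)
{
	struct my_entry *e = caa_container_of(node, struct my_entry, node);

	return e->key == *(const unsigned long *) key;
}

static void example_usage(void)
{
	struct lttng_ust_lfht *ht;
	struct lttng_ust_lfht_iter iter;
	struct my_entry *entry;
	unsigned long key = 42, hash = key;	/* real code would hash the key */

	ht = lttng_ust_lfht_new(64, 1, 0, 0, NULL);	/* default mm plugin */
	entry = calloc(1, sizeof(*entry));
	entry->key = key;
	lttng_ust_lfht_add(ht, hash, &entry->node);
	lttng_ust_lfht_lookup(ht, hash, my_match, &key, &iter);
	if (lttng_ust_lfht_iter_get_node(&iter))
		(void) lttng_ust_lfht_del(ht, iter.node);
	/* reclaim "entry" only after an RCU grace period */
}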
-
-void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
- lttng_ust_lfht_match_fct match, const void *key,
- struct lttng_ust_lfht_iter *iter)
-{
- struct lttng_ust_lfht_node *node, *next, *bucket;
- unsigned long reverse_hash, size;
-
- lttng_ust_lfht_iter_debug_set_ht(ht, iter);
-
- reverse_hash = bit_reverse_ulong(hash);
-
- size = lttng_ust_rcu_dereference(ht->size);
- bucket = lookup_bucket(ht, size, hash);
- /* We can always skip the bucket node initially */
- node = lttng_ust_rcu_dereference(bucket->next);
- node = clear_flag(node);
- for (;;) {
- if (caa_unlikely(is_end(node))) {
- node = next = NULL;
- break;
- }
- if (caa_unlikely(node->reverse_hash > reverse_hash)) {
- node = next = NULL;
- break;
- }
- next = lttng_ust_rcu_dereference(node->next);
- assert(node == clear_flag(node));
- if (caa_likely(!is_removed(next))
- && !is_bucket(next)
- && node->reverse_hash == reverse_hash
- && caa_likely(match(node, key))) {
- break;
- }
- node = clear_flag(next);
- }
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
- iter->node = node;
- iter->next = next;
-}
-
-void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht __attribute__((unused)),
- lttng_ust_lfht_match_fct match,
- const void *key, struct lttng_ust_lfht_iter *iter)
-{
- struct lttng_ust_lfht_node *node, *next;
- unsigned long reverse_hash;
-
- lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
- node = iter->node;
- reverse_hash = node->reverse_hash;
- next = iter->next;
- node = clear_flag(next);
-
- for (;;) {
- if (caa_unlikely(is_end(node))) {
- node = next = NULL;
- break;
- }
- if (caa_unlikely(node->reverse_hash > reverse_hash)) {
- node = next = NULL;
- break;
- }
- next = lttng_ust_rcu_dereference(node->next);
- if (caa_likely(!is_removed(next))
- && !is_bucket(next)
- && caa_likely(match(node, key))) {
- break;
- }
- node = clear_flag(next);
- }
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
- iter->node = node;
- iter->next = next;
-}
-
-void lttng_ust_lfht_next(struct lttng_ust_lfht *ht __attribute__((unused)),
- struct lttng_ust_lfht_iter *iter)
-{
- struct lttng_ust_lfht_node *node, *next;
-
- lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
- node = clear_flag(iter->next);
- for (;;) {
- if (caa_unlikely(is_end(node))) {
- node = next = NULL;
- break;
- }
- next = lttng_ust_rcu_dereference(node->next);
- if (caa_likely(!is_removed(next))
- && !is_bucket(next)) {
- break;
- }
- node = clear_flag(next);
- }
- assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
- iter->node = node;
- iter->next = next;
-}
-
-void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
-{
- lttng_ust_lfht_iter_debug_set_ht(ht, iter);
- /*
- * Get next after first bucket node. The first bucket node is the
- * first node of the linked list.
- */
- iter->next = bucket_at(ht, 0)->next;
- lttng_ust_lfht_next(ht, iter);
-}
-
-void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
- struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
-
- node->reverse_hash = bit_reverse_ulong(hash);
- size = lttng_ust_rcu_dereference(ht->size);
- _lttng_ust_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
-}
-
-struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
- struct lttng_ust_lfht_iter iter;
-
- node->reverse_hash = bit_reverse_ulong(hash);
- size = lttng_ust_rcu_dereference(ht->size);
- _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
- return iter.node;
-}
-
-struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
- struct lttng_ust_lfht_iter iter;
-
- node->reverse_hash = bit_reverse_ulong(hash);
- size = lttng_ust_rcu_dereference(ht->size);
- for (;;) {
- _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
- if (iter.node == node) {
- return NULL;
- }
-
- if (!_lttng_ust_lfht_replace(ht, size, iter.node, iter.next, node))
- return iter.node;
- }
-}
-
-int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
- struct lttng_ust_lfht_iter *old_iter,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *new_node)
-{
- unsigned long size;
-
- new_node->reverse_hash = bit_reverse_ulong(hash);
- if (!old_iter->node)
- return -ENOENT;
- if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
- return -EINVAL;
- if (caa_unlikely(!match(old_iter->node, key)))
- return -EINVAL;
- size = lttng_ust_rcu_dereference(ht->size);
- return _lttng_ust_lfht_replace(ht, size, old_iter->node, old_iter->next,
- new_node);
-}
-
-int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
-{
- unsigned long size;
-
- size = lttng_ust_rcu_dereference(ht->size);
- return _lttng_ust_lfht_del(ht, size, node);
-}
-
-int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
-{
- return is_removed(CMM_LOAD_SHARED(node->next));
-}
-
-static
-int lttng_ust_lfht_delete_bucket(struct lttng_ust_lfht *ht)
-{
- struct lttng_ust_lfht_node *node;
- unsigned long order, i, size;
-
- /* Check that the table is empty */
- node = bucket_at(ht, 0);
- do {
- node = clear_flag(node)->next;
- if (!is_bucket(node))
- return -EPERM;
- assert(!is_removed(node));
- assert(!is_removal_owner(node));
- } while (!is_end(node));
- /*
- * size accessed without lttng_ust_rcu_dereference because hash table is
- * being destroyed.
- */
- size = ht->size;
- /* Internal sanity check: all nodes left should be buckets */
- for (i = 0; i < size; i++) {
- node = bucket_at(ht, i);
- dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
- i, i, bit_reverse_ulong(node->reverse_hash));
- assert(is_bucket(node->next));
- }
-
- for (order = lttng_ust_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
- lttng_ust_lfht_free_bucket_table(ht, order);
-
- return 0;
-}
-
-/*
- * Should only be called when no concurrent readers or writers can
- * possibly access the table.
- */
-int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
-{
- int ret;
-
- ret = lttng_ust_lfht_delete_bucket(ht);
- if (ret)
- return ret;
- ret = pthread_mutex_destroy(&ht->resize_mutex);
- if (ret)
- ret = -EBUSY;
- poison_free(ht);
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
- *
- * Userspace RCU library - Lock-Free RCU Hash Table
- */
-
-#ifndef _LTTNG_UST_RCULFHASH_H
-#define _LTTNG_UST_RCULFHASH_H
-
-#include <stdint.h>
-#include <pthread.h>
-#include <urcu/compiler.h>
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-struct lttng_ust_lfht;
-
-/*
- * lttng_ust_lfht_node: Contains the next pointers and reverse-hash
- * value required for lookup and traversal of the hash table.
- *
- * struct lttng_ust_lfht_node should be aligned on 8-bytes boundaries because
- * the three lower bits are used as flags. It is worth noting that the
- * information contained within these three bits could be represented on
- * two bits by re-using the same bit for REMOVAL_OWNER_FLAG and
- * BUCKET_FLAG. This can be done if we ensure that no iterator nor
- * updater check the BUCKET_FLAG after it detects that the REMOVED_FLAG
- * is set. Given the minimum size of struct lttng_ust_lfht_node is 8 bytes on
- * 32-bit architectures, we choose to go for simplicity and reserve
- * three bits.
- *
- * struct lttng_ust_lfht_node can be embedded into a structure (as a field).
- * caa_container_of() can be used to get the structure from the struct
- * lttng_ust_lfht_node after a lookup.
- *
- * The structure which embeds it typically holds the key (or key-value
- * pair) of the object. The caller code is responsible for calculation
- * of the hash value for lttng_ust_lfht APIs.
- */
-struct lttng_ust_lfht_node {
- struct lttng_ust_lfht_node *next; /* ptr | REMOVAL_OWNER_FLAG | BUCKET_FLAG | REMOVED_FLAG */
- unsigned long reverse_hash;
-} __attribute__((aligned(8)));
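/*
 * A minimal embedding sketch: "struct my_entry" and its "key" field are
 * hypothetical names, illustrating the caa_container_of() pattern the
 * comment above describes.
 */
struct my_entry {
	int key;				/* key the hash is computed from */
	struct lttng_ust_lfht_node node;	/* embedded hash table node */
};

/* Recover the enclosing entry from a node returned by a lookup. */
static inline
struct my_entry *my_entry_from_node(struct lttng_ust_lfht_node *node)
{
	return caa_container_of(node, struct my_entry, node);
}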
-
-/* lttng_ust_lfht_iter: Used to track state while traversing a hash chain. */
-struct lttng_ust_lfht_iter {
- struct lttng_ust_lfht_node *node, *next;
-};
-
-static inline
-struct lttng_ust_lfht_node *lttng_ust_lfht_iter_get_node(struct lttng_ust_lfht_iter *iter)
-{
- return iter->node;
-}
-
-struct rcu_flavor_struct;
-
-/*
- * Caution !
- * Ensure reader and writer threads are registered as urcu readers.
- */
-
-typedef int (*lttng_ust_lfht_match_fct)(struct lttng_ust_lfht_node *node, const void *key);
-
-/*
- * lttng_ust_lfht_node_init - initialize a hash table node
- * @node: the node to initialize.
- *
- * This function is kept so it can eventually be used for debugging
- * purposes (detection of memory corruption).
- */
-static inline
-void lttng_ust_lfht_node_init(struct lttng_ust_lfht_node *node __attribute__((unused)))
-{
-}
-
-/*
- * Hash table creation flags.
- */
-enum {
- LTTNG_UST_LFHT_AUTO_RESIZE = (1U << 0),
- LTTNG_UST_LFHT_ACCOUNTING = (1U << 1),
-};
-
-struct lttng_ust_lfht_mm_type {
- struct lttng_ust_lfht *(*alloc_lttng_ust_lfht)(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets);
- void (*alloc_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
- void (*free_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
- struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
- unsigned long index);
-};
-
-extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order
- __attribute__((visibility("hidden")));
-
-extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk
- __attribute__((visibility("hidden")));
-
-extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_new - allocate a hash table.
- * @init_size: number of buckets to allocate initially. Must be power of two.
- * @min_nr_alloc_buckets: the minimum number of allocated buckets.
- * (must be power of two)
- * @max_nr_buckets: the maximum number of hash table buckets allowed.
- * (must be power of two, 0 is accepted, means
- * "infinite")
- * @flags: hash table creation flags (can be combined with bitwise or: '|').
- * 0: no flags.
- * LTTNG_UST_LFHT_AUTO_RESIZE: automatically resize hash table.
- *   LTTNG_UST_LFHT_ACCOUNTING: count the number of node additions
- *   and removals in the table
- *
- * Return NULL on error.
- * Note: the RCU flavor must be already included before the hash table header.
- */
-extern struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
- unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets,
- int flags,
- const struct lttng_ust_lfht_mm_type *mm)
- __attribute__((visibility("hidden")));
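/*
 * A creation sketch, assuming the order-based memory manager declared
 * above; the sizes are illustrative only.
 */
static inline
struct lttng_ust_lfht *my_table_create(void)
{
	/* 16 initial buckets, 16 minimum allocated, unbounded growth (0). */
	return lttng_ust_lfht_new(16, 16, 0,
			LTTNG_UST_LFHT_AUTO_RESIZE | LTTNG_UST_LFHT_ACCOUNTING,
			&lttng_ust_lfht_mm_order);
}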
-
-/*
- * lttng_ust_lfht_destroy - destroy a hash table.
- * @ht: the hash table to destroy.
- *
- * Return 0 on success, negative error value on error.
- *
- * Prior to liburcu 0.10:
- * - Threads calling this API need to be registered RCU read-side
- * threads.
- * - lttng_ust_lfht_destroy should *not* be called from a RCU read-side
- * critical section. It should *not* be called from a call_rcu thread
- * context neither.
- *
- * Starting from liburcu 0.10, rculfhash implements its own worker
- * thread to handle resize operations, which removes RCU requirements on
- * lttng_ust_lfht_destroy.
- */
-extern int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_count_nodes - count the number of nodes in the hash table.
- * @ht: the hash table.
- * @split_count_before: sample the node count split-counter before traversal.
- * @count: traverse the hash table, count the number of nodes observed.
- * @split_count_after: sample the node count split-counter after traversal.
- *
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- */
-extern void lttng_ust_lfht_count_nodes(struct lttng_ust_lfht *ht,
- long *split_count_before,
- unsigned long *count,
- long *split_count_after)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_lookup - lookup a node by key.
- * @ht: the hash table.
- * @hash: the key hash.
- * @match: the key match function.
- * @key: the current node key.
- * @iter: node, if found (output). *iter->node set to NULL if not found.
- *
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
- lttng_ust_lfht_match_fct match, const void *key,
- struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
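/*
 * A lookup sketch reusing the hypothetical "struct my_entry" above.
 * hash_int() is an assumed helper (any decent integer hash would do);
 * the caller must hold the RCU read-side lock, e.g. via
 * lttng_ust_urcu_read_lock() from <lttng/urcu/urcu-ust.h>.
 */
static inline
unsigned long hash_int(int key)
{
	return (unsigned long) key * 2654435761UL;	/* Knuth multiplicative hash */
}

static inline
int my_entry_match(struct lttng_ust_lfht_node *node, const void *key)
{
	struct my_entry *entry = caa_container_of(node, struct my_entry, node);

	return entry->key == *(const int *) key;
}

static inline
struct my_entry *my_entry_find(struct lttng_ust_lfht *ht, int key)
{
	struct lttng_ust_lfht_iter iter;
	struct lttng_ust_lfht_node *node;

	lttng_ust_lfht_lookup(ht, hash_int(key), my_entry_match, &key, &iter);
	node = lttng_ust_lfht_iter_get_node(&iter);
	return node ? caa_container_of(node, struct my_entry, node) : NULL;
}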
-
-/*
- * lttng_ust_lfht_next_duplicate - get the next item with same key, after iterator.
- * @ht: the hash table.
- * @match: the key match function.
- * @key: the current node key.
- * @iter: input: current iterator.
- * output: node, if found. *iter->node set to NULL if not found.
- *
- * Uses an iterator initialized by a lookup or traversal. Important: the
- * iterator _needs_ to be initialized before calling
- * lttng_ust_lfht_next_duplicate.
- * Sets *iter->node to the following node with same key.
- * Sets *iter->node to NULL if no following node exists with same key.
- * RCU read-side lock must be held across lttng_ust_lfht_lookup and
- * lttng_ust_lfht_next calls, and also between lttng_ust_lfht_next calls using the
- * node returned by a previous lttng_ust_lfht_next.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht,
- lttng_ust_lfht_match_fct match, const void *key,
- struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_first - get the first node in the table.
- * @ht: the hash table.
- * @iter: First node, if exists (output). *iter->node set to NULL if not found.
- *
- * Output in "*iter". *iter->node set to NULL if table is empty.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_next - get the next node in the table.
- * @ht: the hash table.
- * @iter: input: current iterator.
- * output: next node, if exists. *iter->node set to NULL if not found.
- *
- * Input/Output in "*iter". *iter->node set to NULL if *iter was
- * pointing to the last table node.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function acts as a rcu_dereference() to read the node pointer.
- */
-extern void lttng_ust_lfht_next(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_add - add a node to the hash table.
- * @ht: the hash table.
- * @hash: the key hash.
- * @node: the node to add.
- *
- * This function supports adding redundant keys into the table.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function issues a full memory barrier before and after its
- * atomic commit.
- */
-extern void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
- struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_add_unique - add a node to hash table, if key is not present.
- * @ht: the hash table.
- * @hash: the node's hash.
- * @match: the key match function.
- * @key: the node's key.
- * @node: the node to try adding.
- *
- * Return the node added upon success.
- * Return the unique node already present upon failure. If
- * lttng_ust_lfht_add_unique fails, the node passed as parameter should be
- * freed by the caller. In this case, the caller does NOT need to wait
- * for a grace period before freeing or re-using the node.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- *
- * The semantic of this function is that if only this function is used
- * to add keys into the table, no duplicated keys should ever be
- * observable in the table. The same guarantee applies for a combination of
- * add_unique and add_replace (see below).
- *
- * Upon success, this function issues a full memory barrier before and
- * after its atomic commit. Upon failure, this function acts like a
- * simple lookup operation: it acts as a rcu_dereference() to read the
- * node pointer. The failure case does not guarantee any other memory
- * barrier.
- */
-extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
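/*
 * An add_unique sketch with the hypothetical helpers above: on failure
 * the node was never published, so it can be freed right away (free()
 * from <stdlib.h> assumed) without waiting for a grace period. The RCU
 * read-side lock must be held by the caller.
 */
static inline
int my_entry_insert_unique(struct lttng_ust_lfht *ht, struct my_entry *entry)
{
	struct lttng_ust_lfht_node *ret;

	ret = lttng_ust_lfht_add_unique(ht, hash_int(entry->key),
			my_entry_match, &entry->key, &entry->node);
	if (ret != &entry->node) {
		free(entry);	/* key already present; node not published */
		return -1;
	}
	return 0;
}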
-
-/*
- * lttng_ust_lfht_add_replace - replace or add a node within hash table.
- * @ht: the hash table.
- * @hash: the node's hash.
- * @match: the key match function.
- * @key: the node's key.
- * @node: the node to add.
- *
- * Return the node replaced upon success. If no node matching the key
- * was present, return NULL, which also means the operation succeeded.
- * This replacement operation should never fail.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * After successful replacement, a grace period must be waited for before
- * freeing or re-using the memory reserved for the returned node.
- *
- * The semantic of replacement vs lookups and traversals is the
- * following: if lookups and traversals are performed between a key
- * unique insertion and its removal, we guarantee that the lookups and
- * traversals will always find exactly one instance of the key if it is
- * replaced concurrently with the lookups.
- *
- * Providing this semantic allows us to ensure that replacement-only
- * schemes will never generate duplicated keys. It also allows us to
- * guarantee that a combination of add_replace and add_unique updates
- * will never generate duplicated keys.
- *
- * This function issues a full memory barrier before and after its
- * atomic commit.
- */
-extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_replace - replace a node pointed to by iter within hash table.
- * @ht: the hash table.
- * @old_iter: the iterator position of the node to replace.
- * @hash: the node's hash.
- * @match: the key match function.
- * @key: the node's key.
- * @new_node: the new node to use as replacement.
- *
- * Return 0 if replacement is successful, negative value otherwise.
- * Replacing a NULL old node or an already removed node will fail with
- * -ENOENT.
- * If the hash or value of the node to replace and the new node differ,
- * this function returns -EINVAL without proceeding to the replacement.
- * Old node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next.
- * RCU read-side lock must be held between lookup and replacement.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * After successful replacement, a grace period must be waited for before
- * freeing or re-using the memory reserved for the old node (which can
- * be accessed with lttng_ust_lfht_iter_get_node).
- *
- * The semantic of replacement vs lookups is the same as
- * lttng_ust_lfht_add_replace().
- *
- * Upon success, this function issues a full memory barrier before and
- * after its atomic commit. Upon failure, this function does not issue
- * any memory barrier.
- */
-extern int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
- struct lttng_ust_lfht_iter *old_iter,
- unsigned long hash,
- lttng_ust_lfht_match_fct match,
- const void *key,
- struct lttng_ust_lfht_node *new_node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_del - remove node from the hash table.
- * @ht: the hash table.
- * @node: the node to delete.
- *
- * Return 0 if the node is successfully removed, negative value
- * otherwise.
- * Deleting a NULL node or an already removed node will fail with a
- * negative value.
- * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
- * followed by use of lttng_ust_lfht_iter_get_node.
- * RCU read-side lock must be held between lookup and removal.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * After successful removal, a grace period must be waited for before
- * freeing or re-using the memory reserved for old node (which can be
- * accessed with lttng_ust_lfht_iter_get_node).
- * Upon success, this function issues a full memory barrier before and
- * after its atomic commit. Upon failure, this function does not issue
- * any memory barrier.
- */
-extern int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
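/*
 * A removal sketch (hypothetical helpers as above): delete under the
 * read-side lock, then wait for a grace period outside of it before
 * reclaiming the memory, as the comment above requires.
 */
static inline
void my_entry_remove(struct lttng_ust_lfht *ht, struct my_entry *entry)
{
	int ret;

	lttng_ust_urcu_read_lock();
	ret = lttng_ust_lfht_del(ht, &entry->node);
	lttng_ust_urcu_read_unlock();
	if (!ret) {
		lttng_ust_urcu_synchronize_rcu();	/* wait out concurrent readers */
		free(entry);
	}
}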
-
-/*
- * lttng_ust_lfht_is_node_deleted - query whether a node is removed from hash table.
- *
- * Return non-zero if the node is deleted from the hash table, 0
- * otherwise.
- * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
- * followed by use of lttng_ust_lfht_iter_get_node.
- * RCU read-side lock must be held between lookup and call to this
- * function.
- * Call with rcu_read_lock held.
- * Threads calling this API need to be registered RCU read-side threads.
- * This function does not issue any memory barrier.
- */
-extern int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
- __attribute__((visibility("hidden")));
-
-/*
- * lttng_ust_lfht_resize - Force a hash table resize
- * @ht: the hash table.
- * @new_size: update to this hash table size.
- *
- * Threads calling this API need to be registered RCU read-side threads.
- * This function does not (necessarily) issue memory barriers.
- * lttng_ust_lfht_resize should *not* be called from a RCU read-side critical
- * section.
- */
-extern void lttng_ust_lfht_resize(struct lttng_ust_lfht *ht, unsigned long new_size)
- __attribute__((visibility("hidden")));
-
-/*
- * Note: it is safe to perform element removal (del), replacement, or
- * any hash table update operation during any of the following hash
- * table traversals.
- * These functions act as rcu_dereference() to read the node pointers.
- */
-#define lttng_ust_lfht_for_each(ht, iter, node) \
- for (lttng_ust_lfht_first(ht, iter), \
- node = lttng_ust_lfht_iter_get_node(iter); \
- node != NULL; \
- lttng_ust_lfht_next(ht, iter), \
- node = lttng_ust_lfht_iter_get_node(iter))
-
-#define lttng_ust_lfht_for_each_duplicate(ht, hash, match, key, iter, node) \
- for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
- node = lttng_ust_lfht_iter_get_node(iter); \
- node != NULL; \
- lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
- node = lttng_ust_lfht_iter_get_node(iter))
-
-#define lttng_ust_lfht_for_each_entry(ht, iter, pos, member) \
- for (lttng_ust_lfht_first(ht, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member); \
- lttng_ust_lfht_iter_get_node(iter) != NULL; \
- lttng_ust_lfht_next(ht, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member))
-
-#define lttng_ust_lfht_for_each_entry_duplicate(ht, hash, match, key, \
- iter, pos, member) \
- for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member); \
- lttng_ust_lfht_iter_get_node(iter) != NULL; \
- lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
- pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
- __typeof__(*(pos)), member))
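/*
 * A traversal sketch with the hypothetical "struct my_entry"; as noted
 * above, deleting entries while iterating is safe. printf() stands in
 * for any consumer (<stdio.h> assumed).
 */
static inline
void my_entries_dump(struct lttng_ust_lfht *ht)
{
	struct lttng_ust_lfht_iter iter;
	struct my_entry *entry;

	lttng_ust_urcu_read_lock();
	lttng_ust_lfht_for_each_entry(ht, &iter, entry, node)
		printf("key: %d\n", entry->key);
	lttng_ust_urcu_read_unlock();
}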
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* _LTTNG_UST_RCULFHASH_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdlib.h>
-#include <string.h>
-#include <stdbool.h>
-#include <assert.h>
-
-#include "string-utils.h"
-
-enum star_glob_pattern_type_flags {
- STAR_GLOB_PATTERN_TYPE_FLAG_NONE = 0,
- STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN = 1,
- STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY = 2,
-};
-
-static
-enum star_glob_pattern_type_flags strutils_test_glob_pattern(const char *pattern)
-{
- enum star_glob_pattern_type_flags ret =
- STAR_GLOB_PATTERN_TYPE_FLAG_NONE;
- const char *p;
-
- assert(pattern);
-
- for (p = pattern; *p != '\0'; p++) {
- switch (*p) {
- case '*':
- ret = STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
-
- if (p[1] == '\0') {
- ret |= STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
- }
-
- goto end;
- case '\\':
- p++;
-
- if (*p == '\0') {
- goto end;
- }
- break;
- default:
- break;
- }
- }
-
-end:
- return ret;
-}
-
-/*
- * Returns true if `pattern` is a star-only globbing pattern, that is,
- * it contains at least one non-escaped `*`.
- */
-bool strutils_is_star_glob_pattern(const char *pattern)
-{
- return strutils_test_glob_pattern(pattern) &
- STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
-}
-
-/*
- * Returns true if `pattern` is a globbing pattern whose only
- * non-escaped star is at its very end.
- */
-bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
-{
- return strutils_test_glob_pattern(pattern) &
- STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
-}
-
-static inline
-bool at_end_of_pattern(const char *p, const char *pattern, size_t pattern_len)
-{
- return (p - pattern) == pattern_len || *p == '\0';
-}
-
-/*
- * Globbing matching function with the star feature only (`?` and
- * character sets are not supported). This matches `candidate` (plain
- * string) against `pattern`. A literal star can be escaped with `\` in
- * `pattern`.
- *
- * `pattern_len` or `candidate_len` can be greater than the actual
- * string length of `pattern` or `candidate` if the string is
- * null-terminated.
- */
-bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
-		const char *candidate, size_t candidate_len)
-{
- const char *retry_c = candidate, *retry_p = pattern, *c, *p;
- bool got_a_star = false;
-
-retry:
- c = retry_c;
- p = retry_p;
-
- /*
- * The concept here is to retry a match in the specific case
- * where we already got a star. The retry position for the
- * pattern is just after the most recent star, and the retry
- * position for the candidate is the character following the
- * last try's first character.
- *
- * Example:
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^ MISMATCH
- *
- * candidate: hi ev every onyx one
- * ^
- * pattern: hi*every*one
- * ^
- *
- * candidate: hi ev every onyx one
- * ^^
- * pattern: hi*every*one
- * ^^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^
- *
- * candidate: hi ev every onyx one
- * ^ ^
- * pattern: hi*every*one
- * ^ ^ SUCCESS
- */
- while ((c - candidate) < candidate_len && *c != '\0') {
- assert(*c);
-
- if (at_end_of_pattern(p, pattern, pattern_len)) {
- goto end_of_pattern;
- }
-
- switch (*p) {
- case '*':
- got_a_star = true;
-
- /*
- * Our first try starts at the current candidate
- * character and after the star in the pattern.
- */
- retry_c = c;
- retry_p = p + 1;
-
- if (at_end_of_pattern(retry_p, pattern, pattern_len)) {
- /*
- * Star at the end of the pattern at
- * this point: automatic match.
- */
- return true;
- }
-
- goto retry;
- case '\\':
- /* Go to escaped character. */
- p++; /* Fallthrough */
-
- /*
- * Fall through the default case which will
- * compare the escaped character now.
- */
- default:
- if (at_end_of_pattern(p, pattern, pattern_len) ||
- *c != *p) {
-end_of_pattern:
- /* Character mismatch OR end of pattern. */
- if (!got_a_star) {
- /*
- * We didn't get any star yet,
- * so this first mismatch
- * automatically makes the whole
- * test fail.
- */
- return false;
- }
-
- /*
- * Next try: next candidate character,
- * original pattern character (following
- * the most recent star).
- */
- retry_c++;
- goto retry;
- }
- break;
- }
-
- /* Next pattern and candidate characters. */
- c++;
- p++;
- }
-
- /*
- * We checked every candidate character and we're still in a
- * success state: the only pattern character allowed to remain
- * is a star.
- */
- if (at_end_of_pattern(p, pattern, pattern_len)) {
- return true;
- }
-
- p++;
- return p[-1] == '*' && at_end_of_pattern(p, pattern, pattern_len);
-}
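/*
 * A usage sketch with illustrative values: passing SIZE_MAX (from
 * <stdint.h>) as both lengths lets the matcher stop at the strings'
 * own null terminators, as documented above.
 */
static bool example_event_name_matches(void)
{
	return strutils_star_glob_match("net_*", SIZE_MAX,
			"net_dev_queue", SIZE_MAX);	/* matches: true */
}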
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
- */
-
-#ifndef _STRING_UTILS_H
-#define _STRING_UTILS_H
-
-#include <stdbool.h>
-#include <stddef.h>
-
-bool strutils_is_star_glob_pattern(const char *pattern)
- __attribute__((visibility("hidden")));
-
-bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
- __attribute__((visibility("hidden")));
-
-bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
- const char *candidate, size_t candidate_len)
- __attribute__((visibility("hidden")));
-
-#endif /* _STRING_UTILS_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <ust-helper.h>
-
-#define TRACEPOINT_CREATE_PROBES
-#define TRACEPOINT_DEFINE
-#include "lttng-ust-tracef-provider.h"
-
-static inline
-void __lttng_ust_vtracef(const char *fmt, va_list ap)
- __attribute__((always_inline, format(printf, 1, 0)));
-static inline
-void __lttng_ust_vtracef(const char *fmt, va_list ap)
-{
- char *msg;
- const int len = vasprintf(&msg, fmt, ap);
-
- /* len does not include the final \0 */
- if (len < 0)
- goto end;
- __tracepoint_cb_lttng_ust_tracef___event(msg, len,
- LTTNG_UST_CALLER_IP());
- free(msg);
-end:
- return;
-}
-
-/*
- * FIXME: We should include <lttng/tracef.h> for the declarations here, but it
- * fails with tracepoint magic above my paygrade.
- */
-
-void _lttng_ust_vtracef(const char *fmt, va_list ap)
- __attribute__((format(printf, 1, 0)));
-void _lttng_ust_vtracef(const char *fmt, va_list ap)
-{
- __lttng_ust_vtracef(fmt, ap);
-}
-
-void _lttng_ust_tracef(const char *fmt, ...)
- __attribute__((format(printf, 1, 2)));
-void _lttng_ust_tracef(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- __lttng_ust_vtracef(fmt, ap);
- va_end(ap);
-}
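/*
 * A hypothetical application-side call, for illustration only: the
 * public tracef() macro from <lttng/tracef.h> ultimately lands in
 * _lttng_ust_tracef() above when the event is enabled.
 */
#include <lttng/tracef.h>

void report_progress(int done, int total)
{
	tracef("progress: %d/%d", done, total);
}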
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdio.h>
-#include <ust-helper.h>
-
-#define TRACEPOINT_CREATE_PROBES
-#define TRACEPOINT_DEFINE
-#include "lttng-ust-tracelog-provider.h"
-
-#define TRACELOG_CB(level) \
- static inline \
- void __lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- __attribute__((always_inline, format(printf, 4, 0))); \
- \
- static inline \
- void __lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- { \
- char *msg; \
- const int len = vasprintf(&msg, fmt, ap); \
- \
- /* len does not include the final \0 */ \
- if (len < 0) \
- goto end; \
- __tracepoint_cb_lttng_ust_tracelog___##level(file, \
- line, func, msg, len, \
- LTTNG_UST_CALLER_IP()); \
- free(msg); \
- end: \
- return; \
- } \
- \
- void _lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- __attribute__ ((format(printf, 4, 0))); \
- \
- void _lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap); \
- void _lttng_ust_vtracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, va_list ap) \
- { \
- __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
- } \
- \
- void _lttng_ust_tracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, ...) \
- __attribute__ ((format(printf, 4, 5))); \
- \
- void _lttng_ust_tracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, ...); \
- void _lttng_ust_tracelog_##level(const char *file, \
- int line, const char *func, \
- const char *fmt, ...) \
- { \
- va_list ap; \
- \
- va_start(ap, fmt); \
- __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
- va_end(ap); \
- }
-
-TRACELOG_CB(TRACE_EMERG)
-TRACELOG_CB(TRACE_ALERT)
-TRACELOG_CB(TRACE_CRIT)
-TRACELOG_CB(TRACE_ERR)
-TRACELOG_CB(TRACE_WARNING)
-TRACELOG_CB(TRACE_NOTICE)
-TRACELOG_CB(TRACE_INFO)
-TRACELOG_CB(TRACE_DEBUG_SYSTEM)
-TRACELOG_CB(TRACE_DEBUG_PROGRAM)
-TRACELOG_CB(TRACE_DEBUG_PROCESS)
-TRACELOG_CB(TRACE_DEBUG_MODULE)
-TRACELOG_CB(TRACE_DEBUG_UNIT)
-TRACELOG_CB(TRACE_DEBUG_FUNCTION)
-TRACELOG_CB(TRACE_DEBUG_LINE)
-TRACELOG_CB(TRACE_DEBUG)
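/*
 * Each TRACELOG_CB() expansion above generates one entry point per log
 * level. A hypothetical direct call to a generated variant (applications
 * normally go through the tracelog() macro from <lttng/tracelog.h>
 * instead):
 */
static void log_open_failure_example(const char *path)
{
	_lttng_ust_tracelog_TRACE_ERR(__FILE__, __LINE__, __func__,
			"failed to open %s", path);
}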
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_TRACEPOINT_INTERNAL_H
-#define _LTTNG_TRACEPOINT_INTERNAL_H
-
-#include <urcu/list.h>
-#include <lttng/tracepoint-types.h>
-#include <lttng/ust-events.h>
-
-#define TRACE_DEFAULT TRACE_DEBUG_LINE
-
-struct tracepoint_lib {
- struct cds_list_head list; /* list of registered libs */
- struct lttng_ust_tracepoint * const *tracepoints_start;
- int tracepoints_count;
- struct cds_list_head callsites;
-};
-
-int tracepoint_probe_register_noupdate(const char *name,
- void (*callback)(void), void *priv,
- const char *signature)
- __attribute__((visibility("hidden")));
-
-int tracepoint_probe_unregister_noupdate(const char *name,
- void (*callback)(void), void *priv)
- __attribute__((visibility("hidden")));
-
-void tracepoint_probe_update_all(void)
- __attribute__((visibility("hidden")));
-
-void *lttng_ust_tp_check_weak_hidden1(void)
- __attribute__((visibility("hidden")));
-
-void *lttng_ust_tp_check_weak_hidden2(void)
- __attribute__((visibility("hidden")));
-
-void *lttng_ust_tp_check_weak_hidden3(void)
- __attribute__((visibility("hidden")));
-
-/*
- * These symbols are ABI between liblttng-ust-tracepoint and liblttng-ust,
- * which is why they are not hidden and not part of the public API.
- */
-int lttng_ust_tp_probe_register_queue_release(const char *name,
- void (*func)(void), void *data, const char *signature);
-int lttng_ust_tp_probe_unregister_queue_release(const char *name,
- void (*func)(void), void *data);
-void lttng_ust_tp_probe_prune_release_queue(void);
-
-void lttng_ust_tp_init(void);
-void lttng_ust_tp_exit(void);
-
-#endif /* _LTTNG_TRACEPOINT_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include "tracepoint-internal.h"
-
-/* Test compiler support for weak symbols with hidden visibility. */
-int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
-void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
-struct {
- char a[24];
-} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
-
-void *lttng_ust_tp_check_weak_hidden1(void)
-{
- return &__tracepoint_test_symbol1;
-}
-
-void *lttng_ust_tp_check_weak_hidden2(void)
-{
- return &__tracepoint_test_symbol2;
-}
-
-void *lttng_ust_tp_check_weak_hidden3(void)
-{
- return &__tracepoint_test_symbol3;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2008-2011 Mathieu Desnoyers
- * Copyright (C) 2009 Pierre-Marc Fournier
- *
- * Ported to userspace by Pierre-Marc Fournier.
- */
-
-#define _LGPL_SOURCE
-#include <errno.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <stdio.h>
-
-#include <urcu/arch.h>
-#include <lttng/urcu/urcu-ust.h>
-#include <urcu/hlist.h>
-#include <urcu/uatomic.h>
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-
-#include <lttng/tracepoint.h>
-#include <lttng/ust-abi.h> /* for LTTNG_UST_ABI_SYM_NAME_LEN */
-
-#include <usterr-signal-safe.h>
-#include <ust-helper.h>
-
-#include "tracepoint-internal.h"
-#include "lttng-tracer-core.h"
-#include "jhash.h"
-#include "error.h"
-
-/* Test compiler support for weak symbols with hidden visibility. */
-int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
-void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
-struct {
- char a[24];
-} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
-
-/* Set to 1 to enable tracepoint debug output */
-static const int tracepoint_debug;
-static int initialized;
-
-/*
- * If tracepoint_destructors_state = 1, tracepoint destructors are
- * enabled. They are disabled otherwise.
- */
-static int tracepoint_destructors_state = 1;
-
-static void (*new_tracepoint_cb)(struct lttng_ust_tracepoint *);
-
-/*
- * tracepoint_mutex nests inside UST mutex.
- *
- * Note about interaction with fork/clone: UST does not hold the
- * tracepoint mutex across fork/clone because it is either:
- * - nested within UST mutex, in which case holding the UST mutex across
- * fork/clone suffice,
- * - taken by a library constructor, which should never race with a
- * fork/clone if the application is expected to continue running with
- * the same memory layout (no following exec()).
- */
-static pthread_mutex_t tracepoint_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-/*
- * libraries that contain tracepoints (struct tracepoint_lib).
- * Protected by tracepoint mutex.
- */
-static CDS_LIST_HEAD(libs);
-
-/*
- * The tracepoint mutex protects the library tracepoints, the hash table, and
- * the library list.
- * All calls to the tracepoint API must be protected by the tracepoint mutex,
- * except calls to tracepoint_register_lib and
- * tracepoint_unregister_lib, which take the tracepoint mutex themselves.
- */
-
-/*
- * Tracepoint hash table, containing the active tracepoints.
- * Protected by tracepoint mutex.
- */
-#define TRACEPOINT_HASH_BITS 12
-#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
-static struct cds_hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
-
-static CDS_LIST_HEAD(old_probes);
-static int need_update;
-
-static CDS_LIST_HEAD(release_queue);
-static int release_queue_need_update;
-
-/*
- * Note about RCU :
- * It is used to delay the freeing of the multiple-probe arrays until a quiescent
- * state is reached.
- * Tracepoint entries modifications are protected by the tracepoint mutex.
- */
-struct tracepoint_entry {
- struct cds_hlist_node hlist;
- struct lttng_ust_tracepoint_probe *probes;
- int refcount; /* Number of times armed. 0 if disarmed. */
- int callsite_refcount; /* how many libs use this tracepoint */
- char *signature;
- char *name;
-};
-
-struct tp_probes {
- union {
- struct cds_list_head list;
- /* Field below only used for call_rcu scheme */
- /* struct rcu_head head; */
- } u;
- struct lttng_ust_tracepoint_probe probes[0];
-};
-
-/*
- * Callsite hash table, containing the tracepoint call sites.
- * Protected by tracepoint mutex.
- */
-#define CALLSITE_HASH_BITS 12
-#define CALLSITE_TABLE_SIZE (1 << CALLSITE_HASH_BITS)
-static struct cds_hlist_head callsite_table[CALLSITE_TABLE_SIZE];
-
-struct callsite_entry {
- struct cds_hlist_node hlist; /* hash table node */
- struct cds_list_head node; /* lib list of callsites node */
- struct lttng_ust_tracepoint *tp;
-	bool tp_entry_callsite_ref;	/* Whether a tp_entry took a ref on this callsite */
-};
-
-/* coverity[+alloc] */
-static void *allocate_probes(int count)
-{
- struct tp_probes *p =
- zmalloc(count * sizeof(struct lttng_ust_tracepoint_probe)
- + sizeof(struct tp_probes));
- return p == NULL ? NULL : p->probes;
-}
-
-/* coverity[+free : arg-0] */
-static void release_probes(void *old)
-{
- if (old) {
- struct tp_probes *tp_probes = caa_container_of(old,
- struct tp_probes, probes[0]);
- lttng_ust_urcu_synchronize_rcu();
- free(tp_probes);
- }
-}
-
-static void debug_print_probes(struct tracepoint_entry *entry)
-{
- int i;
-
- if (!tracepoint_debug || !entry->probes)
- return;
-
- for (i = 0; entry->probes[i].func; i++)
- DBG("Probe %d : %p", i, entry->probes[i].func);
-}
-
-static void *
-tracepoint_entry_add_probe(struct tracepoint_entry *entry,
- void (*probe)(void), void *data)
-{
- int nr_probes = 0;
- struct lttng_ust_tracepoint_probe *old, *new;
-
- if (!probe) {
- WARN_ON(1);
- return ERR_PTR(-EINVAL);
- }
- debug_print_probes(entry);
- old = entry->probes;
- if (old) {
- /* (N -> N+1), (N != 0, 1) probes */
- for (nr_probes = 0; old[nr_probes].func; nr_probes++)
- if (old[nr_probes].func == probe &&
- old[nr_probes].data == data)
- return ERR_PTR(-EEXIST);
- }
- /* + 2 : one for new probe, one for NULL func */
- new = allocate_probes(nr_probes + 2);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- if (old)
- memcpy(new, old,
- nr_probes * sizeof(struct lttng_ust_tracepoint_probe));
- new[nr_probes].func = probe;
- new[nr_probes].data = data;
- new[nr_probes + 1].func = NULL;
- entry->refcount = nr_probes + 1;
- entry->probes = new;
- debug_print_probes(entry);
- return old;
-}
-
-static void *
-tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
- void (*probe)(void), void *data)
-{
- int nr_probes = 0, nr_del = 0, i;
- struct lttng_ust_tracepoint_probe *old, *new;
-
- old = entry->probes;
-
- if (!old)
- return ERR_PTR(-ENOENT);
-
- debug_print_probes(entry);
- /* (N -> M), (N > 1, M >= 0) probes */
- if (probe) {
- for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
- if (old[nr_probes].func == probe &&
- old[nr_probes].data == data)
- nr_del++;
- }
- }
-
- if (nr_probes - nr_del == 0) {
- /* N -> 0, (N > 1) */
- entry->probes = NULL;
- entry->refcount = 0;
- debug_print_probes(entry);
- return old;
- } else {
- int j = 0;
- /* N -> M, (N > 1, M > 0) */
- /* + 1 for NULL */
- new = allocate_probes(nr_probes - nr_del + 1);
- if (new == NULL)
- return ERR_PTR(-ENOMEM);
- for (i = 0; old[i].func; i++)
- if (old[i].func != probe || old[i].data != data)
- new[j++] = old[i];
- new[nr_probes - nr_del].func = NULL;
- entry->refcount = nr_probes - nr_del;
- entry->probes = new;
- }
- debug_print_probes(entry);
- return old;
-}
-
-/*
- * Get tracepoint if the tracepoint is present in the tracepoint hash table.
- * Must be called with tracepoint mutex held.
- * Returns NULL if not present.
- */
-static struct tracepoint_entry *get_tracepoint(const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct tracepoint_entry *e;
- size_t name_len = strlen(name);
- uint32_t hash;
-
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
- return e;
- }
- return NULL;
-}
-
-/*
- * Add the tracepoint to the tracepoint hash table. Must be called with
- * tracepoint mutex held.
- */
-static struct tracepoint_entry *add_tracepoint(const char *name,
- const char *signature)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct tracepoint_entry *e;
- size_t name_len = strlen(name);
- size_t sig_len = strlen(signature);
- size_t sig_off, name_off;
- uint32_t hash;
-
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1)) {
- DBG("tracepoint %s busy", name);
- return ERR_PTR(-EEXIST); /* Already there */
- }
- }
-
- /*
- * Using zmalloc here to allocate variable-length elements: name and
- * signature. Could cause some memory fragmentation if overused.
- */
- name_off = sizeof(struct tracepoint_entry);
- sig_off = name_off + name_len + 1;
-
- e = zmalloc(sizeof(struct tracepoint_entry) + name_len + 1 + sig_len + 1);
- if (!e)
- return ERR_PTR(-ENOMEM);
- e->name = (char *) e + name_off;
- memcpy(e->name, name, name_len + 1);
- e->name[name_len] = '\0';
-
- e->signature = (char *) e + sig_off;
- memcpy(e->signature, signature, sig_len + 1);
- e->signature[sig_len] = '\0';
-
- e->probes = NULL;
- e->refcount = 0;
- e->callsite_refcount = 0;
-
- cds_hlist_add_head(&e->hlist, head);
- return e;
-}
-
-/*
- * Remove the tracepoint from the tracepoint hash table. Must be called with
- * tracepoint mutex held.
- */
-static void remove_tracepoint(struct tracepoint_entry *e)
-{
- cds_hlist_del(&e->hlist);
- free(e);
-}
-
-/*
- * Sets the probe callback corresponding to one tracepoint.
- */
-static void set_tracepoint(struct tracepoint_entry **entry,
- struct lttng_ust_tracepoint *elem, int active)
-{
- WARN_ON(strncmp((*entry)->name, elem->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1) != 0);
- /*
- * Check that signatures match before connecting a probe to a
- * tracepoint. Warn the user if they don't.
- */
- if (strcmp(elem->signature, (*entry)->signature) != 0) {
- static int warned = 0;
-
- /* Only print once, don't flood console. */
- if (!warned) {
- WARN("Tracepoint signature mismatch, not enabling one or more tracepoints. Ensure that the tracepoint probes prototypes match the application.");
- WARN("Tracepoint \"%s\" signatures: call: \"%s\" vs probe: \"%s\".",
- elem->name, elem->signature, (*entry)->signature);
- warned = 1;
- }
- /* Don't accept connecting non-matching signatures. */
- return;
- }
-
- /*
- * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
- * probe callbacks array is consistent before setting a pointer to it.
- * This array is referenced by __DO_TRACE from
- * include/linux/tracepoints.h. A matching cmm_smp_read_barrier_depends()
- * is used.
- */
- lttng_ust_rcu_assign_pointer(elem->probes, (*entry)->probes);
- CMM_STORE_SHARED(elem->state, active);
-}
-
-/*
- * Disable a tracepoint and its probe callback.
- * Note: only waiting for an RCU grace period after clearing elem->probes
- * ensures that the original callback is not used anymore. This is
- * guaranteed by the RCU read-side critical section around the call site.
- */
-static void disable_tracepoint(struct lttng_ust_tracepoint *elem)
-{
- CMM_STORE_SHARED(elem->state, 0);
- lttng_ust_rcu_assign_pointer(elem->probes, NULL);
-}
-
-/*
- * Add the callsite to the callsite hash table. Must be called with
- * tracepoint mutex held.
- */
-static void add_callsite(struct tracepoint_lib * lib, struct lttng_ust_tracepoint *tp)
-{
- struct cds_hlist_head *head;
- struct callsite_entry *e;
- const char *name = tp->name;
- size_t name_len = strlen(name);
- uint32_t hash;
- struct tracepoint_entry *tp_entry;
-
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
- e = zmalloc(sizeof(struct callsite_entry));
- if (!e) {
- PERROR("Unable to add callsite for tracepoint \"%s\"", name);
- return;
- }
- cds_hlist_add_head(&e->hlist, head);
- e->tp = tp;
- cds_list_add(&e->node, &lib->callsites);
-
- tp_entry = get_tracepoint(name);
- if (!tp_entry)
- return;
- tp_entry->callsite_refcount++;
- e->tp_entry_callsite_ref = true;
-}
-
-/*
- * Remove the callsite from the callsite hash table and from lib
- * callsite list. Must be called with tracepoint mutex held.
- */
-static void remove_callsite(struct callsite_entry *e)
-{
- struct tracepoint_entry *tp_entry;
-
- tp_entry = get_tracepoint(e->tp->name);
- if (tp_entry) {
- if (e->tp_entry_callsite_ref)
- tp_entry->callsite_refcount--;
- if (tp_entry->callsite_refcount == 0)
- disable_tracepoint(e->tp);
- }
- cds_hlist_del(&e->hlist);
- cds_list_del(&e->node);
- free(e);
-}
-
-/*
- * Enable/disable all callsites based on the state of a specific
- * tracepoint entry.
- * Must be called with tracepoint mutex held.
- */
-static void tracepoint_sync_callsites(const char *name)
-{
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- struct callsite_entry *e;
- size_t name_len = strlen(name);
- uint32_t hash;
- struct tracepoint_entry *tp_entry;
-
- tp_entry = get_tracepoint(name);
- if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
- WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
- name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
- }
- hash = jhash(name, name_len, 0);
- head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
- cds_hlist_for_each_entry(e, node, head, hlist) {
- struct lttng_ust_tracepoint *tp = e->tp;
-
- if (strncmp(name, tp->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
- continue;
- if (tp_entry) {
- if (!e->tp_entry_callsite_ref) {
- tp_entry->callsite_refcount++;
- e->tp_entry_callsite_ref = true;
- }
- set_tracepoint(&tp_entry, tp,
- !!tp_entry->refcount);
- } else {
- disable_tracepoint(tp);
- e->tp_entry_callsite_ref = false;
- }
- }
-}
-
-/**
- * tracepoint_update_probe_range - Update a probe range
- * @begin: beginning of the range
- * @end: end of the range
- *
- * Updates the probe callback corresponding to a range of tracepoints.
- */
-static
-void tracepoint_update_probe_range(struct lttng_ust_tracepoint * const *begin,
- struct lttng_ust_tracepoint * const *end)
-{
- struct lttng_ust_tracepoint * const *iter;
- struct tracepoint_entry *mark_entry;
-
- for (iter = begin; iter < end; iter++) {
- if (!*iter)
- continue; /* skip dummy */
- if (!(*iter)->name) {
- disable_tracepoint(*iter);
- continue;
- }
- mark_entry = get_tracepoint((*iter)->name);
- if (mark_entry) {
- set_tracepoint(&mark_entry, *iter,
- !!mark_entry->refcount);
- } else {
- disable_tracepoint(*iter);
- }
- }
-}
-
-static void lib_update_tracepoints(struct tracepoint_lib *lib)
-{
- tracepoint_update_probe_range(lib->tracepoints_start,
- lib->tracepoints_start + lib->tracepoints_count);
-}
-
-static void lib_register_callsites(struct tracepoint_lib *lib)
-{
- struct lttng_ust_tracepoint * const *begin;
- struct lttng_ust_tracepoint * const *end;
- struct lttng_ust_tracepoint * const *iter;
-
- begin = lib->tracepoints_start;
- end = lib->tracepoints_start + lib->tracepoints_count;
-
- for (iter = begin; iter < end; iter++) {
- if (!*iter)
- continue; /* skip dummy */
- if (!(*iter)->name) {
- continue;
- }
- add_callsite(lib, *iter);
- }
-}
-
-static void lib_unregister_callsites(struct tracepoint_lib *lib)
-{
- struct callsite_entry *callsite, *tmp;
-
- cds_list_for_each_entry_safe(callsite, tmp, &lib->callsites, node)
- remove_callsite(callsite);
-}
-
-/*
- * Update probes, removing the faulty probes.
- */
-static void tracepoint_update_probes(void)
-{
- struct tracepoint_lib *lib;
-
- /* tracepoints registered from libraries and executable. */
- cds_list_for_each_entry(lib, &libs, list)
- lib_update_tracepoints(lib);
-}
-
-static struct lttng_ust_tracepoint_probe *
-tracepoint_add_probe(const char *name, void (*probe)(void), void *data,
- const char *signature)
-{
- struct tracepoint_entry *entry;
- struct lttng_ust_tracepoint_probe *old;
-
- entry = get_tracepoint(name);
- if (entry) {
- if (strcmp(entry->signature, signature) != 0) {
- ERR("Tracepoint and probe signature do not match.");
- return ERR_PTR(-EINVAL);
- }
- } else {
- entry = add_tracepoint(name, signature);
- if (IS_ERR(entry))
- return (struct lttng_ust_tracepoint_probe *)entry;
- }
- old = tracepoint_entry_add_probe(entry, probe, data);
- if (IS_ERR(old) && !entry->refcount)
- remove_tracepoint(entry);
- return old;
-}
-
-static void tracepoint_release_queue_add_old_probes(void *old)
-{
- release_queue_need_update = 1;
- if (old) {
- struct tp_probes *tp_probes = caa_container_of(old,
- struct tp_probes, probes[0]);
- cds_list_add(&tp_probes->u.list, &release_queue);
- }
-}
-
-/**
- * __tracepoint_probe_register - Connect a probe to a tracepoint
- * @name: tracepoint name
- * @probe: probe handler
- *
- * Returns 0 if ok, error value on error.
- * The probe address must at least be aligned on the architecture pointer size.
- * Takes the tracepoint mutex internally.
- */
-int __tracepoint_probe_register(const char *name, void (*probe)(void),
- void *data, const char *signature)
-{
- void *old;
- int ret = 0;
-
- DBG("Registering probe to tracepoint %s", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_add_probe(name, probe, data, signature);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
-
- tracepoint_sync_callsites(name);
- release_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
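/*
 * A registration sketch with hypothetical probe and event names. The
 * signature string must match the one recorded at the tracepoint call
 * site, otherwise set_tracepoint() refuses the connection.
 */
static void my_probe(void *tp_data, int value)
{
	/* consume the traced value */
	(void) tp_data;
	(void) value;
}

static int register_my_probe(void)
{
	return __tracepoint_probe_register("my_provider:my_event",
			(void (*)(void)) my_probe, NULL, "int value");
}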
-
-/*
- * Caller needs to invoke __tracepoint_probe_release_queue() after
- * calling lttng_ust_tp_probe_register_queue_release() one or multiple
- * times to ensure it does not leak memory.
- */
-int lttng_ust_tp_probe_register_queue_release(const char *name,
- void (*probe)(void), void *data, const char *signature)
-{
- void *old;
- int ret = 0;
-
- DBG("Registering probe to tracepoint %s. Queuing release.", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_add_probe(name, probe, data, signature);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
-
- tracepoint_sync_callsites(name);
- tracepoint_release_queue_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-static void *tracepoint_remove_probe(const char *name, void (*probe)(void),
- void *data)
-{
- struct tracepoint_entry *entry;
- void *old;
-
- entry = get_tracepoint(name);
- if (!entry)
- return ERR_PTR(-ENOENT);
- old = tracepoint_entry_remove_probe(entry, probe, data);
- if (IS_ERR(old))
- return old;
- if (!entry->refcount)
- remove_tracepoint(entry);
- return old;
-}
-
-/**
- * __tracepoint_probe_unregister - Disconnect a probe from a tracepoint
- * @name: tracepoint name
- * @probe: probe function pointer
- * @data: probe data pointer
- */
-int __tracepoint_probe_unregister(const char *name, void (*probe)(void),
- void *data)
-{
- void *old;
- int ret = 0;
-
- DBG("Un-registering probe from tracepoint %s", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_remove_probe(name, probe, data);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_sync_callsites(name);
- release_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-/*
- * Caller needs to invoke __tracepoint_probe_release_queue() after
- * calling lttng_ust_tp_probe_unregister_queue_release() one or multiple
- * times to ensure it does not leak memory.
- */
-int lttng_ust_tp_probe_unregister_queue_release(const char *name,
- void (*probe)(void), void *data)
-{
- void *old;
- int ret = 0;
-
- DBG("Un-registering probe from tracepoint %s. Queuing release.", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_remove_probe(name, probe, data);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_sync_callsites(name);
- tracepoint_release_queue_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-void lttng_ust_tp_probe_prune_release_queue(void)
-{
- CDS_LIST_HEAD(release_probes);
- struct tp_probes *pos, *next;
-
- DBG("Release queue of unregistered tracepoint probes.");
-
- pthread_mutex_lock(&tracepoint_mutex);
- if (!release_queue_need_update)
- goto end;
- if (!cds_list_empty(&release_queue))
- cds_list_replace_init(&release_queue, &release_probes);
- release_queue_need_update = 0;
-
- /* Wait for grace period between all sync_callsites and free. */
- lttng_ust_urcu_synchronize_rcu();
-
- cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
- cds_list_del(&pos->u.list);
- free(pos);
- }
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
-}
-
-static void tracepoint_add_old_probes(void *old)
-{
- need_update = 1;
- if (old) {
- struct tp_probes *tp_probes = caa_container_of(old,
- struct tp_probes, probes[0]);
- cds_list_add(&tp_probes->u.list, &old_probes);
- }
-}
-
-/**
- * tracepoint_probe_register_noupdate - register a probe but not connect
- * @name: tracepoint name
- * @probe: probe handler
- *
- * caller must call tracepoint_probe_update_all()
- */
-int tracepoint_probe_register_noupdate(const char *name, void (*probe)(void),
- void *data, const char *signature)
-{
- void *old;
- int ret = 0;
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_add_probe(name, probe, data, signature);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-/**
- * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
- * @name: tracepoint name
- * @probe: probe function pointer
- *
- * caller must call tracepoint_probe_update_all()
- * Takes the tracepoint mutex internally.
- */
-int tracepoint_probe_unregister_noupdate(const char *name, void (*probe)(void),
- void *data)
-{
- void *old;
- int ret = 0;
-
- DBG("Un-registering probe from tracepoint %s", name);
-
- pthread_mutex_lock(&tracepoint_mutex);
- old = tracepoint_remove_probe(name, probe, data);
- if (IS_ERR(old)) {
- ret = PTR_ERR(old);
- goto end;
- }
- tracepoint_add_old_probes(old);
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
- return ret;
-}
-
-/**
- * tracepoint_probe_update_all - update tracepoints
- */
-void tracepoint_probe_update_all(void)
-{
- CDS_LIST_HEAD(release_probes);
- struct tp_probes *pos, *next;
-
- pthread_mutex_lock(&tracepoint_mutex);
- if (!need_update) {
- goto end;
- }
- if (!cds_list_empty(&old_probes))
- cds_list_replace_init(&old_probes, &release_probes);
- need_update = 0;
-
- tracepoint_update_probes();
- /* Wait for grace period between update_probes and free. */
- lttng_ust_urcu_synchronize_rcu();
- cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
- cds_list_del(&pos->u.list);
- free(pos);
- }
-end:
- pthread_mutex_unlock(&tracepoint_mutex);
-}
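/*
 * A batching sketch with hypothetical probes (probe_a, probe_b): the
 * _noupdate variants above queue changes, and a single
 * tracepoint_probe_update_all() call then synchronizes all call sites
 * and reclaims the old probe arrays in one grace period.
 */
static void probe_a(void *tp_data, int value);	/* hypothetical probes */
static void probe_b(void *tp_data, int value);

static void register_probe_batch_example(void)
{
	tracepoint_probe_register_noupdate("prov:event_a",
			(void (*)(void)) probe_a, NULL, "int value");
	tracepoint_probe_register_noupdate("prov:event_b",
			(void (*)(void)) probe_b, NULL, "int value");
	/* Connect both probes and free replaced arrays in one pass. */
	tracepoint_probe_update_all();
}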
-
-static void new_tracepoints(struct lttng_ust_tracepoint * const *start,
- struct lttng_ust_tracepoint * const *end)
-{
- if (new_tracepoint_cb) {
- struct lttng_ust_tracepoint * const *t;
-
- for (t = start; t < end; t++) {
- if (*t)
- new_tracepoint_cb(*t);
- }
- }
-}
-
-/*
- * tracepoint_{un,}register_lib is meant to be looked up by instrumented
- * applications through dlsym(). If found, those can register their
- * tracepoints, else those tracepoints will not be available for
- * tracing. The number at the end of those symbols acts as a major
- * version for tracepoints.
- *
- * Older instrumented applications should still work with newer
- * liblttng-ust, but it is fine that instrumented applications compiled
- * against recent liblttng-ust headers require a recent liblttng-ust
- * runtime for those tracepoints to be taken into account.
- */
-int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
- int tracepoints_count);
-int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
- int tracepoints_count)
-{
- struct tracepoint_lib *pl, *iter;
-
- lttng_ust_tp_init();
-
- pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
- if (!pl) {
- PERROR("Unable to register tracepoint lib");
- return -1;
- }
- pl->tracepoints_start = tracepoints_start;
- pl->tracepoints_count = tracepoints_count;
- CDS_INIT_LIST_HEAD(&pl->callsites);
-
- pthread_mutex_lock(&tracepoint_mutex);
- /*
- * We sort the libs by struct lib pointer address.
- */
- cds_list_for_each_entry_reverse(iter, &libs, list) {
- BUG_ON(iter == pl); /* Should never be in the list twice */
- if (iter < pl) {
- /* We belong to the location right after iter. */
- cds_list_add(&pl->list, &iter->list);
- goto lib_added;
- }
- }
- /* We should be added at the head of the list */
- cds_list_add(&pl->list, &libs);
-lib_added:
- new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
- lib_register_callsites(pl);
- lib_update_tracepoints(pl);
- pthread_mutex_unlock(&tracepoint_mutex);
-
-	DBG("just registered a tracepoint section at %p containing %d tracepoints",
- tracepoints_start, tracepoints_count);
- if (ust_err_debug_enabled()) {
- int i;
-
- for (i = 0; i < tracepoints_count; i++) {
- DBG("registered tracepoint: %s", tracepoints_start[i]->name);
- }
- }
-
- return 0;
-}
-
-int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start);
-int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start)
-{
- struct tracepoint_lib *lib;
-
- pthread_mutex_lock(&tracepoint_mutex);
- cds_list_for_each_entry(lib, &libs, list) {
- if (lib->tracepoints_start != tracepoints_start)
- continue;
-
- cds_list_del(&lib->list);
- /*
- * Unregistering a callsite also decreases the
- * callsite reference count of the corresponding
- * tracepoint, and disables the tracepoint if
- * the reference count drops to zero.
- */
- lib_unregister_callsites(lib);
- DBG("just unregistered a tracepoints section from %p",
- lib->tracepoints_start);
- free(lib);
- break;
- }
- pthread_mutex_unlock(&tracepoint_mutex);
- return 0;
-}
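-
-/*
- * Hedged sketch (not part of the original file) of the dlsym() lookup
- * described above, as generated probe code could perform it. Error
- * handling and symbol versioning are omitted; RTLD_DEFAULT is a GNU
- * extension, so _GNU_SOURCE must be defined before including dlfcn.h.
- */
-#include <dlfcn.h>
-
-static int example_register_tracepoints(
-		struct lttng_ust_tracepoint * const *start, int count)
-{
-	int (*reg)(struct lttng_ust_tracepoint * const *, int);
-
-	reg = (int (*)(struct lttng_ust_tracepoint * const *, int))
-			dlsym(RTLD_DEFAULT, "tracepoint_register_lib");
-	if (!reg)
-		return 0;	/* Tracing not available; not an error. */
-	return reg(start, count);
-}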
-
-/*
- * Report in a debug message whether the compiler correctly supports
- * weak hidden symbols. This test checks that a weak symbol with hidden
- * visibility resolves to the same address when declared within two
- * compile units that are part of the same module.
- */
-static void check_weak_hidden(void)
-{
- DBG("Your compiler treats weak symbols with hidden visibility for integer objects as %s between compile units part of the same module.",
- &__tracepoint_test_symbol1 == lttng_ust_tp_check_weak_hidden1() ?
- "SAME address" :
- "DIFFERENT addresses");
- DBG("Your compiler treats weak symbols with hidden visibility for pointer objects as %s between compile units part of the same module.",
- &__tracepoint_test_symbol2 == lttng_ust_tp_check_weak_hidden2() ?
- "SAME address" :
- "DIFFERENT addresses");
- DBG("Your compiler treats weak symbols with hidden visibility for 24-byte structure objects as %s between compile units part of the same module.",
- &__tracepoint_test_symbol3 == lttng_ust_tp_check_weak_hidden3() ?
- "SAME address" :
- "DIFFERENT addresses");
-}
-
-void lttng_ust_tp_init(void)
-{
- if (uatomic_xchg(&initialized, 1) == 1)
- return;
- ust_err_init();
- check_weak_hidden();
-}
-
-void lttng_ust_tp_exit(void)
-{
- initialized = 0;
-}
-
-/*
- * Create the wrapper symbols.
- */
-#undef tp_rcu_read_lock
-#undef tp_rcu_read_unlock
-#undef tp_rcu_dereference
-
-void tp_rcu_read_lock(void);
-void tp_rcu_read_lock(void)
-{
- lttng_ust_urcu_read_lock();
-}
-
-void tp_rcu_read_unlock(void);
-void tp_rcu_read_unlock(void)
-{
- lttng_ust_urcu_read_unlock();
-}
-
-void *tp_rcu_dereference_sym(void *p);
-void *tp_rcu_dereference_sym(void *p)
-{
- return lttng_ust_rcu_dereference(p);
-}
-
-/*
- * Programs with threads that outlive the program's exit, and that
- * therefore run while library destructors execute, should disable the
- * tracepoint destructors by calling tp_disable_destructors(). This
- * leaks the tracepoint instrumentation library shared object, leaving
- * its teardown to the operating system process teardown.
- *
- * To access and/or modify this value, users need to use a combination of
- * dlopen(3) and dlsym(3) to get a handle on the tp_disable_destructors
- * and tp_get_destructors_state symbols below.
- */
-void tp_disable_destructors(void);
-void tp_disable_destructors(void)
-{
- uatomic_set(&tracepoint_destructors_state, 0);
-}
-
-/*
- * Returns 1 if the destructors are enabled and should be executed.
- * Returns 0 if the destructors are disabled.
- */
-int tp_get_destructors_state(void);
-int tp_get_destructors_state(void)
-{
- return uatomic_read(&tracepoint_destructors_state);
-}
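-
-/*
- * A minimal usage sketch (not part of the original file) of the
- * dlopen(3)/dlsym(3) approach described above. The library name passed
- * to dlopen() is an assumption for illustration; the exact soname can
- * differ between versions.
- */
-#include <dlfcn.h>
-
-static void example_disable_tp_destructors(void)
-{
-	void (*disable)(void);
-	void *handle;
-
-	/* RTLD_NOLOAD: only succeed if the library is already mapped. */
-	handle = dlopen("liblttng-ust.so.0", RTLD_NOW | RTLD_NOLOAD);
-	if (!handle)
-		return;
-	disable = (void (*)(void)) dlsym(handle, "tp_disable_destructors");
-	if (disable)
-		disable();
-	dlclose(handle);
-}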
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stdint.h>
-#include <stddef.h>
-#include <stdlib.h>
-
-#include "context-internal.h"
-#include "ust-events-internal.h"
-#include <usterr-signal-safe.h>
-#include "lttng-tracer-core.h"
-#include "lttng-rb-clients.h"
-#include "lttng-counter-client.h"
-#include "jhash.h"
-
-static CDS_LIST_HEAD(lttng_transport_list);
-static CDS_LIST_HEAD(lttng_counter_transport_list);
-
-struct lttng_transport *lttng_ust_transport_find(const char *name)
-{
- struct lttng_transport *transport;
-
-	cds_list_for_each_entry(transport, &lttng_transport_list, node) {
- if (!strcmp(transport->name, name))
- return transport;
- }
- return NULL;
-}
-
-struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
-{
- struct lttng_counter_transport *transport;
-
-	cds_list_for_each_entry(transport, &lttng_counter_transport_list, node) {
- if (!strcmp(transport->name, name))
- return transport;
- }
- return NULL;
-}
-
-/**
- * lttng_transport_register - LTT transport registration
- * @transport: transport structure
- *
- * Registers a transport which can be used as output to extract the data out of
- * LTTng. Called with ust_lock held.
- */
-void lttng_transport_register(struct lttng_transport *transport)
-{
-	cds_list_add_tail(&transport->node, &lttng_transport_list);
-}
-
-/**
- * lttng_transport_unregister - LTT transport unregistration
- * @transport: transport structure
- * Called with ust_lock held.
- */
-void lttng_transport_unregister(struct lttng_transport *transport)
-{
- cds_list_del(&transport->node);
-}
-
-/**
- * lttng_counter_transport_register - LTTng counter transport registration
- * @transport: transport structure
- *
- * Registers a counter transport which can be used as output to extract
- * the data out of LTTng. Called with ust_lock held.
- */
-void lttng_counter_transport_register(struct lttng_counter_transport *transport)
-{
-	cds_list_add_tail(&transport->node, &lttng_counter_transport_list);
-}
-
-/**
- * lttng_counter_transport_unregister - LTTng counter transport unregistration
- * @transport: transport structure
- * Called with ust_lock held.
- */
-void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
-{
- cds_list_del(&transport->node);
-}
-
-/*
- * Needed by comm layer.
- */
-struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
- const struct lttng_ust_enum_desc *enum_desc)
-{
- struct lttng_enum *_enum;
- struct cds_hlist_head *head;
- struct cds_hlist_node *node;
- size_t name_len = strlen(enum_desc->name);
- uint32_t hash;
-
- hash = jhash(enum_desc->name, name_len, 0);
- head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
- cds_hlist_for_each_entry(_enum, node, head, hlist) {
- assert(_enum->desc);
- if (_enum->desc == enum_desc)
- return _enum;
- }
- return NULL;
-}
-
-size_t lttng_ust_dummy_get_size(void *priv __attribute__((unused)),
- size_t offset)
-{
- size_t size = 0;
-
- size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
- size += sizeof(char); /* tag */
- return size;
-}
-
-void lttng_ust_dummy_record(void *priv __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan)
-{
- char sel_char = (char) LTTNG_UST_DYNAMIC_TYPE_NONE;
-
- chan->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(sel_char));
-}
-
-void lttng_ust_dummy_get_value(void *priv __attribute__((unused)),
- struct lttng_ust_ctx_value *value)
-{
- value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
-}
-
-int lttng_context_is_app(const char *name)
-{
- if (strncmp(name, "$app.", strlen("$app.")) != 0) {
- return 0;
- }
- return 1;
-}
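-
-/*
- * Worked example (illustration only, not part of the original file):
- *
- *	lttng_context_is_app("$app.myprovider:myctx")	returns 1
- *	lttng_context_is_app("vpid")			returns 0
- */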
-
-struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
-{
- struct lttng_ust_channel_buffer *lttng_chan_buf;
- struct lttng_ust_channel_common *lttng_chan_common;
- struct lttng_ust_channel_buffer_private *lttng_chan_buf_priv;
-
- lttng_chan_buf = zmalloc(sizeof(struct lttng_ust_channel_buffer));
- if (!lttng_chan_buf)
- goto lttng_chan_buf_error;
- lttng_chan_buf->struct_size = sizeof(struct lttng_ust_channel_buffer);
- lttng_chan_common = zmalloc(sizeof(struct lttng_ust_channel_common));
- if (!lttng_chan_common)
- goto lttng_chan_common_error;
- lttng_chan_common->struct_size = sizeof(struct lttng_ust_channel_common);
- lttng_chan_buf_priv = zmalloc(sizeof(struct lttng_ust_channel_buffer_private));
- if (!lttng_chan_buf_priv)
- goto lttng_chan_buf_priv_error;
- lttng_chan_buf->parent = lttng_chan_common;
- lttng_chan_common->type = LTTNG_UST_CHANNEL_TYPE_BUFFER;
- lttng_chan_common->child = lttng_chan_buf;
- lttng_chan_buf->priv = lttng_chan_buf_priv;
-	lttng_chan_common->priv = &lttng_chan_buf_priv->parent;
- lttng_chan_buf_priv->pub = lttng_chan_buf;
- lttng_chan_buf_priv->parent.pub = lttng_chan_common;
-
- return lttng_chan_buf;
-
-lttng_chan_buf_priv_error:
- free(lttng_chan_common);
-lttng_chan_common_error:
- free(lttng_chan_buf);
-lttng_chan_buf_error:
- return NULL;
-}
-
-void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
-{
- switch (chan->type) {
- case LTTNG_UST_CHANNEL_TYPE_BUFFER:
- {
- struct lttng_ust_channel_buffer *chan_buf;
-
- chan_buf = (struct lttng_ust_channel_buffer *)chan->child;
- free(chan_buf->parent);
- free(chan_buf->priv);
- free(chan_buf);
- break;
- }
- default:
- abort();
- }
-}
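-
-/*
- * Usage sketch (illustration only, not part of the original file): a
- * buffer channel allocated above is freed through its common parent,
- * which dispatches on the channel type.
- */
-static int example_channel_alloc_free(void)
-{
-	struct lttng_ust_channel_buffer *chan;
-
-	chan = lttng_ust_alloc_channel_buffer();
-	if (!chan)
-		return -1;
-	/* ... configure and use chan ... */
-	lttng_ust_free_channel_common(chan->parent);
-	return 0;
-}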
-
-void lttng_ust_ring_buffer_clients_init(void)
-{
- lttng_ring_buffer_metadata_client_init();
- lttng_ring_buffer_client_overwrite_init();
- lttng_ring_buffer_client_overwrite_rt_init();
- lttng_ring_buffer_client_discard_init();
- lttng_ring_buffer_client_discard_rt_init();
-}
-
-void lttng_ust_ring_buffer_clients_exit(void)
-{
- lttng_ring_buffer_client_discard_rt_exit();
- lttng_ring_buffer_client_discard_exit();
- lttng_ring_buffer_client_overwrite_rt_exit();
- lttng_ring_buffer_client_overwrite_exit();
- lttng_ring_buffer_metadata_client_exit();
-}
-
-void lttng_ust_counter_clients_init(void)
-{
- lttng_counter_client_percpu_64_modular_init();
- lttng_counter_client_percpu_32_modular_init();
-}
-
-void lttng_ust_counter_clients_exit(void)
-{
- lttng_counter_client_percpu_32_modular_exit();
- lttng_counter_client_percpu_64_modular_exit();
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright 2019 (c) Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_EVENTS_INTERNAL_H
-#define _LTTNG_UST_EVENTS_INTERNAL_H
-
-#include <limits.h>
-#include <stdint.h>
-
-#include <urcu/list.h>
-#include <urcu/hlist.h>
-
-#include <lttng/ust-events.h>
-
-#include <ust-helper.h>
-#include "ust-context-provider.h"
-
-struct lttng_ust_abi_obj;
-struct lttng_event_notifier_group;
-
-union lttng_ust_abi_args {
- struct {
- void *chan_data;
- int wakeup_fd;
- } channel;
- struct {
- int shm_fd;
- int wakeup_fd;
- } stream;
- struct {
- struct lttng_ust_abi_field_iter entry;
- } field_list;
- struct {
- char *ctxname;
- } app_context;
- struct {
- int event_notifier_notif_fd;
- } event_notifier_handle;
- struct {
- void *counter_data;
- } counter;
- struct {
- int shm_fd;
- } counter_shm;
-};
-
-struct lttng_ust_abi_objd_ops {
- long (*cmd)(int objd, unsigned int cmd, unsigned long arg,
- union lttng_ust_abi_args *args, void *owner);
- int (*release)(int objd);
-};
-
-enum lttng_enabler_format_type {
- LTTNG_ENABLER_FORMAT_STAR_GLOB,
- LTTNG_ENABLER_FORMAT_EVENT,
-};
-
-/*
- * Enabler field, within whatever object is enabling an event. Target of
- * backward reference.
- */
-struct lttng_enabler {
- enum lttng_enabler_format_type format_type;
-
- /* head list of struct lttng_ust_filter_bytecode_node */
- struct cds_list_head filter_bytecode_head;
- /* head list of struct lttng_ust_excluder_node */
- struct cds_list_head excluder_head;
-
- struct lttng_ust_abi_event event_param;
- unsigned int enabled:1;
-};
-
-struct lttng_event_enabler {
- struct lttng_enabler base;
- struct cds_list_head node; /* per-session list of enablers */
- struct lttng_ust_channel_buffer *chan;
- /*
- * Unused, but kept around to make it explicit that the tracer can do
- * it.
- */
- struct lttng_ust_ctx *ctx;
-};
-
-struct lttng_event_notifier_enabler {
- struct lttng_enabler base;
- uint64_t error_counter_index;
- struct cds_list_head node; /* per-app list of event_notifier enablers */
- struct cds_list_head capture_bytecode_head;
- struct lttng_event_notifier_group *group; /* weak ref */
- uint64_t user_token; /* User-provided token */
- uint64_t num_captures;
-};
-
-enum lttng_ust_bytecode_type {
- LTTNG_UST_BYTECODE_TYPE_FILTER,
- LTTNG_UST_BYTECODE_TYPE_CAPTURE,
-};
-
-struct lttng_ust_bytecode_node {
- enum lttng_ust_bytecode_type type;
- struct cds_list_head node;
- struct lttng_enabler *enabler;
- struct {
- uint32_t len;
- uint32_t reloc_offset;
- uint64_t seqnum;
- char data[];
- } bc;
-};
-
-/*
- * Bytecode interpreter return value.
- */
-enum lttng_ust_bytecode_interpreter_ret {
- LTTNG_UST_BYTECODE_INTERPRETER_ERROR = -1,
- LTTNG_UST_BYTECODE_INTERPRETER_OK = 0,
-};
-
-struct lttng_interpreter_output;
-struct lttng_ust_bytecode_runtime_private;
-
-enum lttng_ust_bytecode_filter_result {
- LTTNG_UST_BYTECODE_FILTER_ACCEPT = 0,
- LTTNG_UST_BYTECODE_FILTER_REJECT = 1,
-};
-
-struct lttng_ust_bytecode_filter_ctx {
- enum lttng_ust_bytecode_filter_result result;
-};
-
-struct lttng_ust_excluder_node {
- struct cds_list_head node;
- struct lttng_enabler *enabler;
- /*
-	 * struct lttng_ust_abi_event_exclusion has a variable-sized
-	 * array; it must be the last field.
- */
- struct lttng_ust_abi_event_exclusion excluder;
-};
-
-/* Data structures used by the tracer. */
-
-struct tp_list_entry {
- struct lttng_ust_abi_tracepoint_iter tp;
- struct cds_list_head head;
-};
-
-struct lttng_ust_tracepoint_list {
- struct tp_list_entry *iter;
- struct cds_list_head head;
-};
-
-struct tp_field_list_entry {
- struct lttng_ust_abi_field_iter field;
- struct cds_list_head head;
-};
-
-struct lttng_ust_field_list {
- struct tp_field_list_entry *iter;
- struct cds_list_head head;
-};
-
-/*
- * Objects in a linked-list of enablers, owned by an event or event_notifier.
- * This is used because an event (or an event_notifier) can be enabled by more
- * than one enabler and we want a quick way to iterate over all enablers of an
- * object.
- *
- * For example, event rules "my_app:a*" and "my_app:ab*" will both match the
- * event with the name "my_app:abc".
- */
-struct lttng_enabler_ref {
- struct cds_list_head node; /* enabler ref list */
- struct lttng_enabler *ref; /* backward ref */
-};
-
-#define LTTNG_COUNTER_DIMENSION_MAX 8
-struct lttng_counter_dimension {
- uint64_t size;
- uint64_t underflow_index;
- uint64_t overflow_index;
- uint8_t has_underflow;
- uint8_t has_overflow;
-};
-
-struct lttng_counter_ops {
- struct lib_counter *(*counter_create)(size_t nr_dimensions,
- const struct lttng_counter_dimension *dimensions,
- int64_t global_sum_step,
- int global_counter_fd,
- int nr_counter_cpu_fds,
- const int *counter_cpu_fds,
- bool is_daemon);
- void (*counter_destroy)(struct lib_counter *counter);
- int (*counter_add)(struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t v);
- int (*counter_read)(struct lib_counter *counter,
- const size_t *dimension_indexes, int cpu,
- int64_t *value, bool *overflow, bool *underflow);
- int (*counter_aggregate)(struct lib_counter *counter,
- const size_t *dimension_indexes, int64_t *value,
- bool *overflow, bool *underflow);
- int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
-};
-
-struct lttng_counter {
- int objd;
- struct lttng_event_notifier_group *event_notifier_group; /* owner */
- struct lttng_counter_transport *transport;
- struct lib_counter *counter;
- struct lttng_counter_ops *ops;
-};
-
-#define LTTNG_UST_EVENT_HT_BITS 12
-#define LTTNG_UST_EVENT_HT_SIZE (1U << LTTNG_UST_EVENT_HT_BITS)
-
-struct lttng_ust_event_ht {
- struct cds_hlist_head table[LTTNG_UST_EVENT_HT_SIZE];
-};
-
-#define LTTNG_UST_EVENT_NOTIFIER_HT_BITS 12
-#define LTTNG_UST_EVENT_NOTIFIER_HT_SIZE (1U << LTTNG_UST_EVENT_NOTIFIER_HT_BITS)
-struct lttng_ust_event_notifier_ht {
- struct cds_hlist_head table[LTTNG_UST_EVENT_NOTIFIER_HT_SIZE];
-};
-
-#define LTTNG_UST_ENUM_HT_BITS 12
-#define LTTNG_UST_ENUM_HT_SIZE (1U << LTTNG_UST_ENUM_HT_BITS)
-
-struct lttng_ust_enum_ht {
- struct cds_hlist_head table[LTTNG_UST_ENUM_HT_SIZE];
-};
-
-struct lttng_event_notifier_group {
- int objd;
- void *owner;
- int notification_fd;
- struct cds_list_head node; /* Event notifier group handle list */
- struct cds_list_head enablers_head;
- struct cds_list_head event_notifiers_head; /* list of event_notifiers */
- struct lttng_ust_event_notifier_ht event_notifiers_ht; /* hashtable of event_notifiers */
- struct lttng_ust_ctx *ctx; /* contexts for filters. */
-
- struct lttng_counter *error_counter;
- size_t error_counter_len;
-};
-
-struct lttng_transport {
- const char *name;
- struct cds_list_head node;
- struct lttng_ust_channel_buffer_ops ops;
- const struct lttng_ust_lib_ring_buffer_config *client_config;
-};
-
-struct lttng_counter_transport {
- const char *name;
- struct cds_list_head node;
- struct lttng_counter_ops ops;
- const struct lib_counter_config *client_config;
-};
-
-struct lttng_ust_event_common_private {
- struct lttng_ust_event_common *pub; /* Public event interface */
-
- const struct lttng_ust_event_desc *desc;
- /* Backward references: list of lttng_enabler_ref (ref to enablers) */
- struct cds_list_head enablers_ref_head;
- int registered; /* has reg'd tracepoint probe */
- uint64_t user_token;
-
- int has_enablers_without_filter_bytecode;
- /* list of struct lttng_ust_bytecode_runtime, sorted by seqnum */
- struct cds_list_head filter_bytecode_runtime_head;
-};
-
-struct lttng_ust_event_recorder_private {
- struct lttng_ust_event_common_private parent;
-
- struct lttng_ust_event_recorder *pub; /* Public event interface */
- struct cds_list_head node; /* Event recorder list */
- struct cds_hlist_node hlist; /* Hash table of event recorders */
- struct lttng_ust_ctx *ctx;
- unsigned int id;
-};
-
-struct lttng_ust_event_notifier_private {
- struct lttng_ust_event_common_private parent;
-
- struct lttng_ust_event_notifier *pub; /* Public event notifier interface */
- struct lttng_event_notifier_group *group; /* weak ref */
- size_t num_captures; /* Needed to allocate the msgpack array. */
- uint64_t error_counter_index;
- struct cds_list_head node; /* Event notifier list */
- struct cds_hlist_node hlist; /* Hash table of event notifiers */
- struct cds_list_head capture_bytecode_runtime_head;
-};
-
-struct lttng_ust_bytecode_runtime {
- enum lttng_ust_bytecode_type type;
- struct lttng_ust_bytecode_node *bc;
- int link_failed;
- int (*interpreter_func)(struct lttng_ust_bytecode_runtime *bytecode_runtime,
- const char *interpreter_stack_data,
- void *ctx);
- struct cds_list_head node; /* list of bytecode runtime in event */
- /*
-	 * Pointer to a URCU-protected pointer owned by a `struct
-	 * lttng_session` or `struct lttng_event_notifier_group`.
- */
- struct lttng_ust_ctx **pctx;
-};
-
-struct lttng_ust_session_private {
- struct lttng_ust_session *pub; /* Public session interface */
-
-	int been_active;			/* Been active? */
- int objd; /* Object associated */
- struct cds_list_head chan_head; /* Channel list head */
- struct cds_list_head events_head; /* list of events */
- struct cds_list_head node; /* Session list */
-
- /* List of enablers */
- struct cds_list_head enablers_head;
- struct lttng_ust_event_ht events_ht; /* ht of events */
- void *owner; /* object owner */
- int tstate:1; /* Transient enable state */
-
- int statedump_pending:1;
-
- struct lttng_ust_enum_ht enums_ht; /* ht of enumerations */
- struct cds_list_head enums_head;
- struct lttng_ust_ctx *ctx; /* contexts for filters. */
-
- unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
-	bool uuid_set;			/* Is uuid set? */
-};
-
-struct lttng_enum {
- const struct lttng_ust_enum_desc *desc;
- struct lttng_ust_session *session;
- struct cds_list_head node; /* Enum list in session */
- struct cds_hlist_node hlist; /* Session ht of enums */
- uint64_t id; /* Enumeration ID in sessiond */
-};
-
-struct lttng_ust_shm_handle;
-
-struct lttng_ust_channel_buffer_ops_private {
- struct lttng_ust_channel_buffer_ops *pub; /* Public channel buffer ops interface */
-
- struct lttng_ust_channel_buffer *(*channel_create)(const char *name,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- unsigned char *uuid,
- uint32_t chan_id,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout);
- void (*channel_destroy)(struct lttng_ust_channel_buffer *chan);
- /*
- * packet_avail_size returns the available size in the current
- * packet. Note that the size returned is only a hint, since it
- * may change due to concurrent writes.
- */
- size_t (*packet_avail_size)(struct lttng_ust_channel_buffer *chan);
- int (*is_finalized)(struct lttng_ust_channel_buffer *chan);
- int (*is_disabled)(struct lttng_ust_channel_buffer *chan);
- int (*flush_buffer)(struct lttng_ust_channel_buffer *chan);
-};
-
-struct lttng_ust_channel_common_private {
- struct lttng_ust_channel_common *pub; /* Public channel interface */
-
- int objd; /* Object associated with channel. */
- int tstate:1; /* Transient enable state */
-};
-
-struct lttng_ust_channel_buffer_private {
- struct lttng_ust_channel_common_private parent;
-
- struct lttng_ust_channel_buffer *pub; /* Public channel buffer interface */
- struct cds_list_head node; /* Channel list in session */
- int header_type; /* 0: unset, 1: compact, 2: large */
- unsigned int id; /* Channel ID */
- enum lttng_ust_abi_chan_type type;
- struct lttng_ust_ctx *ctx;
- struct lttng_ust_lib_ring_buffer_channel *rb_chan; /* Ring buffer channel */
- unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
-};
-
-/*
- * IMPORTANT: this structure is part of the ABI between the consumer
- * daemon and the UST library within traced applications. Changing it
- * breaks the UST communication protocol.
- *
- * TODO: remove unused fields on next UST communication protocol
- * breaking update.
- */
-struct lttng_ust_abi_channel_config {
- void *unused1;
- int unused2;
- void *unused3;
- void *unused4;
- int unused5;
- struct cds_list_head unused6;
- void *unused7;
- int unused8;
- void *unused9;
-
- /* Channel ID */
- unsigned int id;
- enum lttng_ust_abi_chan_type unused10;
- unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
- int unused11:1;
-};
-
-/* Global (filter), event and channel contexts. */
-struct lttng_ust_ctx {
- struct lttng_ust_ctx_field *fields;
- unsigned int nr_fields;
- unsigned int allocated_fields;
- unsigned int largest_align;
-};
-
-struct lttng_ust_registered_probe {
- const struct lttng_ust_probe_desc *desc;
-
- struct cds_list_head head; /* chain registered probes */
- struct cds_list_head lazy_init_head;
- int lazy; /* lazy registration */
-};
-
-/*
- * Context field
- */
-
-struct lttng_ust_ctx_field {
- const struct lttng_ust_event_field *event_field;
- size_t (*get_size)(void *priv, size_t offset);
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan);
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value);
- void (*destroy)(void *priv);
- void *priv;
-};
-
-static inline
-const struct lttng_ust_type_integer *lttng_ust_get_type_integer(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_integer)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_integer, parent);
-}
-
-static inline
-const struct lttng_ust_type_float *lttng_ust_get_type_float(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_float)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_float, parent);
-}
-
-static inline
-const struct lttng_ust_type_string *lttng_ust_get_type_string(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_string)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_string, parent);
-}
-
-static inline
-const struct lttng_ust_type_enum *lttng_ust_get_type_enum(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_enum)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_enum, parent);
-}
-
-static inline
-const struct lttng_ust_type_array *lttng_ust_get_type_array(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_array)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_array, parent);
-}
-
-static inline
-const struct lttng_ust_type_sequence *lttng_ust_get_type_sequence(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_sequence)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_sequence, parent);
-}
-
-static inline
-const struct lttng_ust_type_struct *lttng_ust_get_type_struct(const struct lttng_ust_type_common *type)
-{
- if (type->type != lttng_ust_type_struct)
- return NULL;
- return caa_container_of(type, const struct lttng_ust_type_struct, parent);
-}
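-
-/*
- * Illustrative sketch (not part of the original file): the accessors
- * above return NULL on a type-tag mismatch, so a checked downcast looks
- * like this. The helper name is hypothetical.
- */
-static inline unsigned int example_integer_size_or_zero(
-		const struct lttng_ust_type_common *type)
-{
-	const struct lttng_ust_type_integer *itype;
-
-	itype = lttng_ust_get_type_integer(type);
-	if (!itype)
-		return 0;	/* Not an integer type. */
-	return itype->size;
-}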
-
-#define lttng_ust_static_type_integer(_size, _alignment, _signedness, _byte_order, _base) \
- ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_integer, { \
- .parent = { \
- .type = lttng_ust_type_integer, \
- }, \
- .struct_size = sizeof(struct lttng_ust_type_integer), \
- .size = (_size), \
- .alignment = (_alignment), \
- .signedness = (_signedness), \
- .reverse_byte_order = (_byte_order) != BYTE_ORDER, \
- .base = (_base), \
- }))
-
-#define lttng_ust_static_type_array_text(_length) \
- ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_array, { \
- .parent = { \
- .type = lttng_ust_type_array, \
- }, \
- .struct_size = sizeof(struct lttng_ust_type_array), \
- .length = (_length), \
- .alignment = 0, \
- .encoding = lttng_ust_string_encoding_UTF8, \
- .elem_type = lttng_ust_static_type_integer(sizeof(char) * CHAR_BIT, \
- lttng_ust_rb_alignof(char) * CHAR_BIT, lttng_ust_is_signed_type(char), \
- BYTE_ORDER, 10), \
- }))
-
-#define lttng_ust_static_event_field(_name, _type, _nowrite, _nofilter) \
- __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, { \
- .struct_size = sizeof(struct lttng_ust_event_field), \
- .name = (_name), \
- .type = (_type), \
- .nowrite = (_nowrite), \
- .nofilter = (_nofilter), \
- })
-
-#define lttng_ust_static_ctx_field(_event_field, _get_size, _record, _get_value, _destroy, _priv) \
- __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_ctx_field, { \
- .event_field = (_event_field), \
- .get_size = (_get_size), \
- .record = (_record), \
- .get_value = (_get_value), \
- .destroy = (_destroy), \
- .priv = (_priv), \
- })
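-
-/*
- * Composition sketch (not part of the original file): the compound-
- * literal macros above combine to declare a context field recording a
- * single byte. "my_byte" is a hypothetical field name, and this assumes
- * the lttng_ust_dummy_* context callbacks defined elsewhere in this
- * tree are visible here.
- */
-static const struct lttng_ust_ctx_field *example_ctx_field =
-	lttng_ust_static_ctx_field(
-		lttng_ust_static_event_field("my_byte",
-			lttng_ust_static_type_integer(sizeof(char) * CHAR_BIT,
-				lttng_ust_rb_alignof(char) * CHAR_BIT,
-				lttng_ust_is_signed_type(char),
-				BYTE_ORDER, 10),
-			0, 0),
-		lttng_ust_dummy_get_size, lttng_ust_dummy_record,
-		lttng_ust_dummy_get_value, NULL, NULL);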
-
-static inline
-struct lttng_enabler *lttng_event_enabler_as_enabler(
- struct lttng_event_enabler *event_enabler)
-{
- return &event_enabler->base;
-}
-
-static inline
-struct lttng_enabler *lttng_event_notifier_enabler_as_enabler(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
-{
- return &event_notifier_enabler->base;
-}
-
-/*
- * Allocate and initialize a `struct lttng_event_enabler` object.
- *
- * On success, returns a `struct lttng_event_enabler`;
- * on memory error, returns NULL.
- */
-struct lttng_event_enabler *lttng_event_enabler_create(
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event *event_param,
- struct lttng_ust_channel_buffer *chan)
- __attribute__((visibility("hidden")));
-
-/*
- * Destroy a `struct lttng_event_enabler` object.
- */
-void lttng_event_enabler_destroy(struct lttng_event_enabler *enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Enable a `struct lttng_event_enabler` object and all events related to this
- * enabler.
- */
-int lttng_event_enabler_enable(struct lttng_event_enabler *enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Disable a `struct lttng_event_enabler` object and all events related to this
- * enabler.
- */
-int lttng_event_enabler_disable(struct lttng_event_enabler *enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach filter bytecode program to `struct lttng_event_enabler` and all
- * events related to this enabler.
- */
-int lttng_event_enabler_attach_filter_bytecode(
- struct lttng_event_enabler *enabler,
- struct lttng_ust_bytecode_node **bytecode)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach an application context to an event enabler.
- *
- * Not implemented.
- */
-int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
- struct lttng_ust_abi_context *ctx)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach exclusion list to `struct lttng_event_enabler` and all
- * events related to this enabler.
- */
-int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *enabler,
- struct lttng_ust_excluder_node **excluder)
- __attribute__((visibility("hidden")));
-
-/*
- * Synchronize bytecodes for the enabler and the instance (event or
- * event_notifier).
- *
- * This function goes over all bytecode programs of the enabler (event or
- * event_notifier enabler) to ensure each is linked to the provided instance.
- */
-void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
- struct lttng_ust_ctx **ctx,
- struct cds_list_head *instance_bytecode_runtime_head,
- struct cds_list_head *enabler_bytecode_runtime_head)
- __attribute__((visibility("hidden")));
-
-/*
- * Allocate and initialize a `struct lttng_event_notifier_group` object.
- *
- * On success, returns a `struct lttng_event_notifier_group`;
- * on memory error, returns NULL.
- */
-struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
- __attribute__((visibility("hidden")));
-
-/*
- * Destroy a `struct lttng_event_notifier_group` object.
- */
-void lttng_event_notifier_group_destroy(
- struct lttng_event_notifier_group *event_notifier_group)
- __attribute__((visibility("hidden")));
-
-/*
- * Allocate and initialize a `struct lttng_event_notifier_enabler` object.
- *
- * On success, returns a `struct lttng_event_notifier_enabler`;
- * on memory error, returns NULL.
- */
-struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
- struct lttng_event_notifier_group *event_notifier_group,
- enum lttng_enabler_format_type format_type,
- struct lttng_ust_abi_event_notifier *event_notifier_param)
- __attribute__((visibility("hidden")));
-
-/*
- * Destroy a `struct lttng_event_notifier_enabler` object.
- */
-void lttng_event_notifier_enabler_destroy(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Enable a `struct lttng_event_notifier_enabler` object and all event
- * notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_enable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Disable a `struct lttng_event_notifier_enabler` object and all event
- * notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_disable(
- struct lttng_event_notifier_enabler *event_notifier_enabler)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach filter bytecode program to `struct lttng_event_notifier_enabler` and
- * all event notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_filter_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach capture bytecode program to `struct lttng_event_notifier_enabler` and
- * all event_notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_capture_bytecode(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_bytecode_node **bytecode)
- __attribute__((visibility("hidden")));
-
-/*
- * Attach exclusion list to `struct lttng_event_notifier_enabler` and all
- * event notifiers related to this enabler.
- */
-int lttng_event_notifier_enabler_attach_exclusion(
- struct lttng_event_notifier_enabler *event_notifier_enabler,
- struct lttng_ust_excluder_node **excluder)
- __attribute__((visibility("hidden")));
-
-void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
- __attribute__((visibility("hidden")));
-
-/*
- * Connect the probe on all enablers matching this event description.
- * Called on library load.
- */
-int lttng_fix_pending_event_notifiers(void)
- __attribute__((visibility("hidden")));
-
-struct lttng_counter *lttng_ust_counter_create(
- const char *counter_transport_name,
- size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
- __attribute__((visibility("hidden")));
-
-#ifdef HAVE_LINUX_PERF_EVENT_H
-
-int lttng_add_perf_counter_to_ctx(uint32_t type,
- uint64_t config,
- const char *name,
- struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_perf_counter_init(void)
- __attribute__((visibility("hidden")));
-
-void lttng_perf_counter_exit(void)
- __attribute__((visibility("hidden")));
-
-#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
-
-static inline
-int lttng_add_perf_counter_to_ctx(uint32_t type,
- uint64_t config,
- const char *name,
- struct lttng_ust_ctx **ctx)
-{
- return -ENOSYS;
-}
-static inline
-int lttng_perf_counter_init(void)
-{
- return 0;
-}
-static inline
-void lttng_perf_counter_exit(void)
-{
-}
-#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
-
-int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
- __attribute__((visibility("hidden")));
-
-void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
- __attribute__((visibility("hidden")));
-
-int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
- __attribute__((visibility("hidden")));
-
-void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_abi_tracepoint_iter *
- lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_abi_field_iter *
- lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_session *lttng_session_create(void)
- __attribute__((visibility("hidden")));
-
-int lttng_session_enable(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-int lttng_session_disable(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-int lttng_session_statedump(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-void lttng_session_destroy(struct lttng_ust_session *session)
- __attribute__((visibility("hidden")));
-
-/*
- * Called with ust lock held.
- */
-int lttng_session_active(void)
- __attribute__((visibility("hidden")));
-
-struct cds_list_head *lttng_get_sessions(void)
- __attribute__((visibility("hidden")));
-
-void lttng_handle_pending_statedump(void *owner)
- __attribute__((visibility("hidden")));
-
-int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
- __attribute__((visibility("hidden")));
-
-int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
- __attribute__((visibility("hidden")));
-
-void lttng_transport_register(struct lttng_transport *transport)
- __attribute__((visibility("hidden")));
-
-void lttng_transport_unregister(struct lttng_transport *transport)
- __attribute__((visibility("hidden")));
-
-/* This is ABI between liblttng-ust and liblttng-ust-ctl */
-struct lttng_transport *lttng_ust_transport_find(const char *name);
-
-/* This is ABI between liblttng-ust and liblttng-ust-dl */
-void lttng_ust_dl_update(void *ip);
-
-void lttng_probe_provider_unregister_events(const struct lttng_ust_probe_desc *desc)
- __attribute__((visibility("hidden")));
-
-int lttng_fix_pending_events(void)
- __attribute__((visibility("hidden")));
-
-struct cds_list_head *lttng_get_probe_list_head(void)
- __attribute__((visibility("hidden")));
-
-struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
- const struct lttng_ust_enum_desc *enum_desc)
- __attribute__((visibility("hidden")));
-
-int lttng_abi_create_root_handle(void)
- __attribute__((visibility("hidden")));
-
-const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_abi_objd_unref(int id, int is_owner)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_abi_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_abi_events_exit(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
- __attribute__((visibility("hidden")));
-
-struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
- const char *interpreter_stack_data,
- void *filter_ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
- unsigned char *uuid)
- __attribute__((visibility("hidden")));
-
-bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
- char *name)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_add_app_context_to_ctx_rcu(const char *name, struct lttng_ust_ctx **ctx)
- __attribute__((visibility("hidden")));
-
-int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
- const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_context_set_session_provider(const char *name,
- size_t (*get_size)(void *priv, size_t offset),
- void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_channel_buffer *chan),
- void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
- void *priv)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_EVENTS_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#define TRACEPOINT_CREATE_PROBES
-#define TP_IP_PARAM ip
-#include "ust_lib.h"
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
- * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
- * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#undef TRACEPOINT_PROVIDER
-#define TRACEPOINT_PROVIDER lttng_ust_lib
-
-#if !defined(_TRACEPOINT_UST_LIB_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
-#define _TRACEPOINT_UST_LIB_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-
-#define LTTNG_UST_LIB_PROVIDER
-#include <lttng/tracepoint.h>
-
-TRACEPOINT_EVENT(lttng_ust_lib, load,
- TP_ARGS(void *, ip, void *, baddr, const char*, path,
- uint64_t, memsz, uint8_t, has_build_id,
- uint8_t, has_debug_link),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint64_t, memsz, memsz)
- ctf_string(path, path)
- ctf_integer(uint8_t, has_build_id, has_build_id)
- ctf_integer(uint8_t, has_debug_link, has_debug_link)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_lib, build_id,
- TP_ARGS(
- void *, ip,
- void *, baddr,
- uint8_t *, build_id,
- size_t, build_id_len
- ),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_sequence_hex(uint8_t, build_id, build_id,
- size_t, build_id_len)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_lib, debug_link,
- TP_ARGS(
- void *, ip,
- void *, baddr,
- char *, filename,
- uint32_t, crc
- ),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- ctf_integer(uint32_t, crc, crc)
- ctf_string(filename, filename)
- )
-)
-
-TRACEPOINT_EVENT(lttng_ust_lib, unload,
- TP_ARGS(void *, ip, void *, baddr),
- TP_FIELDS(
- ctf_unused(ip)
- ctf_integer_hex(void *, baddr, baddr)
- )
-)
-
-#endif /* _TRACEPOINT_UST_LIB_H */
-
-#undef TRACEPOINT_INCLUDE
-#define TRACEPOINT_INCLUDE "./ust_lib.h"
-
-/* This part must be outside ifdef protection */
-#include <lttng/tracepoint-event.h>
-
-#ifdef __cplusplus
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _UST_WAIT_H
-#define _UST_WAIT_H
-
-#include <poll.h>
-
-/*
- * Wait until "cond" becomes true or the timeout (in ms) expires.
- */
-#define wait_cond_interruptible_timeout(_cond, _timeout) \
- ({ \
- int __ret = 0, __pollret; \
- int __timeout = _timeout; \
- \
- for (;;) { \
- if (_cond) \
- break; \
- if (__timeout <= 0) { \
- __ret = -ETIMEDOUT; \
- break; \
- } \
- __pollret = poll(NULL, 0, 10); /* wait 10ms */ \
- if (__pollret < 0) { \
- __ret = -errno; \
- break; \
- } \
- __timeout -= 10; \
- } \
- __ret; \
- })
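-
-/*
- * Usage sketch (not part of the original header): wait up to 100 ms for
- * a flag set by another thread. `ready_flag` is a hypothetical variable
- * introduced for illustration; the macro polls in 10 ms slices.
- */
-#include <errno.h>
-
-static inline int example_wait_for_flag(volatile int *ready_flag)
-{
-	int ret;
-
-	ret = wait_cond_interruptible_timeout(*ready_flag, 100);
-	if (ret == -ETIMEDOUT) {
-		/* Flag was not raised within 100 ms. */
-	}
-	return ret;
-}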
-
-
-#endif /* _UST_WAIT_H */
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -fno-strict-aliasing
-
-noinst_LTLIBRARIES = libmsgpack.la
-
-libmsgpack_la_SOURCES = \
- msgpack.c msgpack.h
-
-libmsgpack_la_CFLAGS = -DUST_COMPONENT="libmsgpack" $(AM_CFLAGS)
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-
-#define MSGPACK_FIXSTR_ID_MASK 0xA0
-#define MSGPACK_FIXMAP_ID_MASK 0x80
-#define MSGPACK_FIXARRAY_ID_MASK 0x90
-
-#define MSGPACK_NIL_ID 0xC0
-#define MSGPACK_FALSE_ID 0xC2
-#define MSGPACK_TRUE_ID 0xC3
-#define MSGPACK_MAP16_ID 0xDE
-#define MSGPACK_ARRAY16_ID 0xDC
-
-#define MSGPACK_UINT8_ID 0xCC
-#define MSGPACK_UINT16_ID 0xCD
-#define MSGPACK_UINT32_ID 0xCE
-#define MSGPACK_UINT64_ID 0xCF
-
-#define MSGPACK_INT8_ID 0xD0
-#define MSGPACK_INT16_ID 0xD1
-#define MSGPACK_INT32_ID 0xD2
-#define MSGPACK_INT64_ID 0xD3
-
-#define MSGPACK_FLOAT64_ID 0xCB
-#define MSGPACK_STR16_ID 0xDA
-
-#define MSGPACK_FIXINT_MAX ((1 << 7) - 1)
-#define MSGPACK_FIXINT_MIN -(1 << 5)
-#define MSGPACK_FIXMAP_MAX_COUNT 15
-#define MSGPACK_FIXARRAY_MAX_COUNT 15
-#define MSGPACK_FIXSTR_MAX_LENGTH 31
-
-#ifdef __KERNEL__
-#include <linux/bug.h>
-#include <linux/string.h>
-#include <linux/types.h>
-
-#include <lttng/msgpack.h>
-
-#define INT8_MIN (-128)
-#define INT16_MIN (-32767-1)
-#define INT32_MIN (-2147483647-1)
-#define INT8_MAX (127)
-#define INT16_MAX (32767)
-#define INT32_MAX (2147483647)
-#define UINT8_MAX (255)
-#define UINT16_MAX (65535)
-#define UINT32_MAX (4294967295U)
-
-#define byteswap_host_to_be16(_tmp) cpu_to_be16(_tmp)
-#define byteswap_host_to_be32(_tmp) cpu_to_be32(_tmp)
-#define byteswap_host_to_be64(_tmp) cpu_to_be64(_tmp)
-
-#define lttng_msgpack_assert(cond) WARN_ON(!(cond))
-
-#else /* __KERNEL__ */
-
-#include <lttng/ust-endian.h>
-#include <stdio.h>
-#include <string.h>
-
-#include "msgpack.h"
-
-#define byteswap_host_to_be16(_tmp) htobe16(_tmp)
-#define byteswap_host_to_be32(_tmp) htobe32(_tmp)
-#define byteswap_host_to_be64(_tmp) htobe64(_tmp)
-
-#define lttng_msgpack_assert(cond) ({ \
- if (!(cond)) \
- fprintf(stderr, "Assertion failed. %s:%d\n", __FILE__, __LINE__); \
- })
-#endif /* __KERNEL__ */
-
-static inline int lttng_msgpack_append_buffer(
- struct lttng_msgpack_writer *writer,
- const uint8_t *buf,
- size_t length)
-{
- int ret = 0;
-
- lttng_msgpack_assert(buf);
-
- /* Ensure we are not trying to write after the end of the buffer. */
- if (writer->write_pos + length > writer->end_write_pos) {
- ret = -1;
- goto end;
- }
-
- memcpy(writer->write_pos, buf, length);
- writer->write_pos += length;
-end:
- return ret;
-}
-
-static inline int lttng_msgpack_append_u8(
- struct lttng_msgpack_writer *writer, uint8_t value)
-{
- return lttng_msgpack_append_buffer(writer, &value, sizeof(value));
-}
-
-static inline int lttng_msgpack_append_u16(
- struct lttng_msgpack_writer *writer, uint16_t value)
-{
- value = byteswap_host_to_be16(value);
-
- return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
-}
-
-static inline int lttng_msgpack_append_u32(
- struct lttng_msgpack_writer *writer, uint32_t value)
-{
- value = byteswap_host_to_be32(value);
-
- return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
-}
-
-static inline int lttng_msgpack_append_u64(
- struct lttng_msgpack_writer *writer, uint64_t value)
-{
- value = byteswap_host_to_be64(value);
-
- return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
-}
-
-static inline int lttng_msgpack_append_f64(
- struct lttng_msgpack_writer *writer, double value)
-{
- union {
- double d;
- uint64_t u;
- } u;
-
- u.d = value;
-
- return lttng_msgpack_append_u64(writer, u.u);
-}
-
-static inline int lttng_msgpack_append_i8(
- struct lttng_msgpack_writer *writer, int8_t value)
-{
- return lttng_msgpack_append_u8(writer, (uint8_t) value);
-}
-
-static inline int lttng_msgpack_append_i16(
- struct lttng_msgpack_writer *writer, int16_t value)
-{
- return lttng_msgpack_append_u16(writer, (uint16_t) value);
-}
-
-static inline int lttng_msgpack_append_i32(
- struct lttng_msgpack_writer *writer, int32_t value)
-{
- return lttng_msgpack_append_u32(writer, (uint32_t) value);
-}
-
-static inline int lttng_msgpack_append_i64(
- struct lttng_msgpack_writer *writer, int64_t value)
-{
- return lttng_msgpack_append_u64(writer, (uint64_t) value);
-}
-
-static inline int lttng_msgpack_encode_f64(
- struct lttng_msgpack_writer *writer, double value)
-{
- int ret;
-
- ret = lttng_msgpack_append_u8(writer, MSGPACK_FLOAT64_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_f64(writer, value);
- if (ret)
- goto end;
-
-end:
- return ret;
-}
-
-static inline int lttng_msgpack_encode_fixmap(
- struct lttng_msgpack_writer *writer, uint8_t count)
-{
- int ret = 0;
-
- lttng_msgpack_assert(count <= MSGPACK_FIXMAP_MAX_COUNT);
-
- ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXMAP_ID_MASK | count);
- if (ret)
- goto end;
-
-end:
- return ret;
-}
-
-static inline int lttng_msgpack_encode_map16(
- struct lttng_msgpack_writer *writer, uint16_t count)
-{
- int ret;
-
- lttng_msgpack_assert(count > MSGPACK_FIXMAP_MAX_COUNT);
-
- ret = lttng_msgpack_append_u8(writer, MSGPACK_MAP16_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_u16(writer, count);
- if (ret)
- goto end;
-
-end:
- return ret;
-}
-
-static inline int lttng_msgpack_encode_fixarray(
- struct lttng_msgpack_writer *writer, uint8_t count)
-{
- int ret = 0;
-
- lttng_msgpack_assert(count <= MSGPACK_FIXARRAY_MAX_COUNT);
-
- ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXARRAY_ID_MASK | count);
- if (ret)
- goto end;
-
-end:
- return ret;
-}
-
-static inline int lttng_msgpack_encode_array16(
- struct lttng_msgpack_writer *writer, uint16_t count)
-{
- int ret;
-
- lttng_msgpack_assert(count > MSGPACK_FIXARRAY_MAX_COUNT);
-
- ret = lttng_msgpack_append_u8(writer, MSGPACK_ARRAY16_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_u16(writer, count);
- if (ret)
- goto end;
-
-end:
- return ret;
-}
-
-static inline int lttng_msgpack_encode_fixstr(
- struct lttng_msgpack_writer *writer,
- const char *str,
- uint8_t len)
-{
- int ret;
-
- lttng_msgpack_assert(len <= MSGPACK_FIXSTR_MAX_LENGTH);
-
- ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXSTR_ID_MASK | len);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
- if (ret)
- goto end;
-
-end:
- return ret;
-}
-
-static inline int lttng_msgpack_encode_str16(
- struct lttng_msgpack_writer *writer,
- const char *str,
- uint16_t len)
-{
- int ret;
-
- lttng_msgpack_assert(len > MSGPACK_FIXSTR_MAX_LENGTH);
-
- ret = lttng_msgpack_append_u8(writer, MSGPACK_STR16_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_u16(writer, len);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
- if (ret)
- goto end;
-
-end:
- return ret;
-}
-
-int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count)
-{
- int ret;
-
- if (count >= (1 << 16)) {
- ret = -1;
- goto end;
- }
-
- if (count <= MSGPACK_FIXMAP_MAX_COUNT)
- ret = lttng_msgpack_encode_fixmap(writer, count);
- else
- ret = lttng_msgpack_encode_map16(writer, count);
-
- writer->map_nesting++;
-end:
- return ret;
-}
-
-int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer)
-{
- lttng_msgpack_assert(writer->map_nesting > 0);
- writer->map_nesting--;
- return 0;
-}
-
-int lttng_msgpack_begin_array(
- struct lttng_msgpack_writer *writer, size_t count)
-{
- int ret;
-
- if (count >= (1 << 16)) {
- ret = -1;
- goto end;
- }
-
- if (count <= MSGPACK_FIXARRAY_MAX_COUNT)
- ret = lttng_msgpack_encode_fixarray(writer, count);
- else
- ret = lttng_msgpack_encode_array16(writer, count);
-
- writer->array_nesting++;
-end:
- return ret;
-}
-
-int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer)
-{
- lttng_msgpack_assert(writer->array_nesting > 0);
- writer->array_nesting--;
- return 0;
-}
-
-int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
- const char *str)
-{
- int ret;
- size_t length = strlen(str);
-
- if (length >= (1 << 16)) {
- ret = -1;
- goto end;
- }
-
- if (length <= MSGPACK_FIXSTR_MAX_LENGTH)
- ret = lttng_msgpack_encode_fixstr(writer, str, length);
- else
- ret = lttng_msgpack_encode_str16(writer, str, length);
-
-end:
- return ret;
-}
-
-int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer)
-{
- return lttng_msgpack_append_u8(writer, MSGPACK_NIL_ID);
-}
-
-int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer)
-{
- return lttng_msgpack_append_u8(writer, MSGPACK_TRUE_ID);
-}
-
-int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer)
-{
- return lttng_msgpack_append_u8(writer, MSGPACK_FALSE_ID);
-}
-
-int lttng_msgpack_write_unsigned_integer(
- struct lttng_msgpack_writer *writer, uint64_t value)
-{
- int ret = 0;
-
- if (value <= MSGPACK_FIXINT_MAX) {
- ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
- if (ret)
- goto end;
- } else if (value <= UINT8_MAX) {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT8_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
- if (ret)
- goto end;
- } else if (value <= UINT16_MAX) {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT16_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_u16(writer, (uint16_t) value);
- if (ret)
- goto end;
- } else if (value <= UINT32_MAX) {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT32_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_u32(writer, (uint32_t) value);
- if (ret)
- goto end;
- } else {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT64_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_u64(writer, value);
- if (ret)
- goto end;
- }
-
-end:
- return ret;
-}
-
-int lttng_msgpack_write_signed_integer(struct lttng_msgpack_writer *writer, int64_t value)
-{
- int ret;
-
-	if (value >= MSGPACK_FIXINT_MIN && value <= MSGPACK_FIXINT_MAX) {
- ret = lttng_msgpack_append_i8(writer, (int8_t) value);
- if (ret)
- goto end;
- } else if (value >= INT8_MIN && value <= INT8_MAX) {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_INT8_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_i8(writer, (int8_t) value);
- if (ret)
- goto end;
- } else if (value >= INT16_MIN && value <= INT16_MAX) {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_INT16_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_i16(writer, (int16_t) value);
- if (ret)
- goto end;
- } else if (value >= INT32_MIN && value <= INT32_MAX) {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_INT32_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_i32(writer, (int32_t) value);
- if (ret)
- goto end;
- } else {
- ret = lttng_msgpack_append_u8(writer, MSGPACK_INT64_ID);
- if (ret)
- goto end;
-
- ret = lttng_msgpack_append_i64(writer, value);
- if (ret)
- goto end;
- }
-
-end:
- return ret;
-}
-
-int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value)
-{
- return lttng_msgpack_encode_f64(writer, value);
-}
-
-void lttng_msgpack_writer_init(struct lttng_msgpack_writer *writer,
- uint8_t *buffer, size_t size)
-{
- lttng_msgpack_assert(buffer);
-
- writer->buffer = buffer;
- writer->write_pos = buffer;
- writer->end_write_pos = buffer + size;
-
- writer->array_nesting = 0;
- writer->map_nesting = 0;
-}
-
-void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer)
-{
- memset(writer, 0, sizeof(*writer));
-}
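-
-/*
- * Usage sketch (illustration only, not part of the original file):
- * serialize the map {"msg": "hello", "count": 2} into a stack buffer
- * using the writer API above.
- */
-static void example_msgpack_usage(void)
-{
-	uint8_t buf[64];
-	struct lttng_msgpack_writer writer;
-
-	lttng_msgpack_writer_init(&writer, buf, sizeof(buf));
-	if (lttng_msgpack_begin_map(&writer, 2))
-		goto end;
-	lttng_msgpack_write_str(&writer, "msg");
-	lttng_msgpack_write_str(&writer, "hello");
-	lttng_msgpack_write_str(&writer, "count");
-	lttng_msgpack_write_unsigned_integer(&writer, 2);
-	lttng_msgpack_end_map(&writer);
-	/* writer.write_pos - writer.buffer is the encoded length. */
-end:
-	lttng_msgpack_writer_fini(&writer);
-}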
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_MSGPACK_H
-#define _LTTNG_UST_MSGPACK_H
-
-#include <stddef.h>
-#ifdef __KERNEL__
-#include <linux/types.h>
-#else /* __KERNEL__ */
-#include <stdint.h>
-#endif /* __KERNEL__ */
-
-struct lttng_msgpack_writer {
- uint8_t *buffer;
- uint8_t *write_pos;
- const uint8_t *end_write_pos;
- uint8_t array_nesting;
- uint8_t map_nesting;
-};
-
-void lttng_msgpack_writer_init(
- struct lttng_msgpack_writer *writer,
- uint8_t *buffer, size_t size)
- __attribute__((visibility("hidden")));
-
-void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_write_unsigned_integer(
- struct lttng_msgpack_writer *writer, uint64_t value)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_write_signed_integer(
- struct lttng_msgpack_writer *writer, int64_t value)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
- const char *value)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_begin_array(
- struct lttng_msgpack_writer *writer, size_t count)
- __attribute__((visibility("hidden")));
-
-int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_MSGPACK_H */
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CFLAGS += -fno-strict-aliasing
-
-noinst_LTLIBRARIES = libringbuffer.la
-
-libringbuffer_la_SOURCES = \
- smp.h smp.c getcpu.h \
- shm.c shm.h shm_types.h shm_internal.h \
- ring_buffer_backend.c \
- ring_buffer_frontend.c \
- api.h mmap.h \
- backend.h backend_internal.h backend_types.h \
- frontend_api.h frontend.h frontend_internal.h frontend_types.h \
- nohz.h vatomic.h rb-init.h ringbuffer-config.h
-
-libringbuffer_la_LIBADD = \
- -lrt
-
-if ENABLE_NUMA
-libringbuffer_la_LIBADD += -lnuma
-endif
-
-libringbuffer_la_CFLAGS = -DUST_COMPONENT="libringbuffer" $(AM_CFLAGS)
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring Buffer API.
- */
-
-#ifndef _LTTNG_RING_BUFFER_API_H
-#define _LTTNG_RING_BUFFER_API_H
-
-#include "backend.h"
-#include "frontend.h"
-#include <lttng/ringbuffer-abi.h>
-
-/*
- * ring_buffer_frontend_api.h contains static inline functions that depend on
- * client static inlines. Hence the inclusion of this "api" header only
- * within the client.
- */
-#include "frontend_api.h"
-
-#endif /* _LTTNG_RING_BUFFER_API_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring buffer backend (API).
- *
- * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
- * the reader in flight recorder mode.
- */
-
-#ifndef _LTTNG_RING_BUFFER_BACKEND_H
-#define _LTTNG_RING_BUFFER_BACKEND_H
-
-#include <stddef.h>
-#include <unistd.h>
-
-/* Internal helpers */
-#include "backend_internal.h"
-#include "frontend_internal.h"
-
-/* Ring buffer backend API */
-
-/* Ring buffer backend access (read/write) */
-
-extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t offset, void *dest, size_t len,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t offset, void *dest, size_t len,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/*
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to write directly to this address,
- * as long as the write is never bigger than a page size.
- */
-extern void *
-lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t offset,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern void *
-lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t offset,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/**
- * lib_ring_buffer_write - write data to a buffer backend
- * @config : ring buffer instance configuration
- * @ctx: ring buffer context. (input arguments only)
- * @src : source pointer to copy from
- * @len : length of data to copy
- *
- * This function copies "len" bytes of data from a source pointer to a buffer
- * backend, at the current context offset. This is more or less a buffer
- * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
- * if copy is crossing a page boundary.
- */
-static inline
-void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const void *src, size_t len)
- __attribute__((always_inline));
-static inline
-void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const void *src, size_t len)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct channel_backend *chanb = &ctx_private->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
- size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- void *p;
-
- if (caa_unlikely(!len))
- return;
- /*
- * Underlying layer should never ask for writes across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
- backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
- if (caa_unlikely(!backend_pages)) {
- if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
- return;
- }
- p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!p))
- return;
- lib_ring_buffer_do_copy(config, p, src, len);
- ctx_private->buf_offset += len;
-}
-
-/*
- * Copy up to @len string bytes from @src to @dest. Stop whenever a NULL
- * terminating character is found in @src. Returns the number of bytes
- * copied. Does *not* terminate @dest with NULL terminating character.
- */
-static inline
-size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- char *dest, const char *src, size_t len)
- __attribute__((always_inline));
-static inline
-size_t lib_ring_buffer_do_strcpy(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- char *dest, const char *src, size_t len)
-{
- size_t count;
-
- for (count = 0; count < len; count++) {
- char c;
-
- /*
- * Only read source character once, in case it is
- * modified concurrently.
- */
- c = CMM_LOAD_SHARED(src[count]);
- if (!c)
- break;
- lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
- }
- return count;
-}
-
-/**
- * lib_ring_buffer_strcpy - write string data to a buffer backend
- * @config : ring buffer instance configuration
- * @ctx: ring buffer context. (input arguments only)
- * @src : source pointer to copy from
- * @len : length of data to copy
- * @pad : character to use for padding
- *
- * This function copies @len - 1 bytes of string data from a source
- * pointer to a buffer backend, followed by a terminating '\0'
- * character, at the current context offset. This is more or less a
- * buffer backend-specific strncpy() operation. If a terminating '\0'
- * character is found in @src before @len - 1 characters are copied, pad
- * the buffer with @pad characters (e.g. '#').
- */
-static inline
-void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len, char pad)
- __attribute__((always_inline));
-static inline
-void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len, char pad)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct channel_backend *chanb = &ctx_private->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
- size_t count;
- size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- void *p;
-
- if (caa_unlikely(!len))
- return;
- /*
- * Underlying layer should never ask for writes across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
- backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
- if (caa_unlikely(!backend_pages)) {
- if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
- return;
- }
- p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!p))
- return;
-
- count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
- offset += count;
- /* Padding */
- if (caa_unlikely(count < len - 1)) {
- size_t pad_len = len - 1 - count;
-
- p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!p))
- return;
- lib_ring_buffer_do_memset(p, pad, pad_len);
- offset += pad_len;
- }
- /* Final '\0' */
- p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!p))
- return;
- lib_ring_buffer_do_memset(p, '\0', 1);
- ctx_private->buf_offset += len;
-}
-
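A worked example of the semantics above, as a standalone sketch using plain libc (independent of the ring buffer): with src = "hi", len = 6 and pad = '#', the six reserved bytes end up holding "hi###" followed by the terminating '\0'.

#include <string.h>

static void strcpy_layout_sketch(char dest[6])
{
	const char *src = "hi";
	size_t len = 6;
	size_t count = strlen(src);			/* 2, less than len - 1 */

	memcpy(dest, src, count);			/* "hi" */
	memset(dest + count, '#', len - 1 - count);	/* "hi###" */
	dest[len - 1] = '\0';				/* "hi###\0" */
}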
-/**
- * lib_ring_buffer_pstrcpy - write to a buffer backend P-string
- * @config : ring buffer instance configuration
- * @ctx: ring buffer context. (input arguments only)
- * @src : source pointer to copy from
- * @len : length of data to copy
- * @pad : character to use for padding
- *
- * This function copies up to @len bytes of data from a source pointer
- * into the buffer backend as a Pascal string. If a terminating '\0'
- * character is found in @src before @len characters are copied, pad the
- * buffer with @pad characters (e.g. '\0').
- *
- * The length of the Pascal strings in the ring buffer is explicit: it
- * is either the array or sequence length.
- */
-static inline
-void lib_ring_buffer_pstrcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len, char pad)
- __attribute__((always_inline));
-static inline
-void lib_ring_buffer_pstrcpy(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- const char *src, size_t len, char pad)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct channel_backend *chanb = &ctx_private->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
- size_t count;
- size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- void *p;
-
- if (caa_unlikely(!len))
- return;
- /*
- * Underlying layer should never ask for writes across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
- backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
- if (caa_unlikely(!backend_pages)) {
- if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
- return;
- }
- p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!p))
- return;
-
- count = lib_ring_buffer_do_strcpy(config, p, src, len);
- offset += count;
- /* Padding */
- if (caa_unlikely(count < len)) {
- size_t pad_len = len - count;
-
- p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!p))
- return;
- lib_ring_buffer_do_memset(p, pad, pad_len);
- }
- ctx_private->buf_offset += len;
-}
-
-/*
- * This accessor counts the number of unread records in a buffer.
- * It only provides a consistent value if neither reads nor writes are
- * performed concurrently.
- */
-static inline
-unsigned long lib_ring_buffer_get_records_unread(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- unsigned long records_unread = 0, sb_bindex;
- unsigned int i;
- struct lttng_ust_lib_ring_buffer_channel *chan;
-
- chan = shmp(handle, bufb->chan);
- if (!chan)
- return 0;
- for (i = 0; i < chan->backend.num_subbuf; i++) {
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- wsb = shmp_index(handle, bufb->buf_wsb, i);
- if (!wsb)
- return 0;
- sb_bindex = subbuffer_id_get_index(config, wsb->id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return 0;
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return 0;
- records_unread += v_read(config, &backend_pages->records_unread);
- }
- if (config->mode == RING_BUFFER_OVERWRITE) {
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return 0;
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return 0;
- records_unread += v_read(config, &backend_pages->records_unread);
- }
- return records_unread;
-}
-
-#endif /* _LTTNG_RING_BUFFER_BACKEND_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring buffer backend (internal helpers).
- */
-
-#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
-#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <urcu/compiler.h>
-
-#include <lttng/ringbuffer-context.h>
-#include "ringbuffer-config.h"
-#include "backend_types.h"
-#include "frontend_types.h"
-#include "shm.h"
-
-/* Ring buffer backend API presented to the frontend */
-
-/* Ring buffer and channel backend create/free */
-
-int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
- struct channel_backend *chan,
- int cpu,
- struct lttng_ust_shm_handle *handle,
- struct shm_object *shmobj)
- __attribute__((visibility("hidden")));
-
-void channel_backend_unregister_notifiers(struct channel_backend *chanb)
- __attribute__((visibility("hidden")));
-
-void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
- __attribute__((visibility("hidden")));
-
-int channel_backend_init(struct channel_backend *chanb,
- const char *name,
- const struct lttng_ust_lib_ring_buffer_config *config,
- size_t subbuf_size,
- size_t num_subbuf, struct lttng_ust_shm_handle *handle,
- const int *stream_fds)
- __attribute__((visibility("hidden")));
-
-void channel_backend_free(struct channel_backend *chanb,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-void channel_backend_reset(struct channel_backend *chanb)
- __attribute__((visibility("hidden")));
-
-int lib_ring_buffer_backend_init(void)
- __attribute__((visibility("hidden")));
-
-void lib_ring_buffer_backend_exit(void)
- __attribute__((visibility("hidden")));
-
-extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t offset, const void *src, size_t len,
- ssize_t pagecpy)
- __attribute__((visibility("hidden")));
-
-/*
- * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
- * exchanged atomically.
- *
- * Top half word, except lowest bit, belongs to "offset", which is used to
- * keep count of the produced buffers. For overwrite mode, this provides the
- * consumer with the capacity to read subbuffers in order, handling the
- * situation where producers would write up to 2^15 buffers (or 2^31 for 64-bit
- * systems) concurrently with a single execution of get_subbuf (between offset
- * sampling and subbuffer ID exchange).
- */
-
-#define HALF_ULONG_BITS (CAA_BITS_PER_LONG >> 1)
-
-#define SB_ID_OFFSET_SHIFT (HALF_ULONG_BITS + 1)
-#define SB_ID_OFFSET_COUNT (1UL << SB_ID_OFFSET_SHIFT)
-#define SB_ID_OFFSET_MASK (~(SB_ID_OFFSET_COUNT - 1))
-/*
- * Lowest bit of top word half belongs to noref. Used only for overwrite mode.
- */
-#define SB_ID_NOREF_SHIFT (SB_ID_OFFSET_SHIFT - 1)
-#define SB_ID_NOREF_COUNT (1UL << SB_ID_NOREF_SHIFT)
-#define SB_ID_NOREF_MASK SB_ID_NOREF_COUNT
-/*
- * In overwrite mode: lowest half of word is used for index.
- * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
- * In producer-consumer mode: whole word used for index.
- */
-#define SB_ID_INDEX_SHIFT 0
-#define SB_ID_INDEX_COUNT (1UL << SB_ID_INDEX_SHIFT)
-#define SB_ID_INDEX_MASK (SB_ID_NOREF_COUNT - 1)
-
-/*
- * Construct the subbuffer id from offset, index and noref. Use only the index
- * for producer-consumer mode (offset and noref are only used in overwrite
- * mode).
- */
-static inline
-unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned long offset, unsigned long noref,
- unsigned long index)
-{
- if (config->mode == RING_BUFFER_OVERWRITE)
- return (offset << SB_ID_OFFSET_SHIFT)
- | (noref << SB_ID_NOREF_SHIFT)
- | index;
- else
- return index;
-}
-
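On a 64-bit system (CAA_BITS_PER_LONG == 64, hence HALF_ULONG_BITS == 32), the resulting layout is: bits 63..33 hold the offset count, bit 32 the noref flag, bits 31..0 the index. A standalone sketch of the packing arithmetic, mirroring subbuffer_id() above in overwrite mode:

/* Pack offset 5, noref 1, index 3 with the 64-bit shift values above. */
static unsigned long pack_id_sketch(void)
{
	unsigned long offset = 5, noref = 1, index = 3;

	return (offset << 33) | (noref << 32) | index;	/* 0xb00000003 */
}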
-/*
- * Compare offset with the offset contained within id. Return 1 if the offset
- * bits are identical, else 0.
- */
-static inline
-int subbuffer_id_compare_offset(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- unsigned long id, unsigned long offset)
-{
- return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
-}
-
-static inline
-unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned long id)
-{
- if (config->mode == RING_BUFFER_OVERWRITE)
- return id & SB_ID_INDEX_MASK;
- else
- return id;
-}
-
-static inline
-unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned long id)
-{
- if (config->mode == RING_BUFFER_OVERWRITE)
- return !!(id & SB_ID_NOREF_MASK);
- else
- return 1;
-}
-
-/*
- * Only used by reader on subbuffer ID it has exclusive access to. No volatile
- * needed.
- */
-static inline
-void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned long *id)
-{
- if (config->mode == RING_BUFFER_OVERWRITE)
- *id |= SB_ID_NOREF_MASK;
-}
-
-static inline
-void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned long *id, unsigned long offset)
-{
- unsigned long tmp;
-
- if (config->mode == RING_BUFFER_OVERWRITE) {
- tmp = *id;
- tmp &= ~SB_ID_OFFSET_MASK;
- tmp |= offset << SB_ID_OFFSET_SHIFT;
- tmp |= SB_ID_NOREF_MASK;
- /* Volatile store, read concurrently by readers. */
- CMM_ACCESS_ONCE(*id) = tmp;
- }
-}
-
-/* No volatile access, since already used locally */
-static inline
-void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned long *id)
-{
- if (config->mode == RING_BUFFER_OVERWRITE)
- *id &= ~SB_ID_NOREF_MASK;
-}
-
-/*
- * For overwrite mode, cap the number of subbuffers per buffer to:
- * 2^16 on 32-bit architectures
- * 2^32 on 64-bit architectures
- * This is required to fit in the index part of the ID. Return 0 on success,
- * -EPERM on failure.
- */
-static inline
-int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned long num_subbuf)
-{
- if (config->mode == RING_BUFFER_OVERWRITE)
- return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
- else
- return 0;
-}
-
-static inline
-int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx_private->buf->backend;
- struct channel_backend *chanb = &ctx_private->chan->backend;
- struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
- size_t sbidx;
- size_t offset = ctx_private->buf_offset;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- unsigned long sb_bindex, id;
- struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;
-
- offset &= chanb->buf_size - 1;
- sbidx = offset >> chanb->subbuf_size_order;
- wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
- if (caa_unlikely(!wsb))
- return -1;
- id = wsb->id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (caa_unlikely(!rpages))
- return -1;
- CHAN_WARN_ON(ctx_private->chan,
- config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- _backend_pages = shmp(handle, rpages->shmp);
- if (caa_unlikely(!_backend_pages))
- return -1;
- *backend_pages = _backend_pages;
- return 0;
-}
-
-/* Get backend pages from cache. */
-static inline
-struct lttng_ust_lib_ring_buffer_backend_pages *
- lib_ring_buffer_get_backend_pages_from_ctx(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- return ctx->priv->backend_pages;
-}
-
-/*
- * The ring buffer can count events recorded and overwritten per buffer,
- * but it is disabled by default due to its performance overhead.
- */
-#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
-static inline
-void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx, struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
- if (caa_unlikely(!backend_pages)) {
- if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
- return;
- }
- v_inc(config, &backend_pages->records_commit);
-}
-#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
-static inline
-void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- const struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_backend *bufb __attribute__((unused)),
- unsigned long idx __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
-}
-#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-
-/*
- * Reader has exclusive subbuffer access for record consumption. No need to
- * perform the decrement atomically.
- */
-static inline
-void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- chan = shmp(handle, bufb->chan);
- if (!chan)
- return;
- pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
- if (!pages_shmp)
- return;
- backend_pages = shmp(handle, pages_shmp->shmp);
- if (!backend_pages)
- return;
- CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
- /* Non-atomic decrement protected by exclusive subbuffer access */
- _v_dec(config, &backend_pages->records_unread);
- v_inc(config, &bufb->records_read);
-}
-
-static inline
-unsigned long subbuffer_get_records_count(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- wsb = shmp_index(handle, bufb->buf_wsb, idx);
- if (!wsb)
- return 0;
- sb_bindex = subbuffer_id_get_index(config, wsb->id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return 0;
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return 0;
- return v_read(config, &backend_pages->records_commit);
-}
-
-/*
- * Must be executed at subbuffer delivery when the writer has _exclusive_
- * subbuffer access. See lib_ring_buffer_check_deliver() for details.
- * lib_ring_buffer_get_records_count() must be called to get the records
- * count before this function, because it resets the records_commit
- * count.
- */
-static inline
-unsigned long subbuffer_count_records_overrun(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long overruns, sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- wsb = shmp_index(handle, bufb->buf_wsb, idx);
- if (!wsb)
- return 0;
- sb_bindex = subbuffer_id_get_index(config, wsb->id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return 0;
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return 0;
- overruns = v_read(config, &backend_pages->records_unread);
- v_set(config, &backend_pages->records_unread,
- v_read(config, &backend_pages->records_commit));
- v_set(config, &backend_pages->records_commit, 0);
-
- return overruns;
-}
-
-static inline
-void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx,
- unsigned long data_size,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- wsb = shmp_index(handle, bufb->buf_wsb, idx);
- if (!wsb)
- return;
- sb_bindex = subbuffer_id_get_index(config, wsb->id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return;
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return;
- backend_pages->data_size = data_size;
-}
-
-static inline
-unsigned long subbuffer_get_read_data_size(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
- if (!pages_shmp)
- return 0;
- backend_pages = shmp(handle, pages_shmp->shmp);
- if (!backend_pages)
- return 0;
- return backend_pages->data_size;
-}
-
-static inline
-unsigned long subbuffer_get_data_size(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long sb_bindex;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- wsb = shmp_index(handle, bufb->buf_wsb, idx);
- if (!wsb)
- return 0;
- sb_bindex = subbuffer_id_get_index(config, wsb->id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return 0;
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return 0;
- return backend_pages->data_size;
-}
-
-static inline
-void subbuffer_inc_packet_count(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx, struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend_counts *counts;
-
- counts = shmp_index(handle, bufb->buf_cnt, idx);
- if (!counts)
- return;
- counts->seq_cnt++;
-}
-
-/**
- * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
- * writer.
- */
-static inline
-void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long id, new_id;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
-
- if (config->mode != RING_BUFFER_OVERWRITE)
- return;
-
- /*
- * Performing a volatile access to read the sb_pages, because we want to
- * read a coherent version of the pointer and the associated noref flag.
- */
- wsb = shmp_index(handle, bufb->buf_wsb, idx);
- if (!wsb)
- return;
- id = CMM_ACCESS_ONCE(wsb->id);
- for (;;) {
- /* This check is called on the fast path for each record. */
- if (caa_likely(!subbuffer_id_is_noref(config, id))) {
- /*
- * Store after load dependency ordering the writes to
- * the subbuffer after load and test of the noref flag
- * matches the memory barrier implied by the cmpxchg()
- * in update_read_sb_index().
- */
- return; /* Already writing to this buffer */
- }
- new_id = id;
- subbuffer_id_clear_noref(config, &new_id);
- new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
- if (caa_likely(new_id == id))
- break;
- id = new_id;
- }
-}
-
-/**
- * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
- * called by writer.
- */
-static inline
-void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- unsigned long idx, unsigned long offset,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_channel *chan;
-
- if (config->mode != RING_BUFFER_OVERWRITE)
- return;
-
- wsb = shmp_index(handle, bufb->buf_wsb, idx);
- if (!wsb)
- return;
- /*
- * Because ring_buffer_set_noref() is only called by a single thread
- * (the one which updated the cc_sb value), there are no concurrent
- * updates to take care of: other writers have not updated cc_sb, so
- * they cannot set the noref flag, and concurrent readers cannot modify
- * the pointer because the noref flag is not set yet.
- * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
- * to the subbuffer before this set noref operation.
- * subbuffer_set_noref() uses a volatile store to deal with concurrent
- * readers of the noref flag.
- */
- chan = shmp(handle, bufb->chan);
- if (!chan)
- return;
- CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
- /*
- * Memory barrier that ensures counter stores are ordered before set
- * noref and offset.
- */
- cmm_smp_mb();
- subbuffer_id_set_noref_offset(config, &wsb->id, offset);
-}
-
-/**
- * update_read_sb_index - Read-side subbuffer index update.
- */
-static inline
-int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- struct channel_backend *chanb __attribute__((unused)),
- unsigned long consumed_idx,
- unsigned long consumed_count,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- unsigned long old_id, new_id;
-
- wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
- if (caa_unlikely(!wsb))
- return -EPERM;
-
- if (config->mode == RING_BUFFER_OVERWRITE) {
- struct lttng_ust_lib_ring_buffer_channel *chan;
-
- /*
- * Exchange the target writer subbuffer with our own unused
- * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
- * old_wpage, because the value read will be confirmed by the
- * following cmpxchg().
- */
- old_id = wsb->id;
- if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
- return -EAGAIN;
- /*
- * Make sure the offset count we are expecting matches the one
- * indicated by the writer.
- */
- if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
- consumed_count)))
- return -EAGAIN;
- chan = shmp(handle, bufb->chan);
- if (caa_unlikely(!chan))
- return -EPERM;
- CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
- subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
- consumed_count);
- new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
- if (caa_unlikely(old_id != new_id))
- return -EAGAIN;
- bufb->buf_rsb.id = new_id;
- } else {
- /* No page exchange, use the writer page directly */
- bufb->buf_rsb.id = wsb->id;
- }
- return 0;
-}
-
-#ifndef inline_memcpy
-#define inline_memcpy(dest, src, n) memcpy(dest, src, n)
-#endif
-
-static inline
-void lttng_inline_memcpy(void *dest, const void *src,
- unsigned long len)
- __attribute__((always_inline));
-static inline
-void lttng_inline_memcpy(void *dest, const void *src,
- unsigned long len)
-{
- switch (len) {
- case 1:
- *(uint8_t *) dest = *(const uint8_t *) src;
- break;
- case 2:
- *(uint16_t *) dest = *(const uint16_t *) src;
- break;
- case 4:
- *(uint32_t *) dest = *(const uint32_t *) src;
- break;
- case 8:
- *(uint64_t *) dest = *(const uint64_t *) src;
- break;
- default:
- inline_memcpy(dest, src, len);
- }
-}
-
-/*
- * Use the architecture-specific memcpy implementation for constant-sized
- * inputs, but rely on an inline memcpy for statically unknown lengths.
- * The function call to memcpy is just way too expensive for a fast path.
- */
-#define lib_ring_buffer_do_copy(config, dest, src, len) \
-do { \
- size_t __len = (len); \
- if (__builtin_constant_p(len)) \
- memcpy(dest, src, __len); \
- else \
- lttng_inline_memcpy(dest, src, __len); \
-} while (0)
-
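To illustrate the dispatch (a sketch using the macro above): a compile-time-constant length lets the compiler expand memcpy() inline, while a runtime length goes through lttng_inline_memcpy(), whose switch handles the common 1, 2, 4 and 8 byte field sizes without a function call.

static void do_copy_sketch(const struct lttng_ust_lib_ring_buffer_config *config,
		void *dest, const void *src, size_t runtime_len)
{
	/* Constant: __builtin_constant_p(8) is true, plain memcpy() is emitted. */
	lib_ring_buffer_do_copy(config, dest, src, 8);
	/* Variable: falls through to lttng_inline_memcpy(). */
	lib_ring_buffer_do_copy(config, dest, src, runtime_len);
}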
-/*
- * Write @len bytes of character @c to @dest.
- */
-static inline
-void lib_ring_buffer_do_memset(char *dest, char c, unsigned long len)
-{
- unsigned long i;
-
- for (i = 0; i < len; i++)
- dest[i] = c;
-}
-
-/* arch-agnostic implementation */
-
-static inline int lttng_ust_fls(unsigned int x)
-{
- int r = 32;
-
- if (!x)
- return 0;
- if (!(x & 0xFFFF0000U)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xFF000000U)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xF0000000U)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xC0000000U)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000U)) {
- /* No need to bit shift on last operation */
- r -= 1;
- }
- return r;
-}
-
-static inline int get_count_order(unsigned int count)
-{
- int order;
-
- order = lttng_ust_fls(count) - 1;
- if (count & (count - 1))
- order++;
- return order;
-}
-
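A few checked values for the helpers above: lttng_ust_fls() returns the 1-based position of the most significant set bit (0 for input 0), and get_count_order() rounds up to the next power-of-two order, as used for the various *_order fields of the backend. Sketch, assuming <assert.h>:

#include <assert.h>

static void count_order_sketch(void)
{
	assert(lttng_ust_fls(1) == 1);
	assert(lttng_ust_fls(0x10) == 5);
	assert(get_count_order(4) == 2);	/* exact power of two */
	assert(get_count_order(12) == 4);	/* rounded up to 16 */
}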
-#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring buffer backend (types).
- */
-
-#ifndef _LTTNG_RING_BUFFER_BACKEND_TYPES_H
-#define _LTTNG_RING_BUFFER_BACKEND_TYPES_H
-
-#include <limits.h>
-#include <stdint.h>
-#include "shm_internal.h"
-#include "vatomic.h"
-
-#define RB_BACKEND_PAGES_PADDING 16
-struct lttng_ust_lib_ring_buffer_backend_pages {
- unsigned long mmap_offset; /* offset of the subbuffer in mmap */
- union v_atomic records_commit; /* current records committed count */
- union v_atomic records_unread; /* records to read */
- unsigned long data_size; /* Amount of data to read from subbuf */
- DECLARE_SHMP(char, p); /* Backing memory map */
- char padding[RB_BACKEND_PAGES_PADDING];
-};
-
-struct lttng_ust_lib_ring_buffer_backend_subbuffer {
- /* Identifier for subbuf backend pages. Exchanged atomically. */
- unsigned long id; /* backend subbuffer identifier */
-};
-
-struct lttng_ust_lib_ring_buffer_backend_counts {
- /*
- * Counter specific to the sub-buffer location within the ring buffer.
- * The actual sequence number of the packet within the entire ring
- * buffer can be derived from the formula nr_subbuffers * seq_cnt +
- * subbuf_idx.
- */
- uint64_t seq_cnt; /* packet sequence number */
-};
-
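Worked instance of the formula above: in a channel with 4 sub-buffers, the sub-buffer at index 2 on its 10th reuse (seq_cnt == 10) holds packet number 4 * 10 + 2 == 42 of the whole ring buffer. As a sketch:

static uint64_t packet_seq_num_sketch(uint64_t nr_subbuffers,
		uint64_t seq_cnt, uint64_t subbuf_idx)
{
	/* nr_subbuffers * seq_cnt + subbuf_idx, e.g. 4 * 10 + 2 == 42. */
	return nr_subbuffers * seq_cnt + subbuf_idx;
}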
-/*
- * Forward declaration of frontend-specific channel and ring_buffer.
- */
-struct lttng_ust_lib_ring_buffer_channel;
-struct lttng_ust_lib_ring_buffer;
-
-struct lttng_ust_lib_ring_buffer_backend_pages_shmp {
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages, shmp);
-};
-
-#define RB_BACKEND_RING_BUFFER_PADDING 64
-struct lttng_ust_lib_ring_buffer_backend {
- /* Array of ring_buffer_backend_subbuffer for writer */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_subbuffer, buf_wsb);
- /* ring_buffer_backend_subbuffer for reader */
- struct lttng_ust_lib_ring_buffer_backend_subbuffer buf_rsb;
- /* Array of lib_ring_buffer_backend_counts for the packet counter */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_counts, buf_cnt);
- /*
- * Pointer array of backend pages, for whole buffer.
- * Indexed by ring_buffer_backend_subbuffer identifier (id) index.
- */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages_shmp, array);
- DECLARE_SHMP(char, memory_map); /* memory mapping */
-
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_channel, chan); /* Associated channel */
- int cpu; /* This buffer's cpu. -1 if global. */
- union v_atomic records_read; /* Number of records read */
- unsigned int allocated:1; /* is buffer allocated ? */
- char padding[RB_BACKEND_RING_BUFFER_PADDING];
-};
-
-struct lttng_ust_lib_ring_buffer_shmp {
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, shmp); /* Channel per-cpu buffers */
-};
-
-#define RB_BACKEND_CHANNEL_PADDING 64
-struct channel_backend {
- unsigned long buf_size; /* Size of the buffer */
- unsigned long subbuf_size; /* Sub-buffer size */
- unsigned int subbuf_size_order; /* Order of sub-buffer size */
- unsigned int num_subbuf_order; /*
- * Order of number of sub-buffers/buffer
- * for writer.
- */
- unsigned int buf_size_order; /* Order of buffer size */
- unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
- unsigned long num_subbuf; /* Number of sub-buffers for writer */
- uint64_t start_tsc; /* Channel creation TSC value */
- DECLARE_SHMP(void *, priv_data);/* Client-specific information */
- struct lttng_ust_lib_ring_buffer_config config; /* Ring buffer configuration */
- char name[NAME_MAX]; /* Channel name */
- char padding[RB_BACKEND_CHANNEL_PADDING];
- struct lttng_ust_lib_ring_buffer_shmp buf[];
-};
-
-#endif /* _LTTNG_RING_BUFFER_BACKEND_TYPES_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring Buffer Library Synchronization Header (API).
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- */
-
-#ifndef _LTTNG_RING_BUFFER_FRONTEND_H
-#define _LTTNG_RING_BUFFER_FRONTEND_H
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <urcu/compiler.h>
-#include <urcu/uatomic.h>
-
-#include "smp.h"
-
-/* Internal helpers */
-#include "frontend_internal.h"
-
-/* Buffer creation/removal and setup operations */
-
-/*
- * switch_timer_interval is the time interval (in us) to fill sub-buffers with
- * padding to let readers get those sub-buffers. Used for live streaming.
- *
- * read_timer_interval is the time interval (in us) to wake up pending readers.
- *
- * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
- * address mapping. It is used only by the RING_BUFFER_STATIC configuration. It
- * can be set to NULL for other backends.
- *
- * priv_data is a memory area for client configuration data. This memory is
- * managed by the ring buffer library. priv_data_align is the alignment
- * required for the private data area.
- */
-
-extern
-struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
- const char *name,
- size_t priv_data_align,
- size_t priv_data_size,
- void *priv_data_init,
- void *priv,
- void *buf_addr,
- size_t subbuf_size, size_t num_subbuf,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout)
- __attribute__((visibility("hidden")));
-
-/*
- * channel_destroy finalizes all channel's buffers, waits for readers to
- * release all references, and destroys the channel.
- */
-void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle,
- int consumer)
- __attribute__((visibility("hidden")));
-
-
-/* Buffer read operations */
-
-/*
- * Iteration on channel cpumask needs to issue a read barrier to match the write
- * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
- * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
- * only performed at channel destruction.
- */
-#define for_each_channel_cpu(cpu, chan) \
- for_each_possible_cpu(cpu)
-
-extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
- struct lttng_ust_shm_handle *handle,
- int *shm_fd, int *wait_fd,
- int *wakeup_fd,
- uint64_t *memory_map_size)
- __attribute__((visibility("hidden")));
-
-extern
-int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern
-int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern
-int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle,
- int cpu)
- __attribute__((visibility("hidden")));
-
-extern
-int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle,
- int cpu)
- __attribute__((visibility("hidden")));
-
-extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/*
- * Initialize signals for ring buffer. Should be called early e.g. by
- * main() in the program to affect all threads.
- */
-void lib_ringbuffer_signal_init(void)
- __attribute__((visibility("hidden")));
-
-/*
- * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
- */
-extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
- unsigned long *consumed,
- unsigned long *produced,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern int lib_ring_buffer_snapshot_sample_positions(
- struct lttng_ust_lib_ring_buffer *buf,
- unsigned long *consumed,
- unsigned long *produced,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
- unsigned long consumed_new,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
- unsigned long consumed,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/*
- * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
- * to read sub-buffers sequentially.
- */
-static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- int ret;
-
- ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
- &buf->prod_snapshot, handle);
- if (ret)
- return ret;
- ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
- return ret;
-}
-
-static inline
-void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
-
- chan = shmp(handle, buf->backend.chan);
- if (!chan)
- return;
- lib_ring_buffer_put_subbuf(buf, handle);
- lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot, chan),
- handle);
-}
-
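Putting the two helpers together, a reader can drain a buffer sequentially; a minimal sketch, folding all error codes into a single stop condition:

static void drain_sketch(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	while (!lib_ring_buffer_get_next_subbuf(buf, handle)) {
		/* ... consume the sub-buffer contents here ... */
		lib_ring_buffer_put_next_subbuf(buf, handle);
	}
}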
-extern void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
- __attribute__((visibility("hidden")));
-
-extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-static inline
-unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return v_read(config, &buf->offset);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_consumed(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return uatomic_read(&buf->consumed);
-}
-
-/*
- * Must call lib_ring_buffer_is_finalized before reading counters (memory
- * ordering enforced with respect to trace teardown).
- */
-static inline
-int lib_ring_buffer_is_finalized(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
-{
- int finalized = CMM_ACCESS_ONCE(buf->finalized);
- /*
- * Read finalized before counters.
- */
- cmm_smp_rmb();
- return finalized;
-}
-
-static inline
-int lib_ring_buffer_channel_is_finalized(const struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return chan->finalized;
-}
-
-static inline
-int lib_ring_buffer_channel_is_disabled(const struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return uatomic_read(&chan->record_disabled);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_read_data_size(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- return subbuffer_get_read_data_size(config, &buf->backend, handle);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_count(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return v_read(config, &buf->records_count);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_overrun(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return v_read(config, &buf->records_overrun);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_lost_full(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return v_read(config, &buf->records_lost_full);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_lost_wrap(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return v_read(config, &buf->records_lost_wrap);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_lost_big(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return v_read(config, &buf->records_lost_big);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_records_read(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf)
-{
- return v_read(config, &buf->backend.records_read);
-}
-
-#endif /* _LTTNG_RING_BUFFER_FRONTEND_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * See ring_buffer_frontend.c for more information on wait-free
- * algorithms.
- * See frontend.h for channel allocation and read-side API.
- */
-
-#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
-#define _LTTNG_RING_BUFFER_FRONTEND_API_H
-
-#include <stddef.h>
-
-#include <urcu/compiler.h>
-
-#include "frontend.h"
-
-/**
- * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
- *
- * The ring buffer nesting count is a safety net ensuring that tracer
- * client code never triggers endless recursion.
- * Returns a nesting level >= 0 on success, -EPERM on failure (nesting
- * count too high).
- *
- * asm volatile and "memory" clobber prevent the compiler from moving
- * instructions across the nesting count update. This is required to ensure
- * that probe side-effects which can cause recursion (e.g. unforeseen traps,
- * divisions by 0, ...) are triggered within the incremented nesting count
- * section.
- */
-static inline
-int lib_ring_buffer_nesting_inc(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
-{
- int nesting;
-
- nesting = ++URCU_TLS(lib_ring_buffer_nesting);
- cmm_barrier();
- if (caa_unlikely(nesting >= LIB_RING_BUFFER_MAX_NESTING)) {
- WARN_ON_ONCE(1);
- URCU_TLS(lib_ring_buffer_nesting)--;
- return -EPERM;
- }
- return nesting - 1;
-}
-
-static inline
-int lib_ring_buffer_nesting_count(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
-{
- return URCU_TLS(lib_ring_buffer_nesting);
-}
-
-static inline
-void lib_ring_buffer_nesting_dec(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
-{
- cmm_barrier();
- URCU_TLS(lib_ring_buffer_nesting)--; /* TLS */
-}
-
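Typical use of the guard pair above (a sketch): a tracer probe brackets its fast path between inc and dec, so that re-entry, e.g. from a signal handler firing inside the instrumented section, bails out instead of recursing.

static void probe_sketch(const struct lttng_ust_lib_ring_buffer_config *config)
{
	if (lib_ring_buffer_nesting_inc(config) < 0)
		return;		/* nesting too deep: drop the event */
	/* ... reserve, write and commit the record here ... */
	lib_ring_buffer_nesting_dec(config);
}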
-/*
- * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is not
- * part of the API per se.
- *
- * Returns 0 if the reservation succeeded, or 1 if the slow path must be taken.
- */
-static inline
-int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx,
- unsigned long *o_begin, unsigned long *o_end,
- unsigned long *o_old, size_t *before_hdr_pad)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
- *o_begin = v_read(config, &buf->offset);
- *o_old = *o_begin;
-
- ctx_private->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
- return 1;
-
- /*
- * Prefetch cacheline for read because we have to read the previous
- * commit counter to increment it and commit seq value to compare it to
- * the commit counter.
- */
- //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
-
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-
- if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
- return 1;
-
- ctx_private->slot_size = record_header_size(config, chan, *o_begin,
- before_hdr_pad, ctx, client_ctx);
- ctx_private->slot_size +=
- lttng_ust_lib_ring_buffer_align(*o_begin + ctx_private->slot_size,
- ctx->largest_align) + ctx->data_size;
- if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx_private->slot_size)
- > chan->backend.subbuf_size))
- return 1;
-
- /*
- * Record fits in the current buffer and we are not on a switch
- * boundary. It's safe to write.
- */
- *o_end = *o_begin + ctx_private->slot_size;
-
- if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
- /*
- * The offset_end will fall at the very beginning of the next
- * subbuffer.
- */
- return 1;
-
- return 0;
-}
-
-/**
- * lib_ring_buffer_reserve - Reserve space in a ring buffer.
- * @config: ring buffer instance configuration.
- * @ctx: ring buffer context. (input and output) Must be already initialized.
- *
- * Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
- *
- * Return :
- * 0 on success.
- * -EAGAIN if channel is disabled.
- * -ENOSPC if event size is too large for packet.
- * -ENOBUFS if there is currently not enough space in buffer for the event.
- * -EIO if data cannot be written into the buffer for any other reason.
- */
-
-static inline
-int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct lttng_ust_lib_ring_buffer *buf;
- unsigned long o_begin, o_end, o_old;
- size_t before_hdr_pad = 0;
-
- if (caa_unlikely(uatomic_read(&chan->record_disabled)))
- return -EAGAIN;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- ctx_private->reserve_cpu = lttng_ust_get_cpu();
- buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
- } else {
- buf = shmp(handle, chan->backend.buf[0].shmp);
- }
- if (caa_unlikely(!buf))
- return -EIO;
- if (caa_unlikely(uatomic_read(&buf->record_disabled)))
- return -EAGAIN;
- ctx_private->buf = buf;
-
- /*
- * Perform retryable operations.
- */
- if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
- &o_end, &o_old, &before_hdr_pad)))
- goto slow_path;
-
- if (caa_unlikely(v_cmpxchg(config, &buf->offset, o_old, o_end)
- != o_old))
- goto slow_path;
-
- /*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * record headers, never the opposite (missing a full TSC record header
- * when it would be needed).
- */
- save_last_tsc(config, buf, ctx_private->tsc);
-
- /*
- * Push the reader if necessary
- */
- lib_ring_buffer_reserve_push_reader(buf, chan, o_end - 1);
-
- /*
- * Clear noref flag for this subbuffer.
- */
- lib_ring_buffer_clear_noref(config, &buf->backend,
- subbuf_index(o_end - 1, chan), handle);
-
- ctx_private->pre_offset = o_begin;
- ctx_private->buf_offset = o_begin + before_hdr_pad;
- return 0;
-slow_path:
- return lib_ring_buffer_reserve_slow(ctx, client_ctx);
-}
-
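The functions above combine into the usual client fast path: reserve a slot, copy the payload, commit. A reduced sketch, assuming the context has been initialized by the client beforehand and passing a NULL client_ctx (whether NULL is acceptable depends on the client's record_header_size() callback):

static void trace_event_sketch(const struct lttng_ust_lib_ring_buffer_config *config,
		struct lttng_ust_lib_ring_buffer_ctx *ctx,
		const void *payload, size_t len)
{
	if (lib_ring_buffer_reserve(config, ctx, NULL))
		return;		/* disabled, record too large, or no space */
	lib_ring_buffer_write(config, ctx, payload, len);	/* from backend.h */
	lib_ring_buffer_commit(config, ctx);
}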
-/**
- * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
- * @config: ring buffer instance configuration.
- * @buf: buffer
- * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
- *
- * This operation is completely reentrant: it can be called while tracing is
- * active with absolutely no lock held.
- *
- * Note, however, that as a v_cmpxchg is used for some atomic operations and
- * must be executed locally for per-CPU buffers, this function must be called
- * from the CPU which owns the buffer for an ACTIVE flush, with preemption
- * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
- */
-static inline
-void lib_ring_buffer_switch(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
- struct lttng_ust_shm_handle *handle)
-{
- lib_ring_buffer_switch_slow(buf, mode, handle);
-}
-
-/* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
-
-/**
- * lib_ring_buffer_commit - Commit a record.
- * @config: ring buffer instance configuration.
- * @ctx: ring buffer context. (input arguments only)
- *
- * Atomic unordered slot commit. Increments the commit count in the
- * specified sub-buffer, and delivers it if necessary.
- */
-static inline
-void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
- struct lttng_ust_shm_handle *handle = chan->handle;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
- unsigned long offset_end = ctx_private->buf_offset;
- unsigned long endidx = subbuf_index(offset_end - 1, chan);
- unsigned long commit_count;
- struct commit_counters_hot *cc_hot = shmp_index(handle,
- buf->commit_hot, endidx);
-
- if (caa_unlikely(!cc_hot))
- return;
-
- /*
- * Must count record before incrementing the commit count.
- */
- subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
-
- /*
- * Order all writes to buffer before the commit count update that will
- * determine that the subbuffer is full.
- */
- cmm_smp_wmb();
-
- v_add(config, ctx_private->slot_size, &cc_hot->cc);
-
- /*
- * The commit count read can race with concurrent OOO commit count updates.
- * This is only needed for lib_ring_buffer_check_deliver (for
- * non-polling delivery only) and for
- * lib_ring_buffer_write_commit_counter. The race can only cause the
- * counter to be read with the same value more than once, which could
- * cause:
- * - Multiple delivery for the same sub-buffer (which is handled
- * gracefully by the reader code) if the value is for a full
- * sub-buffer. It's important that we can never miss a sub-buffer
- * delivery. Re-reading the value after the v_add ensures this.
- * - Reading a commit_count with a higher value than what was actually
- * added to it for the lib_ring_buffer_write_commit_counter call
- * (again caused by a concurrent committer). It does not matter,
- * because this function is interested in the fact that the commit
- * count catches up to the reserve offset for a specific sub-buffer,
- * which is completely independent of the order.
- */
- commit_count = v_read(config, &cc_hot->cc);
-
- lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx, handle, ctx_private->tsc);
- /*
- * Update used size at each commit. It's needed only for extracting
- * ring buffer contents from a vmcore, after a crash.
- */
- lib_ring_buffer_write_commit_counter(config, buf, chan,
- offset_end, commit_count, handle, cc_hot);
-}
-
-/**
- * lib_ring_buffer_try_discard_reserve - Try discarding a record.
- * @config: ring buffer instance configuration.
- * @ctx: ring buffer context. (input arguments only)
- *
- * Only succeeds if no other record has been written after the record to
- * discard. If discard fails, the record must be committed to the buffer.
- *
- * Returns 0 upon success, -EPERM if the record cannot be discarded.
- */
-static inline
-int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
- unsigned long end_offset = ctx_private->pre_offset + ctx_private->slot_size;
-
- /*
- * We need to ensure that if the cmpxchg succeeds and discards the
- * record, the next record will record a full TSC, because it cannot
- * rely on the last_tsc associated with the discarded record to detect
- * overflows. The only way to ensure this is to set the last_tsc to 0
- * (assuming no 64-bit TSC overflow), which forces writing a 64-bit
- * timestamp in the next record.
- *
- * Note: if discard fails, we must leave the TSC in the record header.
- * It is needed to keep track of TSC overflows for the following
- * records.
- */
- save_last_tsc(config, buf, 0ULL);
-
- if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
- != end_offset))
- return -EPERM;
- else
- return 0;
-}
-
-static inline
-void channel_record_disable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- uatomic_inc(&chan->record_disabled);
-}
-
-static inline
-void channel_record_enable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- uatomic_dec(&chan->record_disabled);
-}
-
-static inline
-void lib_ring_buffer_record_disable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
-{
- uatomic_inc(&buf->record_disabled);
-}
-
-static inline
-void lib_ring_buffer_record_enable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
-{
- uatomic_dec(&buf->record_disabled);
-}
-
-#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring Buffer Library Synchronization Header (internal helpers).
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- */
-
-#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-
-#include <urcu/compiler.h>
-#include <urcu/tls-compat.h>
-#include <signal.h>
-#include <stdint.h>
-#include <pthread.h>
-
-#include <lttng/ringbuffer-context.h>
-#include "ringbuffer-config.h"
-#include "backend_types.h"
-#include "backend_internal.h"
-#include "frontend_types.h"
-#include "shm.h"
-
-/* Buffer offset macros */
-
-/* buf_trunc mask selects only the buffer number. */
-static inline
-unsigned long buf_trunc(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return offset & ~(chan->backend.buf_size - 1);
-}
-
-/* Select the buffer number value (counter). */
-static inline
-unsigned long buf_trunc_val(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
-}
-
-/* buf_offset mask selects only the offset within the current buffer. */
-static inline
-unsigned long buf_offset(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return offset & (chan->backend.buf_size - 1);
-}
-
-/* subbuf_offset mask selects the offset within the current subbuffer. */
-static inline
-unsigned long subbuf_offset(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return offset & (chan->backend.subbuf_size - 1);
-}
-
-/* subbuf_trunc mask selects the subbuffer number. */
-static inline
-unsigned long subbuf_trunc(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return offset & ~(chan->backend.subbuf_size - 1);
-}
-
-/* subbuf_align aligns the offset to the next subbuffer. */
-static inline
-unsigned long subbuf_align(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return (offset + chan->backend.subbuf_size)
- & ~(chan->backend.subbuf_size - 1);
-}
-
-/* subbuf_index returns the index of the current subbuffer within the buffer. */
-static inline
-unsigned long subbuf_index(unsigned long offset,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
-}
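The macros above rely on buf_size and subbuf_size being powers of two while the write offset runs freely. A self-contained worked example (sizes assumed for illustration: subbuf_size = 4096, i.e. order 12, and buf_size = 16384):

#include <assert.h>

int main(void)
{
	unsigned long subbuf_size = 4096, buf_size = 16384;
	unsigned long offset = 9300;	/* free-running write offset */

	assert((offset & (buf_size - 1)) == 9300);	/* buf_offset */
	assert((offset & (subbuf_size - 1)) == 1108);	/* subbuf_offset */
	assert((offset & ~(subbuf_size - 1)) == 8192);	/* subbuf_trunc */
	assert(((offset + subbuf_size) & ~(subbuf_size - 1)) == 12288);	/* subbuf_align */
	assert(((offset & (buf_size - 1)) >> 12) == 2);	/* subbuf_index, order 12 */
	return 0;
}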
-
-/*
- * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
- * bits from the last TSC read. When overflows are detected, the full 64-bit
- * timestamp counter should be written in the record header. Reads and writes
- * last_tsc atomically.
- */
-
-#if (CAA_BITS_PER_LONG == 32)
-static inline
-void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
-{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
- return;
-
- /*
- * Ensure the compiler performs this update in a single instruction.
- */
- v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
-}
-
-static inline
-int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
-{
- unsigned long tsc_shifted;
-
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
- return 0;
-
- tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
- if (caa_unlikely(tsc_shifted
- - (unsigned long)v_read(config, &buf->last_tsc)))
- return 1;
- else
- return 0;
-}
-#else
-static inline
-void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
-{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
- return;
-
- v_set(config, &buf->last_tsc, (unsigned long)tsc);
-}
-
-static inline
-int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
-{
- if (config->tsc_bits == 0 || config->tsc_bits == 64)
- return 0;
-
- if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
- >> config->tsc_bits))
- return 1;
- else
- return 0;
-}
-#endif
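A worked example of the 64-bit branch (tsc_bits = 27 is assumed here; the real value is chosen by the ring buffer client configuration): the compressed timestamp in the record header suffices as long as the delta since last_tsc fits within tsc_bits.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	const unsigned int tsc_bits = 27;
	uint64_t last = 0x0000000812345678ULL;

	/* Delta below 2^27: the compressed timestamp suffices. */
	assert((((last + 0x100ULL) - last) >> tsc_bits) == 0);
	/* Delta of 2^27 or more: flag it, write a full 64-bit timestamp. */
	assert((((last + (1ULL << tsc_bits)) - last) >> tsc_bits) != 0);
	return 0;
}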
-
-extern
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx)
- __attribute__((visibility("hidden")));
-
-extern
-void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
- enum switch_mode mode,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- unsigned long offset,
- unsigned long commit_count,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle,
- uint64_t tsc)
- __attribute__((visibility("hidden")));
-
-/* Buffer write helpers */
-
-static inline
-void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- unsigned long offset)
-{
- unsigned long consumed_old, consumed_new;
-
- do {
- consumed_old = uatomic_read(&buf->consumed);
- /*
- * If buffer is in overwrite mode, push the reader consumed
- * count if the write position has reached it and we are not
- * at the first iteration (don't push the reader farther than
- * the writer). This operation can be done concurrently by many
- * writers in the same buffer; the writer at the farthest
- * write position in the buffer is the one whose update
- * wins this loop.
- */
- if (caa_unlikely(subbuf_trunc(offset, chan)
- - subbuf_trunc(consumed_old, chan)
- >= chan->backend.buf_size))
- consumed_new = subbuf_align(consumed_old, chan);
- else
- return;
- } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
- consumed_new) != consumed_old));
-}
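A numeric illustration of the push condition (sizes assumed: subbuf_size = 4096, buf_size = 16384): the reader is only pushed once the writer is a full buffer ahead, and it then hops to the start of the next sub-buffer.

#include <assert.h>

int main(void)
{
	unsigned long subbuf_size = 4096, buf_size = 16384;
	unsigned long consumed = 4096;			/* reader in sub-buffer 1 */
	unsigned long offset = consumed + buf_size;	/* writer lapped the reader */

	/* subbuf_trunc(offset) - subbuf_trunc(consumed) >= buf_size: push. */
	assert((offset & ~(subbuf_size - 1)) - (consumed & ~(subbuf_size - 1))
	       >= buf_size);
	/* subbuf_align(consumed): the reader moves to the next sub-buffer. */
	assert(((consumed + subbuf_size) & ~(subbuf_size - 1)) == 8192);
	return 0;
}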
-
-/*
- * Move consumed position to the beginning of subbuffer in which the
- * write offset is. Should only be used on ring buffers that are not
- * actively being written into, because clear_reader does not take into
- * account the commit counters when moving the consumed position, which
- * can make concurrent trace producers or consumers observe consumed
- * position further than the write offset, which breaks ring buffer
- * algorithm guarantees.
- */
-static inline
-void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned long offset, consumed_old, consumed_new;
-
- chan = shmp(handle, buf->backend.chan);
- if (!chan)
- return;
- config = &chan->backend.config;
-
- do {
- offset = v_read(config, &buf->offset);
- consumed_old = uatomic_read(&buf->consumed);
- CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
- - subbuf_trunc(consumed_old, chan))
- < 0);
- consumed_new = subbuf_trunc(offset, chan);
- } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
- consumed_new) != consumed_old));
-}
-
-static inline
-int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return !!subbuf_offset(v_read(config, &buf->offset), chan);
-}
-
-static inline
-unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle)
-{
- return subbuffer_get_data_size(config, &buf->backend, idx, handle);
-}
-
-/*
- * Check if all space reservations in a buffer have been committed. This helps
- * knowing if an execution context is nested (for per-cpu buffers only).
- * This is a very specific ftrace use-case, so we keep this as "internal" API.
- */
-static inline
-int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long offset, idx, commit_count;
- struct commit_counters_hot *cc_hot;
-
- CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
- CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
-
- /*
- * Read offset and commit count in a loop so they are both read
- * atomically wrt interrupts. We deal with interrupt concurrency by
- * restarting both reads if the offset has been pushed. Note that given
- * we only have to deal with interrupt concurrency here, an interrupt
- * modifying the commit count will also modify "offset", so it is safe
- * to only check for offset modifications.
- */
- do {
- offset = v_read(config, &buf->offset);
- idx = subbuf_index(offset, chan);
- cc_hot = shmp_index(handle, buf->commit_hot, idx);
- if (caa_unlikely(!cc_hot))
- return 0;
- commit_count = v_read(config, &cc_hot->cc);
- } while (offset != v_read(config, &buf->offset));
-
- return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- - (commit_count & chan->commit_count_mask) == 0);
-}
-
-/*
- * Receive end of subbuffer TSC as parameter. It has been read in the
- * space reservation loop of either reserve or switch, which ensures it
- * progresses monotonically with event records in the buffer. Therefore,
- * it ensures that the end timestamp of a subbuffer is <= begin
- * timestamp of the following subbuffers.
- */
-static inline
-void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- unsigned long offset,
- unsigned long commit_count,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle,
- uint64_t tsc)
-{
- unsigned long old_commit_count = commit_count
- - chan->backend.subbuf_size;
-
- /* Check if all commits have been done */
- if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- - (old_commit_count & chan->commit_count_mask) == 0))
- lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
- commit_count, idx, handle, tsc);
-}
-
-/*
- * lib_ring_buffer_write_commit_counter
- *
- * For flight recording; must be called after commit.
- * This function increments the subbuffer's commit_seq counter each time the
- * commit count reaches back the reserve offset (modulo subbuffer size). It is
- * useful for crash dump.
- */
-static inline
-void lib_ring_buffer_write_commit_counter(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan,
- unsigned long buf_offset,
- unsigned long commit_count,
- struct lttng_ust_shm_handle *handle __attribute__((unused)),
- struct commit_counters_hot *cc_hot)
-{
- unsigned long commit_seq_old;
-
- if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
- return;
-
- /*
- * subbuf_offset includes commit_count_mask. We can simply
- * compare the offsets within the subbuffer without caring about
- * buffer full/empty mismatch because offset is never zero here
- * (subbuffer header and record headers have non-zero length).
- */
- if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
- return;
-
- commit_seq_old = v_read(config, &cc_hot->seq);
- if (caa_likely((long) (commit_seq_old - commit_count) < 0))
- v_set(config, &cc_hot->seq, commit_count);
-}
-
-extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
- struct channel_backend *chanb, int cpu,
- struct lttng_ust_shm_handle *handle,
- struct shm_object *shmobj)
- __attribute__((visibility("hidden")));
-
-extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/* Keep track of trap nesting inside ring buffer code */
-extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring Buffer Library Synchronization Header (types).
- *
- * See ring_buffer_frontend.c for more information on wait-free algorithms.
- */
-
-#ifndef _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
-#define _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
-
-#include <stdint.h>
-#include <string.h>
-#include <time.h> /* for timer_t */
-
-#include <urcu/list.h>
-#include <urcu/uatomic.h>
-
-#include <lttng/ringbuffer-context.h>
-#include "ringbuffer-config.h"
-#include <usterr-signal-safe.h>
-#include "backend_types.h"
-#include "shm_internal.h"
-#include "shm_types.h"
-#include "vatomic.h"
-
-#define LIB_RING_BUFFER_MAX_NESTING 5
-
-/*
- * A switch is done during tracing or as a final flush after tracing (so it
- * won't write in the new sub-buffer).
- */
-enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
-
-/* channel: collection of per-cpu ring buffers. */
-#define RB_CHANNEL_PADDING 32
-struct lttng_ust_lib_ring_buffer_channel {
- int record_disabled;
- unsigned long commit_count_mask; /*
- * Commit count mask, removing
- * the MSBs corresponding to
- * bits used to represent the
- * subbuffer index.
- */
-
- unsigned long switch_timer_interval; /* Buffer flush (us) */
- timer_t switch_timer;
- int switch_timer_enabled;
-
- unsigned long read_timer_interval; /* Reader wakeup (us) */
- timer_t read_timer;
- int read_timer_enabled;
-
- int finalized; /* Has channel been finalized */
- size_t priv_data_offset; /* Offset of private data channel config */
- unsigned int nr_streams; /* Number of streams */
- struct lttng_ust_shm_handle *handle;
- /* Extended options. */
- union {
- struct {
- int32_t blocking_timeout_ms;
- void *priv; /* Private data pointer. */
- } s;
- char padding[RB_CHANNEL_PADDING];
- } u;
- /*
- * Associated backend contains a variable-length array. Needs to
- * be last member.
- */
- struct channel_backend backend; /* Associated backend */
-} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-
-/* Per-subbuffer commit counters used on the hot path */
-#define RB_COMMIT_COUNT_HOT_PADDING 16
-struct commit_counters_hot {
- union v_atomic cc; /* Commit counter */
- union v_atomic seq; /* Consecutive commits */
- char padding[RB_COMMIT_COUNT_HOT_PADDING];
-} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-
-/* Per-subbuffer commit counters used only on cold paths */
-#define RB_COMMIT_COUNT_COLD_PADDING 24
-struct commit_counters_cold {
- union v_atomic cc_sb; /* Incremented _once_ at sb switch */
- char padding[RB_COMMIT_COUNT_COLD_PADDING];
-} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-
-/* ring buffer state */
-#define RB_CRASH_DUMP_ABI_LEN 256
-#define RB_RING_BUFFER_PADDING 60
-
-#define RB_CRASH_DUMP_ABI_MAGIC_LEN 16
-
-/*
- * The 128-bit magic number is xor'd in the process data so it does not
- * cause a false positive when searching for buffers by scanning memory.
- * The actual magic number is:
- * 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17, 0x7B, 0xF1,
- * 0x77, 0xBF, 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17,
- */
-#define RB_CRASH_DUMP_ABI_MAGIC_XOR \
- { \
- 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, 0x77 ^ 0xFF, \
- 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, \
- 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, \
- 0xF1 ^ 0xFF, 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, \
- }
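A hypothetical extraction-side helper (editor's sketch, not part of the patch) showing how a crash-dump tool would recover the plain magic bytes from the XOR-ed form stored in process memory:

static void rb_crash_magic_decode(const uint8_t xored[RB_CRASH_DUMP_ABI_MAGIC_LEN],
				  uint8_t magic[RB_CRASH_DUMP_ABI_MAGIC_LEN])
{
	int i;

	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
		magic[i] = xored[i] ^ 0xFF;	/* undo the per-byte XOR */
}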
-
-#define RB_CRASH_ENDIAN 0x1234
-
-#define RB_CRASH_DUMP_ABI_MAJOR 0
-#define RB_CRASH_DUMP_ABI_MINOR 0
-
-enum lttng_crash_type {
- LTTNG_CRASH_TYPE_UST = 0,
- LTTNG_CRASH_TYPE_KERNEL = 1,
-};
-
-struct lttng_crash_abi {
- uint8_t magic[RB_CRASH_DUMP_ABI_MAGIC_LEN];
- uint64_t mmap_length; /* Overall length of crash record */
- uint16_t endian; /*
- * { 0x12, 0x34 }: big endian
- * { 0x34, 0x12 }: little endian
- */
- uint16_t major; /* Major number. */
- uint16_t minor; /* Minor number. */
- uint8_t word_size; /* Word size (bytes). */
- uint8_t layout_type; /* enum lttng_crash_type */
-
- struct {
- uint32_t prod_offset;
- uint32_t consumed_offset;
- uint32_t commit_hot_array;
- uint32_t commit_hot_seq;
- uint32_t buf_wsb_array;
- uint32_t buf_wsb_id;
- uint32_t sb_array;
- uint32_t sb_array_shmp_offset;
- uint32_t sb_backend_p_offset;
- uint32_t content_size;
- uint32_t packet_size;
- } __attribute__((packed)) offset;
- struct {
- uint8_t prod_offset;
- uint8_t consumed_offset;
- uint8_t commit_hot_seq;
- uint8_t buf_wsb_id;
- uint8_t sb_array_shmp_offset;
- uint8_t sb_backend_p_offset;
- uint8_t content_size;
- uint8_t packet_size;
- } __attribute__((packed)) length;
- struct {
- uint32_t commit_hot_array;
- uint32_t buf_wsb_array;
- uint32_t sb_array;
- } __attribute__((packed)) stride;
-
- uint64_t buf_size; /* Size of the buffer */
- uint64_t subbuf_size; /* Sub-buffer size */
- uint64_t num_subbuf; /* Number of sub-buffers for writer */
- uint32_t mode; /* Buffer mode: 0: overwrite, 1: discard */
-} __attribute__((packed));
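Sketch of a dump-reader check built on the endian field above (the helper name is hypothetical): a crash extraction tool reads the two bytes and byte-swaps every multi-byte field when they appear reversed.

static int crash_abi_needs_byteswap(const struct lttng_crash_abi *abi)
{
	/* 0x1234 stored by the writer reads as 0x3412 on the other endianness. */
	return abi->endian == 0x3412;
}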
-
-struct lttng_ust_lib_ring_buffer {
- /* First 32 bytes are for the buffer crash dump ABI */
- struct lttng_crash_abi crash_abi;
-
- /* 32 bytes cache-hot cacheline */
- union v_atomic __attribute__((aligned(32))) offset;
- /* Current offset in the buffer */
- DECLARE_SHMP(struct commit_counters_hot, commit_hot);
- /* Commit count per sub-buffer */
- long consumed; /*
- * Consumed offset in the buffer,
- * standard atomic access (shared)
- */
- int record_disabled;
- /* End of cache-hot 32 bytes cacheline */
-
- union v_atomic last_tsc; /*
- * Last timestamp written in the buffer.
- */
-
- struct lttng_ust_lib_ring_buffer_backend backend;
- /* Associated backend */
-
- DECLARE_SHMP(struct commit_counters_cold, commit_cold);
- /* Commit count per sub-buffer */
- DECLARE_SHMP(uint64_t, ts_end); /*
- * timestamp_end per sub-buffer.
- * Time is sampled by the
- * switch_*_end() callbacks
- * which are the last space
- * reservation performed in the
- * sub-buffer before it can be
- * fully committed and
- * delivered. This time value is
- * then read by the deliver
- * callback, performed by the
- * last commit before the buffer
- * becomes readable.
- */
- long active_readers; /*
- * Active readers count
- * standard atomic access (shared)
- */
- /* Dropped records */
- union v_atomic records_lost_full; /* Buffer full */
- union v_atomic records_lost_wrap; /* Nested wrap-around */
- union v_atomic records_lost_big; /* Events too big */
- union v_atomic records_count; /* Number of records written */
- union v_atomic records_overrun; /* Number of overwritten records */
- //wait_queue_head_t read_wait; /* reader buffer-level wait queue */
- int finalized; /* buffer has been finalized */
- unsigned long get_subbuf_consumed; /* Read-side consumed */
- unsigned long prod_snapshot; /* Producer count snapshot */
- unsigned long cons_snapshot; /* Consumer count snapshot */
- unsigned int get_subbuf:1; /* Sub-buffer being held by reader */
- /* shmp pointer to self */
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
- char padding[RB_RING_BUFFER_PADDING];
-} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-
-/*
- * ring buffer private context
- *
- * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
- * lib_ring_buffer_try_discard_reserve(), lttng_ust_lib_ring_buffer_align_ctx() and
- * lib_ring_buffer_write().
- *
- * This context is allocated on an internal shadow-stack by a successful reserve
- * operation, used by align/write, and freed by commit.
- */
-
-struct lttng_ust_lib_ring_buffer_ctx_private {
- /* input received by lib_ring_buffer_reserve(). */
- struct lttng_ust_lib_ring_buffer_ctx *pub;
- struct lttng_ust_lib_ring_buffer_channel *chan; /* channel */
-
- /* output from lib_ring_buffer_reserve() */
- int reserve_cpu; /* processor id updated by the reserve */
- size_t slot_size; /* size of the reserved slot */
- unsigned long buf_offset; /* offset following the record header */
- unsigned long pre_offset; /*
- * Initial offset position _before_
- * the record is written. Positioned
- * prior to record header alignment
- * padding.
- */
- uint64_t tsc; /* time-stamp counter value */
- unsigned int rflags; /* reservation flags */
- void *ip; /* caller ip address */
-
- struct lttng_ust_lib_ring_buffer *buf; /*
- * buffer corresponding to processor id
- * for this channel
- */
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-};
-
-static inline
-void *channel_get_private_config(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return ((char *) chan) + chan->priv_data_offset;
-}
-
-static inline
-void *channel_get_private(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- return chan->u.s.priv;
-}
-
-static inline
-void channel_set_private(struct lttng_ust_lib_ring_buffer_channel *chan, void *priv)
-{
- chan->u.s.priv = priv;
-}
-
-#ifndef __rb_same_type
-#define __rb_same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
-#endif
-
-/*
- * Issue warnings and disable channels upon internal error.
- * Can receive struct lttng_ust_lib_ring_buffer or struct lttng_ust_lib_ring_buffer_backend
- * parameters.
- */
-#define CHAN_WARN_ON(c, cond) \
- ({ \
- struct lttng_ust_lib_ring_buffer_channel *__chan; \
- int _____ret = caa_unlikely(cond); \
- if (_____ret) { \
- if (__rb_same_type(*(c), struct channel_backend)) \
- __chan = caa_container_of((void *) (c), \
- struct lttng_ust_lib_ring_buffer_channel, \
- backend); \
- else if (__rb_same_type(*(c), \
- struct lttng_ust_lib_ring_buffer_channel)) \
- __chan = (void *) (c); \
- else \
- BUG_ON(1); \
- uatomic_inc(&__chan->record_disabled); \
- WARN_ON(1); \
- } \
- _____ret = _____ret; /* For clang "unused result". */ \
- })
-
-/**
- * lttng_ust_lib_ring_buffer_align_ctx - Align context offset on "alignment"
- * @ctx: ring buffer context.
- */
-static inline
-void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- size_t alignment)
- lttng_ust_notrace;
-static inline
-void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- size_t alignment)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
-
- ctx_private->buf_offset += lttng_ust_lib_ring_buffer_align(ctx_private->buf_offset,
- alignment);
-}
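A minimal serializer fragment (editor's illustration; lib_ring_buffer_write() comes from the backend header, and the wrapper name is hypothetical) showing the intended pairing of align and write:

static inline
void serialize_u64(const struct lttng_ust_lib_ring_buffer_config *config,
		   struct lttng_ust_lib_ring_buffer_ctx *ctx, uint64_t v)
{
	/* Pad so the 64-bit field starts on its natural alignment... */
	lttng_ust_lib_ring_buffer_align_ctx(ctx, __alignof__(v));
	/* ...then copy the payload at the now-aligned buf_offset. */
	lib_ring_buffer_write(config, ctx, &v, sizeof(v));
}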
-
-#endif /* _LTTNG_RING_BUFFER_FRONTEND_TYPES_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_GETCPU_H
-#define _LTTNG_GETCPU_H
-
-#include <urcu/compiler.h>
-#include <urcu/system.h>
-#include <urcu/arch.h>
-
-void lttng_ust_getcpu_init(void)
- __attribute__((visibility("hidden")));
-
-extern int (*lttng_get_cpu)(void)
- __attribute__((visibility("hidden")));
-
-#ifdef LTTNG_UST_DEBUG_VALGRIND
-
-/*
- * Fall back on CPU 0 if liblttng-ust is built with Valgrind support.
- * get_cpu() returns the current CPU number. It may change due to
- * migration, so it is only statistically accurate.
- */
-static inline
-int lttng_ust_get_cpu_internal(void)
-{
- return 0;
-}
-
-#else
-
-/*
- * sched_getcpu.
- */
-#ifdef __linux__
-
-#if !HAVE_SCHED_GETCPU
-#include <sys/syscall.h>
-#define __getcpu(cpu, node, cache) syscall(__NR_getcpu, cpu, node, cache)
-/*
- * If getcpu is not implemented in the kernel, use cpu 0 as fallback.
- */
-static inline
-int lttng_ust_get_cpu_internal(void)
-{
- int cpu, ret;
-
- ret = __getcpu(&cpu, NULL, NULL);
- if (caa_unlikely(ret < 0))
- return 0;
- return cpu;
-}
-#else /* HAVE_SCHED_GETCPU */
-#include <sched.h>
-
-/*
- * If getcpu is not implemented in the kernel, use cpu 0 as fallback.
- */
-static inline
-int lttng_ust_get_cpu_internal(void)
-{
- int cpu;
-
- cpu = sched_getcpu();
- if (caa_unlikely(cpu < 0))
- return 0;
- return cpu;
-}
-#endif /* HAVE_SCHED_GETCPU */
-
-#elif (defined(__FreeBSD__) || defined(__CYGWIN__))
-
-/*
- * FreeBSD and Cygwin do not allow querying the current CPU number.
- * Always use CPU 0, with the associated performance degradation on SMP.
- */
-static inline
-int lttng_ust_get_cpu_internal(void)
-{
- return 0;
-}
-
-#else
-#error "Please add support for your OS into liblttng-ust/compat.h."
-#endif
-
-#endif
-
-static inline
-int lttng_ust_get_cpu(void)
-{
- int (*getcpu)(void) = CMM_LOAD_SHARED(lttng_get_cpu);
-
- if (caa_likely(!getcpu)) {
- return lttng_ust_get_cpu_internal();
- } else {
- return getcpu();
- }
-}
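For illustration, the kind of override a test plugin might install (a sketch: real plugins should go through the public lttng_ust_getcpu_override() API rather than storing the pointer directly):

static int always_cpu0(void)
{
	return 0;	/* pin every event to CPU 0 */
}

static void install_getcpu_override(void)
{
	/* Matches the CMM_LOAD_SHARED() read in lttng_ust_get_cpu(). */
	CMM_STORE_SHARED(lttng_get_cpu, always_cpu0);
}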
-
-#endif /* _LTTNG_GETCPU_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2019 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
- */
-
-#ifndef _LTTNG_MMAP_H
-#define _LTTNG_MMAP_H
-
-#include <sys/mman.h>
-
-#if defined(__linux__) && defined(MAP_POPULATE)
-# define LTTNG_MAP_POPULATE MAP_POPULATE
-#else
-# define LTTNG_MAP_POPULATE 0
-#endif /* __linux__ && MAP_POPULATE */
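Typical use of the wrapper (a sketch with assumed fd/len arguments): on Linux the mapping is pre-faulted, elsewhere LTTNG_MAP_POPULATE expands to 0 and the flags reduce to plain MAP_SHARED.

static void *map_buffer(int fd, size_t len)
{
	return mmap(NULL, len, PROT_READ | PROT_WRITE,
		    MAP_SHARED | LTTNG_MAP_POPULATE, fd, 0);
}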
-
-#endif /* _LTTNG_MMAP_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_RING_BUFFER_NOHZ_H
-#define _LTTNG_RING_BUFFER_NOHZ_H
-
-#ifdef CONFIG_LIB_RING_BUFFER
-void lib_ring_buffer_tick_nohz_flush(void)
- __attribute__((visibility("hidden")));
-
-void lib_ring_buffer_tick_nohz_stop(void)
- __attribute__((visibility("hidden")));
-
-void lib_ring_buffer_tick_nohz_restart(void)
- __attribute__((visibility("hidden")));
-
-#else
-
-static inline void lib_ring_buffer_tick_nohz_flush(void)
-{
-}
-
-static inline void lib_ring_buffer_tick_nohz_stop(void)
-{
-}
-
-static inline void lib_ring_buffer_tick_nohz_restart(void)
-{
-}
-#endif
-
-#endif /* _LTTNG_RING_BUFFER_NOHZ_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2012-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H
-#define _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H
-
-void lttng_fixup_ringbuffer_tls(void)
- __attribute__((visibility("hidden")));
-
-void lttng_ust_ringbuffer_set_allow_blocking(void)
- __attribute__((visibility("hidden")));
-
-#endif /* _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <urcu/arch.h>
-#include <limits.h>
-
-#include <lttng/ust-utils.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "ringbuffer-config.h"
-#include "vatomic.h"
-#include "backend.h"
-#include "frontend.h"
-#include "smp.h"
-#include "shm.h"
-#include "ust-compat.h"
-
-/**
- * lib_ring_buffer_backend_allocate - allocate a channel buffer
- * @config: ring buffer instance configuration
- * @bufb: the buffer backend struct
- * @size: total size of the buffer
- * @num_subbuf: number of subbuffers
- * @extra_reader_sb: need extra subbuffer for reader
- */
-static
-int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t size __attribute__((unused)), size_t num_subbuf,
- int extra_reader_sb,
- struct lttng_ust_shm_handle *handle,
- struct shm_object *shmobj)
-{
- struct channel_backend *chanb;
- unsigned long subbuf_size, mmap_offset = 0;
- unsigned long num_subbuf_alloc;
- unsigned long i;
- long page_size;
-
- chanb = &shmp(handle, bufb->chan)->backend;
- if (!chanb)
- return -EINVAL;
-
- subbuf_size = chanb->subbuf_size;
- num_subbuf_alloc = num_subbuf;
-
- if (extra_reader_sb)
- num_subbuf_alloc++;
-
- page_size = LTTNG_UST_PAGE_SIZE;
- if (page_size <= 0) {
- goto page_size_error;
- }
-
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
- set_shmp(bufb->array, zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
- if (caa_unlikely(!shmp(handle, bufb->array)))
- goto array_error;
-
- /*
- * This is the largest element (the buffer pages) which needs to
- * be aligned on page size.
- */
- align_shm(shmobj, page_size);
- set_shmp(bufb->memory_map, zalloc_shm(shmobj,
- subbuf_size * num_subbuf_alloc));
- if (caa_unlikely(!shmp(handle, bufb->memory_map)))
- goto memory_map_error;
-
- /* Allocate backend pages array elements */
- for (i = 0; i < num_subbuf_alloc; i++) {
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
- set_shmp(shmp_index(handle, bufb->array, i)->shmp,
- zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
- if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
- goto free_array;
- }
-
- /* Allocate write-side subbuffer table */
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
- set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
- * num_subbuf));
- if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
- goto free_array;
-
- for (i = 0; i < num_subbuf; i++) {
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
-
- sb = shmp_index(handle, bufb->buf_wsb, i);
- if (!sb)
- goto free_array;
- sb->id = subbuffer_id(config, 0, 1, i);
- }
-
- /* Assign read-side subbuffer table */
- if (extra_reader_sb)
- bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
- num_subbuf_alloc - 1);
- else
- bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
-
- /* Allocate subbuffer packet counter table */
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
- set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
- sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
- * num_subbuf));
- if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
- goto free_wsb;
-
- /* Assign pages to page index */
- for (i = 0; i < num_subbuf_alloc; i++) {
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
- struct lttng_ust_lib_ring_buffer_backend_pages *pages;
- struct shm_ref ref;
-
- ref.index = bufb->memory_map._ref.index;
- ref.offset = bufb->memory_map._ref.offset;
- ref.offset += i * subbuf_size;
-
- sbp = shmp_index(handle, bufb->array, i);
- if (!sbp)
- goto free_array;
- pages = shmp(handle, sbp->shmp);
- if (!pages)
- goto free_array;
- set_shmp(pages->p, ref);
- if (config->output == RING_BUFFER_MMAP) {
- pages->mmap_offset = mmap_offset;
- mmap_offset += subbuf_size;
- }
- }
- return 0;
-
-free_wsb:
- /* bufb->buf_wsb will be freed by shm teardown */
-free_array:
- /* bufb->array[i] will be freed by shm teardown */
-memory_map_error:
- /* bufb->array will be freed by shm teardown */
-array_error:
-page_size_error:
- return -ENOMEM;
-}
-
-int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
- struct channel_backend *chanb, int cpu,
- struct lttng_ust_shm_handle *handle,
- struct shm_object *shmobj)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
-
- set_shmp(bufb->chan, handle->chan._ref);
- bufb->cpu = cpu;
-
- return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
- chanb->num_subbuf,
- chanb->extra_reader_sb,
- handle, shmobj);
-}
-
-void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
- struct lttng_ust_shm_handle *handle)
-{
- struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned long num_subbuf_alloc;
- unsigned int i;
-
- chanb = &shmp(handle, bufb->chan)->backend;
- if (!chanb)
- return;
- config = &chanb->config;
-
- num_subbuf_alloc = chanb->num_subbuf;
- if (chanb->extra_reader_sb)
- num_subbuf_alloc++;
-
- for (i = 0; i < chanb->num_subbuf; i++) {
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
-
- sb = shmp_index(handle, bufb->buf_wsb, i);
- if (!sb)
- return;
- sb->id = subbuffer_id(config, 0, 1, i);
- }
- if (chanb->extra_reader_sb)
- bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
- num_subbuf_alloc - 1);
- else
- bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
-
- for (i = 0; i < num_subbuf_alloc; i++) {
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
- struct lttng_ust_lib_ring_buffer_backend_pages *pages;
-
- sbp = shmp_index(handle, bufb->array, i);
- if (!sbp)
- return;
- pages = shmp(handle, sbp->shmp);
- if (!pages)
- return;
- /* Don't reset mmap_offset */
- v_set(config, &pages->records_commit, 0);
- v_set(config, &pages->records_unread, 0);
- pages->data_size = 0;
- /* Don't reset backend page and virt addresses */
- }
- /* Don't reset num_pages_per_subbuf, cpu, allocated */
- v_set(config, &bufb->records_read, 0);
-}
-
-/*
- * The frontend is responsible for also calling ring_buffer_backend_reset for
- * each buffer when calling channel_backend_reset.
- */
-void channel_backend_reset(struct channel_backend *chanb)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
- struct lttng_ust_lib_ring_buffer_channel, backend);
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
-
- /*
- * Don't reset buf_size, subbuf_size, subbuf_size_order,
- * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
- * priv, notifiers, config, cpumask and name.
- */
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
-}
-
-/**
- * channel_backend_init - initialize a channel backend
- * @chanb: channel backend
- * @name: channel name
- * @config: client ring buffer configuration
- * @parent: dentry of parent directory, %NULL for root directory
- * @subbuf_size: size of sub-buffers (>= page size, power of 2)
- * @num_subbuf: number of sub-buffers (power of 2)
- * @handle: shared memory handle
- * @stream_fds: stream file descriptors.
- *
- * Returns 0 on success, a negative error code otherwise.
- *
- * Creates per-cpu channel buffers using the sizes and attributes
- * specified. The created channel buffer files will be named
- * name_0...name_N-1. File permissions will be %S_IRUSR.
- *
- * Called with CPU hotplug disabled.
- */
-int channel_backend_init(struct channel_backend *chanb,
- const char *name,
- const struct lttng_ust_lib_ring_buffer_config *config,
- size_t subbuf_size, size_t num_subbuf,
- struct lttng_ust_shm_handle *handle,
- const int *stream_fds)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
- struct lttng_ust_lib_ring_buffer_channel, backend);
- unsigned int i;
- int ret;
- size_t shmsize = 0, num_subbuf_alloc;
- long page_size;
-
- if (!name)
- return -EPERM;
-
- page_size = LTTNG_UST_PAGE_SIZE;
- if (page_size <= 0) {
- return -ENOMEM;
- }
- /* Check that the subbuffer size is at least a page. */
- if (subbuf_size < page_size)
- return -EINVAL;
-
- /*
- * Make sure the number of subbuffers and subbuffer size are
- * power of 2, and nonzero.
- */
- if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
- return -EINVAL;
- if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
- return -EINVAL;
- /*
- * Overwrite mode buffers require at least 2 subbuffers per
- * buffer.
- */
- if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
- return -EINVAL;
-
- ret = subbuffer_id_check_index(config, num_subbuf);
- if (ret)
- return ret;
-
- chanb->buf_size = num_subbuf * subbuf_size;
- chanb->subbuf_size = subbuf_size;
- chanb->buf_size_order = get_count_order(chanb->buf_size);
- chanb->subbuf_size_order = get_count_order(subbuf_size);
- chanb->num_subbuf_order = get_count_order(num_subbuf);
- chanb->extra_reader_sb =
- (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
- chanb->num_subbuf = num_subbuf;
- strncpy(chanb->name, name, NAME_MAX);
- chanb->name[NAME_MAX - 1] = '\0';
- memcpy(&chanb->config, config, sizeof(*config));
-
- /* Per-cpu buffer size: control (prior to backend) */
- shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_hot));
- shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_cold));
- shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
- /* Sampled timestamp end */
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(uint64_t));
- shmsize += sizeof(uint64_t) * num_subbuf;
-
- /* Per-cpu buffer size: backend */
- /* num_subbuf + 1 is the worst case */
- num_subbuf_alloc = num_subbuf + 1;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
- shmsize += lttng_ust_offset_align(shmsize, page_size);
- shmsize += subbuf_size * num_subbuf_alloc;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- struct lttng_ust_lib_ring_buffer *buf;
- /*
- * We need to allocate for all possible cpus.
- */
- for_each_possible_cpu(i) {
- struct shm_object *shmobj;
-
- shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM, stream_fds[i], i);
- if (!shmobj)
- goto end;
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
- set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
- buf = shmp(handle, chanb->buf[i].shmp);
- if (!buf)
- goto end;
- set_shmp(buf->self, chanb->buf[i].shmp._ref);
- ret = lib_ring_buffer_create(buf, chanb, i,
- handle, shmobj);
- if (ret)
- goto free_bufs; /* cpu hotplug locked */
- }
- } else {
- struct shm_object *shmobj;
- struct lttng_ust_lib_ring_buffer *buf;
-
- shmobj = shm_object_table_alloc(handle->table, shmsize,
- SHM_OBJECT_SHM, stream_fds[0], -1);
- if (!shmobj)
- goto end;
- align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
- set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
- buf = shmp(handle, chanb->buf[0].shmp);
- if (!buf)
- goto end;
- set_shmp(buf->self, chanb->buf[0].shmp._ref);
- ret = lib_ring_buffer_create(buf, chanb, -1,
- handle, shmobj);
- if (ret)
- goto free_bufs;
- }
- chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
-
- return 0;
-
-free_bufs:
- /* We only free the buffer data upon shm teardown */
-end:
- return -ENOMEM;
-}
-
-/**
- * channel_backend_free - destroy the channel
- * @chan: the channel
- *
- * Destroy all channel buffers and frees the channel.
- */
-void channel_backend_free(struct channel_backend *chanb __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
- /* SHM teardown takes care of everything */
-}
-
-/**
- * lib_ring_buffer_read - read data from ring_buffer_buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @dest : destination address
- * @len : length to copy to destination
- *
- * Should be protected by get_subbuf/put_subbuf.
- * Returns the length copied.
- */
-size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
- void *dest, size_t len, struct lttng_ust_shm_handle *handle)
-{
- struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
- ssize_t orig_len;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- unsigned long sb_bindex, id;
- void *src;
-
- chanb = &shmp(handle, bufb->chan)->backend;
- if (!chanb)
- return 0;
- config = &chanb->config;
- orig_len = len;
- offset &= chanb->buf_size - 1;
-
- if (caa_unlikely(!len))
- return 0;
- id = bufb->buf_rsb.id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return 0;
- /*
- * Underlying layer should never ask for reads across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
- CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return 0;
- src = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!src))
- return 0;
- memcpy(dest, src, len);
- return orig_len;
-}
-
-/**
- * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
- * @bufb : buffer backend
- * @offset : offset within the buffer
- * @dest : destination address
- * @len : destination's length
- *
- * Return string's length, or -EINVAL on error.
- * Should be protected by get_subbuf/put_subbuf.
- * Destination length should be at least 1 to hold '\0'.
- */
-int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
- void *dest, size_t len, struct lttng_ust_shm_handle *handle)
-{
- struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
- ssize_t string_len, orig_offset;
- char *str;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- unsigned long sb_bindex, id;
-
- chanb = &shmp(handle, bufb->chan)->backend;
- if (!chanb)
- return -EINVAL;
- config = &chanb->config;
- if (caa_unlikely(!len))
- return -EINVAL;
- offset &= chanb->buf_size - 1;
- orig_offset = offset;
- id = bufb->buf_rsb.id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return -EINVAL;
- /*
- * Underlying layer should never ask for reads across
- * subbuffers.
- */
- CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
- CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return -EINVAL;
- str = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
- if (caa_unlikely(!str))
- return -EINVAL;
- string_len = strnlen(str, len);
- if (dest && len) {
- /* Leave room for the terminating NUL within the len-byte destination. */
- size_t copy_len = (size_t) string_len >= len ? len - 1 : (size_t) string_len;
-
- memcpy(dest, str, copy_len);
- /* Terminate after the copied bytes, not at dest[0]. */
- ((char *) dest)[copy_len] = '\0';
- }
- offset += string_len;
- return offset - orig_offset; /* string length, per the contract above */
-}
-
-/**
- * lib_ring_buffer_read_offset_address - get address of a buffer location
- * @bufb : buffer backend
- * @offset : offset within the buffer.
- *
- * Return the address where a given offset is located (for read).
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's never on a page boundary, it's safe to read/write directly
- * from/to this address, as long as the read/write is never bigger than
- * a page size.
- */
-void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t offset,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned long sb_bindex, id;
-
- chanb = &shmp(handle, bufb->chan)->backend;
- if (!chanb)
- return NULL;
- config = &chanb->config;
- offset &= chanb->buf_size - 1;
- id = bufb->buf_rsb.id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return NULL;
- CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return NULL;
- return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
-}
-
-/**
- * lib_ring_buffer_offset_address - get address of a location within the buffer
- * @bufb : buffer backend
- * @offset : offset within the buffer.
- *
- * Return the address where a given offset is located.
- * Should be used to get the current subbuffer header pointer. Given we know
- * it's always at the beginning of a page, it's safe to write directly to this
- * address, as long as the write is never bigger than a page size.
- */
-void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
- size_t offset,
- struct lttng_ust_shm_handle *handle)
-{
- size_t sbidx;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
- struct channel_backend *chanb;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned long sb_bindex, id;
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
-
- chanb = &shmp(handle, bufb->chan)->backend;
- if (!chanb)
- return NULL;
- config = &chanb->config;
- offset &= chanb->buf_size - 1;
- sbidx = offset >> chanb->subbuf_size_order;
- sb = shmp_index(handle, bufb->buf_wsb, sbidx);
- if (!sb)
- return NULL;
- id = sb->id;
- sb_bindex = subbuffer_id_get_index(config, id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return NULL;
- CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, id));
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return NULL;
- return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
- * recorder (overwrite) modes. See thesis:
- *
- * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
- * dissertation, Ecole Polytechnique de Montreal.
- * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
- *
- * - Algorithm presentation in Chapter 5:
- * "Lockless Multi-Core High-Throughput Buffering".
- * - Algorithm formal verification in Section 8.6:
- * "Formal verification of LTTng"
- *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Inspired from LTT and RelayFS:
- * Karim Yaghmour <karim@opersys.com>
- * Tom Zanussi <zanussi@us.ibm.com>
- * Bob Wisniewski <bob@watson.ibm.com>
- * And from K42 :
- * Bob Wisniewski <bob@watson.ibm.com>
- *
- * Buffer reader semantics:
- *
- * - get_subbuf_size
- * while buffer is not finalized and empty
- * - get_subbuf
- * - if return value != 0, continue
- * - splice one subbuffer worth of data to a pipe
- * - splice the data from pipe to disk/network
- * - put_subbuf
- */
-
-#define _LGPL_SOURCE
-#include <sys/types.h>
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <time.h>
-#include <stdbool.h>
-#include <stdint.h>
-#include <urcu/compiler.h>
-#include <urcu/ref.h>
-#include <urcu/tls-compat.h>
-#include <poll.h>
-#include <ust-helper.h>
-
-#include <lttng/ust-utils.h>
-#include <lttng/ringbuffer-context.h>
-
-#include "smp.h"
-#include "ringbuffer-config.h"
-#include "vatomic.h"
-#include "backend.h"
-#include "frontend.h"
-#include "shm.h"
-#include "rb-init.h"
-#include "../liblttng-ust/compat.h" /* For ENODATA */
-
-/* Print DBG() messages about events lost only every 1048576 hits */
-#define DBG_PRINT_NR_LOST (1UL << 20)
-
-#define LTTNG_UST_RB_SIG_FLUSH SIGRTMIN
-#define LTTNG_UST_RB_SIG_READ (SIGRTMIN + 1)
-#define LTTNG_UST_RB_SIG_TEARDOWN (SIGRTMIN + 2)
-#define CLOCKID CLOCK_MONOTONIC
-#define LTTNG_UST_RING_BUFFER_GET_RETRY 10
-#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS 10
-#define RETRY_DELAY_MS 100 /* 100 ms. */
-
-/*
- * Non-static to ensure the compiler does not optimize away the xor.
- */
-uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
-
-/*
- * Use POSIX SHM: shm_open(3) and shm_unlink(3).
- * close(2) to close the fd returned by shm_open.
- * shm_unlink releases the shared memory object name.
- * ftruncate(2) sets the size of the memory object.
- * mmap/munmap maps the shared memory obj to a virtual address in the
- * calling process (should be done both in libust and consumer).
- * See shm_overview(7) for details.
- * Pass file descriptor returned by shm_open(3) to ltt-sessiond through
- * a UNIX socket.
- *
- * Since we don't need to access the object using its name, we can
- * immediately shm_unlink(3) it, and only keep the handle with its file
- * descriptor.
- */
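A condensed sketch of that lifecycle (illustrative names, error handling trimmed to early returns; relies on the headers already included above):

static void *create_anonymous_shm(const char *name, size_t len, int *out_fd)
{
	void *p = MAP_FAILED;
	int fd;

	fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0600);
	if (fd < 0)
		return NULL;
	shm_unlink(name);	/* name no longer needed: keep only the fd */
	if (ftruncate(fd, (off_t) len) == 0)
		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return NULL;
	}
	*out_fd = fd;	/* later passed to the consumer over a UNIX socket */
	return p;
}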
-
-/*
- * Internal structure representing offsets to use at a sub-buffer switch.
- */
-struct switch_offsets {
- unsigned long begin, end, old;
- size_t pre_header_padding, size;
- unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
- switch_old_end:1;
-};
-
-DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
-
-/*
- * wakeup_fd_mutex protects wakeup fd use by timer from concurrent
- * close.
- */
-static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf, int cpu,
- struct lttng_ust_shm_handle *handle);
-
-/*
- * Handle the timer teardown race wrt freeing of private data memory:
- * ring buffer signals are handled by a single thread, which provides a
- * synchronization point between the handling of each signal.
- * Protected by the lock within the structure.
- */
-struct timer_signal_data {
- pthread_t tid; /* thread id managing signals */
- int setup_done;
- int qs_done;
- pthread_mutex_t lock;
-};
-
-static struct timer_signal_data timer_signal = {
- .tid = 0,
- .setup_done = 0,
- .qs_done = 0,
- .lock = PTHREAD_MUTEX_INITIALIZER,
-};
-
-static bool lttng_ust_allow_blocking;
-
-void lttng_ust_ringbuffer_set_allow_blocking(void)
-{
- lttng_ust_allow_blocking = true;
-}
-
-/* Get blocking timeout, in ms */
-static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- if (!lttng_ust_allow_blocking)
- return 0;
- return chan->u.s.blocking_timeout_ms;
-}
-
-/**
- * lib_ring_buffer_reset - Reset ring buffer to initial values.
- * @buf: Ring buffer.
- *
- * Effectively empty the ring buffer. Should be called when the buffer is not
- * used for writing. The ring buffer can be opened for reading, but the reader
- * should not be using the iterator concurrently with reset. The previous
- * current iterator record is reset.
- */
-void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned int i;
-
- chan = shmp(handle, buf->backend.chan);
- if (!chan)
- return;
- config = &chan->backend.config;
- /*
- * Reset iterator first. It will put the subbuffer if it currently holds
- * it.
- */
- v_set(config, &buf->offset, 0);
- for (i = 0; i < chan->backend.num_subbuf; i++) {
- struct commit_counters_hot *cc_hot;
- struct commit_counters_cold *cc_cold;
- uint64_t *ts_end;
-
- cc_hot = shmp_index(handle, buf->commit_hot, i);
- if (!cc_hot)
- return;
- cc_cold = shmp_index(handle, buf->commit_cold, i);
- if (!cc_cold)
- return;
- ts_end = shmp_index(handle, buf->ts_end, i);
- if (!ts_end)
- return;
- v_set(config, &cc_hot->cc, 0);
- v_set(config, &cc_hot->seq, 0);
- v_set(config, &cc_cold->cc_sb, 0);
- *ts_end = 0;
- }
- uatomic_set(&buf->consumed, 0);
- uatomic_set(&buf->record_disabled, 0);
- v_set(config, &buf->last_tsc, 0);
- lib_ring_buffer_backend_reset(&buf->backend, handle);
- /* Don't reset number of active readers */
- v_set(config, &buf->records_lost_full, 0);
- v_set(config, &buf->records_lost_wrap, 0);
- v_set(config, &buf->records_lost_big, 0);
- v_set(config, &buf->records_count, 0);
- v_set(config, &buf->records_overrun, 0);
- buf->finalized = 0;
-}
-
-/**
- * channel_reset - Reset channel to initial values.
- * @chan: Channel.
- *
- * Effectively empty the channel. Should be called when the channel is not used
- * for writing. The channel can be opened for reading, but the reader should not
- * be using the iterator concurrently with reset. The previous current iterator
- * record is reset.
- */
-void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- /*
- * Reset iterators first. Will put the subbuffer if held for reading.
- */
- uatomic_set(&chan->record_disabled, 0);
- /* Don't reset commit_count_mask, still valid */
- channel_backend_reset(&chan->backend);
- /* Don't reset switch/read timer interval */
- /* Don't reset notifiers and notifier enable bits */
- /* Don't reset reader reference count */
-}
-
-static
-void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_crash_abi *crash_abi,
- struct lttng_ust_lib_ring_buffer *buf,
- struct channel_backend *chanb,
- struct shm_object *shmobj,
- struct lttng_ust_shm_handle *handle)
-{
- int i;
-
- for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
- crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
- crash_abi->mmap_length = shmobj->memory_map_size;
- crash_abi->endian = RB_CRASH_ENDIAN;
- crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
- crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
- crash_abi->word_size = sizeof(unsigned long);
- crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;
-
- /* Offset of fields */
- crash_abi->offset.prod_offset =
- (uint32_t) ((char *) &buf->offset - (char *) buf);
- crash_abi->offset.consumed_offset =
- (uint32_t) ((char *) &buf->consumed - (char *) buf);
- crash_abi->offset.commit_hot_array =
- (uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
- crash_abi->offset.commit_hot_seq =
- offsetof(struct commit_counters_hot, seq);
- crash_abi->offset.buf_wsb_array =
- (uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
- crash_abi->offset.buf_wsb_id =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
- crash_abi->offset.sb_array =
- (uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
- crash_abi->offset.sb_array_shmp_offset =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
- shmp._ref.offset);
- crash_abi->offset.sb_backend_p_offset =
- offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
- p._ref.offset);
-
- /* Field length */
- crash_abi->length.prod_offset = sizeof(buf->offset);
- crash_abi->length.consumed_offset = sizeof(buf->consumed);
- crash_abi->length.commit_hot_seq =
- sizeof(((struct commit_counters_hot *) NULL)->seq);
- crash_abi->length.buf_wsb_id =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
- crash_abi->length.sb_array_shmp_offset =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
- crash_abi->length.sb_backend_p_offset =
- sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
-
- /* Array stride */
- crash_abi->stride.commit_hot_array =
- sizeof(struct commit_counters_hot);
- crash_abi->stride.buf_wsb_array =
- sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
- crash_abi->stride.sb_array =
- sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
-
- /* Buffer constants */
- crash_abi->buf_size = chanb->buf_size;
- crash_abi->subbuf_size = chanb->subbuf_size;
- crash_abi->num_subbuf = chanb->num_subbuf;
- crash_abi->mode = (uint32_t) chanb->config.mode;
-
- if (config->cb.content_size_field) {
- size_t offset, length;
-
- config->cb.content_size_field(config, &offset, &length);
- crash_abi->offset.content_size = offset;
- crash_abi->length.content_size = length;
- } else {
- crash_abi->offset.content_size = 0;
- crash_abi->length.content_size = 0;
- }
- if (config->cb.packet_size_field) {
- size_t offset, length;
-
- config->cb.packet_size_field(config, &offset, &length);
- crash_abi->offset.packet_size = offset;
- crash_abi->length.packet_size = length;
- } else {
- crash_abi->offset.packet_size = 0;
- crash_abi->length.packet_size = 0;
- }
-}
-
-/*
- * Must be called under cpu hotplug protection.
- */
-int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
- struct channel_backend *chanb, int cpu,
- struct lttng_ust_shm_handle *handle,
- struct shm_object *shmobj)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
- struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
- struct lttng_ust_lib_ring_buffer_channel, backend);
- struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
- struct lttng_ust_lib_ring_buffer_channel *shmp_chan;
- struct commit_counters_hot *cc_hot;
- void *priv = channel_get_private_config(chan);
- size_t subbuf_header_size;
- uint64_t tsc;
- int ret;
-
- /* Test for cpu hotplug */
- if (buf->backend.allocated)
- return 0;
-
- align_shm(shmobj, __alignof__(struct commit_counters_hot));
- set_shmp(buf->commit_hot,
- zalloc_shm(shmobj,
- sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
- if (!shmp(handle, buf->commit_hot)) {
- return -ENOMEM;
- }
-
- align_shm(shmobj, __alignof__(struct commit_counters_cold));
- set_shmp(buf->commit_cold,
- zalloc_shm(shmobj,
- sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
- if (!shmp(handle, buf->commit_cold)) {
- ret = -ENOMEM;
- goto free_commit;
- }
-
- align_shm(shmobj, __alignof__(uint64_t));
- set_shmp(buf->ts_end,
- zalloc_shm(shmobj,
- sizeof(uint64_t) * chan->backend.num_subbuf));
- if (!shmp(handle, buf->ts_end)) {
- ret = -ENOMEM;
- goto free_commit_cold;
- }
-
- ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
- cpu, handle, shmobj);
- if (ret) {
- goto free_init;
- }
-
- /*
- * Write the subbuffer header for first subbuffer so we know the total
- * duration of data gathering.
- */
- subbuf_header_size = config->cb.subbuffer_header_size();
- v_set(config, &buf->offset, subbuf_header_size);
- wsb = shmp_index(handle, buf->backend.buf_wsb, 0);
- if (!wsb) {
- ret = -EPERM;
- goto free_chanbuf;
- }
- subbuffer_id_clear_noref(config, &wsb->id);
- shmp_chan = shmp(handle, buf->backend.chan);
- if (!shmp_chan) {
- ret = -EPERM;
- goto free_chanbuf;
- }
- tsc = config->cb.ring_buffer_clock_read(shmp_chan);
- config->cb.buffer_begin(buf, tsc, 0, handle);
- cc_hot = shmp_index(handle, buf->commit_hot, 0);
- if (!cc_hot) {
- ret = -EPERM;
- goto free_chanbuf;
- }
- v_add(config, subbuf_header_size, &cc_hot->cc);
- v_add(config, subbuf_header_size, &cc_hot->seq);
-
- if (config->cb.buffer_create) {
- ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
- if (ret)
- goto free_chanbuf;
- }
-
- init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);
-
- buf->backend.allocated = 1;
- return 0;
-
- /* Error handling */
-free_init:
- /* ts_end will be freed by shm teardown */
-free_commit_cold:
- /* commit_cold will be freed by shm teardown */
-free_commit:
- /* commit_hot will be freed by shm teardown */
-free_chanbuf:
- return ret;
-}
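-
-/*
- * Sketch (not part of this file): how a post-mortem crash reader could
- * consume the ABI laid out by init_crash_abi() above. Field positions
- * are resolved purely from the recorded offsets, lengths and strides,
- * so the reader needs no knowledge of the writer's struct layouts. The
- * "read_field" helper and the mmap'd "base" pointer are hypothetical;
- * only the crash_abi field names are taken from the code above.
- */
-static uint64_t crash_read_commit_hot_seq(const char *base,
-		const struct lttng_crash_abi *crash_abi,
-		uint32_t subbuf_idx)
-{
-	const char *array = base + crash_abi->offset.commit_hot_array;
-	const char *elem = array
-		+ (size_t) subbuf_idx * crash_abi->stride.commit_hot_array;
-
-	/* length.commit_hot_seq gives the field width in bytes. */
-	return read_field(elem + crash_abi->offset.commit_hot_seq,
-			crash_abi->length.commit_hot_seq);
-}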
-
-static
-void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
- siginfo_t *si, void *uc __attribute__((unused)))
-{
- const struct lttng_ust_lib_ring_buffer_config *config;
- struct lttng_ust_shm_handle *handle;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- int cpu;
-
- assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
-
- chan = si->si_value.sival_ptr;
- handle = chan->handle;
- config = &chan->backend.config;
-
- DBG("Switch timer for channel %p\n", chan);
-
- /*
- * Only flush buffers periodically if readers are active.
- */
- pthread_mutex_lock(&wakeup_fd_mutex);
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
- shmp(handle, chan->backend.buf[cpu].shmp);
-
- if (!buf)
- goto end;
- if (uatomic_read(&buf->active_readers))
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
- chan->handle);
- }
- } else {
- struct lttng_ust_lib_ring_buffer *buf =
- shmp(handle, chan->backend.buf[0].shmp);
-
- if (!buf)
- goto end;
- if (uatomic_read(&buf->active_readers))
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
- chan->handle);
- }
-end:
- pthread_mutex_unlock(&wakeup_fd_mutex);
- return;
-}
-
-static
-int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle)
-{
- unsigned long consumed_old, consumed_idx, commit_count, write_offset;
- struct commit_counters_cold *cc_cold;
-
- consumed_old = uatomic_read(&buf->consumed);
- consumed_idx = subbuf_index(consumed_old, chan);
- cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
- if (!cc_cold)
- return 0;
- commit_count = v_read(config, &cc_cold->cc_sb);
- /*
- * No memory barrier here, since we are only interested
- * in a statistically correct polling result. The next poll will
- * get the data if we are racing. The mb() that ensures correct
- * memory order is in get_subbuf.
- */
- write_offset = v_read(config, &buf->offset);
-
- /*
- * Check that the subbuffer we are trying to consume has been
- * already fully committed.
- */
-
- if (((commit_count - chan->backend.subbuf_size)
- & chan->commit_count_mask)
- - (buf_trunc(consumed_old, chan)
- >> chan->backend.num_subbuf_order)
- != 0)
- return 0;
-
- /*
- * Check that we are not about to read the same subbuffer in
- * which the writer head is.
- */
- if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
- == 0)
- return 0;
-
- return 1;
-}
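-
-/*
- * Worked example of the fully-committed test above (numbers are
- * hypothetical, chosen for illustration): take subbuf_size = 4096,
- * num_subbuf = 4 (num_subbuf_order = 2, buf_size = 16384) and
- * commit_count_mask = ~0UL >> 2.
- *
- * - consumed_old = 24576 points at subbuffer index 2 on the second
- *   trip around the buffer: buf_trunc(24576) >> 2 = 16384 >> 2 = 4096.
- * - Each complete fill of that subbuffer adds subbuf_size to cc_sb, so
- *   once the second fill is committed cc_sb = 8192, and
- *   (8192 - 4096) & mask = 4096 matches: the subbuffer is readable.
- * - If the writer is still mid-commit, cc_sb remains 4096, the
- *   difference is non-zero, and the poll reports no data, which is the
- *   statistically correct answer the comment above refers to.
- */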
-
-static
-void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
- sigset_t sigpipe_set, pending_set, old_set;
- int ret, sigpipe_was_pending = 0;
-
- if (wakeup_fd < 0)
- return;
-
- /*
- * Wake-up the other end by writing a null byte in the pipe
- * (non-blocking). Important note: Because writing into the
- * pipe is non-blocking (and therefore we allow dropping wakeup
- * data, as long as there is wakeup data present in the pipe
- * buffer to wake up the consumer), the consumer should perform
- * the following sequence for waiting:
- * 1) empty the pipe (reads).
- * 2) check if there is data in the buffer.
- * 3) wait on the pipe (poll).
- *
- * Discard the SIGPIPE from write(), not disturbing any SIGPIPE
- * that might be already pending. If a bogus SIGPIPE is sent to
- * the entire process concurrently by a malicious user, it may
- * be simply discarded.
- */
- ret = sigemptyset(&pending_set);
- assert(!ret);
- /*
- * sigpending returns the mask of signals that are _both_
- * blocked for the thread _and_ pending for either the thread or
- * the entire process.
- */
- ret = sigpending(&pending_set);
- assert(!ret);
- sigpipe_was_pending = sigismember(&pending_set, SIGPIPE);
- /*
- * If sigpipe was pending, it means it was already blocked, so
- * no need to block it.
- */
- if (!sigpipe_was_pending) {
- ret = sigemptyset(&sigpipe_set);
- assert(!ret);
- ret = sigaddset(&sigpipe_set, SIGPIPE);
- assert(!ret);
- ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set);
- assert(!ret);
- }
- do {
- ret = write(wakeup_fd, "", 1);
- } while (ret == -1L && errno == EINTR);
- if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) {
- struct timespec timeout = { 0, 0 };
- do {
- ret = sigtimedwait(&sigpipe_set, NULL,
- &timeout);
- } while (ret == -1L && errno == EINTR);
- }
- if (!sigpipe_was_pending) {
- ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
- assert(!ret);
- }
-}
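-
-/*
- * Consumer-side counterpart to the wakeup protocol documented above (a
- * sketch, not part of this file: the consumer daemon implements its
- * own version of this loop). "wait_fd" is assumed to be the read end
- * of the wakeup pipe, opened non-blocking. The ordering -- drain,
- * check, then poll -- is what makes the lossy non-blocking write() on
- * the producer side safe.
- */
-static void consumer_wait_sketch(int wait_fd,
-		struct lttng_ust_lib_ring_buffer *buf,
-		struct lttng_ust_lib_ring_buffer_channel *chan,
-		struct lttng_ust_shm_handle *handle)
-{
-	struct pollfd fds = { .fd = wait_fd, .events = POLLIN };
-	char tmp[64];
-
-	for (;;) {
-		/* 1) Empty the pipe (reads). */
-		while (read(wait_fd, tmp, sizeof(tmp)) > 0)
-			;
-		/* 2) Check if there is data in the buffer. */
-		if (lib_ring_buffer_poll_deliver(&chan->backend.config,
-				buf, chan, handle))
-			return;	/* Data ready: consume it. */
-		/* 3) Wait on the pipe (poll). */
-		(void) poll(&fds, 1, -1);
-	}
-}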
-
-static
-void lib_ring_buffer_channel_do_read(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- const struct lttng_ust_lib_ring_buffer_config *config;
- struct lttng_ust_shm_handle *handle;
- int cpu;
-
- handle = chan->handle;
- config = &chan->backend.config;
-
- /*
- * Only flush buffers periodically if readers are active.
- */
- pthread_mutex_lock(&wakeup_fd_mutex);
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
- shmp(handle, chan->backend.buf[cpu].shmp);
-
- if (!buf)
- goto end;
- if (uatomic_read(&buf->active_readers)
- && lib_ring_buffer_poll_deliver(config, buf,
- chan, handle)) {
- lib_ring_buffer_wakeup(buf, handle);
- }
- }
- } else {
- struct lttng_ust_lib_ring_buffer *buf =
- shmp(handle, chan->backend.buf[0].shmp);
-
- if (!buf)
- goto end;
- if (uatomic_read(&buf->active_readers)
- && lib_ring_buffer_poll_deliver(config, buf,
- chan, handle)) {
- lib_ring_buffer_wakeup(buf, handle);
- }
- }
-end:
- pthread_mutex_unlock(&wakeup_fd_mutex);
-}
-
-static
-void lib_ring_buffer_channel_read_timer(int sig __attribute__((unused)),
- siginfo_t *si, void *uc __attribute__((unused)))
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
-
- assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
- chan = si->si_value.sival_ptr;
- DBG("Read timer for channel %p\n", chan);
- lib_ring_buffer_channel_do_read(chan);
- return;
-}
-
-static
-void rb_setmask(sigset_t *mask)
-{
- int ret;
-
- ret = sigemptyset(mask);
- if (ret) {
- PERROR("sigemptyset");
- }
- ret = sigaddset(mask, LTTNG_UST_RB_SIG_FLUSH);
- if (ret) {
- PERROR("sigaddset");
- }
- ret = sigaddset(mask, LTTNG_UST_RB_SIG_READ);
- if (ret) {
- PERROR("sigaddset");
- }
- ret = sigaddset(mask, LTTNG_UST_RB_SIG_TEARDOWN);
- if (ret) {
- PERROR("sigaddset");
- }
-}
-
-static
-void *sig_thread(void *arg __attribute__((unused)))
-{
- sigset_t mask;
- siginfo_t info;
- int signr;
-
-	/* This thread is the only one that waits on the timer signals. */
- rb_setmask(&mask);
- CMM_STORE_SHARED(timer_signal.tid, pthread_self());
-
- for (;;) {
- signr = sigwaitinfo(&mask, &info);
- if (signr == -1) {
- if (errno != EINTR)
- PERROR("sigwaitinfo");
- continue;
- }
- if (signr == LTTNG_UST_RB_SIG_FLUSH) {
- lib_ring_buffer_channel_switch_timer(info.si_signo,
- &info, NULL);
- } else if (signr == LTTNG_UST_RB_SIG_READ) {
- lib_ring_buffer_channel_read_timer(info.si_signo,
- &info, NULL);
- } else if (signr == LTTNG_UST_RB_SIG_TEARDOWN) {
- cmm_smp_mb();
- CMM_STORE_SHARED(timer_signal.qs_done, 1);
- cmm_smp_mb();
- } else {
- ERR("Unexptected signal %d\n", info.si_signo);
- }
- }
- return NULL;
-}
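-
-/*
- * Sketch: for the sigwaitinfo() loop above to be the only consumer of
- * these signals, every other thread must keep them blocked. A
- * hypothetical early-init helper (the library performs an equivalent
- * masking elsewhere) would look like:
- */
-static void block_rb_signals_sketch(void)
-{
-	sigset_t mask;
-
-	rb_setmask(&mask);
-	/* The blocked mask is inherited by threads created afterwards. */
-	(void) pthread_sigmask(SIG_BLOCK, &mask, NULL);
-}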
-
-/*
- * Ensure only a single thread listens on the timer signal.
- */
-static
-void lib_ring_buffer_setup_timer_thread(void)
-{
- pthread_t thread;
- int ret;
-
- pthread_mutex_lock(&timer_signal.lock);
- if (timer_signal.setup_done)
- goto end;
-
- ret = pthread_create(&thread, NULL, &sig_thread, NULL);
- if (ret) {
- errno = ret;
- PERROR("pthread_create");
- }
- ret = pthread_detach(thread);
- if (ret) {
- errno = ret;
- PERROR("pthread_detach");
- }
- timer_signal.setup_done = 1;
-end:
- pthread_mutex_unlock(&timer_signal.lock);
-}
-
-/*
- * Wait for signal-handling thread quiescent state.
- */
-static
-void lib_ring_buffer_wait_signal_thread_qs(unsigned int signr)
-{
- sigset_t pending_set;
- int ret;
-
- /*
- * We need to be the only thread interacting with the thread
- * that manages signals for teardown synchronization.
- */
- pthread_mutex_lock(&timer_signal.lock);
-
- /*
- * Ensure we don't have any signal queued for this channel.
- */
- for (;;) {
- ret = sigemptyset(&pending_set);
- if (ret == -1) {
- PERROR("sigemptyset");
- }
- ret = sigpending(&pending_set);
- if (ret == -1) {
- PERROR("sigpending");
- }
- if (!sigismember(&pending_set, signr))
- break;
- caa_cpu_relax();
- }
-
- /*
- * From this point, no new signal handler will be fired that
- * would try to access "chan". However, we still need to wait
- * for any currently executing handler to complete.
- */
- cmm_smp_mb();
- CMM_STORE_SHARED(timer_signal.qs_done, 0);
- cmm_smp_mb();
-
- /*
- * Kill with LTTNG_UST_RB_SIG_TEARDOWN, so signal management
- * thread wakes up.
- */
- kill(getpid(), LTTNG_UST_RB_SIG_TEARDOWN);
-
- while (!CMM_LOAD_SHARED(timer_signal.qs_done))
- caa_cpu_relax();
- cmm_smp_mb();
-
- pthread_mutex_unlock(&timer_signal.lock);
-}
-
-static
-void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- struct sigevent sev;
- struct itimerspec its;
- int ret;
-
- if (!chan->switch_timer_interval || chan->switch_timer_enabled)
- return;
-
- chan->switch_timer_enabled = 1;
-
- lib_ring_buffer_setup_timer_thread();
-
- memset(&sev, 0, sizeof(sev));
- sev.sigev_notify = SIGEV_SIGNAL;
- sev.sigev_signo = LTTNG_UST_RB_SIG_FLUSH;
- sev.sigev_value.sival_ptr = chan;
- ret = timer_create(CLOCKID, &sev, &chan->switch_timer);
- if (ret == -1) {
- PERROR("timer_create");
- }
-
- its.it_value.tv_sec = chan->switch_timer_interval / 1000000;
- its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000;
- its.it_interval.tv_sec = its.it_value.tv_sec;
- its.it_interval.tv_nsec = its.it_value.tv_nsec;
-
- ret = timer_settime(chan->switch_timer, 0, &its, NULL);
- if (ret == -1) {
- PERROR("timer_settime");
- }
-}
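-
-/*
- * Example of the interval conversion above: a hypothetical
- * switch_timer_interval of 1500000 us yields it_value.tv_sec = 1 and
- * it_value.tv_nsec = 500000 * 1000 = 500000000. Since it_interval
- * mirrors it_value, the timer re-arms itself and the channel is
- * flushed every 1.5 s.
- */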
-
-static
-void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- int ret;
-
- if (!chan->switch_timer_interval || !chan->switch_timer_enabled)
- return;
-
- ret = timer_delete(chan->switch_timer);
- if (ret == -1) {
- PERROR("timer_delete");
- }
-
- lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_FLUSH);
-
- chan->switch_timer = 0;
- chan->switch_timer_enabled = 0;
-}
-
-static
-void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- struct sigevent sev;
- struct itimerspec its;
- int ret;
-
- if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
- || !chan->read_timer_interval || chan->read_timer_enabled)
- return;
-
- chan->read_timer_enabled = 1;
-
- lib_ring_buffer_setup_timer_thread();
-
-	memset(&sev, 0, sizeof(sev));
-	sev.sigev_notify = SIGEV_SIGNAL;
- sev.sigev_signo = LTTNG_UST_RB_SIG_READ;
- sev.sigev_value.sival_ptr = chan;
- ret = timer_create(CLOCKID, &sev, &chan->read_timer);
- if (ret == -1) {
- PERROR("timer_create");
- }
-
- its.it_value.tv_sec = chan->read_timer_interval / 1000000;
- its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000;
- its.it_interval.tv_sec = its.it_value.tv_sec;
- its.it_interval.tv_nsec = its.it_value.tv_nsec;
-
- ret = timer_settime(chan->read_timer, 0, &its, NULL);
- if (ret == -1) {
- PERROR("timer_settime");
- }
-}
-
-static
-void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- int ret;
-
- if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
- || !chan->read_timer_interval || !chan->read_timer_enabled)
- return;
-
- ret = timer_delete(chan->read_timer);
- if (ret == -1) {
- PERROR("timer_delete");
- }
-
- /*
- * do one more check to catch data that has been written in the last
- * timer period.
- */
- lib_ring_buffer_channel_do_read(chan);
-
- lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_READ);
-
- chan->read_timer = 0;
- chan->read_timer_enabled = 0;
-}
-
-static void channel_unregister_notifiers(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
- lib_ring_buffer_channel_switch_timer_stop(chan);
- lib_ring_buffer_channel_read_timer_stop(chan);
-}
-
-static void channel_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config =
- &chan->backend.config;
- int cpu;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_possible_cpu(cpu) {
- struct lttng_ust_lib_ring_buffer *buf =
- shmp(handle, chan->backend.buf[cpu].shmp);
- if (buf)
- lib_ring_buffer_print_errors(chan, buf, cpu, handle);
- }
- } else {
- struct lttng_ust_lib_ring_buffer *buf =
- shmp(handle, chan->backend.buf[0].shmp);
-
- if (buf)
- lib_ring_buffer_print_errors(chan, buf, -1, handle);
- }
-}
-
-static void channel_free(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle,
- int consumer)
-{
- channel_backend_free(&chan->backend, handle);
- /* chan is freed by shm teardown */
- shm_object_table_destroy(handle->table, consumer);
- free(handle);
-}
-
-/**
- * channel_create - Create channel.
- * @config: ring buffer instance configuration
- * @name: name of the channel
- * @priv_data_align: alignment, in bytes, of the private data area. (config)
- * @priv_data_size: length, in bytes, of the private data area. (config)
- * @priv_data_init: initialization data for private data. (config)
- * @priv: local private data (memory owner by caller)
- * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
- * address mapping. It is used only by RING_BUFFER_STATIC
- * configuration. It can be set to NULL for other backends.
- * @subbuf_size: subbuffer size
- * @num_subbuf: number of subbuffers
- * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
- * padding to let readers get those sub-buffers.
- * Used for live streaming.
- * @read_timer_interval: Time interval (in us) to wake up pending readers.
- * @stream_fds: array of stream file descriptors.
- * @nr_stream_fds: number of file descriptors in array.
- *
- * Holds cpu hotplug.
- * Returns NULL on failure.
- */
-struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
- const char *name,
- size_t priv_data_align,
- size_t priv_data_size,
- void *priv_data_init,
- void *priv,
- void *buf_addr __attribute__((unused)), size_t subbuf_size,
- size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval,
- const int *stream_fds, int nr_stream_fds,
- int64_t blocking_timeout)
-{
- int ret;
- size_t shmsize, chansize;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- struct lttng_ust_shm_handle *handle;
- struct shm_object *shmobj;
- unsigned int nr_streams;
- int64_t blocking_timeout_ms;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- nr_streams = num_possible_cpus();
- else
- nr_streams = 1;
-
- if (nr_stream_fds != nr_streams)
- return NULL;
-
- if (blocking_timeout < -1) {
- return NULL;
- }
- /* usec to msec */
- if (blocking_timeout == -1) {
- blocking_timeout_ms = -1;
- } else {
- blocking_timeout_ms = blocking_timeout / 1000;
- if (blocking_timeout_ms != (int32_t) blocking_timeout_ms) {
- return NULL;
- }
- }
-
- if (lib_ring_buffer_check_config(config, switch_timer_interval,
- read_timer_interval))
- return NULL;
-
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
- if (!handle)
- return NULL;
-
- /* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + num_possible_cpus());
- if (!handle->table)
- goto error_table_alloc;
-
- /* Calculate the shm allocation layout */
- shmsize = sizeof(struct lttng_ust_lib_ring_buffer_channel);
- shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
- shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
- chansize = shmsize;
- if (priv_data_align)
- shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
- shmsize += priv_data_size;
-
- /* Allocate normal memory for channel (not shared) */
- shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
- -1, -1);
- if (!shmobj)
- goto error_append;
- /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
- set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
- assert(handle->chan._ref.index == 0);
- assert(handle->chan._ref.offset == 0);
- chan = shmp(handle, handle->chan);
- if (!chan)
- goto error_append;
- chan->nr_streams = nr_streams;
-
- /* space for private data */
- if (priv_data_size) {
- void *priv_config;
-
- DECLARE_SHMP(void, priv_data_alloc);
-
- align_shm(shmobj, priv_data_align);
- chan->priv_data_offset = shmobj->allocated_len;
- set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
- if (!shmp(handle, priv_data_alloc))
- goto error_append;
- priv_config = channel_get_private_config(chan);
- memcpy(priv_config, priv_data_init, priv_data_size);
- } else {
- chan->priv_data_offset = -1;
- }
-
- chan->u.s.blocking_timeout_ms = (int32_t) blocking_timeout_ms;
-
- channel_set_private(chan, priv);
-
- ret = channel_backend_init(&chan->backend, name, config,
- subbuf_size, num_subbuf, handle,
- stream_fds);
- if (ret)
- goto error_backend_init;
-
- chan->handle = handle;
- chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
-
- chan->switch_timer_interval = switch_timer_interval;
- chan->read_timer_interval = read_timer_interval;
- lib_ring_buffer_channel_switch_timer_start(chan);
- lib_ring_buffer_channel_read_timer_start(chan);
-
- return handle;
-
-error_backend_init:
-error_append:
- shm_object_table_destroy(handle->table, 1);
-error_table_alloc:
- free(handle);
- return NULL;
-}
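-
-/*
- * Layout example for the shmsize computation in channel_create()
- * (sizes are hypothetical, for illustration only): with
- * sizeof(struct lttng_ust_lib_ring_buffer_channel) = 256,
- * nr_streams = 4, sizeof(struct lttng_ust_lib_ring_buffer_shmp) = 16
- * (alignment 16) and priv_data_align = 8:
- *
- *   shmsize  = 256          channel header
- *            + 0            padding (256 is already 16-byte aligned)
- *            + 4 * 16       per-stream shmp array      -> 320
- *   chansize = 320
- *   shmsize += 0            padding (320 is 8-byte aligned)
- *            + priv_data_size
- *
- * The private data lands in the same shm object as the channel, at
- * chan->priv_data_offset, which is how channel_get_private_config()
- * can later recover it from the channel pointer alone.
- */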
-
-struct lttng_ust_shm_handle *channel_handle_create(void *data,
- uint64_t memory_map_size,
- int wakeup_fd)
-{
- struct lttng_ust_shm_handle *handle;
- struct shm_object *object;
-
- handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
- if (!handle)
- return NULL;
-
- /* Allocate table for channel + per-cpu buffers */
- handle->table = shm_object_table_create(1 + num_possible_cpus());
- if (!handle->table)
- goto error_table_alloc;
- /* Add channel object */
- object = shm_object_table_append_mem(handle->table, data,
- memory_map_size, wakeup_fd);
- if (!object)
- goto error_table_object;
- /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
- handle->chan._ref.index = 0;
- handle->chan._ref.offset = 0;
- return handle;
-
-error_table_object:
- shm_object_table_destroy(handle->table, 0);
-error_table_alloc:
- free(handle);
- return NULL;
-}
-
-int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
- int shm_fd, int wakeup_fd, uint32_t stream_nr,
- uint64_t memory_map_size)
-{
- struct shm_object *object;
-
- /* Add stream object */
- object = shm_object_table_append_shm(handle->table,
- shm_fd, wakeup_fd, stream_nr,
- memory_map_size);
- if (!object)
- return -EINVAL;
- return 0;
-}
-
-unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
-{
- assert(handle->table);
- return handle->table->allocated_len - 1;
-}
-
-static
-void channel_release(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
- int consumer)
-{
- channel_free(chan, handle, consumer);
-}
-
-/**
- * channel_destroy - Finalize, wait for q.s. and destroy channel.
- * @chan: channel to destroy
- *
- * Holds cpu hotplug.
- * Call "destroy" callback, finalize channels, decrement the channel
- * reference count. Note that when readers have completed data
- * consumption of finalized channels, get_subbuf() will return -ENODATA.
- * They should release their handle at that point.
- */
-void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
- int consumer)
-{
- if (consumer) {
- /*
- * Note: the consumer takes care of finalizing and
- * switching the buffers.
- */
- channel_unregister_notifiers(chan, handle);
- /*
- * The consumer prints errors.
- */
- channel_print_errors(chan, handle);
- }
-
- /*
- * sessiond/consumer are keeping a reference on the shm file
- * descriptor directly. No need to refcount.
- */
- channel_release(chan, handle, consumer);
- return;
-}
-
-struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
- const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
- struct lttng_ust_shm_handle *handle,
- int *shm_fd, int *wait_fd,
- int *wakeup_fd,
- uint64_t *memory_map_size)
-{
- struct shm_ref *ref;
-
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
- cpu = 0;
- } else {
- if (cpu >= num_possible_cpus())
- return NULL;
- }
- ref = &chan->backend.buf[cpu].shmp._ref;
- *shm_fd = shm_get_shm_fd(handle, ref);
- *wait_fd = shm_get_wait_fd(handle, ref);
- *wakeup_fd = shm_get_wakeup_fd(handle, ref);
- if (shm_get_shm_size(handle, ref, memory_map_size))
- return NULL;
- return shmp(handle, chan->backend.buf[cpu].shmp);
-}
-
-int ring_buffer_channel_close_wait_fd(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- struct lttng_ust_shm_handle *handle)
-{
- struct shm_ref *ref;
-
- ref = &handle->chan._ref;
- return shm_close_wait_fd(handle, ref);
-}
-
-int ring_buffer_channel_close_wakeup_fd(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
- struct lttng_ust_shm_handle *handle)
-{
- struct shm_ref *ref;
-
- ref = &handle->chan._ref;
- return shm_close_wakeup_fd(handle, ref);
-}
-
-int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle,
- int cpu)
-{
- struct shm_ref *ref;
-
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
- cpu = 0;
- } else {
- if (cpu >= num_possible_cpus())
- return -EINVAL;
- }
- ref = &chan->backend.buf[cpu].shmp._ref;
- return shm_close_wait_fd(handle, ref);
-}
-
-int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_shm_handle *handle,
- int cpu)
-{
- struct shm_ref *ref;
- int ret;
-
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
- cpu = 0;
- } else {
- if (cpu >= num_possible_cpus())
- return -EINVAL;
- }
- ref = &chan->backend.buf[cpu].shmp._ref;
- pthread_mutex_lock(&wakeup_fd_mutex);
- ret = shm_close_wakeup_fd(handle, ref);
- pthread_mutex_unlock(&wakeup_fd_mutex);
- return ret;
-}
-
-int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
- if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
- return -EBUSY;
- cmm_smp_mb();
- return 0;
-}
-
-void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
-
- if (!chan)
- return;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
- cmm_smp_mb();
- uatomic_dec(&buf->active_readers);
-}
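-
-/*
- * Usage sketch for the reader reference count managed above
- * (hypothetical consumer-side call sequence): open must succeed
- * exactly once before any snapshot/get_subbuf activity on the buffer,
- * and each successful open is paired with one release.
- */
-static int reader_session_sketch(struct lttng_ust_lib_ring_buffer *buf,
-		struct lttng_ust_shm_handle *handle)
-{
-	int ret;
-
-	ret = lib_ring_buffer_open_read(buf, handle);
-	if (ret)
-		return ret;	/* -EBUSY: another reader is active. */
-	/* ... snapshot / get_subbuf / put_subbuf cycles ... */
-	lib_ring_buffer_release_read(buf, handle);
-	return 0;
-}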
-
-/**
- * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
- * @buf: ring buffer
- * @consumed: consumed count indicating the position where to read
- * @produced: produced count, indicates position when to stop reading
- *
- * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
- * data to read at consumed position, or 0 if the get operation succeeds.
- */
-
-int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
- unsigned long *consumed, unsigned long *produced,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned long consumed_cur, write_offset;
- int finalized;
-
- chan = shmp(handle, buf->backend.chan);
- if (!chan)
- return -EPERM;
- config = &chan->backend.config;
- finalized = CMM_ACCESS_ONCE(buf->finalized);
- /*
- * Read finalized before counters.
- */
- cmm_smp_rmb();
- consumed_cur = uatomic_read(&buf->consumed);
- /*
- * No need to issue a memory barrier between consumed count read and
- * write offset read, because consumed count can only change
- * concurrently in overwrite mode, and we keep a sequence counter
- * identifier derived from the write offset to check we are getting
- * the same sub-buffer we are expecting (the sub-buffers are atomically
- * "tagged" upon writes, tags are checked upon read).
- */
- write_offset = v_read(config, &buf->offset);
-
- /*
- * Check that we are not about to read the same subbuffer in
- * which the writer head is.
- */
- if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
- == 0)
- goto nodata;
-
- *consumed = consumed_cur;
- *produced = subbuf_trunc(write_offset, chan);
-
- return 0;
-
-nodata:
- /*
- * The memory barriers __wait_event()/wake_up_interruptible() take care
- * of "raw_spin_is_locked" memory ordering.
- */
- if (finalized)
- return -ENODATA;
- else
- return -EAGAIN;
-}
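-
-/*
- * Sketch: draining everything a snapshot reports (hypothetical
- * consumer-side loop; real consumers add error handling and actually
- * copy the data out). The produced count returned above is truncated
- * to a sub-buffer boundary, so stepping consumed with subbuf_align()
- * visits each readable sub-buffer exactly once; a sub-buffer that
- * cannot be acquired is simply skipped here.
- */
-static void drain_snapshot_sketch(struct lttng_ust_lib_ring_buffer *buf,
-		struct lttng_ust_lib_ring_buffer_channel *chan,
-		struct lttng_ust_shm_handle *handle)
-{
-	unsigned long consumed, produced;
-
-	if (lib_ring_buffer_snapshot(buf, &consumed, &produced, handle))
-		return;
-	while ((long) (produced - consumed) > 0) {
-		if (!lib_ring_buffer_get_subbuf(buf, consumed, handle)) {
-			/* ... read the acquired sub-buffer ... */
-			lib_ring_buffer_put_subbuf(buf, handle);
-		}
-		lib_ring_buffer_move_consumer(buf,
-				subbuf_align(consumed, chan), handle);
-		consumed = subbuf_align(consumed, chan);
-	}
-}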
-
-/**
- * Performs the same function as lib_ring_buffer_snapshot(), but the positions
- * are saved regardless of whether the consumed and produced positions are
- * in the same subbuffer.
- * @buf: ring buffer
- * @consumed: consumed byte count indicating the last position read
- * @produced: produced byte count indicating the last position written
- *
- * This function is meant to provide information on the exact producer and
- * consumer positions without regard for the "snapshot" feature.
- */
-int lib_ring_buffer_snapshot_sample_positions(
- struct lttng_ust_lib_ring_buffer *buf,
- unsigned long *consumed, unsigned long *produced,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
-
- chan = shmp(handle, buf->backend.chan);
- if (!chan)
- return -EPERM;
- config = &chan->backend.config;
- cmm_smp_rmb();
- *consumed = uatomic_read(&buf->consumed);
- /*
- * No need to issue a memory barrier between consumed count read and
- * write offset read, because consumed count can only change
- * concurrently in overwrite mode, and we keep a sequence counter
- * identifier derived from the write offset to check we are getting
- * the same sub-buffer we are expecting (the sub-buffers are atomically
- * "tagged" upon writes, tags are checked upon read).
- */
- *produced = v_read(config, &buf->offset);
- return 0;
-}
-
-/**
- * lib_ring_buffer_move_consumer - move consumed counter forward
- * @buf: ring buffer
- * @consumed_new: new consumed count value
- */
-void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
- unsigned long consumed_new,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- unsigned long consumed;
-
- chan = shmp(handle, bufb->chan);
- if (!chan)
- return;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
-
- /*
- * Only push the consumed value forward.
- * If the consumed cmpxchg fails, this is because we have been pushed by
- * the writer in flight recorder mode.
- */
- consumed = uatomic_read(&buf->consumed);
- while ((long) consumed - (long) consumed_new < 0)
- consumed = uatomic_cmpxchg(&buf->consumed, consumed,
- consumed_new);
-}
-
-/**
- * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
- * @buf: ring buffer
- * @consumed: consumed count indicating the position where to read
- *
- * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
- * data to read at consumed position, or 0 if the get operation succeeds.
- */
-int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
- unsigned long consumed,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
- int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
- struct commit_counters_cold *cc_cold;
-
- chan = shmp(handle, buf->backend.chan);
- if (!chan)
- return -EPERM;
- config = &chan->backend.config;
-retry:
- finalized = CMM_ACCESS_ONCE(buf->finalized);
- /*
- * Read finalized before counters.
- */
- cmm_smp_rmb();
- consumed_cur = uatomic_read(&buf->consumed);
- consumed_idx = subbuf_index(consumed, chan);
- cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
- if (!cc_cold)
- return -EPERM;
- commit_count = v_read(config, &cc_cold->cc_sb);
- /*
- * Make sure we read the commit count before reading the buffer
- * data and the write offset. Correct consumed offset ordering
- * wrt commit count is ensured by the use of cmpxchg to update
- * the consumed offset.
- */
- /*
- * Local rmb to match the remote wmb to read the commit count
- * before the buffer data and the write offset.
- */
- cmm_smp_rmb();
-
- write_offset = v_read(config, &buf->offset);
-
- /*
- * Check that the buffer we are getting is after or at consumed_cur
- * position.
- */
- if ((long) subbuf_trunc(consumed, chan)
- - (long) subbuf_trunc(consumed_cur, chan) < 0)
- goto nodata;
-
- /*
- * Check that the subbuffer we are trying to consume has been
- * already fully committed. There are a few causes that can make
- * this unavailability situation occur:
- *
- * Temporary (short-term) situation:
- * - Application is running on a different CPU, between reserve
- * and commit ring buffer operations,
- * - Application is preempted between reserve and commit ring
- * buffer operations,
- *
- * Long-term situation:
- * - Application is stopped (SIGSTOP) between reserve and commit
- * ring buffer operations. Could eventually be resumed by
- * SIGCONT.
- * - Application is killed (SIGTERM, SIGINT, SIGKILL) between
- * reserve and commit ring buffer operation.
- *
- * From a consumer perspective, handling short-term
- * unavailability situations is performed by retrying a few
- * times after a delay. Handling long-term unavailability
- * situations is handled by failing to get the sub-buffer.
- *
- * In all of those situations, if the application is taking a
- * long time to perform its commit after ring buffer space
- * reservation, we can end up in a situation where the producer
- * will fill the ring buffer and try to write into the same
- * sub-buffer again (which has a missing commit). This is
- * handled by the producer in the sub-buffer switch handling
- * code of the reserve routine by detecting unbalanced
- * reserve/commit counters and discarding all further events
- * until the situation is resolved. Two
- * scenarios can occur:
- *
- * 1) The application causing the reserve/commit counters to be
- * unbalanced has been terminated. In this situation, all
- * further events will be discarded in the buffers, and no
- * further buffer data will be readable by the consumer
- * daemon. Tearing down the UST tracing session and starting
- * anew is a work-around for those situations. Note that this
- * only affects per-UID tracing. In per-PID tracing, the
- * application vanishes with the termination, and therefore
- * no more data needs to be written to the buffers.
- * 2) The application causing the unbalance has been delayed for
- * a long time, but will eventually try to increment the
- * commit counter after eventually writing to the sub-buffer.
- * This situation can cause events to be discarded until the
- * application resumes its operations.
- */
- if (((commit_count - chan->backend.subbuf_size)
- & chan->commit_count_mask)
- - (buf_trunc(consumed, chan)
- >> chan->backend.num_subbuf_order)
- != 0) {
- if (nr_retry-- > 0) {
- if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
- (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
- goto retry;
- } else {
- goto nodata;
- }
- }
-
- /*
- * Check that we are not about to read the same subbuffer in
- * which the writer head is.
- */
- if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
- == 0)
- goto nodata;
-
- /*
- * Failure to get the subbuffer causes a busy-loop retry without going
- * to a wait queue. These are caused by short-lived race windows where
- * the writer is getting access to a subbuffer we were trying to get
- * access to. Also checks that the "consumed" buffer count we are
- * looking for matches the one contained in the subbuffer id.
- *
- * The short-lived race window described here can be affected by
- * application signals and preemption, thus requiring the loop to
- * be bounded by a maximum number of retries.
- */
- ret = update_read_sb_index(config, &buf->backend, &chan->backend,
- consumed_idx, buf_trunc_val(consumed, chan),
- handle);
- if (ret) {
- if (nr_retry-- > 0) {
- if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
- (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
- goto retry;
- } else {
- goto nodata;
- }
- }
- subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
-
- buf->get_subbuf_consumed = consumed;
- buf->get_subbuf = 1;
-
- return 0;
-
-nodata:
- /*
- * The memory barriers __wait_event()/wake_up_interruptible() take care
- * of "raw_spin_is_locked" memory ordering.
- */
- if (finalized)
- return -ENODATA;
- else
- return -EAGAIN;
-}
-
-/**
- * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
- * @buf: ring buffer
- */
-void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
- unsigned long sb_bindex, consumed_idx, consumed;
- struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
- struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
-
- chan = shmp(handle, bufb->chan);
- if (!chan)
- return;
- config = &chan->backend.config;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
-
- if (!buf->get_subbuf) {
- /*
- * Reader puts a subbuffer it did not get.
- */
- CHAN_WARN_ON(chan, 1);
- return;
- }
- consumed = buf->get_subbuf_consumed;
- buf->get_subbuf = 0;
-
- /*
- * Clear the records_unread counter. (overruns counter)
- * Can still be non-zero if a file reader simply grabbed the data
- * without using iterators.
- * Can be below zero if an iterator is used on a snapshot more than
- * once.
- */
- sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
- rpages = shmp_index(handle, bufb->array, sb_bindex);
- if (!rpages)
- return;
- backend_pages = shmp(handle, rpages->shmp);
- if (!backend_pages)
- return;
- v_add(config, v_read(config, &backend_pages->records_unread),
- &bufb->records_read);
- v_set(config, &backend_pages->records_unread, 0);
- CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
- && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
- subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
-
- /*
- * Exchange the reader subbuffer with the one we put in its place in the
- * writer subbuffer table. Expect the original consumed count. If
- * update_read_sb_index fails, this is because the writer updated the
- * subbuffer concurrently. We should therefore keep the subbuffer we
- * currently have: it has become invalid to try reading this sub-buffer
- * consumed count value anyway.
- */
- consumed_idx = subbuf_index(consumed, chan);
- update_read_sb_index(config, &buf->backend, &chan->backend,
- consumed_idx, buf_trunc_val(consumed, chan),
- handle);
- /*
- * update_read_sb_index return value ignored. Don't exchange sub-buffer
- * if the writer concurrently updated it.
- */
-}
-
-/*
- * cons_offset is an iterator on all subbuffer offsets between the reader
- * position and the writer position. (inclusive)
- */
-static
-void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- unsigned long cons_offset,
- int cpu,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long cons_idx, commit_count, commit_count_sb;
- struct commit_counters_hot *cc_hot;
- struct commit_counters_cold *cc_cold;
-
- cons_idx = subbuf_index(cons_offset, chan);
- cc_hot = shmp_index(handle, buf->commit_hot, cons_idx);
- if (!cc_hot)
- return;
- cc_cold = shmp_index(handle, buf->commit_cold, cons_idx);
- if (!cc_cold)
- return;
- commit_count = v_read(config, &cc_hot->cc);
- commit_count_sb = v_read(config, &cc_cold->cc_sb);
-
- if (subbuf_offset(commit_count, chan) != 0)
- DBG("ring buffer %s, cpu %d: "
- "commit count in subbuffer %lu,\n"
- "expecting multiples of %lu bytes\n"
- " [ %lu bytes committed, %lu bytes reader-visible ]\n",
- chan->backend.name, cpu, cons_idx,
- chan->backend.subbuf_size,
- commit_count, commit_count_sb);
-
- DBG("ring buffer: %s, cpu %d: %lu bytes committed\n",
- chan->backend.name, cpu, commit_count);
-}
-
-static
-void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- int cpu, struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long write_offset, cons_offset;
-
- /*
- * No need to order commit_count, write_offset and cons_offset reads
- * because we execute at teardown when no more writer nor reader
- * references are left.
- */
- write_offset = v_read(config, &buf->offset);
- cons_offset = uatomic_read(&buf->consumed);
- if (write_offset != cons_offset)
- DBG("ring buffer %s, cpu %d: "
- "non-consumed data\n"
- " [ %lu bytes written, %lu bytes read ]\n",
- chan->backend.name, cpu, write_offset, cons_offset);
-
- for (cons_offset = uatomic_read(&buf->consumed);
- (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
- chan)
- - cons_offset) > 0;
- cons_offset = subbuf_align(cons_offset, chan))
- lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
- cpu, handle);
-}
-
-static
-void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf, int cpu,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-
- if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
- DBG("ring buffer %s: %lu records written, "
- "%lu records overrun\n",
- chan->backend.name,
- v_read(config, &buf->records_count),
- v_read(config, &buf->records_overrun));
- } else {
- DBG("ring buffer %s, cpu %d: %lu records written, "
- "%lu records overrun\n",
- chan->backend.name, cpu,
- v_read(config, &buf->records_count),
- v_read(config, &buf->records_overrun));
-
- if (v_read(config, &buf->records_lost_full)
- || v_read(config, &buf->records_lost_wrap)
- || v_read(config, &buf->records_lost_big))
- DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
- " [ %lu buffer full, %lu nest buffer wrap-around, "
- "%lu event too big ]\n",
- chan->backend.name, cpu,
- v_read(config, &buf->records_lost_full),
- v_read(config, &buf->records_lost_wrap),
- v_read(config, &buf->records_lost_big));
- }
- lib_ring_buffer_print_buffer_errors(buf, chan, cpu, handle);
-}
-
-/*
- * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
- *
- * Only executed by SWITCH_FLUSH, which can be issued while tracing is
- * active or at buffer finalization (destroy).
- */
-static
-void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct switch_offsets *offsets,
- uint64_t tsc,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long oldidx = subbuf_index(offsets->old, chan);
- unsigned long commit_count;
- struct commit_counters_hot *cc_hot;
-
- config->cb.buffer_begin(buf, tsc, oldidx, handle);
-
- /*
- * Order all writes to buffer before the commit count update that will
- * determine that the subbuffer is full.
- */
- cmm_smp_wmb();
- cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
- if (!cc_hot)
- return;
- v_add(config, config->cb.subbuffer_header_size(),
- &cc_hot->cc);
- commit_count = v_read(config, &cc_hot->cc);
- /* Check if the written buffer has to be delivered */
- lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
- commit_count, oldidx, handle, tsc);
- lib_ring_buffer_write_commit_counter(config, buf, chan,
- offsets->old + config->cb.subbuffer_header_size(),
- commit_count, handle, cc_hot);
-}
-
-/*
- * lib_ring_buffer_switch_old_end: switch old subbuffer
- *
- * Note : offset_old should never be 0 here. It is ok, because we never perform
- * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
- * increments the offset_old value when doing a SWITCH_FLUSH on an empty
- * subbuffer.
- */
-static
-void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct switch_offsets *offsets,
- uint64_t tsc,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
- unsigned long commit_count, padding_size, data_size;
- struct commit_counters_hot *cc_hot;
- uint64_t *ts_end;
-
- data_size = subbuf_offset(offsets->old - 1, chan) + 1;
- padding_size = chan->backend.subbuf_size - data_size;
- subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
- handle);
-
- ts_end = shmp_index(handle, buf->ts_end, oldidx);
- if (!ts_end)
- return;
- /*
- * This is the last space reservation in that sub-buffer before
- * it gets delivered. This provides exclusive access to write to
- * this sub-buffer's ts_end. There are also no concurrent
- * readers of that ts_end because delivery of that sub-buffer is
- * postponed until the commit counter is incremented for the
- * current space reservation.
- */
- *ts_end = tsc;
-
- /*
- * Order all writes to buffer and store to ts_end before the commit
- * count update that will determine that the subbuffer is full.
- */
- cmm_smp_wmb();
- cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
- if (!cc_hot)
- return;
- v_add(config, padding_size, &cc_hot->cc);
- commit_count = v_read(config, &cc_hot->cc);
- lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
- commit_count, oldidx, handle, tsc);
- lib_ring_buffer_write_commit_counter(config, buf, chan,
- offsets->old + padding_size, commit_count, handle,
- cc_hot);
-}
-
-/*
- * lib_ring_buffer_switch_new_start: Populate new subbuffer.
- *
- * This code can be executed unordered : writers may already have written to the
- * sub-buffer before this code gets executed, caution. The commit makes sure
- * that this code is executed before the deliver of this sub-buffer.
- */
-static
-void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct switch_offsets *offsets,
- uint64_t tsc,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long beginidx = subbuf_index(offsets->begin, chan);
- unsigned long commit_count;
- struct commit_counters_hot *cc_hot;
-
- config->cb.buffer_begin(buf, tsc, beginidx, handle);
-
- /*
- * Order all writes to buffer before the commit count update that will
- * determine that the subbuffer is full.
- */
- cmm_smp_wmb();
- cc_hot = shmp_index(handle, buf->commit_hot, beginidx);
- if (!cc_hot)
- return;
- v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
- commit_count = v_read(config, &cc_hot->cc);
- /* Check if the written buffer has to be delivered */
- lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
- commit_count, beginidx, handle, tsc);
- lib_ring_buffer_write_commit_counter(config, buf, chan,
- offsets->begin + config->cb.subbuffer_header_size(),
- commit_count, handle, cc_hot);
-}
-
-/*
- * lib_ring_buffer_switch_new_end: finish switching current subbuffer
- *
- * Calls subbuffer_set_data_size() to set the data size of the current
- * sub-buffer. We do not need to perform check_deliver nor commit here,
- * since this task will be done by the "commit" of the event for which
- * we are currently doing the space reservation.
- */
-static
-void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct switch_offsets *offsets,
- uint64_t tsc,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long endidx, data_size;
- uint64_t *ts_end;
-
- endidx = subbuf_index(offsets->end - 1, chan);
- data_size = subbuf_offset(offsets->end - 1, chan) + 1;
- subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
- handle);
- ts_end = shmp_index(handle, buf->ts_end, endidx);
- if (!ts_end)
- return;
- /*
- * This is the last space reservation in that sub-buffer before
- * it gets delivered. This provides exclusive access to write to
- * this sub-buffer's ts_end. There are also no concurrent
- * readers of that ts_end because delivery of that sub-buffer is
- * postponed until the commit counter is incremented for the
- * current space reservation.
- */
- *ts_end = tsc;
-}
-
-/*
- * Returns :
- * 0 if ok
- * !0 if execution must be aborted.
- */
-static
-int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct switch_offsets *offsets,
- uint64_t *tsc,
- struct lttng_ust_shm_handle *handle)
-{
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long off, reserve_commit_diff;
-
- offsets->begin = v_read(config, &buf->offset);
- offsets->old = offsets->begin;
- offsets->switch_old_start = 0;
- off = subbuf_offset(offsets->begin, chan);
-
- *tsc = config->cb.ring_buffer_clock_read(chan);
-
- /*
- * Ensure we flush the header of an empty subbuffer when doing the
- * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
- * total data gathering duration even if there were no records saved
- * after the last buffer switch.
- * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
- * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
- * subbuffer header as appropriate.
- * The next record that reserves space will be responsible for
- * populating the following subbuffer header. We choose not to populate
- * the next subbuffer header here because we want to be able to use
- * SWITCH_ACTIVE for periodical buffer flush, which must
- * guarantee that all the buffer content (records and header
- * timestamps) are visible to the reader. This is required for
- * quiescence guarantees for the fusion merge.
- */
- if (mode != SWITCH_FLUSH && !off)
- return -1; /* we do not have to switch : buffer is empty */
-
- if (caa_unlikely(off == 0)) {
- unsigned long sb_index, commit_count;
- struct commit_counters_cold *cc_cold;
-
- /*
- * We are performing a SWITCH_FLUSH. There may be concurrent
- * writes into the buffer if e.g. invoked while performing a
- * snapshot on an active trace.
- *
- * If the client does not save any header information
- * (sub-buffer header size == 0), don't switch empty subbuffer
- * on finalize, because it is invalid to deliver a completely
- * empty subbuffer.
- */
- if (!config->cb.subbuffer_header_size())
- return -1;
-
- /* Test new buffer integrity */
- sb_index = subbuf_index(offsets->begin, chan);
- cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
- if (!cc_cold)
- return -1;
- commit_count = v_read(config, &cc_cold->cc_sb);
- reserve_commit_diff =
- (buf_trunc(offsets->begin, chan)
- >> chan->backend.num_subbuf_order)
- - (commit_count & chan->commit_count_mask);
- if (caa_likely(reserve_commit_diff == 0)) {
- /* Next subbuffer not being written to. */
- if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
- subbuf_trunc(offsets->begin, chan)
- - subbuf_trunc((unsigned long)
- uatomic_read(&buf->consumed), chan)
- >= chan->backend.buf_size)) {
- /*
- * We do not overwrite non consumed buffers
- * and we are full : don't switch.
- */
- return -1;
- } else {
- /*
- * Next subbuffer not being written to, and we
- * are either in overwrite mode or the buffer is
- * not full. It's safe to write in this new
- * subbuffer.
- */
- }
- } else {
- /*
- * Next subbuffer reserve offset does not match the
- * commit offset. Don't perform switch in
- * producer-consumer and overwrite mode. Caused by
- * either a writer OOPS or too many nested writes over a
- * reserve/commit pair.
- */
- return -1;
- }
-
- /*
- * Need to write the subbuffer start header on finalize.
- */
- offsets->switch_old_start = 1;
- }
- offsets->begin = subbuf_align(offsets->begin, chan);
- /* Note: old points to the next subbuf at offset 0 */
- offsets->end = offsets->begin;
- return 0;
-}
-
-/*
- * Force a sub-buffer switch. This operation is completely reentrant : can be
- * called while tracing is active with absolutely no lock held.
- *
- * For RING_BUFFER_SYNC_PER_CPU ring buffers, as a v_cmpxchg is used for
- * some atomic operations, this function must be called from the CPU
- * which owns the buffer for a ACTIVE flush. However, for
- * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
- * from any CPU.
- */
-void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
- struct lttng_ust_shm_handle *handle)
-{
- struct lttng_ust_lib_ring_buffer_channel *chan;
- const struct lttng_ust_lib_ring_buffer_config *config;
- struct switch_offsets offsets;
- unsigned long oldidx;
- uint64_t tsc;
-
- chan = shmp(handle, buf->backend.chan);
- if (!chan)
- return;
- config = &chan->backend.config;
-
- offsets.size = 0;
-
- /*
- * Perform retryable operations.
- */
- do {
- if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
- &tsc, handle))
- return; /* Switch not needed */
- } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
- != offsets.old);
-
- /*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
- */
- save_last_tsc(config, buf, tsc);
-
- /*
- * Push the reader if necessary
- */
- lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
-
- oldidx = subbuf_index(offsets.old, chan);
- lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);
-
- /*
- * May need to populate header start on SWITCH_FLUSH.
- */
- if (offsets.switch_old_start) {
- lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
- offsets.old += config->cb.subbuffer_header_size();
- }
-
- /*
- * Switch old subbuffer.
- */
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
-}
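-
-/*
- * Usage sketch (hypothetical call sites). Periodic flushes use
- * SWITCH_ACTIVE, as the switch timer above does; finalize uses
- * SWITCH_FLUSH so that even an empty sub-buffer gets its header
- * written and delivered. Per the comment above, SWITCH_ACTIVE on a
- * RING_BUFFER_SYNC_PER_CPU buffer must be issued from the owning CPU.
- */
-static void flush_sketch(struct lttng_ust_lib_ring_buffer *buf,
-		struct lttng_ust_shm_handle *handle)
-{
-	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);	/* periodic */
-	lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH, handle);	/* teardown */
-}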
-
-static
-bool handle_blocking_retry(int *timeout_left_ms)
-{
- int timeout = *timeout_left_ms, delay;
-
- if (caa_likely(!timeout))
- return false; /* Do not retry, discard event. */
- if (timeout < 0) /* Wait forever. */
- delay = RETRY_DELAY_MS;
- else
- delay = min_t(int, timeout, RETRY_DELAY_MS);
- (void) poll(NULL, 0, delay);
- if (timeout > 0)
- *timeout_left_ms -= delay;
- return true; /* Retry. */
-}
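-
-/*
- * Worked example for handle_blocking_retry() (values hypothetical;
- * RETRY_DELAY_MS is defined elsewhere in this library): assuming
- * RETRY_DELAY_MS = 100 and *timeout_left_ms = 250, successive calls
- * sleep 100, 100 and 50 ms, leaving 150, 50 and 0 ms; the fourth call
- * sees 0 and returns false, so the event is discarded. A timeout of
- * -1 sleeps RETRY_DELAY_MS per call forever (blocking mode).
- */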
-
-/*
- * Returns :
- * 0 if ok
- * -ENOSPC if event size is too large for packet.
- * -ENOBUFS if there is currently not enough space in buffer for the event.
- * -EIO if data cannot be written into the buffer for any other reason.
- */
-static
-int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct switch_offsets *offsets,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- struct lttng_ust_shm_handle *handle = chan->handle;
- unsigned long reserve_commit_diff, offset_cmp;
- int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
-
-retry:
- offsets->begin = offset_cmp = v_read(config, &buf->offset);
- offsets->old = offsets->begin;
- offsets->switch_new_start = 0;
- offsets->switch_new_end = 0;
- offsets->switch_old_end = 0;
- offsets->pre_header_padding = 0;
-
- ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
- return -EIO;
-
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
-
- if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
- offsets->switch_new_start = 1; /* For offsets->begin */
- } else {
- offsets->size = config->cb.record_header_size(config, chan,
- offsets->begin,
- &offsets->pre_header_padding,
- ctx, client_ctx);
- offsets->size +=
- lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
- ctx->largest_align)
- + ctx->data_size;
- if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
- offsets->size > chan->backend.subbuf_size)) {
- offsets->switch_old_end = 1; /* For offsets->old */
- offsets->switch_new_start = 1; /* For offsets->begin */
- }
- }
- if (caa_unlikely(offsets->switch_new_start)) {
- unsigned long sb_index, commit_count;
- struct commit_counters_cold *cc_cold;
-
- /*
- * We are typically not filling the previous buffer completely.
- */
- if (caa_likely(offsets->switch_old_end))
- offsets->begin = subbuf_align(offsets->begin, chan);
- offsets->begin = offsets->begin
- + config->cb.subbuffer_header_size();
- /* Test new buffer integrity */
- sb_index = subbuf_index(offsets->begin, chan);
- /*
- * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
- * lib_ring_buffer_check_deliver() has the matching
- * memory barriers required around commit_cold cc_sb
- * updates to ensure reserve and commit counter updates
- * are not seen reordered when updated by another CPU.
- */
- cmm_smp_rmb();
- cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
- if (!cc_cold)
- return -1;
- commit_count = v_read(config, &cc_cold->cc_sb);
- /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
- cmm_smp_rmb();
- if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
- /*
- * The reserve counter has been concurrently updated
- * while we read the commit counter. This means the
- * commit counter we read might not match buf->offset
- * due to concurrent update. We therefore need to retry.
- */
- goto retry;
- }
- reserve_commit_diff =
- (buf_trunc(offsets->begin, chan)
- >> chan->backend.num_subbuf_order)
- - (commit_count & chan->commit_count_mask);
- if (caa_likely(reserve_commit_diff == 0)) {
- /* Next subbuffer not being written to. */
- if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
- subbuf_trunc(offsets->begin, chan)
- - subbuf_trunc((unsigned long)
- uatomic_read(&buf->consumed), chan)
- >= chan->backend.buf_size)) {
- unsigned long nr_lost;
-
- if (handle_blocking_retry(&timeout_left_ms))
- goto retry;
-
- /*
- * We do not overwrite non consumed buffers
- * and we are full : record is lost.
- */
- nr_lost = v_read(config, &buf->records_lost_full);
- v_inc(config, &buf->records_lost_full);
- if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
- DBG("%lu or more records lost in (%s:%d) (buffer full)\n",
- nr_lost + 1, chan->backend.name,
- buf->backend.cpu);
- }
- return -ENOBUFS;
- } else {
- /*
- * Next subbuffer not being written to, and we
- * are either in overwrite mode or the buffer is
- * not full. It's safe to write in this new
- * subbuffer.
- */
- }
- } else {
- unsigned long nr_lost;
-
- /*
- * Next subbuffer reserve offset does not match the
- * commit offset, and this did not involve update to the
- * reserve counter. Drop record in producer-consumer and
- * overwrite mode. Caused by either a writer OOPS or too
- * many nested writes over a reserve/commit pair.
- */
- nr_lost = v_read(config, &buf->records_lost_wrap);
- v_inc(config, &buf->records_lost_wrap);
- if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
- DBG("%lu or more records lost in (%s:%d) (wrap-around)\n",
- nr_lost + 1, chan->backend.name,
- buf->backend.cpu);
- }
- return -EIO;
- }
- offsets->size =
- config->cb.record_header_size(config, chan,
- offsets->begin,
- &offsets->pre_header_padding,
- ctx, client_ctx);
- offsets->size +=
- lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
- ctx->largest_align)
- + ctx->data_size;
- if (caa_unlikely(subbuf_offset(offsets->begin, chan)
- + offsets->size > chan->backend.subbuf_size)) {
- unsigned long nr_lost;
-
- /*
- * Record too big for subbuffers, report error, don't
- * complete the sub-buffer switch.
- */
- nr_lost = v_read(config, &buf->records_lost_big);
- v_inc(config, &buf->records_lost_big);
- if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
- DBG("%lu or more records lost in (%s:%d) record size "
- " of %zu bytes is too large for buffer\n",
- nr_lost + 1, chan->backend.name,
- buf->backend.cpu, offsets->size);
- }
- return -ENOSPC;
- } else {
- /*
- * We just made a successful buffer switch and the
- * record fits in the new subbuffer. Let's write.
- */
- }
- } else {
- /*
- * Record fits in the current buffer and we are not on a switch
- * boundary. It's safe to write.
- */
- }
- offsets->end = offsets->begin + offsets->size;
-
- if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
- /*
- * The offset_end will fall at the very beginning of the next
- * subbuffer.
- */
- offsets->switch_new_end = 1; /* For offsets->begin */
- }
- return 0;
-}
-
-/**
- * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
- * @ctx: ring buffer context.
- *
- * Return: -ENOBUFS if not enough space, -ENOSPC if event size too large,
- * -EIO for other errors, else returns 0.
- * It will take care of sub-buffer switching.
- */
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx)
-{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
- struct lttng_ust_shm_handle *handle = chan->handle;
- const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
- struct lttng_ust_lib_ring_buffer *buf;
- struct switch_offsets offsets;
- int ret;
-
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
- else
- buf = shmp(handle, chan->backend.buf[0].shmp);
- if (!buf)
- return -EIO;
- ctx_private->buf = buf;
-
- offsets.size = 0;
-
- do {
- ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
- ctx, client_ctx);
- if (caa_unlikely(ret))
- return ret;
- } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
- offsets.end)
- != offsets.old));
-
- /*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * records, never the opposite (missing a full TSC record when it would
- * be needed).
- */
- save_last_tsc(config, buf, ctx_private->tsc);
-
- /*
- * Push the reader if necessary
- */
- lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
-
- /*
- * Clear noref flag for this subbuffer.
- */
- lib_ring_buffer_clear_noref(config, &buf->backend,
- subbuf_index(offsets.end - 1, chan),
- handle);
-
- /*
- * Switch old subbuffer if needed.
- */
- if (caa_unlikely(offsets.switch_old_end)) {
- lib_ring_buffer_clear_noref(config, &buf->backend,
- subbuf_index(offsets.old - 1, chan),
- handle);
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
- }
-
- /*
- * Populate new subbuffer.
- */
- if (caa_unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
-
- if (caa_unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
-
- ctx_private->slot_size = offsets.size;
- ctx_private->pre_offset = offsets.begin;
- ctx_private->buf_offset = offsets.begin + offsets.pre_header_padding;
- return 0;
-}
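-
-/*
- * Illustrative sketch (not part of this file): a caller is expected to
- * map the return value of lib_ring_buffer_reserve_slow() to its "records
- * lost" accounting, along these lines (helper names are hypothetical):
- *
- *	int ret = lib_ring_buffer_reserve_slow(ctx, client_ctx);
- *
- *	if (!ret)
- *		write_record(ctx);	// slot reserved, commit afterwards
- *	else if (ret == -ENOBUFS)
- *		count_lost_full();	// buffer full, record discarded
- *	else if (ret == -ENOSPC)
- *		count_lost_big();	// record too large for a sub-buffer
- *	else
- *		count_lost_other();	// -EIO: clock read or mapping error
- */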
-
-static
-void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- unsigned long commit_count,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle)
-{
- struct commit_counters_hot *cc_hot;
-
- if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
- return;
- cc_hot = shmp_index(handle, buf->commit_hot, idx);
- if (!cc_hot)
- return;
- v_set(config, &cc_hot->seq, commit_count);
-}
-
-/*
- * The ring buffer can count events recorded and overwritten per buffer,
- * but it is disabled by default due to its performance overhead.
- */
-#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
-static
-void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle)
-{
- v_add(config, subbuffer_get_records_count(config,
- &buf->backend, idx, handle),
- &buf->records_count);
- v_add(config, subbuffer_count_records_overrun(config,
- &buf->backend, idx, handle),
- &buf->records_overrun);
-}
-#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
-static
-void deliver_count_events(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
- unsigned long idx __attribute__((unused)),
- struct lttng_ust_shm_handle *handle __attribute__((unused)))
-{
-}
-#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
-
-void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer *buf,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- unsigned long offset,
- unsigned long commit_count,
- unsigned long idx,
- struct lttng_ust_shm_handle *handle,
- uint64_t tsc __attribute__((unused)))
-{
- unsigned long old_commit_count = commit_count
- - chan->backend.subbuf_size;
- struct commit_counters_cold *cc_cold;
-
- /*
- * If we succeeded at updating cc_sb below, we are the subbuffer
- * writer delivering the subbuffer. Deals with concurrent
- * updates of the "cc" value without adding a add_return atomic
- * operation to the fast path.
- *
- * We are doing the delivery in two steps:
- * - First, we cmpxchg() cc_sb to the new value
- * old_commit_count + 1. This ensures that we are the only
- * subbuffer user successfully filling the subbuffer, but we
- * do _not_ set the cc_sb value to "commit_count" yet.
- * Therefore, other writers that would wrap around the ring
- * buffer and try to start writing to our subbuffer would
- * have to drop records, because it would appear as
- * non-filled.
- * We therefore have exclusive access to the subbuffer control
- * structures. This mutual exclusion with other writers is
- * crucially important to perform record overruns count in
- * flight recorder mode locklessly.
- * - When we are ready to release the subbuffer (either for
- * reading or for overrun by other writers), we simply set the
- * cc_sb value to "commit_count" and perform delivery.
- *
- * The subbuffer size is at least 2 bytes (minimum size: 1 page).
- * This guarantees that old_commit_count + 1 != commit_count.
- */
-
- /*
- * Order prior updates to reserve count prior to the
- * commit_cold cc_sb update.
- */
- cmm_smp_wmb();
- cc_cold = shmp_index(handle, buf->commit_cold, idx);
- if (!cc_cold)
- return;
- if (caa_likely(v_cmpxchg(config, &cc_cold->cc_sb,
- old_commit_count, old_commit_count + 1)
- == old_commit_count)) {
- uint64_t *ts_end;
-
- /*
- * Start of exclusive subbuffer access. We are
- * guaranteed to be the last writer in this subbuffer
- * and any other writer trying to access this subbuffer
- * in this state is required to drop records.
- *
- * We can read the ts_end for the current sub-buffer
- * which has been saved by the very last space
- * reservation for the current sub-buffer.
- *
- * Order increment of commit counter before reading ts_end.
- */
- cmm_smp_mb();
- ts_end = shmp_index(handle, buf->ts_end, idx);
- if (!ts_end)
- return;
- deliver_count_events(config, buf, idx, handle);
- config->cb.buffer_end(buf, *ts_end, idx,
- lib_ring_buffer_get_data_size(config,
- buf,
- idx,
- handle),
- handle);
-
- /*
- * Increment the packet counter while we have exclusive
- * access.
- */
- subbuffer_inc_packet_count(config, &buf->backend, idx, handle);
-
- /*
- * Set noref flag and offset for this subbuffer id.
- * Contains a memory barrier that ensures counter stores
- * are ordered before set noref and offset.
- */
- lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
- buf_trunc_val(offset, chan), handle);
-
- /*
- * Order set_noref and record counter updates before the
- * end of subbuffer exclusive access. Orders with
- * respect to writers coming into the subbuffer after
- * wrap around, and also order wrt concurrent readers.
- */
- cmm_smp_mb();
- /* End of exclusive subbuffer access */
- v_set(config, &cc_cold->cc_sb, commit_count);
- /*
- * Order later updates to reserve count after
- * the commit cold cc_sb update.
- */
- cmm_smp_wmb();
- lib_ring_buffer_vmcore_check_deliver(config, buf,
- commit_count, idx, handle);
-
- /*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
- */
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
- && uatomic_read(&buf->active_readers)
- && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
- lib_ring_buffer_wakeup(buf, handle);
- }
- }
-}
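-
-/*
- * Condensed view of the delivery protocol implemented above (illustrative
- * only; finalize_packet() is a hypothetical placeholder for the buffer_end
- * callback and counter updates performed under exclusive access):
- *
- *	// Step 1: claim exclusive access. old_commit_count + 1 can never
- *	// equal commit_count, so the sub-buffer appears non-filled to
- *	// concurrent writers until step 2.
- *	if (v_cmpxchg(config, &cc_cold->cc_sb, old_commit_count,
- *			old_commit_count + 1) != old_commit_count)
- *		return;		// another writer owns the delivery
- *	finalize_packet();
- *	// Step 2: release the sub-buffer for readers and for overwrite.
- *	v_set(config, &cc_cold->cc_sb, commit_count);
- */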
-
-/*
- * Force a read of TLS variables (implies TLS fixup for dlopen).
- */
-void lttng_fixup_ringbuffer_tls(void)
-{
- asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
-}
-
-void lib_ringbuffer_signal_init(void)
-{
- sigset_t mask;
- int ret;
-
- /*
- * Block the signal for the entire process, so that only our
- * thread processes it.
- */
- rb_setmask(&mask);
- ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
- if (ret) {
- errno = ret;
- PERROR("pthread_sigmask");
- }
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2021 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * Ring buffer configuration header. Note: after declaring the standard inline
- * functions, clients should also include linux/ringbuffer/api.h.
- */
-
-#ifndef _LTTNG_RING_BUFFER_CONFIG_H
-#define _LTTNG_RING_BUFFER_CONFIG_H
-
-#include <errno.h>
-#include <stdint.h>
-#include <stddef.h>
-#include <urcu/arch.h>
-#include <string.h>
-
-#include <lttng/ust-utils.h>
-#include <lttng/ust-compiler.h>
-#include <lttng/ust-tracer.h>
-
-struct lttng_ust_lib_ring_buffer;
-struct lttng_ust_lib_ring_buffer_channel;
-struct lttng_ust_lib_ring_buffer_config;
-struct lttng_ust_lib_ring_buffer_ctx_private;
-struct lttng_ust_shm_handle;
-
-/*
- * Ring buffer client callbacks. Only used by slow path, never on fast path.
- * For the fast path, record_header_size() and ring_buffer_clock_read() should
- * be provided as inline functions too. These may simply return 0 if not used
- * by the client.
- */
-struct lttng_ust_lib_ring_buffer_client_cb {
- /* Mandatory callbacks */
-
- /* A static inline version is also required for fast path */
- uint64_t (*ring_buffer_clock_read) (struct lttng_ust_lib_ring_buffer_channel *chan);
- size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- size_t offset,
- size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
- void *client_ctx);
-
- /* Slow path only, at subbuffer switch */
- size_t (*subbuffer_header_size) (void);
- void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
- unsigned int subbuf_idx,
- struct lttng_ust_shm_handle *handle);
- void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
- unsigned int subbuf_idx, unsigned long data_size,
- struct lttng_ust_shm_handle *handle);
-
- /* Optional callbacks (can be set to NULL) */
-
- /* Called at buffer creation/finalize */
- int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
- int cpu, const char *name,
- struct lttng_ust_shm_handle *handle);
- /*
- * Clients should guarantee that no new reader handle can be opened
- * after finalize.
- */
- void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
- void *priv, int cpu,
- struct lttng_ust_shm_handle *handle);
-
- /*
- * Extract header length, payload length and timestamp from event
- * record. Used by buffer iterators. Timestamp is only used by channel
- * iterator.
- */
- void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_channel *chan,
- struct lttng_ust_lib_ring_buffer *buf,
- size_t offset, size_t *header_len,
- size_t *payload_len, uint64_t *timestamp,
- struct lttng_ust_shm_handle *handle);
- /*
- * Offset and size of content size field in client.
- */
- void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
- size_t *offset, size_t *length);
- void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
- size_t *offset, size_t *length);
-};
-
-/*
- * Ring buffer instance configuration.
- *
- * Declare as "static const" within the client object to ensure the inline fast
- * paths can be optimized.
- *
- * alloc/sync pairs:
- *
- * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
- * Per-cpu buffers with per-cpu synchronization.
- *
- * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
- * Per-cpu buffer with global synchronization. Tracing can be performed with
- * preemption enabled; it statistically stays on the local buffers.
- *
- * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
- * Should only be used for buffers belonging to a single thread or protected
- * by mutual exclusion by the client. Note that periodic sub-buffer switching
- * should be disabled in this kind of configuration.
- *
- * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
- * Global shared buffer with global synchronization.
- *
- * wakeup:
- *
- * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
- * buffers and wake up readers if data is ready. Mainly useful for tracers which
- * don't want to call into the wakeup code on the tracing path. Use in
- * combination with "read_timer_interval" channel_create() argument.
- *
- * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
- * ready to read. Lower latencies before the reader is woken up. Mainly suitable
- * for drivers.
- *
- * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
- * has the responsibility to perform wakeups.
- */
-#define LTTNG_UST_RING_BUFFER_CONFIG_PADDING 20
-
-enum lttng_ust_lib_ring_buffer_alloc_types {
- RING_BUFFER_ALLOC_PER_CPU,
- RING_BUFFER_ALLOC_GLOBAL,
-};
-
-enum lttng_ust_lib_ring_buffer_sync_types {
- RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
- RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
-};
-
-enum lttng_ust_lib_ring_buffer_mode_types {
- RING_BUFFER_OVERWRITE = 0, /* Overwrite when buffer full */
- RING_BUFFER_DISCARD = 1, /* Discard when buffer full */
-};
-
-enum lttng_ust_lib_ring_buffer_output_types {
- RING_BUFFER_SPLICE,
- RING_BUFFER_MMAP,
- RING_BUFFER_READ, /* TODO */
- RING_BUFFER_ITERATOR,
- RING_BUFFER_NONE,
-};
-
-enum lttng_ust_lib_ring_buffer_backend_types {
- RING_BUFFER_PAGE,
- RING_BUFFER_VMAP, /* TODO */
- RING_BUFFER_STATIC, /* TODO */
-};
-
-enum lttng_ust_lib_ring_buffer_oops_types {
- RING_BUFFER_NO_OOPS_CONSISTENCY,
- RING_BUFFER_OOPS_CONSISTENCY,
-};
-
-enum lttng_ust_lib_ring_buffer_ipi_types {
- RING_BUFFER_IPI_BARRIER,
- RING_BUFFER_NO_IPI_BARRIER,
-};
-
-enum lttng_ust_lib_ring_buffer_wakeup_types {
- RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
- RING_BUFFER_WAKEUP_BY_WRITER, /*
- * writer wakes up reader,
- * not lock-free
- * (takes spinlock).
- */
-};
-
-struct lttng_ust_lib_ring_buffer_config {
- enum lttng_ust_lib_ring_buffer_alloc_types alloc;
- enum lttng_ust_lib_ring_buffer_sync_types sync;
- enum lttng_ust_lib_ring_buffer_mode_types mode;
- enum lttng_ust_lib_ring_buffer_output_types output;
- enum lttng_ust_lib_ring_buffer_backend_types backend;
- enum lttng_ust_lib_ring_buffer_oops_types oops;
- enum lttng_ust_lib_ring_buffer_ipi_types ipi;
- enum lttng_ust_lib_ring_buffer_wakeup_types wakeup;
- /*
- * tsc_bits: timestamp bits saved at each record.
- * 0 and 64 disable the timestamp compression scheme.
- */
- unsigned int tsc_bits;
- struct lttng_ust_lib_ring_buffer_client_cb cb;
- /*
- * client_type is used by the consumer process (which is in a
- * different address space) to lookup the appropriate client
- * callbacks and update the cb pointers.
- */
- int client_type;
- int _unused1;
- const struct lttng_ust_lib_ring_buffer_client_cb *cb_ptr;
- char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];
-};
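-
-/*
- * Illustrative sketch (assumption, not defined by this header): a client
- * typically declares its configuration as "static const" so the inline
- * fast paths can be constant-folded, for instance:
- *
- *	static const struct lttng_ust_lib_ring_buffer_config client_config = {
- *		.alloc = RING_BUFFER_ALLOC_PER_CPU,
- *		.sync = RING_BUFFER_SYNC_GLOBAL,
- *		.mode = RING_BUFFER_DISCARD,
- *		.output = RING_BUFFER_MMAP,
- *		.backend = RING_BUFFER_PAGE,
- *		.oops = RING_BUFFER_OOPS_CONSISTENCY,
- *		.ipi = RING_BUFFER_NO_IPI_BARRIER,
- *		.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
- *		.tsc_bits = 27,			// illustrative value
- *		.cb = client_cb,		// hypothetical client callbacks
- *		.client_type = 0,		// illustrative value
- *	};
- */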
-
-/*
- * Reservation flags.
- *
- * RING_BUFFER_RFLAG_FULL_TSC
- *
- * This flag is passed to record_header_size() and to the primitive used to
- * write the record header. It indicates that the full 64-bit time value is
- * needed in the record header. If this flag is not set, the record header needs
- * only to contain "tsc_bits" bits of time value.
- *
- * Reservation flags can be added by the client, starting from
- * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
- * record_header_size() to lib_ring_buffer_write_record_header().
- */
-#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
-#define RING_BUFFER_RFLAG_END (1U << 1)
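-
-/*
- * For instance, a client-defined flag could look like this (hypothetical,
- * not defined by this header):
- *
- *	#define CLIENT_RFLAG_EXTENDED	(RING_BUFFER_RFLAG_END << 0)
- */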
-
-/*
- * lib_ring_buffer_check_config() returns 0 on success.
- * Used internally to check for valid configurations at channel creation.
- */
-static inline
-int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
- lttng_ust_notrace;
-
-static inline
-int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
- unsigned int switch_timer_interval,
- unsigned int read_timer_interval __attribute__((unused)))
-{
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
- && config->sync == RING_BUFFER_SYNC_PER_CPU
- && switch_timer_interval)
- return -EINVAL;
- return 0;
-}
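-
-/*
- * Sketch of the intended use at channel creation (illustrative):
- *
- *	if (lib_ring_buffer_check_config(config, switch_timer_interval,
- *			read_timer_interval))
- *		return NULL;	// invalid alloc/sync/timer combination
- */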
-
-#endif /* _LTTNG_RING_BUFFER_CONFIG_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include "shm.h"
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/stat.h> /* For mode constants */
-#include <fcntl.h> /* For O_* constants */
-#include <assert.h>
-#include <stdio.h>
-#include <signal.h>
-#include <dirent.h>
-#include <limits.h>
-#include <stdbool.h>
-#include <stdint.h>
-
-#ifdef HAVE_LIBNUMA
-#include <numa.h>
-#include <numaif.h>
-#endif
-
-#include <lttng/ust-utils.h>
-
-#include <ust-helper.h>
-#include <ust-fd.h>
-#include "mmap.h"
-
-/*
- * Ensure we have the required amount of space available by writing 0
- * into the entire buffer. Not doing so can trigger SIGBUS when going
- * beyond the available shm space.
- */
-static
-int zero_file(int fd, size_t len)
-{
- ssize_t retlen;
- size_t written = 0;
- char *zeropage;
- long pagelen;
- int ret;
-
- pagelen = sysconf(_SC_PAGESIZE);
- if (pagelen < 0)
- return (int) pagelen;
- zeropage = calloc(pagelen, 1);
- if (!zeropage)
- return -ENOMEM;
-
- while (len > written) {
- do {
- retlen = write(fd, zeropage,
- min_t(size_t, pagelen, len - written));
- } while (retlen == -1UL && errno == EINTR);
- if (retlen < 0) {
- ret = (int) retlen;
- goto error;
- }
- written += retlen;
- }
- ret = 0;
-error:
- free(zeropage);
- return ret;
-}
-
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
-{
- struct shm_object_table *table;
-
- table = zmalloc(sizeof(struct shm_object_table) +
- max_nb_obj * sizeof(table->objects[0]));
- if (!table)
- return NULL;
- table->size = max_nb_obj;
- return table;
-}
-
-static
-struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
- size_t memory_map_size,
- int stream_fd)
-{
- int shmfd, waitfd[2], ret, i;
- struct shm_object *obj;
- char *memory_map;
-
- if (stream_fd < 0)
- return NULL;
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- /* wait_fd: create pipe */
- ret = pipe(waitfd);
- if (ret < 0) {
- PERROR("pipe");
- goto error_pipe;
- }
- for (i = 0; i < 2; i++) {
- ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
- if (ret < 0) {
- PERROR("fcntl");
- goto error_fcntl;
- }
- }
- /* The write end of the pipe needs to be non-blocking */
- ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
- if (ret < 0) {
- PERROR("fcntl");
- goto error_fcntl;
- }
- memcpy(obj->wait_fd, waitfd, sizeof(waitfd));
-
- /*
- * Set POSIX shared memory object size
- *
- * First, use ftruncate() to set its size; some implementations won't
- * allow writes past the size set by ftruncate.
- * Then, use write() to fill it with zeros, this allows us to fully
- * allocate it and detect a shortage of shm space without dealing with
- * a SIGBUS.
- */
-
- shmfd = stream_fd;
- ret = ftruncate(shmfd, memory_map_size);
- if (ret) {
- PERROR("ftruncate");
- goto error_ftruncate;
- }
- ret = zero_file(shmfd, memory_map_size);
- if (ret) {
- PERROR("zero_file");
- goto error_zero_file;
- }
-
- /*
- * Also ensure the file metadata is synced with the storage by using
- * fsync(2). Some platforms don't allow fsync on POSIX shm fds; ignore
- * EINVAL accordingly.
- */
- ret = fsync(shmfd);
- if (ret && errno != EINVAL) {
- PERROR("fsync");
- goto error_fsync;
- }
- obj->shm_fd_ownership = 0;
- obj->shm_fd = shmfd;
-
- /* memory_map: mmap */
- memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
- if (memory_map == MAP_FAILED) {
- PERROR("mmap");
- goto error_mmap;
- }
- obj->type = SHM_OBJECT_SHM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = 0;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_mmap:
-error_fsync:
-error_ftruncate:
-error_zero_file:
-error_fcntl:
- for (i = 0; i < 2; i++) {
- ret = close(waitfd[i]);
- if (ret) {
- PERROR("close");
- assert(0);
- }
- }
-error_pipe:
- return NULL;
-}
-
-static
-struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
- size_t memory_map_size)
-{
- struct shm_object *obj;
- void *memory_map;
- int waitfd[2], i, ret;
-
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- memory_map = zmalloc(memory_map_size);
- if (!memory_map)
- goto alloc_error;
-
- /* wait_fd: create pipe */
- ret = pipe(waitfd);
- if (ret < 0) {
- PERROR("pipe");
- goto error_pipe;
- }
- for (i = 0; i < 2; i++) {
- ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
- if (ret < 0) {
- PERROR("fcntl");
- goto error_fcntl;
- }
- }
- /* The write end of the pipe needs to be non-blocking */
- ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
- if (ret < 0) {
- PERROR("fcntl");
- goto error_fcntl;
- }
- memcpy(obj->wait_fd, waitfd, sizeof(waitfd));
-
- /* no shm_fd */
- obj->shm_fd = -1;
- obj->shm_fd_ownership = 0;
-
- obj->type = SHM_OBJECT_MEM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = 0;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_fcntl:
- for (i = 0; i < 2; i++) {
- ret = close(waitfd[i]);
- if (ret) {
- PERROR("close");
- assert(0);
- }
- }
-error_pipe:
- free(memory_map);
-alloc_error:
- return NULL;
-}
-
-/*
- * libnuma prints errors on the console even for numa_available().
- * Work around this limitation by using get_mempolicy() directly to
- * check whether the kernel supports mempolicy.
- */
-#ifdef HAVE_LIBNUMA
-static bool lttng_is_numa_available(void)
-{
- int ret;
-
- ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
- if (ret && errno == ENOSYS) {
- return false;
- }
- /* numa_available() returns -1 on failure, 0 on success. */
- return numa_available() >= 0;
-}
-#endif
-
-struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
- size_t memory_map_size,
- enum shm_object_type type,
- int stream_fd,
- int cpu)
-{
- struct shm_object *shm_object;
-#ifdef HAVE_LIBNUMA
- int oldnode = 0, node;
- bool numa_avail;
-
- numa_avail = lttng_is_numa_available();
- if (numa_avail) {
- oldnode = numa_preferred();
- if (cpu >= 0) {
- node = numa_node_of_cpu(cpu);
- if (node >= 0)
- numa_set_preferred(node);
- }
- if (cpu < 0 || node < 0)
- numa_set_localalloc();
- }
-#endif /* HAVE_LIBNUMA */
- switch (type) {
- case SHM_OBJECT_SHM:
- shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
- stream_fd);
- break;
- case SHM_OBJECT_MEM:
- shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
- break;
- default:
- assert(0);
- }
-#ifdef HAVE_LIBNUMA
- if (numa_avail)
- numa_set_preferred(oldnode);
-#endif /* HAVE_LIBNUMA */
- return shm_object;
-}
-
-struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
- int shm_fd, int wakeup_fd, uint32_t stream_nr,
- size_t memory_map_size)
-{
- struct shm_object *obj;
- char *memory_map;
- int ret;
-
- if (table->allocated_len >= table->size)
- return NULL;
- /* streams _must_ be received in sequential order, else fail. */
- if (stream_nr + 1 != table->allocated_len)
- return NULL;
-
- obj = &table->objects[table->allocated_len];
-
- /* wait_fd: set write end of the pipe. */
- obj->wait_fd[0] = -1; /* read end is unset */
- obj->wait_fd[1] = wakeup_fd;
- obj->shm_fd = shm_fd;
- obj->shm_fd_ownership = 1;
-
- /* The write end of the pipe needs to be non-blocking */
- ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
- if (ret < 0) {
- PERROR("fcntl");
- goto error_fcntl;
- }
-
- /* memory_map: mmap */
- memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
- MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
- if (memory_map == MAP_FAILED) {
- PERROR("mmap");
- goto error_mmap;
- }
- obj->type = SHM_OBJECT_SHM;
- obj->memory_map = memory_map;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = memory_map_size;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_fcntl:
-error_mmap:
- return NULL;
-}
-
-/*
- * Passing ownership of mem to object.
- */
-struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
- void *mem, size_t memory_map_size, int wakeup_fd)
-{
- struct shm_object *obj;
- int ret;
-
- if (table->allocated_len >= table->size)
- return NULL;
- obj = &table->objects[table->allocated_len];
-
- obj->wait_fd[0] = -1; /* read end is unset */
- obj->wait_fd[1] = wakeup_fd;
- obj->shm_fd = -1;
- obj->shm_fd_ownership = 0;
-
- ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
- if (ret < 0) {
- PERROR("fcntl");
- goto error_fcntl;
- }
- /* The write end of the pipe needs to be non-blocking */
- ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
- if (ret < 0) {
- PERROR("fcntl");
- goto error_fcntl;
- }
-
- obj->type = SHM_OBJECT_MEM;
- obj->memory_map = mem;
- obj->memory_map_size = memory_map_size;
- obj->allocated_len = memory_map_size;
- obj->index = table->allocated_len++;
-
- return obj;
-
-error_fcntl:
- return NULL;
-}
-
-static
-void shmp_object_destroy(struct shm_object *obj, int consumer)
-{
- switch (obj->type) {
- case SHM_OBJECT_SHM:
- {
- int ret, i;
-
- ret = munmap(obj->memory_map, obj->memory_map_size);
- if (ret) {
- PERROR("umnmap");
- assert(0);
- }
-
- if (obj->shm_fd_ownership) {
- /* Delete FDs only if called from app (not consumer). */
- if (!consumer) {
- lttng_ust_lock_fd_tracker();
- ret = close(obj->shm_fd);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(obj->shm_fd);
- } else {
- PERROR("close");
- assert(0);
- }
- lttng_ust_unlock_fd_tracker();
- } else {
- ret = close(obj->shm_fd);
- if (ret) {
- PERROR("close");
- assert(0);
- }
- }
- }
- for (i = 0; i < 2; i++) {
- if (obj->wait_fd[i] < 0)
- continue;
- if (!consumer) {
- lttng_ust_lock_fd_tracker();
- ret = close(obj->wait_fd[i]);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
- } else {
- PERROR("close");
- assert(0);
- }
- lttng_ust_unlock_fd_tracker();
- } else {
- ret = close(obj->wait_fd[i]);
- if (ret) {
- PERROR("close");
- assert(0);
- }
- }
- }
- break;
- }
- case SHM_OBJECT_MEM:
- {
- int ret, i;
-
- for (i = 0; i < 2; i++) {
- if (obj->wait_fd[i] < 0)
- continue;
- if (!consumer) {
- lttng_ust_lock_fd_tracker();
- ret = close(obj->wait_fd[i]);
- if (!ret) {
- lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
- } else {
- PERROR("close");
- assert(0);
- }
- lttng_ust_unlock_fd_tracker();
- } else {
- ret = close(obj->wait_fd[i]);
- if (ret) {
- PERROR("close");
- assert(0);
- }
- }
- }
- free(obj->memory_map);
- break;
- }
- default:
- assert(0);
- }
-}
-
-void shm_object_table_destroy(struct shm_object_table *table, int consumer)
-{
- int i;
-
- for (i = 0; i < table->allocated_len; i++)
- shmp_object_destroy(&table->objects[i], consumer);
- free(table);
-}
-
-/*
- * zalloc_shm - allocate memory within a shm object.
- *
- * Shared memory is already zeroed by shmget.
- * *NOT* multithread-safe (should be protected by mutex).
- * Returns a -1, -1 tuple on error.
- */
-struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
-{
- struct shm_ref ref;
- struct shm_ref shm_ref_error = { -1, -1 };
-
- if (obj->memory_map_size - obj->allocated_len < len)
- return shm_ref_error;
- ref.index = obj->index;
- ref.offset = obj->allocated_len;
- obj->allocated_len += len;
- return ref;
-}
-
-void align_shm(struct shm_object *obj, size_t align)
-{
- size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
- obj->allocated_len += offset_len;
-}
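-
-/*
- * Illustrative usage sketch (struct my_struct is a hypothetical type):
- * carve an aligned element out of a shm object, then validate the
- * returned reference before use:
- *
- *	struct shm_ref ref;
- *
- *	align_shm(obj, __alignof__(struct my_struct));
- *	ref = zalloc_shm(obj, sizeof(struct my_struct));
- *	if (ref.index < 0)
- *		return -ENOMEM;	// object exhausted
- */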
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBRINGBUFFER_SHM_H
-#define _LIBRINGBUFFER_SHM_H
-
-#include <stddef.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <usterr-signal-safe.h>
-#include <urcu/compiler.h>
-#include "shm_types.h"
-
-/* channel_handle_create - for UST. */
-extern
-struct lttng_ust_shm_handle *channel_handle_create(void *data,
- uint64_t memory_map_size, int wakeup_fd)
- __attribute__((visibility("hidden")));
-
-/* channel_handle_add_stream - for UST. */
-extern
-int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
- int shm_fd, int wakeup_fd, uint32_t stream_nr,
- uint64_t memory_map_size)
- __attribute__((visibility("hidden")));
-
-unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
- __attribute__((visibility("hidden")));
-
-/*
- * Pointer dereferencing. We don't trust the shm_ref, so we validate
- * both the index and offset with known boundaries.
- *
- * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
- * target type, even in the occurrence of shm_ref modification by an
- * untrusted process having write access to the shm_ref. We return a
- * NULL pointer if the ranges are invalid.
- */
-static inline
-char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
- size_t idx, size_t elem_size)
-{
- struct shm_object *obj;
- size_t objindex, ref_offset;
-
- objindex = (size_t) ref->index;
- if (caa_unlikely(objindex >= table->allocated_len))
- return NULL;
- obj = &table->objects[objindex];
- ref_offset = (size_t) ref->offset;
- ref_offset += idx * elem_size;
- /* Check if part of the element returned would exceed the limits. */
- if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
- return NULL;
- return &obj->memory_map[ref_offset];
-}
-
-#define shmp_index(handle, ref, index) \
- ((__typeof__((ref)._type)) _shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*((ref)._type))))
-
-#define shmp(handle, ref) shmp_index(handle, ref, 0)
-
-static inline
-void _set_shmp(struct shm_ref *ref, struct shm_ref src)
-{
- *ref = src;
-}
-
-#define set_shmp(ref, src) _set_shmp(&(ref)._ref, src)
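-
-/*
- * Illustrative sketch (struct foo and the bar field are hypothetical):
- * a field declared with DECLARE_SHMP(struct foo, bar) is dereferenced
- * through shmp()/shmp_index(), and the NULL case must be handled:
- *
- *	struct foo *f = shmp(handle, parent->bar);
- *
- *	if (!f)
- *		return -EPERM;	// out-of-bounds or corrupted shm_ref
- */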
-
-struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
- __attribute__((visibility("hidden")));
-
-struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
- size_t memory_map_size,
- enum shm_object_type type,
- const int stream_fd,
- int cpu)
- __attribute__((visibility("hidden")));
-
-struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
- int shm_fd, int wakeup_fd, uint32_t stream_nr,
- size_t memory_map_size)
- __attribute__((visibility("hidden")));
-
-/* mem ownership is passed to shm_object_table_append_mem(). */
-struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
- void *mem, size_t memory_map_size, int wakeup_fd)
- __attribute__((visibility("hidden")));
-
-void shm_object_table_destroy(struct shm_object_table *table, int consumer)
- __attribute__((visibility("hidden")));
-
-/*
- * zalloc_shm - allocate memory within a shm object.
- *
- * Shared memory is already zeroed when allocated (zero_file()/zmalloc()).
- * *NOT* multithread-safe (should be protected by mutex).
- * Returns a -1, -1 tuple on error.
- */
-struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
- __attribute__((visibility("hidden")));
-
-void align_shm(struct shm_object *obj, size_t align)
- __attribute__((visibility("hidden")));
-
-static inline
-int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
-{
- struct shm_object_table *table = handle->table;
- struct shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- return obj->wait_fd[0];
-}
-
-static inline
-int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
-{
- struct shm_object_table *table = handle->table;
- struct shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- return obj->wait_fd[1];
-}
-
-static inline
-int shm_close_wait_fd(struct lttng_ust_shm_handle *handle,
- struct shm_ref *ref)
-{
- struct shm_object_table *table = handle->table;
- struct shm_object *obj;
- int wait_fd;
- size_t index;
- int ret;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- wait_fd = obj->wait_fd[0];
- if (wait_fd < 0)
- return -ENOENT;
- obj->wait_fd[0] = -1;
- ret = close(wait_fd);
- if (ret) {
- ret = -errno;
- return ret;
- }
- return 0;
-}
-
-static inline
-int shm_close_wakeup_fd(struct lttng_ust_shm_handle *handle,
- struct shm_ref *ref)
-{
- struct shm_object_table *table = handle->table;
- struct shm_object *obj;
- int wakeup_fd;
- size_t index;
- int ret;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- wakeup_fd = obj->wait_fd[1];
- if (wakeup_fd < 0)
- return -ENOENT;
- obj->wait_fd[1] = -1;
- ret = close(wakeup_fd);
- if (ret) {
- ret = -errno;
- return ret;
- }
- return 0;
-}
-
-static inline
-int shm_get_shm_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
-{
- struct shm_object_table *table = handle->table;
- struct shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- return obj->shm_fd;
-}
-
-
-static inline
-int shm_get_shm_size(struct lttng_ust_shm_handle *handle, struct shm_ref *ref,
- uint64_t *size)
-{
- struct shm_object_table *table = handle->table;
- struct shm_object *obj;
- size_t index;
-
- index = (size_t) ref->index;
- if (caa_unlikely(index >= table->allocated_len))
- return -EPERM;
- obj = &table->objects[index];
- *size = obj->memory_map_size;
- return 0;
-}
-
-#endif /* _LIBRINGBUFFER_SHM_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBRINGBUFFER_SHM_INTERNAL_H
-#define _LIBRINGBUFFER_SHM_INTERNAL_H
-
-struct shm_ref {
- volatile ssize_t index; /* within the object table */
- volatile ssize_t offset; /* within the object */
-};
-
-#define DECLARE_SHMP(type, name) \
- union { \
- struct shm_ref _ref; \
- type *_type; \
- } name
-
-#endif /* _LIBRINGBUFFER_SHM_INTERNAL_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBRINGBUFFER_SHM_TYPES_H
-#define _LIBRINGBUFFER_SHM_TYPES_H
-
-#include <stdint.h>
-#include <stddef.h>
-#include <limits.h>
-#include "shm_internal.h"
-
-struct lttng_ust_lib_ring_buffer_channel;
-
-enum shm_object_type {
- SHM_OBJECT_SHM,
- SHM_OBJECT_MEM,
-};
-
-struct shm_object {
- enum shm_object_type type;
- size_t index; /* within the object table */
- int shm_fd; /* shm fd */
- int wait_fd[2]; /* fd for wait/wakeup */
- char *memory_map;
- size_t memory_map_size;
- uint64_t allocated_len;
- int shm_fd_ownership;
-};
-
-struct shm_object_table {
- size_t size;
- size_t allocated_len;
- struct shm_object objects[];
-};
-
-struct lttng_ust_shm_handle {
- struct shm_object_table *table;
- DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_channel, chan);
-};
-
-#endif /* _LIBRINGBUFFER_SHM_TYPES_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
- */
-
-#define _LGPL_SOURCE
-#include <unistd.h>
-#include <pthread.h>
-#include "smp.h"
-
-int __num_possible_cpus;
-
-#if (defined(__GLIBC__) || defined(__UCLIBC__))
-void _get_num_possible_cpus(void)
-{
- int result;
-
- /* On Linux, when some processors are offline
- * _SC_NPROCESSORS_CONF counts the offline
- * processors, whereas _SC_NPROCESSORS_ONLN
- * does not. If we used _SC_NPROCESSORS_ONLN,
- * getcpu() could return a value greater than
- * this sysconf, in which case the arrays
- * indexed by processor would overflow.
- */
- result = sysconf(_SC_NPROCESSORS_CONF);
- if (result == -1)
- return;
- __num_possible_cpus = result;
-}
-
-#else
-
-/*
- * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
- * return the number of configured CPUs in the system but relies on the cpu
- * affinity mask of the current task.
- *
- * So instead we use a strategy similar to glibc's, counting the cpu
- * directories in "/sys/devices/system/cpu" and falling back on the value
- * from sysconf if that fails.
- */
-
-#include <dirent.h>
-#include <limits.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/types.h>
-
-#define __max(a,b) ((a)>(b)?(a):(b))
-
-void _get_num_possible_cpus(void)
-{
- int result, count = 0;
- DIR *cpudir;
- struct dirent *entry;
-
- cpudir = opendir("/sys/devices/system/cpu");
- if (cpudir == NULL)
- goto end;
-
- /*
- * Count the number of directories named "cpu" followed by an
- * integer. This is the same strategy as glibc uses.
- */
- while ((entry = readdir(cpudir))) {
- if (entry->d_type == DT_DIR &&
- strncmp(entry->d_name, "cpu", 3) == 0) {
-
- char *endptr;
- unsigned long cpu_num;
-
- cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
- if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
- && (*endptr == '\0')) {
- count++;
- }
- }
- }
-
-end:
- /*
- * Get the sysconf value as a fallback. Keep the highest number.
- */
- result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
-
- /*
- * If both methods failed, don't store the value.
- */
- if (result < 1)
- return;
- __num_possible_cpus = result;
-}
-#endif
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LIBRINGBUFFER_SMP_H
-#define _LIBRINGBUFFER_SMP_H
-
-#include "getcpu.h"
-
-/*
- * 4kB of per-cpu data available. Enough to hold the control structures,
- * but not ring buffers.
- */
-#define PER_CPU_MEM_SIZE 4096
-
-extern int __num_possible_cpus
- __attribute__((visibility("hidden")));
-
-extern void _get_num_possible_cpus(void)
- __attribute__((visibility("hidden")));
-
-static inline
-int num_possible_cpus(void)
-{
- if (!__num_possible_cpus)
- _get_num_possible_cpus();
- return __num_possible_cpus;
-}
-
-#define for_each_possible_cpu(cpu) \
- for ((cpu) = 0; (cpu) < num_possible_cpus(); (cpu)++)
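-
-/*
- * Typical use (illustrative; init_per_cpu_buffer() is a hypothetical
- * helper):
- *
- *	int cpu;
- *
- *	for_each_possible_cpu(cpu)
- *		init_per_cpu_buffer(cpu);
- */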
-
-#endif /* _LIBRINGBUFFER_SMP_H */
+++ /dev/null
-/*
- * SPDX-License-Identifier: MIT
- *
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_RING_BUFFER_VATOMIC_H
-#define _LTTNG_RING_BUFFER_VATOMIC_H
-
-#include <assert.h>
-#include <urcu/uatomic.h>
-
-/*
- * Same data type (long) accessed differently depending on configuration.
- * v field is for non-atomic access (protected by mutual exclusion).
- * In the fast-path, the ring_buffer_config structure is constant, so the
- * compiler can statically select the appropriate branch.
- * In this userspace port, atomic accesses go through uatomic; the
- * helpers below assert that RING_BUFFER_SYNC_PER_CPU is not used.
- */
-union v_atomic {
- long a; /* accessed through uatomic */
- long v;
-};
-
-static inline
-long v_read(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- return uatomic_read(&v_a->a);
-}
-
-static inline
-void v_set(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
- long v)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- uatomic_set(&v_a->a, v);
-}
-
-static inline
-void v_add(const struct lttng_ust_lib_ring_buffer_config *config, long v, union v_atomic *v_a)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- uatomic_add(&v_a->a, v);
-}
-
-static inline
-void v_inc(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- uatomic_inc(&v_a->a);
-}
-
-/*
- * Non-atomic decrement. Only used by the reader, on a reader-owned subbuffer.
- */
-static inline
-void _v_dec(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), union v_atomic *v_a)
-{
- --v_a->v;
-}
-
-static inline
-long v_cmpxchg(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
- long old, long _new)
-{
- assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
- return uatomic_cmpxchg(&v_a->a, old, _new);
-}
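-
-/*
- * Illustrative sketch: v_cmpxchg() is the primitive behind the lock-less
- * reserve loop in the ring buffer frontend, along these lines:
- *
- *	do {
- *		old = v_read(config, &buf->offset);
- *		// ... compute new_offset from old ...
- *	} while (v_cmpxchg(config, &buf->offset, old, new_offset) != old);
- */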
-
-#endif /* _LTTNG_RING_BUFFER_VATOMIC_H */
+++ /dev/null
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: LTTng Userspace Tracer control
-Description: The LTTng Userspace Tracer (UST) is a library accompanied by a set of tools to trace userspace code.
-Version: @PACKAGE_VERSION@
-Requires:
-Libs: -L${libdir} -llttng-ust-ctl
-Cflags: -I${includedir}
-
+++ /dev/null
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: LTTng Userspace Tracer
-Description: The LTTng Userspace Tracer (UST) is a library accompanied by a set of tools to trace userspace code.
-Version: @PACKAGE_VERSION@
-Requires:
-Libs: -L${libdir} -llttng-ust -llttng-ust-common -ldl
-Cflags: -I${includedir}
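-
-# Example use (illustrative):
-#   cc app.c $(pkg-config --cflags --libs lttng-ust)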
-
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-INSTALLED_FILES=$(builddir)/installed_files.txt
-
-STATIC_BINDINGS_DEPS = \
- lttngust/__init__.py \
- lttngust/agent.py \
- lttngust/cmd.py \
- lttngust/compat.py \
- lttngust/debug.py \
- lttngust/loghandler.py
-
-all-local: build-python-bindings.stamp
-
-copy-static-deps.stamp: $(addprefix $(srcdir)/, $(STATIC_BINDINGS_DEPS))
- @if [ x"$(srcdir)" != x"$(builddir)" ]; then \
- for file in $(STATIC_BINDINGS_DEPS); do \
- cp -f $(srcdir)/$$file $(builddir)/$$file; \
- done; \
- fi
- touch $@
-
-# Use setup.py for the installation instead of Autoconf.
-# This eases the installation process and assures a *pythonic*
-# installation.
-build-python-bindings.stamp: copy-static-deps.stamp
- $(PYTHON) $(builddir)/setup.py build --force
- touch $@
-
-install-exec-local: build-python-bindings.stamp
- @opts="--prefix=$(prefix) --record $(INSTALLED_FILES) --verbose --no-compile $(DISTSETUPOPTS)"; \
- if [ "$(DESTDIR)" != "" ]; then \
- opts="$$opts --root=$(DESTDIR)"; \
- fi; \
- $(PYTHON) $(builddir)/setup.py install $$opts;
-
-clean-local:
- rm -rf $(builddir)/build
- @if [ x"$(srcdir)" != x"$(builddir)" ]; then \
- for file in $(STATIC_BINDINGS_DEPS); do \
- rm -f $(builddir)/$$file; \
- done; \
- fi
-
-# Distutils' setup.py does not include an uninstall target, so we need to do
-# it manually. We save the paths of the files installed during the install
-# target and delete them during uninstallation.
-uninstall-local:
- if [ "$(DESTDIR)" != "" ]; then \
- $(SED) -i "s|^|$(DESTDIR)/|g" $(INSTALLED_FILES); \
- fi
- cat $(INSTALLED_FILES) | xargs rm -rf || true
- $(GREP) "__init__.py" $(INSTALLED_FILES) | xargs dirname | xargs rm -rf || true
- rm -f $(INSTALLED_FILES)
-
-EXTRA_DIST = $(STATIC_BINDINGS_DEPS)
-
-CLEANFILES = \
- build-python-bindings.stamp \
- copy-static-deps.stamp
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
-
-from __future__ import unicode_literals
-
-from .version import __version__
-from .version import __soname_major__
-
-# this creates the daemon threads and registers the application
-import lttngust.agent
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
-# Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
-
-from __future__ import unicode_literals
-from __future__ import print_function
-from __future__ import division
-import lttngust.debug as dbg
-import lttngust.loghandler
-import lttngust.compat
-import lttngust.cmd
-from io import open
-import threading
-import logging
-import socket
-import time
-import sys
-import os
-
-
-try:
- # Python 2
- import Queue as queue
-except ImportError:
- # Python 3
- import queue
-
-
-_PROTO_DOMAIN = 5
-_PROTO_MAJOR = 2
-_PROTO_MINOR = 0
-
-
-def _get_env_value_ms(key, default_s):
- try:
- val = int(os.getenv(key, default_s * 1000)) / 1000
- except:
- val = -1
-
- if val < 0:
- fmt = 'invalid ${} value; {} seconds will be used'
- dbg._pwarning(fmt.format(key, default_s))
- val = default_s
-
- return val
-
-
-_REG_TIMEOUT = _get_env_value_ms('LTTNG_UST_PYTHON_REGISTER_TIMEOUT', 5)
-_RETRY_REG_DELAY = _get_env_value_ms('LTTNG_UST_PYTHON_REGISTER_RETRY_DELAY', 3)
-
-
-class _TcpClient(object):
- def __init__(self, name, host, port, reg_queue):
- super(self.__class__, self).__init__()
- self._name = name
- self._host = host
- self._port = port
-
- try:
- self._log_handler = lttngust.loghandler._Handler()
- except (OSError) as e:
- dbg._pwarning('cannot load library: {}'.format(e))
- raise e
-
- self._root_logger = logging.getLogger()
- self._root_logger.setLevel(logging.NOTSET)
- self._ref_count = 0
- self._sessiond_sock = None
- self._reg_queue = reg_queue
- self._server_cmd_handlers = {
- lttngust.cmd._ServerCmdRegistrationDone: self._handle_server_cmd_reg_done,
- lttngust.cmd._ServerCmdEnable: self._handle_server_cmd_enable,
- lttngust.cmd._ServerCmdDisable: self._handle_server_cmd_disable,
- lttngust.cmd._ServerCmdList: self._handle_server_cmd_list,
- }
-
- def _debug(self, msg):
- return 'client "{}": {}'.format(self._name, msg)
-
- def run(self):
- while True:
- try:
- # connect to the session daemon
- dbg._pdebug(self._debug('connecting to session daemon'))
- self._connect_to_sessiond()
-
- # register to the session daemon after a successful connection
- dbg._pdebug(self._debug('registering to session daemon'))
- self._register()
-
- # wait for commands from the session daemon
- self._wait_server_cmd()
- except (Exception) as e:
- # Whatever happens here, we have to close the socket and
- # retry connecting to the session daemon, since either
- # the socket was closed, a network timeout occurred, or
- # invalid data was received.
- dbg._pdebug(self._debug('got exception: {}'.format(e)))
- self._cleanup_socket()
- dbg._pdebug(self._debug('sleeping for {} s'.format(_RETRY_REG_DELAY)))
- time.sleep(_RETRY_REG_DELAY)
-
- def _recv_server_cmd_header(self):
- data = self._sessiond_sock.recv(lttngust.cmd._SERVER_CMD_HEADER_SIZE)
-
- if not data:
- dbg._pdebug(self._debug('received empty server command header'))
- return None
-
- assert(len(data) == lttngust.cmd._SERVER_CMD_HEADER_SIZE)
- dbg._pdebug(self._debug('received server command header ({} bytes)'.format(len(data))))
-
- return lttngust.cmd._server_cmd_header_from_data(data)
-
- def _recv_server_cmd(self):
- server_cmd_header = self._recv_server_cmd_header()
-
- if server_cmd_header is None:
- return None
-
- dbg._pdebug(self._debug('server command header: data size: {} bytes'.format(server_cmd_header.data_size)))
- dbg._pdebug(self._debug('server command header: command ID: {}'.format(server_cmd_header.cmd_id)))
- dbg._pdebug(self._debug('server command header: command version: {}'.format(server_cmd_header.cmd_version)))
- data = bytes()
-
- if server_cmd_header.data_size > 0:
- data = self._sessiond_sock.recv(server_cmd_header.data_size)
- assert(len(data) == server_cmd_header.data_size)
-
- return lttngust.cmd._server_cmd_from_data(server_cmd_header, data)
-
- def _send_cmd_reply(self, cmd_reply):
- data = cmd_reply.get_data()
- dbg._pdebug(self._debug('sending command reply ({} bytes)'.format(len(data))))
- self._sessiond_sock.sendall(data)
-
- def _handle_server_cmd_reg_done(self, server_cmd):
- dbg._pdebug(self._debug('got "registration done" server command'))
-
- if self._reg_queue is not None:
- dbg._pdebug(self._debug('notifying _init_threads()'))
-
- try:
- self._reg_queue.put(True)
- except (Exception) as e:
- # read side could be closed by now; ignore it
- pass
-
- self._reg_queue = None
-
- def _handle_server_cmd_enable(self, server_cmd):
- dbg._pdebug(self._debug('got "enable" server command'))
- self._ref_count += 1
-
- if self._ref_count == 1:
- dbg._pdebug(self._debug('adding our handler to the root logger'))
- self._root_logger.addHandler(self._log_handler)
-
- dbg._pdebug(self._debug('ref count is {}'.format(self._ref_count)))
-
- return lttngust.cmd._ClientCmdReplyEnable()
-
- def _handle_server_cmd_disable(self, server_cmd):
- dbg._pdebug(self._debug('got "disable" server command'))
- self._ref_count -= 1
-
- if self._ref_count < 0:
- # disable command could be sent again when a session is destroyed
- self._ref_count = 0
-
- if self._ref_count == 0:
- dbg._pdebug(self._debug('removing our handler from the root logger'))
- self._root_logger.removeHandler(self._log_handler)
-
- dbg._pdebug(self._debug('ref count is {}'.format(self._ref_count)))
-
- return lttngust.cmd._ClientCmdReplyDisable()
-
- def _handle_server_cmd_list(self, server_cmd):
- dbg._pdebug(self._debug('got "list" server command'))
- names = logging.Logger.manager.loggerDict.keys()
- dbg._pdebug(self._debug('found {} loggers'.format(len(names))))
- cmd_reply = lttngust.cmd._ClientCmdReplyList(names=names)
-
- return cmd_reply
-
- def _handle_server_cmd(self, server_cmd):
- cmd_reply = None
-
- if server_cmd is None:
- dbg._pdebug(self._debug('bad server command'))
- status = lttngust.cmd._CLIENT_CMD_REPLY_STATUS_INVALID_CMD
- cmd_reply = lttngust.cmd._ClientCmdReply(status)
- elif type(server_cmd) in self._server_cmd_handlers:
- cmd_reply = self._server_cmd_handlers[type(server_cmd)](server_cmd)
- else:
- dbg._pdebug(self._debug('unknown server command'))
- status = lttngust.cmd._CLIENT_CMD_REPLY_STATUS_INVALID_CMD
- cmd_reply = lttngust.cmd._ClientCmdReply(status)
-
- if cmd_reply is not None:
- self._send_cmd_reply(cmd_reply)
-
- def _wait_server_cmd(self):
- while True:
- try:
- server_cmd = self._recv_server_cmd()
- except socket.timeout:
- # simply retry here; the protocol has no keep-alive and we
- # could wait for hours
- continue
-
- self._handle_server_cmd(server_cmd)
-
- def _cleanup_socket(self):
- try:
- self._sessiond_sock.shutdown(socket.SHUT_RDWR)
- self._sessiond_sock.close()
- except:
- pass
-
- self._sessiond_sock = None
-
- def _connect_to_sessiond(self):
- # create session daemon TCP socket
- if self._sessiond_sock is None:
- self._sessiond_sock = socket.socket(socket.AF_INET,
- socket.SOCK_STREAM)
-
- # Use str(self._host) here. Since this host could be a string
- # literal, and since we're importing __future__.unicode_literals,
- # we want to make sure the host is a native string in Python 2.
- # This avoids an indirect module import (unicode module to
- # decode the unicode string, eventually imported by the
- # socket module if needed), which is not allowed in a thread
- # directly created by a module in Python 2 (our case).
- #
- # tl;dr: Do NOT remove str() here, or this call in Python 2
- # _will_ block on an interpreter's mutex until the waiting
- # register queue times out.
- self._sessiond_sock.connect((str(self._host), self._port))
-
- def _register(self):
- cmd = lttngust.cmd._ClientRegisterCmd(_PROTO_DOMAIN, os.getpid(),
- _PROTO_MAJOR, _PROTO_MINOR)
- data = cmd.get_data()
- self._sessiond_sock.sendall(data)
-
-
-def _get_port_from_file(path):
- port = None
- dbg._pdebug('reading port from file "{}"'.format(path))
-
- try:
- f = open(path)
- r_port = int(f.readline())
- f.close()
-
- if r_port > 0 and r_port <= 65535:
- port = r_port
- except:
- pass
-
- return port
-
-
-def _get_user_home_path():
- # $LTTNG_HOME overrides $HOME if it exists
- return os.getenv('LTTNG_HOME', os.path.expanduser('~'))
-
-
-_initialized = False
-_SESSIOND_HOST = '127.0.0.1'
-
-
-def _client_thread_target(name, port, reg_queue):
- dbg._pdebug('creating client "{}" using TCP port {}'.format(name, port))
- client = _TcpClient(name, _SESSIOND_HOST, port, reg_queue)
- dbg._pdebug('starting client "{}"'.format(name))
- client.run()
-
-
-def _init_threads():
- global _initialized
-
- dbg._pdebug('entering')
-
- if _initialized:
- dbg._pdebug('agent is already initialized')
- return
-
- # This makes sure that the appropriate modules for encoding and
- # decoding strings/bytes are imported now, since no import should
- # happen within a thread at import time (our case).
- 'lttng'.encode().decode()
-
- _initialized = True
- sys_port = _get_port_from_file('/var/run/lttng/agent.port')
- user_port_file = os.path.join(_get_user_home_path(), '.lttng', 'agent.port')
- user_port = _get_port_from_file(user_port_file)
- reg_queue = queue.Queue()
- reg_expecting = 0
-
- dbg._pdebug('system session daemon port: {}'.format(sys_port))
- dbg._pdebug('user session daemon port: {}'.format(user_port))
-
- if sys_port == user_port and sys_port is not None:
- # The two session daemon ports are the same. This is not normal.
- # Connect to only one.
- dbg._pdebug('both user and system session daemon have the same port')
- sys_port = None
-
- try:
- if sys_port is not None:
- dbg._pdebug('creating system client thread')
- t = threading.Thread(target=_client_thread_target,
- args=('system', sys_port, reg_queue))
- t.name = 'system'
- t.daemon = True
- t.start()
- dbg._pdebug('created and started system client thread')
- reg_expecting += 1
-
- if user_port is not None:
- dbg._pdebug('creating user client thread')
- t = threading.Thread(target=_client_thread_target,
- args=('user', user_port, reg_queue))
- t.name = 'user'
- t.daemon = True
- t.start()
- dbg._pdebug('created and started user client thread')
- reg_expecting += 1
- except:
- # cannot create threads for some reason; stop this initialization
- dbg._pwarning('cannot create client threads')
- return
-
- if reg_expecting == 0:
- # early exit: looks like there's not even one valid port
- dbg._pwarning('no valid LTTng session daemon port found (is the session daemon started?)')
- return
-
- cur_timeout = _REG_TIMEOUT
-
- # We block here to make sure the agent is properly registered to
- # the session daemon. If we timeout, the client threads will still
- # continue to try to connect and register to the session daemon,
- # but there is no guarantee that all following logging statements
- # will make it to LTTng-UST.
- #
- # When a client thread receives a "registration done" confirmation
- # from the session daemon it's connected to, it puts True in
- # reg_queue.
- while True:
- try:
- dbg._pdebug('waiting for registration done (expecting {}, timeout is {} s)'.format(reg_expecting,
- cur_timeout))
- t1 = lttngust.compat._clock()
- reg_queue.get(timeout=cur_timeout)
- t2 = lttngust.compat._clock()
- reg_expecting -= 1
- dbg._pdebug('unblocked')
-
- if reg_expecting == 0:
- # done!
- dbg._pdebug('successfully registered to session daemon(s)')
- break
-
- cur_timeout -= (t2 - t1)
-
- if cur_timeout <= 0:
- # timeout
- dbg._pdebug('ran out of time')
- break
- except queue.Empty:
- dbg._pdebug('ran out of time')
- break
-
- dbg._pdebug('leaving')
-
-
-_init_threads()
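
The deadline bookkeeping above is a small reusable pattern: several client threads share one queue.Queue, and the waiter enforces a single overall deadline by shrinking each get() timeout by the time already spent. A minimal standalone sketch of the same pattern (names and delays are illustrative, not part of the agent):

    import queue
    import threading
    import time

    def wait_for_confirmations(q, expected, deadline_s):
        # One overall deadline shared across every get(), as in
        # _init_threads() above.
        remaining = deadline_s
        while expected > 0:
            t1 = time.perf_counter()
            try:
                q.get(timeout=remaining)
            except queue.Empty:
                return False                  # ran out of time
            expected -= 1
            remaining -= time.perf_counter() - t1
            if expected > 0 and remaining <= 0:
                return False                  # ran out of time
        return True

    q = queue.Queue()
    for delay in (0.1, 0.2):
        threading.Thread(target=lambda d=delay: (time.sleep(d), q.put(True)),
                         daemon=True).start()
    print(wait_for_confirmations(q, 2, 5.0))  # True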
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
-# Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
-# Copyright (C) 2015 Jérémie Galarneau <jeremie.galarneau@efficios.com>
-
-from __future__ import unicode_literals
-import lttngust.debug as dbg
-import struct
-
-
-# server command header
-_server_cmd_header_struct = struct.Struct('>QII')
-
-
-# server command header size
-_SERVER_CMD_HEADER_SIZE = _server_cmd_header_struct.size
-
-
-# agent protocol symbol size
-_LTTNG_SYMBOL_NAME_LEN = 256
-
-
-class _ServerCmdHeader(object):
- def __init__(self, data_size, cmd_id, cmd_version):
- self.data_size = data_size
- self.cmd_id = cmd_id
- self.cmd_version = cmd_version
-
-
-def _server_cmd_header_from_data(data):
- try:
- data_size, cmd_id, cmd_version = _server_cmd_header_struct.unpack(data)
- except Exception as e:
- dbg._pdebug('cannot decode command header: {}'.format(e))
- return None
-
- return _ServerCmdHeader(data_size, cmd_id, cmd_version)
-
-
-class _ServerCmd(object):
- def __init__(self, header):
- self.header = header
-
- @classmethod
- def from_data(cls, header, data):
- raise NotImplementedError()
-
-
-class _ServerCmdList(_ServerCmd):
- @classmethod
- def from_data(cls, header, data):
- return cls(header)
-
-
-class _ServerCmdEnable(_ServerCmd):
- _NAME_OFFSET = 8
- _loglevel_struct = struct.Struct('>II')
- # filter expression size
- _filter_exp_len_struct = struct.Struct('>I')
-
- def __init__(self, header, loglevel, loglevel_type, name, filter_exp):
- super(self.__class__, self).__init__(header)
- self.loglevel = loglevel
- self.loglevel_type = loglevel_type
- self.name = name
- self.filter_expression = filter_exp
- dbg._pdebug('server enable command {}'.format(self.__dict__))
-
- @classmethod
- def from_data(cls, header, data):
- try:
- loglevel, loglevel_type = cls._loglevel_struct.unpack_from(data)
- name_start = cls._loglevel_struct.size
- name_end = name_start + _LTTNG_SYMBOL_NAME_LEN
- data_name = data[name_start:name_end]
- name = data_name.rstrip(b'\0').decode()
-
- filter_exp_start = name_end + cls._filter_exp_len_struct.size
- filter_exp_len, = cls._filter_exp_len_struct.unpack_from(
- data[name_end:filter_exp_start])
- filter_exp_end = filter_exp_start + filter_exp_len
-
- filter_exp = data[filter_exp_start:filter_exp_end].rstrip(
- b'\0').decode()
-
- return cls(header, loglevel, loglevel_type, name, filter_exp)
- except Exception as e:
- dbg._pdebug('cannot decode enable command: {}'.format(e))
- return None
-
-
-class _ServerCmdDisable(_ServerCmd):
- def __init__(self, header, name):
- super(self.__class__, self).__init__(header)
- self.name = name
-
- @classmethod
- def from_data(cls, header, data):
- try:
- name = data.rstrip(b'\0').decode()
-
- return cls(header, name)
- except Exception as e:
- dbg._pdebug('cannot decode disable command: {}'.format(e))
- return None
-
-
-class _ServerCmdRegistrationDone(_ServerCmd):
- @classmethod
- def from_data(cls, header, data):
- return cls(header)
-
-
-_SERVER_CMD_ID_TO_SERVER_CMD = {
- 1: _ServerCmdList,
- 2: _ServerCmdEnable,
- 3: _ServerCmdDisable,
- 4: _ServerCmdRegistrationDone,
-}
-
-
-def _server_cmd_from_data(header, data):
- if header.cmd_id not in _SERVER_CMD_ID_TO_SERVER_CMD:
- return None
-
- return _SERVER_CMD_ID_TO_SERVER_CMD[header.cmd_id].from_data(header, data)
-
-
-_CLIENT_CMD_REPLY_STATUS_SUCCESS = 1
-_CLIENT_CMD_REPLY_STATUS_INVALID_CMD = 2
-
-
-class _ClientCmdReplyHeader(object):
- _payload_struct = struct.Struct('>I')
-
- def __init__(self, status_code=_CLIENT_CMD_REPLY_STATUS_SUCCESS):
- self.status_code = status_code
-
- def get_data(self):
- return self._payload_struct.pack(self.status_code)
-
-
-class _ClientCmdReplyEnable(_ClientCmdReplyHeader):
- pass
-
-
-class _ClientCmdReplyDisable(_ClientCmdReplyHeader):
- pass
-
-
-class _ClientCmdReplyList(_ClientCmdReplyHeader):
- _nb_events_struct = struct.Struct('>I')
- _data_size_struct = struct.Struct('>I')
-
- def __init__(self, names, status_code=_CLIENT_CMD_REPLY_STATUS_SUCCESS):
- super(self.__class__, self).__init__(status_code)
- self.names = names
-
- def get_data(self):
- upper_data = super(self.__class__, self).get_data()
- nb_events_data = self._nb_events_struct.pack(len(self.names))
- names_data = bytes()
-
- for name in self.names:
- names_data += name.encode() + b'\0'
-
- data_size_data = self._data_size_struct.pack(len(names_data))
-
- return upper_data + data_size_data + nb_events_data + names_data
-
-
-class _ClientRegisterCmd(object):
- _payload_struct = struct.Struct('>IIII')
-
- def __init__(self, domain, pid, major, minor):
- self.domain = domain
- self.pid = pid
- self.major = major
- self.minor = minor
-
- def get_data(self):
- return self._payload_struct.pack(self.domain, self.pid, self.major,
- self.minor)
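
Everything above is fixed-layout, big-endian framing, so the byte streams are easy to inspect with struct alone. A hedged sketch of what crosses the socket (all numeric values, including the domain, are made up for illustration):

    import struct

    # Client registration payload (>IIII): domain, pid, major, minor.
    reg = struct.pack('>IIII', 5, 1234, 2, 0)
    assert len(reg) == 16

    # Server command header (>QII): payload size, command id, version.
    # Command id 2 maps to _ServerCmdEnable in the table above.
    raw = struct.pack('>QII', 264, 2, 1)
    data_size, cmd_id, cmd_version = struct.Struct('>QII').unpack(raw)
    print(data_size, cmd_id, cmd_version)  # 264 2 1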
+++ /dev/null
-# -*- coding: utf-8 -*-
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2020 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
-
-import sys
-import time
-
-
-# Compatibility wrapper for time.clock(), which was deprecated in
-# Python 3.3 and removed in Python 3.8.
-# See PEP 418 for more details.
-def _clock():
- if sys.version_info >= (3, 3):
- clock = time.perf_counter()
- else:
- clock = time.clock()
- return clock
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
-
-from __future__ import unicode_literals, print_function
-import lttngust.compat
-import time
-import sys
-import os
-
-
-_ENABLE_DEBUG = os.getenv('LTTNG_UST_PYTHON_DEBUG', '0') == '1'
-
-
-if _ENABLE_DEBUG:
- import inspect
-
- def _pwarning(msg):
- fname = inspect.stack()[1][3]
- fmt = '[{:.6f}] LTTng-UST warning: {}(): {}'
- print(fmt.format(lttngust.compat._clock(), fname, msg), file=sys.stderr)
-
- def _pdebug(msg):
- fname = inspect.stack()[1][3]
- fmt = '[{:.6f}] LTTng-UST debug: {}(): {}'
- print(fmt.format(lttngust.compat._clock(), fname, msg), file=sys.stderr)
-
- _pdebug('debug is enabled')
-else:
- def _pwarning(msg):
- pass
-
- def _pdebug(msg):
- pass
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
-# Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
-
-from __future__ import unicode_literals
-import logging
-import ctypes
-
-from .version import __soname_major__
-
-class _Handler(logging.Handler):
- _LIB_NAME = 'liblttng-ust-python-agent.so.' + __soname_major__
-
- def __init__(self):
- super(self.__class__, self).__init__(level=logging.NOTSET)
- self.setFormatter(logging.Formatter('%(asctime)s'))
-
- # will raise if library is not found: caller should catch
- self.agent_lib = ctypes.cdll.LoadLibrary(_Handler._LIB_NAME)
-
- def emit(self, record):
- self.agent_lib.py_tracepoint(self.format(record).encode(),
- record.getMessage().encode(),
- record.name.encode(),
- record.funcName.encode(),
- record.lineno, record.levelno,
- record.thread,
- record.threadName.encode())
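
Since _Handler subclasses logging.Handler, it attaches to a logger through the stock addHandler() API. A stand-in sketch of that wiring (the stub handler below is illustrative and avoids loading the native library):

    import logging

    class _StubHandler(logging.Handler):
        def emit(self, record):
            # The real handler forwards these fields through ctypes to
            # liblttng-ust-python-agent; the stub just prints a few of them.
            print(self.format(record), record.name, record.funcName)

    logging.getLogger().addHandler(_StubHandler())
    logging.warning('reaches every handler attached to the root logger')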
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
-
-__version__ = '@PACKAGE_VERSION@'
-__soname_major__ = '@LTTNG_UST_LIB_SONAME_MAJOR@'
+++ /dev/null
-# -*- coding: utf-8 -*-
-#
-# SPDX-License-Identifier: LGPL-2.1-only
-#
-# Copyright (C) 2015 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
-
-import os
-import sys
-
-from distutils.core import setup, Extension
-
-PY_PATH_WARN_MSG = """
--------------------------------------WARNING------------------------------------
-The install directory used:\n ({0})\nis not included in your PYTHONPATH.
-
-To add this directory to your Python search path permanently you can add the
-following command to your .bashrc/.zshrc:
- export PYTHONPATH="${{PYTHONPATH}}:{0}"
---------------------------------------------------------------------------------
-"""
-
-def main():
- dist = setup(name='lttngust',
- version='@PACKAGE_VERSION@',
- description='LTTng-UST Python agent',
- packages=['lttngust'],
- package_dir={'lttngust': 'lttngust'},
- options={'build': {'build_base': 'build'}},
- url='http://lttng.org',
- license='LGPL-2.1',
- classifiers=[
- 'Development Status :: 5 - Production/Stable',
- 'Intended Audience :: Developers',
- 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
- 'Programming Language :: Python :: 2.7',
- 'Programming Language :: Python :: 3',
- 'Topic :: System :: Logging',
- ])
-
-# After the installation, we check that the install directory is included in
-# the Python search path, and we print a warning message when it's not. We need
-# to do this because the Python search path differs from distro to distro, and
-# some distros don't include `/usr/local/` (the default install prefix) in the
-# search path at all. This is also useful for out-of-tree installs and tests.
-# This check is only relevant to the `install` command.
-
- if 'install' in dist.command_obj:
- install_dir = dist.command_obj['install'].install_libbase
- if install_dir not in sys.path:
- # We can't consider this an error because it affects every
- # distro differently. We only warn the user that some
- # extra configuration is needed to use the agent.
- abs_install_dir = os.path.abspath(install_dir)
- print(PY_PATH_WARN_MSG.format(abs_install_dir))
-
-if __name__ == '__main__':
- main()
+++ /dev/null
-# SPDX-License-Identifier: LGPL-2.1-only
-
-AM_CPPFLAGS += -I$(top_srcdir)/libustcomm
-AM_CFLAGS += -fno-strict-aliasing
-
-noinst_LTLIBRARIES = libustsnprintf.la
-libustsnprintf_la_SOURCES = \
- fflush.c \
- fileext.h \
- floatio.h \
- fvwrite.c \
- fvwrite.h \
- local.h \
- mbrtowc_sb.c \
- snprintf.c \
- various.h \
- vfprintf.c \
- wcio.h \
- wsetup.c \
- core.c \
- patient_write.c
-
-libustsnprintf_la_LDFLAGS = -no-undefined -static
-libustsnprintf_la_CFLAGS = -DUST_COMPONENT="lttng_ust_snprintf" -fPIC $(AM_CFLAGS)
-
-EXTRA_DIST = README
+++ /dev/null
-This is a signal-safe version of snprintf/vsnprintf. The code is
-originally from the OpenBSD libc.
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-only
- *
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <usterr-signal-safe.h>
-
-volatile enum ust_err_loglevel ust_err_loglevel;
-
-void ust_err_init(void)
-{
- char *ust_debug;
-
- if (ust_err_loglevel == UST_ERR_LOGLEVEL_UNKNOWN) {
- /*
- * This getenv is not part of lttng_ust_getenv() because it
- * is required to print ERR() performed during getenv
- * initialization.
- */
- ust_debug = getenv("LTTNG_UST_DEBUG");
- if (ust_debug)
- ust_err_loglevel = UST_ERR_LOGLEVEL_DEBUG;
- else
- ust_err_loglevel = UST_ERR_LOGLEVEL_NORMAL;
- }
-}
+++ /dev/null
-/* $OpenBSD: fflush.c,v 1.7 2009/10/22 01:23:16 guenther Exp $ */
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-#include <errno.h>
-#include <stdio.h>
-#include "local.h"
-
-/* Flush a single file, or (if fp is NULL) all files. */
-int ust_safe_fflush(LTTNG_UST_LFILE *fp)
-{
-
- if (fp == NULL)
- return 0;
-// return (_fwalk(__sflush));
- if ((fp->_flags & (__SWR | __SRW)) == 0) {
- errno = EBADF;
- return (EOF);
- }
- return (__sflush(fp));
-}
-
-int
-__sflush(LTTNG_UST_LFILE *fp)
-{
- unsigned char *p;
- int n, t;
-
- t = fp->_flags;
- if ((t & __SWR) == 0)
- return (0);
-
- if ((p = fp->_bf._base) == NULL)
- return (0);
-
- n = fp->_p - p; /* write this much */
-
- /*
- * Set these immediately to avoid problems with longjmp and to allow
- * exchange buffering (via setvbuf) in user write function.
- */
- fp->_p = p;
- fp->_w = t & (__SLBF|__SNBF) ? 0 : fp->_bf._size;
-
- for (; n > 0; n -= t, p += t) {
- t = (*fp->_write)(fp->_cookie, (char *)p, n);
- if (t <= 0) {
- fp->_flags |= __SERR;
- return (EOF);
- }
- }
- return (0);
-}
+++ /dev/null
-/* $OpenBSD: fileext.h,v 1.2 2005/06/17 20:40:32 espie Exp $ */
-/* $NetBSD: fileext.h,v 1.5 2003/07/18 21:46:41 nathanw Exp $ */
-
-/*
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (C)2001 Citrus Project,
- * All rights reserved.
- *
- * $Citrus$
- */
-
-/*
- * file extension
- */
-struct __lttng_ust_sfileext {
- struct __lttng_ust_sbuf _ub; /* ungetc buffer */
- struct wchar_io_data _wcio; /* wide char io status */
-};
-
-#define _EXT(fp) ((struct __lttng_ust_sfileext *)((fp)->_ext._base))
-#define _UB(fp) _EXT(fp)->_ub
-
-#define _FILEEXT_INIT(fp) \
-do { \
- _UB(fp)._base = NULL; \
- _UB(fp)._size = 0; \
- WCIO_INIT(fp); \
-} while (0)
-
-#define _FILEEXT_SETUP(f, fext) \
-do { \
- (f)->_ext._base = (unsigned char *)(fext); \
- _FILEEXT_INIT(f); \
-} while (0)
+++ /dev/null
-/* $OpenBSD: floatio.h,v 1.4 2008/09/07 20:36:08 martynas Exp $ */
-
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-/*
- * Floating point scanf/printf (input/output) definitions.
- */
-
-/* 11-bit exponent (VAX G floating point) is 308 decimal digits */
-#define MAXEXP 308
-/* 128 bit fraction takes up 39 decimal digits; max reasonable precision */
-#define MAXFRACT 39
-
-/*
- * MAXEXPDIG is the maximum number of decimal digits needed to store a
- * floating point exponent in the largest supported format. It should
- * be ceil(log10(LDBL_MAX_10_EXP)) or, if hexadecimal floating point
- * conversions are supported, ceil(log10(LDBL_MAX_EXP)). But since it
- * is presently never greater than 5 in practice, we fudge it.
- */
-#define MAXEXPDIG 6
-#if LDBL_MAX_EXP > 999999
-#error "floating point buffers too small"
-#endif
-
-char *__hdtoa(double, const char *, int, int *, int *, char **)
- __attribute__((visibility("hidden")));
-
-char *__hldtoa(long double, const char *, int, int *, int *, char **)
- __attribute__((visibility("hidden")));
-
-char *__ldtoa(long double *, int, int, int *, int *, char **)
- __attribute__((visibility("hidden")));
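
As a quick sanity check of that fudge: on x86-64, LDBL_MAX_10_EXP is 4932, so the largest exponent needs ceil(log10(4932)) = 4 decimal digits, comfortably below the MAXEXPDIG of 6:

    import math
    print(math.ceil(math.log10(4932)))  # 4 <= 6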
+++ /dev/null
-/* $OpenBSD: fvwrite.c,v 1.16 2009/10/22 01:23:16 guenther Exp $ */
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-#include "local.h"
-#include "fvwrite.h"
-#include "various.h"
-
-/*
- * Write some memory regions. Return zero on success, EOF on error.
- *
- * This routine is large and unsightly, but most of the ugliness due
- * to the three different kinds of output buffering is handled here.
- */
-int
-__sfvwrite(LTTNG_UST_LFILE *fp, struct __lttng_ust_suio *uio)
-{
- size_t len;
- char *p;
- struct __lttng_ust_siov *iov;
- int w, s;
- char *nl;
- int nlknown, nldist;
-
- if ((len = uio->uio_resid) == 0)
- return (0);
- /* make sure we can write */
- if (cantwrite(fp)) {
- errno = EBADF;
- return (EOF);
- }
-
-#define MIN(a, b) ((a) < (b) ? (a) : (b))
-#define COPY(n) (void)memcpy((void *)fp->_p, (void *)p, (size_t)(n))
-
- iov = uio->uio_iov;
- p = iov->iov_base;
- len = iov->iov_len;
- iov++;
-#define GETIOV(extra_work) \
- while (len == 0) { \
- extra_work; \
- p = iov->iov_base; \
- len = iov->iov_len; \
- iov++; \
- }
- if (fp->_flags & __SNBF) {
- /*
- * Unbuffered: write up to BUFSIZ bytes at a time.
- */
- do {
- GETIOV(;);
- w = (*fp->_write)(fp->_cookie, p, MIN(len, BUFSIZ));
- if (w <= 0)
- goto err;
- p += w;
- len -= w;
- } while ((uio->uio_resid -= w) != 0);
- } else if ((fp->_flags & __SLBF) == 0) {
- /*
- * Fully buffered: fill partially full buffer, if any,
- * and then flush. If there is no partial buffer, write
- * one _bf._size byte chunk directly (without copying).
- *
- * String output is a special case: write as many bytes
- * as fit, but pretend we wrote everything. This makes
- * snprintf() return the number of bytes needed, rather
- * than the number used, and avoids its write function
- * (so that the write function can be invalid).
- */
- do {
- GETIOV(;);
- if ((fp->_flags & (__SALC | __SSTR)) ==
- (__SALC | __SSTR) && fp->_w < len) {
- size_t blen = fp->_p - fp->_bf._base;
- unsigned char *_base;
- int _size;
-
- /* Allocate space exponentially. */
- _size = fp->_bf._size;
- do {
- _size = (_size << 1) + 1;
- } while (_size < blen + len);
- _base = realloc(fp->_bf._base, _size + 1);
- if (_base == NULL)
- goto err;
- fp->_w += _size - fp->_bf._size;
- fp->_bf._base = _base;
- fp->_bf._size = _size;
- fp->_p = _base + blen;
- }
- w = fp->_w;
- if (fp->_flags & __SSTR) {
- if (len < w)
- w = len;
- COPY(w); /* copy MIN(fp->_w,len), */
- fp->_w -= w;
- fp->_p += w;
- w = len; /* but pretend copied all */
- } else if (fp->_p > fp->_bf._base && len > w) {
- /* fill and flush */
- COPY(w);
- /* fp->_w -= w; */ /* unneeded */
- fp->_p += w;
- if (ust_safe_fflush(fp))
- goto err;
- } else if (len >= (w = fp->_bf._size)) {
- /* write directly */
- w = (*fp->_write)(fp->_cookie, p, w);
- if (w <= 0)
- goto err;
- } else {
- /* fill and done */
- w = len;
- COPY(w);
- fp->_w -= w;
- fp->_p += w;
- }
- p += w;
- len -= w;
- } while ((uio->uio_resid -= w) != 0);
- } else {
- /*
- * Line buffered: like fully buffered, but we
- * must check for newlines. Compute the distance
- * to the first newline (including the newline),
- * or `infinity' if there is none, then pretend
- * that the amount to write is MIN(len,nldist).
- */
- nlknown = 0;
- nldist = 0; /* XXX just to keep gcc happy */
- do {
- GETIOV(nlknown = 0);
- if (!nlknown) {
- nl = memchr((void *)p, '\n', len);
- nldist = nl ? nl + 1 - p : len + 1;
- nlknown = 1;
- }
- s = MIN(len, nldist);
- w = fp->_w + fp->_bf._size;
- if (fp->_p > fp->_bf._base && s > w) {
- COPY(w);
- /* fp->_w -= w; */
- fp->_p += w;
- if (ust_safe_fflush(fp))
- goto err;
- } else if (s >= (w = fp->_bf._size)) {
- w = (*fp->_write)(fp->_cookie, p, w);
- if (w <= 0)
- goto err;
- } else {
- w = s;
- COPY(w);
- fp->_w -= w;
- fp->_p += w;
- }
- if ((nldist -= w) == 0) {
- /* copied the newline: flush and forget */
- if (ust_safe_fflush(fp))
- goto err;
- nlknown = 0;
- }
- p += w;
- len -= w;
- } while ((uio->uio_resid -= w) != 0);
- }
- return (0);
-
-err:
- fp->_flags |= __SERR;
- return (EOF);
-}
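
The (_size << 1) + 1 rule in the string-growth branch above roughly doubles the buffer on every pass, so the number of realloc() calls stays logarithmic in the final string length. A quick illustration of the schedule (the 128-byte starting size is arbitrary):

    size, steps = 128, 0
    while size < 1_000_000:
        size = (size << 1) + 1  # same growth rule as __sfvwrite()
        steps += 1
    print(steps, size)  # 13 reallocations to exceed 1_000_000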
+++ /dev/null
-/* $OpenBSD: fvwrite.h,v 1.5 2003/06/02 20:18:37 millert Exp $ */
-
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-/*
- * I/O descriptors for __sfvwrite().
- */
-#include <stddef.h>
-
-struct __lttng_ust_siov {
- void *iov_base;
- size_t iov_len;
-};
-struct __lttng_ust_suio {
- struct __lttng_ust_siov *uio_iov;
- int uio_iovcnt;
- int uio_resid;
-};
-
-extern int __sfvwrite(LTTNG_UST_LFILE *, struct __lttng_ust_suio *)
- __attribute__((visibility("hidden")));
+++ /dev/null
-/* $OpenBSD: local.h,v 1.14 2009/10/22 01:23:16 guenther Exp $ */
-
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-/*
- * Information local to this implementation of stdio,
- * in particular, macros and private variables.
- */
-
-#include <stdio.h>
-#include <wchar.h>
-#include "various.h"
-#include "wcio.h"
-#include "fileext.h"
-
-int __sflush(LTTNG_UST_LFILE *)
- __attribute__((visibility("hidden")));
-
-LTTNG_UST_LFILE *__sfp(void)
- __attribute__((visibility("hidden")));
-
-int __srefill(LTTNG_UST_LFILE *)
- __attribute__((visibility("hidden")));
-
-int __sread(void *, char *, int)
- __attribute__((visibility("hidden")));
-
-int __swrite(void *, const char *, int)
- __attribute__((visibility("hidden")));
-
-fpos_t __sseek(void *, fpos_t, int)
- __attribute__((visibility("hidden")));
-
-int __sclose(void *)
- __attribute__((visibility("hidden")));
-
-void __sinit(void)
- __attribute__((visibility("hidden")));
-
-void _cleanup(void)
- __attribute__((visibility("hidden")));
-
-void __smakebuf(LTTNG_UST_LFILE *)
- __attribute__((visibility("hidden")));
-
-int __swhatbuf(LTTNG_UST_LFILE *, size_t *, int *)
- __attribute__((visibility("hidden")));
-
-int _fwalk(int (*)(LTTNG_UST_LFILE *))
- __attribute__((visibility("hidden")));
-
-int __swsetup(LTTNG_UST_LFILE *)
- __attribute__((visibility("hidden")));
-
-int __sflags(const char *, int *)
- __attribute__((visibility("hidden")));
-
-wint_t __fgetwc_unlock(LTTNG_UST_LFILE *)
- __attribute__((visibility("hidden")));
-
-extern void __atexit_register_cleanup(void (*)(void))
- __attribute__((visibility("hidden")));
-
-extern int __sdidinit
- __attribute__((visibility("hidden")));
-
-/*
- * Return true if the given LTTNG_UST_LFILE cannot be written now.
- */
-#define cantwrite(fp) \
- ((((fp)->_flags & __SWR) == 0 || (fp)->_bf._base == NULL) && \
- __swsetup(fp))
-
-/*
- * Test whether the given stdio file has an active ungetc buffer;
- * release such a buffer, without restoring ordinary unread data.
- */
-#define HASUB(fp) (_UB(fp)._base != NULL)
-#define FREEUB(fp) { \
- if (_UB(fp)._base != (fp)->_ubuf) \
- free(_UB(fp)._base); \
- _UB(fp)._base = NULL; \
-}
-
-/*
- * test for an fgetln() buffer.
- */
-#define HASLB(fp) ((fp)->_lb._base != NULL)
-#define FREELB(fp) { \
- free((char *)(fp)->_lb._base); \
- (fp)->_lb._base = NULL; \
-}
+++ /dev/null
-/* $OpenBSD: mbrtowc_sb.c,v 1.4 2005/11/27 20:03:06 cloder Exp $ */
-/* $NetBSD: multibyte_sb.c,v 1.4 2003/08/07 16:43:04 agc Exp $ */
-
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1991 The Regents of the University of California.
- * All rights reserved.
- */
-
-#include <errno.h>
-#include <stdlib.h>
-#include <wchar.h>
-
-#include "various.h"
-
-/*ARGSUSED*/
-size_t
-ust_safe_mbrtowc(wchar_t *pwc, const char *s, size_t n,
- mbstate_t *ps __attribute__((unused)))
-{
-
- /* pwc may be NULL */
- /* s may be NULL */
- /* ps appears to be unused */
-
- if (s == NULL)
- return 0;
- if (n == 0)
- return (size_t)-1;
- if (pwc)
- *pwc = (wchar_t)(unsigned char)*s;
- return (*s != '\0');
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: LGPL-2.1-or-later
- *
- * Copyright (C) 2009 Pierre-Marc Fournier
- * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#include <stddef.h>
-
-/* write() */
-#include <unistd.h>
-
-/* writev() */
-#include <sys/uio.h>
-
-/* send() */
-#include <sys/types.h>
-#include <sys/socket.h>
-
-#include <errno.h>
-
-#include <ust-share.h>
-
-/*
- * This write is patient because it restarts if it was incomplete.
- */
-
-ssize_t ust_patient_write(int fd, const void *buf, size_t count)
-{
- const char *bufc = (const char *) buf;
- int result;
-
- for(;;) {
- result = write(fd, bufc, count);
- if (result == -1 && errno == EINTR) {
- continue;
- }
- if (result <= 0) {
- return result;
- }
- count -= result;
- bufc += result;
-
- if (count == 0) {
- break;
- }
- }
-
- return bufc - (const char *) buf;
-}
-
-/*
- * The `struct iovec *iov` is not `const` because we modify it to support
- * partial writes.
- */
-ssize_t ust_patient_writev(int fd, struct iovec *iov, int iovcnt)
-{
- ssize_t written, total_written = 0;
- int curr_element_idx = 0;
-
- for(;;) {
- written = writev(fd, iov + curr_element_idx,
- iovcnt - curr_element_idx);
- if (written == -1 && errno == EINTR) {
- continue;
- }
- if (written <= 0) {
- return written;
- }
-
- total_written += written;
-
- /*
- * Advance past every element that was written in full, until we
- * reach the first element that was only partially written (if
- * any); the next writev() resumes from there.
- */
- while (curr_element_idx < iovcnt &&
- written >= iov[curr_element_idx].iov_len) {
- written -= iov[curr_element_idx].iov_len;
- curr_element_idx++;
- }
-
- /* Maybe we are done. */
- if (curr_element_idx >= iovcnt) {
- break;
- }
-
- /* Update the current element base and size. */
- iov[curr_element_idx].iov_base += written;
- iov[curr_element_idx].iov_len -= written;
- }
-
- return total_written;
-}
-
-ssize_t ust_patient_send(int fd, const void *buf, size_t count, int flags)
-{
- const char *bufc = (const char *) buf;
- int result;
-
- for(;;) {
- result = send(fd, bufc, count, flags);
- if (result == -1 && errno == EINTR) {
- continue;
- }
- if (result <= 0) {
- return result;
- }
- count -= result;
- bufc += result;
-
- if (count == 0) {
- break;
- }
- }
-
- return bufc - (const char *) buf;
-}
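
All three loops above share the same "patient" idea: restart on EINTR and keep pushing the unwritten tail until the count reaches zero. A Python sketch of the ust_patient_write() shape (illustrative only; Python 3.5+ already retries EINTR internally per PEP 475, so the explicit handler just mirrors the C loop):

    import os

    def patient_write(fd, buf):
        # Resume after partial writes until every byte is out.
        view = memoryview(buf)
        written = 0
        while written < len(buf):
            try:
                n = os.write(fd, view[written:])
            except InterruptedError:
                continue  # EINTR: restart, as in the C loop
            if n == 0:
                break     # refuse to spin on a zero-byte write
            written += n
        return written

    r, w = os.pipe()
    print(patient_write(w, b'hello'))  # 5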
+++ /dev/null
-/* $OpenBSD: snprintf.c,v 1.16 2009/10/22 01:23:16 guenther Exp $ */
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-#include <limits.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <string.h>
-#include <stdarg.h>
-#include "local.h"
-#include "ust-snprintf.h"
-
-#define DUMMY_LEN 1
-
-int ust_safe_vsnprintf(char *str, size_t n, const char *fmt, va_list ap)
-{
- int ret;
- char dummy[DUMMY_LEN];
- LTTNG_UST_LFILE f;
- struct __lttng_ust_sfileext fext;
-
- /* While snprintf(3) specifies size_t, stdio uses an int internally */
- if (n > INT_MAX)
- n = INT_MAX;
- /* Stdio internals do not deal correctly with zero length buffer */
- if (n == 0) {
- str = dummy;
- n = DUMMY_LEN;
- }
- _FILEEXT_SETUP(&f, &fext);
- f._file = -1;
- f._flags = __SWR | __SSTR;
- f._bf._base = f._p = (unsigned char *)str;
- f._bf._size = f._w = n - 1;
- ret = ust_safe_vfprintf(&f, fmt, ap);
- *f._p = '\0';
- return (ret);
-}
-
-int ust_safe_snprintf(char *str, size_t n, const char *fmt, ...)
-{
- va_list ap;
- int ret;
-
- va_start(ap, fmt);
- ret = ust_safe_vsnprintf(str, n, fmt, ap);
- va_end(ap);
-
- return ret;
-}
+++ /dev/null
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990 The Regents of the University of California.
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-#ifndef UST_SNPRINTF_VARIOUS_H
-#define UST_SNPRINTF_VARIOUS_H
-
-#include <stdarg.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <wchar.h>
-
-struct __lttng_ust_sbuf {
- unsigned char *_base;
- int _size;
-};
-
-/*
- * stdio state variables.
- *
- * The following always hold:
- *
- * if (_flags&(__SLBF|__SWR)) == (__SLBF|__SWR),
- * _lbfsize is -_bf._size, else _lbfsize is 0
- * if _flags&__SRD, _w is 0
- * if _flags&__SWR, _r is 0
- *
- * This ensures that the getc and putc macros (or inline functions) never
- * try to write or read from a file that is in `read' or `write' mode.
- * (Moreover, they can, and do, automatically switch from read mode to
- * write mode, and back, on "r+" and "w+" files.)
- *
- * _lbfsize is used only to make the inline line-buffered output stream
- * code as compact as possible.
- *
- * _ub, _up, and _ur are used when ungetc() pushes back more characters
- * than fit in the current _bf, or when ungetc() pushes back a character
- * that does not match the previous one in _bf. When this happens,
- * _ub._base becomes non-nil (i.e., a stream has ungetc() data iff
- * _ub._base!=NULL) and _up and _ur save the current values of _p and _r.
- */
-typedef struct __lttng_ust_sFILE {
- unsigned char *_p; /* current position in (some) buffer */
- int _r; /* read space left for getc() */
- int _w; /* write space left for putc() */
- short _flags; /* flags, below; this FILE is free if 0 */
- short _file; /* fileno, if Unix descriptor, else -1 */
- struct __lttng_ust_sbuf _bf; /* the buffer (at least 1 byte, if !NULL) */
- int _lbfsize; /* 0 or -_bf._size, for inline putc */
-
- /* operations */
- void *_cookie; /* cookie passed to io functions */
- int (*_close)(void *);
- int (*_read)(void *, char *, int);
- fpos_t (*_seek)(void *, fpos_t, int);
- int (*_write)(void *, const char *, int);
-
- /* extension data, to avoid further ABI breakage */
- struct __lttng_ust_sbuf _ext;
- /* data for long sequences of ungetc() */
- unsigned char *_up; /* saved _p when _p is doing ungetc data */
- int _ur; /* saved _r when _r is counting ungetc data */
-
- /* tricks to meet minimum requirements even when malloc() fails */
- unsigned char _ubuf[3]; /* guarantee an ungetc() buffer */
- unsigned char _nbuf[1]; /* guarantee a getc() buffer */
-
- /* separate buffer for fgetln() when line crosses buffer boundary */
- struct __lttng_ust_sbuf _lb; /* buffer for fgetln() */
-
- /* Unix stdio files get aligned to block boundaries on fseek() */
- int _blksize; /* stat.st_blksize (may be != _bf._size) */
- fpos_t _offset; /* current lseek offset */
-} LTTNG_UST_LFILE;
-
-#define __SLBF 0x0001 /* line buffered */
-#define __SNBF 0x0002 /* unbuffered */
-#define __SRD 0x0004 /* OK to read */
-#define __SWR 0x0008 /* OK to write */
- /* RD and WR are never simultaneously asserted */
-#define __SRW 0x0010 /* open for reading & writing */
-#define __SEOF 0x0020 /* found EOF */
-#define __SERR 0x0040 /* found error */
-#define __SMBF 0x0080 /* _buf is from malloc */
-#define __SAPP 0x0100 /* fdopen()ed in append mode */
-#define __SSTR 0x0200 /* this is an sprintf/snprintf string */
-#define __SOPT 0x0400 /* do fseek() optimisation */
-#define __SNPT 0x0800 /* do not do fseek() optimisation */
-#define __SOFF 0x1000 /* set iff _offset is in fact correct */
-#define __SMOD 0x2000 /* true => fgetln modified _p text */
-#define __SALC 0x4000 /* allocate string space dynamically */
-
-#define __sferror(p) (((p)->_flags & __SERR) != 0)
-
-extern int ust_safe_fflush(LTTNG_UST_LFILE *fp)
- __attribute__((visibility("hidden")));
-
-extern int ust_safe_vfprintf(LTTNG_UST_LFILE *fp, const char *fmt0, va_list ap)
- __attribute__((visibility("hidden")));
-
-extern size_t ust_safe_mbrtowc(wchar_t *pwc, const char *s, size_t n, mbstate_t *ps)
- __attribute__((visibility("hidden")));
-
-#endif /* UST_SNPRINTF_VARIOUS_H */
+++ /dev/null
-/* $OpenBSD: vfprintf.c,v 1.57 2009/10/28 21:15:02 naddy Exp $ */
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990 The Regents of the University of California.
- * All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-/*
- * Actual printf innards.
- *
- * This code is large and complicated...
- */
-
-//#define FLOATING_POINT
-
-#include <sys/types.h>
-#include <sys/mman.h>
-
-#include <errno.h>
-#include <limits.h>
-#include <stdarg.h>
-#include <stddef.h>
-#include <stdio.h>
-#include <stdint.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <wchar.h>
-
-#include "local.h"
-#include "fvwrite.h"
-#include "various.h"
-
-static char null_str[] = "(null)";
-static char bad_base_str[] = "bug in ust_safe_vfprintf: bad base";
-
-union arg {
- int intarg;
- unsigned int uintarg;
- long longarg;
- unsigned long ulongarg;
- long long longlongarg;
- unsigned long long ulonglongarg;
- ptrdiff_t ptrdiffarg;
- size_t sizearg;
- size_t ssizearg;
- intmax_t intmaxarg;
- uintmax_t uintmaxarg;
- void *pvoidarg;
- char *pchararg;
- signed char *pschararg;
- short *pshortarg;
- int *pintarg;
- long *plongarg;
- long long *plonglongarg;
- ptrdiff_t *pptrdiffarg;
- ssize_t *pssizearg;
- intmax_t *pintmaxarg;
-#ifdef FLOATING_POINT
- double doublearg;
- long double longdoublearg;
-#endif
-};
-
-static int __find_arguments(const char *fmt0, va_list ap, union arg **argtable,
- size_t *argtablesiz);
-static int __grow_type_table(unsigned char **typetable, int *tablesize);
-
-/*
- * Flush out all the vectors defined by the given uio,
- * then reset it so that it can be reused.
- */
-static int
-__sprint(LTTNG_UST_LFILE *fp, struct __lttng_ust_suio *uio)
-{
- int err;
-
- if (uio->uio_resid == 0) {
- uio->uio_iovcnt = 0;
- return (0);
- }
- err = __sfvwrite(fp, uio);
- uio->uio_resid = 0;
- uio->uio_iovcnt = 0;
- return (err);
-}
-
-/*
- * Helper function for `fprintf to unbuffered unix file': creates a
- * temporary buffer. We only work on write-only files; this avoids
- * worries about ungetc buffers and so forth.
- */
-//static int
-//__sbprintf(LTTNG_UST_LFILE *fp, const char *fmt, va_list ap)
-//{
-// int ret;
-// LTTNG_UST_LFILE fake;
-// struct __sfileext fakeext;
-// unsigned char buf[BUFSIZ];
-//
-// _FILEEXT_SETUP(&fake, &fakeext);
-// /* copy the important variables */
-// fake._flags = fp->_flags & ~__SNBF;
-// fake._file = fp->_file;
-// fake._cookie = fp->_cookie;
-// fake._write = fp->_write;
-//
-// /* set up the buffer */
-// fake._bf._base = fake._p = buf;
-// fake._bf._size = fake._w = sizeof(buf);
-// fake._lbfsize = 0; /* not actually used, but Just In Case */
-//
-// /* do the work, then copy any error status */
-// ret = ust_safe_vfprintf(&fake, fmt, ap);
-// if (ret >= 0 && fflush(&fake))
-// ret = EOF;
-// if (fake._flags & __SERR)
-// fp->_flags |= __SERR;
-// return (ret);
-//}
-
-
-#ifdef FLOATING_POINT
-#include <float.h>
-#include <locale.h>
-#include <math.h>
-#include "floatio.h"
-
-#define DEFPREC 6
-
-extern char *__dtoa(double, int, int, int *, int *, char **);
-extern void __freedtoa(char *);
-static int exponent(char *, int, int);
-#endif /* FLOATING_POINT */
-
-/*
- * The size of the buffer we use as scratch space for integer
- * conversions, among other things. Technically, we would need the
- * most space for base 10 conversions with thousands' grouping
- * characters between each pair of digits. 100 bytes is a
- * conservative overestimate even for a 128-bit uintmax_t.
- */
-#define BUF 100
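
To see why 100 bytes is conservative: the largest 128-bit value has 39 decimal digits, and even a pathological locale that inserts a grouping character between every pair of digits only pushes that to 39 + 38 = 77 bytes:

    print(len(str(2**128 - 1)))  # 39 decimal digits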
-
-#define STATIC_ARG_TBL_SIZE 8 /* Size of static argument table. */
-
-
-/*
- * Macros for converting digits to letters and vice versa
- */
-#define to_digit(c) ((c) - '0')
-#define is_digit(c) ((unsigned)to_digit(c) <= 9)
-#define to_char(n) ((n) + '0')
-
-/*
- * Flags used during conversion.
- */
-#define ALT 0x0001 /* alternate form */
-#define LADJUST 0x0004 /* left adjustment */
-#define LONGDBL 0x0008 /* long double; unimplemented */
-#define LONGINT 0x0010 /* long integer */
-#define LLONGINT 0x0020 /* long long integer */
-#define SHORTINT 0x0040 /* short integer */
-#define ZEROPAD 0x0080 /* zero (as opposed to blank) pad */
-#define FPT 0x0100 /* Floating point number */
-#define PTRINT 0x0200 /* (unsigned) ptrdiff_t */
-#define SIZEINT 0x0400 /* (signed) size_t */
-#define CHARINT 0x0800 /* 8 bit integer */
-#define MAXINT 0x1000 /* largest integer size (intmax_t) */
-
-int ust_safe_vfprintf(LTTNG_UST_LFILE *fp, const char *fmt0, va_list ap)
-{
- char *fmt; /* format string */
- int ch; /* character from fmt */
- int n, n2; /* handy integers (short term usage) */
- char *cp; /* handy char pointer (short term usage) */
- struct __lttng_ust_siov *iovp; /* for PRINT macro */
- int flags; /* flags as above */
- int ret; /* return value accumulator */
- int width; /* width from format (%8d), or 0 */
- int prec; /* precision from format; <0 for N/A */
- char sign; /* sign prefix (' ', '+', '-', or \0) */
- wchar_t wc;
- mbstate_t ps;
-#ifdef FLOATING_POINT
- /*
- * We can decompose the printed representation of floating
- * point numbers into several parts, some of which may be empty:
- *
- * [+|-| ] [0x|0X] MMM . NNN [e|E|p|P] [+|-] ZZ
- * A B ---C--- D E F
- *
- * A: 'sign' holds this value if present; '\0' otherwise
- * B: ox[1] holds the 'x' or 'X'; '\0' if not hexadecimal
- * C: cp points to the string MMMNNN. Leading and trailing
- * zeros are not in the string and must be added.
- * D: expchar holds this character; '\0' if no exponent, e.g. %f
- * F: at least two digits for decimal, at least one digit for hex
- */
- char *decimal_point = localeconv()->decimal_point;
- int signflag; /* true if float is negative */
- union { /* floating point arguments %[aAeEfFgG] */
- double dbl;
- long double ldbl;
- } fparg;
- int expt; /* integer value of exponent */
- char expchar; /* exponent character: [eEpP\0] */
- char *dtoaend; /* pointer to end of converted digits */
- int expsize; /* character count for expstr */
- int lead; /* sig figs before decimal or group sep */
- int ndig; /* actual number of digits returned by dtoa */
- char expstr[MAXEXPDIG+2]; /* buffer for exponent string: e+ZZZ */
- char *dtoaresult = NULL;
-#endif
-
- uintmax_t _umax; /* integer arguments %[diouxX] */
- enum { OCT, DEC, HEX } base; /* base for %[diouxX] conversion */
- int dprec; /* a copy of prec if %[diouxX], 0 otherwise */
- int realsz; /* field size expanded by dprec */
- int size; /* size of converted field or string */
- const char *xdigs = NULL; /* digits for %[xX] conversion */
-#define NIOV 8
- struct __lttng_ust_suio uio; /* output information: summary */
- struct __lttng_ust_siov iov[NIOV];/* ... and individual io vectors */
- char buf[BUF]; /* buffer with space for digits of uintmax_t */
- char ox[2]; /* space for 0x; ox[1] is either x, X, or \0 */
- union arg *argtable; /* args, built due to positional arg */
- union arg statargtable[STATIC_ARG_TBL_SIZE];
- size_t argtablesiz;
- int nextarg; /* 1-based argument index */
- va_list orgap; /* original argument pointer */
-
- /*
- * Choose PADSIZE to trade efficiency vs. size. If larger printf
- * fields occur frequently, increase PADSIZE and make the initialisers
- * below longer.
- */
-#define PADSIZE 16 /* pad chunk size */
- static char blanks[PADSIZE] =
- {' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' '};
- static char zeroes[PADSIZE] =
- {'0','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0'};
-
- static const char xdigs_lower[16] = "0123456789abcdef";
- static const char xdigs_upper[16] = "0123456789ABCDEF";
-
- /*
- * BEWARE, these `goto error' on error, and PAD uses `n'.
- */
-#define PRINT(ptr, len) do { \
- iovp->iov_base = (ptr); \
- iovp->iov_len = (len); \
- uio.uio_resid += (len); \
- iovp++; \
- if (++uio.uio_iovcnt >= NIOV) { \
- if (__sprint(fp, &uio)) \
- goto error; \
- iovp = iov; \
- } \
-} while (0)
-#define PAD(howmany, with) do { \
- if ((n = (howmany)) > 0) { \
- while (n > PADSIZE) { \
- PRINT(with, PADSIZE); \
- n -= PADSIZE; \
- } \
- PRINT(with, n); \
- } \
-} while (0)
-#define PRINTANDPAD(p, ep, len, with) do { \
- n2 = (ep) - (p); \
- if (n2 > (len)) \
- n2 = (len); \
- if (n2 > 0) \
- PRINT((p), n2); \
- PAD((len) - (n2 > 0 ? n2 : 0), (with)); \
-} while(0)
-#define FLUSH() do { \
- if (uio.uio_resid && __sprint(fp, &uio)) \
- goto error; \
- uio.uio_iovcnt = 0; \
- iovp = iov; \
-} while (0)
-
- /*
- * To extend shorts properly, we need both signed and unsigned
- * argument extraction methods.
- */
-#define SARG() \
- ((intmax_t)(flags&MAXINT ? GETARG(intmax_t) : \
- flags&LLONGINT ? GETARG(long long) : \
- flags&LONGINT ? GETARG(long) : \
- flags&PTRINT ? GETARG(ptrdiff_t) : \
- flags&SIZEINT ? GETARG(ssize_t) : \
- flags&SHORTINT ? (short)GETARG(int) : \
- flags&CHARINT ? (__signed char)GETARG(int) : \
- GETARG(int)))
-#define UARG() \
- ((uintmax_t)(flags&MAXINT ? GETARG(uintmax_t) : \
- flags&LLONGINT ? GETARG(unsigned long long) : \
- flags&LONGINT ? GETARG(unsigned long) : \
- flags&PTRINT ? (uintptr_t)GETARG(ptrdiff_t) : /* XXX */ \
- flags&SIZEINT ? GETARG(size_t) : \
- flags&SHORTINT ? (unsigned short)GETARG(int) : \
- flags&CHARINT ? (unsigned char)GETARG(int) : \
- GETARG(unsigned int)))
-
- /*
- * Append a digit to a value and check for overflow.
- */
-#define APPEND_DIGIT(val, dig) do { \
- if ((val) > INT_MAX / 10) \
- goto overflow; \
- (val) *= 10; \
- if ((val) > INT_MAX - to_digit((dig))) \
- goto overflow; \
- (val) += to_digit((dig)); \
-} while (0)
-
- /*
- * Get * arguments, including the form *nn$. Preserve the nextarg
- * that the argument can be gotten once the type is determined.
- */
-#define GETASTER(val) \
- n2 = 0; \
- cp = fmt; \
- while (is_digit(*cp)) { \
- APPEND_DIGIT(n2, *cp); \
- cp++; \
- } \
- if (*cp == '$') { \
- int hold = nextarg; \
- if (argtable == NULL) { \
- argtable = statargtable; \
- __find_arguments(fmt0, orgap, &argtable, &argtablesiz); \
- } \
- nextarg = n2; \
- val = GETARG(int); \
- nextarg = hold; \
- fmt = ++cp; \
- } else { \
- val = GETARG(int); \
- }
-
-/*
-* Get the argument indexed by nextarg. If the argument table is
-* built, use it to get the argument. If it's not, get the next
-* argument (and arguments must be gotten sequentially).
-*/
-#define GETARG(type) \
- ((argtable != NULL) ? *((type*)(&argtable[nextarg++])) : \
- (nextarg++, va_arg(ap, type)))
-
- _SET_ORIENTATION(fp, -1);
- /* sorry, fprintf(read_only_file, "") returns EOF, not 0 */
- if (cantwrite(fp)) {
- errno = EBADF;
- return (EOF);
- }
-
- /* optimise fprintf(stderr) (and other unbuffered Unix files) */
-// if ((fp->_flags & (__SNBF|__SWR|__SRW)) == (__SNBF|__SWR) &&
-// fp->_file >= 0)
-// return (__sbprintf(fp, fmt0, ap));
-
- fmt = (char *)fmt0;
- argtable = NULL;
- nextarg = 1;
- va_copy(orgap, ap);
- uio.uio_iov = iovp = iov;
- uio.uio_resid = 0;
- uio.uio_iovcnt = 0;
- ret = 0;
-
- memset(&ps, 0, sizeof(ps));
- /*
- * Scan the format for conversions (`%' character).
- */
- for (;;) {
- cp = fmt;
- while ((n = ust_safe_mbrtowc(&wc, fmt, MB_CUR_MAX, &ps)) > 0) {
- fmt += n;
- if (wc == '%') {
- fmt--;
- break;
- }
- }
- if (fmt != cp) {
- ptrdiff_t m = fmt - cp;
- if (m < 0 || m > INT_MAX - ret)
- goto overflow;
- PRINT(cp, m);
- ret += m;
- }
- if (n <= 0)
- goto done;
- fmt++; /* skip over '%' */
-
- flags = 0;
- dprec = 0;
- width = 0;
- prec = -1;
- sign = '\0';
- ox[1] = '\0';
-
-rflag: ch = *fmt++;
-reswitch: switch (ch) {
- case ' ':
- /*
- * ``If the space and + flags both appear, the space
- * flag will be ignored.''
- * -- ANSI X3J11
- */
- if (!sign)
- sign = ' ';
- goto rflag;
- case '#':
- flags |= ALT;
- goto rflag;
- case '\'':
- /* grouping not implemented */
- goto rflag;
- case '*':
- /*
- * ``A negative field width argument is taken as a
- * - flag followed by a positive field width.''
- * -- ANSI X3J11
- * They don't exclude field widths read from args.
- */
- GETASTER(width);
- if (width >= 0)
- goto rflag;
- if (width == INT_MIN)
- goto overflow;
- width = -width;
- /* FALLTHROUGH */
- case '-':
- flags |= LADJUST;
- goto rflag;
- case '+':
- sign = '+';
- goto rflag;
- case '.':
- if ((ch = *fmt++) == '*') {
- GETASTER(n);
- prec = n < 0 ? -1 : n;
- goto rflag;
- }
- n = 0;
- while (is_digit(ch)) {
- APPEND_DIGIT(n, ch);
- ch = *fmt++;
- }
- if (ch == '$') {
- nextarg = n;
- if (argtable == NULL) {
- argtable = statargtable;
- __find_arguments(fmt0, orgap,
- &argtable, &argtablesiz);
- }
- goto rflag;
- }
- prec = n;
- goto reswitch;
- case '0':
- /*
- * ``Note that 0 is taken as a flag, not as the
- * beginning of a field width.''
- * -- ANSI X3J11
- */
- flags |= ZEROPAD;
- goto rflag;
- case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- n = 0;
- do {
- APPEND_DIGIT(n, ch);
- ch = *fmt++;
- } while (is_digit(ch));
- if (ch == '$') {
- nextarg = n;
- if (argtable == NULL) {
- argtable = statargtable;
- __find_arguments(fmt0, orgap,
- &argtable, &argtablesiz);
- }
- goto rflag;
- }
- width = n;
- goto reswitch;
-#ifdef FLOATING_POINT
- case 'L':
- flags |= LONGDBL;
- goto rflag;
-#endif
- case 'h':
- if (*fmt == 'h') {
- fmt++;
- flags |= CHARINT;
- } else {
- flags |= SHORTINT;
- }
- goto rflag;
- case 'j':
- flags |= MAXINT;
- goto rflag;
- case 'l':
- if (*fmt == 'l') {
- fmt++;
- flags |= LLONGINT;
- } else {
- flags |= LONGINT;
- }
- goto rflag;
- case 'q':
- flags |= LLONGINT;
- goto rflag;
- case 't':
- flags |= PTRINT;
- goto rflag;
- case 'z':
- flags |= SIZEINT;
- goto rflag;
- case 'c':
- *(cp = buf) = GETARG(int);
- size = 1;
- sign = '\0';
- break;
- case 'D':
- flags |= LONGINT;
- /*FALLTHROUGH*/
- case 'd':
- case 'i':
- _umax = SARG();
- if ((intmax_t)_umax < 0) {
- _umax = -_umax;
- sign = '-';
- }
- base = DEC;
- goto number;
-#ifdef FLOATING_POINT
- case 'a':
- case 'A':
- if (ch == 'a') {
- ox[1] = 'x';
- xdigs = xdigs_lower;
- expchar = 'p';
- } else {
- ox[1] = 'X';
- xdigs = xdigs_upper;
- expchar = 'P';
- }
- if (prec >= 0)
- prec++;
- if (dtoaresult)
- __freedtoa(dtoaresult);
- if (flags & LONGDBL) {
- fparg.ldbl = GETARG(long double);
- dtoaresult = cp =
- __hldtoa(fparg.ldbl, xdigs, prec,
- &expt, &signflag, &dtoaend);
- if (dtoaresult == NULL) {
- errno = ENOMEM;
- goto error;
- }
- } else {
- fparg.dbl = GETARG(double);
- dtoaresult = cp =
- __hdtoa(fparg.dbl, xdigs, prec,
- &expt, &signflag, &dtoaend);
- if (dtoaresult == NULL) {
- errno = ENOMEM;
- goto error;
- }
- }
- if (prec < 0)
- prec = dtoaend - cp;
- if (expt == INT_MAX)
- ox[1] = '\0';
- goto fp_common;
- case 'e':
- case 'E':
- expchar = ch;
- if (prec < 0) /* account for digit before decpt */
- prec = DEFPREC + 1;
- else
- prec++;
- goto fp_begin;
- case 'f':
- case 'F':
- expchar = '\0';
- goto fp_begin;
- case 'g':
- case 'G':
- expchar = ch - ('g' - 'e');
- if (prec == 0)
- prec = 1;
-fp_begin:
- if (prec < 0)
- prec = DEFPREC;
- if (dtoaresult)
- __freedtoa(dtoaresult);
- if (flags & LONGDBL) {
- fparg.ldbl = GETARG(long double);
- dtoaresult = cp =
- __ldtoa(&fparg.ldbl, expchar ? 2 : 3, prec,
- &expt, &signflag, &dtoaend);
- if (dtoaresult == NULL) {
- errno = ENOMEM;
- goto error;
- }
- } else {
- fparg.dbl = GETARG(double);
- dtoaresult = cp =
- __dtoa(fparg.dbl, expchar ? 2 : 3, prec,
- &expt, &signflag, &dtoaend);
- if (dtoaresult == NULL) {
- errno = ENOMEM;
- goto error;
- }
- if (expt == 9999)
- expt = INT_MAX;
- }
-fp_common:
- if (signflag)
- sign = '-';
- if (expt == INT_MAX) { /* inf or nan */
- if (*cp == 'N') {
- cp = (ch >= 'a') ? "nan" : "NAN";
- sign = '\0';
- } else
- cp = (ch >= 'a') ? "inf" : "INF";
- size = 3;
- flags &= ~ZEROPAD;
- break;
- }
- flags |= FPT;
- ndig = dtoaend - cp;
- if (ch == 'g' || ch == 'G') {
- if (expt > -4 && expt <= prec) {
- /* Make %[gG] smell like %[fF] */
- expchar = '\0';
- if (flags & ALT)
- prec -= expt;
- else
- prec = ndig - expt;
- if (prec < 0)
- prec = 0;
- } else {
- /*
- * Make %[gG] smell like %[eE], but
- * trim trailing zeroes if no # flag.
- */
- if (!(flags & ALT))
- prec = ndig;
- }
- }
- if (expchar) {
- expsize = exponent(expstr, expt - 1, expchar);
- size = expsize + prec;
- if (prec > 1 || flags & ALT)
- ++size;
- } else {
- /* space for digits before decimal point */
- if (expt > 0)
- size = expt;
- else /* "0" */
- size = 1;
- /* space for decimal pt and following digits */
- if (prec || flags & ALT)
- size += prec + 1;
- lead = expt;
- }
- break;
-#endif /* FLOATING_POINT */
- case 'n':
- if (flags & LLONGINT)
- *GETARG(long long *) = ret;
- else if (flags & LONGINT)
- *GETARG(long *) = ret;
- else if (flags & SHORTINT)
- *GETARG(short *) = ret;
- else if (flags & CHARINT)
- *GETARG(__signed char *) = ret;
- else if (flags & PTRINT)
- *GETARG(ptrdiff_t *) = ret;
- else if (flags & SIZEINT)
- *GETARG(ssize_t *) = ret;
- else if (flags & MAXINT)
- *GETARG(intmax_t *) = ret;
- else
- *GETARG(int *) = ret;
- continue; /* no output */
- case 'O':
- flags |= LONGINT;
- /*FALLTHROUGH*/
- case 'o':
- _umax = UARG();
- base = OCT;
- goto nosign;
- case 'p':
- /*
- * ``The argument shall be a pointer to void. The
- * value of the pointer is converted to a sequence
- * of printable characters, in an implementation-
- * defined manner.''
- * -- ANSI X3J11
- */
- /* NOSTRICT */
- _umax = (u_long)GETARG(void *);
- base = HEX;
- xdigs = xdigs_lower;
- ox[1] = 'x';
- goto nosign;
- case 's':
- if ((cp = GETARG(char *)) == NULL)
- cp = null_str;
- if (prec >= 0) {
- /*
- * can't use strlen; can only look for the
- * NUL in the first `prec' characters, and
- * strlen() will go further.
- */
- char *p = memchr(cp, 0, prec);
-
- size = p ? (p - cp) : prec;
- } else {
- size_t len;
-
- if ((len = strlen(cp)) > INT_MAX)
- goto overflow;
- size = (int)len;
- }
- sign = '\0';
- break;
- case 'U':
- flags |= LONGINT;
- /*FALLTHROUGH*/
- case 'u':
- _umax = UARG();
- base = DEC;
- goto nosign;
- case 'X':
- xdigs = xdigs_upper;
- goto hex;
- case 'x':
- xdigs = xdigs_lower;
-hex: _umax = UARG();
- base = HEX;
- /* leading 0x/X only if non-zero */
- if (flags & ALT && _umax != 0)
- ox[1] = ch;
-
- /* unsigned conversions */
-nosign: sign = '\0';
- /*
- * ``... diouXx conversions ... if a precision is
- * specified, the 0 flag will be ignored.''
- * -- ANSI X3J11
- */
-number: if ((dprec = prec) >= 0)
- flags &= ~ZEROPAD;
-
- /*
- * ``The result of converting a zero value with an
- * explicit precision of zero is no characters.''
- * -- ANSI X3J11
- */
- cp = buf + BUF;
- if (_umax != 0 || prec != 0) {
- /*
- * Unsigned mod is hard, and unsigned mod
- * by a constant is easier than that by
- * a variable; hence this switch.
- */
- switch (base) {
- case OCT:
- do {
- *--cp = to_char(_umax & 7);
- _umax >>= 3;
- } while (_umax);
- /* handle octal leading 0 */
- if (flags & ALT && *cp != '0')
- *--cp = '0';
- break;
-
- case DEC:
- /* many numbers are 1 digit */
- while (_umax >= 10) {
- *--cp = to_char(_umax % 10);
- _umax /= 10;
- }
- *--cp = to_char(_umax);
- break;
-
- case HEX:
- do {
- *--cp = xdigs[_umax & 15];
- _umax >>= 4;
- } while (_umax);
- break;
-
- default:
- cp = bad_base_str;
- size = strlen(cp);
- goto skipsize;
- }
- }
- size = buf + BUF - cp;
- if (size > BUF) /* should never happen */
- abort();
- skipsize:
- break;
- default: /* "%?" prints ?, unless ? is NUL */
- if (ch == '\0')
- goto done;
- /* pretend it was %c with argument ch */
- cp = buf;
- *cp = ch;
- size = 1;
- sign = '\0';
- break;
- }
-
- /*
- * All reasonable formats wind up here. At this point, `cp'
- * points to a string which (if not flags&LADJUST) should be
- * padded out to `width' places. If flags&ZEROPAD, it should
- * first be prefixed by any sign or other prefix; otherwise,
- * it should be blank padded before the prefix is emitted.
- * After any left-hand padding and prefixing, emit zeroes
- * required by a decimal %[diouxX] precision, then print the
- * string proper, then emit zeroes required by any leftover
- * floating precision; finally, if LADJUST, pad with blanks.
- *
- * Compute actual size, so we know how much to pad.
- * size excludes decimal prec; realsz includes it.
- */
- realsz = dprec > size ? dprec : size;
- if (sign)
- realsz++;
- if (ox[1])
- realsz+= 2;
-
- /* right-adjusting blank padding */
- if ((flags & (LADJUST|ZEROPAD)) == 0)
- PAD(width - realsz, blanks);
-
- /* prefix */
- if (sign)
- PRINT(&sign, 1);
- if (ox[1]) { /* ox[1] is either x, X, or \0 */
- ox[0] = '0';
- PRINT(ox, 2);
- }
-
- /* right-adjusting zero padding */
- if ((flags & (LADJUST|ZEROPAD)) == ZEROPAD)
- PAD(width - realsz, zeroes);
-
- /* leading zeroes from decimal precision */
- PAD(dprec - size, zeroes);
-
- /* the string or number proper */
-#ifdef FLOATING_POINT
- if ((flags & FPT) == 0) {
- PRINT(cp, size);
- } else { /* glue together f_p fragments */
- if (!expchar) { /* %[fF] or sufficiently short %[gG] */
- if (expt <= 0) {
- PRINT(zeroes, 1);
- if (prec || flags & ALT)
- PRINT(decimal_point, 1);
- PAD(-expt, zeroes);
- /* already handled initial 0's */
- prec += expt;
- } else {
- PRINTANDPAD(cp, dtoaend, lead, zeroes);
- cp += lead;
- if (prec || flags & ALT)
- PRINT(decimal_point, 1);
- }
- PRINTANDPAD(cp, dtoaend, prec, zeroes);
- } else { /* %[eE] or sufficiently long %[gG] */
- if (prec > 1 || flags & ALT) {
- buf[0] = *cp++;
- buf[1] = *decimal_point;
- PRINT(buf, 2);
- PRINT(cp, ndig-1);
- PAD(prec - ndig, zeroes);
- } else { /* XeYYY */
- PRINT(cp, 1);
- }
- PRINT(expstr, expsize);
- }
- }
-#else
- PRINT(cp, size);
-#endif
- /* left-adjusting padding (always blank) */
- if (flags & LADJUST)
- PAD(width - realsz, blanks);
-
- /* finally, adjust ret */
- if (width < realsz)
- width = realsz;
- if (width > INT_MAX - ret)
- goto overflow;
- ret += width;
-
- FLUSH(); /* copy out the I/O vectors */
- }
-done:
- FLUSH();
-error:
- if (__sferror(fp))
- ret = -1;
- goto finish;
-
-overflow:
- errno = ENOMEM;
- ret = -1;
-
-finish:
- va_end(orgap);
-#ifdef FLOATING_POINT
- if (dtoaresult)
- __freedtoa(dtoaresult);
-#endif
- if (argtable != NULL && argtable != statargtable) {
- munmap(argtable, argtablesiz);
- argtable = NULL;
- }
- return (ret);
-}
-
-/*
- * Type ids for argument type table.
- */
-#define T_UNUSED 0
-#define T_SHORT 1
-#define T_U_SHORT 2
-#define TP_SHORT 3
-#define T_INT 4
-#define T_U_INT 5
-#define TP_INT 6
-#define T_LONG 7
-#define T_U_LONG 8
-#define TP_LONG 9
-#define T_LLONG 10
-#define T_U_LLONG 11
-#define TP_LLONG 12
-#define T_DOUBLE 13
-#define T_LONG_DOUBLE 14
-#define TP_CHAR 15
-#define TP_VOID 16
-#define T_PTRINT 17
-#define TP_PTRINT 18
-#define T_SIZEINT 19
-#define T_SSIZEINT 20
-#define TP_SSIZEINT 21
-#define T_MAXINT 22
-#define T_MAXUINT 23
-#define TP_MAXINT 24
-#define T_CHAR 25
-#define T_U_CHAR 26
-
-/*
- * Find all arguments when a positional parameter is encountered. Returns a
- * table, indexed by argument number, of pointers to each argument. The
- * initial argument table should be an array of STATIC_ARG_TBL_SIZE entries.
- * It will be replaced with a mmap-ed one if it overflows (malloc cannot be
- * used since we are attempting to make snprintf thread safe, and alloca is
- * problematic since we have nested functions.)
- */
-static int
-__find_arguments(const char *fmt0, va_list ap, union arg **argtable,
- size_t *argtablesiz)
-{
- char *fmt; /* format string */
- int ch; /* character from fmt */
- int n, n2; /* handy integer (short term usage) */
- char *cp; /* handy char pointer (short term usage) */
- int flags; /* flags as above */
- unsigned char *typetable; /* table of types */
- unsigned char stattypetable[STATIC_ARG_TBL_SIZE];
- int tablesize; /* current size of type table */
- int tablemax; /* largest used index in table */
- int nextarg; /* 1-based argument index */
- int ret = 0; /* return value */
- wchar_t wc;
- mbstate_t ps;
-
- /*
- * Add an argument type to the table, expanding if necessary.
- */
-#define ADDTYPE(type) \
- ((nextarg >= tablesize) ? \
- __grow_type_table(&typetable, &tablesize) : 0, \
- (nextarg > tablemax) ? tablemax = nextarg : 0, \
- typetable[nextarg++] = type)
-
-#define ADDSARG() \
- ((flags&MAXINT) ? ADDTYPE(T_MAXINT) : \
- ((flags&PTRINT) ? ADDTYPE(T_PTRINT) : \
- ((flags&SIZEINT) ? ADDTYPE(T_SSIZEINT) : \
- ((flags&LLONGINT) ? ADDTYPE(T_LLONG) : \
- ((flags&LONGINT) ? ADDTYPE(T_LONG) : \
- ((flags&SHORTINT) ? ADDTYPE(T_SHORT) : \
- ((flags&CHARINT) ? ADDTYPE(T_CHAR) : ADDTYPE(T_INT))))))))
-
-#define ADDUARG() \
- ((flags&MAXINT) ? ADDTYPE(T_MAXUINT) : \
- ((flags&PTRINT) ? ADDTYPE(T_PTRINT) : \
- ((flags&SIZEINT) ? ADDTYPE(T_SIZEINT) : \
- ((flags&LLONGINT) ? ADDTYPE(T_U_LLONG) : \
- ((flags&LONGINT) ? ADDTYPE(T_U_LONG) : \
- ((flags&SHORTINT) ? ADDTYPE(T_U_SHORT) : \
- ((flags&CHARINT) ? ADDTYPE(T_U_CHAR) : ADDTYPE(T_U_INT))))))))
-
- /*
- * Add * arguments to the type array.
- */
-#define ADDASTER() \
- n2 = 0; \
- cp = fmt; \
- while (is_digit(*cp)) { \
- APPEND_DIGIT(n2, *cp); \
- cp++; \
- } \
- if (*cp == '$') { \
- int hold = nextarg; \
- nextarg = n2; \
- ADDTYPE(T_INT); \
- nextarg = hold; \
- fmt = ++cp; \
- } else { \
- ADDTYPE(T_INT); \
- }
- fmt = (char *)fmt0;
- typetable = stattypetable;
- tablesize = STATIC_ARG_TBL_SIZE;
- tablemax = 0;
- nextarg = 1;
- memset(typetable, T_UNUSED, STATIC_ARG_TBL_SIZE);
- memset(&ps, 0, sizeof(ps));
-
- /*
- * Scan the format for conversions (`%' character).
- */
- for (;;) {
- cp = fmt;
- while ((n = ust_safe_mbrtowc(&wc, fmt, MB_CUR_MAX, &ps)) > 0) {
- fmt += n;
- if (wc == '%') {
- fmt--;
- break;
- }
- }
- if (n <= 0)
- goto done;
- fmt++; /* skip over '%' */
-
- flags = 0;
-
-rflag: ch = *fmt++;
-reswitch: switch (ch) {
- case ' ':
- case '#':
- case '\'':
- goto rflag;
- case '*':
- ADDASTER();
- goto rflag;
- case '-':
- case '+':
- goto rflag;
- case '.':
- if ((ch = *fmt++) == '*') {
- ADDASTER();
- goto rflag;
- }
- while (is_digit(ch)) {
- ch = *fmt++;
- }
- goto reswitch;
- case '0':
- goto rflag;
- case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- n = 0;
- do {
-			APPEND_DIGIT(n, ch);
- ch = *fmt++;
- } while (is_digit(ch));
- if (ch == '$') {
- nextarg = n;
- goto rflag;
- }
- goto reswitch;
-#ifdef FLOATING_POINT
- case 'L':
- flags |= LONGDBL;
- goto rflag;
-#endif
- case 'h':
- if (*fmt == 'h') {
- fmt++;
- flags |= CHARINT;
- } else {
- flags |= SHORTINT;
- }
- goto rflag;
- case 'l':
- if (*fmt == 'l') {
- fmt++;
- flags |= LLONGINT;
- } else {
- flags |= LONGINT;
- }
- goto rflag;
- case 'q':
- flags |= LLONGINT;
- goto rflag;
- case 't':
- flags |= PTRINT;
- goto rflag;
- case 'z':
- flags |= SIZEINT;
- goto rflag;
- case 'c':
- ADDTYPE(T_INT);
- break;
- case 'D':
- flags |= LONGINT;
- /*FALLTHROUGH*/
- case 'd':
- case 'i':
- ADDSARG();
- break;
-#ifdef FLOATING_POINT
- case 'a':
- case 'A':
- case 'e':
- case 'E':
- case 'f':
- case 'F':
- case 'g':
- case 'G':
- if (flags & LONGDBL)
- ADDTYPE(T_LONG_DOUBLE);
- else
- ADDTYPE(T_DOUBLE);
- break;
-#endif /* FLOATING_POINT */
- case 'n':
- if (flags & LLONGINT)
- ADDTYPE(TP_LLONG);
- else if (flags & LONGINT)
- ADDTYPE(TP_LONG);
- else if (flags & SHORTINT)
- ADDTYPE(TP_SHORT);
- else if (flags & PTRINT)
- ADDTYPE(TP_PTRINT);
- else if (flags & SIZEINT)
- ADDTYPE(TP_SSIZEINT);
- else if (flags & MAXINT)
- ADDTYPE(TP_MAXINT);
- else
- ADDTYPE(TP_INT);
- continue; /* no output */
- case 'O':
- flags |= LONGINT;
- /*FALLTHROUGH*/
- case 'o':
- ADDUARG();
- break;
- case 'p':
- ADDTYPE(TP_VOID);
- break;
- case 's':
- ADDTYPE(TP_CHAR);
- break;
- case 'U':
- flags |= LONGINT;
- /*FALLTHROUGH*/
- case 'u':
- case 'X':
- case 'x':
- ADDUARG();
- break;
- default: /* "%?" prints ?, unless ? is NUL */
- if (ch == '\0')
- goto done;
- break;
- }
- }
-done:
- /*
- * Build the argument table.
- */
- if (tablemax >= STATIC_ARG_TBL_SIZE) {
- *argtablesiz = sizeof(union arg) * (tablemax + 1);
- *argtable = mmap(NULL, *argtablesiz,
- PROT_WRITE|PROT_READ, MAP_ANON|MAP_PRIVATE, -1, 0);
- if (*argtable == MAP_FAILED)
- return (-1);
- }
-
-#if 0
- /* XXX is this required? */
- (*argtable)[0].intarg = 0;
-#endif
- for (n = 1; n <= tablemax; n++) {
- switch (typetable[n]) {
- case T_UNUSED:
- case T_CHAR:
- case T_U_CHAR:
- case T_SHORT:
- case T_U_SHORT:
- case T_INT:
- (*argtable)[n].intarg = va_arg(ap, int);
- break;
- case TP_SHORT:
- (*argtable)[n].pshortarg = va_arg(ap, short *);
- break;
- case T_U_INT:
- (*argtable)[n].uintarg = va_arg(ap, unsigned int);
- break;
- case TP_INT:
- (*argtable)[n].pintarg = va_arg(ap, int *);
- break;
- case T_LONG:
- (*argtable)[n].longarg = va_arg(ap, long);
- break;
- case T_U_LONG:
- (*argtable)[n].ulongarg = va_arg(ap, unsigned long);
- break;
- case TP_LONG:
- (*argtable)[n].plongarg = va_arg(ap, long *);
- break;
- case T_LLONG:
- (*argtable)[n].longlongarg = va_arg(ap, long long);
- break;
- case T_U_LLONG:
- (*argtable)[n].ulonglongarg = va_arg(ap, unsigned long long);
- break;
- case TP_LLONG:
- (*argtable)[n].plonglongarg = va_arg(ap, long long *);
- break;
-#ifdef FLOATING_POINT
- case T_DOUBLE:
- (*argtable)[n].doublearg = va_arg(ap, double);
- break;
- case T_LONG_DOUBLE:
- (*argtable)[n].longdoublearg = va_arg(ap, long double);
- break;
-#endif
- case TP_CHAR:
- (*argtable)[n].pchararg = va_arg(ap, char *);
- break;
- case TP_VOID:
- (*argtable)[n].pvoidarg = va_arg(ap, void *);
- break;
- case T_PTRINT:
- (*argtable)[n].ptrdiffarg = va_arg(ap, ptrdiff_t);
- break;
- case TP_PTRINT:
- (*argtable)[n].pptrdiffarg = va_arg(ap, ptrdiff_t *);
- break;
- case T_SIZEINT:
- (*argtable)[n].sizearg = va_arg(ap, size_t);
- break;
- case T_SSIZEINT:
- (*argtable)[n].ssizearg = va_arg(ap, ssize_t);
- break;
- case TP_SSIZEINT:
- (*argtable)[n].pssizearg = va_arg(ap, ssize_t *);
- break;
- case TP_MAXINT:
- (*argtable)[n].intmaxarg = va_arg(ap, intmax_t);
- break;
- }
- }
- goto finish;
-
-overflow:
- errno = ENOMEM;
- ret = -1;
-
-finish:
- if (typetable != NULL && typetable != stattypetable) {
- munmap(typetable, *argtablesiz);
- typetable = NULL;
- }
- return (ret);
-}
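
For context, positional conversions are the reason this pre-scan exists: argument 2 can be consumed before argument 1, so every argument's type must be known before any of them is fetched from the va_list. A minimal sketch (values illustrative):

	#include <stdio.h>

	int main(void)
	{
		char buf[32];

		/* The pre-scan records: arg 1 -> T_INT, arg 2 -> TP_CHAR. */
		snprintf(buf, sizeof(buf), "%2$s=%1$d", 42, "answer");
		/* buf now holds "answer=42". */
		return 0;
	}
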
-
-/*
- * Increase the size of the type table.
- */
-static int
-__grow_type_table(unsigned char **typetable, int *tablesize)
-{
- unsigned char *oldtable = *typetable;
- int newsize = *tablesize * 2;
-
- if (newsize < getpagesize())
- newsize = getpagesize();
-
- if (*tablesize == STATIC_ARG_TBL_SIZE) {
- *typetable = mmap(NULL, newsize, PROT_WRITE|PROT_READ,
- MAP_ANON|MAP_PRIVATE, -1, 0);
- if (*typetable == MAP_FAILED)
- return (-1);
- bcopy(oldtable, *typetable, *tablesize);
- } else {
- unsigned char *new = mmap(NULL, newsize, PROT_WRITE|PROT_READ,
- MAP_ANON|MAP_PRIVATE, -1, 0);
- if (new == MAP_FAILED)
- return (-1);
- memmove(new, *typetable, *tablesize);
- munmap(*typetable, *tablesize);
- *typetable = new;
- }
- memset(*typetable + *tablesize, T_UNUSED, (newsize - *tablesize));
-
- *tablesize = newsize;
- return (0);
-}
-
-
-#ifdef FLOATING_POINT
-static int
-exponent(char *p0, int exp, int fmtch)
-{
- char *p, *t;
- char expbuf[MAXEXPDIG];
-
- p = p0;
- *p++ = fmtch;
- if (exp < 0) {
- exp = -exp;
- *p++ = '-';
- } else
- *p++ = '+';
- t = expbuf + MAXEXPDIG;
- if (exp > 9) {
- do {
- *--t = to_char(exp % 10);
- } while ((exp /= 10) > 9);
- *--t = to_char(exp);
- for (; t < expbuf + MAXEXPDIG; *p++ = *t++)
- /* nothing */;
- } else {
- /*
- * Exponents for decimal floating point conversions
- * (%[eEgG]) must be at least two characters long,
- * whereas exponents for hexadecimal conversions can
- * be only one character long.
- */
- if (fmtch == 'e' || fmtch == 'E')
- *p++ = '0';
- *p++ = to_char(exp);
- }
- return (p - p0);
-}
-#endif /* FLOATING_POINT */
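
The two-digit rule handled above is directly observable; a sketch (expected output in comments):

	printf("%e\n", 1.5);	/* "1.500000e+00": decimal, two exponent digits */
	printf("%a\n", 1.5);	/* "0x1.8p+0": hexadecimal, one exponent digit */
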
+++ /dev/null
-/* $OpenBSD: wcio.h,v 1.1 2005/06/17 20:40:32 espie Exp $ */
-/* $NetBSD: wcio.h,v 1.3 2003/01/18 11:30:00 thorpej Exp $ */
-
-/*
- * SPDX-License-Identifier: BSD-2-Clause
- *
- * Copyright (C)2001 Citrus Project,
- * All rights reserved.
- *
- * $Citrus$
- */
-
-#ifndef _WCIO_H_
-#define _WCIO_H_
-
-#include <stddef.h>
-#include <wchar.h>
-
-/* minimal requirement of SUSv2 */
-#define WCIO_UNGETWC_BUFSIZE 1
-
-struct wchar_io_data {
- mbstate_t wcio_mbstate_in;
- mbstate_t wcio_mbstate_out;
-
- wchar_t wcio_ungetwc_buf[WCIO_UNGETWC_BUFSIZE];
- size_t wcio_ungetwc_inbuf;
-
- int wcio_mode; /* orientation */
-};
-
-#define WCIO_GET(fp) \
- (_EXT(fp) ? &(_EXT(fp)->_wcio) : (struct wchar_io_data *)0)
-
-#define WCIO_GET_NONULL(fp) \
- (&(_EXT(fp)->_wcio))
-
-#define _SET_ORIENTATION(fp, mode) \
-do {\
- struct wchar_io_data *_wcio = WCIO_GET(fp); \
- if (_wcio && _wcio->wcio_mode == 0) \
- _wcio->wcio_mode = (mode);\
-} while (0)
-
-/*
- * WCIO_FREE should be called by fclose
- */
-#define WCIO_FREE(fp) \
-do {\
- struct wchar_io_data *_wcio = WCIO_GET(fp); \
- if (_wcio) { \
- _wcio->wcio_mode = 0;\
- _wcio->wcio_ungetwc_inbuf = 0;\
- } \
-} while (0)
-
-#define WCIO_FREEUB(fp) \
-do {\
- struct wchar_io_data *_wcio = WCIO_GET(fp); \
- if (_wcio) { \
- _wcio->wcio_ungetwc_inbuf = 0;\
- } \
-} while (0)
-
-#define WCIO_INIT(fp) \
- memset(WCIO_GET_NONULL(fp), 0, sizeof(struct wchar_io_data))
-
-#endif /*_WCIO_H_*/
+++ /dev/null
-/* $OpenBSD: wsetup.c,v 1.7 2005/08/08 08:05:36 espie Exp $ */
-/*
- * SPDX-License-Identifier: BSD-3-Clause
- *
- * Copyright (C) 1990, 1993
- * The Regents of the University of California. All rights reserved.
- *
- * This code is derived from software contributed to Berkeley by
- * Chris Torek.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-#include "local.h"
-
-/*
- * Various output routines call wsetup to be sure it is safe to write,
- * because either _flags does not include __SWR, or _buf is NULL.
- * _wsetup returns 0 if OK to write, nonzero otherwise.
- */
-int
-__swsetup(LTTNG_UST_LFILE *fp)
-{
- /* make sure stdio is set up */
-// if (!__sdidinit)
-// __sinit();
-
- /*
- * If we are not writing, we had better be reading and writing.
- */
- if ((fp->_flags & __SWR) == 0) {
- if ((fp->_flags & __SRW) == 0)
- return (EOF);
- if (fp->_flags & __SRD) {
- /* clobber any ungetc data */
- if (HASUB(fp))
- FREEUB(fp);
- fp->_flags &= ~(__SRD|__SEOF);
- fp->_r = 0;
- fp->_p = fp->_bf._base;
- }
- fp->_flags |= __SWR;
- }
-
- /*
- * Make a buffer if necessary, then set _w.
- */
- if (fp->_bf._base == NULL) {
-// if ((fp->_flags & (__SSTR | __SALC)) == __SSTR)
-// return (EOF);
-// __smakebuf(fp);
- assert(0);
- }
- if (fp->_flags & __SLBF) {
- /*
- * It is line buffered, so make _lbfsize be -_bufsize
- * for the putc() macro. We will change _lbfsize back
- * to 0 whenever we turn off __SWR.
- */
- fp->_w = 0;
- fp->_lbfsize = -fp->_bf._size;
- } else
- fp->_w = fp->_flags & __SNBF ? 0 : fp->_bf._size;
- return (0);
-}
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+SUBDIRS = \
+ snprintf \
+ libringbuffer \
+ liblttng-ust-comm \
+ libcounter \
+ libmsgpack \
+ liblttng-ust \
+ liblttng-ust-ctl \
+ liblttng-ust-fd \
+ liblttng-ust-fork \
+ liblttng-ust-libc-wrapper \
+ liblttng-ust-cyg-profile
+
+if ENABLE_UST_DL
+SUBDIRS += liblttng-ust-dl
+endif
+
+if ENABLE_JNI_INTERFACE
+SUBDIRS += liblttng-ust-java
+endif
+
+if ENABLE_JAVA_AGENT
+SUBDIRS += liblttng-ust-java-agent
+endif
+
+if ENABLE_PYTHON_AGENT
+SUBDIRS += \
+ python-lttngust \
+ liblttng-ust-python-agent
+endif
+
+pkgconfigdir = $(libdir)/pkgconfig
+pkgconfig_DATA = lttng-ust.pc lttng-ust-ctl.pc
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -fno-strict-aliasing
+
+noinst_LTLIBRARIES = libcounter.la
+
+libcounter_la_SOURCES = \
+ counter.c smp.c smp.h shm.c shm.h shm_internal.h shm_types.h \
+ counter-api.h counter.h counter-internal.h counter-types.h \
+ counter-config.h
+
+libcounter_la_LIBADD = \
+ -lpthread \
+ -lrt
+
+if ENABLE_NUMA
+libcounter_la_LIBADD += -lnuma
+endif
+
+libcounter_la_CFLAGS = -DUST_COMPONENT="libcounter" $(AM_CFLAGS)
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters API, requiring counter/config.h
+ */
+
+#ifndef _LTTNG_COUNTER_API_H
+#define _LTTNG_COUNTER_API_H
+
+#include <stdint.h>
+#include <limits.h>
+#include <errno.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <urcu/compiler.h>
+#include <urcu/uatomic.h>
+#include "ust-bitmap.h"
+#include "../libringbuffer/getcpu.h"
+
+/*
+ * Use unsigned arithmetic internally: unsigned overflow is well-defined,
+ * whereas signed overflow is undefined behavior.
+ */
+static inline int __lttng_counter_add(const struct lib_counter_config *config,
+ enum lib_counter_config_alloc alloc,
+ enum lib_counter_config_sync sync,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v,
+ int64_t *remainder)
+{
+ size_t index;
+ bool overflow = false, underflow = false;
+ struct lib_counter_layout *layout;
+ int64_t move_sum = 0;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ layout = &counter->percpu_counters[lttng_ust_get_cpu()];
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ int8_t old, n, res;
+ int8_t global_sum_step = counter->global_sum_step.s8;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ if (caa_unlikely(n > (int8_t) global_sum_step))
+ move_sum = (int8_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int8_t) global_sum_step))
+ move_sum = -((int8_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int8_t) ((uint8_t) old + (uint8_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (v > 0 && (v >= UINT8_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -(int64_t) UINT8_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ int16_t old, n, res;
+ int16_t global_sum_step = counter->global_sum_step.s16;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ if (caa_unlikely(n > (int16_t) global_sum_step))
+ move_sum = (int16_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int16_t) global_sum_step))
+ move_sum = -((int16_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int16_t) ((uint16_t) old + (uint16_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (v > 0 && (v >= UINT16_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -(int64_t) UINT16_MAX || n > old))
+ underflow = true;
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ int32_t old, n, res;
+ int32_t global_sum_step = counter->global_sum_step.s32;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ if (caa_unlikely(n > (int32_t) global_sum_step))
+ move_sum = (int32_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int32_t) global_sum_step))
+ move_sum = -((int32_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int32_t) ((uint32_t) old + (uint32_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (v > 0 && (v >= UINT32_MAX || n < old))
+ overflow = true;
+ else if (v < 0 && (v <= -(int64_t) UINT32_MAX || n > old))
+ underflow = true;
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ int64_t old, n, res;
+ int64_t global_sum_step = counter->global_sum_step.s64;
+
+ res = *int_p;
+ switch (sync) {
+ case COUNTER_SYNC_PER_CPU:
+ {
+ do {
+ move_sum = 0;
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ if (caa_unlikely(n > (int64_t) global_sum_step))
+ move_sum = (int64_t) global_sum_step / 2;
+ else if (caa_unlikely(n < -(int64_t) global_sum_step))
+ move_sum = -((int64_t) global_sum_step / 2);
+ n -= move_sum;
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ case COUNTER_SYNC_GLOBAL:
+ {
+ do {
+ old = res;
+ n = (int64_t) ((uint64_t) old + (uint64_t) v);
+ res = uatomic_cmpxchg(int_p, old, n);
+ } while (old != res);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ if (v > 0 && n < old)
+ overflow = true;
+ else if (v < 0 && n > old)
+ underflow = true;
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(overflow && !lttng_bitmap_test_bit(index, layout->overflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->overflow_bitmap);
+ else if (caa_unlikely(underflow && !lttng_bitmap_test_bit(index, layout->underflow_bitmap)))
+ lttng_bitmap_set_bit(index, layout->underflow_bitmap);
+ if (remainder)
+ *remainder = move_sum;
+ return 0;
+}
+
+static inline int __lttng_counter_add_percpu(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ int64_t move_sum;
+ int ret;
+
+ ret = __lttng_counter_add(config, COUNTER_ALLOC_PER_CPU, config->sync,
+ counter, dimension_indexes, v, &move_sum);
+ if (caa_unlikely(ret))
+ return ret;
+ if (caa_unlikely(move_sum))
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, COUNTER_SYNC_GLOBAL,
+ counter, dimension_indexes, move_sum, NULL);
+ return 0;
+}
+
+static inline int __lttng_counter_add_global(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ return __lttng_counter_add(config, COUNTER_ALLOC_GLOBAL, config->sync, counter,
+ dimension_indexes, v, NULL);
+}
+
+static inline int lttng_counter_add(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v)
+{
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_percpu(config, counter, dimension_indexes, v);
+ case COUNTER_ALLOC_GLOBAL:
+ return __lttng_counter_add_global(config, counter, dimension_indexes, v);
+ default:
+ return -EINVAL;
+ }
+}
+
+static inline int lttng_counter_inc(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, 1);
+}
+
+static inline int lttng_counter_dec(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return lttng_counter_add(config, counter, dimension_indexes, -1);
+}
+
+#endif /* _LTTNG_COUNTER_API_H */
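
The per-CPU fast path above keeps each per-CPU counter within roughly ±global_sum_step by shipping half a step to the global counter whenever the local value would leave that band. A standalone sketch of the rebalancing rule for the 8-bit case (the function name is hypothetical):

	#include <stdint.h>

	/* Wrap-around addition is done on the unsigned type (well-defined);
	 * the amount to push to the global counter is returned in move_sum. */
	static int8_t percpu_add_sketch(int8_t local, int8_t v, int8_t step,
			int64_t *move_sum)
	{
		int8_t n = (int8_t) ((uint8_t) local + (uint8_t) v);

		*move_sum = 0;
		if (n > step)
			*move_sum = step / 2;
		else if (n < -step)
			*move_sum = -(step / 2);
		return n - (int8_t) *move_sum;	/* new per-cpu value */
	}
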
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters Configuration
+ */
+
+#ifndef _LTTNG_COUNTER_CONFIG_H
+#define _LTTNG_COUNTER_CONFIG_H
+
+#include <stdint.h>
+
+enum lib_counter_config_alloc {
+ COUNTER_ALLOC_PER_CPU = (1 << 0),
+ COUNTER_ALLOC_GLOBAL = (1 << 1),
+};
+
+enum lib_counter_config_sync {
+ COUNTER_SYNC_PER_CPU,
+ COUNTER_SYNC_GLOBAL,
+};
+
+struct lib_counter_config {
+ uint32_t alloc; /* enum lib_counter_config_alloc flags */
+ enum lib_counter_config_sync sync;
+ enum {
+ COUNTER_ARITHMETIC_MODULAR,
+ COUNTER_ARITHMETIC_SATURATE, /* TODO */
+ } arithmetic;
+ enum {
+ COUNTER_SIZE_8_BIT = 1,
+ COUNTER_SIZE_16_BIT = 2,
+ COUNTER_SIZE_32_BIT = 4,
+ COUNTER_SIZE_64_BIT = 8,
+ } counter_size;
+};
+
+#endif /* _LTTNG_COUNTER_CONFIG_H */
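
A hedged example of filling this structure (one plausible choice of values, not a mandated default): modular 64-bit counters, allocated both per-cpu and globally, updated through the per-cpu fast path.

	static const struct lib_counter_config my_config = {
		.alloc = COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL,
		.sync = COUNTER_SYNC_PER_CPU,
		.arithmetic = COUNTER_ARITHMETIC_MODULAR,
		.counter_size = COUNTER_SIZE_64_BIT,	/* needs 64-bit longs */
	};
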
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters Internal Header
+ */
+
+#ifndef _LTTNG_COUNTER_INTERNAL_H
+#define _LTTNG_COUNTER_INTERNAL_H
+
+#include <stdint.h>
+#include <errno.h>
+#include <lttng/ust-config.h>
+#include <urcu/compiler.h>
+#include "counter-types.h"
+
+static inline int lttng_counter_validate_indexes(
+ const struct lib_counter_config *config __attribute__((unused)),
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ if (caa_unlikely(dimension_indexes[i] >= counter->dimensions[i].max_nr_elem))
+ return -EOVERFLOW;
+ }
+ return 0;
+}
+
+static inline size_t lttng_counter_get_index(
+ const struct lib_counter_config *config __attribute__((unused)),
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ size_t nr_dimensions = counter->nr_dimensions, i;
+ size_t index = 0;
+
+ for (i = 0; i < nr_dimensions; i++) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ const size_t *dimension_index = &dimension_indexes[i];
+
+ index += *dimension_index * dimension->stride;
+ }
+ return index;
+}
+
+#endif /* _LTTNG_COUNTER_INTERNAL_H */
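
A worked instance of the index computation, with hypothetical dimensions: for a two-dimension counter with max_nr_elem = {4, 8}, the strides computed at creation are {8, 1}, so dimension_indexes = {2, 3} select flat element 19:

	size_t strides[2] = { 8, 1 };	/* derived from max_nr_elem = {4, 8} */
	size_t idx[2] = { 2, 3 };
	size_t flat = idx[0] * strides[0] + idx[1] * strides[1];	/* 19 */
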
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters Types
+ */
+
+#ifndef _LTTNG_COUNTER_TYPES_H
+#define _LTTNG_COUNTER_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include "counter-config.h"
+#include <lttng/ust-config.h>
+#include "shm_types.h"
+
+struct lib_counter_dimension {
+ /*
+ * Max. number of indexable elements.
+ */
+ size_t max_nr_elem;
+ /*
+	 * The stride of a dimension is the factor by which its index
+	 * must be multiplied to account for the dimensions nested
+	 * inside it.
+ */
+ size_t stride;
+};
+
+struct lib_counter_layout {
+ void *counters;
+ unsigned long *overflow_bitmap;
+ unsigned long *underflow_bitmap;
+ int shm_fd;
+ size_t shm_len;
+ struct lttng_counter_shm_handle handle;
+};
+
+enum lib_counter_arithmetic {
+ LIB_COUNTER_ARITHMETIC_MODULAR,
+ LIB_COUNTER_ARITHMETIC_SATURATE,
+};
+
+struct lib_counter {
+ size_t nr_dimensions;
+ int64_t allocated_elem;
+ struct lib_counter_dimension *dimensions;
+ enum lib_counter_arithmetic arithmetic;
+ union {
+ struct {
+ int32_t max, min;
+ } limits_32_bit;
+ struct {
+ int64_t max, min;
+ } limits_64_bit;
+ } saturation;
+ union {
+ int8_t s8;
+ int16_t s16;
+ int32_t s32;
+ int64_t s64;
+ } global_sum_step; /* 0 if unused */
+ struct lib_counter_config config;
+
+ struct lib_counter_layout global_counters;
+ struct lib_counter_layout *percpu_counters;
+
+ bool is_daemon;
+ struct lttng_counter_shm_object_table *object_table;
+};
+
+#endif /* _LTTNG_COUNTER_TYPES_H */
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
+ * counter.c
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <errno.h>
+#include "counter.h"
+#include "counter-internal.h"
+#include <urcu/system.h>
+#include <urcu/compiler.h>
+#include <stdbool.h>
+#include <ust-helper.h>
+#include "smp.h"
+#include "shm.h"
+#include "ust-compat.h"
+
+#include "ust-bitmap.h"
+
+static size_t lttng_counter_get_dimension_nr_elements(struct lib_counter_dimension *dimension)
+{
+ return dimension->max_nr_elem;
+}
+
+static int lttng_counter_init_stride(
+ const struct lib_counter_config *config __attribute__((unused)),
+ struct lib_counter *counter)
+{
+ size_t nr_dimensions = counter->nr_dimensions;
+ size_t stride = 1;
+ ssize_t i;
+
+ for (i = nr_dimensions - 1; i >= 0; i--) {
+ struct lib_counter_dimension *dimension = &counter->dimensions[i];
+ size_t nr_elem;
+
+ nr_elem = lttng_counter_get_dimension_nr_elements(dimension);
+ dimension->stride = stride;
+		/* nr_elem must be at least 1 for each dimension. */
+		if (!nr_elem)
+			return -EINVAL;
+		/* Check for multiplication overflow before computing the stride. */
+		if (stride > SIZE_MAX / nr_elem)
+			return -EINVAL;
+		stride *= nr_elem;
+ }
+ return 0;
+}
+
+static int lttng_counter_layout_init(struct lib_counter *counter, int cpu, int shm_fd)
+{
+ struct lib_counter_layout *layout;
+ size_t counter_size;
+ size_t nr_elem = counter->allocated_elem;
+ size_t shm_length = 0, counters_offset, overflow_offset, underflow_offset;
+ struct lttng_counter_shm_object *shm_object;
+
+ if (shm_fd < 0)
+ return 0; /* Skip, will be populated later. */
+
+ if (cpu == -1)
+ layout = &counter->global_counters;
+ else
+ layout = &counter->percpu_counters[cpu];
+ switch (counter->config.counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ case COUNTER_SIZE_16_BIT:
+ case COUNTER_SIZE_32_BIT:
+ case COUNTER_SIZE_64_BIT:
+ counter_size = (size_t) counter->config.counter_size;
+ break;
+ default:
+ return -EINVAL;
+ }
+ layout->shm_fd = shm_fd;
+ counters_offset = shm_length;
+ shm_length += counter_size * nr_elem;
+ overflow_offset = shm_length;
+ shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
+ underflow_offset = shm_length;
+ shm_length += LTTNG_UST_ALIGN(nr_elem, 8) / 8;
+ layout->shm_len = shm_length;
+ if (counter->is_daemon) {
+ /* Allocate and clear shared memory. */
+ shm_object = lttng_counter_shm_object_table_alloc(counter->object_table,
+ shm_length, LTTNG_COUNTER_SHM_OBJECT_SHM, shm_fd, cpu);
+ if (!shm_object)
+ return -ENOMEM;
+ } else {
+ /* Map pre-existing shared memory. */
+ shm_object = lttng_counter_shm_object_table_append_shm(counter->object_table,
+ shm_fd, shm_length);
+ if (!shm_object)
+ return -ENOMEM;
+ }
+ layout->counters = shm_object->memory_map + counters_offset;
+ layout->overflow_bitmap = (unsigned long *)(shm_object->memory_map + overflow_offset);
+ layout->underflow_bitmap = (unsigned long *)(shm_object->memory_map + underflow_offset);
+ return 0;
+}
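
The resulting mapping packs, in order, the counter array, then one overflow and one underflow bitmap of nr_elem bits each, rounded up to whole bytes. A worked sizing, assuming 64 elements of 32-bit counters:

	/* counters:          64 * 4 = 256 bytes at offset 0
	 * overflow bitmap:   LTTNG_UST_ALIGN(64, 8) / 8 = 8 bytes at offset 256
	 * underflow bitmap:  8 bytes at offset 264
	 * shm_len:           272 bytes
	 */
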
+
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
+{
+ struct lib_counter_config *config = &counter->config;
+ struct lib_counter_layout *layout;
+
+ if (!(config->alloc & COUNTER_ALLOC_GLOBAL))
+ return -EINVAL;
+ layout = &counter->global_counters;
+ if (layout->shm_fd >= 0)
+ return -EBUSY;
+ return lttng_counter_layout_init(counter, -1, fd);
+}
+
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
+{
+ struct lib_counter_config *config = &counter->config;
+ struct lib_counter_layout *layout;
+
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU))
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ if (layout->shm_fd >= 0)
+ return -EBUSY;
+ return lttng_counter_layout_init(counter, cpu, fd);
+}
+
+static
+int lttng_counter_set_global_sum_step(struct lib_counter *counter,
+ int64_t global_sum_step)
+{
+ if (global_sum_step < 0)
+ return -EINVAL;
+
+ switch (counter->config.counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ if (global_sum_step > INT8_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s8 = (int8_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_16_BIT:
+ if (global_sum_step > INT16_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s16 = (int16_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_32_BIT:
+ if (global_sum_step > INT32_MAX)
+ return -EINVAL;
+ counter->global_sum_step.s32 = (int32_t) global_sum_step;
+ break;
+ case COUNTER_SIZE_64_BIT:
+ counter->global_sum_step.s64 = global_sum_step;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static
+int validate_args(const struct lib_counter_config *config,
+ size_t nr_dimensions __attribute__((unused)),
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds)
+{
+ int nr_cpus = lttng_counter_num_possible_cpus();
+
+ if (CAA_BITS_PER_LONG != 64 && config->counter_size == COUNTER_SIZE_64_BIT) {
+ WARN_ON_ONCE(1);
+ return -1;
+ }
+ if (!max_nr_elem)
+ return -1;
+ /*
+	 * A global sum step is only useful when allocating both per-cpu
+	 * and global counters.
+ */
+ if (global_sum_step && (!(config->alloc & COUNTER_ALLOC_GLOBAL) ||
+ !(config->alloc & COUNTER_ALLOC_PER_CPU)))
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_GLOBAL) && global_counter_fd >= 0)
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds)
+ return -1;
+ if (!(config->alloc & COUNTER_ALLOC_PER_CPU) && nr_counter_cpu_fds >= 0)
+ return -1;
+ if (counter_cpu_fds && nr_cpus != nr_counter_cpu_fds)
+ return -1;
+ return 0;
+}
+
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ struct lib_counter *counter;
+ size_t dimension, nr_elem = 1;
+ int cpu, ret;
+ int nr_handles = 0;
+ int nr_cpus = lttng_counter_num_possible_cpus();
+
+ if (validate_args(config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds))
+ return NULL;
+ counter = zmalloc(sizeof(struct lib_counter));
+ if (!counter)
+ return NULL;
+ counter->global_counters.shm_fd = -1;
+ counter->config = *config;
+ counter->is_daemon = is_daemon;
+ if (lttng_counter_set_global_sum_step(counter, global_sum_step))
+ goto error_sum_step;
+ counter->nr_dimensions = nr_dimensions;
+ counter->dimensions = zmalloc(nr_dimensions * sizeof(*counter->dimensions));
+ if (!counter->dimensions)
+ goto error_dimensions;
+ for (dimension = 0; dimension < nr_dimensions; dimension++)
+ counter->dimensions[dimension].max_nr_elem = max_nr_elem[dimension];
+ if (config->alloc & COUNTER_ALLOC_PER_CPU) {
+ counter->percpu_counters = zmalloc(sizeof(struct lib_counter_layout) * nr_cpus);
+ if (!counter->percpu_counters)
+ goto error_alloc_percpu;
+ lttng_counter_for_each_possible_cpu(cpu)
+ counter->percpu_counters[cpu].shm_fd = -1;
+ }
+
+ if (lttng_counter_init_stride(config, counter))
+ goto error_init_stride;
+	/* TODO: handle saturation values. */
+ for (dimension = 0; dimension < counter->nr_dimensions; dimension++)
+ nr_elem *= lttng_counter_get_dimension_nr_elements(&counter->dimensions[dimension]);
+ counter->allocated_elem = nr_elem;
+
+ if (config->alloc & COUNTER_ALLOC_GLOBAL)
+ nr_handles++;
+ if (config->alloc & COUNTER_ALLOC_PER_CPU)
+ nr_handles += nr_cpus;
+ /* Allocate table for global and per-cpu counters. */
+ counter->object_table = lttng_counter_shm_object_table_create(nr_handles);
+ if (!counter->object_table)
+ goto error_alloc_object_table;
+
+ if (config->alloc & COUNTER_ALLOC_GLOBAL) {
+ ret = lttng_counter_layout_init(counter, -1, global_counter_fd); /* global */
+ if (ret)
+ goto layout_init_error;
+ }
+ if ((config->alloc & COUNTER_ALLOC_PER_CPU) && counter_cpu_fds) {
+ lttng_counter_for_each_possible_cpu(cpu) {
+ ret = lttng_counter_layout_init(counter, cpu, counter_cpu_fds[cpu]);
+ if (ret)
+ goto layout_init_error;
+ }
+ }
+ return counter;
+
+layout_init_error:
+ lttng_counter_shm_object_table_destroy(counter->object_table, is_daemon);
+error_alloc_object_table:
+error_init_stride:
+ free(counter->percpu_counters);
+error_alloc_percpu:
+ free(counter->dimensions);
+error_dimensions:
+error_sum_step:
+ free(counter);
+ return NULL;
+}
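
A hedged usage sketch, reusing the hypothetical my_config shown earlier: a one-dimension, 64-slot counter created without pre-opened shm file descriptors, whose layouts are populated later through lttng_counter_set_global_shm()/lttng_counter_set_cpu_shm():

	size_t max_nr_elem[1] = { 64 };
	struct lib_counter *c;

	c = lttng_counter_create(&my_config, 1, max_nr_elem,
			1024,	/* global_sum_step */
			-1,	/* global_counter_fd: none yet */
			-1,	/* nr_counter_cpu_fds */
			NULL,	/* counter_cpu_fds */
			false);	/* is_daemon */
	if (!c)
		abort();	/* hypothetical error handling */
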
+
+void lttng_counter_destroy(struct lib_counter *counter)
+{
+ struct lib_counter_config *config = &counter->config;
+
+ if (config->alloc & COUNTER_ALLOC_PER_CPU)
+ free(counter->percpu_counters);
+ lttng_counter_shm_object_table_destroy(counter->object_table, counter->is_daemon);
+ free(counter->dimensions);
+ free(counter);
+}
+
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
+{
+ int shm_fd;
+
+ shm_fd = counter->global_counters.shm_fd;
+ if (shm_fd < 0)
+ return -1;
+ *fd = shm_fd;
+ *len = counter->global_counters.shm_len;
+ return 0;
+}
+
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
+{
+ struct lib_counter_layout *layout;
+ int shm_fd;
+
+	if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -1;
+ layout = &counter->percpu_counters[cpu];
+ shm_fd = layout->shm_fd;
+ if (shm_fd < 0)
+ return -1;
+ *fd = shm_fd;
+ *len = layout->shm_len;
+ return 0;
+}
+
+int lttng_counter_read(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value, bool *overflow,
+ bool *underflow)
+{
+ size_t index;
+ struct lib_counter_layout *layout;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0) {
+ if (cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ } else {
+ layout = &counter->global_counters;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0)
+ return -EINVAL;
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ *value = (int64_t) CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ *value = CMM_LOAD_SHARED(*int_p);
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ *overflow = lttng_bitmap_test_bit(index, layout->overflow_bitmap);
+ *underflow = lttng_bitmap_test_bit(index, layout->underflow_bitmap);
+ return 0;
+}
+
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value, bool *overflow,
+ bool *underflow)
+{
+ int cpu, ret;
+ int64_t v, sum = 0;
+ bool of, uf;
+
+ *overflow = false;
+ *underflow = false;
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ /* Read global counter. */
+ ret = lttng_counter_read(config, counter, dimension_indexes,
+ -1, &v, &of, &uf);
+ if (ret < 0)
+ return ret;
+ sum += v;
+ *overflow |= of;
+ *underflow |= uf;
+ break;
+ case COUNTER_ALLOC_PER_CPU:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_GLOBAL:
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU:
+ lttng_counter_for_each_possible_cpu(cpu) {
+ int64_t old = sum;
+
+ ret = lttng_counter_read(config, counter, dimension_indexes,
+ cpu, &v, &of, &uf);
+ if (ret < 0)
+ return ret;
+ *overflow |= of;
+ *underflow |= uf;
+ /* Overflow is defined on unsigned types. */
+ sum = (int64_t) ((uint64_t) old + (uint64_t) v);
+ if (v > 0 && sum < old)
+ *overflow = true;
+ else if (v < 0 && sum > old)
+ *underflow = true;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+ *value = sum;
+ return 0;
+}
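
Continuing the sketch above, reading back slot 19 across the global and per-cpu views (includes and error handling elided):

	size_t dim_idx[1] = { 19 };
	int64_t value;
	bool of, uf;

	if (!lttng_counter_aggregate(&my_config, c, dim_idx, &value, &of, &uf)
			&& !of && !uf)
		printf("count = %" PRId64 "\n", value);	/* <inttypes.h> */
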
+
+static
+int lttng_counter_clear_cpu(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu)
+{
+ size_t index;
+ struct lib_counter_layout *layout;
+
+ if (caa_unlikely(lttng_counter_validate_indexes(config, counter, dimension_indexes)))
+ return -EOVERFLOW;
+ index = lttng_counter_get_index(config, counter, dimension_indexes);
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ if (cpu < 0 || cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ break;
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0) {
+ if (cpu >= lttng_counter_num_possible_cpus())
+ return -EINVAL;
+ layout = &counter->percpu_counters[cpu];
+ } else {
+ layout = &counter->global_counters;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ if (cpu >= 0)
+ return -EINVAL;
+ layout = &counter->global_counters;
+ break;
+ default:
+ return -EINVAL;
+ }
+ if (caa_unlikely(!layout->counters))
+ return -ENODEV;
+
+ switch (config->counter_size) {
+ case COUNTER_SIZE_8_BIT:
+ {
+ int8_t *int_p = (int8_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+ case COUNTER_SIZE_16_BIT:
+ {
+ int16_t *int_p = (int16_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+ case COUNTER_SIZE_32_BIT:
+ {
+ int32_t *int_p = (int32_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+#if CAA_BITS_PER_LONG == 64
+ case COUNTER_SIZE_64_BIT:
+ {
+ int64_t *int_p = (int64_t *) layout->counters + index;
+ CMM_STORE_SHARED(*int_p, 0);
+ break;
+ }
+#endif
+ default:
+ return -EINVAL;
+ }
+ lttng_bitmap_clear_bit(index, layout->overflow_bitmap);
+ lttng_bitmap_clear_bit(index, layout->underflow_bitmap);
+ return 0;
+}
+
+int lttng_counter_clear(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+{
+ int cpu, ret;
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU:
+ break;
+ case COUNTER_ALLOC_GLOBAL: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ /* Clear global counter. */
+ ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, -1);
+ if (ret < 0)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (config->alloc) {
+ case COUNTER_ALLOC_PER_CPU: /* Fallthrough */
+ case COUNTER_ALLOC_PER_CPU | COUNTER_ALLOC_GLOBAL:
+ lttng_counter_for_each_possible_cpu(cpu) {
+ ret = lttng_counter_clear_cpu(config, counter, dimension_indexes, cpu);
+ if (ret < 0)
+ return ret;
+ }
+ break;
+ case COUNTER_ALLOC_GLOBAL:
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng Counters API
+ */
+
+#ifndef _LTTNG_COUNTER_H
+#define _LTTNG_COUNTER_H
+
+#include <stdint.h>
+#include <lttng/ust-config.h>
+#include "counter-types.h"
+
+/* max_nr_elem is for each dimension. */
+struct lib_counter *lttng_counter_create(const struct lib_counter_config *config,
+ size_t nr_dimensions,
+ const size_t *max_nr_elem,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_destroy(struct lib_counter *counter)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_set_global_shm(struct lib_counter *counter, int fd)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_set_cpu_shm(struct lib_counter *counter, int cpu, int fd)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_get_global_shm(struct lib_counter *counter, int *fd, size_t *len)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_get_cpu_shm(struct lib_counter *counter, int cpu, int *fd, size_t *len)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_read(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_aggregate(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow)
+ __attribute__((visibility("hidden")));
+
+int lttng_counter_clear(const struct lib_counter_config *config,
+ struct lib_counter *counter,
+ const size_t *dimension_indexes)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_COUNTER_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "shm.h"
+#include <unistd.h>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h> /* For mode constants */
+#include <fcntl.h> /* For O_* constants */
+#include <assert.h>
+#include <stdio.h>
+#include <signal.h>
+#include <dirent.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef HAVE_LIBNUMA
+#include <numa.h>
+#include <numaif.h>
+#endif
+
+#include <lttng/ust-utils.h>
+
+#include <ust-helper.h>
+#include <ust-fd.h>
+#include "../libringbuffer/mmap.h"
+
+/*
+ * Ensure we have the required amount of space available by writing 0
+ * into the entire buffer. Not doing so can trigger SIGBUS when going
+ * beyond the available shm space.
+ */
+static
+int zero_file(int fd, size_t len)
+{
+ ssize_t retlen;
+ size_t written = 0;
+ char *zeropage;
+ long pagelen;
+ int ret;
+
+ pagelen = sysconf(_SC_PAGESIZE);
+ if (pagelen < 0)
+ return (int) pagelen;
+ zeropage = calloc(pagelen, 1);
+ if (!zeropage)
+ return -ENOMEM;
+
+ while (len > written) {
+ do {
+ retlen = write(fd, zeropage,
+ min_t(size_t, pagelen, len - written));
+		} while (retlen == -1 && errno == EINTR);
+ if (retlen < 0) {
+ ret = (int) retlen;
+ goto error;
+ }
+ written += retlen;
+ }
+ ret = 0;
+error:
+ free(zeropage);
+ return ret;
+}
+
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+{
+ struct lttng_counter_shm_object_table *table;
+
+ table = zmalloc(sizeof(struct lttng_counter_shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]));
+ if (!table)
+ return NULL;
+ table->size = max_nb_obj;
+ return table;
+}
+
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_shm(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ int cpu_fd)
+{
+ int shmfd, ret;
+ struct lttng_counter_shm_object *obj;
+ char *memory_map;
+
+ if (cpu_fd < 0)
+ return NULL;
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ /* create shm */
+
+ shmfd = cpu_fd;
+ ret = zero_file(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("zero_file");
+ goto error_zero_file;
+ }
+ ret = ftruncate(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("ftruncate");
+ goto error_ftruncate;
+ }
+ /*
+ * Also ensure the file metadata is synced with the storage by using
+ * fsync(2).
+ */
+ ret = fsync(shmfd);
+ if (ret) {
+ PERROR("fsync");
+ goto error_fsync;
+ }
+ obj->shm_fd_ownership = 0;
+ obj->shm_fd = shmfd;
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_mmap:
+error_fsync:
+error_ftruncate:
+error_zero_file:
+ return NULL;
+}
+
+static
+struct lttng_counter_shm_object *_lttng_counter_shm_object_table_alloc_mem(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+ void *memory_map;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ memory_map = zmalloc(memory_map_size);
+ if (!memory_map)
+ goto alloc_error;
+
+ /* no shm_fd */
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+alloc_error:
+ return NULL;
+}
+
+/*
+ * libnuma prints errors on the console even for numa_available().
+ * Work around this limitation by using get_mempolicy() directly to
+ * check whether the kernel supports mempolicy.
+ */
+#ifdef HAVE_LIBNUMA
+static bool lttng_is_numa_available(void)
+{
+ int ret;
+
+ ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
+ if (ret && errno == ENOSYS) {
+ return false;
+ }
+ return numa_available() > 0;
+}
+#endif
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ enum lttng_counter_shm_object_type type,
+ int cpu_fd,
+ int cpu)
+{
+ struct lttng_counter_shm_object *shm_object;
+#ifdef HAVE_LIBNUMA
+ int oldnode = 0, node;
+ bool numa_avail;
+
+ numa_avail = lttng_is_numa_available();
+ if (numa_avail) {
+ oldnode = numa_preferred();
+ if (cpu >= 0) {
+ node = numa_node_of_cpu(cpu);
+ if (node >= 0)
+ numa_set_preferred(node);
+ }
+ if (cpu < 0 || node < 0)
+ numa_set_localalloc();
+ }
+#endif /* HAVE_LIBNUMA */
+ switch (type) {
+ case LTTNG_COUNTER_SHM_OBJECT_SHM:
+ shm_object = _lttng_counter_shm_object_table_alloc_shm(table, memory_map_size,
+ cpu_fd);
+ break;
+ case LTTNG_COUNTER_SHM_OBJECT_MEM:
+ shm_object = _lttng_counter_shm_object_table_alloc_mem(table, memory_map_size);
+ break;
+ default:
+ assert(0);
+ }
+#ifdef HAVE_LIBNUMA
+ if (numa_avail)
+ numa_set_preferred(oldnode);
+#endif /* HAVE_LIBNUMA */
+ return shm_object;
+}
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+ int shm_fd,
+ size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+ char *memory_map;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+
+ obj = &table->objects[table->allocated_len];
+
+ obj->shm_fd = shm_fd;
+ obj->shm_fd_ownership = 1;
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_mmap:
+ return NULL;
+}
+
+/*
+ * Passing ownership of mem to object.
+ */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+ void *mem, size_t memory_map_size)
+{
+ struct lttng_counter_shm_object *obj;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ obj->type = LTTNG_COUNTER_SHM_OBJECT_MEM;
+ obj->memory_map = mem;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+	return obj;
+}
+
+static
+void lttng_counter_shmp_object_destroy(struct lttng_counter_shm_object *obj, int consumer)
+{
+ switch (obj->type) {
+ case LTTNG_COUNTER_SHM_OBJECT_SHM:
+ {
+ int ret;
+
+ ret = munmap(obj->memory_map, obj->memory_map_size);
+ if (ret) {
+			PERROR("munmap");
+ assert(0);
+ }
+
+ if (obj->shm_fd_ownership) {
+ /* Delete FDs only if called from app (not consumer). */
+ if (!consumer) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(obj->shm_fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(obj->shm_fd);
+ } else {
+ PERROR("close");
+ assert(0);
+ }
+ lttng_ust_unlock_fd_tracker();
+ } else {
+ ret = close(obj->shm_fd);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+ }
+ break;
+ }
+ case LTTNG_COUNTER_SHM_OBJECT_MEM:
+ {
+ free(obj->memory_map);
+ break;
+ }
+ default:
+ assert(0);
+ }
+}
+
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
+{
+ int i;
+
+ for (i = 0; i < table->allocated_len; i++)
+ lttng_counter_shmp_object_destroy(&table->objects[i], consumer);
+ free(table);
+}
+
+/*
+ * lttng_counter_zalloc_shm - allocate memory within a shm object.
+ *
+ * Memory is already zeroed at allocation (zero_file() for shm objects,
+ * zmalloc() for in-process memory).
+ * *NOT* multithread-safe (should be protected by a mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
+{
+ struct lttng_counter_shm_ref ref;
+ struct lttng_counter_shm_ref shm_ref_error = { -1, -1 };
+
+ if (obj->memory_map_size - obj->allocated_len < len)
+ return shm_ref_error;
+ ref.index = obj->index;
+ ref.offset = obj->allocated_len;
+ obj->allocated_len += len;
+ return ref;
+}
+
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
+{
+ size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
+ obj->allocated_len += offset_len;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SHM_H
+#define _LIBCOUNTER_SHM_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <errno.h>
+#include <usterr-signal-safe.h>
+#include <urcu/compiler.h>
+#include "shm_types.h"
+
+/* lttng_counter_handle_create - for UST. */
+extern
+struct lttng_counter_shm_handle *lttng_counter_handle_create(void *data,
+ uint64_t memory_map_size, int wakeup_fd);
+/* lttng_counter_handle_add_cpu - for UST. */
+extern
+int lttng_counter_handle_add_cpu(struct lttng_counter_shm_handle *handle,
+ int shm_fd, uint32_t cpu_nr,
+ uint64_t memory_map_size);
+
+unsigned int lttng_counter_handle_get_nr_cpus(struct lttng_counter_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Pointer dereferencing. We don't trust the shm_ref, so we validate
+ * both the index and offset with known boundaries.
+ *
+ * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
+ * target type, even in the presence of shm_ref modification by an
+ * untrusted process with write access to the shm_ref. We return a
+ * NULL pointer if the ranges are invalid.
+ */
+static inline
+char *_lttng_counter_shmp_offset(struct lttng_counter_shm_object_table *table,
+ struct lttng_counter_shm_ref *ref,
+ size_t idx, size_t elem_size)
+{
+ struct lttng_counter_shm_object *obj;
+ size_t objindex, ref_offset;
+
+ objindex = (size_t) ref->index;
+ if (caa_unlikely(objindex >= table->allocated_len))
+ return NULL;
+ obj = &table->objects[objindex];
+ ref_offset = (size_t) ref->offset;
+ ref_offset += idx * elem_size;
+ /* Check if part of the element returned would exceed the limits. */
+ if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
+ return NULL;
+ return &obj->memory_map[ref_offset];
+}
+
+#define lttng_counter_shmp_index(handle, ref, index) \
+ ({ \
+ __typeof__((ref)._type) ____ptr_ret; \
+ ____ptr_ret = (__typeof__(____ptr_ret)) _lttng_counter_shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*____ptr_ret)); \
+ ____ptr_ret; \
+ })
+
+#define lttng_counter_shmp(handle, ref) lttng_counter_shmp_index(handle, ref, 0)
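
A hedged usage sketch (the slots member and surrounding names are hypothetical); a NULL return means the untrusted shm_ref pointed outside the mapped object, so callers must always test the result:

	/* Assumes: DECLARE_LTTNG_COUNTER_SHMP(uint64_t, slots); in a struct. */
	uint64_t *p = lttng_counter_shmp_index(handle, counters->slots, i);

	if (p)
		total += *p;
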
+
+static inline
+void _lttng_counter_set_shmp(struct lttng_counter_shm_ref *ref, struct lttng_counter_shm_ref src)
+{
+ *ref = src;
+}
+
+#define lttng_counter_set_shmp(ref, src) _lttng_counter_set_shmp(&(ref)._ref, src)
+
+struct lttng_counter_shm_object_table *lttng_counter_shm_object_table_create(size_t max_nb_obj)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_alloc(struct lttng_counter_shm_object_table *table,
+ size_t memory_map_size,
+ enum lttng_counter_shm_object_type type,
+ const int cpu_fd,
+ int cpu)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_shm(struct lttng_counter_shm_object_table *table,
+ int shm_fd, size_t memory_map_size)
+ __attribute__((visibility("hidden")));
+
+/* mem ownership is passed to lttng_counter_shm_object_table_append_mem(). */
+struct lttng_counter_shm_object *lttng_counter_shm_object_table_append_mem(struct lttng_counter_shm_object_table *table,
+ void *mem, size_t memory_map_size)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_shm_object_table_destroy(struct lttng_counter_shm_object_table *table, int consumer)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_counter_zalloc_shm - allocate memory within a shm object.
+ *
+ * Memory is already zeroed at allocation (zero_file() for shm objects,
+ * zmalloc() for in-process memory).
+ * *NOT* multithread-safe (should be protected by a mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct lttng_counter_shm_ref lttng_counter_zalloc_shm(struct lttng_counter_shm_object *obj, size_t len)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_align_shm(struct lttng_counter_shm_object *obj, size_t align)
+ __attribute__((visibility("hidden")));
+
+static inline
+int lttng_counter_shm_get_shm_fd(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref)
+{
+ struct lttng_counter_shm_object_table *table = handle->table;
+ struct lttng_counter_shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->shm_fd;
+}
+
+static inline
+int lttng_counter_shm_get_shm_size(struct lttng_counter_shm_handle *handle, struct lttng_counter_shm_ref *ref,
+ uint64_t *size)
+{
+ struct lttng_counter_shm_object_table *table = handle->table;
+ struct lttng_counter_shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ *size = obj->memory_map_size;
+ return 0;
+}
+
+#endif /* _LIBCOUNTER_SHM_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SHM_INTERNAL_H
+#define _LIBCOUNTER_SHM_INTERNAL_H
+
+struct lttng_counter_shm_ref {
+ volatile ssize_t index; /* within the object table */
+ volatile ssize_t offset; /* within the object */
+};
+
+#define DECLARE_LTTNG_COUNTER_SHMP(type, name) \
+ union { \
+ struct lttng_counter_shm_ref _ref; \
+ type *_type; \
+ } name
+
+#endif /* _LIBCOUNTER_SHM_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SHM_TYPES_H
+#define _LIBCOUNTER_SHM_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include "shm_internal.h"
+
+enum lttng_counter_shm_object_type {
+ LTTNG_COUNTER_SHM_OBJECT_SHM,
+ LTTNG_COUNTER_SHM_OBJECT_MEM,
+};
+
+struct lttng_counter_shm_object {
+ enum lttng_counter_shm_object_type type;
+ size_t index; /* within the object table */
+ int shm_fd; /* shm fd */
+ char *memory_map;
+ size_t memory_map_size;
+ uint64_t allocated_len;
+ int shm_fd_ownership;
+};
+
+struct lttng_counter_shm_object_table {
+ size_t size;
+ size_t allocated_len;
+ struct lttng_counter_shm_object objects[];
+};
+
+struct lttng_counter_shm_handle {
+ struct lttng_counter_shm_object_table *table;
+};
+
+#endif /* _LIBCOUNTER_SHM_TYPES_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+
+#include <unistd.h>
+#include <pthread.h>
+#include "smp.h"
+
+int __lttng_counter_num_possible_cpus;
+
+#if (defined(__GLIBC__) || defined( __UCLIBC__))
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result;
+
+	/*
+	 * On Linux, when some processors are offline, _SC_NPROCESSORS_CONF
+	 * counts the offline processors, whereas _SC_NPROCESSORS_ONLN does
+	 * not. If we used _SC_NPROCESSORS_ONLN, getcpu() could return a
+	 * value greater than this sysconf, in which case the arrays indexed
+	 * by processor would overflow.
+	 */
+ result = sysconf(_SC_NPROCESSORS_CONF);
+ if (result == -1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+
+#else
+
+/*
+ * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
+ * return the number of configured CPUs in the system but relies on the cpu
+ * affinity mask of the current task.
+ *
+ * So instead we use a strategy similar to glibc's: count the cpu
+ * directories in "/sys/devices/system/cpu" and fall back on the value
+ * from sysconf if that fails.
+ */
+
+#include <dirent.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#define __max(a,b) ((a)>(b)?(a):(b))
+
+void _lttng_counter_get_num_possible_cpus(void)
+{
+ int result, count = 0;
+ DIR *cpudir;
+ struct dirent *entry;
+
+ cpudir = opendir("/sys/devices/system/cpu");
+ if (cpudir == NULL)
+ goto end;
+
+ /*
+	 * Count the number of directories named "cpu" followed by an
+	 * integer. This is the same strategy glibc uses.
+ */
+ while ((entry = readdir(cpudir))) {
+ if (entry->d_type == DT_DIR &&
+ strncmp(entry->d_name, "cpu", 3) == 0) {
+
+ char *endptr;
+ unsigned long cpu_num;
+
+ cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
+ if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
+ && (*endptr == '\0')) {
+ count++;
+ }
+ }
+ }
+
+end:
+ /*
+ * Get the sysconf value as a fallback. Keep the highest number.
+ */
+ result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
+
+ /*
+ * If both methods failed, don't store the value.
+ */
+ if (result < 1)
+ return;
+ __lttng_counter_num_possible_cpus = result;
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBCOUNTER_SMP_H
+#define _LIBCOUNTER_SMP_H
+
+/*
+ * 4kB of per-cpu data available.
+ */
+#define LTTNG_COUNTER_PER_CPU_MEM_SIZE 4096
+
+extern int __lttng_counter_num_possible_cpus
+ __attribute__((visibility("hidden")));
+
+extern void _lttng_counter_get_num_possible_cpus(void)
+ __attribute__((visibility("hidden")));
+
+static inline
+int lttng_counter_num_possible_cpus(void)
+{
+ if (!__lttng_counter_num_possible_cpus)
+ _lttng_counter_get_num_possible_cpus();
+ return __lttng_counter_num_possible_cpus;
+}
+
+#define lttng_counter_for_each_possible_cpu(cpu) \
+ for ((cpu) = 0; (cpu) < lttng_counter_num_possible_cpus(); (cpu)++)
+
+#endif /* _LIBCOUNTER_SMP_H */
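As an illustration of how these helpers fit together, here is a hedged sketch
(not part of this patch) that sizes a per-CPU area using
LTTNG_COUNTER_PER_CPU_MEM_SIZE and then walks every possible CPU:

    #include <stdlib.h>
    #include "smp.h"

    static char *alloc_per_cpu_area(void)
    {
        size_t len;
        char *base;
        int cpu;

        /* Reserve one slot per possible CPU, as counted by smp.c. */
        len = (size_t) lttng_counter_num_possible_cpus()
                * LTTNG_COUNTER_PER_CPU_MEM_SIZE;
        base = calloc(1, len);
        if (!base)
            return NULL;
        lttng_counter_for_each_possible_cpu(cpu) {
            char *slot = base + (size_t) cpu * LTTNG_COUNTER_PER_CPU_MEM_SIZE;
            /* ... initialize this CPU's slot ... */
            (void) slot;
        }
        return base;
    }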
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+noinst_LTLIBRARIES = liblttng-ust-comm.la
+
+liblttng_ust_comm_la_SOURCES = lttng-ust-comm.c lttng-ust-fd-tracker.c
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <limits.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/un.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+
+#include <lttng/ust-ctl.h>
+#include <ust-comm.h>
+#include <ust-fd.h>
+#include <ust-helper.h>
+#include <lttng/ust-error.h>
+#include <ust-dynamic-type.h>
+#include <usterr-signal-safe.h>
+
+#include "../liblttng-ust/ust-events-internal.h"
+#include "../liblttng-ust/compat.h"
+
+#define USTCOMM_CODE_OFFSET(code) \
+ (code == LTTNG_UST_OK ? 0 : (code - LTTNG_UST_ERR + 1))
+
+#define USTCOMM_MAX_SEND_FDS 4
+
+static
+ssize_t count_fields_recursive(size_t nr_fields,
+ const struct lttng_ust_event_field **lttng_fields);
+static
+int serialize_one_field(struct lttng_ust_session *session,
+ struct ustctl_field *fields, size_t *iter_output,
+ const struct lttng_ust_event_field *lf);
+static
+int serialize_fields(struct lttng_ust_session *session,
+ struct ustctl_field *ustctl_fields,
+ size_t *iter_output, size_t nr_lttng_fields,
+ const struct lttng_ust_event_field **lttng_fields);
+
+/*
+ * Human readable error message.
+ */
+static const char *ustcomm_readable_code[] = {
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_OK) ] = "Success",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR) ] = "Unknown error",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_NOENT) ] = "No entry",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_EXIST) ] = "Object already exists",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_INVAL) ] = "Invalid argument",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_PERM) ] = "Permission denied",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_NOSYS) ] = "Not implemented",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_EXITING) ] = "Process is exiting",
+
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_INVAL_MAGIC) ] = "Invalid magic number",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_INVAL_SOCKET_TYPE) ] = "Invalid socket type",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_UNSUP_MAJOR) ] = "Unsupported major version",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_PEERCRED) ] = "Cannot get unix socket peer credentials",
+ [ USTCOMM_CODE_OFFSET(LTTNG_UST_ERR_PEERCRED_PID) ] = "Peer credentials PID is invalid. Socket appears to belong to a distinct, non-nested pid namespace.",
+};
+
+/*
+ * lttng_ust_strerror
+ * @code: must be a negative value of enum lttng_ust_error_code (or 0).
+ *
+ * Returns a pointer to a string representing a human-readable error code
+ * from the lttng_ust_error_code enum.
+ */
+const char *lttng_ust_strerror(int code)
+{
+ code = -code;
+
+ if (code < LTTNG_UST_OK || code >= LTTNG_UST_ERR_NR)
+ code = LTTNG_UST_ERR;
+
+ return ustcomm_readable_code[USTCOMM_CODE_OFFSET(code)];
+}
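A minimal usage sketch: codes outside the known range are collapsed to
LTTNG_UST_ERR, so the lookup below is always safe.

    /* Prints "No entry"; unknown codes print "Unknown error". */
    fprintf(stderr, "%s\n", lttng_ust_strerror(-LTTNG_UST_ERR_NOENT));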
+
+/*
+ * ustcomm_connect_unix_sock
+ *
+ * Connect to unix socket using the path name.
+ *
+ * Caller handles FD tracker.
+ */
+int ustcomm_connect_unix_sock(const char *pathname, long timeout)
+{
+ struct sockaddr_un sun;
+ int fd, ret;
+
+ /*
+ * libust threads require the close-on-exec flag for all
+ * resources so they do not leak file descriptors upon exec.
+ * SOCK_CLOEXEC is not used since it is Linux-specific.
+ */
+ fd = socket(PF_UNIX, SOCK_STREAM, 0);
+ if (fd < 0) {
+ PERROR("socket");
+ ret = -errno;
+ goto error;
+ }
+ if (timeout >= 0) {
+ /* Give at least 10ms. */
+ if (timeout < 10)
+ timeout = 10;
+ ret = ustcomm_setsockopt_snd_timeout(fd, timeout);
+ if (ret < 0) {
+ WARN("Error setting connect socket send timeout");
+ }
+ }
+ ret = fcntl(fd, F_SETFD, FD_CLOEXEC);
+ if (ret < 0) {
+ PERROR("fcntl");
+ ret = -errno;
+ goto error_fcntl;
+ }
+
+ memset(&sun, 0, sizeof(sun));
+ sun.sun_family = AF_UNIX;
+ strncpy(sun.sun_path, pathname, sizeof(sun.sun_path));
+ sun.sun_path[sizeof(sun.sun_path) - 1] = '\0';
+
+ ret = connect(fd, (struct sockaddr *) &sun, sizeof(sun));
+ if (ret < 0) {
+ /*
+ * Don't print message on connect ENOENT error, because
+ * connect is used in normal execution to detect if
+ * sessiond is alive. ENOENT is when the unix socket
+ * file does not exist, and ECONNREFUSED is when the
+ * file exists but no sessiond is listening.
+ */
+ if (errno != ECONNREFUSED && errno != ECONNRESET
+ && errno != ENOENT && errno != EACCES)
+ PERROR("connect");
+ ret = -errno;
+ if (ret == -ECONNREFUSED || ret == -ECONNRESET)
+ ret = -EPIPE;
+ goto error_connect;
+ }
+
+ return fd;
+
+error_connect:
+error_fcntl:
+ {
+ int closeret;
+
+ closeret = close(fd);
+ if (closeret)
+ PERROR("close");
+ }
+error:
+ return ret;
+}
+
+/*
+ * ustcomm_accept_unix_sock
+ *
+ * Do an accept(2) on the sock and return the new file
+ * descriptor. The socket MUST have been bound with bind(2) beforehand.
+ */
+int ustcomm_accept_unix_sock(int sock)
+{
+ int new_fd;
+ struct sockaddr_un sun;
+ socklen_t len = 0;
+
+ /* Blocking call */
+ new_fd = accept(sock, (struct sockaddr *) &sun, &len);
+ if (new_fd < 0) {
+ if (errno != ECONNABORTED)
+ PERROR("accept");
+ new_fd = -errno;
+ if (new_fd == -ECONNABORTED)
+ new_fd = -EPIPE;
+ }
+ return new_fd;
+}
+
+/*
+ * ustcomm_create_unix_sock
+ *
+ * Create an AF_UNIX local socket using pathname, bind the
+ * socket upon creation, and return the fd.
+ */
+int ustcomm_create_unix_sock(const char *pathname)
+{
+ struct sockaddr_un sun;
+ int fd, ret;
+
+ /* Create server socket */
+ if ((fd = socket(PF_UNIX, SOCK_STREAM, 0)) < 0) {
+ PERROR("socket");
+ ret = -errno;
+ goto error;
+ }
+
+ memset(&sun, 0, sizeof(sun));
+ sun.sun_family = AF_UNIX;
+ strncpy(sun.sun_path, pathname, sizeof(sun.sun_path));
+ sun.sun_path[sizeof(sun.sun_path) - 1] = '\0';
+
+ /* Unlink the old file if present */
+ (void) unlink(pathname);
+ ret = bind(fd, (struct sockaddr *) &sun, sizeof(sun));
+ if (ret < 0) {
+ PERROR("bind");
+ ret = -errno;
+ goto error_close;
+ }
+
+ return fd;
+
+error_close:
+ {
+ int closeret;
+
+ closeret = close(fd);
+ if (closeret) {
+ PERROR("close");
+ }
+ }
+error:
+ return ret;
+}
+
+/*
+ * ustcomm_listen_unix_sock
+ *
+ * Make the socket listen using LTTNG_UST_COMM_MAX_LISTEN.
+ */
+int ustcomm_listen_unix_sock(int sock)
+{
+ int ret;
+
+ ret = listen(sock, LTTNG_UST_COMM_MAX_LISTEN);
+ if (ret < 0) {
+ ret = -errno;
+ PERROR("listen");
+ }
+
+ return ret;
+}
+
+/*
+ * ustcomm_close_unix_sock
+ *
+ * Cleanly shut down a unix socket.
+ *
+ * Handles fd tracker internally.
+ */
+int ustcomm_close_unix_sock(int sock)
+{
+ int ret;
+
+ lttng_ust_lock_fd_tracker();
+ ret = close(sock);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(sock);
+ } else {
+ PERROR("close");
+ ret = -errno;
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ return ret;
+}
+
+/*
+ * ustcomm_recv_unix_sock
+ *
+ * Receive data of size len and put that data into
+ * the buf param, using the recvmsg API.
+ * Return the size of received data.
+ * Return 0 on orderly shutdown.
+ */
+ssize_t ustcomm_recv_unix_sock(int sock, void *buf, size_t len)
+{
+ struct msghdr msg;
+ struct iovec iov[1];
+ ssize_t ret = -1;
+ size_t len_last;
+
+ memset(&msg, 0, sizeof(msg));
+
+ iov[0].iov_base = buf;
+ iov[0].iov_len = len;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 1;
+
+ do {
+ len_last = iov[0].iov_len;
+ ret = recvmsg(sock, &msg, 0);
+ if (ret > 0) {
+ iov[0].iov_base += ret;
+ iov[0].iov_len -= ret;
+ assert(ret <= len_last);
+ }
+ } while ((ret > 0 && ret < len_last) || (ret < 0 && errno == EINTR));
+
+ if (ret < 0) {
+ int shutret;
+
+ if (errno != EPIPE && errno != ECONNRESET && errno != ECONNREFUSED)
+ PERROR("recvmsg");
+ ret = -errno;
+ if (ret == -ECONNRESET || ret == -ECONNREFUSED)
+ ret = -EPIPE;
+
+ shutret = shutdown(sock, SHUT_RDWR);
+ if (shutret)
+ ERR("Socket shutdown error");
+ } else if (ret > 0) {
+ ret = len;
+ }
+ /* ret = 0 means an orderly shutdown. */
+
+ return ret;
+}
+
+/*
+ * ustcomm_send_unix_sock
+ *
+ * Send buf data of size len, using the sendmsg API.
+ * Return the size of sent data.
+ */
+ssize_t ustcomm_send_unix_sock(int sock, const void *buf, size_t len)
+{
+ struct msghdr msg;
+ struct iovec iov[1];
+ ssize_t ret;
+
+ memset(&msg, 0, sizeof(msg));
+
+ iov[0].iov_base = (void *) buf;
+ iov[0].iov_len = len;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 1;
+
+ /*
+ * Use MSG_NOSIGNAL when sending data from sessiond to
+ * libust, so libust does not receive an unhandled SIGPIPE or
+ * SIGURG. The sessiond receiver side can be made more resilient
+ * by ignoring SIGPIPE, but we don't have this luxury on the
+ * libust side.
+ */
+ do {
+ ret = sendmsg(sock, &msg, MSG_NOSIGNAL);
+ } while (ret < 0 && errno == EINTR);
+
+ if (ret < 0) {
+ int shutret;
+
+ if (errno != EPIPE && errno != ECONNRESET)
+ PERROR("sendmsg");
+ ret = -errno;
+ if (ret == -ECONNRESET)
+ ret = -EPIPE;
+
+ shutret = shutdown(sock, SHUT_RDWR);
+ if (shutret)
+ ERR("Socket shutdown error");
+ }
+
+ return ret;
+}
+
+/*
+ * Send a message accompanied by fd(s) over a unix socket.
+ *
+ * Returns the size of data sent, or negative error value.
+ */
+ssize_t ustcomm_send_fds_unix_sock(int sock, int *fds, size_t nb_fd)
+{
+ struct msghdr msg;
+ struct cmsghdr *cmptr;
+ struct iovec iov[1];
+ ssize_t ret = -1;
+ unsigned int sizeof_fds = nb_fd * sizeof(int);
+ char tmp[CMSG_SPACE(sizeof_fds)];
+ char dummy = 0;
+
+ memset(&msg, 0, sizeof(msg));
+ memset(tmp, 0, CMSG_SPACE(sizeof_fds) * sizeof(char));
+
+ if (nb_fd > USTCOMM_MAX_SEND_FDS)
+ return -EINVAL;
+
+ msg.msg_control = (caddr_t)tmp;
+ msg.msg_controllen = CMSG_LEN(sizeof_fds);
+
+ cmptr = CMSG_FIRSTHDR(&msg);
+ if (!cmptr)
+ return -EINVAL;
+ cmptr->cmsg_level = SOL_SOCKET;
+ cmptr->cmsg_type = SCM_RIGHTS;
+ cmptr->cmsg_len = CMSG_LEN(sizeof_fds);
+ memcpy(CMSG_DATA(cmptr), fds, sizeof_fds);
+ /* Sum of the length of all control messages in the buffer: */
+ msg.msg_controllen = cmptr->cmsg_len;
+
+ iov[0].iov_base = &dummy;
+ iov[0].iov_len = 1;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 1;
+
+ do {
+ ret = sendmsg(sock, &msg, MSG_NOSIGNAL);
+ } while (ret < 0 && errno == EINTR);
+ if (ret < 0) {
+ /*
+ * We consider EPIPE and ECONNRESET as expected.
+ */
+ if (errno != EPIPE && errno != ECONNRESET) {
+ PERROR("sendmsg");
+ }
+ ret = -errno;
+ if (ret == -ECONNRESET)
+ ret = -EPIPE;
+ }
+ return ret;
+}
+
+/*
+ * Recv a message accompanied by fd(s) from a unix socket.
+ *
+ * Expect at most "nb_fd" file descriptors. Returns the number of fd
+ * actually received in nb_fd.
+ * Returns -EPIPE on orderly shutdown.
+ */
+ssize_t ustcomm_recv_fds_unix_sock(int sock, int *fds, size_t nb_fd)
+{
+ struct iovec iov[1];
+ ssize_t ret = 0;
+ struct cmsghdr *cmsg;
+ size_t sizeof_fds = nb_fd * sizeof(int);
+ char recv_fd[CMSG_SPACE(sizeof_fds)];
+ struct msghdr msg;
+ char dummy;
+ int i;
+
+ memset(&msg, 0, sizeof(msg));
+
+ /* Prepare to receive the structures */
+ iov[0].iov_base = &dummy;
+ iov[0].iov_len = 1;
+ msg.msg_iov = iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = recv_fd;
+ msg.msg_controllen = sizeof(recv_fd);
+
+ do {
+ ret = recvmsg(sock, &msg, 0);
+ } while (ret < 0 && errno == EINTR);
+ if (ret < 0) {
+ if (errno != EPIPE && errno != ECONNRESET) {
+ PERROR("recvmsg fds");
+ }
+ ret = -errno;
+ if (ret == -ECONNRESET)
+ ret = -EPIPE;
+ goto end;
+ }
+ if (ret == 0) {
+ /* orderly shutdown */
+ ret = -EPIPE;
+ goto end;
+ }
+ if (ret != 1) {
+ ERR("Error: Received %zd bytes, expected %d\n",
+ ret, 1);
+ goto end;
+ }
+ if (msg.msg_flags & MSG_CTRUNC) {
+ ERR("Error: Control message truncated.\n");
+ ret = -1;
+ goto end;
+ }
+ cmsg = CMSG_FIRSTHDR(&msg);
+ if (!cmsg) {
+ ERR("Error: Invalid control message header\n");
+ ret = -1;
+ goto end;
+ }
+ if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
+ ERR("Didn't received any fd\n");
+ ret = -1;
+ goto end;
+ }
+ if (cmsg->cmsg_len != CMSG_LEN(sizeof_fds)) {
+ ERR("Error: Received %zu bytes of ancillary data, expected %zu\n",
+ (size_t) cmsg->cmsg_len, (size_t) CMSG_LEN(sizeof_fds));
+ ret = -1;
+ goto end;
+ }
+
+ memcpy(fds, CMSG_DATA(cmsg), sizeof_fds);
+
+ /* Set FD_CLOEXEC */
+ for (i = 0; i < nb_fd; i++) {
+ ret = fcntl(fds[i], F_SETFD, FD_CLOEXEC);
+ if (ret < 0) {
+ PERROR("fcntl failed to set FD_CLOEXEC on fd %d",
+ fds[i]);
+ }
+ }
+
+ ret = nb_fd;
+end:
+ return ret;
+}
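A hedged round-trip sketch of the two fd-passing helpers over a socketpair
(illustrative, not part of this patch); real callers below additionally hold
the fd-tracker lock so the received fd can be registered atomically:

    #include <assert.h>
    #include <fcntl.h>
    #include <stdlib.h>
    #include <sys/socket.h>
    #include <ust-comm.h>

    static void fd_pass_demo(void)
    {
        int pair[2], fd_in, fd_out;

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, pair))
            abort();
        fd_in = open("/dev/null", O_RDONLY);

        /* Pass one descriptor from pair[0] to pair[1] via SCM_RIGHTS. */
        assert(ustcomm_send_fds_unix_sock(pair[0], &fd_in, 1) >= 0);
        assert(ustcomm_recv_fds_unix_sock(pair[1], &fd_out, 1) == 1);
    }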
+
+int ustcomm_send_app_msg(int sock, struct ustcomm_ust_msg *lum)
+{
+ ssize_t len;
+
+ len = ustcomm_send_unix_sock(sock, lum, sizeof(*lum));
+ switch (len) {
+ case sizeof(*lum):
+ break;
+ default:
+ if (len < 0) {
+ return len;
+ } else {
+ ERR("incorrect message size: %zd\n", len);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+int ustcomm_recv_app_reply(int sock, struct ustcomm_ust_reply *lur,
+ uint32_t expected_handle, uint32_t expected_cmd)
+{
+ ssize_t len;
+
+ memset(lur, 0, sizeof(*lur));
+ len = ustcomm_recv_unix_sock(sock, lur, sizeof(*lur));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ return -EPIPE;
+ case sizeof(*lur):
+ {
+ int err = 0;
+
+ if (lur->handle != expected_handle) {
+ ERR("Unexpected result message handle: "
+ "expected: %u vs received: %u\n",
+ expected_handle, lur->handle);
+ err = 1;
+ }
+ if (lur->cmd != expected_cmd) {
+ ERR("Unexpected result message command "
+ "expected: %u vs received: %u\n",
+ expected_cmd, lur->cmd);
+ err = 1;
+ }
+ if (err) {
+ return -EINVAL;
+ } else {
+ return lur->ret_code;
+ }
+ }
+ default:
+ if (len >= 0) {
+ ERR("incorrect message size: %zd\n", len);
+ }
+ return len;
+ }
+}
+
+int ustcomm_send_app_cmd(int sock,
+ struct ustcomm_ust_msg *lum,
+ struct ustcomm_ust_reply *lur)
+{
+ int ret;
+
+ ret = ustcomm_send_app_msg(sock, lum);
+ if (ret)
+ return ret;
+ ret = ustcomm_recv_app_reply(sock, lur, lum->handle, lum->cmd);
+ if (ret > 0)
+ return -EIO;
+ return ret;
+}
+
+/*
+ * chan_data is allocated internally if this function returns the
+ * expected var_len.
+ */
+ssize_t ustcomm_recv_channel_from_sessiond(int sock,
+ void **_chan_data, uint64_t var_len,
+ int *_wakeup_fd)
+{
+ void *chan_data;
+ ssize_t len, nr_fd;
+ int wakeup_fd, ret;
+
+ if (var_len > LTTNG_UST_ABI_CHANNEL_DATA_MAX_LEN) {
+ len = -EINVAL;
+ goto error_check;
+ }
+ /* Receive variable length data */
+ chan_data = zmalloc(var_len);
+ if (!chan_data) {
+ len = -ENOMEM;
+ goto error_alloc;
+ }
+ len = ustcomm_recv_unix_sock(sock, chan_data, var_len);
+ if (len != var_len) {
+ goto error_recv;
+ }
+ /* recv wakeup fd */
+ lttng_ust_lock_fd_tracker();
+ nr_fd = ustcomm_recv_fds_unix_sock(sock, &wakeup_fd, 1);
+ if (nr_fd <= 0) {
+ lttng_ust_unlock_fd_tracker();
+ if (nr_fd < 0) {
+ len = nr_fd;
+ goto error_recv;
+ } else {
+ len = -EIO;
+ goto error_recv;
+ }
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(wakeup_fd);
+ if (ret < 0) {
+ ret = close(wakeup_fd);
+ if (ret) {
+ PERROR("close on wakeup_fd");
+ }
+ len = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error_recv;
+ }
+
+ *_wakeup_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ *_chan_data = chan_data;
+ return len;
+
+error_recv:
+ free(chan_data);
+error_alloc:
+error_check:
+ return len;
+}
+
+ssize_t ustcomm_recv_event_notifier_notif_fd_from_sessiond(int sock,
+ int *_event_notifier_notif_fd)
+{
+ ssize_t nr_fd;
+ int event_notifier_notif_fd, ret;
+
+ /* Receive event_notifier notification fd */
+ lttng_ust_lock_fd_tracker();
+ nr_fd = ustcomm_recv_fds_unix_sock(sock, &event_notifier_notif_fd, 1);
+ if (nr_fd <= 0) {
+ lttng_ust_unlock_fd_tracker();
+ if (nr_fd < 0) {
+ ret = nr_fd;
+ goto error;
+ } else {
+ ret = -EIO;
+ goto error;
+ }
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(event_notifier_notif_fd);
+ if (ret < 0) {
+ ret = close(event_notifier_notif_fd);
+ if (ret) {
+ PERROR("close on event_notifier notif fd");
+ }
+ ret = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ *_event_notifier_notif_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ ret = nr_fd;
+
+error:
+ return ret;
+}
+
+int ustcomm_recv_stream_from_sessiond(int sock,
+ uint64_t *memory_map_size __attribute__((unused)),
+ int *shm_fd, int *wakeup_fd)
+{
+ ssize_t len;
+ int ret;
+ int fds[2];
+
+ /* recv shm fd and wakeup fd */
+ lttng_ust_lock_fd_tracker();
+ len = ustcomm_recv_fds_unix_sock(sock, fds, 2);
+ if (len <= 0) {
+ lttng_ust_unlock_fd_tracker();
+ if (len < 0) {
+ ret = len;
+ goto error;
+ } else {
+ ret = -EIO;
+ goto error;
+ }
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(fds[0]);
+ if (ret < 0) {
+ ret = close(fds[0]);
+ if (ret) {
+ PERROR("close on received shm_fd");
+ }
+ ret = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+ *shm_fd = ret;
+
+ ret = lttng_ust_add_fd_to_tracker(fds[1]);
+ if (ret < 0) {
+ ret = close(*shm_fd);
+ if (ret) {
+ PERROR("close on shm_fd");
+ }
+ *shm_fd = -1;
+ ret = close(fds[1]);
+ if (ret) {
+ PERROR("close on received wakeup_fd");
+ }
+ ret = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+ *wakeup_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+ return 0;
+
+error:
+ return ret;
+}
+
+ssize_t ustcomm_recv_counter_from_sessiond(int sock,
+ void **_counter_data, uint64_t var_len)
+{
+ void *counter_data;
+ ssize_t len;
+
+ if (var_len > LTTNG_UST_ABI_COUNTER_DATA_MAX_LEN) {
+ len = -EINVAL;
+ goto error_check;
+ }
+ /* Receive variable length data */
+ counter_data = zmalloc(var_len);
+ if (!counter_data) {
+ len = -ENOMEM;
+ goto error_alloc;
+ }
+ len = ustcomm_recv_unix_sock(sock, counter_data, var_len);
+ if (len != var_len) {
+ goto error_recv;
+ }
+ *_counter_data = counter_data;
+ return len;
+
+error_recv:
+ free(counter_data);
+error_alloc:
+error_check:
+ return len;
+}
+
+int ustcomm_recv_counter_shm_from_sessiond(int sock,
+ int *shm_fd)
+{
+ ssize_t len;
+ int ret;
+ int fds[1];
+
+ /* recv shm fd */
+ lttng_ust_lock_fd_tracker();
+ len = ustcomm_recv_fds_unix_sock(sock, fds, 1);
+ if (len <= 0) {
+ lttng_ust_unlock_fd_tracker();
+ if (len < 0) {
+ ret = len;
+ goto error;
+ } else {
+ ret = -EIO;
+ goto error;
+ }
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(fds[0]);
+ if (ret < 0) {
+ ret = close(fds[0]);
+ if (ret) {
+ PERROR("close on received shm_fd");
+ }
+ ret = -EIO;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+ *shm_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+ return 0;
+
+error:
+ return ret;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int ustcomm_send_reg_msg(int sock,
+ enum ustctl_socket_type type,
+ uint32_t bits_per_long,
+ uint32_t uint8_t_alignment,
+ uint32_t uint16_t_alignment,
+ uint32_t uint32_t_alignment,
+ uint32_t uint64_t_alignment,
+ uint32_t long_alignment)
+{
+ ssize_t len;
+ struct ustctl_reg_msg reg_msg;
+
+ reg_msg.magic = LTTNG_UST_ABI_COMM_MAGIC;
+ reg_msg.major = LTTNG_UST_ABI_MAJOR_VERSION;
+ reg_msg.minor = LTTNG_UST_ABI_MINOR_VERSION;
+ reg_msg.pid = getpid();
+ reg_msg.ppid = getppid();
+ reg_msg.uid = getuid();
+ reg_msg.gid = getgid();
+ reg_msg.bits_per_long = bits_per_long;
+ reg_msg.uint8_t_alignment = uint8_t_alignment;
+ reg_msg.uint16_t_alignment = uint16_t_alignment;
+ reg_msg.uint32_t_alignment = uint32_t_alignment;
+ reg_msg.uint64_t_alignment = uint64_t_alignment;
+ reg_msg.long_alignment = long_alignment;
+ reg_msg.socket_type = type;
+ lttng_pthread_getname_np(reg_msg.name, LTTNG_UST_ABI_PROCNAME_LEN);
+ memset(reg_msg.padding, 0, sizeof(reg_msg.padding));
+
+ len = ustcomm_send_unix_sock(sock, &reg_msg, sizeof(reg_msg));
+ if (len > 0 && len != sizeof(reg_msg))
+ return -EIO;
+ if (len < 0)
+ return len;
+ return 0;
+}
+
+static
+ssize_t count_one_type(const struct lttng_ust_type_common *lt)
+{
+ switch (lt->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_float:
+ case lttng_ust_type_string:
+ return 1;
+ case lttng_ust_type_enum:
+ return count_one_type(lttng_ust_get_type_enum(lt)->container_type) + 1;
+ case lttng_ust_type_array:
+ return count_one_type(lttng_ust_get_type_array(lt)->elem_type) + 1;
+ case lttng_ust_type_sequence:
+ return count_one_type(lttng_ust_get_type_sequence(lt)->elem_type) + 1;
+ case lttng_ust_type_struct:
+ return count_fields_recursive(lttng_ust_get_type_struct(lt)->nr_fields,
+ lttng_ust_get_type_struct(lt)->fields) + 1;
+
+ case lttng_ust_type_dynamic:
+ {
+ const struct lttng_ust_event_field **choices;
+ size_t nr_choices;
+ int ret;
+
+ ret = lttng_ust_dynamic_type_choices(&nr_choices,
+ &choices);
+ if (ret)
+ return ret;
+ /*
+ * Two fields for enum, one field for variant, and
+ * one field per choice.
+ */
+ return count_fields_recursive(nr_choices, choices) + 3;
+ }
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
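For example, an event field declared as a sequence of integers serializes as
two ustctl_field entries, the sequence header followed by its element type,
so count_one_type() returns count_one_type(integer) + 1 = 2; likewise a
dynamic field with N variant choices accounts for N + 3 entries, per the
comment above.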
+
+static
+ssize_t count_fields_recursive(size_t nr_fields,
+ const struct lttng_ust_event_field **lttng_fields)
+{
+ int i;
+ ssize_t ret, count = 0;
+
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_ust_event_field *lf;
+
+ lf = lttng_fields[i];
+ /* skip 'nowrite' fields */
+ if (lf->nowrite)
+ continue;
+ ret = count_one_type(lf->type);
+ if (ret < 0)
+ return ret; /* error */
+ count += ret;
+ }
+ return count;
+}
+
+static
+ssize_t count_ctx_fields_recursive(size_t nr_fields,
+ struct lttng_ust_ctx_field *lttng_fields)
+{
+ int i;
+ ssize_t ret, count = 0;
+
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_ust_event_field *lf;
+
+ lf = lttng_fields[i].event_field;
+ /* skip 'nowrite' fields */
+ if (lf->nowrite)
+ continue;
+ ret = count_one_type(lf->type);
+ if (ret < 0)
+ return ret; /* error */
+ count += ret;
+ }
+ return count;
+}
+
+static
+int serialize_string_encoding(int32_t *ue,
+ enum lttng_ust_string_encoding le)
+{
+ switch (le) {
+ case lttng_ust_string_encoding_none:
+ *ue = ustctl_encode_none;
+ break;
+ case lttng_ust_string_encoding_UTF8:
+ *ue = ustctl_encode_UTF8;
+ break;
+ case lttng_ust_string_encoding_ASCII:
+ *ue = ustctl_encode_ASCII;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int serialize_integer_type(struct ustctl_integer_type *uit,
+ const struct lttng_ust_type_integer *lit,
+ enum lttng_ust_string_encoding lencoding)
+{
+ int32_t encoding;
+
+ uit->size = lit->size;
+ uit->signedness = lit->signedness;
+ uit->reverse_byte_order = lit->reverse_byte_order;
+ uit->base = lit->base;
+ if (serialize_string_encoding(&encoding, lencoding))
+ return -EINVAL;
+ uit->encoding = encoding;
+ uit->alignment = lit->alignment;
+ return 0;
+}
+
+static
+int serialize_dynamic_type(struct lttng_ust_session *session,
+ struct ustctl_field *fields, size_t *iter_output,
+ const char *field_name)
+{
+ const struct lttng_ust_event_field **choices;
+ char tag_field_name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ const struct lttng_ust_type_common *tag_type;
+ const struct lttng_ust_event_field *tag_field_generic;
+ struct lttng_ust_event_field tag_field = {
+ .name = tag_field_name,
+ .nowrite = 0,
+ };
+ struct ustctl_field *uf;
+ size_t nr_choices, i;
+ int ret;
+
+ tag_field_generic = lttng_ust_dynamic_type_tag_field();
+ tag_type = tag_field_generic->type;
+
+ /* Serialize enum field. */
+ strncpy(tag_field_name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ tag_field_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ strncat(tag_field_name,
+ "_tag",
+ LTTNG_UST_ABI_SYM_NAME_LEN - strlen(tag_field_name) - 1);
+ tag_field.type = tag_type;
+ ret = serialize_one_field(session, fields, iter_output,
+ &tag_field);
+ if (ret)
+ return ret;
+
+ /* Serialize variant field. */
+ uf = &fields[*iter_output];
+ ret = lttng_ust_dynamic_type_choices(&nr_choices, &choices);
+ if (ret)
+ return ret;
+
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ uf->type.atype = ustctl_atype_variant;
+ uf->type.u.variant_nestable.nr_choices = nr_choices;
+ strncpy(uf->type.u.variant_nestable.tag_name,
+ tag_field_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->type.u.variant_nestable.tag_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ uf->type.u.variant_nestable.alignment = 0;
+ (*iter_output)++;
+
+ /* Serialize choice fields after variant. */
+ for (i = 0; i < nr_choices; i++) {
+ ret = serialize_one_field(session, fields,
+ iter_output, choices[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static
+int serialize_one_type(struct lttng_ust_session *session,
+ struct ustctl_field *fields, size_t *iter_output,
+ const char *field_name, const struct lttng_ust_type_common *lt,
+ enum lttng_ust_string_encoding parent_encoding)
+{
+ int ret;
+
+ /*
+ * Serializing a type (rather than a field) generates a ustctl_field
+ * entry with 0-length name.
+ */
+
+ switch (lt->type) {
+ case lttng_ust_type_integer:
+ {
+ struct ustctl_field *uf = &fields[*iter_output];
+ struct ustctl_type *ut = &uf->type;
+
+ if (field_name) {
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ } else {
+ uf->name[0] = '\0';
+ }
+ ret = serialize_integer_type(&ut->u.integer, lttng_ust_get_type_integer(lt),
+ parent_encoding);
+ if (ret)
+ return ret;
+ ut->atype = ustctl_atype_integer;
+ (*iter_output)++;
+ break;
+ }
+ case lttng_ust_type_float:
+ {
+ struct ustctl_field *uf = &fields[*iter_output];
+ struct ustctl_type *ut = &uf->type;
+ struct ustctl_float_type *uft;
+ const struct lttng_ust_type_float *lft;
+
+ if (field_name) {
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ } else {
+ uf->name[0] = '\0';
+ }
+ uft = &ut->u._float;
+ lft = lttng_ust_get_type_float(lt);
+ uft->exp_dig = lft->exp_dig;
+ uft->mant_dig = lft->mant_dig;
+ uft->alignment = lft->alignment;
+ uft->reverse_byte_order = lft->reverse_byte_order;
+ ut->atype = ustctl_atype_float;
+ (*iter_output)++;
+ break;
+ }
+ case lttng_ust_type_string:
+ {
+ struct ustctl_field *uf = &fields[*iter_output];
+ struct ustctl_type *ut = &uf->type;
+ int32_t encoding;
+
+ if (field_name) {
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ } else {
+ uf->name[0] = '\0';
+ }
+ ret = serialize_string_encoding(&encoding, lttng_ust_get_type_string(lt)->encoding);
+ if (ret)
+ return ret;
+ ut->u.string.encoding = encoding;
+ ut->atype = ustctl_atype_string;
+ (*iter_output)++;
+ break;
+ }
+ case lttng_ust_type_array:
+ {
+ struct ustctl_field *uf = &fields[*iter_output];
+ struct ustctl_type *ut = &uf->type;
+
+ if (field_name) {
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ } else {
+ uf->name[0] = '\0';
+ }
+ ut->atype = ustctl_atype_array_nestable;
+ ut->u.array_nestable.length = lttng_ust_get_type_array(lt)->length;
+ ut->u.array_nestable.alignment = lttng_ust_get_type_array(lt)->alignment;
+ (*iter_output)++;
+
+ ret = serialize_one_type(session, fields, iter_output, NULL,
+ lttng_ust_get_type_array(lt)->elem_type,
+ lttng_ust_get_type_array(lt)->encoding);
+ if (ret)
+ return -EINVAL;
+ break;
+ }
+ case lttng_ust_type_sequence:
+ {
+ struct ustctl_field *uf = &fields[*iter_output];
+ struct ustctl_type *ut = &uf->type;
+
+ if (field_name) {
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ } else {
+ uf->name[0] = '\0';
+ }
+ ut->atype = ustctl_atype_sequence_nestable;
+ strncpy(ut->u.sequence_nestable.length_name,
+ lttng_ust_get_type_sequence(lt)->length_name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ ut->u.sequence_nestable.length_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ ut->u.sequence_nestable.alignment = lttng_ust_get_type_sequence(lt)->alignment;
+ (*iter_output)++;
+
+ ret = serialize_one_type(session, fields, iter_output, NULL,
+ lttng_ust_get_type_sequence(lt)->elem_type,
+ lttng_ust_get_type_sequence(lt)->encoding);
+ if (ret)
+ return -EINVAL;
+ break;
+ }
+ case lttng_ust_type_dynamic:
+ {
+ ret = serialize_dynamic_type(session, fields, iter_output,
+ field_name);
+ if (ret)
+ return -EINVAL;
+ break;
+ }
+ case lttng_ust_type_struct:
+ {
+ struct ustctl_field *uf = &fields[*iter_output];
+
+ if (field_name) {
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ } else {
+ uf->name[0] = '\0';
+ }
+ uf->type.atype = ustctl_atype_struct_nestable;
+ uf->type.u.struct_nestable.nr_fields = lttng_ust_get_type_struct(lt)->nr_fields;
+ uf->type.u.struct_nestable.alignment = lttng_ust_get_type_struct(lt)->alignment;
+ (*iter_output)++;
+
+ ret = serialize_fields(session, fields, iter_output,
+ lttng_ust_get_type_struct(lt)->nr_fields,
+ lttng_ust_get_type_struct(lt)->fields);
+ if (ret)
+ return -EINVAL;
+ break;
+ }
+ case lttng_ust_type_enum:
+ {
+ struct ustctl_field *uf = &fields[*iter_output];
+ struct ustctl_type *ut = &uf->type;
+
+ if (field_name) {
+ strncpy(uf->name, field_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uf->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ } else {
+ uf->name[0] = '\0';
+ }
+ strncpy(ut->u.enum_nestable.name, lttng_ust_get_type_enum(lt)->desc->name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ ut->u.enum_nestable.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ ut->atype = ustctl_atype_enum_nestable;
+ (*iter_output)++;
+
+ ret = serialize_one_type(session, fields, iter_output, NULL,
+ lttng_ust_get_type_enum(lt)->container_type,
+ lttng_ust_string_encoding_none);
+ if (ret)
+ return -EINVAL;
+ if (session) {
+ const struct lttng_enum *_enum;
+
+ _enum = lttng_ust_enum_get_from_desc(session, lttng_ust_get_type_enum(lt)->desc);
+ if (!_enum)
+ return -EINVAL;
+ ut->u.enum_nestable.id = _enum->id;
+ } else {
+ ut->u.enum_nestable.id = -1ULL;
+ }
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int serialize_one_field(struct lttng_ust_session *session,
+ struct ustctl_field *fields, size_t *iter_output,
+ const struct lttng_ust_event_field *lf)
+{
+ /* skip 'nowrite' fields */
+ if (lf->nowrite)
+ return 0;
+
+ return serialize_one_type(session, fields, iter_output, lf->name, lf->type, lttng_ust_string_encoding_none);
+}
+
+static
+int serialize_fields(struct lttng_ust_session *session,
+ struct ustctl_field *ustctl_fields,
+ size_t *iter_output, size_t nr_lttng_fields,
+ const struct lttng_ust_event_field **lttng_fields)
+{
+ int ret;
+ size_t i;
+
+ for (i = 0; i < nr_lttng_fields; i++) {
+ ret = serialize_one_field(session, ustctl_fields,
+ iter_output, lttng_fields[i]);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static
+int alloc_serialize_fields(struct lttng_ust_session *session,
+ size_t *_nr_write_fields,
+ struct ustctl_field **ustctl_fields,
+ size_t nr_fields,
+ const struct lttng_ust_event_field **lttng_fields)
+{
+ struct ustctl_field *fields;
+ int ret;
+ size_t iter_output = 0;
+ ssize_t nr_write_fields;
+
+ nr_write_fields = count_fields_recursive(nr_fields, lttng_fields);
+ if (nr_write_fields < 0) {
+ return (int) nr_write_fields;
+ }
+
+ fields = zmalloc(nr_write_fields * sizeof(*fields));
+ if (!fields)
+ return -ENOMEM;
+
+ ret = serialize_fields(session, fields, &iter_output, nr_fields,
+ lttng_fields);
+ if (ret)
+ goto error_type;
+
+ *_nr_write_fields = nr_write_fields;
+ *ustctl_fields = fields;
+ return 0;
+
+error_type:
+ free(fields);
+ return ret;
+}
+
+static
+int serialize_entries(struct ustctl_enum_entry **_entries,
+ size_t nr_entries,
+ const struct lttng_ust_enum_entry **lttng_entries)
+{
+ struct ustctl_enum_entry *entries;
+ int i;
+
+ /* Serialize the entries */
+ entries = zmalloc(nr_entries * sizeof(*entries));
+ if (!entries)
+ return -ENOMEM;
+ for (i = 0; i < nr_entries; i++) {
+ struct ustctl_enum_entry *uentry;
+ const struct lttng_ust_enum_entry *lentry;
+
+ uentry = &entries[i];
+ lentry = lttng_entries[i];
+
+ uentry->start.value = lentry->start.value;
+ uentry->start.signedness = lentry->start.signedness;
+ uentry->end.value = lentry->end.value;
+ uentry->end.signedness = lentry->end.signedness;
+ strncpy(uentry->string, lentry->string, LTTNG_UST_ABI_SYM_NAME_LEN);
+ uentry->string[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+
+ if (lentry->options & LTTNG_UST_ENUM_ENTRY_OPTION_IS_AUTO) {
+ uentry->u.extra.options |=
+ USTCTL_UST_ENUM_ENTRY_OPTION_IS_AUTO;
+ }
+ }
+ *_entries = entries;
+ return 0;
+}
+
+static
+int serialize_ctx_fields(struct lttng_ust_session *session,
+ size_t *_nr_write_fields,
+ struct ustctl_field **ustctl_fields,
+ size_t nr_fields,
+ struct lttng_ust_ctx_field *lttng_fields)
+{
+ struct ustctl_field *fields;
+ int ret;
+ size_t i, iter_output = 0;
+ ssize_t nr_write_fields;
+
+ nr_write_fields = count_ctx_fields_recursive(nr_fields,
+ lttng_fields);
+ if (nr_write_fields < 0) {
+ return (int) nr_write_fields;
+ }
+
+ fields = zmalloc(nr_write_fields * sizeof(*fields));
+ if (!fields)
+ return -ENOMEM;
+
+ for (i = 0; i < nr_fields; i++) {
+ ret = serialize_one_field(session, fields, &iter_output,
+ lttng_fields[i].event_field);
+ if (ret)
+ goto error_type;
+ }
+
+ *_nr_write_fields = nr_write_fields;
+ *ustctl_fields = fields;
+ return 0;
+
+error_type:
+ free(fields);
+ return ret;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int ustcomm_register_event(int sock,
+ struct lttng_ust_session *session,
+ int session_objd, /* session descriptor */
+ int channel_objd, /* channel descriptor */
+ const char *event_name, /* event name (input) */
+ int loglevel,
+ const char *signature, /* event signature (input) */
+ size_t nr_fields, /* fields */
+ const struct lttng_ust_event_field **lttng_fields,
+ const char *model_emf_uri,
+ uint32_t *id) /* event id (output) */
+{
+ ssize_t len;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_event_msg m;
+ } msg;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_event_reply r;
+ } reply;
+ size_t signature_len, fields_len, model_emf_uri_len;
+ struct ustctl_field *fields = NULL;
+ size_t nr_write_fields = 0;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header.notify_cmd = USTCTL_NOTIFY_CMD_EVENT;
+ msg.m.session_objd = session_objd;
+ msg.m.channel_objd = channel_objd;
+ strncpy(msg.m.event_name, event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ msg.m.event_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ msg.m.loglevel = loglevel;
+ signature_len = strlen(signature) + 1;
+ msg.m.signature_len = signature_len;
+
+ /* Calculate fields len, serialize fields. */
+ if (nr_fields > 0) {
+ ret = alloc_serialize_fields(session, &nr_write_fields, &fields,
+ nr_fields, lttng_fields);
+ if (ret)
+ return ret;
+ }
+
+ fields_len = sizeof(*fields) * nr_write_fields;
+ msg.m.fields_len = fields_len;
+ if (model_emf_uri) {
+ model_emf_uri_len = strlen(model_emf_uri) + 1;
+ } else {
+ model_emf_uri_len = 0;
+ }
+ msg.m.model_emf_uri_len = model_emf_uri_len;
+
+ len = ustcomm_send_unix_sock(sock, &msg, sizeof(msg));
+ if (len > 0 && len != sizeof(msg)) {
+ ret = -EIO;
+ goto error_fields;
+ }
+ if (len < 0) {
+ ret = len;
+ goto error_fields;
+ }
+
+ /* send signature */
+ len = ustcomm_send_unix_sock(sock, signature, signature_len);
+ if (len > 0 && len != signature_len) {
+ ret = -EIO;
+ goto error_fields;
+ }
+ if (len < 0) {
+ ret = len;
+ goto error_fields;
+ }
+
+ /* send fields */
+ if (fields_len > 0) {
+ len = ustcomm_send_unix_sock(sock, fields, fields_len);
+ if (len > 0 && len != fields_len) {
+ ret = -EIO;
+ goto error_fields;
+ }
+ if (len < 0) {
+ ret = len;
+ goto error_fields;
+ }
+ }
+ free(fields);
+
+ if (model_emf_uri_len) {
+ /* send model_emf_uri */
+ len = ustcomm_send_unix_sock(sock, model_emf_uri,
+ model_emf_uri_len);
+ if (len > 0 && len != model_emf_uri_len) {
+ return -EIO;
+ }
+ if (len < 0) {
+ return len;
+ }
+ }
+
+ /* receive reply */
+ len = ustcomm_recv_unix_sock(sock, &reply, sizeof(reply));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ return -EPIPE;
+ case sizeof(reply):
+ if (reply.header.notify_cmd != msg.header.notify_cmd) {
+ ERR("Unexpected result message command "
+ "expected: %u vs received: %u\n",
+ msg.header.notify_cmd, reply.header.notify_cmd);
+ return -EINVAL;
+ }
+ if (reply.r.ret_code > 0)
+ return -EINVAL;
+ if (reply.r.ret_code < 0)
+ return reply.r.ret_code;
+ *id = reply.r.event_id;
+ DBG("Sent register event notification for name \"%s\": ret_code %d, event_id %u\n",
+ event_name, reply.r.ret_code, reply.r.event_id);
+ return 0;
+ default:
+ if (len < 0) {
+ /* Transport level error */
+ if (errno == EPIPE || errno == ECONNRESET)
+ len = -errno;
+ return len;
+ } else {
+ ERR("incorrect message size: %zd\n", len);
+ return len;
+ }
+ }
+ /* Unreached. */
+
+ /* Error path only. */
+error_fields:
+ free(fields);
+ return ret;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ * Returns -EPIPE or -ECONNRESET if other end has hung up.
+ */
+int ustcomm_register_enum(int sock,
+ int session_objd, /* session descriptor */
+ const char *enum_name, /* enum name (input) */
+ size_t nr_entries, /* entries */
+ const struct lttng_ust_enum_entry **lttng_entries,
+ uint64_t *id)
+{
+ ssize_t len;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_enum_msg m;
+ } msg;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_enum_reply r;
+ } reply;
+ size_t entries_len;
+ struct ustctl_enum_entry *entries = NULL;
+ int ret;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header.notify_cmd = USTCTL_NOTIFY_CMD_ENUM;
+ msg.m.session_objd = session_objd;
+ strncpy(msg.m.enum_name, enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ msg.m.enum_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+
+ /* Calculate entries len, serialize entries. */
+ if (nr_entries > 0) {
+ ret = serialize_entries(&entries,
+ nr_entries, lttng_entries);
+ if (ret)
+ return ret;
+ }
+
+ entries_len = sizeof(*entries) * nr_entries;
+ msg.m.entries_len = entries_len;
+
+ len = ustcomm_send_unix_sock(sock, &msg, sizeof(msg));
+ if (len > 0 && len != sizeof(msg)) {
+ ret = -EIO;
+ goto error_entries;
+ }
+ if (len < 0) {
+ ret = len;
+ goto error_entries;
+ }
+
+ /* send entries */
+ if (entries_len > 0) {
+ len = ustcomm_send_unix_sock(sock, entries, entries_len);
+ if (len > 0 && len != entries_len) {
+ ret = -EIO;
+ goto error_entries;
+ }
+ if (len < 0) {
+ ret = len;
+ goto error_entries;
+ }
+ }
+ free(entries);
+ entries = NULL;
+
+ /* receive reply */
+ len = ustcomm_recv_unix_sock(sock, &reply, sizeof(reply));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ return -EPIPE;
+ case sizeof(reply):
+ if (reply.header.notify_cmd != msg.header.notify_cmd) {
+ ERR("Unexpected result message command "
+ "expected: %u vs received: %u\n",
+ msg.header.notify_cmd, reply.header.notify_cmd);
+ return -EINVAL;
+ }
+ if (reply.r.ret_code > 0)
+ return -EINVAL;
+ if (reply.r.ret_code < 0)
+ return reply.r.ret_code;
+ *id = reply.r.enum_id;
+ DBG("Sent register enum notification for name \"%s\": ret_code %d\n",
+ enum_name, reply.r.ret_code);
+ return 0;
+ default:
+ if (len < 0) {
+ /* Transport level error */
+ if (errno == EPIPE || errno == ECONNRESET)
+ len = -errno;
+ return len;
+ } else {
+ ERR("incorrect message size: %zd\n", len);
+ return len;
+ }
+ }
+ return ret;
+
+error_entries:
+ free(entries);
+ return ret;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ * Returns -EPIPE or -ECONNRESET if other end has hung up.
+ */
+int ustcomm_register_channel(int sock,
+ struct lttng_ust_session *session,
+ int session_objd, /* session descriptor */
+ int channel_objd, /* channel descriptor */
+ size_t nr_ctx_fields,
+ struct lttng_ust_ctx_field *ctx_fields,
+ uint32_t *chan_id, /* channel id (output) */
+ int *header_type) /* header type (output) */
+{
+ ssize_t len;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_channel_msg m;
+ } msg;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_channel_reply r;
+ } reply;
+ size_t fields_len;
+ struct ustctl_field *fields = NULL;
+ int ret;
+ size_t nr_write_fields = 0;
+
+ memset(&msg, 0, sizeof(msg));
+ msg.header.notify_cmd = USTCTL_NOTIFY_CMD_CHANNEL;
+ msg.m.session_objd = session_objd;
+ msg.m.channel_objd = channel_objd;
+
+ /* Calculate fields len, serialize fields. */
+ if (nr_ctx_fields > 0) {
+ ret = serialize_ctx_fields(session, &nr_write_fields, &fields,
+ nr_ctx_fields, ctx_fields);
+ if (ret)
+ return ret;
+ }
+
+ fields_len = sizeof(*fields) * nr_write_fields;
+ msg.m.ctx_fields_len = fields_len;
+ len = ustcomm_send_unix_sock(sock, &msg, sizeof(msg));
+ if (len > 0 && len != sizeof(msg)) {
+ free(fields);
+ return -EIO;
+ }
+ if (len < 0) {
+ free(fields);
+ return len;
+ }
+
+ /* send fields */
+ if (fields_len > 0) {
+ len = ustcomm_send_unix_sock(sock, fields, fields_len);
+ free(fields);
+ if (len > 0 && len != fields_len) {
+ return -EIO;
+ }
+ if (len < 0) {
+ return len;
+ }
+ } else {
+ free(fields);
+ }
+
+ len = ustcomm_recv_unix_sock(sock, &reply, sizeof(reply));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ return -EPIPE;
+ case sizeof(reply):
+ if (reply.header.notify_cmd != msg.header.notify_cmd) {
+ ERR("Unexpected result message command "
+ "expected: %u vs received: %u\n",
+ msg.header.notify_cmd, reply.header.notify_cmd);
+ return -EINVAL;
+ }
+ if (reply.r.ret_code > 0)
+ return -EINVAL;
+ if (reply.r.ret_code < 0)
+ return reply.r.ret_code;
+ *chan_id = reply.r.chan_id;
+ switch (reply.r.header_type) {
+ case 1:
+ case 2:
+ *header_type = reply.r.header_type;
+ break;
+ default:
+ ERR("Unexpected channel header type %u\n",
+ reply.r.header_type);
+ return -EINVAL;
+ }
+ DBG("Sent register channel notification: chan_id %d, header_type %d\n",
+ reply.r.chan_id, reply.r.header_type);
+ return 0;
+ default:
+ if (len < 0) {
+ /* Transport level error */
+ if (errno == EPIPE || errno == ECONNRESET)
+ len = -errno;
+ return len;
+ } else {
+ ERR("incorrect message size: %zd\n", len);
+ return len;
+ }
+ }
+}
+
+/*
+ * Set socket receiving timeout.
+ */
+int ustcomm_setsockopt_rcv_timeout(int sock, unsigned int msec)
+{
+ int ret;
+ struct timeval tv;
+
+ tv.tv_sec = msec / 1000;
+ tv.tv_usec = (msec * 1000) % 1000000;
+
+ ret = setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv));
+ if (ret < 0) {
+ PERROR("setsockopt SO_RCVTIMEO");
+ ret = -errno;
+ }
+
+ return ret;
+}
+
+/*
+ * Set socket sending timeout.
+ */
+int ustcomm_setsockopt_snd_timeout(int sock, unsigned int msec)
+{
+ int ret;
+ struct timeval tv;
+
+ tv.tv_sec = msec / 1000;
+ tv.tv_usec = (msec * 1000) % 1000000;
+
+ ret = setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO, &tv, sizeof(tv));
+ if (ret < 0) {
+ PERROR("setsockopt SO_SNDTIMEO");
+ ret = -errno;
+ }
+
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Aravind HT <aravind.ht@gmail.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <assert.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/select.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdbool.h>
+#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <urcu/system.h>
+
+#include <ust-fd.h>
+#include <ust-helper.h>
+#include <lttng/ust-error.h>
+#include <usterr-signal-safe.h>
+
+#include "../liblttng-ust/compat.h"
+#include "../liblttng-ust/lttng-tracer-core.h"
+
+/* Operations on the fd set. */
+#define IS_FD_VALID(fd) ((fd) >= 0 && (fd) < lttng_ust_max_fd)
+#define GET_FD_SET_FOR_FD(fd, fd_sets) (&((fd_sets)[(fd) / FD_SETSIZE]))
+#define CALC_INDEX_TO_SET(fd) ((fd) % FD_SETSIZE)
+#define IS_FD_STD(fd) (IS_FD_VALID(fd) && (fd) <= STDERR_FILENO)
+
+/* Check fd validity before calling these. */
+#define ADD_FD_TO_SET(fd, fd_sets) \
+ FD_SET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
+#define IS_FD_SET(fd, fd_sets) \
+ FD_ISSET(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
+#define DEL_FD_FROM_SET(fd, fd_sets) \
+ FD_CLR(CALC_INDEX_TO_SET(fd), GET_FD_SET_FOR_FD(fd, fd_sets))
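A single fd_set only covers FD_SETSIZE descriptors, so the tracker keeps an
array of sets and splits each fd into an array index and a bit offset. A
worked example, assuming the common FD_SETSIZE of 1024:

    /* fd 1500: */
    GET_FD_SET_FOR_FD(1500, lttng_fd_set);  /* -> &lttng_fd_set[1]   */
    CALC_INDEX_TO_SET(1500);                /* -> 476 (1500 % 1024)  */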
+
+/*
+ * Protect the lttng_fd_set. Nests within the ust_lock, and therefore
+ * within the libc dl lock. Therefore, we need to fixup the TLS before
+ * nesting into this lock.
+ *
+ * The ust_safe_guard_fd_mutex nests within the ust_mutex. This mutex
+ * is also held across fork.
+ */
+static pthread_mutex_t ust_safe_guard_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_safe_guard_fd_mutex. Saved when
+ * locking, restored on unlock. Protected by ust_safe_guard_fd_mutex.
+ */
+static int ust_safe_guard_saved_cancelstate;
+
+/*
+ * Track whether we are within lttng-ust or application, for close
+ * system call override by LD_PRELOAD library. This also tracks whether
+ * we are invoking close() from a signal handler nested on an
+ * application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_fd_mutex_nest);
+
+/* fd_set used to book keep fd being used by lttng-ust. */
+static fd_set *lttng_fd_set;
+static int lttng_ust_max_fd;
+static int num_fd_sets;
+static int init_done;
+
+/*
+ * Force a read (implies TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_ust_fixup_fd_tracker_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_fd_mutex_nest)));
+}
+
+/*
+ * Allocate the fd set array based on the hard limit set for this
+ * process. This will be called during the constructor execution
+ * and will also be called in the child after fork via lttng_ust_init.
+ */
+void lttng_ust_init_fd_tracker(void)
+{
+ struct rlimit rlim;
+ int i;
+
+ if (CMM_LOAD_SHARED(init_done))
+ return;
+
+ memset(&rlim, 0, sizeof(rlim));
+ /* Get the current possible max number of fd for this process. */
+ if (getrlimit(RLIMIT_NOFILE, &rlim) < 0)
+ abort();
+ /*
+ * The FD set array size is determined using the hard limit. Even if
+ * the process wishes to increase its limit using setrlimit, it
+ * can only do so with the soft limit, which will be less than the
+ * hard limit.
+ */
+ lttng_ust_max_fd = rlim.rlim_max;
+ num_fd_sets = lttng_ust_max_fd / FD_SETSIZE;
+ if (lttng_ust_max_fd % FD_SETSIZE)
+ ++num_fd_sets;
+ if (lttng_fd_set != NULL) {
+ free(lttng_fd_set);
+ lttng_fd_set = NULL;
+ }
+ lttng_fd_set = malloc(num_fd_sets * (sizeof(fd_set)));
+ if (!lttng_fd_set)
+ abort();
+ for (i = 0; i < num_fd_sets; i++)
+ FD_ZERO((&lttng_fd_set[i]));
+ CMM_STORE_SHARED(init_done, 1);
+}
+
+void lttng_ust_lock_fd_tracker(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_fd_mutex_nest)++) {
+ /*
+ * Ensure the compiler doesn't move the store after the close()
+ * call in case close() would be marked as leaf.
+ */
+ cmm_barrier();
+ pthread_mutex_lock(&ust_safe_guard_fd_mutex);
+ ust_safe_guard_saved_cancelstate = oldstate;
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+void lttng_ust_unlock_fd_tracker(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, newstate, oldstate;
+ bool restore_cancel = false;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ /*
+ * Ensure the compiler doesn't move the store before the close()
+ * call, in case close() would be marked as leaf.
+ */
+ cmm_barrier();
+ if (!--URCU_TLS(ust_fd_mutex_nest)) {
+ newstate = ust_safe_guard_saved_cancelstate;
+ restore_cancel = true;
+ pthread_mutex_unlock(&ust_safe_guard_fd_mutex);
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (restore_cancel) {
+ ret = pthread_setcancelstate(newstate, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ }
+}
+
+static int dup_std_fd(int fd)
+{
+ int ret, i;
+ int fd_to_close[STDERR_FILENO + 1];
+ int fd_to_close_count = 0;
+ int dup_cmd = F_DUPFD; /* Default command */
+ int fd_valid = -1;
+
+ if (!(IS_FD_STD(fd))) {
+ /* Should not be here */
+ ret = -1;
+ goto error;
+ }
+
+ /* Check for FD_CLOEXEC flag */
+ ret = fcntl(fd, F_GETFD);
+ if (ret < 0) {
+ PERROR("fcntl on f_getfd");
+ ret = -1;
+ goto error;
+ }
+
+ if (ret & FD_CLOEXEC) {
+ dup_cmd = F_DUPFD_CLOEXEC;
+ }
+
+ /* Perform dup */
+ for (i = 0; i < STDERR_FILENO + 1; i++) {
+ ret = fcntl(fd, dup_cmd, 0);
+ if (ret < 0) {
+ PERROR("fcntl dup fd");
+ goto error;
+ }
+
+ if (!(IS_FD_STD(ret))) {
+ /* fd is outside of STD range, use it. */
+ fd_valid = ret;
+ /* Close fd received as argument. */
+ fd_to_close[i] = fd;
+ fd_to_close_count++;
+ break;
+ }
+
+ fd_to_close[i] = ret;
+ fd_to_close_count++;
+ }
+
+ /* Close intermediary fds */
+ for (i = 0; i < fd_to_close_count; i++) {
+ ret = close(fd_to_close[i]);
+ if (ret) {
+ PERROR("close on temporary fd: %d.", fd_to_close[i]);
+ /*
+ * Not using abort() here would require complicated
+ * error handling for the caller. If a failure occurs
+ * here, the system is already in a bad state.
+ */
+ abort();
+ }
+ }
+
+ ret = fd_valid;
+error:
+ return ret;
+}
+
+/*
+ * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
+ * Has strict checking of fd validity.
+ *
+ * If fd <= 2, dup the fd until fd > 2. This enables us to bypass
+ * problems that can be encountered if UST uses stdin, stdout, stderr
+ * fds for internal use (daemon etc.). This can happen if the
+ * application closes any of those file descriptors. Intermediary fds
+ * are closed as needed.
+ *
+ * Return -1 on error.
+ *
+ */
+int lttng_ust_add_fd_to_tracker(int fd)
+{
+ int ret;
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+ assert(URCU_TLS(ust_fd_mutex_nest));
+
+ if (IS_FD_STD(fd)) {
+ ret = dup_std_fd(fd);
+ if (ret < 0) {
+ goto error;
+ }
+ fd = ret;
+ }
+
+ /* Trying to add an fd which we cannot accommodate. */
+ assert(IS_FD_VALID(fd));
+ /* Setting an fd that's already set. */
+ assert(!IS_FD_SET(fd, lttng_fd_set));
+
+ ADD_FD_TO_SET(fd, lttng_fd_set);
+ return fd;
+error:
+ return ret;
+}
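The canonical calling pattern, seen in the sessiond reception paths of
lttng-ust-comm.c above, takes the tracker lock around both the fd creation
and its registration. A condensed, hedged sketch (the open_tracked helper is
hypothetical, for illustration only):

    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <ust-fd.h>
    #include <usterr-signal-safe.h>

    static int open_tracked(const char *path)
    {
        int fd, tracked_fd;

        lttng_ust_lock_fd_tracker();
        fd = open(path, O_RDONLY);
        if (fd < 0) {
            lttng_ust_unlock_fd_tracker();
            return -errno;
        }
        tracked_fd = lttng_ust_add_fd_to_tracker(fd);
        if (tracked_fd < 0) {
            if (close(fd))
                PERROR("close");
            lttng_ust_unlock_fd_tracker();
            return -EIO;
        }
        lttng_ust_unlock_fd_tracker();
        /* tracked_fd may differ from fd if fd was a std descriptor (0-2). */
        return tracked_fd;
    }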
+
+/*
+ * Needs to be called with ust_safe_guard_fd_mutex held when opening the fd.
+ * Has strict checking for fd validity.
+ */
+void lttng_ust_delete_fd_from_tracker(int fd)
+{
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ assert(URCU_TLS(ust_fd_mutex_nest));
+ /* Not a valid fd. */
+ assert(IS_FD_VALID(fd));
+ /* Deleting an fd which was not set. */
+ assert(IS_FD_SET(fd, lttng_fd_set));
+
+ DEL_FD_FROM_SET(fd, lttng_fd_set);
+}
+
+/*
+ * Interface allowing applications to close arbitrary file descriptors.
+ * We check if it is owned by lttng-ust, and return -1, errno=EBADF
+ * instead of closing it if it is the case.
+ */
+int lttng_ust_safe_close_fd(int fd, int (*close_cb)(int fd))
+{
+ int ret = 0;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ /*
+ * If called from lttng-ust, we directly call close without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest))
+ return close_cb(fd);
+
+ lttng_ust_lock_fd_tracker();
+ if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
+ ret = -1;
+ errno = EBADF;
+ } else {
+ ret = close_cb(fd);
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ return ret;
+}
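This helper exists so that an interposition library can route the
application's close() through the tracker. A hedged sketch of such an
LD_PRELOAD shim (the dlsym(RTLD_NEXT, ...) lookup is an assumption about how
a companion override library would be built, not part of this patch):

    #define _GNU_SOURCE
    #include <dlfcn.h>
    #include <ust-fd.h>

    static int (*real_close)(int fd);

    int close(int fd)
    {
        if (!real_close)
            real_close = (int (*)(int)) dlsym(RTLD_NEXT, "close");
        /* Fails with EBADF if lttng-ust owns this descriptor. */
        return lttng_ust_safe_close_fd(fd, real_close);
    }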
+
+/*
+ * Interface allowing applications to close arbitrary streams.
+ * We check if it is owned by lttng-ust, and return -1, errno=EBADF
+ * instead of closing it if it is the case.
+ */
+int lttng_ust_safe_fclose_stream(FILE *stream, int (*fclose_cb)(FILE *stream))
+{
+ int ret = 0, fd;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ /*
+ * If called from lttng-ust, we directly call fclose without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest))
+ return fclose_cb(stream);
+
+ fd = fileno(stream);
+
+ lttng_ust_lock_fd_tracker();
+ if (IS_FD_VALID(fd) && IS_FD_SET(fd, lttng_fd_set)) {
+ ret = -1;
+ errno = EBADF;
+ } else {
+ ret = fclose_cb(stream);
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ return ret;
+}
+
+#ifdef __OpenBSD__
+static void set_close_success(int *p)
+{
+ *p = 1;
+}
+static int test_close_success(const int *p)
+{
+ return *p;
+}
+#else
+static void set_close_success(int *p __attribute__((unused)))
+{
+}
+static int test_close_success(const int *p __attribute__((unused)))
+{
+ return 1;
+}
+#endif
+
+/*
+ * Implement helper for closefrom() override.
+ */
+int lttng_ust_safe_closefrom_fd(int lowfd, int (*close_cb)(int fd))
+{
+ int ret = 0, close_success = 0, i;
+
+ lttng_ust_fixup_fd_tracker_tls();
+
+ /*
+ * Ensure the tracker is initialized when called from
+ * constructors.
+ */
+ lttng_ust_init_fd_tracker();
+
+ if (lowfd < 0) {
+ /*
+ * NetBSD returns EBADF if fd is invalid.
+ */
+ errno = EBADF;
+ ret = -1;
+ goto end;
+ }
+ /*
+ * If called from lttng-ust, we directly call close without
+ * validating whether the FD is part of the tracked set.
+ */
+ if (URCU_TLS(ust_fd_mutex_nest)) {
+ for (i = lowfd; i < lttng_ust_max_fd; i++) {
+ if (close_cb(i) < 0) {
+ switch (errno) {
+ case EBADF:
+ continue;
+ case EINTR:
+ default:
+ ret = -1;
+ goto end;
+ }
+ }
+ set_close_success(&close_success);
+ }
+ } else {
+ lttng_ust_lock_fd_tracker();
+ for (i = lowfd; i < lttng_ust_max_fd; i++) {
+ if (IS_FD_VALID(i) && IS_FD_SET(i, lttng_fd_set))
+ continue;
+ if (close_cb(i) < 0) {
+ switch (errno) {
+ case EBADF:
+ continue;
+ case EINTR:
+ default:
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ goto end;
+ }
+ }
+ set_close_success(&close_success);
+ }
+ lttng_ust_unlock_fd_tracker();
+ }
+ if (!test_close_success(&close_success)) {
+ /*
+ * OpenBSD returns EBADF if fd is greater than all open
+ * file descriptors.
+ */
+ ret = -1;
+ errno = EBADF;
+ }
+end:
+ return ret;
+}
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CPPFLAGS += -I$(top_srcdir)/liblttng-ust-comm
+AM_CFLAGS += -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-ctl.la
+
+liblttng_ust_ctl_la_SOURCES = ustctl.c
+liblttng_ust_ctl_la_LDFLAGS = \
+ -version-info $(LTTNG_UST_CTL_LIBRARY_VERSION)
+
+liblttng_ust_ctl_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust-comm/liblttng-ust-comm.la \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust-support.la \
+ $(top_builddir)/src/snprintf/libustsnprintf.la \
+ $(DL_LIBS)
--- /dev/null
+/*
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Copyright (C) 2011 Julien Desfossez <julien.desfossez@polymtl.ca>
+ * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <stdint.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <lttng/ust-config.h>
+#include <lttng/ust-ctl.h>
+#include <lttng/ust-abi.h>
+#include <lttng/ust-endian.h>
+
+#include <usterr-signal-safe.h>
+#include <ust-comm.h>
+#include <ust-helper.h>
+#include "ust-compat.h"
+
+#include "../libringbuffer/backend.h"
+#include "../libringbuffer/frontend.h"
+#include "../liblttng-ust/ust-events-internal.h"
+#include "../liblttng-ust/wait.h"
+#include "../liblttng-ust/lttng-rb-clients.h"
+#include "../liblttng-ust/clock.h"
+#include "../liblttng-ust/getenv.h"
+#include "../liblttng-ust/lttng-tracer-core.h"
+#include "../liblttng-ust/lttng-counter-client.h"
+
+#include "../libcounter/shm.h"
+#include "../libcounter/smp.h"
+#include "../libcounter/counter.h"
+
+/*
+ * Number of milliseconds to retry before failing metadata writes on
+ * buffer full condition. (10 seconds)
+ */
+#define LTTNG_METADATA_TIMEOUT_MSEC 10000
+
+/*
+ * Channel representation within consumer.
+ */
+struct ustctl_consumer_channel {
+ struct lttng_ust_channel_buffer *chan; /* lttng channel buffers */
+
+ /* initial attributes */
+ struct ustctl_consumer_channel_attr attr;
+ int wait_fd; /* monitor close() */
+ int wakeup_fd; /* monitor close() */
+};
+
+/*
+ * Stream representation within consumer.
+ */
+struct ustctl_consumer_stream {
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *chan;
+ int shm_fd, wait_fd, wakeup_fd;
+ int cpu;
+ uint64_t memory_map_size;
+};
+
+#define USTCTL_COUNTER_ATTR_DIMENSION_MAX 8
+struct ustctl_counter_attr {
+ enum ustctl_counter_arithmetic arithmetic;
+ enum ustctl_counter_bitness bitness;
+ uint32_t nr_dimensions;
+ int64_t global_sum_step;
+ struct ustctl_counter_dimension dimensions[USTCTL_COUNTER_ATTR_DIMENSION_MAX];
+ bool coalesce_hits;
+};
+
+/*
+ * Counter representation within daemon.
+ */
+struct ustctl_daemon_counter {
+ struct lib_counter *counter;
+ const struct lttng_counter_ops *ops;
+ struct ustctl_counter_attr *attr; /* initial attributes */
+};
+
+int ustctl_release_handle(int sock, int handle)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+
+ if (sock < 0 || handle < 0)
+ return 0;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = handle;
+ lum.cmd = LTTNG_UST_ABI_RELEASE;
+ return ustcomm_send_app_cmd(sock, &lum, &lur);
+}
+
+/*
+ * If sock is negative, it means we don't have to notify the other side
+ * (e.g. application has already vanished).
+ */
+int ustctl_release_object(int sock, struct lttng_ust_abi_object_data *data)
+{
+ int ret;
+
+ if (!data)
+ return -EINVAL;
+
+ switch (data->type) {
+ case LTTNG_UST_ABI_OBJECT_TYPE_CHANNEL:
+ if (data->u.channel.wakeup_fd >= 0) {
+ ret = close(data->u.channel.wakeup_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.channel.wakeup_fd = -1;
+ }
+ free(data->u.channel.data);
+ data->u.channel.data = NULL;
+ break;
+ case LTTNG_UST_ABI_OBJECT_TYPE_STREAM:
+ if (data->u.stream.shm_fd >= 0) {
+ ret = close(data->u.stream.shm_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.stream.shm_fd = -1;
+ }
+ if (data->u.stream.wakeup_fd >= 0) {
+ ret = close(data->u.stream.wakeup_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.stream.wakeup_fd = -1;
+ }
+ break;
+ case LTTNG_UST_ABI_OBJECT_TYPE_EVENT:
+ case LTTNG_UST_ABI_OBJECT_TYPE_CONTEXT:
+ case LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER_GROUP:
+ case LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER:
+ break;
+ case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER:
+ free(data->u.counter.data);
+ data->u.counter.data = NULL;
+ break;
+ case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_GLOBAL:
+ if (data->u.counter_global.shm_fd >= 0) {
+ ret = close(data->u.counter_global.shm_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.counter_global.shm_fd = -1;
+ }
+ break;
+ case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_CPU:
+ if (data->u.counter_cpu.shm_fd >= 0) {
+ ret = close(data->u.counter_cpu.shm_fd);
+ if (ret < 0) {
+ ret = -errno;
+ return ret;
+ }
+ data->u.counter_cpu.shm_fd = -1;
+ }
+ break;
+ default:
+ assert(0);
+ }
+ return ustctl_release_handle(sock, data->handle);
+}
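+
+/*
+ * Example (sketch): tearing down a channel object after the
+ * application has vanished. A negative sock skips the RELEASE
+ * command to the peer; local fds and allocations are still
+ * reclaimed.
+ *
+ *   ret = ustctl_release_object(-1, channel_data);
+ *   if (ret)
+ *       ERR("failed to release channel object: %d", ret);
+ *   free(channel_data);
+ */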
+
+/*
+ * Send registration done packet to the application.
+ */
+int ustctl_register_done(int sock)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ DBG("Sending register done command to %d", sock);
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_ABI_REGISTER_DONE;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ return 0;
+}
+
+/*
+ * returns the session handle on success, negative error value on error.
+ */
+int ustctl_create_session(int sock)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, session_handle;
+
+ /* Create session */
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_ABI_SESSION;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ session_handle = lur.ret_val;
+ DBG("received session handle %u", session_handle);
+ return session_handle;
+}
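+
+/*
+ * Minimal sketch: the returned handle identifies the session in
+ * subsequent commands (channel creation, enable/disable, ...).
+ *
+ *   int session_handle = ustctl_create_session(sock);
+ *
+ *   if (session_handle < 0)
+ *       return session_handle;  negative lttng-ust or errno value
+ *   ...
+ *   ustctl_release_handle(sock, session_handle);
+ */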
+
+int ustctl_create_event(int sock, struct lttng_ust_abi_event *ev,
+ struct lttng_ust_abi_object_data *channel_data,
+ struct lttng_ust_abi_object_data **_event_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ struct lttng_ust_abi_object_data *event_data;
+ int ret;
+
+ if (!channel_data || !_event_data)
+ return -EINVAL;
+
+ event_data = zmalloc(sizeof(*event_data));
+ if (!event_data)
+ return -ENOMEM;
+ event_data->type = LTTNG_UST_ABI_OBJECT_TYPE_EVENT;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = channel_data->handle;
+ lum.cmd = LTTNG_UST_ABI_EVENT;
+ strncpy(lum.u.event.name, ev->name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ lum.u.event.instrumentation = ev->instrumentation;
+ lum.u.event.loglevel_type = ev->loglevel_type;
+ lum.u.event.loglevel = ev->loglevel;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret) {
+ free(event_data);
+ return ret;
+ }
+ event_data->handle = lur.ret_val;
+ DBG("received event handle %u", event_data->handle);
+ *_event_data = event_data;
+ return 0;
+}
+
+int ustctl_add_context(int sock, struct lttng_ust_context_attr *ctx,
+ struct lttng_ust_abi_object_data *obj_data,
+ struct lttng_ust_abi_object_data **_context_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ struct lttng_ust_abi_object_data *context_data = NULL;
+ char *buf = NULL;
+ size_t len;
+ int ret;
+
+ if (!obj_data || !_context_data) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ context_data = zmalloc(sizeof(*context_data));
+ if (!context_data) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ context_data->type = LTTNG_UST_ABI_OBJECT_TYPE_CONTEXT;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = obj_data->handle;
+ lum.cmd = LTTNG_UST_ABI_CONTEXT;
+
+ lum.u.context.ctx = ctx->ctx;
+ switch (ctx->ctx) {
+ case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+ lum.u.context.u.perf_counter = ctx->u.perf_counter;
+ break;
+ case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+ {
+ size_t provider_name_len = strlen(
+ ctx->u.app_ctx.provider_name) + 1;
+ size_t ctx_name_len = strlen(ctx->u.app_ctx.ctx_name) + 1;
+
+ lum.u.context.u.app_ctx.provider_name_len = provider_name_len;
+ lum.u.context.u.app_ctx.ctx_name_len = ctx_name_len;
+
+ len = provider_name_len + ctx_name_len;
+ buf = zmalloc(len);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ memcpy(buf, ctx->u.app_ctx.provider_name,
+ provider_name_len);
+ memcpy(buf + provider_name_len, ctx->u.app_ctx.ctx_name,
+ ctx_name_len);
+ break;
+ }
+ default:
+ break;
+ }
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ goto end;
+ if (buf) {
+ /* send variable-length provider_name + ctx_name payload */
+ ret = ustcomm_send_unix_sock(sock, buf, len);
+ if (ret < 0) {
+ goto end;
+ }
+ if (ret != len) {
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (ret < 0) {
+ goto end;
+ }
+ context_data->handle = -1;
+ DBG("Context created successfully");
+ *_context_data = context_data;
+ context_data = NULL;
+end:
+ free(context_data);
+ free(buf);
+ return ret;
+}
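+
+/*
+ * For LTTNG_UST_ABI_CONTEXT_APP_CONTEXT, the variable-length payload
+ * sent above is both null-terminated strings back to back:
+ * [ provider_name\0 ][ ctx_name\0 ], with their respective lengths
+ * carried in lum.u.context.u.app_ctx.
+ */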
+
+int ustctl_set_filter(int sock, struct lttng_ust_abi_filter_bytecode *bytecode,
+ struct lttng_ust_abi_object_data *obj_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!obj_data)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = obj_data->handle;
+ lum.cmd = LTTNG_UST_ABI_FILTER;
+ lum.u.filter.data_size = bytecode->len;
+ lum.u.filter.reloc_offset = bytecode->reloc_offset;
+ lum.u.filter.seqnum = bytecode->seqnum;
+
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+ /* send var len bytecode */
+ ret = ustcomm_send_unix_sock(sock, bytecode->data,
+ bytecode->len);
+ if (ret < 0) {
+ return ret;
+ }
+ if (ret != bytecode->len)
+ return -EINVAL;
+ return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+}
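+
+/*
+ * Like the other variable-length commands, installing bytecode is a
+ * three-step exchange: fixed-size header (data_size, reloc_offset,
+ * seqnum), then the raw bytecode, then the application's reply.
+ * ustctl_set_capture() below follows the same pattern.
+ */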
+
+int ustctl_set_capture(int sock, struct lttng_ust_abi_capture_bytecode *bytecode,
+ struct lttng_ust_abi_object_data *obj_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!obj_data)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = obj_data->handle;
+ lum.cmd = LTTNG_UST_ABI_CAPTURE;
+ lum.u.capture.data_size = bytecode->len;
+ lum.u.capture.reloc_offset = bytecode->reloc_offset;
+ lum.u.capture.seqnum = bytecode->seqnum;
+
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+ /* send var len bytecode */
+ ret = ustcomm_send_unix_sock(sock, bytecode->data,
+ bytecode->len);
+ if (ret < 0) {
+ return ret;
+ }
+ if (ret != bytecode->len)
+ return -EINVAL;
+ return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+}
+
+int ustctl_set_exclusion(int sock, struct lttng_ust_abi_event_exclusion *exclusion,
+ struct lttng_ust_abi_object_data *obj_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!obj_data) {
+ return -EINVAL;
+ }
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = obj_data->handle;
+ lum.cmd = LTTNG_UST_ABI_EXCLUSION;
+ lum.u.exclusion.count = exclusion->count;
+
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret) {
+ return ret;
+ }
+
+ /* send var len exclusion names */
+ ret = ustcomm_send_unix_sock(sock,
+ exclusion->names,
+ exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN);
+ if (ret < 0) {
+ return ret;
+ }
+ if (ret != exclusion->count * LTTNG_UST_ABI_SYM_NAME_LEN) {
+ return -EINVAL;
+ }
+ return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+}
+
+/* Enable event, channel and session ioctl */
+int ustctl_enable(int sock, struct lttng_ust_abi_object_data *object)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!object)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = object->handle;
+ lum.cmd = LTTNG_UST_ABI_ENABLE;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ DBG("enabled handle %u", object->handle);
+ return 0;
+}
+
+/* Disable event, channel and session ioctl */
+int ustctl_disable(int sock, struct lttng_ust_abi_object_data *object)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!object)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = object->handle;
+ lum.cmd = LTTNG_UST_ABI_DISABLE;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ DBG("disable handle %u", object->handle);
+ return 0;
+}
+
+int ustctl_start_session(int sock, int handle)
+{
+ struct lttng_ust_abi_object_data obj;
+
+ obj.handle = handle;
+ return ustctl_enable(sock, &obj);
+}
+
+int ustctl_stop_session(int sock, int handle)
+{
+ struct lttng_ust_abi_object_data obj;
+
+ obj.handle = handle;
+ return ustctl_disable(sock, &obj);
+}
+
+int ustctl_create_event_notifier_group(int sock, int pipe_fd,
+ struct lttng_ust_abi_object_data **_event_notifier_group_data)
+{
+ struct lttng_ust_abi_object_data *event_notifier_group_data;
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ ssize_t len;
+ int ret;
+
+ if (!_event_notifier_group_data)
+ return -EINVAL;
+
+ event_notifier_group_data = zmalloc(sizeof(*event_notifier_group_data));
+ if (!event_notifier_group_data)
+ return -ENOMEM;
+
+ event_notifier_group_data->type = LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER_GROUP;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE;
+
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ goto error;
+
+ /* Send event_notifier notification pipe. */
+ len = ustcomm_send_fds_unix_sock(sock, &pipe_fd, 1);
+ if (len <= 0) {
+ ret = len;
+ goto error;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (ret)
+ goto error;
+
+ event_notifier_group_data->handle = lur.ret_val;
+ DBG("received event_notifier group handle %d", event_notifier_group_data->handle);
+
+ *_event_notifier_group_data = event_notifier_group_data;
+
+ ret = 0;
+ goto end;
+error:
+ free(event_notifier_group_data);
+
+end:
+ return ret;
+}
+
+int ustctl_create_event_notifier(int sock, struct lttng_ust_abi_event_notifier *event_notifier,
+ struct lttng_ust_abi_object_data *event_notifier_group,
+ struct lttng_ust_abi_object_data **_event_notifier_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ struct lttng_ust_abi_object_data *event_notifier_data;
+ ssize_t len;
+ int ret;
+
+ if (!event_notifier_group || !_event_notifier_data)
+ return -EINVAL;
+
+ event_notifier_data = zmalloc(sizeof(*event_notifier_data));
+ if (!event_notifier_data)
+ return -ENOMEM;
+
+ event_notifier_data->type = LTTNG_UST_ABI_OBJECT_TYPE_EVENT_NOTIFIER;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = event_notifier_group->handle;
+ lum.cmd = LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE;
+ lum.u.event_notifier.len = sizeof(*event_notifier);
+
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret) {
+ free(event_notifier_data);
+ return ret;
+ }
+ /* Send struct lttng_ust_abi_event_notifier */
+ len = ustcomm_send_unix_sock(sock, event_notifier, sizeof(*event_notifier));
+ if (len != sizeof(*event_notifier)) {
+ free(event_notifier_data);
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (ret) {
+ free(event_notifier_data);
+ return ret;
+ }
+ event_notifier_data->handle = lur.ret_val;
+ DBG("received event_notifier handle %u", event_notifier_data->handle);
+ *_event_notifier_data = event_notifier_data;
+
+ return ret;
+}
+
+int ustctl_tracepoint_list(int sock)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, tp_list_handle;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_ABI_TRACEPOINT_LIST;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ tp_list_handle = lur.ret_val;
+ DBG("received tracepoint list handle %u", tp_list_handle);
+ return tp_list_handle;
+}
+
+int ustctl_tracepoint_list_get(int sock, int tp_list_handle,
+ struct lttng_ust_abi_tracepoint_iter *iter)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!iter)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = tp_list_handle;
+ lum.cmd = LTTNG_UST_ABI_TRACEPOINT_LIST_GET;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ DBG("received tracepoint list entry name %s loglevel %d",
+ lur.u.tracepoint.name,
+ lur.u.tracepoint.loglevel);
+ memcpy(iter, &lur.u.tracepoint, sizeof(*iter));
+ return 0;
+}
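+
+/*
+ * Listing sketch (hypothetical caller; presumably the application
+ * ends the iteration with an error such as -LTTNG_UST_ERR_NOENT):
+ *
+ *   struct lttng_ust_abi_tracepoint_iter iter;
+ *   int tp_handle = ustctl_tracepoint_list(sock);
+ *
+ *   if (tp_handle < 0)
+ *       return tp_handle;
+ *   while (ustctl_tracepoint_list_get(sock, tp_handle, &iter) == 0)
+ *       DBG("tracepoint %s", iter.name);
+ *   ustctl_release_handle(sock, tp_handle);
+ */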
+
+int ustctl_tracepoint_field_list(int sock)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, tp_field_list_handle;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ tp_field_list_handle = lur.ret_val;
+ DBG("received tracepoint field list handle %u", tp_field_list_handle);
+ return tp_field_list_handle;
+}
+
+int ustctl_tracepoint_field_list_get(int sock, int tp_field_list_handle,
+ struct lttng_ust_abi_field_iter *iter)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+ ssize_t len;
+
+ if (!iter)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = tp_field_list_handle;
+ lum.cmd = LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ len = ustcomm_recv_unix_sock(sock, iter, sizeof(*iter));
+ if (len != sizeof(*iter)) {
+ return -EINVAL;
+ }
+ DBG("received tracepoint field list entry event_name %s event_loglevel %d field_name %s field_type %d",
+ iter->event_name,
+ iter->loglevel,
+ iter->field_name,
+ iter->type);
+ return 0;
+}
+
+int ustctl_tracer_version(int sock, struct lttng_ust_abi_tracer_version *v)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!v)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_ABI_TRACER_VERSION;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ memcpy(v, &lur.u.version, sizeof(*v));
+ DBG("received tracer version");
+ return 0;
+}
+
+int ustctl_wait_quiescent(int sock)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = LTTNG_UST_ABI_ROOT_HANDLE;
+ lum.cmd = LTTNG_UST_ABI_WAIT_QUIESCENT;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ DBG("waited for quiescent state");
+ return 0;
+}
+
+int ustctl_calibrate(int sock __attribute__((unused)),
+ struct lttng_ust_abi_calibrate *calibrate)
+{
+ if (!calibrate)
+ return -EINVAL;
+
+ return -ENOSYS;
+}
+
+int ustctl_sock_flush_buffer(int sock, struct lttng_ust_abi_object_data *object)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!object)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = object->handle;
+ lum.cmd = LTTNG_UST_ABI_FLUSH_BUFFER;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ DBG("flushed buffer handle %u", object->handle);
+ return 0;
+}
+
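+/*
+ * Wire format (sketch, as implemented below): unless send_fd_only is
+ * set, a channel is announced as [ u64 mmap size ][ channel type ]
+ * [ channel data ]; in all cases the wakeup fd then follows as unix
+ * socket fd-passing (SCM_RIGHTS) ancillary data.
+ */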
+static
+int ustctl_send_channel(int sock,
+ enum lttng_ust_abi_chan_type type,
+ void *data,
+ uint64_t size,
+ int wakeup_fd,
+ int send_fd_only)
+{
+ ssize_t len;
+
+ if (!send_fd_only) {
+ /* Send mmap size */
+ len = ustcomm_send_unix_sock(sock, &size, sizeof(size));
+ if (len != sizeof(size)) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ /* Send channel type */
+ len = ustcomm_send_unix_sock(sock, &type, sizeof(type));
+ if (len != sizeof(type)) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+ }
+
+ /* Send channel data */
+ len = ustcomm_send_unix_sock(sock, data, size);
+ if (len != size) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ /* Send wakeup fd */
+ len = ustcomm_send_fds_unix_sock(sock, &wakeup_fd, 1);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+ return 0;
+}
+
+static
+int ustctl_send_stream(int sock,
+ uint32_t stream_nr,
+ uint64_t memory_map_size,
+ int shm_fd, int wakeup_fd,
+ int send_fd_only)
+{
+ ssize_t len;
+ int fds[2];
+
+ if (!send_fd_only) {
+ if (shm_fd < 0) {
+ /* finish iteration */
+ uint64_t v = -1;
+
+ len = ustcomm_send_unix_sock(sock, &v, sizeof(v));
+ if (len != sizeof(v)) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+ return 0;
+ }
+
+ /* Send mmap size */
+ len = ustcomm_send_unix_sock(sock, &memory_map_size,
+ sizeof(memory_map_size));
+ if (len != sizeof(memory_map_size)) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ /* Send stream nr */
+ len = ustcomm_send_unix_sock(sock, &stream_nr,
+ sizeof(stream_nr));
+ if (len != sizeof(stream_nr)) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+ }
+
+ /* Send shm fd and wakeup fd */
+ fds[0] = shm_fd;
+ fds[1] = wakeup_fd;
+ len = ustcomm_send_fds_unix_sock(sock, fds, 2);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+ return 0;
+}
+
+int ustctl_recv_channel_from_consumer(int sock,
+ struct lttng_ust_abi_object_data **_channel_data)
+{
+ struct lttng_ust_abi_object_data *channel_data;
+ ssize_t len;
+ int wakeup_fd;
+ int ret;
+
+ channel_data = zmalloc(sizeof(*channel_data));
+ if (!channel_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ channel_data->type = LTTNG_UST_ABI_OBJECT_TYPE_CHANNEL;
+ channel_data->handle = -1;
+
+ /* recv mmap size */
+ len = ustcomm_recv_unix_sock(sock, &channel_data->size,
+ sizeof(channel_data->size));
+ if (len != sizeof(channel_data->size)) {
+ if (len < 0)
+ ret = len;
+ else
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* recv channel type */
+ len = ustcomm_recv_unix_sock(sock, &channel_data->u.channel.type,
+ sizeof(channel_data->u.channel.type));
+ if (len != sizeof(channel_data->u.channel.type)) {
+ if (len < 0)
+ ret = len;
+ else
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* recv channel data */
+ channel_data->u.channel.data = zmalloc(channel_data->size);
+ if (!channel_data->u.channel.data) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ len = ustcomm_recv_unix_sock(sock, channel_data->u.channel.data,
+ channel_data->size);
+ if (len != channel_data->size) {
+ if (len < 0)
+ ret = len;
+ else
+ ret = -EINVAL;
+ goto error_recv_data;
+ }
+ /* recv wakeup fd */
+ len = ustcomm_recv_fds_unix_sock(sock, &wakeup_fd, 1);
+ if (len <= 0) {
+ if (len < 0) {
+ ret = len;
+ goto error_recv_data;
+ } else {
+ ret = -EIO;
+ goto error_recv_data;
+ }
+ }
+ channel_data->u.channel.wakeup_fd = wakeup_fd;
+ *_channel_data = channel_data;
+ return 0;
+
+error_recv_data:
+ free(channel_data->u.channel.data);
+error:
+ free(channel_data);
+error_alloc:
+ return ret;
+}
+
+int ustctl_recv_stream_from_consumer(int sock,
+ struct lttng_ust_abi_object_data **_stream_data)
+{
+ struct lttng_ust_abi_object_data *stream_data;
+ ssize_t len;
+ int ret;
+ int fds[2];
+
+ stream_data = zmalloc(sizeof(*stream_data));
+ if (!stream_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+
+ stream_data->type = LTTNG_UST_ABI_OBJECT_TYPE_STREAM;
+ stream_data->handle = -1;
+
+ /* recv mmap size */
+ len = ustcomm_recv_unix_sock(sock, &stream_data->size,
+ sizeof(stream_data->size));
+ if (len != sizeof(stream_data->size)) {
+ if (len < 0)
+ ret = len;
+ else
+ ret = -EINVAL;
+ goto error;
+ }
+ if (stream_data->size == -1) {
+ ret = -LTTNG_UST_ERR_NOENT;
+ goto error;
+ }
+
+ /* recv stream nr */
+ len = ustcomm_recv_unix_sock(sock, &stream_data->u.stream.stream_nr,
+ sizeof(stream_data->u.stream.stream_nr));
+ if (len != sizeof(stream_data->u.stream.stream_nr)) {
+ if (len < 0)
+ ret = len;
+ else
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* recv shm fd and wakeup fd */
+ len = ustcomm_recv_fds_unix_sock(sock, fds, 2);
+ if (len <= 0) {
+ if (len < 0) {
+ ret = len;
+ goto error;
+ } else {
+ ret = -EIO;
+ goto error;
+ }
+ }
+ stream_data->u.stream.shm_fd = fds[0];
+ stream_data->u.stream.wakeup_fd = fds[1];
+ *_stream_data = stream_data;
+ return 0;
+
+error:
+ free(stream_data);
+error_alloc:
+ return ret;
+}
+
+int ustctl_send_channel_to_ust(int sock, int session_handle,
+ struct lttng_ust_abi_object_data *channel_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ if (!channel_data)
+ return -EINVAL;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = session_handle;
+ lum.cmd = LTTNG_UST_ABI_CHANNEL;
+ lum.u.channel.len = channel_data->size;
+ lum.u.channel.type = channel_data->u.channel.type;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ ret = ustctl_send_channel(sock,
+ channel_data->u.channel.type,
+ channel_data->u.channel.data,
+ channel_data->size,
+ channel_data->u.channel.wakeup_fd,
+ 1);
+ if (ret)
+ return ret;
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ channel_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+int ustctl_send_stream_to_ust(int sock,
+ struct lttng_ust_abi_object_data *channel_data,
+ struct lttng_ust_abi_object_data *stream_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = channel_data->handle;
+ lum.cmd = LTTNG_UST_ABI_STREAM;
+ lum.u.stream.len = stream_data->size;
+ lum.u.stream.stream_nr = stream_data->u.stream.stream_nr;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ assert(stream_data);
+ assert(stream_data->type == LTTNG_UST_ABI_OBJECT_TYPE_STREAM);
+
+ ret = ustctl_send_stream(sock,
+ stream_data->u.stream.stream_nr,
+ stream_data->size,
+ stream_data->u.stream.shm_fd,
+ stream_data->u.stream.wakeup_fd, 1);
+ if (ret)
+ return ret;
+ return ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+}
+
+int ustctl_duplicate_ust_object_data(struct lttng_ust_abi_object_data **dest,
+ struct lttng_ust_abi_object_data *src)
+{
+ struct lttng_ust_abi_object_data *obj;
+ int ret;
+
+ if (src->handle != -1) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ obj = zmalloc(sizeof(*obj));
+ if (!obj) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ obj->type = src->type;
+ obj->handle = src->handle;
+ obj->size = src->size;
+
+ switch (obj->type) {
+ case LTTNG_UST_ABI_OBJECT_TYPE_CHANNEL:
+ {
+ obj->u.channel.type = src->u.channel.type;
+ if (src->u.channel.wakeup_fd >= 0) {
+ obj->u.channel.wakeup_fd =
+ dup(src->u.channel.wakeup_fd);
+ if (obj->u.channel.wakeup_fd < 0) {
+ ret = errno;
+ goto chan_error_wakeup_fd;
+ }
+ } else {
+ obj->u.channel.wakeup_fd =
+ src->u.channel.wakeup_fd;
+ }
+ obj->u.channel.data = zmalloc(obj->size);
+ if (!obj->u.channel.data) {
+ ret = -ENOMEM;
+ goto chan_error_alloc;
+ }
+ memcpy(obj->u.channel.data, src->u.channel.data, obj->size);
+ break;
+
+ chan_error_alloc:
+ if (src->u.channel.wakeup_fd >= 0) {
+ int closeret;
+
+ closeret = close(obj->u.channel.wakeup_fd);
+ if (closeret) {
+ PERROR("close");
+ }
+ }
+ chan_error_wakeup_fd:
+ goto error_type;
+
+ }
+
+ case LTTNG_UST_ABI_OBJECT_TYPE_STREAM:
+ {
+ obj->u.stream.stream_nr = src->u.stream.stream_nr;
+ if (src->u.stream.wakeup_fd >= 0) {
+ obj->u.stream.wakeup_fd =
+ dup(src->u.stream.wakeup_fd);
+ if (obj->u.stream.wakeup_fd < 0) {
+ ret = errno;
+ goto stream_error_wakeup_fd;
+ }
+ } else {
+ obj->u.stream.wakeup_fd =
+ src->u.stream.wakeup_fd;
+ }
+
+ if (src->u.stream.shm_fd >= 0) {
+ obj->u.stream.shm_fd =
+ dup(src->u.stream.shm_fd);
+ if (obj->u.stream.shm_fd < 0) {
+ ret = errno;
+ goto stream_error_shm_fd;
+ }
+ } else {
+ obj->u.stream.shm_fd =
+ src->u.stream.shm_fd;
+ }
+ break;
+
+ stream_error_shm_fd:
+ if (src->u.stream.wakeup_fd >= 0) {
+ int closeret;
+
+ closeret = close(obj->u.stream.wakeup_fd);
+ if (closeret) {
+ PERROR("close");
+ }
+ }
+ stream_error_wakeup_fd:
+ goto error_type;
+ }
+
+ case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER:
+ {
+ obj->u.counter.data = zmalloc(obj->size);
+ if (!obj->u.counter.data) {
+ ret = -ENOMEM;
+ goto error_type;
+ }
+ memcpy(obj->u.counter.data, src->u.counter.data, obj->size);
+ break;
+ }
+
+ case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_GLOBAL:
+ {
+ if (src->u.counter_global.shm_fd >= 0) {
+ obj->u.counter_global.shm_fd =
+ dup(src->u.counter_global.shm_fd);
+ if (obj->u.counter_global.shm_fd < 0) {
+ ret = errno;
+ goto error_type;
+ }
+ }
+ break;
+ }
+
+ case LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_CPU:
+ {
+ obj->u.counter_cpu.cpu_nr = src->u.counter_cpu.cpu_nr;
+ if (src->u.counter_cpu.shm_fd >= 0) {
+ obj->u.counter_cpu.shm_fd =
+ dup(src->u.counter_cpu.shm_fd);
+ if (obj->u.counter_cpu.shm_fd < 0) {
+ ret = errno;
+ goto error_type;
+ }
+ }
+ break;
+ }
+
+ default:
+ ret = -EINVAL;
+ goto error_type;
+ }
+
+ *dest = obj;
+ return 0;
+
+error_type:
+ free(obj);
+error:
+ return ret;
+}
+
+
+/* Buffer operations */
+
+int ustctl_get_nr_stream_per_channel(void)
+{
+ return num_possible_cpus();
+}
+
+struct ustctl_consumer_channel *
+ ustctl_create_channel(struct ustctl_consumer_channel_attr *attr,
+ const int *stream_fds, int nr_stream_fds)
+{
+ struct ustctl_consumer_channel *chan;
+ const char *transport_name;
+ struct lttng_transport *transport;
+
+ switch (attr->type) {
+ case LTTNG_UST_ABI_CHAN_PER_CPU:
+ if (attr->output == LTTNG_UST_ABI_MMAP) {
+ if (attr->overwrite) {
+ if (attr->read_timer_interval == 0) {
+ transport_name = "relay-overwrite-mmap";
+ } else {
+ transport_name = "relay-overwrite-rt-mmap";
+ }
+ } else {
+ if (attr->read_timer_interval == 0) {
+ transport_name = "relay-discard-mmap";
+ } else {
+ transport_name = "relay-discard-rt-mmap";
+ }
+ }
+ } else {
+ return NULL;
+ }
+ break;
+ case LTTNG_UST_ABI_CHAN_METADATA:
+ if (attr->output == LTTNG_UST_ABI_MMAP)
+ transport_name = "relay-metadata-mmap";
+ else
+ return NULL;
+ break;
+ default:
+ transport_name = "<unknown>";
+ return NULL;
+ }
+
+ transport = lttng_ust_transport_find(transport_name);
+ if (!transport) {
+ DBG("LTTng transport %s not found\n",
+ transport_name);
+ return NULL;
+ }
+
+ chan = zmalloc(sizeof(*chan));
+ if (!chan)
+ return NULL;
+
+ chan->chan = transport->ops.priv->channel_create(transport_name, NULL,
+ attr->subbuf_size, attr->num_subbuf,
+ attr->switch_timer_interval,
+ attr->read_timer_interval,
+ attr->uuid, attr->chan_id,
+ stream_fds, nr_stream_fds,
+ attr->blocking_timeout);
+ if (!chan->chan) {
+ goto chan_error;
+ }
+ chan->chan->ops = &transport->ops;
+ memcpy(&chan->attr, attr, sizeof(chan->attr));
+ chan->wait_fd = ustctl_channel_get_wait_fd(chan);
+ chan->wakeup_fd = ustctl_channel_get_wakeup_fd(chan);
+ return chan;
+
+chan_error:
+ free(chan);
+ return NULL;
+}
+
+void ustctl_destroy_channel(struct ustctl_consumer_channel *chan)
+{
+ (void) ustctl_channel_close_wait_fd(chan);
+ (void) ustctl_channel_close_wakeup_fd(chan);
+ chan->chan->ops->priv->channel_destroy(chan->chan);
+ free(chan);
+}
+
+int ustctl_send_channel_to_sessiond(int sock,
+ struct ustctl_consumer_channel *channel)
+{
+ struct shm_object_table *table;
+
+ table = channel->chan->priv->rb_chan->handle->table;
+ if (table->size <= 0)
+ return -EINVAL;
+ return ustctl_send_channel(sock,
+ channel->attr.type,
+ table->objects[0].memory_map,
+ table->objects[0].memory_map_size,
+ channel->wakeup_fd,
+ 0);
+}
+
+int ustctl_send_stream_to_sessiond(int sock,
+ struct ustctl_consumer_stream *stream)
+{
+ if (!stream)
+ return ustctl_send_stream(sock, -1U, -1U, -1, -1, 0);
+
+ return ustctl_send_stream(sock,
+ stream->cpu,
+ stream->memory_map_size,
+ stream->shm_fd, stream->wakeup_fd,
+ 0);
+}
+
+int ustctl_write_metadata_to_channel(
+ struct ustctl_consumer_channel *channel,
+ const char *metadata_str, /* NOT null-terminated */
+ size_t len) /* metadata length */
+{
+ struct lttng_ust_lib_ring_buffer_ctx ctx;
+ struct lttng_ust_channel_buffer *lttng_chan_buf = channel->chan;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
+ const char *str = metadata_str;
+ int ret = 0, waitret;
+ size_t reserve_len, pos;
+
+ for (pos = 0; pos < len; pos += reserve_len) {
+ reserve_len = min_t(size_t,
+ lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
+ len - pos);
+ lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
+ /*
+ * We don't care about the metadata buffer's records lost
+ * count, because we always retry here. Report an error if
+ * we need to bail out after a timeout or when interrupted.
+ */
+ waitret = wait_cond_interruptible_timeout(
+ ({
+ ret = lttng_chan_buf->ops->event_reserve(&ctx);
+ ret != -ENOBUFS || !ret;
+ }),
+ LTTNG_METADATA_TIMEOUT_MSEC);
+ if (waitret == -ETIMEDOUT || waitret == -EINTR || ret) {
+ DBG("LTTng: Failure to write metadata to buffers (%s)\n",
+ waitret == -EINTR ? "interrupted" :
+ (ret == -ENOBUFS ? "timeout" : "I/O error"));
+ if (waitret == -EINTR)
+ ret = waitret;
+ goto end;
+ }
+ lttng_chan_buf->ops->event_write(&ctx, &str[pos], reserve_len, 1);
+ lttng_chan_buf->ops->event_commit(&ctx);
+ }
+end:
+ return ret;
+}
+
+/*
+ * Write at most one packet in the channel.
+ * Returns the number of bytes written on success, < 0 on error.
+ */
+ssize_t ustctl_write_one_packet_to_channel(
+ struct ustctl_consumer_channel *channel,
+ const char *metadata_str, /* NOT null-terminated */
+ size_t len) /* metadata length */
+{
+ struct lttng_ust_lib_ring_buffer_ctx ctx;
+ struct lttng_ust_channel_buffer *lttng_chan_buf = channel->chan;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = lttng_chan_buf->priv->rb_chan;
+ const char *str = metadata_str;
+ ssize_t reserve_len;
+ int ret;
+
+ reserve_len = min_t(ssize_t,
+ lttng_chan_buf->ops->priv->packet_avail_size(lttng_chan_buf),
+ len);
+ lttng_ust_lib_ring_buffer_ctx_init(&ctx, rb_chan, reserve_len, sizeof(char), NULL);
+ ret = lttng_chan_buf->ops->event_reserve(&ctx);
+ if (ret != 0) {
+ DBG("LTTng: event reservation failed");
+ assert(ret < 0);
+ reserve_len = ret;
+ goto end;
+ }
+ lttng_chan_buf->ops->event_write(&ctx, str, reserve_len, 1);
+ lttng_chan_buf->ops->event_commit(&ctx);
+
+end:
+ return reserve_len;
+}
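+
+/*
+ * Usage sketch (hypothetical consumer-side caller; "metadata" is not
+ * null-terminated, "len" is its byte count):
+ *
+ *   ssize_t written = ustctl_write_one_packet_to_channel(channel,
+ *           metadata, len);
+ *
+ *   if (written < 0)
+ *       ... error ...
+ *   else if (written < len)
+ *       ... the remainder must go into a later packet ...
+ */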
+
+int ustctl_channel_close_wait_fd(struct ustctl_consumer_channel *consumer_chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ int ret;
+
+ chan = consumer_chan->chan->priv->rb_chan;
+ ret = ring_buffer_channel_close_wait_fd(&chan->backend.config,
+ chan, chan->handle);
+ if (!ret)
+ consumer_chan->wait_fd = -1;
+ return ret;
+}
+
+int ustctl_channel_close_wakeup_fd(struct ustctl_consumer_channel *consumer_chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ int ret;
+
+ chan = consumer_chan->chan->priv->rb_chan;
+ ret = ring_buffer_channel_close_wakeup_fd(&chan->backend.config,
+ chan, chan->handle);
+ if (!ret)
+ consumer_chan->wakeup_fd = -1;
+ return ret;
+}
+
+int ustctl_stream_close_wait_fd(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+
+ chan = stream->chan->chan->priv->rb_chan;
+ return ring_buffer_stream_close_wait_fd(&chan->backend.config,
+ chan, chan->handle, stream->cpu);
+}
+
+int ustctl_stream_close_wakeup_fd(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+
+ chan = stream->chan->chan->priv->rb_chan;
+ return ring_buffer_stream_close_wakeup_fd(&chan->backend.config,
+ chan, chan->handle, stream->cpu);
+}
+
+struct ustctl_consumer_stream *
+ ustctl_create_stream(struct ustctl_consumer_channel *channel,
+ int cpu)
+{
+ struct ustctl_consumer_stream *stream;
+ struct lttng_ust_shm_handle *handle;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ int shm_fd, wait_fd, wakeup_fd;
+ uint64_t memory_map_size;
+ struct lttng_ust_lib_ring_buffer *buf;
+ int ret;
+
+ if (!channel)
+ return NULL;
+ rb_chan = channel->chan->priv->rb_chan;
+ handle = rb_chan->handle;
+ if (!handle)
+ return NULL;
+
+ buf = channel_get_ring_buffer(&rb_chan->backend.config,
+ rb_chan, cpu, handle, &shm_fd, &wait_fd,
+ &wakeup_fd, &memory_map_size);
+ if (!buf)
+ return NULL;
+ ret = lib_ring_buffer_open_read(buf, handle);
+ if (ret)
+ return NULL;
+
+ stream = zmalloc(sizeof(*stream));
+ if (!stream)
+ goto alloc_error;
+ stream->buf = buf;
+ stream->chan = channel;
+ stream->shm_fd = shm_fd;
+ stream->wait_fd = wait_fd;
+ stream->wakeup_fd = wakeup_fd;
+ stream->memory_map_size = memory_map_size;
+ stream->cpu = cpu;
+ return stream;
+
+alloc_error:
+ return NULL;
+}
+
+void ustctl_destroy_stream(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ assert(stream);
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ (void) ustctl_stream_close_wait_fd(stream);
+ (void) ustctl_stream_close_wakeup_fd(stream);
+ lib_ring_buffer_release_read(buf, consumer_chan->chan->priv->rb_chan->handle);
+ free(stream);
+}
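+
+/*
+ * Per-CPU setup sketch (hypothetical consumer, error handling
+ * elided): create one stream per possible stream slot; NULL is
+ * returned once no buffer exists for the given cpu.
+ *
+ *   for (cpu = 0; cpu < ustctl_get_nr_stream_per_channel(); cpu++) {
+ *       stream = ustctl_create_stream(channel, cpu);
+ *       if (!stream)
+ *           break;
+ *       ... hand off fds, then ustctl_destroy_stream(stream) ...
+ *   }
+ */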
+
+int ustctl_channel_get_wait_fd(struct ustctl_consumer_channel *chan)
+{
+ if (!chan)
+ return -EINVAL;
+ return shm_get_wait_fd(chan->chan->priv->rb_chan->handle,
+ &chan->chan->priv->rb_chan->handle->chan._ref);
+}
+
+int ustctl_channel_get_wakeup_fd(struct ustctl_consumer_channel *chan)
+{
+ if (!chan)
+ return -EINVAL;
+ return shm_get_wakeup_fd(chan->chan->priv->rb_chan->handle,
+ &chan->chan->priv->rb_chan->handle->chan._ref);
+}
+
+int ustctl_stream_get_wait_fd(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ return shm_get_wait_fd(consumer_chan->chan->priv->rb_chan->handle, &buf->self._ref);
+}
+
+int ustctl_stream_get_wakeup_fd(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ return shm_get_wakeup_fd(consumer_chan->chan->priv->rb_chan->handle, &buf->self._ref);
+}
+
+/* For mmap mode, readable without "get" operation */
+
+void *ustctl_get_mmap_base(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return NULL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ return shmp(consumer_chan->chan->priv->rb_chan->handle, buf->backend.memory_map);
+}
+
+/* returns the length to mmap. */
+int ustctl_get_mmap_len(struct ustctl_consumer_stream *stream,
+ unsigned long *len)
+{
+ struct ustctl_consumer_channel *consumer_chan;
+ unsigned long mmap_buf_len;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+
+ if (!stream)
+ return -EINVAL;
+ consumer_chan = stream->chan;
+ rb_chan = consumer_chan->chan->priv->rb_chan;
+ if (rb_chan->backend.config.output != RING_BUFFER_MMAP)
+ return -EINVAL;
+ mmap_buf_len = rb_chan->backend.buf_size;
+ if (rb_chan->backend.extra_reader_sb)
+ mmap_buf_len += rb_chan->backend.subbuf_size;
+ if (mmap_buf_len > INT_MAX)
+ return -EFBIG;
+ *len = mmap_buf_len;
+ return 0;
+}
+
+/* returns the maximum size for sub-buffers. */
+int ustctl_get_max_subbuf_size(struct ustctl_consumer_stream *stream,
+ unsigned long *len)
+{
+ struct ustctl_consumer_channel *consumer_chan;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+
+ if (!stream)
+ return -EINVAL;
+ consumer_chan = stream->chan;
+ rb_chan = consumer_chan->chan->priv->rb_chan;
+ *len = rb_chan->backend.subbuf_size;
+ return 0;
+}
+
+/*
+ * For mmap mode, operate on the current packet (between get/put or
+ * get_next/put_next).
+ */
+
+/* returns the offset of the subbuffer belonging to the mmap reader. */
+int ustctl_get_mmap_read_offset(struct ustctl_consumer_stream *stream,
+ unsigned long *off)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *barray_idx;
+ struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ rb_chan = consumer_chan->chan->priv->rb_chan;
+ if (rb_chan->backend.config.output != RING_BUFFER_MMAP)
+ return -EINVAL;
+ sb_bindex = subbuffer_id_get_index(&rb_chan->backend.config,
+ buf->backend.buf_rsb.id);
+ barray_idx = shmp_index(rb_chan->handle, buf->backend.array,
+ sb_bindex);
+ if (!barray_idx)
+ return -EINVAL;
+ pages = shmp(rb_chan->handle, barray_idx->shmp);
+ if (!pages)
+ return -EINVAL;
+ *off = pages->mmap_offset;
+ return 0;
+}
+
+/* returns the size of the current sub-buffer, without padding (for mmap). */
+int ustctl_get_subbuf_size(struct ustctl_consumer_stream *stream,
+ unsigned long *len)
+{
+ struct ustctl_consumer_channel *consumer_chan;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream)
+ return -EINVAL;
+
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ rb_chan = consumer_chan->chan->priv->rb_chan;
+ *len = lib_ring_buffer_get_read_data_size(&rb_chan->backend.config, buf,
+ rb_chan->handle);
+ return 0;
+}
+
+/* returns the size of the current sub-buffer, with padding (for mmap). */
+int ustctl_get_padded_subbuf_size(struct ustctl_consumer_stream *stream,
+ unsigned long *len)
+{
+ struct ustctl_consumer_channel *consumer_chan;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ rb_chan = consumer_chan->chan->priv->rb_chan;
+ *len = lib_ring_buffer_get_read_data_size(&rb_chan->backend.config, buf,
+ rb_chan->handle);
+ *len = LTTNG_UST_PAGE_ALIGN(*len);
+ return 0;
+}
+
+/* Get exclusive read access to the next sub-buffer that can be read. */
+int ustctl_get_next_subbuf(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ return lib_ring_buffer_get_next_subbuf(buf,
+ consumer_chan->chan->priv->rb_chan->handle);
+}
+
+
+/* Release exclusive sub-buffer access, move consumer forward. */
+int ustctl_put_next_subbuf(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ lib_ring_buffer_put_next_subbuf(buf, consumer_chan->chan->priv->rb_chan->handle);
+ return 0;
+}
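+
+/*
+ * Minimal read-side loop for mmap mode (sketch, error handling
+ * elided):
+ *
+ *   unsigned long data_size, off;
+ *
+ *   while (ustctl_get_next_subbuf(stream) == 0) {
+ *       ustctl_get_mmap_read_offset(stream, &off);
+ *       ustctl_get_subbuf_size(stream, &data_size);
+ *       ... consume data_size bytes at mmap base + off ...
+ *       ustctl_put_next_subbuf(stream);
+ *   }
+ */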
+
+/* snapshot */
+
+/* Get a snapshot of the current ring buffer producer and consumer positions */
+int ustctl_snapshot(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ return lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
+ &buf->prod_snapshot, consumer_chan->chan->priv->rb_chan->handle);
+}
+
+/*
+ * Get a snapshot of the current ring buffer producer and consumer positions
+ * even if the consumed and produced positions are contained within the same
+ * subbuffer.
+ */
+int ustctl_snapshot_sample_positions(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ return lib_ring_buffer_snapshot_sample_positions(buf,
+ &buf->cons_snapshot, &buf->prod_snapshot,
+ consumer_chan->chan->priv->rb_chan->handle);
+}
+
+/* Get the consumer position (iteration start) */
+int ustctl_snapshot_get_consumed(struct ustctl_consumer_stream *stream,
+ unsigned long *pos)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ *pos = buf->cons_snapshot;
+ return 0;
+}
+
+/* Get the producer position (iteration end) */
+int ustctl_snapshot_get_produced(struct ustctl_consumer_stream *stream,
+ unsigned long *pos)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ *pos = buf->prod_snapshot;
+ return 0;
+}
+
+/* Get exclusive read access to the specified sub-buffer position */
+int ustctl_get_subbuf(struct ustctl_consumer_stream *stream,
+ unsigned long *pos)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ return lib_ring_buffer_get_subbuf(buf, *pos,
+ consumer_chan->chan->priv->rb_chan->handle);
+}
+
+/* Release exclusive sub-buffer access */
+int ustctl_put_subbuf(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ if (!stream)
+ return -EINVAL;
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ lib_ring_buffer_put_subbuf(buf, consumer_chan->chan->priv->rb_chan->handle);
+ return 0;
+}
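+
+/*
+ * Snapshot-based iteration (sketch): read everything between the
+ * consumed and produced positions captured by ustctl_snapshot().
+ *
+ *   unsigned long consumed, produced, padded;
+ *
+ *   ustctl_snapshot(stream);
+ *   ustctl_snapshot_get_consumed(stream, &consumed);
+ *   ustctl_snapshot_get_produced(stream, &produced);
+ *   while (consumed < produced) {
+ *       if (ustctl_get_subbuf(stream, &consumed) < 0)
+ *           break;
+ *       ... read the sub-buffer ...
+ *       ustctl_get_padded_subbuf_size(stream, &padded);
+ *       ustctl_put_subbuf(stream);
+ *       consumed += padded;
+ *   }
+ */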
+
+void ustctl_flush_buffer(struct ustctl_consumer_stream *stream,
+ int producer_active)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ assert(stream);
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ lib_ring_buffer_switch_slow(buf,
+ producer_active ? SWITCH_ACTIVE : SWITCH_FLUSH,
+ consumer_chan->chan->priv->rb_chan->handle);
+}
+
+void ustctl_clear_buffer(struct ustctl_consumer_stream *stream)
+{
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct ustctl_consumer_channel *consumer_chan;
+
+ assert(stream);
+ buf = stream->buf;
+ consumer_chan = stream->chan;
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
+ consumer_chan->chan->priv->rb_chan->handle);
+ lib_ring_buffer_clear_reader(buf, consumer_chan->chan->priv->rb_chan->handle);
+}
+
+static
+struct lttng_ust_client_lib_ring_buffer_client_cb *get_client_cb(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+
+ config = &chan->backend.config;
+ if (!config->cb_ptr)
+ return NULL;
+ client_cb = caa_container_of(config->cb_ptr,
+ struct lttng_ust_client_lib_ring_buffer_client_cb,
+ parent);
+ return client_cb;
+}
+
+int ustctl_get_timestamp_begin(struct ustctl_consumer_stream *stream,
+ uint64_t *timestamp_begin)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !timestamp_begin)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb)
+ return -ENOSYS;
+ return client_cb->timestamp_begin(buf, chan, timestamp_begin);
+}
+
+int ustctl_get_timestamp_end(struct ustctl_consumer_stream *stream,
+ uint64_t *timestamp_end)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !timestamp_end)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb)
+ return -ENOSYS;
+ return client_cb->timestamp_end(buf, chan, timestamp_end);
+}
+
+int ustctl_get_events_discarded(struct ustctl_consumer_stream *stream,
+ uint64_t *events_discarded)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !events_discarded)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb)
+ return -ENOSYS;
+ return client_cb->events_discarded(buf, chan, events_discarded);
+}
+
+int ustctl_get_content_size(struct ustctl_consumer_stream *stream,
+ uint64_t *content_size)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !content_size)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb)
+ return -ENOSYS;
+ return client_cb->content_size(buf, chan, content_size);
+}
+
+int ustctl_get_packet_size(struct ustctl_consumer_stream *stream,
+ uint64_t *packet_size)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !packet_size)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb)
+ return -ENOSYS;
+ return client_cb->packet_size(buf, chan, packet_size);
+}
+
+int ustctl_get_stream_id(struct ustctl_consumer_stream *stream,
+ uint64_t *stream_id)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !stream_id)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb)
+ return -ENOSYS;
+ return client_cb->stream_id(buf, chan, stream_id);
+}
+
+int ustctl_get_current_timestamp(struct ustctl_consumer_stream *stream,
+ uint64_t *ts)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !ts)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb || !client_cb->current_timestamp)
+ return -ENOSYS;
+ return client_cb->current_timestamp(buf, chan, ts);
+}
+
+int ustctl_get_sequence_number(struct ustctl_consumer_stream *stream,
+ uint64_t *seq)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !seq)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb || !client_cb->sequence_number)
+ return -ENOSYS;
+ return client_cb->sequence_number(buf, chan, seq);
+}
+
+int ustctl_get_instance_id(struct ustctl_consumer_stream *stream,
+ uint64_t *id)
+{
+ struct lttng_ust_client_lib_ring_buffer_client_cb *client_cb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ if (!stream || !id)
+ return -EINVAL;
+ buf = stream->buf;
+ chan = stream->chan->chan->priv->rb_chan;
+ client_cb = get_client_cb(buf, chan);
+ if (!client_cb)
+ return -ENOSYS;
+ return client_cb->instance_id(buf, chan, id);
+}
+
+#ifdef HAVE_LINUX_PERF_EVENT_H
+
+int ustctl_has_perf_counters(void)
+{
+ return 1;
+}
+
+#else
+
+int ustctl_has_perf_counters(void)
+{
+ return 0;
+}
+
+#endif
+
+#ifdef __linux__
+/*
+ * Override application pid/uid/gid with unix socket credentials. If
+ * the application announced a pid matching our view, it means it is
+ * within the same pid namespace, so expose the ppid provided by the
+ * application.
+ */
+static
+int get_cred(int sock,
+ const struct ustctl_reg_msg *reg_msg,
+ uint32_t *pid,
+ uint32_t *ppid,
+ uint32_t *uid,
+ uint32_t *gid)
+{
+ struct ucred ucred;
+ socklen_t ucred_len = sizeof(struct ucred);
+ int ret;
+
+ ret = getsockopt(sock, SOL_SOCKET, SO_PEERCRED, &ucred, &ucred_len);
+ if (ret) {
+ return -LTTNG_UST_ERR_PEERCRED;
+ }
+ DBG("Unix socket peercred [ pid: %u, uid: %u, gid: %u ], "
+ "application registered claiming [ pid: %u, ppid: %u, uid: %u, gid: %u ]",
+ ucred.pid, ucred.uid, ucred.gid,
+ reg_msg->pid, reg_msg->ppid, reg_msg->uid, reg_msg->gid);
+ if (!ucred.pid) {
+ ERR("Unix socket credential pid=0. Refusing application in distinct, non-nested pid namespace.");
+ return -LTTNG_UST_ERR_PEERCRED_PID;
+ }
+ *pid = ucred.pid;
+ *uid = ucred.uid;
+ *gid = ucred.gid;
+ if (ucred.pid == reg_msg->pid) {
+ *ppid = reg_msg->ppid;
+ } else {
+ *ppid = 0;
+ }
+ return 0;
+}
+#elif defined(__FreeBSD__)
+#include <sys/ucred.h>
+#include <sys/un.h>
+
+/*
+ * Override application uid/gid with unix socket credentials. Use the
+ * first group of the cr_groups.
+ * Use the pid and ppid provided by the application on registration.
+ */
+static
+int get_cred(int sock,
+ const struct ustctl_reg_msg *reg_msg,
+ uint32_t *pid,
+ uint32_t *ppid,
+ uint32_t *uid,
+ uint32_t *gid)
+{
+ struct xucred xucred;
+ socklen_t xucred_len = sizeof(struct xucred);
+ int ret;
+
+ ret = getsockopt(sock, SOL_SOCKET, LOCAL_PEERCRED, &xucred, &xucred_len);
+ if (ret) {
+ return -LTTNG_UST_ERR_PEERCRED;
+ }
+ if (xucred.cr_version != XUCRED_VERSION || xucred.cr_ngroups < 1) {
+ return -LTTNG_UST_ERR_PEERCRED;
+ }
+ DBG("Unix socket peercred [ uid: %u, gid: %u ], "
+ "application registered claiming [ pid: %d, ppid: %d, uid: %u, gid: %u ]",
+ xucred.cr_uid, xucred.cr_groups[0],
+ reg_msg->pid, reg_msg->ppid, reg_msg->uid, reg_msg->gid);
+ *pid = reg_msg->pid;
+ *ppid = reg_msg->ppid;
+ *uid = xucred.cr_uid;
+ *gid = xucred.cr_groups[0];
+ return 0;
+}
+#else
+#warning "Using insecure fallback: trusting user id provided by registered applications. Please consider implementing use of unix socket credentials on your platform."
+static
+int get_cred(int sock,
+ const struct ustctl_reg_msg *reg_msg,
+ uint32_t *pid,
+ uint32_t *ppid,
+ uint32_t *uid,
+ uint32_t *gid)
+{
+ DBG("Application registered claiming [ pid: %u, ppid: %d, uid: %u, gid: %u ]",
+ reg_msg->pid, reg_msg->ppid, reg_msg->uid, reg_msg->gid);
+ *pid = reg_msg->pid;
+ *ppid = reg_msg->ppid;
+ *uid = reg_msg->uid;
+ *gid = reg_msg->gid;
+ return 0;
+}
+#endif
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int ustctl_recv_reg_msg(int sock,
+ enum ustctl_socket_type *type,
+ uint32_t *major,
+ uint32_t *minor,
+ uint32_t *pid,
+ uint32_t *ppid,
+ uint32_t *uid,
+ uint32_t *gid,
+ uint32_t *bits_per_long,
+ uint32_t *uint8_t_alignment,
+ uint32_t *uint16_t_alignment,
+ uint32_t *uint32_t_alignment,
+ uint32_t *uint64_t_alignment,
+ uint32_t *long_alignment,
+ int *byte_order,
+ char *name)
+{
+ ssize_t len;
+ struct ustctl_reg_msg reg_msg;
+
+ len = ustcomm_recv_unix_sock(sock, &reg_msg, sizeof(reg_msg));
+ if (len > 0 && len != sizeof(reg_msg))
+ return -EIO;
+ if (len == 0)
+ return -EPIPE;
+ if (len < 0)
+ return len;
+
+ if (reg_msg.magic == LTTNG_UST_ABI_COMM_MAGIC) {
+ *byte_order = BYTE_ORDER == BIG_ENDIAN ?
+ BIG_ENDIAN : LITTLE_ENDIAN;
+ } else if (reg_msg.magic == bswap_32(LTTNG_UST_ABI_COMM_MAGIC)) {
+ *byte_order = BYTE_ORDER == BIG_ENDIAN ?
+ LITTLE_ENDIAN : BIG_ENDIAN;
+ } else {
+ return -LTTNG_UST_ERR_INVAL_MAGIC;
+ }
+ switch (reg_msg.socket_type) {
+ case 0: *type = USTCTL_SOCKET_CMD;
+ break;
+ case 1: *type = USTCTL_SOCKET_NOTIFY;
+ break;
+ default:
+ return -LTTNG_UST_ERR_INVAL_SOCKET_TYPE;
+ }
+ *major = reg_msg.major;
+ *minor = reg_msg.minor;
+ *bits_per_long = reg_msg.bits_per_long;
+ *uint8_t_alignment = reg_msg.uint8_t_alignment;
+ *uint16_t_alignment = reg_msg.uint16_t_alignment;
+ *uint32_t_alignment = reg_msg.uint32_t_alignment;
+ *uint64_t_alignment = reg_msg.uint64_t_alignment;
+ *long_alignment = reg_msg.long_alignment;
+ memcpy(name, reg_msg.name, LTTNG_UST_ABI_PROCNAME_LEN);
+ if (reg_msg.major < LTTNG_UST_ABI_MAJOR_VERSION_OLDEST_COMPATIBLE ||
+ reg_msg.major > LTTNG_UST_ABI_MAJOR_VERSION) {
+ return -LTTNG_UST_ERR_UNSUP_MAJOR;
+ }
+ return get_cred(sock, &reg_msg, pid, ppid, uid, gid);
+}
+
+int ustctl_recv_notify(int sock, enum ustctl_notify_cmd *notify_cmd)
+{
+ struct ustcomm_notify_hdr header;
+ ssize_t len;
+
+ len = ustcomm_recv_unix_sock(sock, &header, sizeof(header));
+ if (len > 0 && len != sizeof(header))
+ return -EIO;
+ if (len == 0)
+ return -EPIPE;
+ if (len < 0)
+ return len;
+ switch (header.notify_cmd) {
+ case 0:
+ *notify_cmd = USTCTL_NOTIFY_CMD_EVENT;
+ break;
+ case 1:
+ *notify_cmd = USTCTL_NOTIFY_CMD_CHANNEL;
+ break;
+ case 2:
+ *notify_cmd = USTCTL_NOTIFY_CMD_ENUM;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int ustctl_recv_register_event(int sock,
+ int *session_objd,
+ int *channel_objd,
+ char *event_name,
+ int *loglevel,
+ char **signature,
+ size_t *nr_fields,
+ struct ustctl_field **fields,
+ char **model_emf_uri)
+{
+ ssize_t len;
+ struct ustcomm_notify_event_msg msg;
+ size_t signature_len, fields_len, model_emf_uri_len;
+ char *a_sign = NULL, *a_model_emf_uri = NULL;
+ struct ustctl_field *a_fields = NULL;
+
+ len = ustcomm_recv_unix_sock(sock, &msg, sizeof(msg));
+ if (len > 0 && len != sizeof(msg))
+ return -EIO;
+ if (len == 0)
+ return -EPIPE;
+ if (len < 0)
+ return len;
+
+ *session_objd = msg.session_objd;
+ *channel_objd = msg.channel_objd;
+ strncpy(event_name, msg.event_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ event_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ *loglevel = msg.loglevel;
+ signature_len = msg.signature_len;
+ fields_len = msg.fields_len;
+
+ if (fields_len % sizeof(*a_fields) != 0) {
+ return -EINVAL;
+ }
+
+ model_emf_uri_len = msg.model_emf_uri_len;
+
+ /* recv signature. contains at least \0. */
+ a_sign = zmalloc(signature_len);
+ if (!a_sign)
+ return -ENOMEM;
+ len = ustcomm_recv_unix_sock(sock, a_sign, signature_len);
+ if (len > 0 && len != signature_len) {
+ len = -EIO;
+ goto signature_error;
+ }
+ if (len == 0) {
+ len = -EPIPE;
+ goto signature_error;
+ }
+ if (len < 0) {
+ goto signature_error;
+ }
+ /* Enforce end of string */
+ a_sign[signature_len - 1] = '\0';
+
+ /* recv fields */
+ if (fields_len) {
+ a_fields = zmalloc(fields_len);
+ if (!a_fields) {
+ len = -ENOMEM;
+ goto signature_error;
+ }
+ len = ustcomm_recv_unix_sock(sock, a_fields, fields_len);
+ if (len > 0 && len != fields_len) {
+ len = -EIO;
+ goto fields_error;
+ }
+ if (len == 0) {
+ len = -EPIPE;
+ goto fields_error;
+ }
+ if (len < 0) {
+ goto fields_error;
+ }
+ }
+
+ if (model_emf_uri_len) {
+ /* recv model_emf_uri */
+ a_model_emf_uri = zmalloc(model_emf_uri_len);
+ if (!a_model_emf_uri) {
+ len = -ENOMEM;
+ goto fields_error;
+ }
+ len = ustcomm_recv_unix_sock(sock, a_model_emf_uri,
+ model_emf_uri_len);
+ if (len > 0 && len != model_emf_uri_len) {
+ len = -EIO;
+ goto model_error;
+ }
+ if (len == 0) {
+ len = -EPIPE;
+ goto model_error;
+ }
+ if (len < 0) {
+ goto model_error;
+ }
+ /* Enforce end of string */
+ a_model_emf_uri[model_emf_uri_len - 1] = '\0';
+ }
+
+ *signature = a_sign;
+ *nr_fields = fields_len / sizeof(*a_fields);
+ *fields = a_fields;
+ *model_emf_uri = a_model_emf_uri;
+
+ return 0;
+
+model_error:
+ free(a_model_emf_uri);
+fields_error:
+ free(a_fields);
+signature_error:
+ free(a_sign);
+ return len;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int ustctl_reply_register_event(int sock,
+ uint32_t id,
+ int ret_code)
+{
+ ssize_t len;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_event_reply r;
+ } reply;
+
+ memset(&reply, 0, sizeof(reply));
+ reply.header.notify_cmd = USTCTL_NOTIFY_CMD_EVENT;
+ reply.r.ret_code = ret_code;
+ reply.r.event_id = id;
+ len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
+ if (len > 0 && len != sizeof(reply))
+ return -EIO;
+ if (len < 0)
+ return len;
+ return 0;
+}
+
+/*
+ * Returns 0 on success, negative UST or system error value on error.
+ */
+int ustctl_recv_register_enum(int sock,
+ int *session_objd,
+ char *enum_name,
+ struct ustctl_enum_entry **entries,
+ size_t *nr_entries)
+{
+ ssize_t len;
+ struct ustcomm_notify_enum_msg msg;
+ size_t entries_len;
+ struct ustctl_enum_entry *a_entries = NULL;
+
+ len = ustcomm_recv_unix_sock(sock, &msg, sizeof(msg));
+ if (len > 0 && len != sizeof(msg))
+ return -EIO;
+ if (len == 0)
+ return -EPIPE;
+ if (len < 0)
+ return len;
+
+ *session_objd = msg.session_objd;
+ strncpy(enum_name, msg.enum_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ enum_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ entries_len = msg.entries_len;
+
+ if (entries_len % sizeof(*a_entries) != 0) {
+ return -EINVAL;
+ }
+
+ /* recv entries */
+ if (entries_len) {
+ a_entries = zmalloc(entries_len);
+ if (!a_entries)
+ return -ENOMEM;
+ len = ustcomm_recv_unix_sock(sock, a_entries, entries_len);
+ if (len > 0 && len != entries_len) {
+ len = -EIO;
+ goto entries_error;
+ }
+ if (len == 0) {
+ len = -EPIPE;
+ goto entries_error;
+ }
+ if (len < 0) {
+ goto entries_error;
+ }
+ }
+ *nr_entries = entries_len / sizeof(*a_entries);
+ *entries = a_entries;
+
+ return 0;
+
+entries_error:
+ free(a_entries);
+ return len;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int ustctl_reply_register_enum(int sock,
+ uint64_t id,
+ int ret_code)
+{
+ ssize_t len;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_enum_reply r;
+ } reply;
+
+ memset(&reply, 0, sizeof(reply));
+ reply.header.notify_cmd = USTCTL_NOTIFY_CMD_ENUM;
+ reply.r.ret_code = ret_code;
+ reply.r.enum_id = id;
+ len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
+ if (len > 0 && len != sizeof(reply))
+ return -EIO;
+ if (len < 0)
+ return len;
+ return 0;
+}
+
+/*
+ * Returns 0 on success, negative UST or system error value on error.
+ */
+int ustctl_recv_register_channel(int sock,
+ int *session_objd, /* session descriptor (output) */
+ int *channel_objd, /* channel descriptor (output) */
+ size_t *nr_fields,
+ struct ustctl_field **fields)
+{
+ ssize_t len;
+ struct ustcomm_notify_channel_msg msg;
+ size_t fields_len;
+ struct ustctl_field *a_fields;
+
+ len = ustcomm_recv_unix_sock(sock, &msg, sizeof(msg));
+ if (len > 0 && len != sizeof(msg))
+ return -EIO;
+ if (len == 0)
+ return -EPIPE;
+ if (len < 0)
+ return len;
+
+ *session_objd = msg.session_objd;
+ *channel_objd = msg.channel_objd;
+ fields_len = msg.ctx_fields_len;
+
+ if (fields_len % sizeof(*a_fields) != 0) {
+ return -EINVAL;
+ }
+
+ /* recv fields */
+ if (fields_len) {
+ a_fields = zmalloc(fields_len);
+ if (!a_fields) {
+ len = -ENOMEM;
+ goto alloc_error;
+ }
+ len = ustcomm_recv_unix_sock(sock, a_fields, fields_len);
+ if (len > 0 && len != fields_len) {
+ len = -EIO;
+ goto fields_error;
+ }
+ if (len == 0) {
+ len = -EPIPE;
+ goto fields_error;
+ }
+ if (len < 0) {
+ goto fields_error;
+ }
+ *fields = a_fields;
+ } else {
+ *fields = NULL;
+ }
+ *nr_fields = fields_len / sizeof(*a_fields);
+ return 0;
+
+fields_error:
+ free(a_fields);
+alloc_error:
+ return len;
+}
+
+/*
+ * Returns 0 on success, negative error value on error.
+ */
+int ustctl_reply_register_channel(int sock,
+ uint32_t chan_id,
+ enum ustctl_channel_header header_type,
+ int ret_code)
+{
+ ssize_t len;
+ struct {
+ struct ustcomm_notify_hdr header;
+ struct ustcomm_notify_channel_reply r;
+ } reply;
+
+ memset(&reply, 0, sizeof(reply));
+ reply.header.notify_cmd = USTCTL_NOTIFY_CMD_CHANNEL;
+ reply.r.ret_code = ret_code;
+ reply.r.chan_id = chan_id;
+ switch (header_type) {
+ case USTCTL_CHANNEL_HEADER_COMPACT:
+ reply.r.header_type = 1;
+ break;
+ case USTCTL_CHANNEL_HEADER_LARGE:
+ reply.r.header_type = 2;
+ break;
+ default:
+ reply.r.header_type = 0;
+ break;
+ }
+ len = ustcomm_send_unix_sock(sock, &reply, sizeof(reply));
+ if (len > 0 && len != sizeof(reply))
+ return -EIO;
+ if (len < 0)
+ return len;
+ return 0;
+}
+
+/* Regenerate the statedump. */
+int ustctl_regenerate_statedump(int sock, int handle)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = handle;
+ lum.cmd = LTTNG_UST_ABI_SESSION_STATEDUMP;
+ ret = ustcomm_send_app_cmd(sock, &lum, &lur);
+ if (ret)
+ return ret;
+ DBG("Regenerated statedump for handle %u", handle);
+ return 0;
+}
+
+/* counter operations */
+
+int ustctl_get_nr_cpu_per_counter(void)
+{
+ return lttng_counter_num_possible_cpus();
+}
+
+struct ustctl_daemon_counter *
+ ustctl_create_counter(size_t nr_dimensions,
+ const struct ustctl_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ enum ustctl_counter_bitness bitness,
+ enum ustctl_counter_arithmetic arithmetic,
+ uint32_t alloc_flags,
+ bool coalesce_hits)
+{
+ const char *transport_name;
+ struct ustctl_daemon_counter *counter;
+ struct lttng_counter_transport *transport;
+ struct lttng_counter_dimension ust_dim[LTTNG_COUNTER_DIMENSION_MAX];
+ size_t i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ /* Currently, only per-cpu allocation is supported. */
+ switch (alloc_flags) {
+ case USTCTL_COUNTER_ALLOC_PER_CPU:
+ break;
+
+ case USTCTL_COUNTER_ALLOC_PER_CPU | USTCTL_COUNTER_ALLOC_GLOBAL:
+ case USTCTL_COUNTER_ALLOC_GLOBAL:
+ default:
+ return NULL;
+ }
+ switch (bitness) {
+ case USTCTL_COUNTER_BITNESS_32:
+ switch (arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ transport_name = "counter-per-cpu-32-modular";
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ transport_name = "counter-per-cpu-32-saturation";
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ case USTCTL_COUNTER_BITNESS_64:
+ switch (arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ transport_name = "counter-per-cpu-64-modular";
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ transport_name = "counter-per-cpu-64-saturation";
+ break;
+ default:
+ return NULL;
+ }
+ break;
+ default:
+ return NULL;
+ }
+
+ transport = lttng_counter_transport_find(transport_name);
+ if (!transport) {
+ DBG("LTTng transport %s not found\n",
+ transport_name);
+ return NULL;
+ }
+
+ counter = zmalloc(sizeof(*counter));
+ if (!counter)
+ return NULL;
+ counter->attr = zmalloc(sizeof(*counter->attr));
+ if (!counter->attr)
+ goto free_counter;
+ counter->attr->bitness = bitness;
+ counter->attr->arithmetic = arithmetic;
+ counter->attr->nr_dimensions = nr_dimensions;
+ counter->attr->global_sum_step = global_sum_step;
+ counter->attr->coalesce_hits = coalesce_hits;
+ for (i = 0; i < nr_dimensions; i++)
+ counter->attr->dimensions[i] = dimensions[i];
+
+ for (i = 0; i < nr_dimensions; i++) {
+ ust_dim[i].size = dimensions[i].size;
+ ust_dim[i].underflow_index = dimensions[i].underflow_index;
+ ust_dim[i].overflow_index = dimensions[i].overflow_index;
+ ust_dim[i].has_underflow = dimensions[i].has_underflow;
+ ust_dim[i].has_overflow = dimensions[i].has_overflow;
+ }
+ counter->counter = transport->ops.counter_create(nr_dimensions,
+ ust_dim, global_sum_step, global_counter_fd,
+ nr_counter_cpu_fds, counter_cpu_fds, true);
+ if (!counter->counter)
+ goto free_attr;
+ counter->ops = &transport->ops;
+ return counter;
+
+free_attr:
+ free(counter->attr);
+free_counter:
+ free(counter);
+ return NULL;
+}
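+
+/*
+ * Illustrative sketch only: creating a one-dimensional per-CPU 64-bit
+ * modular counter. Passing -1 and NULL for the shm file descriptors is
+ * an assumption made for brevity; a real caller provides descriptors
+ * matching its shared-memory allocation scheme.
+ */
+static inline
+struct ustctl_daemon_counter *example_create_counter(size_t nr_slots)
+{
+ struct ustctl_counter_dimension dimension = {
+ .size = nr_slots,
+ .has_underflow = false,
+ .has_overflow = false,
+ };
+
+ return ustctl_create_counter(1, &dimension, 0, -1, 0, NULL,
+ USTCTL_COUNTER_BITNESS_64,
+ USTCTL_COUNTER_ARITHMETIC_MODULAR,
+ USTCTL_COUNTER_ALLOC_PER_CPU, false);
+}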
+
+int ustctl_create_counter_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_abi_object_data **_counter_data)
+{
+ struct lttng_ust_abi_object_data *counter_data;
+ struct lttng_ust_abi_counter_conf counter_conf = {0};
+ size_t i;
+ int ret;
+
+ switch (counter->attr->arithmetic) {
+ case USTCTL_COUNTER_ARITHMETIC_MODULAR:
+ counter_conf.arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR;
+ break;
+ case USTCTL_COUNTER_ARITHMETIC_SATURATION:
+ counter_conf.arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_SATURATION;
+ break;
+ default:
+ return -EINVAL;
+ }
+ switch (counter->attr->bitness) {
+ case USTCTL_COUNTER_BITNESS_32:
+ counter_conf.bitness = LTTNG_UST_ABI_COUNTER_BITNESS_32;
+ break;
+ case USTCTL_COUNTER_BITNESS_64:
+ counter_conf.bitness = LTTNG_UST_ABI_COUNTER_BITNESS_64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ counter_conf.number_dimensions = counter->attr->nr_dimensions;
+ counter_conf.global_sum_step = counter->attr->global_sum_step;
+ counter_conf.coalesce_hits = counter->attr->coalesce_hits;
+ for (i = 0; i < counter->attr->nr_dimensions; i++) {
+ counter_conf.dimensions[i].size = counter->attr->dimensions[i].size;
+ counter_conf.dimensions[i].underflow_index = counter->attr->dimensions[i].underflow_index;
+ counter_conf.dimensions[i].overflow_index = counter->attr->dimensions[i].overflow_index;
+ counter_conf.dimensions[i].has_underflow = counter->attr->dimensions[i].has_underflow;
+ counter_conf.dimensions[i].has_overflow = counter->attr->dimensions[i].has_overflow;
+ }
+
+ counter_data = zmalloc(sizeof(*counter_data));
+ if (!counter_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER;
+ counter_data->handle = -1;
+
+ counter_data->size = sizeof(counter_conf);
+ counter_data->u.counter.data = zmalloc(sizeof(counter_conf));
+ if (!counter_data->u.counter.data) {
+ ret = -ENOMEM;
+ goto error_alloc_data;
+ }
+
+ memcpy(counter_data->u.counter.data, &counter_conf, sizeof(counter_conf));
+ *_counter_data = counter_data;
+
+ return 0;
+
+error_alloc_data:
+ free(counter_data);
+error_alloc:
+ return ret;
+}
+
+int ustctl_create_counter_global_data(struct ustctl_daemon_counter *counter,
+ struct lttng_ust_abi_object_data **_counter_global_data)
+{
+ struct lttng_ust_abi_object_data *counter_global_data;
+ int ret, fd;
+ size_t len;
+
+ if (lttng_counter_get_global_shm(counter->counter, &fd, &len))
+ return -EINVAL;
+ counter_global_data = zmalloc(sizeof(*counter_global_data));
+ if (!counter_global_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_global_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_GLOBAL;
+ counter_global_data->handle = -1;
+ counter_global_data->size = len;
+ counter_global_data->u.counter_global.shm_fd = fd;
+ *_counter_global_data = counter_global_data;
+ return 0;
+
+error_alloc:
+ return ret;
+}
+
+int ustctl_create_counter_cpu_data(struct ustctl_daemon_counter *counter, int cpu,
+ struct lttng_ust_abi_object_data **_counter_cpu_data)
+{
+ struct lttng_ust_abi_object_data *counter_cpu_data;
+ int ret, fd;
+ size_t len;
+
+ if (lttng_counter_get_cpu_shm(counter->counter, cpu, &fd, &len))
+ return -EINVAL;
+ counter_cpu_data = zmalloc(sizeof(*counter_cpu_data));
+ if (!counter_cpu_data) {
+ ret = -ENOMEM;
+ goto error_alloc;
+ }
+ counter_cpu_data->type = LTTNG_UST_ABI_OBJECT_TYPE_COUNTER_CPU;
+ counter_cpu_data->handle = -1;
+ counter_cpu_data->size = len;
+ counter_cpu_data->u.counter_cpu.shm_fd = fd;
+ counter_cpu_data->u.counter_cpu.cpu_nr = cpu;
+ *_counter_cpu_data = counter_cpu_data;
+ return 0;
+
+error_alloc:
+ return ret;
+}
+
+void ustctl_destroy_counter(struct ustctl_daemon_counter *counter)
+{
+ counter->ops->counter_destroy(counter->counter);
+ free(counter->attr);
+ free(counter);
+}
+
+int ustctl_send_counter_data_to_ust(int sock, int parent_handle,
+ struct lttng_ust_abi_object_data *counter_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret;
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data)
+ return -EINVAL;
+
+ size = counter_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = parent_handle;
+ lum.cmd = LTTNG_UST_ABI_COUNTER;
+ lum.u.counter.len = size;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ /* Send counter data */
+ len = ustcomm_send_unix_sock(sock, counter_data->u.counter.data, size);
+ if (len != size) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+int ustctl_send_counter_global_data_to_ust(int sock,
+ struct lttng_ust_abi_object_data *counter_data,
+ struct lttng_ust_abi_object_data *counter_global_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, shm_fd[1];
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data || !counter_global_data)
+ return -EINVAL;
+
+ size = counter_global_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = counter_data->handle; /* parent handle */
+ lum.cmd = LTTNG_UST_ABI_COUNTER_GLOBAL;
+ lum.u.counter_global.len = size;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ shm_fd[0] = counter_global_data->u.counter_global.shm_fd;
+ len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_global_data->handle = lur.ret_val;
+ }
+ return ret;
+}
+
+int ustctl_send_counter_cpu_data_to_ust(int sock,
+ struct lttng_ust_abi_object_data *counter_data,
+ struct lttng_ust_abi_object_data *counter_cpu_data)
+{
+ struct ustcomm_ust_msg lum;
+ struct ustcomm_ust_reply lur;
+ int ret, shm_fd[1];
+ size_t size;
+ ssize_t len;
+
+ if (!counter_data || !counter_cpu_data)
+ return -EINVAL;
+
+ size = counter_cpu_data->size;
+ memset(&lum, 0, sizeof(lum));
+ lum.handle = counter_data->handle; /* parent handle */
+ lum.cmd = LTTNG_UST_ABI_COUNTER_CPU;
+ lum.u.counter_cpu.len = size;
+ lum.u.counter_cpu.cpu_nr = counter_cpu_data->u.counter_cpu.cpu_nr;
+ ret = ustcomm_send_app_msg(sock, &lum);
+ if (ret)
+ return ret;
+
+ shm_fd[0] = counter_cpu_data->u.counter_cpu.shm_fd;
+ len = ustcomm_send_fds_unix_sock(sock, shm_fd, 1);
+ if (len <= 0) {
+ if (len < 0)
+ return len;
+ else
+ return -EIO;
+ }
+
+ ret = ustcomm_recv_app_reply(sock, &lur, lum.handle, lum.cmd);
+ if (!ret) {
+ counter_cpu_data->handle = lur.ret_val;
+ }
+ return ret;
+}
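+
+/*
+ * Illustrative sketch only: the shipping sequence for a counter. The
+ * parent counter object is sent first, then one shm descriptor per
+ * possible CPU. The example_ name is hypothetical, and the per-CPU
+ * object_data ownership bookkeeping a real caller needs is elided.
+ */
+static inline
+int example_ship_counter(int sock, int parent_handle,
+ struct ustctl_daemon_counter *counter,
+ struct lttng_ust_abi_object_data **counter_data)
+{
+ int ret, cpu, nr_cpus = ustctl_get_nr_cpu_per_counter();
+
+ ret = ustctl_create_counter_data(counter, counter_data);
+ if (ret)
+ return ret;
+ ret = ustctl_send_counter_data_to_ust(sock, parent_handle,
+ *counter_data);
+ if (ret)
+ return ret;
+ for (cpu = 0; cpu < nr_cpus; cpu++) {
+ struct lttng_ust_abi_object_data *cpu_data;
+
+ ret = ustctl_create_counter_cpu_data(counter, cpu, &cpu_data);
+ if (ret)
+ return ret;
+ ret = ustctl_send_counter_cpu_data_to_ust(sock, *counter_data,
+ cpu_data);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}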
+
+int ustctl_counter_read(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int cpu, int64_t *value,
+ bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_read(counter->counter, dimension_indexes, cpu,
+ value, overflow, underflow);
+}
+
+int ustctl_counter_aggregate(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes,
+ int64_t *value,
+ bool *overflow, bool *underflow)
+{
+ return counter->ops->counter_aggregate(counter->counter, dimension_indexes,
+ value, overflow, underflow);
+}
+
+int ustctl_counter_clear(struct ustctl_daemon_counter *counter,
+ const size_t *dimension_indexes)
+{
+ return counter->ops->counter_clear(counter->counter, dimension_indexes);
+}
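+
+/*
+ * Illustrative sketch only: aggregating one slot of a one-dimensional
+ * counter across CPUs. The example_ name is hypothetical.
+ */
+static inline
+int example_read_counter_slot(struct ustctl_daemon_counter *counter,
+ size_t index, int64_t *value)
+{
+ size_t dimension_indexes[1] = { index };
+ bool overflow, underflow;
+
+ return ustctl_counter_aggregate(counter, dimension_indexes,
+ value, &overflow, &underflow);
+}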
+
+static
+void ustctl_init(void)
+ __attribute__((constructor));
+static
+void ustctl_init(void)
+{
+ ust_err_init();
+ lttng_ust_getenv_init(); /* Needs ust_err_init() to be completed. */
+ lttng_ust_clock_init();
+ lttng_ust_ring_buffer_clients_init();
+ lttng_ust_counter_clients_init();
+ lib_ringbuffer_signal_init();
+}
+
+static
+void ustctl_exit(void)
+ __attribute__((destructor));
+static
+void ustctl_exit(void)
+{
+ lttng_ust_counter_clients_exit();
+ lttng_ust_ring_buffer_clients_exit();
+}
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-cyg-profile.la \
+ liblttng-ust-cyg-profile-fast.la
+
+liblttng_ust_cyg_profile_la_SOURCES = \
+ lttng-ust-cyg-profile.c \
+ lttng-ust-cyg-profile.h
+
+liblttng_ust_cyg_profile_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_cyg_profile_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+liblttng_ust_cyg_profile_fast_la_SOURCES = \
+ lttng-ust-cyg-profile-fast.c \
+ lttng-ust-cyg-profile-fast.h
+
+liblttng_ust_cyg_profile_fast_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_cyg_profile_fast_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+dist_noinst_SCRIPTS = run run-fast
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <dlfcn.h>
+#include <sys/types.h>
+#include <stdio.h>
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM func_addr
+#include "lttng-ust-cyg-profile-fast.h"
+
+void __cyg_profile_func_enter(void *this_fn, void *call_site)
+ __attribute__((no_instrument_function));
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site)
+ __attribute__((no_instrument_function));
+
+void __cyg_profile_func_enter(void *this_fn, void *call_site __attribute__((unused)))
+{
+ tracepoint(lttng_ust_cyg_profile_fast, func_entry, this_fn);
+}
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site __attribute__((unused)))
+{
+ tracepoint(lttng_ust_cyg_profile_fast, func_exit, this_fn);
+}
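+
+/*
+ * Note: these callbacks only fire for code compiled with the GCC/Clang
+ * option -finstrument-functions. A hypothetical usage sketch (paths
+ * and names are illustrative):
+ *
+ * gcc -g -finstrument-functions -o app app.c
+ * LD_PRELOAD=liblttng-ust-cyg-profile-fast.so ./app
+ */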
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_cyg_profile_fast
+
+#if !defined(_TRACEPOINT_LTTNG_UST_CYG_PROFILE_FAST_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_CYG_PROFILE_FAST_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_cyg_profile_fast, func_entry,
+ TP_ARGS(void *, func_addr),
+ TP_FIELDS(
+ ctf_integer_hex(unsigned long, addr,
+ (unsigned long) func_addr)
+ )
+)
+
+TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile_fast, func_entry,
+ TRACE_DEBUG_FUNCTION)
+
+TRACEPOINT_EVENT(lttng_ust_cyg_profile_fast, func_exit,
+ TP_ARGS(void *, func_addr),
+ TP_FIELDS(
+ ctf_unused(func_addr)
+ )
+)
+
+TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile_fast, func_exit,
+ TRACE_DEBUG_FUNCTION)
+
+#endif /* _TRACEPOINT_LTTNG_UST_CYG_PROFILE_FAST_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng-ust-cyg-profile-fast.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <dlfcn.h>
+#include <sys/types.h>
+#include <stdio.h>
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM func_addr
+#include "lttng-ust-cyg-profile.h"
+
+void __cyg_profile_func_enter(void *this_fn, void *call_site)
+ __attribute__((no_instrument_function));
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site)
+ __attribute__((no_instrument_function));
+
+void __cyg_profile_func_enter(void *this_fn, void *call_site)
+{
+ tracepoint(lttng_ust_cyg_profile, func_entry, this_fn, call_site);
+}
+
+void __cyg_profile_func_exit(void *this_fn, void *call_site)
+{
+ tracepoint(lttng_ust_cyg_profile, func_exit, this_fn, call_site);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011-2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_cyg_profile
+
+#if !defined(_TRACEPOINT_LTTNG_UST_CYG_PROFILE_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_CYG_PROFILE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT_CLASS(lttng_ust_cyg_profile, func_class,
+ TP_ARGS(void *, func_addr, void *, call_site),
+ TP_FIELDS(
+ ctf_integer_hex(unsigned long, addr,
+ (unsigned long) func_addr)
+ ctf_integer_hex(unsigned long, call_site,
+ (unsigned long) call_site)
+ )
+)
+
+TRACEPOINT_EVENT_INSTANCE(lttng_ust_cyg_profile, func_class,
+ func_entry,
+ TP_ARGS(void *, func_addr, void *, call_site)
+)
+
+TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile, func_entry,
+ TRACE_DEBUG_FUNCTION)
+
+TRACEPOINT_EVENT_INSTANCE(lttng_ust_cyg_profile, func_class,
+ func_exit,
+ TP_ARGS(void *, func_addr, void *, call_site)
+)
+
+TRACEPOINT_LOGLEVEL(lttng_ust_cyg_profile, func_exit,
+ TRACE_DEBUG_FUNCTION)
+
+#endif /* _TRACEPOINT_LTTNG_UST_CYG_PROFILE_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng-ust-cyg-profile.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+#!/bin/sh
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+
+LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-cyg-profile.so "$@"
--- /dev/null
+#!/bin/sh
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+
+LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-cyg-profile-fast.so "$@"
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-dl.la
+liblttng_ust_dl_la_SOURCES = \
+ lttng-ust-dl.c \
+ ust_dl.c \
+ ust_dl.h
+
+liblttng_ust_dl_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/snprintf/libustsnprintf.la \
+ $(DL_LIBS)
+
+liblttng_ust_dl_la_CFLAGS = -DUST_COMPONENT=liblttng-ust-dl $(AM_CFLAGS)
+liblttng_ust_dl_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul.woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <ust-dlfcn.h>
+#include <ust-elf.h>
+#include <lttng/ust-events.h>
+#include <ust-helper.h>
+#include "usterr-signal-safe.h"
+
+#include "../liblttng-ust/ust-events-internal.h"
+
+/* Include link.h last, otherwise it conflicts with ust-dlfcn. */
+#include <link.h>
+
+#define TRACEPOINT_DEFINE
+#include "ust_dl.h"
+
+static void *(*__lttng_ust_plibc_dlopen)(const char *filename, int flags);
+#ifdef HAVE_DLMOPEN
+static void *(*__lttng_ust_plibc_dlmopen)(Lmid_t nsid, const char *filename,
+ int flags);
+#endif
+static int (*__lttng_ust_plibc_dlclose)(void *handle);
+
+static
+void _lttng_ust_dl_init(void)
+ __attribute__((constructor));
+static
+void _lttng_ust_dl_init(void)
+{
+ ust_err_init();
+}
+
+static
+void *_lttng_ust_dl_libc_dlopen(const char *filename, int flags)
+{
+ if (!__lttng_ust_plibc_dlopen) {
+ __lttng_ust_plibc_dlopen = dlsym(RTLD_NEXT, "dlopen");
+ if (!__lttng_ust_plibc_dlopen) {
+ fprintf(stderr, "%s\n", dlerror());
+ return NULL;
+ }
+ }
+ return __lttng_ust_plibc_dlopen(filename, flags);
+}
+
+#ifdef HAVE_DLMOPEN
+static
+void *_lttng_ust_dl_libc_dlmopen(Lmid_t nsid, const char *filename,
+ int flags)
+{
+ if (!__lttng_ust_plibc_dlmopen) {
+ __lttng_ust_plibc_dlmopen = dlsym(RTLD_NEXT, "dlmopen");
+ if (!__lttng_ust_plibc_dlmopen) {
+ fprintf(stderr, "%s\n", dlerror());
+ return NULL;
+ }
+ }
+ return __lttng_ust_plibc_dlmopen(nsid, filename, flags);
+}
+#endif
+
+static
+int _lttng_ust_dl_libc_dlclose(void *handle)
+{
+ if (!__lttng_ust_plibc_dlclose) {
+ __lttng_ust_plibc_dlclose = dlsym(RTLD_NEXT, "dlclose");
+ if (!__lttng_ust_plibc_dlclose) {
+ fprintf(stderr, "%s\n", dlerror());
+ return -1;
+ }
+ }
+ return __lttng_ust_plibc_dlclose(handle);
+}
+
+static
+void lttng_ust_dl_dlopen(void *so_base, const char *so_name,
+ int flags, void *ip)
+{
+ char resolved_path[PATH_MAX];
+ struct lttng_ust_elf *elf;
+ uint64_t memsz;
+ uint8_t *build_id = NULL;
+ size_t build_id_len;
+ char *dbg_file = NULL;
+ uint32_t crc;
+ int has_build_id = 0, has_debug_link = 0;
+ int ret;
+
+ if (!realpath(so_name, resolved_path)) {
+ ERR("could not resolve path '%s'", so_name);
+ return;
+ }
+
+ elf = lttng_ust_elf_create(resolved_path);
+ if (!elf) {
+ ERR("could not access file %s", resolved_path);
+ return;
+ }
+
+ ret = lttng_ust_elf_get_memsz(elf, &memsz);
+ if (ret) {
+ goto end;
+ }
+ ret = lttng_ust_elf_get_build_id(
+ elf, &build_id, &build_id_len, &has_build_id);
+ if (ret) {
+ goto end;
+ }
+ ret = lttng_ust_elf_get_debug_link(
+ elf, &dbg_file, &crc, &has_debug_link);
+ if (ret) {
+ goto end;
+ }
+
+ tracepoint(lttng_ust_dl, dlopen,
+ ip, so_base, resolved_path, flags, memsz,
+ has_build_id, has_debug_link);
+
+ if (has_build_id) {
+ tracepoint(lttng_ust_dl, build_id,
+ ip, so_base, build_id, build_id_len);
+ }
+
+ if (has_debug_link) {
+ tracepoint(lttng_ust_dl, debug_link,
+ ip, so_base, dbg_file, crc);
+ }
+
+end:
+ free(dbg_file);
+ free(build_id);
+ lttng_ust_elf_destroy(elf);
+ return;
+}
+
+#ifdef HAVE_DLMOPEN
+static
+void lttng_ust_dl_dlmopen(void *so_base, Lmid_t nsid, const char *so_name,
+ int flags, void *ip)
+{
+ char resolved_path[PATH_MAX];
+ struct lttng_ust_elf *elf;
+ uint64_t memsz;
+ uint8_t *build_id = NULL;
+ size_t build_id_len;
+ char *dbg_file = NULL;
+ uint32_t crc;
+ int has_build_id = 0, has_debug_link = 0;
+ int ret;
+
+ if (!realpath(so_name, resolved_path)) {
+ ERR("could not resolve path '%s'", so_name);
+ return;
+ }
+
+ elf = lttng_ust_elf_create(resolved_path);
+ if (!elf) {
+ ERR("could not access file %s", resolved_path);
+ return;
+ }
+
+ ret = lttng_ust_elf_get_memsz(elf, &memsz);
+ if (ret) {
+ goto end;
+ }
+ ret = lttng_ust_elf_get_build_id(
+ elf, &build_id, &build_id_len, &has_build_id);
+ if (ret) {
+ goto end;
+ }
+ ret = lttng_ust_elf_get_debug_link(
+ elf, &dbg_file, &crc, &has_debug_link);
+ if (ret) {
+ goto end;
+ }
+
+ tracepoint(lttng_ust_dl, dlmopen,
+ ip, so_base, nsid, resolved_path, flags, memsz,
+ has_build_id, has_debug_link);
+
+ if (has_build_id) {
+ tracepoint(lttng_ust_dl, build_id,
+ ip, so_base, build_id, build_id_len);
+ }
+
+ if (has_debug_link) {
+ tracepoint(lttng_ust_dl, debug_link,
+ ip, so_base, dbg_file, crc);
+ }
+
+end:
+ free(dbg_file);
+ free(build_id);
+ lttng_ust_elf_destroy(elf);
+ return;
+}
+#endif
+
+void *dlopen(const char *filename, int flags)
+{
+ void *handle;
+
+ handle = _lttng_ust_dl_libc_dlopen(filename, flags);
+ if (__tracepoint_ptrs_registered && handle) {
+ struct link_map *p = NULL;
+ int ret;
+
+ ret = dlinfo(handle, RTLD_DI_LINKMAP, &p);
+ if (ret != -1 && p != NULL && p->l_addr != 0) {
+ lttng_ust_dl_dlopen((void *) p->l_addr,
+ p->l_name, flags, LTTNG_UST_CALLER_IP());
+ }
+ }
+ lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+ return handle;
+}
+
+#ifdef HAVE_DLMOPEN
+void *dlmopen(Lmid_t nsid, const char *filename, int flags)
+{
+ void *handle;
+
+ handle = _lttng_ust_dl_libc_dlmopen(nsid, filename, flags);
+ if (__tracepoint_ptrs_registered && handle) {
+ struct link_map *p = NULL;
+ int ret;
+
+ ret = dlinfo(handle, RTLD_DI_LINKMAP, &p);
+ if (ret != -1 && p != NULL && p->l_addr != 0) {
+ lttng_ust_dl_dlmopen((void *) p->l_addr,
+ nsid, p->l_name, flags,
+ LTTNG_UST_CALLER_IP());
+ }
+ }
+ lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+ return handle;
+
+}
+#endif
+
+int dlclose(void *handle)
+{
+ int ret;
+
+ if (__tracepoint_ptrs_registered) {
+ struct link_map *p = NULL;
+
+ ret = dlinfo(handle, RTLD_DI_LINKMAP, &p);
+ if (ret != -1 && p != NULL && p->l_addr != 0) {
+ tracepoint(lttng_ust_dl, dlclose,
+ LTTNG_UST_CALLER_IP(),
+ (void *) p->l_addr);
+ }
+ }
+ ret = _lttng_ust_dl_libc_dlclose(handle);
+ lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ */
+
+#define _LGPL_SOURCE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
+#include "ust_dl.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_dl
+
+#if !defined(_TRACEPOINT_UST_DL_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_DL_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define LTTNG_UST_DL_PROVIDER
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_dl, dlopen,
+ TP_ARGS(void *, ip, void *, baddr, const char *, path,
+ int, flags, uint64_t, memsz, uint8_t, has_build_id,
+ uint8_t, has_debug_link),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint64_t, memsz, memsz)
+ ctf_integer_hex(int, flags, flags)
+ ctf_string(path, path)
+ ctf_integer(uint8_t, has_build_id, has_build_id)
+ ctf_integer(uint8_t, has_debug_link, has_debug_link)
+ )
+)
+
+#ifdef HAVE_DLMOPEN
+TRACEPOINT_EVENT(lttng_ust_dl, dlmopen,
+ TP_ARGS(void *, ip, void *, baddr, Lmid_t, nsid,
+ const char *, path, int, flags,
+ uint64_t, memsz, uint8_t, has_build_id,
+ uint8_t, has_debug_link),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint64_t, memsz, memsz)
+ ctf_integer(Lmid_t, nsid, nsid)
+ ctf_integer_hex(int, flags, flags)
+ ctf_string(path, path)
+ ctf_integer(uint8_t, has_build_id, has_build_id)
+ ctf_integer(uint8_t, has_debug_link, has_debug_link)
+ )
+)
+#endif
+
+TRACEPOINT_EVENT(lttng_ust_dl, build_id,
+ TP_ARGS(
+ void *, ip,
+ void *, baddr,
+ uint8_t *, build_id,
+ size_t, build_id_len
+ ),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_sequence_hex(uint8_t, build_id, build_id,
+ size_t, build_id_len)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_dl, debug_link,
+ TP_ARGS(
+ void *, ip,
+ void *, baddr,
+ char *, filename,
+ uint32_t, crc
+ ),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint32_t, crc, crc)
+ ctf_string(filename, filename)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_dl, dlclose,
+ TP_ARGS(void *, ip, void *, baddr),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_DL_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_dl.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-fd.la
+liblttng_ust_fd_la_SOURCES = \
+ lttng-ust-fd.c
+
+liblttng_ust_fd_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_fd_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <ust-fd.h>
+#include <dlfcn.h>
+
+#include <ust-helper.h>
+
+static int (*__lttng_ust_fd_plibc_close)(int fd);
+static int (*__lttng_ust_fd_plibc_fclose)(FILE *stream);
+
+static
+int _lttng_ust_fd_libc_close(int fd)
+{
+ if (!__lttng_ust_fd_plibc_close) {
+ __lttng_ust_fd_plibc_close = dlsym(RTLD_NEXT, "close");
+ if (!__lttng_ust_fd_plibc_close) {
+ fprintf(stderr, "%s\n", dlerror());
+ return -1;
+ }
+ }
+ return lttng_ust_safe_close_fd(fd, __lttng_ust_fd_plibc_close);
+}
+
+static
+int _lttng_ust_fd_libc_fclose(FILE *stream)
+{
+ if (!__lttng_ust_fd_plibc_fclose) {
+ __lttng_ust_fd_plibc_fclose = dlsym(RTLD_NEXT, "fclose");
+ if (!__lttng_ust_fd_plibc_fclose) {
+ fprintf(stderr, "%s\n", dlerror());
+ return -1;
+ }
+ }
+ return lttng_ust_safe_fclose_stream(stream,
+ __lttng_ust_fd_plibc_fclose);
+}
+
+int close(int fd)
+{
+ return _lttng_ust_fd_libc_close(fd);
+}
+
+/*
+ * Note: fcloseall() is not an issue because it fcloses only the
+ * streams it knows about, which differs from the problems caused by
+ * gnulib close_stdout(), which does an explicit fclose(stdout).
+ */
+int fclose(FILE *stream)
+{
+ return _lttng_ust_fd_libc_fclose(stream);
+}
+
+#if defined(__sun__) || defined(__FreeBSD__)
+/* Solaris and FreeBSD. */
+void closefrom(int lowfd)
+{
+ (void) lttng_ust_safe_closefrom_fd(lowfd, __lttng_ust_fd_plibc_close);
+}
+#elif defined(__NetBSD__) || defined(__OpenBSD__)
+/* NetBSD and OpenBSD. */
+int closefrom(int lowfd)
+{
+ return lttng_ust_safe_closefrom_fd(lowfd, __lttng_ust_fd_plibc_close);
+}
+#else
+/* As far as we know, this OS does not implement closefrom. */
+#endif
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-fork.la
+liblttng_ust_fork_la_SOURCES = ustfork.c
+liblttng_ust_fork_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_fork_la_CFLAGS = -DUST_COMPONENT=liblttng-ust-fork $(AM_CFLAGS)
+
+liblttng_ust_fork_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <ust-dlfcn.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <signal.h>
+#include <sched.h>
+#include <stdarg.h>
+#include <errno.h>
+
+#include <lttng/ust-fork.h>
+
+pid_t fork(void)
+{
+ static pid_t (*plibc_func)(void) = NULL;
+ sigset_t sigset;
+ pid_t retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "fork");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"fork\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ lttng_ust_before_fork(&sigset);
+ /* Do the real fork */
+ retval = plibc_func();
+ saved_errno = errno;
+ if (retval == 0) {
+ /* child */
+ lttng_ust_after_fork_child(&sigset);
+ } else {
+ lttng_ust_after_fork_parent(&sigset);
+ }
+ errno = saved_errno;
+ return retval;
+}
+
+int daemon(int nochdir, int noclose)
+{
+ static int (*plibc_func)(int nochdir, int noclose) = NULL;
+ sigset_t sigset;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "daemon");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"daemon\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ lttng_ust_before_fork(&sigset);
+ /* Do the real daemon call */
+ retval = plibc_func(nochdir, noclose);
+ saved_errno = errno;
+ if (retval == 0) {
+ /* child, parent called _exit() directly */
+ lttng_ust_after_fork_child(&sigset);
+ } else {
+ /* on error in the parent */
+ lttng_ust_after_fork_parent(&sigset);
+ }
+ errno = saved_errno;
+ return retval;
+}
+
+int setuid(uid_t uid)
+{
+ static int (*plibc_func)(uid_t uid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setuid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setuid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setuid */
+ retval = plibc_func(uid);
+ saved_errno = errno;
+
+ lttng_ust_after_setuid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int setgid(gid_t gid)
+{
+ static int (*plibc_func)(gid_t gid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setgid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setgid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setgid */
+ retval = plibc_func(gid);
+ saved_errno = errno;
+
+ lttng_ust_after_setgid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int seteuid(uid_t euid)
+{
+ static int (*plibc_func)(uid_t euid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "seteuid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"seteuid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real seteuid */
+ retval = plibc_func(euid);
+ saved_errno = errno;
+
+ lttng_ust_after_seteuid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int setegid(gid_t egid)
+{
+ static int (*plibc_func)(gid_t egid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setegid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setegid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setegid */
+ retval = plibc_func(egid);
+ saved_errno = errno;
+
+ lttng_ust_after_setegid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int setreuid(uid_t ruid, uid_t euid)
+{
+ static int (*plibc_func)(uid_t ruid, uid_t euid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setreuid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setreuid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setreuid */
+ retval = plibc_func(ruid, euid);
+ saved_errno = errno;
+
+ lttng_ust_after_setreuid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int setregid(gid_t rgid, gid_t egid)
+{
+ static int (*plibc_func)(gid_t rgid, gid_t egid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setregid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setregid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setregid */
+ retval = plibc_func(rgid, egid);
+ saved_errno = errno;
+
+ lttng_ust_after_setregid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+#ifdef __linux__
+
+struct user_desc;
+
+struct ustfork_clone_info {
+ int (*fn)(void *);
+ void *arg;
+ sigset_t sigset;
+};
+
+static int clone_fn(void *arg)
+{
+ struct ustfork_clone_info *info = (struct ustfork_clone_info *) arg;
+
+ /* clone is now done and we are in child */
+ lttng_ust_after_fork_child(&info->sigset);
+ return info->fn(info->arg);
+}
+
+int clone(int (*fn)(void *), void *child_stack, int flags, void *arg, ...)
+{
+ static int (*plibc_func)(int (*fn)(void *), void *child_stack,
+ int flags, void *arg, pid_t *ptid,
+ struct user_desc *tls, pid_t *ctid) = NULL;
+ /* var args */
+ pid_t *ptid;
+ struct user_desc *tls;
+ pid_t *ctid;
+ /* end of var args */
+ va_list ap;
+ int retval;
+ int saved_errno;
+
+ va_start(ap, arg);
+ ptid = va_arg(ap, pid_t *);
+ tls = va_arg(ap, struct user_desc *);
+ ctid = va_arg(ap, pid_t *);
+ va_end(ap);
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "clone");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"clone\" symbol.\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ if (flags & CLONE_VM) {
+ /*
+ * Creating a thread, no need to intervene, just pass on
+ * the arguments.
+ */
+ retval = plibc_func(fn, child_stack, flags, arg, ptid,
+ tls, ctid);
+ saved_errno = errno;
+ } else {
+ /* Creating a real process, we need to intervene. */
+ struct ustfork_clone_info info = { .fn = fn, .arg = arg };
+
+ lttng_ust_before_fork(&info.sigset);
+ retval = plibc_func(clone_fn, child_stack, flags, &info,
+ ptid, tls, ctid);
+ saved_errno = errno;
+ /* The child doesn't get here. */
+ lttng_ust_after_fork_parent(&info.sigset);
+ }
+ errno = saved_errno;
+ return retval;
+}
+
+int setns(int fd, int nstype)
+{
+ static int (*plibc_func)(int fd, int nstype) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setns");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setns\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setns */
+ retval = plibc_func(fd, nstype);
+ saved_errno = errno;
+
+ lttng_ust_after_setns();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int unshare(int flags)
+{
+ static int (*plibc_func)(int flags) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "unshare");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"unshare\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real unshare */
+ retval = plibc_func(flags);
+ saved_errno = errno;
+
+ lttng_ust_after_unshare();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int setresuid(uid_t ruid, uid_t euid, uid_t suid)
+{
+ static int (*plibc_func)(uid_t ruid, uid_t euid, uid_t suid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setresuid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setresuid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setresuid */
+ retval = plibc_func(ruid, euid, suid);
+ saved_errno = errno;
+
+ lttng_ust_after_setresuid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+int setresgid(gid_t rgid, gid_t egid, gid_t sgid)
+{
+ static int (*plibc_func)(gid_t rgid, gid_t egid, gid_t sgid) = NULL;
+ int retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "setresgid");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"setresgid\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ /* Do the real setresgid */
+ retval = plibc_func(rgid, egid, sgid);
+ saved_errno = errno;
+
+ lttng_ust_after_setresgid();
+
+ errno = saved_errno;
+ return retval;
+}
+
+#elif defined (__FreeBSD__)
+
+pid_t rfork(int flags)
+{
+ static pid_t (*plibc_func)(void) = NULL;
+ sigset_t sigset;
+ pid_t retval;
+ int saved_errno;
+
+ if (plibc_func == NULL) {
+ plibc_func = dlsym(RTLD_NEXT, "rfork");
+ if (plibc_func == NULL) {
+ fprintf(stderr, "libustfork: unable to find \"rfork\" symbol\n");
+ errno = ENOSYS;
+ return -1;
+ }
+ }
+
+ lttng_ust_before_fork(&sigset);
+ /* Do the real rfork */
+ retval = plibc_func();
+ saved_errno = errno;
+ if (retval == 0) {
+ /* child */
+ lttng_ust_after_fork_child(&sigset);
+ } else {
+ lttng_ust_after_fork_parent(&sigset);
+ }
+ errno = saved_errno;
+ return retval;
+}
+
+/*
+ * On BSD, there is no need to override vfork: it runs in the context
+ * of the parent, which waits until the child executes execve or
+ * exits.
+ */
+
+#else
+#warning "Unknown OS. You might want to ensure that fork/clone/vfork/fork handling is complete."
+#endif
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+SUBDIRS = java jni
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+SUBDIRS = lttng-ust-agent-common lttng-ust-agent-all
+
+if ENABLE_JAVA_AGENT_WITH_JUL
+SUBDIRS += lttng-ust-agent-jul
+endif
+
+if ENABLE_JAVA_AGENT_WITH_LOG4J
+SUBDIRS += lttng-ust-agent-log4j
+endif
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+JAVAROOT = .
+
+jarfile_version = 1.0.0
+jarfile_manifest = $(srcdir)/Manifest.txt
+jarfile_symlink = lttng-ust-agent-all.jar
+jarfile = lttng-ust-agent-all-$(jarfile_version).jar
+
+# Compatibility symlink provided for applications expecting the agent
+# jar file installed by UST 2.7.
+jarfile_compat_symlink = liblttng-ust-agent.jar
+
+jardir = $(datadir)/java
+
+dist_noinst_DATA = $(jarfile_manifest)
+
+jar_DATA = $(jarfile)
+
+$(jarfile):
+ $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) \
+ && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink) \
+ && rm -f $(jarfile_compat_symlink) && $(LN_S) $(jarfile_symlink) $(jarfile_compat_symlink)
+
+install-data-hook:
+ cd $(DESTDIR)/$(jardir) \
+ && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink) \
+ && rm -f $(jarfile_compat_symlink) && $(LN_S) $(jarfile_symlink) $(jarfile_compat_symlink)
+
+uninstall-hook:
+ cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && rm -f $(jarfile_compat_symlink)
+
+CLEANFILES = *.jar
--- /dev/null
+Name: org/lttng/ust/agent/all/
+Specification-Title: LTTng UST All Java Agents
+Specification-Version: 1.0.0
+Specification-Vendor: LTTng Project
+Implementation-Title: org.lttng.ust.agent.all
+Implementation-Version: 1.0.0
+Implementation-Vendor: LTTng Project
+Class-Path: lttng-ust-agent-common.jar lttng-ust-agent-jul.jar lttng-ust-agent-log4j.jar
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+JAVAROOT = .
+
+pkgpath = org/lttng/ust/agent
+
+jarfile_version = 1.0.0
+jarfile_manifest = $(srcdir)/Manifest.txt
+jarfile_symlink = lttng-ust-agent-common.jar
+jarfile = lttng-ust-agent-common-$(jarfile_version).jar
+
+jardir = $(datadir)/java
+jnioutdir = ../../jni/common
+
+dist_noinst_JAVA = $(pkgpath)/AbstractLttngAgent.java \
+ $(pkgpath)/EventNamePattern.java \
+ $(pkgpath)/ILttngAgent.java \
+ $(pkgpath)/ILttngHandler.java \
+ $(pkgpath)/LTTngAgent.java \
+ $(pkgpath)/client/ILttngTcpClientListener.java \
+ $(pkgpath)/client/SessiondCommand.java \
+ $(pkgpath)/client/LttngAgentResponse.java \
+ $(pkgpath)/client/LttngTcpSessiondClient.java \
+ $(pkgpath)/client/SessiondCommandHeader.java \
+ $(pkgpath)/client/SessiondDisableAppContextCommand.java \
+ $(pkgpath)/client/SessiondDisableEventCommand.java \
+ $(pkgpath)/client/SessiondEnableAppContextCommand.java \
+ $(pkgpath)/client/SessiondEnableEventCommand.java \
+ $(pkgpath)/client/SessiondListLoggersCommand.java \
+ $(pkgpath)/context/LttngContextApi.java \
+ $(pkgpath)/context/ContextInfoManager.java \
+ $(pkgpath)/context/ContextInfoSerializer.java \
+ $(pkgpath)/context/IContextInfoRetriever.java \
+ $(pkgpath)/filter/FilterChangeNotifier.java \
+ $(pkgpath)/filter/IFilterChangeListener.java \
+ $(pkgpath)/session/EventRule.java \
+ $(pkgpath)/session/LogLevelSelector.java \
+ $(pkgpath)/utils/LttngUstAgentLogger.java
+
+
+dist_noinst_DATA = $(jarfile_manifest)
+
+jar_DATA = $(jarfile)
+
+classes = $(pkgpath)/*.class \
+ $(pkgpath)/client/*.class \
+ $(pkgpath)/context/*.class \
+ $(pkgpath)/filter/*.class \
+ $(pkgpath)/session/*.class \
+ $(pkgpath)/utils/*.class
+
+$(jarfile): classnoinst.stamp
+ $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) $(classes) && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink)
+
+if !HAVE_JAVAH
+# If we don't have javah, assume we are running openjdk >= 10 and use javac
+# to generate the jni header file.
+AM_JAVACFLAGS = -h $(jnioutdir)
+else
+context-jni-header.stamp: $(dist_noinst_JAVA)
+ $(JAVAH) -classpath $(CLASSPATH):$(srcdir) -d $(jnioutdir) $(JAVAHFLAGS) org.lttng.ust.agent.context.LttngContextApi && \
+ echo "Context API JNI header generated" > context-jni-header.stamp
+
+all-local: context-jni-header.stamp
+endif
+
+install-data-hook:
+ cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink)
+
+uninstall-hook:
+ cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink)
+
+CLEANFILES = *.jar \
+ $(pkgpath)/*.class \
+ $(pkgpath)/client/*.class \
+ $(pkgpath)/context/*.class \
+ $(pkgpath)/filter/*.class \
+ $(pkgpath)/session/*.class \
+ $(pkgpath)/utils/*.class \
+ context-jni-header.stamp \
+ $(jnioutdir)/org_lttng_ust_agent_context_LttngContextApi.h
--- /dev/null
+Name: org/lttng/ust/agent/
+Specification-Title: LTTng UST Java Agent
+Specification-Version: 1.0.0
+Specification-Vendor: LTTng Project
+Implementation-Title: org.lttng.ust.agent
+Implementation-Version: 1.0.0
+Implementation-Vendor: LTTng Project
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.regex.Matcher;
+
+import org.lttng.ust.agent.client.ILttngTcpClientListener;
+import org.lttng.ust.agent.client.LttngTcpSessiondClient;
+import org.lttng.ust.agent.filter.FilterChangeNotifier;
+import org.lttng.ust.agent.session.EventRule;
+import org.lttng.ust.agent.utils.LttngUstAgentLogger;
+
+/**
+ * Base implementation of a {@link ILttngAgent}.
+ *
+ * @author Alexandre Montplaisir
+ * @param <T>
+ * The type of logging handler that should register to this agent
+ */
+public abstract class AbstractLttngAgent<T extends ILttngHandler>
+ implements ILttngAgent<T>, ILttngTcpClientListener {
+
+ private static final int INIT_TIMEOUT = 3; /* Seconds */
+
+ /** The handlers registered to this agent */
+ private final Set<T> registeredHandlers = new HashSet<T>();
+
+ /**
+ * The trace events currently enabled in the sessions.
+ *
+ * The key is the {@link EventNamePattern} that comes from the event name.
+ * The value is the ref count (how many different sessions currently have
+ * this event enabled). Once the ref count falls to 0, this means we can
+ * avoid sending log events through JNI because nobody wants them.
+ *
+ * Its accesses should be protected by the {@link #enabledEventNamesLock}
+ * below.
+ */
+ private final Map<EventNamePattern, Integer> enabledPatterns = new HashMap<EventNamePattern, Integer>();
+
+ /**
+ * Cache of already-checked event names. As long as enabled/disabled events
+ * don't change in the session, we can avoid re-checking events that were
+ * previously checked against all known enabled patterns.
+ *
+ * Its accesses should be protected by the {@link #enabledEventNamesLock}
+ * below, with the exception of concurrent get operations.
+ */
+ private final Map<String, Boolean> enabledEventNamesCache = new ConcurrentHashMap<String, Boolean>();
+
+ /**
+ * Lock protecting accesses to the {@link #enabledPatterns} and
+ * {@link #enabledEventNamesCache} maps.
+ */
+ private final Lock enabledEventNamesLock = new ReentrantLock();
+
+ /**
+ * The application contexts currently enabled in the tracing sessions.
+ *
+ * It is first indexed by context retriever, then by context name, which
+ * makes it efficient to query all the contexts of a given retriever.
+ *
+ * Works similarly to {@link #enabledPatterns}, but for app contexts (and
+ * with an extra degree of indexing).
+ *
+ * TODO Could be changed to a Guava Table once/if we start using it.
+ */
+ private final Map<String, Map<String, Integer>> enabledAppContexts = new ConcurrentHashMap<String, Map<String, Integer>>();
+
+ /** Tracing domain. Defined by the sub-classes via the constructor. */
+ private final Domain domain;
+
+ /* Lazy-loaded sessiond clients and their thread objects */
+ private LttngTcpSessiondClient rootSessiondClient = null;
+ private LttngTcpSessiondClient userSessiondClient = null;
+ private Thread rootSessiondClientThread = null;
+ private Thread userSessiondClientThread = null;
+
+ /** Indicates if this agent has been initialized. */
+ private boolean initialized = false;
+
+ /**
+ * Constructor. Should only be called by sub-classes via super(...);
+ *
+ * @param domain
+ * The tracing domain of this agent.
+ */
+ protected AbstractLttngAgent(Domain domain) {
+ this.domain = domain;
+ }
+
+ @Override
+ public Domain getDomain() {
+ return domain;
+ }
+
+ @Override
+ public void registerHandler(T handler) {
+ synchronized (registeredHandlers) {
+ if (registeredHandlers.isEmpty()) {
+ /*
+ * This is the first handler that registers, we will initialize
+ * the agent.
+ */
+ init();
+ }
+ registeredHandlers.add(handler);
+ }
+ }
+
+ @Override
+ public void unregisterHandler(T handler) {
+ synchronized (registeredHandlers) {
+ registeredHandlers.remove(handler);
+ if (registeredHandlers.isEmpty()) {
+ /* There are no more registered handlers, close the connection. */
+ dispose();
+ }
+ }
+ }
+
+ private void init() {
+ /*
+ * Only called from a synchronized (registeredHandlers) block, should
+ * not need additional synchronization.
+ */
+ if (initialized) {
+ return;
+ }
+
+ LttngUstAgentLogger.log(AbstractLttngAgent.class, "Initializing Agent for domain: " + domain.name());
+
+ String rootClientThreadName = "Root sessiond client started by agent: " + this.getClass().getSimpleName();
+
+ rootSessiondClient = new LttngTcpSessiondClient(this, getDomain().value(), true);
+ rootSessiondClientThread = new Thread(rootSessiondClient, rootClientThreadName);
+ rootSessiondClientThread.setDaemon(true);
+ rootSessiondClientThread.start();
+
+ String userClientThreadName = "User sessiond client started by agent: " + this.getClass().getSimpleName();
+
+ userSessiondClient = new LttngTcpSessiondClient(this, getDomain().value(), false);
+ userSessiondClientThread = new Thread(userSessiondClient, userClientThreadName);
+ userSessiondClientThread.setDaemon(true);
+ userSessiondClientThread.start();
+
+ /* Give the client threads a chance to finish connecting and registering. */
+ if (!rootSessiondClient.waitForConnection(INIT_TIMEOUT)) {
+ userSessiondClient.waitForConnection(INIT_TIMEOUT);
+ }
+
+ initialized = true;
+ }
+
+ /**
+ * Dispose the agent
+ */
+ private void dispose() {
+ LttngUstAgentLogger.log(AbstractLttngAgent.class, "Disposing Agent for domain: " + domain.name());
+
+ /*
+ * Only called from a synchronized (registeredHandlers) block, should
+ * not need additional synchronization.
+ */
+ rootSessiondClient.close();
+ userSessiondClient.close();
+
+ try {
+ rootSessiondClientThread.join();
+ userSessiondClientThread.join();
+
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ rootSessiondClient = null;
+ rootSessiondClientThread = null;
+ userSessiondClient = null;
+ userSessiondClientThread = null;
+
+ /*
+ * Send filter change notifications for all event rules currently
+ * active, then clear them.
+ */
+ FilterChangeNotifier fcn = FilterChangeNotifier.getInstance();
+
+ enabledEventNamesLock.lock();
+ try {
+ for (Map.Entry<EventNamePattern, Integer> entry : enabledPatterns.entrySet()) {
+ String eventName = entry.getKey().getEventName();
+ Integer nb = entry.getValue();
+ for (int i = 0; i < nb.intValue(); i++) {
+ fcn.removeEventRules(eventName);
+ }
+ }
+ enabledPatterns.clear();
+ enabledEventNamesCache.clear();
+ } finally {
+ enabledEventNamesLock.unlock();
+ }
+
+ /*
+ * Also clear tracked app contexts (no filter notifications sent for
+ * those currently).
+ */
+ enabledAppContexts.clear();
+
+ initialized = false;
+ }
+
+ @Override
+ public boolean eventEnabled(EventRule eventRule) {
+ /* Notify the filter change manager of the command */
+ FilterChangeNotifier.getInstance().addEventRule(eventRule);
+
+ String eventName = eventRule.getEventName();
+ EventNamePattern pattern = new EventNamePattern(eventName);
+
+ enabledEventNamesLock.lock();
+ try {
+ boolean ret = incrementRefCount(pattern, enabledPatterns);
+ enabledEventNamesCache.clear();
+ return ret;
+ } finally {
+ enabledEventNamesLock.unlock();
+ }
+ }
+
+ @Override
+ public boolean eventDisabled(String eventName) {
+ /* Notify the filter change manager of the command */
+ FilterChangeNotifier.getInstance().removeEventRules(eventName);
+
+ EventNamePattern pattern = new EventNamePattern(eventName);
+
+ enabledEventNamesLock.lock();
+ try {
+ boolean ret = decrementRefCount(pattern, enabledPatterns);
+ enabledEventNamesCache.clear();
+ return ret;
+ } finally {
+ enabledEventNamesLock.unlock();
+ }
+ }
+
+ @Override
+ public boolean appContextEnabled(String contextRetrieverName, String contextName) {
+ synchronized (enabledAppContexts) {
+ Map<String, Integer> retrieverMap = enabledAppContexts.get(contextRetrieverName);
+ if (retrieverMap == null) {
+ /* There is no submap for this retriever, let's create one. */
+ retrieverMap = new ConcurrentHashMap<String, Integer>();
+ enabledAppContexts.put(contextRetrieverName, retrieverMap);
+ }
+
+ return incrementRefCount(contextName, retrieverMap);
+ }
+ }
+
+ @Override
+ public boolean appContextDisabled(String contextRetrieverName, String contextName) {
+ synchronized (enabledAppContexts) {
+ Map<String, Integer> retrieverMap = enabledAppContexts.get(contextRetrieverName);
+ if (retrieverMap == null) {
+ /* There was no submap for this retriever, invalid command? */
+ return false;
+ }
+
+ boolean ret = decrementRefCount(contextName, retrieverMap);
+
+ /* If the submap is now empty we can remove it from the main map. */
+ if (retrieverMap.isEmpty()) {
+ enabledAppContexts.remove(contextRetrieverName);
+ }
+
+ return ret;
+ }
+ }
+
+ /*
+ * Implementation of this method is domain-specific.
+ */
+ @Override
+ public abstract Collection<String> listAvailableEvents();
+
+ @Override
+ public boolean isEventEnabled(String eventName) {
+ Boolean cachedEnabled = enabledEventNamesCache.get(eventName);
+ if (cachedEnabled != null) {
+ /*
+ * We have seen this event previously. Careful! cachedEnabled == null
+ * could also mean that a null value is associated with the key, but
+ * we never insert null values in the map.
+ */
+ return cachedEnabled.booleanValue();
+ }
+
+ /*
+ * We have not previously checked this event. Run it against all known
+ * enabled event patterns to determine if it should pass or not.
+ */
+ enabledEventNamesLock.lock();
+ try {
+ boolean enabled = false;
+ for (EventNamePattern enabledPattern : enabledPatterns.keySet()) {
+ Matcher matcher = enabledPattern.getPattern().matcher(eventName);
+ if (matcher.matches()) {
+ enabled = true;
+ break;
+ }
+ }
+
+ /* Add the result to the cache */
+ enabledEventNamesCache.put(eventName, Boolean.valueOf(enabled));
+ return enabled;
+
+ } finally {
+ enabledEventNamesLock.unlock();
+ }
+ }
+
+ @Override
+ public Collection<Map.Entry<String, Map<String, Integer>>> getEnabledAppContexts() {
+ return enabledAppContexts.entrySet();
+ }
+
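+ /**
+ * Increment the reference count of the given key in the given map,
+ * inserting it with a count of 1 if it was absent. Enabling cannot
+ * currently fail, so this always returns true.
+ */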
+ private static <T> boolean incrementRefCount(T key, Map<T, Integer> refCountMap) {
+ synchronized (refCountMap) {
+ Integer count = refCountMap.get(key);
+ if (count == null) {
+ /* This is the first instance of this event being enabled */
+ refCountMap.put(key, Integer.valueOf(1));
+ return true;
+ }
+ if (count.intValue() <= 0) {
+ /* It should not have been in the map in the first place! */
+ throw new IllegalStateException();
+ }
+ /* The event was already enabled, increment its refcount */
+ refCountMap.put(key, Integer.valueOf(count.intValue() + 1));
+ return true;
+ }
+ }
+
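+ /**
+ * Decrement the reference count of the given key, removing the key from
+ * the map once the count reaches zero. Returns false if the key was not
+ * present, which indicates an invalid disable command.
+ */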
+ private static <T> boolean decrementRefCount(T key, Map<T, Integer> refCountMap) {
+ synchronized (refCountMap) {
+ Integer count = refCountMap.get(key);
+ if (count == null || count.intValue() <= 0) {
+ /*
+ * The sessiond asked us to disable an event that was not
+ * enabled previously. Command error?
+ */
+ return false;
+ }
+ if (count.intValue() == 1) {
+ /*
+ * This is the last instance of this event being disabled,
+ * remove it from the map so that we stop sending it.
+ */
+ refCountMap.remove(key);
+ return true;
+ }
+ /*
+ * Other sessions are still looking for this event, simply decrement
+ * its refcount.
+ */
+ refCountMap.put(key, Integer.valueOf(count.intValue() - 1));
+ return true;
+ }
+ }
+}
+
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2017 EfficiOS Inc.
+ * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
+ */
+
+package org.lttng.ust.agent;
+
+import java.util.regex.Pattern;
+
+/**
+ * Class encapsulating an event name from the session daemon, and its
+ * corresponding {@link Pattern}. This allows referring back to the original
+ * event name, for example when we receive a disable command.
+ *
+ * @author Philippe Proulx
+ * @author Alexandre Montplaisir
+ */
+class EventNamePattern {
+
+ private final String originalEventName;
+
+ /*
+ * Note that two Patterns coming from the exact same String will not be
+ * equals()! As such, it would be confusing to make the pattern part of this
+ * class's equals/hashCode.
+ */
+ private final transient Pattern pattern;
+
+ public EventNamePattern(String eventName) {
+ if (eventName == null) {
+ throw new IllegalArgumentException();
+ }
+
+ originalEventName = eventName;
+ pattern = patternFromEventName(eventName);
+ }
+
+ public String getEventName() {
+ return originalEventName;
+ }
+
+ public Pattern getPattern() {
+ return pattern;
+ }
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + originalEventName.hashCode();
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ EventNamePattern other = (EventNamePattern) obj;
+ if (!originalEventName.equals(other.originalEventName)) {
+ return false;
+ }
+ return true;
+ }
+
+ private static Pattern patternFromEventName(String eventName) {
+ /*
+ * The situation here is that `\*` means a literal `*` in the event
+ * name, and `*` is a wildcard star. We check the event name one
+ * character at a time and create a list of tokens to be converted to
+ * partial patterns.
+ */
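+ /*
+ * For example (illustrative inputs only): "my_event*" becomes the
+ * regex ^\Qmy_event\E.*$ (trailing wildcard), while "my\*event"
+ * becomes ^\Qmy*event\E$ (escaped literal star).
+ */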
+ StringBuilder bigBuilder = new StringBuilder("^");
+ StringBuilder smallBuilder = new StringBuilder();
+
+ for (int i = 0; i < eventName.length(); i++) {
+ char c = eventName.charAt(i);
+
+ switch (c) {
+ case '*':
+ /* Add current quoted builder's string if not empty. */
+ if (smallBuilder.length() > 0) {
+ bigBuilder.append(Pattern.quote(smallBuilder.toString()));
+ smallBuilder.setLength(0);
+ }
+
+ /* Append the equivalent regex which is `.*`. */
+ bigBuilder.append(".*");
+ continue;
+
+ case '\\':
+ /* We only escape `*` and `\` here. */
+ if (i < (eventName.length() - 1)) {
+ char nextChar = eventName.charAt(i + 1);
+
+ if (nextChar == '*' || nextChar == '\\') {
+ smallBuilder.append(nextChar);
+ } else {
+ smallBuilder.append(c);
+ smallBuilder.append(nextChar);
+ }
+
+ i++;
+ continue;
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ smallBuilder.append(c);
+ }
+
+ /* Add current quoted builder's string if not empty. */
+ if (smallBuilder.length() > 0) {
+ bigBuilder.append(Pattern.quote(smallBuilder.toString()));
+ }
+
+ bigBuilder.append("$");
+
+ return Pattern.compile(bigBuilder.toString());
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent;
+
+import java.util.Collection;
+import java.util.Map;
+
+/**
+ * Interface to define LTTng Java agents.
+ *
+ * An "agent" is a representative of an LTTng session daemon in the Java world.
+ * It tracks the settings of a tracing session as they are defined in the
+ * session daemon.
+ *
+ * It also tracks the current logging handlers that are sending events to UST.
+ *
+ * @author Alexandre Montplaisir
+ *
+ * @param <T>
+ * The type of logging handler that should register to this agent
+ */
+public interface ILttngAgent<T extends ILttngHandler> {
+
+ // ------------------------------------------------------------------------
+ // Agent configuration elements
+ // ------------------------------------------------------------------------
+
+ /**
+ * Tracing domains. Corresponds to domains defined by LTTng Tools.
+ */
+ enum Domain {
+ JUL(3), LOG4J(4);
+ private int value;
+
+ private Domain(int value) {
+ this.value = value;
+ }
+
+ public int value() {
+ return value;
+ }
+ }
+
+ /**
+ * The tracing domain of this agent.
+ *
+ * @return The tracing domain.
+ */
+ Domain getDomain();
+
+ // ------------------------------------------------------------------------
+ // Log handler registering
+ // ------------------------------------------------------------------------
+
+ /**
+ * Register a handler to this agent.
+ *
+ * @param handler
+ * The handler to register
+ */
+ void registerHandler(T handler);
+
+ /**
+ * Deregister a handler from this agent.
+ *
+ * @param handler
+ * The handler to deregister.
+ */
+ void unregisterHandler(T handler);
+
+ // ------------------------------------------------------------------------
+ // Tracing session parameters
+ // ------------------------------------------------------------------------
+
+ /**
+ * Query if a given event is currently enabled in an active tracing session,
+ * meaning it should be sent to UST.
+ *
+ * @param eventName
+ * The name of the event to check.
+ * @return True if the event is currently enabled, false if it is not.
+ */
+ boolean isEventEnabled(String eventName);
+
+ /**
+ * Return the list of application contexts enabled in the tracing sessions.
+ *
+ * @return The application contexts, first indexed by retriever name, then
+ * by context name
+ */
+ Collection<Map.Entry<String, Map<String, Integer>>> getEnabledAppContexts();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent;
+
+/**
+ * Simple interface to organize all LTTng log handlers under one type.
+ *
+ * @author Alexandre Montplaisir
+ */
+public interface ILttngHandler {
+
+ /**
+ * Get the number of events logged by this handler since its inception.
+ *
+ * @return The number of logged events
+ */
+ long getEventCount();
+
+ /**
+ * Close the log handler. Should be called once the application is done
+ * logging through it.
+ */
+ void close();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent;
+
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.util.logging.Handler;
+import java.util.logging.Logger;
+
+/**
+ * The central agent managing the JUL and Log4j handlers.
+ *
+ * @author David Goulet
+ * @deprecated Applications are now expected to manage their Logger and Handler
+ * objects.
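+ *             <p>
+ *             For reference, the legacy usage was typically a single call
+ *             to {@code LTTngAgent.getLTTngAgent()} at application startup,
+ *             followed by {@code dispose()} at shutdown.
+ *             </p>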
+ */
+@Deprecated
+public class LTTngAgent {
+
+ private static LTTngAgent instance = null;
+
+ /**
+ * Public getter to acquire a reference to this singleton object.
+ *
+ * @return The agent instance
+ */
+ public static synchronized LTTngAgent getLTTngAgent() {
+ if (instance == null) {
+ instance = new LTTngAgent();
+ }
+ return instance;
+ }
+
+ /**
+ * Dispose the agent. Applications should call this once they are done
+ * logging. This dispose function is non-static for backwards
+ * compatibility purposes.
+ */
+ @SuppressWarnings("static-method")
+ public void dispose() {
+ synchronized (LTTngAgent.class) {
+ if (instance != null) {
+ instance.disposeInstance();
+ instance = null;
+ }
+ }
+ return;
+ }
+
+ private ILttngHandler julHandler = null;
+ private ILttngHandler log4jAppender = null;
+
+ /**
+ * Private constructor. This is a singleton and a reference should be
+ * acquired using {@link #getLTTngAgent()}.
+ */
+ private LTTngAgent() {
+ initJulHandler();
+ initLog4jAppender();
+ }
+
+ /**
+ * "Destructor" method.
+ */
+ private void disposeInstance() {
+ disposeJulHandler();
+ disposeLog4jAppender();
+ }
+
+ /**
+ * Create an LTTng-JUL handler, and attach it to the JUL root logger.
+ */
+ private void initJulHandler() {
+ try {
+ Class<?> julHandlerClass = Class.forName("org.lttng.ust.agent.jul.LttngLogHandler");
+ /*
+ * It is safer to use Constructor.newInstance() rather than
+ * Class.newInstance(), because it will catch the exceptions thrown
+ * by the constructor below (which happens if the Java library is
+ * present, but the matching JNI one is not).
+ */
+ Constructor<?> julHandlerCtor = julHandlerClass.getConstructor();
+ julHandler = (ILttngHandler) julHandlerCtor.newInstance();
+
+ /* Attach the handler to the root JUL logger */
+ Logger.getLogger("").addHandler((Handler) julHandler);
+
+ /*
+ * If any of the following exceptions happen, it means we could not
+ * find or initialize LTTng JUL classes. We will not set up LTTng JUL
+ * tracing in this case.
+ */
+ } catch (SecurityException e) {
+ } catch (IllegalAccessException e) {
+ } catch (IllegalArgumentException e) {
+ } catch (ClassNotFoundException e) {
+ } catch (NoSuchMethodException e) {
+ } catch (InstantiationException e) {
+ } catch (InvocationTargetException e) {
+ }
+ }
+
+ /**
+ * Create an LTTng-log4j appender, and attach it to the log4j root logger.
+ */
+ private void initLog4jAppender() {
+ /*
+ * Since Log4j is a 3rd party library, we first need to check if we can
+ * load any of its classes.
+ */
+ if (!testLog4jClasses()) {
+ return;
+ }
+
+ try {
+ Class<?> log4jAppenderClass = Class.forName("org.lttng.ust.agent.log4j.LttngLogAppender");
+ Constructor<?> log4jAppendCtor = log4jAppenderClass.getConstructor();
+ log4jAppender = (ILttngHandler) log4jAppendCtor.newInstance();
+
+ /*
+ * If any of the following exceptions happen, it means we could not
+ * find or initialize LTTng log4j classes. We will not set up LTTng
+ * log4j tracing in this case.
+ */
+ } catch (SecurityException e) {
+ return;
+ } catch (ClassNotFoundException e) {
+ return;
+ } catch (NoSuchMethodException e) {
+ return;
+ } catch (IllegalArgumentException e) {
+ return;
+ } catch (InstantiationException e) {
+ return;
+ } catch (IllegalAccessException e) {
+ return;
+ } catch (InvocationTargetException e) {
+ return;
+ }
+
+ /*
+ * Attach the appender to the root Log4j logger. Slightly more tricky
+ * here, as log4j.Logger is not in the base Java library, and we do not
+ * want the "common" package to depend on log4j. So we have to obtain it
+ * through reflection too.
+ */
+ try {
+ Class<?> loggerClass = Class.forName("org.apache.log4j.Logger");
+ Class<?> appenderClass = Class.forName("org.apache.log4j.Appender");
+
+ Method getRootLoggerMethod = loggerClass.getMethod("getRootLogger", (Class<?>[]) null);
+ Method addAppenderMethod = loggerClass.getMethod("addAppender", appenderClass);
+
+ Object rootLogger = getRootLoggerMethod.invoke(null, (Object[]) null);
+ addAppenderMethod.invoke(rootLogger, log4jAppender);
+
+ /*
+ * We have checked for the log4j library version previously, none of
+ * the following exceptions should happen.
+ */
+ } catch (SecurityException e) {
+ throw new IllegalStateException(e);
+ } catch (ClassNotFoundException e) {
+ throw new IllegalStateException(e);
+ } catch (NoSuchMethodException e) {
+ throw new IllegalStateException(e);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalStateException(e);
+ } catch (IllegalAccessException e) {
+ throw new IllegalStateException(e);
+ } catch (InvocationTargetException e) {
+ throw new IllegalStateException(e);
+ }
+ }
+
+ /**
+ * Check if log4j >= 1.2.15 library is present.
+ */
+ private static boolean testLog4jClasses() {
+ Class<?> loggingEventClass;
+
+ try {
+ loggingEventClass = Class.forName("org.apache.log4j.spi.LoggingEvent");
+ } catch (ClassNotFoundException e) {
+ /*
+ * Log4j classes not found, no need to create the relevant objects
+ */
+ return false;
+ }
+
+ /*
+ * Detect capabilities of the log4j library. We only support log4j >=
+ * 1.2.15. The getTimeStamp() method was introduced in log4j 1.2.15, so
+ * verify that it is available.
+ *
+ * We can't rely on the getPackage().getImplementationVersion() call
+ * that would retrieve information from the manifest file found in the
+ * JAR since the manifest file shipped from upstream is known to be
+ * broken in several versions of the library.
+ *
+ * More info: https://issues.apache.org/bugzilla/show_bug.cgi?id=44370
+ */
+ try {
+ loggingEventClass.getDeclaredMethod("getTimeStamp");
+ } catch (NoSuchMethodException e) {
+ System.err.println(
+ "Warning: The loaded log4j library is too old. Log4j tracing with LTTng will be disabled.");
+ return false;
+ } catch (SecurityException e) {
+ return false;
+ }
+
+ return true;
+ }
+
+ /**
+ * Detach the JUL handler from its logger and close it.
+ */
+ private void disposeJulHandler() {
+ if (julHandler == null) {
+ /* The JUL handler was not activated, we have nothing to do */
+ return;
+ }
+ Logger.getLogger("").removeHandler((Handler) julHandler);
+ julHandler.close();
+ julHandler = null;
+ }
+
+ /**
+ * Detach the log4j appender from its logger and close it.
+ */
+ private void disposeLog4jAppender() {
+ if (log4jAppender == null) {
+ /* The log4j appender was not active, we have nothing to do */
+ return;
+ }
+
+ /*
+ * Detach the appender from the log4j root logger. Again, we have to do
+ * this via reflection.
+ */
+ try {
+ Class<?> loggerClass = Class.forName("org.apache.log4j.Logger");
+ Class<?> appenderClass = Class.forName("org.apache.log4j.Appender");
+
+ Method getRootLoggerMethod = loggerClass.getMethod("getRootLogger", (Class<?>[]) null);
+ Method removeAppenderMethod = loggerClass.getMethod("removeAppender", appenderClass);
+
+ Object rootLogger = getRootLoggerMethod.invoke(null, (Object[]) null);
+ removeAppenderMethod.invoke(rootLogger, log4jAppender);
+
+ /*
+ * We were able to attach the appender previously, we should not
+ * have problems here either!
+ */
+ } catch (SecurityException e) {
+ throw new IllegalStateException(e);
+ } catch (ClassNotFoundException e) {
+ throw new IllegalStateException(e);
+ } catch (NoSuchMethodException e) {
+ throw new IllegalStateException(e);
+ } catch (IllegalArgumentException e) {
+ throw new IllegalStateException(e);
+ } catch (IllegalAccessException e) {
+ throw new IllegalStateException(e);
+ } catch (InvocationTargetException e) {
+ throw new IllegalStateException(e);
+ }
+
+ /* Close the appender */
+ log4jAppender.close();
+ log4jAppender = null;
+ }
+
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.util.Collection;
+
+import org.lttng.ust.agent.session.EventRule;
+
+/**
+ * TCP client listener interface.
+ *
+ * This interface contains callbacks that are called when the TCP client
+ * receives commands from the session daemon. These callbacks define what to
+ * do with each command.
+ *
+ * @author Alexandre Montplaisir
+ */
+public interface ILttngTcpClientListener {
+
+ /**
+ * Callback for the TCP client to notify the listener agent that a request
+ * for enabling an event rule was sent from the session daemon.
+ *
+ * @param eventRule
+ * The event rule that was requested to be enabled
+ * @return Since we do not track individual sessions, right now this command
+ * cannot fail. It will always return true.
+ */
+ boolean eventEnabled(EventRule eventRule);
+
+ /**
+ * Callback for the TCP client to notify the listener agent that a request
+ * for disabling an event was sent from the session daemon.
+ *
+ * @param eventName
+ * The name of the event that was requested to be disabled.
+ * @return True if the command completed successfully, false if we should
+ * report an error (event was not enabled, etc.)
+ */
+ boolean eventDisabled(String eventName);
+
+ /**
+ * Callback for the TCP client to notify the listener agent that a request
+ * for enabling an application-specific context was sent from the session
+ * daemon.
+ *
+ * @param contextRetrieverName
+ * The name of the retriever in which the context is present.
+ * This is used to namespace the contexts.
+ * @param contextName
+ * The name of the context that was requested to be enabled
+ * @return Since we do not track individual sessions, right now this command
+ * cannot fail. It will always return true.
+ */
+ boolean appContextEnabled(String contextRetrieverName, String contextName);
+
+ /**
+ * Callback for the TCP client to notify the listener agent that a request
+ * for disabling an application-specific context was sent from the session
+ * daemon.
+ *
+ * @param contextRetrieverName
+ * The name of the retriever in which the context is present.
+ * This is used to namespace the contexts.
+ * @param contextName
+ * The name of the context that was requested to be disabled.
+ * @return True if the command completed successfully, false if we should
+ * report an error (context was not previously enabled for example)
+ */
+ boolean appContextDisabled(String contextRetrieverName, String contextName);
+
+ /**
+ * List the events that are available in the agent's tracing domain.
+ *
+ * In Java terms, this means loggers that have at least one LTTng log
+ * handler of their corresponding domain attached.
+ *
+ * @return The list of available events
+ */
+ Collection<String> listAvailableEvents();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ * Interface for all response messages sent from the Java agent to the session
+ * daemon. Normally sent after a command coming from the session daemon was
+ * executed.
+ *
+ * @author Alexandre Montplaisir
+ */
+abstract class LttngAgentResponse {
+
+ private static final int INT_SIZE = 4;
+
+ public static final LttngAgentResponse SUCCESS_RESPONSE = new LttngAgentResponse() {
+ @Override
+ public ReturnCode getReturnCode() {
+ return ReturnCode.CODE_SUCCESS_CMD;
+ }
+ };
+
+ public static final LttngAgentResponse FAILURE_RESPONSE = new LttngAgentResponse() {
+ @Override
+ public ReturnCode getReturnCode() {
+ return ReturnCode.CODE_INVALID_CMD;
+ }
+ };
+
+ /**
+ * Return codes used in agent responses, to indicate success or different
+ * types of failures of the commands.
+ */
+ protected enum ReturnCode {
+
+ CODE_SUCCESS_CMD(1, "success"),
+ CODE_INVALID_CMD(2, "invalid"),
+ CODE_UNKNOWN_LOGGER_NAME(3, "unknown logger name");
+
+ private final int code;
+ private final String toString;
+
+ private ReturnCode(int c, String str) {
+ code = c;
+ toString = str;
+ }
+
+ public int getCode() {
+ return code;
+ }
+
+ /**
+ * Mainly used for debugging. The strings are not sent through the
+ * socket.
+ */
+ @Override
+ public String toString() {
+ return toString;
+ }
+ }
+
+ /**
+ * Get the {@link ReturnCode} that goes with this response. It is expected
+ * by the session daemon, but some commands may require more than this
+ * in their response.
+ *
+ * @return The return code
+ */
+ public abstract ReturnCode getReturnCode();
+
+ /**
+ * Gets a byte array of the response so that it may be streamed.
+ *
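+ * For example, the success response serializes to the four big-endian
+ * bytes {@code 00 00 00 01}, the code of CODE_SUCCESS_CMD.
+ *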
+ * @return The byte array of the response
+ */
+ public byte[] getBytes() {
+ byte data[] = new byte[INT_SIZE];
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ buf.order(ByteOrder.BIG_ENDIAN);
+ buf.putInt(getReturnCode().getCode());
+ return data;
+ }
+
+ @Override
+ public String toString() {
+ return "LttngAgentResponse["
+ + "code=" + getReturnCode().getCode()
+ + ", " + getReturnCode().toString()
+ + "]";
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015-2016 EfficiOS Inc.
+ * Copyright (C) 2015-2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.io.BufferedReader;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.InputStreamReader;
+import java.lang.management.ManagementFactory;
+import java.net.Socket;
+import java.net.UnknownHostException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.charset.Charset;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.TimeUnit;
+
+import org.lttng.ust.agent.utils.LttngUstAgentLogger;
+
+/**
+ * Client for agents to connect to a local session daemon, using a TCP socket.
+ *
+ * @author David Goulet
+ */
+public class LttngTcpSessiondClient implements Runnable {
+
+ private static final String SESSION_HOST = "127.0.0.1";
+ private static final String ROOT_PORT_FILE = "/var/run/lttng/agent.port";
+ private static final String USER_PORT_FILE = "/.lttng/agent.port";
+ private static final Charset PORT_FILE_ENCODING = Charset.forName("UTF-8");
+
+ private static final int PROTOCOL_MAJOR_VERSION = 2;
+ private static final int PROTOCOL_MINOR_VERSION = 0;
+
+ /** Latch used to wait until registration with the session daemon is done. */
+ private final CountDownLatch registrationLatch = new CountDownLatch(1);
+
+ private Socket sessiondSock;
+ private volatile boolean quit = false;
+
+ private DataInputStream inFromSessiond;
+ private DataOutputStream outToSessiond;
+
+ private final ILttngTcpClientListener logAgent;
+ private final int domainValue;
+ private final boolean isRoot;
+
+ /**
+ * Constructor
+ *
+ * @param logAgent
+ * The listener this client will operate on, typically an LTTng
+ * agent.
+ * @param domainValue
+ * The integer to send to the session daemon representing the
+ * tracing domain to handle.
+ * @param isRoot
+ * True if this client should connect to the root session daemon,
+ * false if it should connect to the user one.
+ */
+ public LttngTcpSessiondClient(ILttngTcpClientListener logAgent, int domainValue, boolean isRoot) {
+ this.logAgent = logAgent;
+ this.domainValue = domainValue;
+ this.isRoot = isRoot;
+ }
+
+ /**
+ * Wait until this client has successfully established a connection to its
+ * target session daemon.
+ *
+ * @param seconds
+ * A timeout in seconds after which this method will return
+ * anyway.
+ * @return True if the client actually established the connection, false
+ * if we returned because the timeout has elapsed or the thread was
+ * interrupted.
+ */
+ public boolean waitForConnection(int seconds) {
+ try {
+ return registrationLatch.await(seconds, TimeUnit.SECONDS);
+ } catch (InterruptedException e) {
+ return false;
+ }
+ }
+
+ @Override
+ public void run() {
+ for (;;) {
+ if (this.quit) {
+ break;
+ }
+
+ try {
+
+ /*
+ * Connect to the session daemon before anything else.
+ */
+ log("Connecting to sessiond");
+ connectToSessiond();
+
+ /*
+ * Register to the session daemon as the Java component of the
+ * UST application.
+ */
+ log("Registering to sessiond");
+ registerToSessiond();
+
+ /*
+ * Block on socket receive and wait for command from the
+ * session daemon. This will return if and only if there is a
+ * fatal error or the socket closes.
+ */
+ log("Waiting on sessiond commands...");
+ handleSessiondCmd();
+ } catch (UnknownHostException uhe) {
+ uhe.printStackTrace();
+ /*
+ * Terminate agent thread.
+ */
+ close();
+ } catch (IOException ioe) {
+ /*
+ * An I/O exception may have been triggered by the session daemon
+ * closing the socket. Close our own socket and retry connecting
+ * after a delay.
+ */
+ try {
+ if (this.sessiondSock != null) {
+ this.sessiondSock.close();
+ }
+ Thread.sleep(3000);
+ } catch (InterruptedException e) {
+ /*
+ * Retry immediately if sleep is interrupted.
+ */
+ } catch (IOException closeioe) {
+ closeioe.printStackTrace();
+ /*
+ * Terminate agent thread.
+ */
+ close();
+ }
+ }
+ }
+ }
+
+ /**
+ * Dispose this client and close any socket connection it may hold.
+ */
+ public void close() {
+ log("Closing client");
+ this.quit = true;
+
+ try {
+ if (this.sessiondSock != null) {
+ this.sessiondSock.close();
+ }
+ } catch (IOException e) {
+ e.printStackTrace();
+ }
+ }
+
+ private void connectToSessiond() throws IOException {
+ int rootPort = getPortFromFile(ROOT_PORT_FILE);
+ int userPort = getPortFromFile(getHomePath() + USER_PORT_FILE);
+
+ /*
+ * Check for the edge case of both files existing but pointing to the
+ * same port. In this case, let the root client handle it.
+ */
+ if ((rootPort != 0) && (rootPort == userPort) && (!isRoot)) {
+ log("User and root config files both point to port " + rootPort +
+ ". Letting the root client handle it.");
+ throw new IOException();
+ }
+
+ int portToUse = (isRoot ? rootPort : userPort);
+
+ if (portToUse == 0) {
+ /* No session daemon available. Stop and retry later. */
+ throw new IOException();
+ }
+
+ this.sessiondSock = new Socket(SESSION_HOST, portToUse);
+ this.inFromSessiond = new DataInputStream(sessiondSock.getInputStream());
+ this.outToSessiond = new DataOutputStream(sessiondSock.getOutputStream());
+ }
+
+ private static String getHomePath() {
+ /*
+ * The environment variable LTTNG_HOME overrides HOME if
+ * defined.
+ */
+ String homePath = System.getenv("LTTNG_HOME");
+
+ if (homePath == null) {
+ homePath = System.getProperty("user.home");
+ }
+ return homePath;
+ }
+
+ /**
+ * Read port number from file created by the session daemon.
+ *
+ * @return port value if found else 0.
+ */
+ private static int getPortFromFile(String path) throws IOException {
+ BufferedReader br = null;
+
+ try {
+ br = new BufferedReader(new InputStreamReader(new FileInputStream(path), PORT_FILE_ENCODING));
+ String line = br.readLine();
+ if (line == null) {
+ /* File exists but is empty. */
+ return 0;
+ }
+
+ int port = Integer.parseInt(line, 10);
+ if (port < 0 || port > 65535) {
+ /* Invalid value. Ignore. */
+ port = 0;
+ }
+ return port;
+
+ } catch (NumberFormatException e) {
+ /* File contained something that was not a number. */
+ return 0;
+ } catch (FileNotFoundException e) {
+ /* No port available. */
+ return 0;
+ } finally {
+ if (br != null) {
+ br.close();
+ }
+ }
+ }
+
+ private void registerToSessiond() throws IOException {
+ byte data[] = new byte[16];
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ String pid = ManagementFactory.getRuntimeMXBean().getName().split("@")[0];
+
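+ /*
+ * Registration message layout, 16 bytes total in big-endian order (the
+ * ByteBuffer default): domain value, PID, protocol major version and
+ * protocol minor version, 4 bytes each.
+ */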
+ buf.putInt(domainValue);
+ buf.putInt(Integer.parseInt(pid));
+ buf.putInt(PROTOCOL_MAJOR_VERSION);
+ buf.putInt(PROTOCOL_MINOR_VERSION);
+ this.outToSessiond.write(data, 0, data.length);
+ this.outToSessiond.flush();
+ }
+
+ /**
+ * Handle session command from the session daemon.
+ */
+ private void handleSessiondCmd() throws IOException {
+ /* Data read from the socket */
+ byte inputData[] = null;
+ /* Reply data written to the socket, sent to the sessiond */
+ LttngAgentResponse response;
+
+ while (true) {
+ /* Get header from session daemon. */
+ SessiondCommandHeader cmdHeader = recvHeader();
+
+ if (cmdHeader.getDataSize() > 0) {
+ inputData = recvPayload(cmdHeader);
+ }
+
+ switch (cmdHeader.getCommandType()) {
+ case CMD_REG_DONE:
+ {
+ /*
+ * Countdown the registration latch, meaning registration is
+ * done and we can proceed to continue tracing.
+ */
+ registrationLatch.countDown();
+ /*
+ * We don't send any reply to the registration done command.
+ * This just marks the end of the initial session setup.
+ */
+ log("Registration done");
+ continue;
+ }
+ case CMD_LIST:
+ {
+ SessiondCommand listLoggerCmd = new SessiondListLoggersCommand();
+ response = listLoggerCmd.execute(logAgent);
+ log("Received list loggers command");
+ break;
+ }
+ case CMD_EVENT_ENABLE:
+ {
+ if (inputData == null) {
+ /* Invalid command */
+ response = LttngAgentResponse.FAILURE_RESPONSE;
+ break;
+ }
+ SessiondCommand enableEventCmd = new SessiondEnableEventCommand(inputData);
+ response = enableEventCmd.execute(logAgent);
+ log("Received enable event command: " + enableEventCmd.toString());
+ break;
+ }
+ case CMD_EVENT_DISABLE:
+ {
+ if (inputData == null) {
+ /* Invalid command */
+ response = LttngAgentResponse.FAILURE_RESPONSE;
+ break;
+ }
+ SessiondCommand disableEventCmd = new SessiondDisableEventCommand(inputData);
+ response = disableEventCmd.execute(logAgent);
+ log("Received disable event command: " + disableEventCmd.toString());
+ break;
+ }
+ case CMD_APP_CTX_ENABLE:
+ {
+ if (inputData == null) {
+ /* This command expects a payload, invalid command */
+ response = LttngAgentResponse.FAILURE_RESPONSE;
+ break;
+ }
+ SessiondCommand enableAppCtxCmd = new SessiondEnableAppContextCommand(inputData);
+ response = enableAppCtxCmd.execute(logAgent);
+ log("Received enable app-context command");
+ break;
+ }
+ case CMD_APP_CTX_DISABLE:
+ {
+ if (inputData == null) {
+ /* This command expects a payload, invalid command */
+ response = LttngAgentResponse.FAILURE_RESPONSE;
+ break;
+ }
+ SessiondCommand disableAppCtxCmd = new SessiondDisableAppContextCommand(inputData);
+ response = disableAppCtxCmd.execute(logAgent);
+ log("Received disable app-context command");
+ break;
+ }
+ default:
+ {
+ /* Unknown command, send empty reply */
+ response = null;
+ log("Received unknown command, ignoring");
+ break;
+ }
+ }
+
+ /* Send response to the session daemon. */
+ byte[] responseData;
+ if (response == null) {
+ responseData = new byte[4];
+ ByteBuffer buf = ByteBuffer.wrap(responseData);
+ buf.order(ByteOrder.BIG_ENDIAN);
+ } else {
+ log("Sending response: " + response.toString());
+ responseData = response.getBytes();
+ }
+ this.outToSessiond.write(responseData, 0, responseData.length);
+ this.outToSessiond.flush();
+ }
+ }
+
+ /**
+ * Receive the command header from the session daemon, using a buffer of
+ * the fixed header size defined by the LTTng agent protocol.
+ */
+ private SessiondCommandHeader recvHeader() throws IOException {
+ byte data[] = new byte[SessiondCommandHeader.HEADER_SIZE];
+ int bytesLeft = data.length;
+ int bytesOffset = 0;
+
+ while (bytesLeft > 0) {
+ int bytesRead = this.inFromSessiond.read(data, bytesOffset, bytesLeft);
+
+ if (bytesRead < 0) {
+ throw new IOException();
+ }
+ bytesLeft -= bytesRead;
+ bytesOffset += bytesRead;
+ }
+ return new SessiondCommandHeader(data);
+ }
+
+ /**
+ * Receive payload from the session daemon. This MUST be done after a
+ * recvHeader() so the header values of the command are known.
+ *
+ * The caller SHOULD check that the header's getDataSize() is greater
+ * than zero beforehand, which indicates whether a payload is expected.
+ */
+ private byte[] recvPayload(SessiondCommandHeader headerCmd) throws IOException {
+ byte payload[] = new byte[(int) headerCmd.getDataSize()];
+ int bytesLeft = payload.length;
+ int bytesOffset = 0;
+
+ /* Failsafe check so we don't waste our time reading 0 bytes. */
+ if (bytesLeft == 0) {
+ return null;
+ }
+
+ while (bytesLeft > 0) {
+ int bytesRead = inFromSessiond.read(payload, bytesOffset, bytesLeft);
+
+ if (bytesRead < 0) {
+ throw new IOException();
+ }
+ bytesLeft -= bytesRead;
+ bytesOffset += bytesRead;
+ }
+ return payload;
+ }
+
+ /**
+ * Wrapper for this class's logging, adds the connection's characteristics
+ * to help differentiate between multiple TCP clients.
+ */
+ private void log(String message) {
+ LttngUstAgentLogger.log(getClass(),
+ "(root=" + isRoot + ", domain=" + domainValue + ") " + message);
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015-2016 EfficiOS Inc.
+ * Copyright (C) 2015-2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.charset.Charset;
+
+/**
+ * Base class to represent all commands sent from the session daemon to the Java
+ * agent. The agent is then expected to execute the command and provide a
+ * response.
+ *
+ * @author Alexandre Montplaisir
+ */
+abstract class SessiondCommand {
+
+ /**
+ * Encoding that should be used for the strings in the sessiond agent
+ * protocol on the socket.
+ */
+ protected static final Charset SESSIOND_PROTOCOL_CHARSET = Charset.forName("UTF-8");
+
+ enum CommandType {
+ /** List logger(s). */
+ CMD_LIST(1),
+ /** Enable logger by name. */
+ CMD_EVENT_ENABLE(2),
+ /** Disable logger by name. */
+ CMD_EVENT_DISABLE(3),
+ /** Registration done */
+ CMD_REG_DONE(4),
+ /** Enable application context */
+ CMD_APP_CTX_ENABLE(5),
+ /** Disable application context */
+ CMD_APP_CTX_DISABLE(6);
+
+ private int code;
+
+ private CommandType(int c) {
+ code = c;
+ }
+
+ public int getCommandType() {
+ return code;
+ }
+ }
+
+ /**
+ * Execute the command handler's action on the specified tracing agent.
+ *
+ * @param agent
+ * The agent on which to execute the command
+ * @return If the command completed successfully or not
+ */
+ public abstract LttngAgentResponse execute(ILttngTcpClientListener agent);
+
+ /**
+ * Utility method to read agent-protocol strings passed on the socket. The
+ * buffer will contain a 32-bit integer representing the length, immediately
+ * followed by the string itself.
+ *
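+ * For example, with the buffer in big-endian order as used by this
+ * protocol, the seven bytes {@code 00 00 00 03 61 62 63} decode to the
+ * string {@code "abc"}.
+ *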
+ * @param buffer
+ * The ByteBuffer from which to read. It should already be setup
+ * and positioned where the read should begin.
+ * @return The string that was read, or <code>null</code> if it was badly
+ * formatted.
+ */
+ protected static String readNextString(ByteBuffer buffer) {
+ int nbBytes = buffer.getInt();
+ if (nbBytes < 0) {
+ /* The string length should be positive */
+ return null;
+ }
+ if (nbBytes == 0) {
+ /* The string is explicitly an empty string */
+ return "";
+ }
+
+ byte[] stringBytes = new byte[nbBytes];
+ buffer.get(stringBytes);
+ return new String(stringBytes, SESSIOND_PROTOCOL_CHARSET).trim();
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import org.lttng.ust.agent.client.SessiondCommand.CommandType;
+
+/**
+ * Header of session daemon commands.
+ *
+ * @author Alexandre Montplaisir
+ * @author David Goulet
+ */
+class SessiondCommandHeader {
+
+ /** ABI size of command header. */
+ public static final int HEADER_SIZE = 16;
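+
+ /*
+ * Wire layout of the header, in big-endian order: payload size (8
+ * bytes), command type (4 bytes, 1-based), command version (4 bytes,
+ * currently unused).
+ */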
+
+ /** Payload size in bytes following this header. */
+ private final long dataSize;
+
+ /** Command type. */
+ private final CommandType cmd;
+
+ public SessiondCommandHeader(byte[] data) {
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ buf.order(ByteOrder.BIG_ENDIAN);
+
+ dataSize = buf.getLong();
+ cmd = CommandType.values()[buf.getInt() - 1];
+ buf.getInt(); // command version, currently unused
+ }
+
+ public long getDataSize() {
+ return dataSize;
+ }
+
+ public CommandType getCommandType() {
+ return cmd;
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ * Session daemon command indicating to the Java agent that an
+ * application-specific context was disabled in the tracing session.
+ *
+ * @author Alexandre Montplaisir
+ */
+class SessiondDisableAppContextCommand extends SessiondCommand {
+
+ private final String retrieverName;
+ private final String contextName;
+
+ private final boolean commandIsValid;
+
+ public SessiondDisableAppContextCommand(byte[] data) {
+ if (data == null) {
+ throw new IllegalArgumentException();
+ }
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ buf.order(ByteOrder.BIG_ENDIAN);
+
+ /*
+ * The buffer contains the retriever name first, followed by the
+ * context's name.
+ */
+ retrieverName = readNextString(buf);
+ contextName = readNextString(buf);
+
+ /* If any of these strings were null then the command was invalid */
+ commandIsValid = ((retrieverName != null) && (contextName != null));
+ }
+
+ @Override
+ public LttngAgentResponse execute(ILttngTcpClientListener agent) {
+ if (!commandIsValid) {
+ return LttngAgentResponse.FAILURE_RESPONSE;
+ }
+
+ boolean success = agent.appContextDisabled(retrieverName, contextName);
+ return (success ? LttngAgentResponse.SUCCESS_RESPONSE : DISABLE_APP_CONTEXT_FAILURE_RESPONSE);
+ }
+
+ /**
+ * Response sent when the disable-context command asks to disable an
+ * unknown context name.
+ */
+ private static final LttngAgentResponse DISABLE_APP_CONTEXT_FAILURE_RESPONSE = new LttngAgentResponse() {
+ @Override
+ public ReturnCode getReturnCode() {
+ /* Same return code used for unknown event/logger names */
+ return ReturnCode.CODE_UNKNOWN_LOGGER_NAME;
+ }
+ };
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ * Session daemon command indicating to the Java agent that some events were
+ * disabled in the tracing session.
+ *
+ * @author Alexandre Montplaisir
+ * @author David Goulet
+ */
+class SessiondDisableEventCommand extends SessiondCommand {
+
+ /**
+ * Response sent when the disable-event command asks to disable an
+ * unknown event.
+ */
+ private static final LttngAgentResponse DISABLE_EVENT_FAILURE_RESPONSE = new LttngAgentResponse() {
+ @Override
+ public ReturnCode getReturnCode() {
+ return ReturnCode.CODE_UNKNOWN_LOGGER_NAME;
+ }
+ };
+
+ /** Event name to disable from the tracing session */
+ private final String eventName;
+
+ public SessiondDisableEventCommand(byte[] data) {
+ if (data == null) {
+ throw new IllegalArgumentException();
+ }
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ buf.order(ByteOrder.BIG_ENDIAN);
+ eventName = new String(data, SESSIOND_PROTOCOL_CHARSET).trim();
+ }
+
+ @Override
+ public LttngAgentResponse execute(ILttngTcpClientListener agent) {
+ boolean success = agent.eventDisabled(this.eventName);
+ return (success ? LttngAgentResponse.SUCCESS_RESPONSE : DISABLE_EVENT_FAILURE_RESPONSE);
+ }
+
+ @Override
+ public String toString() {
+ return "SessiondDisableEventCommand["
+ + "eventName=" + eventName
+ +"]";
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+/**
+ * Session daemon command indicating to the Java agent that an
+ * application-specific context was enabled in the tracing session.
+ *
+ * @author Alexandre Montplaisir
+ */
+class SessiondEnableAppContextCommand extends SessiondCommand {
+
+ private final String retrieverName;
+ private final String contextName;
+
+ private final boolean commandIsValid;
+
+ public SessiondEnableAppContextCommand(byte[] data) {
+ if (data == null) {
+ throw new IllegalArgumentException();
+ }
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ buf.order(ByteOrder.BIG_ENDIAN);
+
+ /*
+ * The buffer contains the retriever name first, followed by the
+ * context's name.
+ */
+ retrieverName = readNextString(buf);
+ contextName = readNextString(buf);
+
+ /* If any of these strings were null then the command was invalid */
+ commandIsValid = ((retrieverName != null) && (contextName != null));
+ }
+
+ @Override
+ public LttngAgentResponse execute(ILttngTcpClientListener agent) {
+ if (!commandIsValid) {
+ return LttngAgentResponse.FAILURE_RESPONSE;
+ }
+
+ boolean success = agent.appContextEnabled(retrieverName, contextName);
+ return (success ? LttngAgentResponse.SUCCESS_RESPONSE : LttngAgentResponse.FAILURE_RESPONSE);
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+
+import org.lttng.ust.agent.session.EventRule;
+import org.lttng.ust.agent.session.LogLevelSelector;
+
+/**
+ * Session daemon command indicating to the Java agent that some events were
+ * enabled in the tracing session.
+ *
+ * @author Alexandre Montplaisir
+ * @author David Goulet
+ */
+class SessiondEnableEventCommand extends SessiondCommand {
+
+ /** Fixed event name length. Value defined by the lttng agent protocol. */
+ private static final int EVENT_NAME_LENGTH = 256;
+
+ private final boolean commandIsValid;
+
+ /* Parameters of the event rule being enabled */
+ private final String eventName;
+ private final LogLevelSelector logLevelFilter;
+ private final String filterString;
+
+ public SessiondEnableEventCommand(byte[] data) {
+ if (data == null) {
+ throw new IllegalArgumentException();
+ }
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ buf.order(ByteOrder.BIG_ENDIAN);
+ int logLevel = buf.getInt();
+ int logLevelType = buf.getInt();
+ logLevelFilter = new LogLevelSelector(logLevel, logLevelType);
+
+ /* Read the event name */
+ byte[] eventNameBytes = new byte[EVENT_NAME_LENGTH];
+ buf.get(eventNameBytes);
+ eventName = new String(eventNameBytes, SESSIOND_PROTOCOL_CHARSET).trim();
+
+ /* Read the filter string */
+ filterString = readNextString(buf);
+
+ /* The command was invalid if the string could not be read correctly */
+ commandIsValid = (filterString != null);
+ }
+
+ @Override
+ public LttngAgentResponse execute(ILttngTcpClientListener agent) {
+ if (!commandIsValid) {
+ return LttngAgentResponse.FAILURE_RESPONSE;
+ }
+
+ EventRule rule = new EventRule(eventName, logLevelFilter, filterString);
+ boolean success = agent.eventEnabled(rule);
+ return (success ? LttngAgentResponse.SUCCESS_RESPONSE : LttngAgentResponse.FAILURE_RESPONSE);
+ }
+
+ @Override
+ public String toString() {
+ return "SessiondEnableEventCommand["
+ + "eventName=" + eventName
+ + ", logLevel=" + logLevelFilter.toString()
+ + ", filterString=" + filterString
+ +"]";
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.client;
+
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.util.Collection;
+
+/**
+ * Session daemon command asking the Java agent to list its registered loggers,
+ * which correspond to event names in the tracing session.
+ *
+ * @author Alexandre Montplaisir
+ * @author David Goulet
+ */
+class SessiondListLoggersCommand extends SessiondCommand {
+
+ @Override
+ public LttngAgentResponse execute(ILttngTcpClientListener agent) {
+ final Collection<String> loggerList = agent.listAvailableEvents();
+ return new SessiondListLoggersResponse(loggerList);
+ }
+
+ private static class SessiondListLoggersResponse extends LttngAgentResponse {
+
+ private static final int SIZE = 12;
+
+ private final Collection<String> loggers;
+
+ public SessiondListLoggersResponse(Collection<String> loggers) {
+ this.loggers = loggers;
+ }
+
+ @Override
+ public ReturnCode getReturnCode() {
+ /* This command can't really fail */
+ return ReturnCode.CODE_SUCCESS_CMD;
+ }
+
+ @Override
+ public byte[] getBytes() {
+ /*
+ * Compute the data size, which is the number of bytes of each
+ * encoded string, plus 1 per string for the terminating \0.
+ */
+ int dataSize = 0;
+ for (String logger : loggers) {
+ dataSize += logger.getBytes(SESSIOND_PROTOCOL_CHARSET).length + 1;
+ }
+
+ /* Prepare the buffer */
+ byte data[] = new byte[SIZE + dataSize];
+ ByteBuffer buf = ByteBuffer.wrap(data);
+ buf.order(ByteOrder.BIG_ENDIAN);
+
+ /* Write the header section of the response */
+ buf.putInt(getReturnCode().getCode());
+ buf.putInt(dataSize);
+ buf.putInt(loggers.size());
+
+ /* Write the payload */
+ for (String logger : loggers) {
+ buf.put(logger.getBytes(SESSIOND_PROTOCOL_CHARSET));
+ /* Terminating NUL byte after the logger name. */
+ buf.put((byte) 0x0);
+ }
+ return data;
+ }
+ }
+
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.context;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+/**
+ * The singleton manager of {@link IContextInfoRetriever} objects.
+ *
+ * @author Alexandre Montplaisir
+ */
+public final class ContextInfoManager {
+
+ private static final String SHARED_LIBRARY_NAME = "lttng-ust-context-jni";
+
+ private static final Pattern VALID_CONTEXT_NAME_PATTERN = Pattern.compile("^[a-zA-Z0-9_\\.]+$");
+
+ private static ContextInfoManager instance;
+
+ private final Map<String, IContextInfoRetriever> contextInfoRetrievers = new ConcurrentHashMap<String, IContextInfoRetriever>();
+ private final Map<String, Long> contextInfoRetrieverRefs = new HashMap<String, Long>();
+
+ /**
+ * Lock used to keep the two maps above in sync when retrievers are
+ * registered or unregistered.
+ */
+ private final Object retrieverLock = new Object();
+
+ /** Singleton class, constructor should not be accessed directly */
+ private ContextInfoManager() {
+ }
+
+ /**
+ * Get the singleton instance.
+ *
+ * <p>
+ * Usage of this class requires the "liblttng-ust-context-jni.so" native
+ * library to be present on the system and available (passing
+ * -Djava.library.path=path to the JVM may be needed).
+ * </p>
+ *
+ * @return The singleton instance
+ * @throws IOException
+ * If the shared library cannot be found.
+ * @throws SecurityException
+ * We will forward any SecurityException that may be thrown when
+ * trying to load the JNI library.
+ */
+ public static synchronized ContextInfoManager getInstance() throws IOException, SecurityException {
+ if (instance == null) {
+ try {
+ System.loadLibrary(SHARED_LIBRARY_NAME);
+ } catch (UnsatisfiedLinkError e) {
+ throw new IOException(e);
+ }
+ instance = new ContextInfoManager();
+ }
+ return instance;
+ }
+
+ /**
+ * Register a new context info retriever.
+ *
+ * <p>
+ * Each context info retriever is registered with a given "retriever name",
+ * which specifies the namespace of the context elements. This name is
+ * specified separately from the retriever objects, which allows registering
+ * the same retriever under different namespaces, for example.
+ * </p>
+ *
+ * <p>
+ * If the method returns false (indicating registration failure), then the
+ * retriever object will *not* be used for context information.
+ * </p>
+ *
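+ * <p>
+ * A minimal usage sketch, with hypothetical retriever and context names
+ * (this assumes IContextInfoRetriever declares only the
+ * retrieveContextInfo(String) method used below):
+ * </p>
+ * <pre>
+ * IContextInfoRetriever retriever = new IContextInfoRetriever() {
+ *     public Object retrieveContextInfo(String key) {
+ *         return "myRequestId".equals(key) ? Integer.valueOf(42) : null;
+ *     }
+ * };
+ * ContextInfoManager.getInstance()
+ *         .registerContextInfoRetriever("myRetriever", retriever);
+ * </pre>
+ *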
+ * @param retrieverName
+ * The name to register to the context retriever object with.
+ * @param contextInfoRetriever
+ * The context info retriever to register
+ * @return True if the retriever was successfully registered, false if there
+ * was an error, for example if a retriever is already registered
+ * with that name.
+ */
+ public boolean registerContextInfoRetriever(String retrieverName, IContextInfoRetriever contextInfoRetriever) {
+ synchronized (retrieverLock) {
+ if (!validateRetrieverName(retrieverName)) {
+ return false;
+ }
+
+ if (contextInfoRetrievers.containsKey(retrieverName)) {
+ /*
+ * There is already a retriever registered with that name,
+ * refuse the new registration.
+ */
+ return false;
+ }
+ /*
+ * Inform LTTng-UST of the new retriever. The names have to start
+ * with "$app." on the UST side!
+ */
+ long ref = LttngContextApi.registerProvider("$app." + retrieverName);
+ if (ref == 0) {
+ return false;
+ }
+
+ contextInfoRetrievers.put(retrieverName, contextInfoRetriever);
+ contextInfoRetrieverRefs.put(retrieverName, Long.valueOf(ref));
+
+ return true;
+ }
+ }
+
+ /**
+ * Unregister a previously added context info retriever.
+ *
+ * This method has no effect if the retriever was not already registered.
+ *
+ * @param retrieverName
+ * The context info retriever to unregister
+ * @return True if unregistration was successful, false if there was an
+ * error
+ */
+ public boolean unregisterContextInfoRetriever(String retrieverName) {
+ synchronized (retrieverLock) {
+ if (!contextInfoRetrievers.containsKey(retrieverName)) {
+ /*
+ * There was no retriever registered with that name.
+ */
+ return false;
+ }
+ contextInfoRetrievers.remove(retrieverName);
+ long ref = contextInfoRetrieverRefs.remove(retrieverName).longValue();
+
+ /* Unregister the retriever on the UST side too */
+ LttngContextApi.unregisterProvider(ref);
+
+ return true;
+ }
+ }
+
+ /**
+ * Return the context info retriever object registered with the given name.
+ *
+ * @param retrieverName
+ * The retriever name to look for
+ * @return The corresponding retriever object, or <code>null</code> if there
+ * was none
+ */
+ public IContextInfoRetriever getContextInfoRetriever(String retrieverName) {
+ /*
+ * Note that this method does not take the retrieverLock, it lets
+ * concurrent threads access the ConcurrentHashMap directly.
+ *
+ * It's fine for a get() to happen during a registration or
+ * unregistration, it's first-come-first-serve.
+ */
+ return contextInfoRetrievers.get(retrieverName);
+ }
+
+ /**
+ * Validate that the given retriever name contains only the allowed
+ * characters, which are alphanumeric characters, period "." and
+ * underscore "_". The name must also not start with a number.
+ */
+ private static boolean validateRetrieverName(String contextName) {
+ if (contextName.isEmpty()) {
+ return false;
+ }
+
+ /* First character must not be a number */
+ if (Character.isDigit(contextName.charAt(0))) {
+ return false;
+ }
+
+ /* Validate the other characters of the string */
+ Matcher matcher = VALID_CONTEXT_NAME_PATTERN.matcher(contextName);
+ return matcher.matches();
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.context;
+
+import java.io.ByteArrayOutputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.ByteOrder;
+import java.nio.charset.Charset;
+import java.util.Collection;
+import java.util.Map;
+
+import org.lttng.ust.agent.utils.LttngUstAgentLogger;
+
+/**
+ * This class is used to serialize the list of "context info" objects to pass
+ * through JNI.
+ *
+ * The protocol expects two byte array parameters, which are contained here in
+ * the {@link SerializedContexts} inner class.
+ *
+ * The first byte array is called the "entries array", and contains fixed-size
+ * entries, one per context element.
+ *
+ * The second one is the "strings array", it is of variable length and used to
+ * hold the variable-length strings. Each one of these strings is formatted as a
+ * UTF-8 C-string, meaning it will end with a "\0" byte to indicate its end.
+ * Entries in the first array may refer to offsets in the second array to point
+ * to relevant strings.
+ *
+ * The fixed-size entries in the entries array contain the following elements
+ * (size in bytes in parentheses):
+ *
+ * <ul>
+ * <li>The offset in the strings array pointing to the full context name, like
+ * "$app.myprovider:mycontext" (4)</li>
+ * <li>The context value type (1)</li>
+ * <li>The context value itself (8)</li>
+ * </ul>
+ *
+ * The context value type will indicate how many bytes are used for the value.
+ * If it is of String type, then we use 4 bytes to represent the offset in
+ * the strings array.
+ *
+ * So the total size of each entry is 13 bytes. All unused bytes (for context
+ * values shorter than 8 bytes for example) will be zeroed.
+ *
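+ * For example, a hypothetical context "$app.myRetriever:myCtx" holding
+ * the Integer value 42 produces one 13-byte entry: a 4-byte offset into
+ * the strings array (where that name is stored as a C-string), the type
+ * byte 1 (INTEGER), the 4-byte value, and 4 bytes of zero padding.
+ *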
+ * @author Alexandre Montplaisir
+ */
+public class ContextInfoSerializer {
+
+ private enum DataType {
+ NULL(0),
+ INTEGER(1),
+ LONG(2),
+ DOUBLE(3),
+ FLOAT(4),
+ BYTE(5),
+ SHORT(6),
+ BOOLEAN(7),
+ STRING(8);
+
+ private final byte value;
+
+ private DataType(int value) {
+ this.value = (byte) value;
+ }
+
+ public byte getValue() {
+ return value;
+ }
+ }
+
+ /**
+ * Class used to wrap the two byte arrays returned by
+ * {@link #queryAndSerializeRequestedContexts}.
+ */
+ public static class SerializedContexts {
+
+ private final byte[] contextEntries;
+ private final byte[] contextStrings;
+
+ /**
+ * Constructor
+ *
+ * @param entries
+ * Arrays for the fixed-size context entries.
+ * @param strings
+ * Arrays for variable-length strings
+ */
+ public SerializedContexts(byte[] entries, byte[] strings) {
+ contextEntries = entries;
+ contextStrings = strings;
+ }
+
+ /**
+ * @return The entries array
+ */
+ public byte[] getEntriesArray() {
+ return contextEntries;
+ }
+
+ /**
+ * @return The strings array
+ */
+ public byte[] getStringsArray() {
+ return contextStrings;
+ }
+ }
+
+ private static final String UST_APP_CTX_PREFIX = "$app.";
+ private static final int ENTRY_LENGTH = 13;
+ private static final ByteOrder NATIVE_ORDER = ByteOrder.nativeOrder();
+ private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
+ private static final SerializedContexts EMPTY_CONTEXTS = new SerializedContexts(new byte[0], new byte[0]);
+
+ /**
+ * From the list of requested contexts in the tracing session, look them up
+ * in the {@link ContextInfoManager}, retrieve the available ones, and
+ * serialize them into a byte array.
+ *
+ * @param enabledContexts
+ * The contexts that are enabled in the tracing session (indexed
+ * first by retriever name, then by context names). Should come
+ * from the LTTng Agent.
+ * @return The byte array representing the intersection of the requested and
+ * available contexts.
+ */
+ public static SerializedContexts queryAndSerializeRequestedContexts(Collection<Map.Entry<String, Map<String, Integer>>> enabledContexts) {
+ if (enabledContexts.isEmpty()) {
+ /* Early return if there is no requested context information */
+ return EMPTY_CONTEXTS;
+ }
+
+ ContextInfoManager contextManager;
+ try {
+ contextManager = ContextInfoManager.getInstance();
+ } catch (IOException e) {
+ /*
+ * The JNI library is not available, do not send any context
+ * information. No retriever could have been defined anyway.
+ */
+ return EMPTY_CONTEXTS;
+ }
+
+ /* Compute the total number of contexts (flatten the map) */
+ int totalArraySize = 0;
+ for (Map.Entry<String, Map<String, Integer>> contexts : enabledContexts) {
+ totalArraySize += contexts.getValue().size() * ENTRY_LENGTH;
+ }
+
+ /* Prepare the ByteBuffer that will generate the "entries" array */
+ ByteBuffer entriesBuffer = ByteBuffer.allocate(totalArraySize);
+ entriesBuffer.order(NATIVE_ORDER);
+ entriesBuffer.clear();
+
+ /* Prepare the streams that will generate the "strings" array */
+ ByteArrayOutputStream stringsBaos = new ByteArrayOutputStream();
+ DataOutputStream stringsDos = new DataOutputStream(stringsBaos);
+
+ try {
+ for (Map.Entry<String, Map<String, Integer>> entry : enabledContexts) {
+ String requestedRetrieverName = entry.getKey();
+ Map<String, Integer> requestedContexts = entry.getValue();
+
+ IContextInfoRetriever retriever = contextManager.getContextInfoRetriever(requestedRetrieverName);
+
+ for (String requestedContext : requestedContexts.keySet()) {
+ Object contextInfo;
+ if (retriever == null) {
+ contextInfo = null;
+ } else {
+ contextInfo = retriever.retrieveContextInfo(requestedContext);
+ /*
+ * 'contextInfo' can still be null here, which would
+ * indicate the retriever does not supply this context.
+ * We will still write this information so that the
+ * tracer can know about it.
+ */
+ }
+
+ /* Serialize the result to the buffers */
+ // FIXME Eventually pass the retriever name only once?
+ String fullContextName = (UST_APP_CTX_PREFIX + requestedRetrieverName + ':' + requestedContext);
+ byte[] strArray = fullContextName.getBytes(UTF8_CHARSET);
+
+ entriesBuffer.putInt(stringsDos.size());
+ stringsDos.write(strArray);
+ stringsDos.writeChar('\0'); /* writeChar() emits 2 bytes; the first one null-terminates the string */
+
+ LttngUstAgentLogger.log(ContextInfoSerializer.class,
+ "ContextInfoSerializer: Context to be sent through JNI: " + fullContextName + '=' +
+ (contextInfo == null ? "null" : contextInfo.toString()));
+
+ serializeContextInfo(entriesBuffer, stringsDos, contextInfo);
+ }
+ }
+
+ stringsDos.flush();
+ stringsBaos.flush();
+
+ } catch (IOException e) {
+ /*
+ * Should not happen because we are wrapping a
+ * ByteArrayOutputStream, which writes to memory
+ */
+ e.printStackTrace();
+ }
+
+ byte[] entriesArray = entriesBuffer.array();
+ byte[] stringsArray = stringsBaos.toByteArray();
+ return new SerializedContexts(entriesArray, stringsArray);
+ }
+
+ private static final int CONTEXT_VALUE_LENGTH = 8;
+
+ private static void serializeContextInfo(ByteBuffer entriesBuffer, DataOutputStream stringsDos, Object contextInfo) throws IOException {
+ int remainingBytes;
+ if (contextInfo == null) {
+ entriesBuffer.put(DataType.NULL.getValue());
+ remainingBytes = CONTEXT_VALUE_LENGTH;
+
+ } else if (contextInfo instanceof Integer) {
+ entriesBuffer.put(DataType.INTEGER.getValue());
+ entriesBuffer.putInt(((Integer) contextInfo).intValue());
+ remainingBytes = CONTEXT_VALUE_LENGTH - 4;
+
+ } else if (contextInfo instanceof Long) {
+ entriesBuffer.put(DataType.LONG.getValue());
+ entriesBuffer.putLong(((Long) contextInfo).longValue());
+ remainingBytes = CONTEXT_VALUE_LENGTH - 8;
+
+ } else if (contextInfo instanceof Double) {
+ entriesBuffer.put(DataType.DOUBLE.getValue());
+ entriesBuffer.putDouble(((Double) contextInfo).doubleValue());
+ remainingBytes = CONTEXT_VALUE_LENGTH - 8;
+
+ } else if (contextInfo instanceof Float) {
+ entriesBuffer.put(DataType.FLOAT.getValue());
+ entriesBuffer.putFloat(((Float) contextInfo).floatValue());
+ remainingBytes = CONTEXT_VALUE_LENGTH - 4;
+
+ } else if (contextInfo instanceof Byte) {
+ entriesBuffer.put(DataType.BYTE.getValue());
+ entriesBuffer.put(((Byte) contextInfo).byteValue());
+ remainingBytes = CONTEXT_VALUE_LENGTH - 1;
+
+ } else if (contextInfo instanceof Short) {
+ entriesBuffer.put(DataType.SHORT.getValue());
+ entriesBuffer.putShort(((Short) contextInfo).shortValue());
+ remainingBytes = CONTEXT_VALUE_LENGTH - 2;
+
+ } else if (contextInfo instanceof Boolean) {
+ entriesBuffer.put(DataType.BOOLEAN.getValue());
+ boolean b = ((Boolean) contextInfo).booleanValue();
+ /* Converted to one byte, write 1 for true, 0 for false */
+ entriesBuffer.put((byte) (b ? 1 : 0));
+ remainingBytes = CONTEXT_VALUE_LENGTH - 1;
+
+ } else {
+ /* Also includes the case of Character. */
+ /*
+ * We'll write the object as a string, into the strings array. We
+ * will write the corresponding offset to the entries array.
+ */
+ String str = contextInfo.toString();
+ byte[] strArray = str.getBytes(UTF8_CHARSET);
+
+ entriesBuffer.put(DataType.STRING.getValue());
+
+ entriesBuffer.putInt(stringsDos.size());
+ stringsDos.write(strArray);
+ stringsDos.writeChar('\0');
+
+ remainingBytes = CONTEXT_VALUE_LENGTH - 4;
+ }
+ entriesBuffer.position(entriesBuffer.position() + remainingBytes);
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.context;
+
+/**
+ * Context-retrieving object specified by the application to extract
+ * application-specific context information, which can then be passed on to the
+ * Java agents and saved to a trace.
+ *
+ * Retriever objects should be registered to the {@link ContextInfoManager} to
+ * make them available to the LTTng agents.
+ *
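+ * A minimal sketch (the retriever name and key are hypothetical, and we
+ * assume the manager's {@code registerContextInfoRetriever} method):
+ *
+ * <pre>{@code
+ * IContextInfoRetriever retriever = new IContextInfoRetriever() {
+ *     public Object retrieveContextInfo(String key) {
+ *         if ("answer".equals(key)) {
+ *             return Integer.valueOf(42);
+ *         }
+ *         return null;
+ *     }
+ * };
+ * // getInstance() may throw an IOException if the JNI library is unavailable
+ * ContextInfoManager.getInstance().registerContextInfoRetriever("myRetriever", retriever);
+ * }</pre>
+ *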
+ * @author Alexandre Montplaisir
+ */
+public interface IContextInfoRetriever {
+
+ /**
+ * Retrieve a piece of context information from the application, identified
+ * by a key.
+ *
+ * @param key
+ * The key identifying the context information
+ * @return The context information.
+ */
+ Object retrieveContextInfo(String key);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.context;
+
+/**
+ * Virtual class containing the Java side of the LTTng-UST context provider
+ * registering/unregistering methods.
+ *
+ * @author Alexandre Montplaisir
+ */
+final class LttngContextApi {
+
+ private LttngContextApi() {}
+
+ /**
+ * Register a context provider to UST.
+ *
+ * The callbacks are the same for all providers, and are defined in the .c
+ * file. The only information needed is the name of the retriever (which is
+ * called a "provider" from UST's point of view).
+ *
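+ * Illustrative usage from this package (the provider name is hypothetical):
+ *
+ * <pre>{@code
+ * long ref = LttngContextApi.registerProvider("myRetriever");
+ * // ... tracing happens ...
+ * LttngContextApi.unregisterProvider(ref);
+ * }</pre>
+ *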
+ * @param provider_name
+ * The name of the provider
+ * @return The pointer to the created provider object. It is opaque to the
+ * Java side, but is needed for
+ * {@link #unregisterProvider(long)}.
+ */
+ static native long registerProvider(String provider_name);
+
+ /**
+ * Unregister a previously-registered context provider from UST.
+ *
+ * @param provider_ref
+ * The pointer to the provider object, obtained from
+ * {@link #registerProvider}
+ */
+ static native void unregisterProvider(long provider_ref);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.filter;
+
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+
+import org.lttng.ust.agent.session.EventRule;
+
+/**
+ * Singleton class managing the filter notifications.
+ *
+ * Applications can register a {@link IFilterChangeListener} to be notified when
+ * event filtering rules change in the tracing sessions.
+ *
+ * @author Alexandre Montplaisir
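+ * A minimal sketch (assuming an existing {@link IFilterChangeListener}
+ * implementation named {@code myListener}):
+ *
+ * <pre>{@code
+ * FilterChangeNotifier fcn = FilterChangeNotifier.getInstance();
+ * fcn.registerListener(myListener); // the listener receives a "statedump" of current rules
+ * // ...
+ * fcn.unregisterListener(myListener);
+ * }</pre>
+ *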
+ */
+public final class FilterChangeNotifier {
+
+ /** Lazy-loaded singleton instance object */
+ private static FilterChangeNotifier instance = null;
+
+ private final Map<EventRule, Integer> enabledEventRules = new HashMap<EventRule, Integer>();
+ private final Collection<IFilterChangeListener> registeredListeners = new LinkedList<IFilterChangeListener>();
+
+ /**
+ * Private constructor; this singleton class should not be instantiated directly.
+ */
+ private FilterChangeNotifier() {
+ }
+
+ /**
+ * Get the singleton instance, initializing it if needed.
+ *
+ * @return The singleton instance
+ */
+ public static synchronized FilterChangeNotifier getInstance() {
+ if (instance == null) {
+ instance = new FilterChangeNotifier();
+ }
+ return instance;
+ }
+
+ /**
+ * Notify the filter manager that a new rule was enabled in a tracing
+ * session ("lttng enable-event ...")
+ *
+ * This is meant to be called by the LTTng Agent only. External Java
+ * applications should not call this.
+ *
+ * @param rule
+ * The rule that was added
+ */
+ public synchronized void addEventRule(EventRule rule) {
+ Integer count = enabledEventRules.get(rule);
+ if (count == null) {
+ /*
+ * This is the first instance of this rule being enabled. Add it to
+ * the map and send notifications to the registered listeners.
+ */
+ enabledEventRules.put(rule, Integer.valueOf(1));
+ notifyForAddedRule(rule);
+ return;
+ }
+ if (count.intValue() <= 0) {
+ /* It should not have been in the map! */
+ throw new IllegalStateException();
+ }
+ /*
+ * This exact event rule was already enabled, just increment its
+ * refcount without sending notifications
+ */
+ enabledEventRules.put(rule, Integer.valueOf(count.intValue() + 1));
+ }
+
+ /**
+ * Notify the filter manager that an event name was disabled in the tracing
+ * sessions ("lttng disable-event ...").
+ *
+ * The "disable-event" only specifies an event name. This means all the
+ * rules containing this event name are to be disabled.
+ *
+ * This is meant to be called by the LTTng Agent only. External Java
+ * applications should not call this.
+ *
+ * @param eventName
+ * The event name to disable
+ */
+ public synchronized void removeEventRules(String eventName) {
+ List<EventRule> rulesToRemove = new LinkedList<EventRule>();
+
+ for (EventRule eventRule : enabledEventRules.keySet()) {
+ if (eventRule.getEventName().equals(eventName)) {
+ rulesToRemove.add(eventRule);
+ }
+ }
+ /*
+ * We cannot modify the map while iterating on it. We have to do the
+ * removal separately from the iteration above.
+ */
+ for (EventRule rule : rulesToRemove) {
+ removeEventRule(rule);
+ }
+ }
+
+ private synchronized void removeEventRule(EventRule eventRule) {
+ Integer count = enabledEventRules.get(eventRule);
+ if (count == null || count.intValue() <= 0) {
+ /*
+ * We were asked to disable an event rule that was not enabled
+ * previously. Command error?
+ */
+ throw new IllegalStateException();
+ }
+ if (count.intValue() == 1) {
+ /*
+ * This is the last instance of this event rule being disabled,
+ * remove it from the map and send notifications of this rule being
+ * gone.
+ */
+ enabledEventRules.remove(eventRule);
+ notifyForRemovedRule(eventRule);
+ return;
+ }
+ /*
+ * Other sessions/daemons are still looking for this event rule, simply
+ * decrement its refcount, and do not send notifications.
+ */
+ enabledEventRules.put(eventRule, Integer.valueOf(count.intValue() - 1));
+ }
+
+ /**
+ * Register a new listener to the manager.
+ *
+ * @param listener
+ * The listener to add
+ */
+ public synchronized void registerListener(IFilterChangeListener listener) {
+ registeredListeners.add(listener);
+
+ /* Send the current rules to the new listener ("statedump") */
+ for (EventRule rule : enabledEventRules.keySet()) {
+ listener.eventRuleAdded(rule);
+ }
+ }
+
+ /**
+ * Unregister a listener from the manager.
+ *
+ * @param listener
+ * The listener to remove
+ */
+ public synchronized void unregisterListener(IFilterChangeListener listener) {
+ registeredListeners.remove(listener);
+ }
+
+ private void notifyForAddedRule(final EventRule rule) {
+ for (IFilterChangeListener notifier : registeredListeners) {
+ notifier.eventRuleAdded(rule);
+ }
+ }
+
+ private void notifyForRemovedRule(final EventRule rule) {
+ for (IFilterChangeListener notifier : registeredListeners) {
+ notifier.eventRuleRemoved(rule);
+ }
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.filter;
+
+import org.lttng.ust.agent.session.EventRule;
+
+/**
+ * Filter notification listener interface.
+ * <p>
+ * Applications wanting to be notified of event filtering rule changes should
+ * implement this interface, then register their listener using
+ * {@link FilterChangeNotifier#registerListener}.
+ * </p>
+ * <p>
+ * The callbacks defined in this interface will be called whenever an event rule
+ * is added or removed. The manager will take care of the reference-counting in
+ * case multiple tracing sessions enable the exact same rules. For example, the
+ * {@link #eventRuleRemoved} callback is only called when there are no more
+ * sessions interested in it.
+ * </p>
+ * <p>
+ * Do not forget to unregister the listener after use, using
+ * {@link FilterChangeNotifier#unregisterListener}. If you do not, or if
+ * you use an anonymous listener for example, it will remain attached until
+ * the application shuts down completely.
+ * </p>
+ * <p>
+ * Only one thread is used to dispatch notifications, sequentially. This means
+ * that if a callback hangs it will prevent other listeners from receiving
+ * notifications. Please take care of not blocking inside the listener
+ * callbacks, and use separate threads for potentially long or blocking
+ * operations.
+ * </p>
+ *
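+ * <p>
+ * A minimal sketch of such a listener (the callback bodies are illustrative
+ * only):
+ * </p>
+ *
+ * <pre>{@code
+ * IFilterChangeListener listener = new IFilterChangeListener() {
+ *     public void eventRuleAdded(EventRule rule) {
+ *         System.out.println("Enabled: " + rule.getEventName());
+ *     }
+ *     public void eventRuleRemoved(EventRule rule) {
+ *         System.out.println("Disabled: " + rule.getEventName());
+ *     }
+ * };
+ * FilterChangeNotifier.getInstance().registerListener(listener);
+ * }</pre>
+ *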
+ * @author Alexandre Montplaisir
+ */
+public interface IFilterChangeListener {
+
+ /**
+ * Notification that a new event rule is now enabled in the tracing
+ * sessions.
+ *
+ * @param rule
+ * The event rule that was enabled
+ */
+ void eventRuleAdded(EventRule rule);
+
+ /**
+ * Notification that an existing event rule is now disabled in the tracing
+ * sessions.
+ *
+ * @param rule
+ * The event rule that was disabled
+ */
+ void eventRuleRemoved(EventRule rule);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.session;
+
+/**
+ * Event filtering rule present in a tracing session.
+ *
+ * It typically comes from a "lttng enable-event" command, and contains an
+ * event name, a log level selector and a filter string.
+ *
+ * @author Alexandre Montplaisir
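+ * For example, a rule roughly corresponding to
+ * "lttng enable-event --jul myLogger --loglevel-only JUL_INFO" could look
+ * like this (the logger name is hypothetical, and the numerical log level,
+ * here JUL's INFO = 800, depends on the tracing domain):
+ *
+ * <pre>{@code
+ * EventRule rule = new EventRule("myLogger",
+ *         new LogLevelSelector(800, LogLevelSelector.LogLevelType.LTTNG_EVENT_LOGLEVEL_SINGLE),
+ *         null);
+ * }</pre>
+ *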
+ */
+public class EventRule {
+
+ private final String eventName;
+ private final LogLevelSelector logLevelSelector;
+ private final String filterString;
+
+ /**
+ * Constructor.
+ *
+ * @param eventName
+ * The name of the tracepoint
+ * @param logLevelSelector
+ * The log level of the event rule
+ * @param filterString
+ * The filtering string. May be null if there is no extra filter.
+ */
+ public EventRule(String eventName, LogLevelSelector logLevelSelector, String filterString) {
+ this.eventName = eventName;
+ this.logLevelSelector = logLevelSelector;
+ this.filterString = filterString;
+ }
+
+ /**
+ * Get the event name of this rule.
+ *
+ * @return The event name
+ */
+ public String getEventName() {
+ return eventName;
+ }
+
+ /**
+ * Get the log level filter configuration of the rule.
+ *
+ * @return The log level selector
+ */
+ public LogLevelSelector getLogLevelSelector() {
+ return logLevelSelector;
+ }
+
+ /**
+ * Get the filter string associated with this rule.
+ *
+ * @return The filter string, may be null for no filter string.
+ */
+ public String getFilterString() {
+ return filterString;
+ }
+
+ // ------------------------------------------------------------------------
+ // Methods from Object
+ // ------------------------------------------------------------------------
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + ((eventName == null) ? 0 : eventName.hashCode());
+ result = prime * result + ((filterString == null) ? 0 : filterString.hashCode());
+ result = prime * result + ((logLevelSelector == null) ? 0 : logLevelSelector.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ EventRule other = (EventRule) obj;
+
+ if (eventName == null) {
+ if (other.eventName != null) {
+ return false;
+ }
+ } else if (!eventName.equals(other.eventName)) {
+ return false;
+ }
+ /* else, continue */
+
+ if (filterString == null) {
+ if (other.filterString != null) {
+ return false;
+ }
+ } else if (!filterString.equals(other.filterString)) {
+ return false;
+ }
+ /* else, continue */
+
+ if (logLevelSelector == null) {
+ if (other.logLevelSelector != null) {
+ return false;
+ }
+ } else if (!logLevelSelector.equals(other.logLevelSelector)) {
+ return false;
+ }
+ /* else, continue */
+
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ return "Event name = " + getEventName() +
+ ", Log level selector = (" + getLogLevelSelector().toString() + ")" +
+ ", Filter string = " + getFilterString();
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.session;
+
+/**
+ * Log level filtering element, which is part of an {@link EventRule}.
+ *
+ * @author Alexandre Montplaisir
+ */
+public class LogLevelSelector {
+
+ /**
+ * The type of log level filter that is enabled.
+ *
+ * Defined from lttng-tools' include/lttng/event.h.
+ */
+ public enum LogLevelType {
+ /**
+ * All log levels are enabled. This overrides the value of
+ * {@link LogLevelSelector#getLogLevel}.
+ */
+ LTTNG_EVENT_LOGLEVEL_ALL(0),
+
+ /** This log level along with all log levels of higher severity are enabled. */
+ LTTNG_EVENT_LOGLEVEL_RANGE(1),
+
+ /** Only this exact log level is enabled. */
+ LTTNG_EVENT_LOGLEVEL_SINGLE(2);
+
+ private final int value;
+
+ private LogLevelType(int value) {
+ this.value = value;
+ }
+
+ /**
+ * Get the numerical (int) value representing this log level type in the
+ * communication protocol.
+ *
+ * @return The int value
+ */
+ public int getValue() {
+ return value;
+ }
+
+ static LogLevelType fromValue(int val) {
+ switch (val) {
+ case 0:
+ return LTTNG_EVENT_LOGLEVEL_ALL;
+ case 1:
+ return LTTNG_EVENT_LOGLEVEL_RANGE;
+ case 2:
+ return LTTNG_EVENT_LOGLEVEL_SINGLE;
+ default:
+ throw new IllegalArgumentException();
+ }
+ }
+ }
+
+ private final int logLevel;
+ private final LogLevelType logLevelType;
+
+ /**
+ * Constructor using numerical values straight from the communication
+ * protocol.
+ *
+ * @param logLevel
+ * The numerical value of the log level. The exact value depends
+ * on the tracing domain, see include/lttng/event.h in the
+ * lttng-tools tree for the complete enumeration.
+ * @param logLevelType
+ * The numerical value of the log level type. It will be
+ * converted to a {@link LogLevelType} by this constructor.
+ * @throws IllegalArgumentException
+ * If the 'logLevelType' does not correspond to a valid value.
+ */
+ public LogLevelSelector(int logLevel, int logLevelType) {
+ this.logLevel = logLevel;
+ this.logLevelType = LogLevelType.fromValue(logLevelType);
+ }
+
+ /**
+ * "Manual" constructor, specifying the {@link LogLevelType} directly.
+ *
+ * @param logLevel
+ * The numerical value of the log level. The exact value depends
+ * on the tracing domain, see include/lttng/event.h in the
+ * lttng-tools tree for the complete enumeration.
+ * @param type
+ * The log level filter type.
+ */
+ public LogLevelSelector(int logLevel, LogLevelType type) {
+ this.logLevel = logLevel;
+ this.logLevelType = type;
+ }
+
+ /**
+ * Get the numerical value of the log level element. Does not apply if
+ * {@link #getLogLevelType} returns
+ * {@link LogLevelType#LTTNG_EVENT_LOGLEVEL_ALL}.
+ *
+ * @return The numerical value of the log level
+ */
+ public int getLogLevel() {
+ return logLevel;
+ }
+
+ /**
+ * Get the log level filter type.
+ *
+ * @return The log level filter type
+ */
+ public LogLevelType getLogLevelType() {
+ return logLevelType;
+ }
+
+ /**
+ * Helper method to determine if an event with the given log level should be
+ * traced when considering this filter.
+ *
+ * For example, if this filter object represents "severity 5 or higher",
+ * and the log level passed in parameter is "8", it will report a match
+ * (a higher value means a higher severity).
+ *
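+ * In code, using illustrative values:
+ *
+ * <pre>{@code
+ * LogLevelSelector sel = new LogLevelSelector(5, LogLevelType.LTTNG_EVENT_LOGLEVEL_RANGE);
+ * sel.matches(8); // true, 8 is at least as severe as 5
+ * sel.matches(3); // false
+ * }</pre>
+ *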
+ * @param targetLogLevel
+ * The log level value of the event to check for
+ * @return Should this event be traced, or not
+ */
+ public boolean matches(int targetLogLevel) {
+ switch (logLevelType) {
+ case LTTNG_EVENT_LOGLEVEL_ALL:
+ return true;
+ case LTTNG_EVENT_LOGLEVEL_RANGE:
+ return (targetLogLevel >= logLevel);
+ case LTTNG_EVENT_LOGLEVEL_SINGLE:
+ return (targetLogLevel == logLevel);
+ default:
+ throw new IllegalStateException();
+ }
+ }
+
+ // ------------------------------------------------------------------------
+ // Methods from Object
+ // ------------------------------------------------------------------------
+
+ @Override
+ public int hashCode() {
+ final int prime = 31;
+ int result = 1;
+ result = prime * result + logLevel;
+ result = prime * result + ((logLevelType == null) ? 0 : logLevelType.hashCode());
+ return result;
+ }
+
+ @Override
+ public boolean equals(Object obj) {
+ if (this == obj) {
+ return true;
+ }
+ if (obj == null) {
+ return false;
+ }
+ if (getClass() != obj.getClass()) {
+ return false;
+ }
+ LogLevelSelector other = (LogLevelSelector) obj;
+
+ if (logLevel != other.logLevel) {
+ return false;
+ }
+ if (logLevelType != other.logLevelType) {
+ return false;
+ }
+ return true;
+ }
+
+ @Override
+ public String toString() {
+ if (getLogLevelType() == LogLevelType.LTTNG_EVENT_LOGLEVEL_ALL) {
+ return LogLevelType.LTTNG_EVENT_LOGLEVEL_ALL.toString();
+ }
+ return String.valueOf(getLogLevel()) + ", " + getLogLevelType().toString();
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.utils;
+
+/**
+ * Logging infrastructure for the lttng-ust Java agent. It prints log messages
+ * to stderr but only when the environment variable LTTNG_UST_DEBUG is defined.
+ *
+ * @author Alexandre Montplaisir
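+ * A minimal sketch ({@code MyApp} is a hypothetical caller):
+ *
+ * <pre>{@code
+ * // Prints "MyApp: starting up" to stderr only if LTTNG_UST_DEBUG is set,
+ * // e.g. when the application is launched as: LTTNG_UST_DEBUG=1 java MyApp
+ * LttngUstAgentLogger.log(MyApp.class, "starting up");
+ * }</pre>
+ *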
+ */
+public class LttngUstAgentLogger {
+
+ private static final String ENV_VAR_NAME = "LTTNG_UST_DEBUG";
+ private static final boolean LOGGING_ENABLED = (System.getenv(ENV_VAR_NAME) != null);
+
+ /**
+ * Log event. Will be printed to stderr if the environment variable
+ * "LTTNG_UST_DEBUG" is defined.
+ *
+ * @param c
+ * The class logging the message (should normally be called with
+ * {@link #getClass()}).
+ * @param message
+ * The message to print
+ */
+ public static void log(Class<?> c, String message) {
+ if (LOGGING_ENABLED) {
+ System.err.println(c.getSimpleName() + ": " + message);
+ }
+ }
+}
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+JAVAROOT = .
+AM_JAVACFLAGS = -classpath $(CLASSPATH):$(builddir)/../lttng-ust-agent-common/lttng-ust-agent-common.jar
+
+pkgpath = org/lttng/ust/agent/jul
+
+jarfile_version = 1.0.0
+jarfile_manifest = $(srcdir)/Manifest.txt
+jarfile_symlink = lttng-ust-agent-jul.jar
+jarfile = lttng-ust-agent-jul-$(jarfile_version).jar
+
+jardir = $(datadir)/java
+
+juljniout = ../../jni/jul
+
+dist_noinst_JAVA = $(pkgpath)/LttngJulAgent.java \
+ $(pkgpath)/LttngJulApi.java \
+ $(pkgpath)/LttngLogHandler.java
+
+dist_noinst_DATA = $(jarfile_manifest)
+
+jar_DATA = $(jarfile)
+
+stamp = jul-jni-header.stamp
+classes = $(pkgpath)/*.class
+
+$(jarfile): classnoinst.stamp
+ $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) $(classes) && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink)
+
+if !HAVE_JAVAH
+# If we don't have javah, assume we are running openjdk >= 10 and use javac
+# to generate the jni header file.
+AM_JAVACFLAGS += -h $(juljniout)
+else
+jul-jni-header.stamp: $(dist_noinst_JAVA)
+ $(JAVAH) -classpath $(CLASSPATH):$(srcdir) -d $(juljniout) $(JAVAHFLAGS) org.lttng.ust.agent.jul.LttngJulApi && \
+ echo "JUL JNI header generated" > jul-jni-header.stamp
+
+all-local: $(stamp)
+endif
+
+install-data-hook:
+ cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink)
+
+uninstall-hook:
+ cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink)
+
+CLEANFILES = *.jar \
+ $(pkgpath)/*.class \
+ jul-jni-header.stamp \
+ $(juljniout)/org_lttng_ust_agent_jul_LttngJulApi.h
--- /dev/null
+Name: org/lttng/ust/agent/jul/
+Specification-Title: LTTng UST Java Agent JUL Integration
+Specification-Version: 1.0.0
+Specification-Vendor: LTTng Project
+Implementation-Title: org.lttng.ust.agent.jul
+Implementation-Version: 1.0.0
+Implementation-Vendor: LTTng Project
+Class-Path: lttng-ust-agent-common.jar
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.jul;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.logging.Handler;
+import java.util.logging.LogManager;
+import java.util.logging.Logger;
+
+import org.lttng.ust.agent.AbstractLttngAgent;
+
+/**
+ * Agent implementation for tracing from JUL loggers.
+ *
+ * @author Alexandre Montplaisir
+ */
+class LttngJulAgent extends AbstractLttngAgent<LttngLogHandler> {
+
+ private static LttngJulAgent instance = null;
+
+ private LttngJulAgent() {
+ super(Domain.JUL);
+ }
+
+ public static synchronized LttngJulAgent getInstance() {
+ if (instance == null) {
+ instance = new LttngJulAgent();
+ }
+ return instance;
+ }
+
+ @Override
+ public Collection<String> listAvailableEvents() {
+ Set<String> ret = new TreeSet<String>();
+
+ List<String> loggersNames = Collections.list(LogManager.getLogManager().getLoggerNames());
+ for (String name : loggersNames) {
+ /*
+ * Skip the root logger and the "global" logger. An empty string is
+ * not a valid event name in LTTng.
+ */
+ if (name.equals("") || name.equals("global")) {
+ continue;
+ }
+
+ /*
+ * Check if that logger has at least one LTTng JUL handler attached.
+ */
+ Logger logger = Logger.getLogger(name);
+ if (hasLttngHandlerAttached(logger)) {
+ ret.add(name);
+ }
+ }
+
+ return ret;
+ }
+
+ private static boolean hasLttngHandlerAttached(Logger logger) {
+ for (Handler handler : logger.getHandlers()) {
+ if (handler instanceof LttngLogHandler) {
+ return true;
+ }
+ }
+
+ /*
+ * A parent logger, if any, may be connected to an LTTng handler. In
+ * this case, we will want to include this child logger in the output,
+ * since it will be accessible by LTTng.
+ */
+ Logger parent = logger.getParent();
+ if (parent != null) {
+ return hasLttngHandlerAttached(parent);
+ }
+
+ /*
+ * We have reached the root logger and have not found any LTTng handler,
+ * this event will not be accessible.
+ */
+ return false;
+ }
+
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.jul;
+
+/**
+ * Virtual class containing the Java side of the LTTng-JUL JNI API methods.
+ *
+ * @author Alexandre Montplaisir
+ */
+final class LttngJulApi {
+
+ private LttngJulApi() {}
+
+ static native void tracepoint(String msg,
+ String logger_name,
+ String class_name,
+ String method_name,
+ long millis,
+ int log_level,
+ int thread_id);
+
+ static native void tracepointWithContext(String msg,
+ String logger_name,
+ String class_name,
+ String method_name,
+ long millis,
+ int log_level,
+ int thread_id,
+ byte[] contextEntries,
+ byte[] contextStrings);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2013 David Goulet <dgoulet@efficios.com>
+ */
+
+package org.lttng.ust.agent.jul;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.logging.Formatter;
+import java.util.logging.Handler;
+import java.util.logging.LogRecord;
+
+import org.lttng.ust.agent.ILttngAgent;
+import org.lttng.ust.agent.ILttngHandler;
+import org.lttng.ust.agent.context.ContextInfoSerializer;
+
+/**
+ * LTTng-UST JUL log handler.
+ *
+ * Applications can attach this handler to their
+ * {@link java.util.logging.Logger} to have it generate UST events from logging
+ * events received through the logger.
+ *
+ * It sends its events to UST via the JNI library "liblttng-ust-jul-jni.so".
+ * Make sure this library is available before using this handler.
+ *
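+ * A minimal sketch (the logger name is hypothetical):
+ *
+ * <pre>{@code
+ * Logger logger = Logger.getLogger("org.example.myapp");
+ * // The constructor throws an IOException if the JNI library cannot be loaded
+ * logger.addHandler(new LttngLogHandler());
+ * logger.info("This message can now be traced by LTTng");
+ * }</pre>
+ *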
+ * @author Alexandre Montplaisir
+ * @author David Goulet
+ */
+public class LttngLogHandler extends Handler implements ILttngHandler {
+
+ private static final String SHARED_OBJECT_NAME = "lttng-ust-jul-jni";
+
+ /**
+ * Dummy Formatter object, so we can use its
+ * {@link Formatter#formatMessage(LogRecord)} method.
+ */
+ private static final Formatter FORMATTER = new Formatter() {
+ @Override
+ public String format(LogRecord record) {
+ throw new UnsupportedOperationException();
+ }
+ };
+
+ private final ILttngAgent<LttngLogHandler> agent;
+
+ /** Number of events logged (really sent through JNI) by this handler */
+ private final AtomicLong eventCount = new AtomicLong(0);
+
+ /**
+ * Constructor
+ *
+ * @throws IOException
+ * This handler requires the lttng-ust-jul-jni.so native
+ * library, through which it will send the trace events. This
+ * exception is thrown if this library cannot be found.
+ * @throws SecurityException
+ * We will forward any SecurityException that may be thrown when
+ * trying to load the JNI library.
+ */
+ public LttngLogHandler() throws IOException, SecurityException {
+ super();
+ /* Initialize LTTng UST tracer. */
+ try {
+ System.loadLibrary(SHARED_OBJECT_NAME); //$NON-NLS-1$
+ } catch (UnsatisfiedLinkError e) {
+ throw new IOException(e);
+ }
+
+ /* Register with the relevant agent. */
+ agent = LttngJulAgent.getInstance();
+ agent.registerHandler(this);
+ }
+
+ @Override
+ public synchronized void close() {
+ agent.unregisterHandler(this);
+ }
+
+ /**
+ * Get the number of events logged by this handler so far. This means the
+ * number of events actually sent through JNI to UST.
+ *
+ * @return The number of events logged so far
+ */
+ @Override
+ public long getEventCount() {
+ return eventCount.get();
+ }
+
+ @Override
+ public void flush() {
+ }
+
+ @Override
+ public void publish(LogRecord record) {
+ /*
+ * Check if the current message should be logged, according to the UST
+ * session settings.
+ */
+ if (!agent.isEventEnabled(record.getLoggerName())) {
+ return;
+ }
+
+ String formattedMessage = FORMATTER.formatMessage(record);
+
+ /* Retrieve all the requested context information we can find */
+ Collection<Entry<String, Map<String, Integer>>> enabledContexts = agent.getEnabledAppContexts();
+ ContextInfoSerializer.SerializedContexts contextInfo = ContextInfoSerializer.queryAndSerializeRequestedContexts(enabledContexts);
+
+ eventCount.incrementAndGet();
+
+ /*
+ * Specific tracepoint designed for JUL events. It carries the formatted
+ * message, the logger name, the caller's class and method names, the
+ * timestamp, the log level of the record and the thread ID.
+ */
+ LttngJulApi.tracepointWithContext(formattedMessage,
+ record.getLoggerName(),
+ record.getSourceClassName(),
+ record.getSourceMethodName(),
+ record.getMillis(),
+ record.getLevel().intValue(),
+ record.getThreadID(),
+ contextInfo.getEntriesArray(),
+ contextInfo.getStringsArray());
+ }
+
+}
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+JAVAROOT = .
+AM_JAVACFLAGS = -classpath $(CLASSPATH):$(builddir)/../lttng-ust-agent-common/lttng-ust-agent-common.jar
+
+pkgpath = org/lttng/ust/agent/log4j
+
+jarfile_version = 1.0.0
+jarfile_manifest = $(srcdir)/Manifest.txt
+jarfile_symlink = lttng-ust-agent-log4j.jar
+jarfile = lttng-ust-agent-log4j-$(jarfile_version).jar
+
+jardir = $(datadir)/java
+
+log4jjniout = ../../jni/log4j
+
+dist_noinst_JAVA = $(pkgpath)/LttngLog4jAgent.java \
+ $(pkgpath)/LttngLog4jApi.java \
+ $(pkgpath)/LttngLogAppender.java
+
+dist_noinst_DATA = $(jarfile_manifest)
+
+jar_DATA = $(jarfile)
+
+stamp = log4j-jni-header.stamp
+classes = $(pkgpath)/*.class
+
+$(jarfile): classnoinst.stamp
+ $(JAR) cfm $(JARFLAGS) $@ $(jarfile_manifest) $(classes) && rm -f $(jarfile_symlink) && $(LN_S) $@ $(jarfile_symlink)
+
+if !HAVE_JAVAH
+# If we don't have javah, assume we are running openjdk >= 10 and use javac
+# to generate the jni header file.
+AM_JAVACFLAGS += -h $(log4jjniout)
+else
+log4j-jni-header.stamp: $(dist_noinst_JAVA)
+ $(JAVAH) -classpath $(CLASSPATH):$(srcdir) -d $(log4jjniout) $(JAVAHFLAGS) org.lttng.ust.agent.log4j.LttngLog4jApi && \
+ echo "Log4j JNI header generated" > log4j-jni-header.stamp
+
+all-local: $(stamp)
+endif
+
+install-data-hook:
+ cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink) && $(LN_S) $(jarfile) $(jarfile_symlink)
+
+uninstall-hook:
+ cd $(DESTDIR)/$(jardir) && rm -f $(jarfile_symlink)
+
+CLEANFILES = *.jar \
+ $(pkgpath)/*.class \
+ log4j-jni-header.stamp \
+ $(log4jjniout)/org_lttng_ust_agent_log4j_LttngLog4jApi.h
--- /dev/null
+Name: org/lttng/ust/agent/log4j/
+Specification-Title: LTTng UST Java Agent Log4J 1.x Integration
+Specification-Version: 1.0.0
+Specification-Vendor: LTTng Project
+Implementation-Title: org.lttng.ust.agent.log4j
+Implementation-Version: 1.0.0
+Implementation-Vendor: LTTng Project
+Class-Path: lttng-ust-agent-common.jar
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.log4j;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.List;
+import java.util.Set;
+import java.util.TreeSet;
+
+import org.apache.log4j.Appender;
+import org.apache.log4j.Category;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+import org.lttng.ust.agent.AbstractLttngAgent;
+
+/**
+ * Agent implementation for tracing from Log4j 1.x loggers.
+ *
+ * @author Alexandre Montplaisir
+ */
+class LttngLog4jAgent extends AbstractLttngAgent<LttngLogAppender> {
+
+ private static LttngLog4jAgent instance = null;
+
+ private LttngLog4jAgent() {
+ super(Domain.LOG4J);
+ }
+
+ public static synchronized LttngLog4jAgent getInstance() {
+ if (instance == null) {
+ instance = new LttngLog4jAgent();
+ }
+ return instance;
+ }
+
+ @Override
+ public Collection<String> listAvailableEvents() {
+ Set<String> ret = new TreeSet<String>();
+
+ @SuppressWarnings("unchecked")
+ List<Logger> loggers = Collections.list(LogManager.getCurrentLoggers());
+ for (Logger logger : loggers) {
+ if (logger == null) {
+ continue;
+ }
+
+ /*
+ * Check if that logger has at least one LTTng log4j appender
+ * attached.
+ */
+ if (hasLttngAppenderAttached(logger)) {
+ ret.add(logger.getName());
+ }
+ }
+
+ return ret;
+ }
+
+ private static boolean hasLttngAppenderAttached(Category logger) {
+ @SuppressWarnings("unchecked")
+ Enumeration<Appender> appenders = logger.getAllAppenders();
+ if (appenders != null) {
+ for (Appender appender : Collections.list(appenders)) {
+ if (appender instanceof LttngLogAppender) {
+ return true;
+ }
+ }
+ }
+
+ /*
+ * A parent logger, if any, may be connected to an LTTng handler. In
+ * this case, we will want to include this child logger in the output,
+ * since it will be accessible by LTTng.
+ */
+ Category parent = logger.getParent();
+ if (parent != null) {
+ return hasLttngAppenderAttached(parent);
+ }
+
+ /*
+ * We have reached the root logger and have not found any LTTng handler,
+ * this event will not be accessible.
+ */
+ return false;
+ }
+
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+package org.lttng.ust.agent.log4j;
+
+/**
+ * Virtual class containing the Java side of the LTTng-log4j JNI API methods.
+ *
+ * @author Alexandre Montplaisir
+ */
+final class LttngLog4jApi {
+
+ private LttngLog4jApi() {}
+
+ static native void tracepoint(String msg,
+ String logger_name,
+ String class_name,
+ String method_name,
+ String file_name,
+ int line_number,
+ long timestamp,
+ int loglevel,
+ String thread_name);
+
+ static native void tracepointWithContext(String msg,
+ String logger_name,
+ String class_name,
+ String method_name,
+ String file_name,
+ int line_number,
+ long timestamp,
+ int loglevel,
+ String thread_name,
+ byte[] contextEntries,
+ byte[] contextStrings);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 EfficiOS Inc.
+ * Copyright (C) 2015 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2014 Christian Babeux <christian.babeux@efficios.com>
+ */
+
+package org.lttng.ust.agent.log4j;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.log4j.AppenderSkeleton;
+import org.apache.log4j.spi.LoggingEvent;
+import org.lttng.ust.agent.ILttngAgent;
+import org.lttng.ust.agent.ILttngHandler;
+import org.lttng.ust.agent.context.ContextInfoSerializer;
+
+/**
+ * LTTng-UST Log4j 1.x log handler.
+ *
+ * Applications can attach this appender to their
+ * {@link org.apache.log4j.Logger} to have it generate UST events from logging
+ * events received through the logger.
+ *
+ * It sends its events to UST via the JNI library "liblttng-ust-log4j-jni.so".
+ * Make sure this library is available before using this appender.
+ *
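+ * A minimal sketch (the logger name is hypothetical):
+ *
+ * <pre>{@code
+ * Logger logger = Logger.getLogger("org.example.myapp");
+ * // The constructor throws an IOException if the JNI library cannot be loaded
+ * logger.addAppender(new LttngLogAppender());
+ * logger.info("This message can now be traced by LTTng");
+ * }</pre>
+ *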
+ * @author Alexandre Montplaisir
+ * @author Christian Babeux
+ */
+public class LttngLogAppender extends AppenderSkeleton implements ILttngHandler {
+
+ private static final String SHARED_OBJECT_NAME = "lttng-ust-log4j-jni";
+
+ private final AtomicLong eventCount = new AtomicLong(0);
+
+ private final ILttngAgent<LttngLogAppender> agent;
+
+ /**
+ * Constructor
+ *
+ * @throws IOException
+ * This handler requires the lttng-ust-log4j-jni.so native
+ * library, through which it will send the trace events. This
+ * exception is thrown if this library cannot be found.
+ * @throws SecurityException
+ * We will forward any SecurityException that may be thrown when
+ * trying to load the JNI library.
+ */
+ public LttngLogAppender() throws IOException, SecurityException {
+ super();
+ /* Initialize LTTng UST tracer. */
+ try {
+ System.loadLibrary(SHARED_OBJECT_NAME); // $NON-NLS-1$
+ } catch (UnsatisfiedLinkError e) {
+ throw new IOException(e);
+ }
+
+ /* Register with the relevant agent. */
+ agent = LttngLog4jAgent.getInstance();
+ agent.registerHandler(this);
+ }
+
+ @Override
+ public synchronized void close() {
+ agent.unregisterHandler(this);
+ }
+
+ /**
+ * Get the number of events logged by this handler so far. This means the
+ * number of events actually sent through JNI to UST.
+ *
+ * @return The number of events logged so far
+ */
+ @Override
+ public long getEventCount() {
+ return eventCount.get();
+ }
+
+ @Override
+ public boolean requiresLayout() {
+ return false;
+ }
+
+ @Override
+ protected void append(LoggingEvent event) {
+ /*
+ * Check if the current message should be logged, according to the UST
+ * session settings.
+ */
+ if (!agent.isEventEnabled(event.getLoggerName())) {
+ return;
+ }
+
+ /*
+ * The line number returned from LocationInformation is a string. At
+ * least try to convert to a proper int.
+ */
+ int line;
+ try {
+ String lineString = event.getLocationInformation().getLineNumber();
+ line = Integer.parseInt(lineString);
+ } catch (NumberFormatException n) {
+ line = -1;
+ }
+
+ /* Retrieve all the requested context information we can find */
+ Collection<Entry<String, Map<String, Integer>>> enabledContexts = agent.getEnabledAppContexts();
+ ContextInfoSerializer.SerializedContexts contextInfo = ContextInfoSerializer.queryAndSerializeRequestedContexts(enabledContexts);
+
+ eventCount.incrementAndGet();
+
+ LttngLog4jApi.tracepointWithContext(event.getRenderedMessage(),
+ event.getLoggerName(),
+ event.getLocationInformation().getClassName(),
+ event.getLocationInformation().getMethodName(),
+ event.getLocationInformation().getFileName(),
+ line,
+ event.getTimeStamp(),
+ event.getLevel().toInt(),
+ event.getThreadName(),
+ contextInfo.getEntriesArray(),
+ contextInfo.getStringsArray());
+ }
+
+}
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+SUBDIRS = common
+
+if ENABLE_JAVA_AGENT_WITH_JUL
+SUBDIRS += jul
+endif
+
+if ENABLE_JAVA_AGENT_WITH_LOG4J
+SUBDIRS += log4j
+endif
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
+
+lib_LTLIBRARIES = liblttng-ust-context-jni.la
+liblttng_ust_context_jni_la_SOURCES = lttng_ust_context.c lttng_ust_context.h
+
+nodist_liblttng_ust_context_jni_la_SOURCES = org_lttng_ust_agent_context_LttngContextApi.h
+
+liblttng_ust_context_jni_la_LIBADD = -lc \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "org_lttng_ust_agent_context_LttngContextApi.h"
+
+#include <string.h>
+#include <inttypes.h>
+#include <lttng/ust-events.h>
+#include <lttng/ringbuffer-context.h>
+#include <ust-context-provider.h>
+
+#include "ust-helper.h"
+#include "lttng_ust_context.h"
+
+enum lttng_ust_jni_type {
+ JNI_TYPE_NULL = 0,
+ JNI_TYPE_INTEGER = 1,
+ JNI_TYPE_LONG = 2,
+ JNI_TYPE_DOUBLE = 3,
+ JNI_TYPE_FLOAT = 4,
+ JNI_TYPE_BYTE = 5,
+ JNI_TYPE_SHORT = 6,
+ JNI_TYPE_BOOLEAN = 7,
+ JNI_TYPE_STRING = 8,
+};
+
+struct lttng_ust_jni_ctx_entry {
+ int32_t context_name_offset;
+ char type; /* enum lttng_ust_jni_type */
+ union {
+ int32_t _integer;
+ int64_t _long;
+ double _double;
+ float _float;
+ signed char _byte;
+ int16_t _short;
+ signed char _boolean;
+ int32_t _string_offset;
+ } value;
+} __attribute__((packed));
+
+struct lttng_ust_jni_provider {
+ struct lttng_ust_registered_context_provider *reg_provider;
+ char *name;
+ struct lttng_ust_context_provider provider;
+};
+
+/* TLS passing context info from JNI to callbacks. */
+__thread struct lttng_ust_jni_tls lttng_ust_context_info_tls;
+
+static const char *get_ctx_string_at_offset(int32_t offset)
+{
+ signed char *ctx_strings_array = lttng_ust_context_info_tls.ctx_strings;
+
+ if (offset < 0 || offset >= lttng_ust_context_info_tls.ctx_strings_len) {
+ return NULL;
+ }
+ return (const char *) (ctx_strings_array + offset);
+}
+
+static struct lttng_ust_jni_ctx_entry *lookup_ctx_by_name(const char *ctx_name)
+{
+ struct lttng_ust_jni_ctx_entry *ctx_entries_array = lttng_ust_context_info_tls.ctx_entries;
+ int i, len = lttng_ust_context_info_tls.ctx_entries_len / sizeof(struct lttng_ust_jni_ctx_entry);
+
+ for (i = 0; i < len; i++) {
+ int32_t offset = ctx_entries_array[i].context_name_offset;
+ const char *string = get_ctx_string_at_offset(offset);
+
+ if (string && strcmp(string, ctx_name) == 0) {
+ return &ctx_entries_array[i];
+ }
+ }
+ return NULL;
+}
+
+static size_t get_size_cb(void *priv, size_t offset)
+{
+ struct lttng_ust_jni_ctx_entry *jctx;
+ size_t size = 0;
+ struct lttng_ust_jni_provider *jni_provider = (struct lttng_ust_jni_provider *) priv;
+ const char *ctx_name = jni_provider->name;
+ enum lttng_ust_jni_type jni_type;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += sizeof(char); /* tag */
+ jctx = lookup_ctx_by_name(ctx_name);
+ if (!jctx) {
+ jni_type = JNI_TYPE_NULL;
+ } else {
+ jni_type = jctx->type;
+ }
+ switch (jni_type) {
+ case JNI_TYPE_NULL:
+ break;
+ case JNI_TYPE_INTEGER:
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int32_t));
+ size += sizeof(int32_t); /* variant */
+ break;
+ case JNI_TYPE_LONG:
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int64_t));
+ size += sizeof(int64_t); /* variant */
+ break;
+ case JNI_TYPE_DOUBLE:
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(double));
+ size += sizeof(double); /* variant */
+ break;
+ case JNI_TYPE_FLOAT:
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(float));
+ size += sizeof(float); /* variant */
+ break;
+ case JNI_TYPE_SHORT:
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int16_t));
+ size += sizeof(int16_t); /* variant */
+ break;
+ case JNI_TYPE_BYTE: /* Fall-through. */
+ case JNI_TYPE_BOOLEAN:
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += sizeof(char); /* variant */
+ break;
+ case JNI_TYPE_STRING:
+ {
+ /* The value is an offset, the string is in the "strings" array */
+ int32_t string_offset = jctx->value._string_offset;
+ const char *string = get_ctx_string_at_offset(string_offset);
+
+ if (string) {
+ size += strlen(string) + 1;
+ }
+ break;
+ }
+ default:
+ abort();
+ }
+ return size;
+
+}
+
+static void record_cb(void *priv,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *lttng_chan_buf)
+{
+ struct lttng_ust_jni_ctx_entry *jctx;
+ struct lttng_ust_jni_provider *jni_provider = (struct lttng_ust_jni_provider *) priv;
+ const char *ctx_name = jni_provider->name;
+ enum lttng_ust_jni_type jni_type;
+ char sel_char;
+
+ jctx = lookup_ctx_by_name(ctx_name);
+ if (!jctx) {
+ jni_type = JNI_TYPE_NULL;
+ } else {
+ jni_type = jctx->type;
+ }
+
+ switch (jni_type) {
+ case JNI_TYPE_NULL:
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_NONE;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ break;
+ case JNI_TYPE_INTEGER:
+ {
+ int32_t v = jctx->value._integer;
+
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_S32;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
+ break;
+ }
+ case JNI_TYPE_LONG:
+ {
+ int64_t v = jctx->value._long;
+
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_S64;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
+ break;
+ }
+ case JNI_TYPE_DOUBLE:
+ {
+ double v = jctx->value._double;
+
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_DOUBLE;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
+ break;
+ }
+ case JNI_TYPE_FLOAT:
+ {
+ float v = jctx->value._float;
+
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_FLOAT;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
+ break;
+ }
+ case JNI_TYPE_SHORT:
+ {
+ int16_t v = jctx->value._short;
+
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_S16;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
+ break;
+ }
+ case JNI_TYPE_BYTE:
+ {
+ char v = jctx->value._byte;
+
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_S8;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
+ break;
+ }
+ case JNI_TYPE_BOOLEAN:
+ {
+ char v = jctx->value._boolean;
+
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_S8;
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ lttng_chan_buf->ops->event_write(ctx, &v, sizeof(v), lttng_ust_rb_alignof(v));
+ break;
+ }
+ case JNI_TYPE_STRING:
+ {
+ int32_t offset = jctx->value._string_offset;
+ const char *str = get_ctx_string_at_offset(offset);
+
+ if (str) {
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_STRING;
+ } else {
+ sel_char = LTTNG_UST_DYNAMIC_TYPE_NONE;
+ }
+ lttng_chan_buf->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(char));
+ if (str) {
+ lttng_chan_buf->ops->event_write(ctx, str, strlen(str) + 1, 1);
+ }
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+static void get_value_cb(void *priv, struct lttng_ust_ctx_value *value)
+{
+ struct lttng_ust_jni_provider *jni_provider = (struct lttng_ust_jni_provider *) priv;
+ struct lttng_ust_jni_ctx_entry *jctx;
+ const char *ctx_name = jni_provider->name;
+ enum lttng_ust_jni_type jni_type;
+
+ jctx = lookup_ctx_by_name(ctx_name);
+ if (!jctx) {
+ jni_type = JNI_TYPE_NULL;
+ } else {
+ jni_type = jctx->type;
+ }
+
+ switch (jni_type) {
+ case JNI_TYPE_NULL:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
+ break;
+ case JNI_TYPE_INTEGER:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
+ value->u.s64 = (int64_t) jctx->value._integer;
+ break;
+ case JNI_TYPE_LONG:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
+ value->u.s64 = jctx->value._long;
+ break;
+ case JNI_TYPE_DOUBLE:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_DOUBLE;
+ value->u.d = jctx->value._double;
+ break;
+ case JNI_TYPE_FLOAT:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_DOUBLE;
+ value->u.d = (double) jctx->value._float;
+ break;
+ case JNI_TYPE_SHORT:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
+ value->u.s64 = (int64_t) jctx->value._short;
+ break;
+ case JNI_TYPE_BYTE:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
+ value->u.s64 = (int64_t) jctx->value._byte;
+ break;
+ case JNI_TYPE_BOOLEAN:
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_S64;
+ value->u.s64 = (int64_t) jctx->value._boolean;
+ break;
+ case JNI_TYPE_STRING:
+ {
+ int32_t offset = jctx->value._string_offset;
+ const char *str = get_ctx_string_at_offset(offset);
+
+ if (str) {
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_STRING;
+ value->u.str = str;
+ } else {
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
+ }
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+/*
+ * Register a context provider to UST.
+ *
+ * Called from the Java side when an application registers a context retriever,
+ * so we create and register a corresponding provider on the C side.
+ */
+JNIEXPORT jlong JNICALL Java_org_lttng_ust_agent_context_LttngContextApi_registerProvider(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring provider_name)
+{
+ jboolean iscopy;
+ const char *provider_name_jstr;
+ char *provider_name_cstr;
+ struct lttng_ust_context_provider *provider;
+ struct lttng_ust_jni_provider *jni_provider;
+ /*
+ * Note: a "jlong" is 8 bytes on all architectures, whereas a
+ * C "long" varies.
+ */
+ jlong provider_ref;
+
+ provider_name_jstr = (*env)->GetStringUTFChars(env, provider_name, &iscopy);
+ if (!provider_name_jstr) {
+ goto error_jstr;
+ }
+ /* Keep our own copy of the string so UST can use it. */
+ provider_name_cstr = strdup(provider_name_jstr);
+ (*env)->ReleaseStringUTFChars(env, provider_name, provider_name_jstr);
+ if (!provider_name_cstr) {
+ goto error_strdup;
+ }
+ jni_provider = zmalloc(sizeof(*jni_provider));
+ if (!jni_provider) {
+ goto error_provider;
+ }
+ provider = &jni_provider->provider;
+ provider->struct_size = sizeof(*provider);
+ jni_provider->name = provider_name_cstr;
+ provider->name = jni_provider->name;
+ provider->get_size = get_size_cb;
+ provider->record = record_cb;
+ provider->get_value = get_value_cb;
+ provider->priv = jni_provider;
+
+ jni_provider->reg_provider = lttng_ust_context_provider_register(provider);
+ if (!jni_provider->reg_provider) {
+ goto error_register;
+ }
+
+ provider_ref = (jlong) (long) jni_provider;
+ return provider_ref;
+
+ /* Error handling. */
+error_register:
+ free(jni_provider);
+error_provider:
+ free(provider_name_cstr);
+error_strdup:
+error_jstr:
+ return 0;
+}
+
+/*
+ * Unregister a previously-registered context provider.
+ *
+ * Called from the Java side when an application unregisters a context retriever,
+ * so we unregister and delete the corresponding provider on the C side.
+ */
+JNIEXPORT void JNICALL Java_org_lttng_ust_agent_context_LttngContextApi_unregisterProvider(JNIEnv *env __attribute__((unused)),
+ jobject jobj __attribute__((unused)),
+ jlong provider_ref)
+{
+ struct lttng_ust_jni_provider *jni_provider =
+ (struct lttng_ust_jni_provider *) (unsigned long) provider_ref;
+
+ if (!jni_provider) {
+ return;
+ }
+
+ lttng_ust_context_provider_unregister(jni_provider->reg_provider);
+
+ free(jni_provider->name);
+ free(jni_provider);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ */
+
+#ifndef LIBLTTNG_UST_JAVA_AGENT_JNI_COMMON_LTTNG_UST_CONTEXT_H_
+#define LIBLTTNG_UST_JAVA_AGENT_JNI_COMMON_LTTNG_UST_CONTEXT_H_
+
+struct lttng_ust_jni_ctx_entry;
+
+struct lttng_ust_jni_tls {
+ struct lttng_ust_jni_ctx_entry *ctx_entries;
+ int32_t ctx_entries_len;
+ signed char *ctx_strings;
+ int32_t ctx_strings_len;
+};
+
+extern __thread struct lttng_ust_jni_tls lttng_ust_context_info_tls;
+
+#endif /* LIBLTTNG_UST_JAVA_AGENT_JNI_COMMON_LTTNG_UST_CONTEXT_H_ */
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
+
+lib_LTLIBRARIES = liblttng-ust-jul-jni.la
+liblttng_ust_jul_jni_la_SOURCES = lttng_ust_jul.c \
+ lttng_ust_jul.h
+
+nodist_liblttng_ust_jul_jni_la_SOURCES = org_lttng_ust_agent_jul_LttngJulApi.h
+
+liblttng_ust_jul_jni_la_LIBADD = -lc \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/liblttng-ust-java-agent/jni/common/liblttng-ust-context-jni.la
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "org_lttng_ust_agent_jul_LttngJulApi.h"
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#include "lttng_ust_jul.h"
+#include "../common/lttng_ust_context.h"
+
+/*
+ * Deprecated function from before the context information was passed.
+ */
+JNIEXPORT void JNICALL Java_org_lttng_ust_agent_jul_LttngJulApi_tracepoint(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring msg,
+ jstring logger_name,
+ jstring class_name,
+ jstring method_name,
+ jlong millis,
+ jint log_level,
+ jint thread_id)
+{
+ jboolean iscopy;
+ const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
+ const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
+ const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
+ const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
+
+ tracepoint(lttng_jul, event, msg_cstr, logger_name_cstr,
+ class_name_cstr, method_name_cstr, millis, log_level, thread_id);
+
+ (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
+ (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
+}
+
+/*
+ * Tracepoint used by Java applications using the JUL handler.
+ */
+JNIEXPORT void JNICALL Java_org_lttng_ust_agent_jul_LttngJulApi_tracepointWithContext(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring msg,
+ jstring logger_name,
+ jstring class_name,
+ jstring method_name,
+ jlong millis,
+ jint log_level,
+ jint thread_id,
+ jbyteArray context_info_entries,
+ jbyteArray context_info_strings)
+{
+ jboolean iscopy;
+ const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
+ const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
+ const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
+ const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
+ signed char *context_info_entries_array;
+ signed char *context_info_strings_array;
+
+ /*
+ * Write these to the TLS variables, so that the UST callbacks in
+ * lttng_ust_context.c can access them.
+ */
+ context_info_entries_array = (*env)->GetByteArrayElements(env, context_info_entries, &iscopy);
+ lttng_ust_context_info_tls.ctx_entries = (struct lttng_ust_jni_ctx_entry *) context_info_entries_array;
+ lttng_ust_context_info_tls.ctx_entries_len = (*env)->GetArrayLength(env, context_info_entries);
+ context_info_strings_array = (*env)->GetByteArrayElements(env, context_info_strings, &iscopy);
+ lttng_ust_context_info_tls.ctx_strings = context_info_strings_array;
+ lttng_ust_context_info_tls.ctx_strings_len = (*env)->GetArrayLength(env, context_info_strings);
+
+ tracepoint(lttng_jul, event, msg_cstr, logger_name_cstr,
+ class_name_cstr, method_name_cstr, millis, log_level, thread_id);
+
+ lttng_ust_context_info_tls.ctx_entries = NULL;
+ lttng_ust_context_info_tls.ctx_entries_len = 0;
+ lttng_ust_context_info_tls.ctx_strings = NULL;
+ lttng_ust_context_info_tls.ctx_strings_len = 0;
+ (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
+ (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
+ (*env)->ReleaseByteArrayElements(env, context_info_entries, context_info_entries_array, 0);
+ (*env)->ReleaseByteArrayElements(env, context_info_strings, context_info_strings_array, 0);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_jul
+
+#if !defined(_TRACEPOINT_LTTNG_UST_JUL_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_JUL_H
+
+#include <lttng/tracepoint.h>
+
+/*
+ * Tracepoint used by Java applications using the JUL handler.
+ */
+TRACEPOINT_EVENT(lttng_jul, event,
+ TP_ARGS(
+ const char *, msg,
+ const char *, logger_name,
+ const char *, class_name,
+ const char *, method_name,
+ long, millis,
+ int, log_level,
+ int, thread_id),
+ TP_FIELDS(
+ ctf_string(msg, msg)
+ ctf_string(logger_name, logger_name)
+ ctf_string(class_name, class_name)
+ ctf_string(method_name, method_name)
+ ctf_integer(long, long_millis, millis)
+ ctf_integer(int, int_loglevel, log_level)
+ ctf_integer(int, int_threadid, thread_id)
+ )
+)
+
+#endif /* _TRACEPOINT_LTTNG_UST_JUL_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng_ust_jul.h"
+
+/* This part must be outside protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
+
+lib_LTLIBRARIES = liblttng-ust-log4j-jni.la
+liblttng_ust_log4j_jni_la_SOURCES = lttng_ust_log4j.c \
+ lttng_ust_log4j.h
+
+nodist_liblttng_ust_log4j_jni_la_SOURCES = org_lttng_ust_agent_log4j_LttngLog4jApi.h
+
+liblttng_ust_log4j_jni_la_LIBADD = -lc \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(top_builddir)/src/liblttng-ust-java-agent/jni/common/liblttng-ust-context-jni.la
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 EfficiOS Inc.
+ * Copyright (C) 2016 Alexandre Montplaisir <alexmonthy@efficios.com>
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "org_lttng_ust_agent_log4j_LttngLog4jApi.h"
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#include "lttng_ust_log4j.h"
+#include "../common/lttng_ust_context.h"
+
+/*
+ * Deprecated function, from before context information was passed along with events.
+ */
+JNIEXPORT void JNICALL Java_org_lttng_ust_agent_log4j_LttngLog4jApi_tracepoint(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring msg,
+ jstring logger_name,
+ jstring class_name,
+ jstring method_name,
+ jstring file_name,
+ jint line_number,
+ jlong timestamp,
+ jint loglevel,
+ jstring thread_name)
+{
+ jboolean iscopy;
+ const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
+ const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
+ const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
+ const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
+ const char *file_name_cstr = (*env)->GetStringUTFChars(env, file_name, &iscopy);
+ const char *thread_name_cstr = (*env)->GetStringUTFChars(env, thread_name, &iscopy);
+
+ tracepoint(lttng_log4j, event, msg_cstr, logger_name_cstr,
+ class_name_cstr, method_name_cstr, file_name_cstr,
+ line_number, timestamp, loglevel, thread_name_cstr);
+
+ (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
+ (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, file_name, file_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, thread_name, thread_name_cstr);
+}
+
+/*
+ * Tracepoint used by Java applications using the log4j handler.
+ */
+JNIEXPORT void JNICALL Java_org_lttng_ust_agent_log4j_LttngLog4jApi_tracepointWithContext(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring msg,
+ jstring logger_name,
+ jstring class_name,
+ jstring method_name,
+ jstring file_name,
+ jint line_number,
+ jlong timestamp,
+ jint loglevel,
+ jstring thread_name,
+ jbyteArray context_info_entries,
+ jbyteArray context_info_strings)
+{
+ jboolean iscopy;
+ const char *msg_cstr = (*env)->GetStringUTFChars(env, msg, &iscopy);
+ const char *logger_name_cstr = (*env)->GetStringUTFChars(env, logger_name, &iscopy);
+ const char *class_name_cstr = (*env)->GetStringUTFChars(env, class_name, &iscopy);
+ const char *method_name_cstr = (*env)->GetStringUTFChars(env, method_name, &iscopy);
+ const char *file_name_cstr = (*env)->GetStringUTFChars(env, file_name, &iscopy);
+ const char *thread_name_cstr = (*env)->GetStringUTFChars(env, thread_name, &iscopy);
+ signed char *context_info_entries_array;
+ signed char *context_info_strings_array;
+
+ /*
+ * Write these to the TLS variables, so that the UST callbacks in
+ * lttng_ust_context.c can access them.
+ */
+ context_info_entries_array = (*env)->GetByteArrayElements(env, context_info_entries, &iscopy);
+ lttng_ust_context_info_tls.ctx_entries = (struct lttng_ust_jni_ctx_entry *) context_info_entries_array;
+ lttng_ust_context_info_tls.ctx_entries_len = (*env)->GetArrayLength(env, context_info_entries);
+ context_info_strings_array = (*env)->GetByteArrayElements(env, context_info_strings, &iscopy);
+ lttng_ust_context_info_tls.ctx_strings = context_info_strings_array;
+ lttng_ust_context_info_tls.ctx_strings_len = (*env)->GetArrayLength(env, context_info_strings);
+
+ tracepoint(lttng_log4j, event, msg_cstr, logger_name_cstr,
+ class_name_cstr, method_name_cstr, file_name_cstr,
+ line_number, timestamp, loglevel, thread_name_cstr);
+
+ lttng_ust_context_info_tls.ctx_entries = NULL;
+ lttng_ust_context_info_tls.ctx_entries_len = 0;
+ lttng_ust_context_info_tls.ctx_strings = NULL;
+ lttng_ust_context_info_tls.ctx_strings_len = 0;
+ (*env)->ReleaseStringUTFChars(env, msg, msg_cstr);
+ (*env)->ReleaseStringUTFChars(env, logger_name, logger_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, class_name, class_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, method_name, method_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, file_name, file_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, thread_name, thread_name_cstr);
+ (*env)->ReleaseByteArrayElements(env, context_info_entries, context_info_entries_array, 0);
+ (*env)->ReleaseByteArrayElements(env, context_info_strings, context_info_strings_array, 0);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_log4j
+
+#if !defined(_TRACEPOINT_LTTNG_UST_LOG4J_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_LOG4J_H
+
+#include <lttng/tracepoint.h>
+
+/*
+ * Tracepoint used by Java applications using the log4j log appender.
+ */
+TRACEPOINT_EVENT(lttng_log4j, event,
+ TP_ARGS(
+ const char *, msg,
+ const char *, logger_name,
+ const char *, class_name,
+ const char *, method_name,
+ const char *, file_name,
+ int, line_number,
+ long, timestamp,
+ int, log_level,
+ const char *, thread_name),
+ TP_FIELDS(
+ ctf_string(msg, msg)
+ ctf_string(logger_name, logger_name)
+ ctf_string(class_name, class_name)
+ ctf_string(method_name, method_name)
+ ctf_string(filename, file_name)
+ ctf_integer(int, line_number, line_number)
+ ctf_integer(long, timestamp, timestamp)
+ ctf_integer(int, int_loglevel, log_level)
+ ctf_string(thread_name, thread_name)
+ )
+)
+
+#endif /* _TRACEPOINT_LTTNG_UST_LOG4J_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng_ust_log4j.h"
+
+/* This part must be outside protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+org_lttng_ust_LTTngUst.h
+org/
+liblttng-ust-java.jar
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "org_lttng_ust_LTTngUst.h"
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#include "lttng_ust_java.h"
+
+JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointInt(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring ev_name,
+ jint payload)
+{
+ jboolean iscopy;
+ const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
+
+ tracepoint(lttng_ust_java, int_event, ev_name_cstr, payload);
+
+ (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
+}
+
+JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointIntInt(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring ev_name,
+ jint payload1,
+ jint payload2)
+{
+ jboolean iscopy;
+ const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
+
+ tracepoint(lttng_ust_java, int_int_event, ev_name_cstr, payload1, payload2);
+
+ (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
+}
+
+JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointLong(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring ev_name,
+ jlong payload)
+{
+ jboolean iscopy;
+ const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
+
+ tracepoint(lttng_ust_java, long_event, ev_name_cstr, payload);
+
+ (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
+}
+
+JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointLongLong(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring ev_name,
+ jlong payload1,
+ jlong payload2)
+{
+ jboolean iscopy;
+ const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
+
+ tracepoint(lttng_ust_java, long_long_event, ev_name_cstr, payload1, payload2);
+
+ (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
+}
+
+JNIEXPORT void JNICALL Java_org_lttng_ust_LTTngUst_tracepointString(JNIEnv *env,
+ jobject jobj __attribute__((unused)),
+ jstring ev_name,
+ jstring payload)
+{
+ jboolean iscopy;
+ const char *ev_name_cstr = (*env)->GetStringUTFChars(env, ev_name, &iscopy);
+ const char *payload_cstr = (*env)->GetStringUTFChars(env, payload, &iscopy);
+
+ tracepoint(lttng_ust_java, string_event, ev_name_cstr, payload_cstr);
+
+ (*env)->ReleaseStringUTFChars(env, ev_name, ev_name_cstr);
+ (*env)->ReleaseStringUTFChars(env, payload, payload_cstr);
+}
+
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+JAVAROOT = .
+jarfile = liblttng-ust-java.jar
+jardir = $(datadir)/java
+pkgpath = org/lttng/ust
+
+dist_noinst_JAVA = $(pkgpath)/LTTngUst.java
+jar_DATA = $(jarfile)
+BUILT_SOURCES = org_lttng_ust_LTTngUst.h
+
+AM_CPPFLAGS += -I$(builddir) -I$(srcdir) $(JNI_CPPFLAGS)
+lib_LTLIBRARIES = liblttng-ust-java.la
+liblttng_ust_java_la_SOURCES = LTTngUst.c lttng_ust_java.h
+nodist_liblttng_ust_java_la_SOURCES = org_lttng_ust_LTTngUst.h
+
+liblttng_ust_java_la_LIBADD = -lc \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la
+
+$(jarfile): classnoinst.stamp
+ $(JAR) cf $(JARFLAGS) $@ $(pkgpath)/*.class
+
+if !HAVE_JAVAH
+# If we don't have javah, assume we are running openjdk >= 10 and use javac
+# to generate the jni header file.
+AM_JAVACFLAGS = -h .
+
+org_lttng_ust_LTTngUst.h: $(jarfile)
+else
+org_lttng_ust_LTTngUst.h: jni-header.stamp
+
+jni-header.stamp: $(dist_noinst_JAVA)
+ $(JAVAH) -classpath $(srcdir) $(JAVAHFLAGS) org.lttng.ust.LTTngUst && \
+ echo "JNI header generated" > jni-header.stamp
+endif
+
+all-local: org_lttng_ust_LTTngUst.h
+
+EXTRA_DIST = README
+
+CLEANFILES = $(jarfile) $(pkgpath)/*.class jni-header.stamp org_lttng_ust_LTTngUst.h
--- /dev/null
+This directory contains a simple API for instrumenting Java applications.
+
+Configuration examples to build this library:
+
+dependency: openjdk-7-jdk
+./configure --enable-jni-interface
+
+Note that OpenJDK 7 is used for development and continuous integration, so
+that is the version we directly support for this library. However, it has
+also been tested with OpenJDK 6. Please let us know if other Java versions
+(commercial or not) work with this library.
+
+After building, you can use the liblttng-ust-java.jar file in a Java project.
+It requires the liblttng-ust-java.so* files (which get installed when doing
+`make install'), so make sure those are in the linker's library path.
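+
+For instance, a minimal application could look like this (the class name
+and event name are only illustrative):
+
+  import org.lttng.ust.LTTngUst;
+
+  public class Demo {
+      public static void main(String[] args) {
+          LTTngUst.init();
+          LTTngUst.tracepointInt("demo_event", 42);
+      }
+  }
+
+The resulting events can then be enabled in a tracing session with
+`lttng enable-event -u "lttng_ust_java:*"'.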
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_java
+
+#if !defined(_TRACEPOINT_LTTNG_UST_JAVA_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_JAVA_H
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_java, int_event,
+ TP_ARGS(const char *, name, int, payload),
+ TP_FIELDS(
+ ctf_string(name, name)
+ ctf_integer(int, int_payload, payload)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_java, int_int_event,
+ TP_ARGS(const char *, name, int, payload1, int, payload2),
+ TP_FIELDS(
+ ctf_string(name, name)
+ ctf_integer(int, int_payload1, payload1)
+ ctf_integer(int, int_payload2, payload2)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_java, long_event,
+ TP_ARGS(const char *, name, long, payload),
+ TP_FIELDS(
+ ctf_string(name, name)
+ ctf_integer(long, long_payload, payload)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_java, long_long_event,
+ TP_ARGS(const char *, name, long, payload1, long, payload2),
+ TP_FIELDS(
+ ctf_string(name, name)
+ ctf_integer(long, long_payload1, payload1)
+ ctf_integer(long, long_payload2, payload2)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_java, string_event,
+ TP_ARGS(const char *, name, const char *, payload),
+ TP_FIELDS(
+ ctf_string(name, name)
+ ctf_string(string_payload, payload)
+ )
+)
+
+#endif /* _TRACEPOINT_LTTNG_UST_JAVA_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng_ust_java.h"
+
+/* This part must be outside protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2012 Alexandre Montplaisir <alexandre.montplaisir@polymtl.ca>
+ */
+
+package org.lttng.ust;
+
+/**
+ * This class implements the Java side of the LTTng-UST Java interface.
+ *
+ * First, make sure you have installed "liblttng-ust-java.so" where the linker
+ * can find it. You can then call LTTngUst.init() from your Java program to
+ * connect the methods exposed here to the native library.
+ *
+ * Because of limitations in the probe declaration, all trace events generated
+ * by this library will have "lttng_ust_java" for domain, and "<type>_event" for
+ * event name in the CTF trace files. The "name" parameter will instead appear
+ * as the first element of the event's payload.
+ *
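+ * A minimal usage sketch (the event name "my_event" is only an example):
+ *
+ * <pre>
+ * LTTngUst.init();
+ * LTTngUst.tracepointInt("my_event", 42);
+ * </pre>
+ *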
+ * @author Mathieu Desnoyers
+ * @author Alexandre Montplaisir
+ *
+ */
+public abstract class LTTngUst {
+
+ /**
+ * Initialize the UST tracer. This should always be called first, before any
+ * tracepoint* method.
+ */
+ public static void init() {
+ System.loadLibrary("lttng-ust-java"); //$NON-NLS-1$
+ }
+
+ /**
+ * Insert a tracepoint with a payload of type Integer.
+ *
+ * @param name
+ * The name assigned to this event. For best performance, this
+ * should be a statically-defined String, or a literal.
+ * @param payload
+ * The int payload
+ */
+ public static native void tracepointInt(String name, int payload);
+
+ /**
+ * Insert a tracepoint with a payload consisting of two integers.
+ *
+ * @param name
+ * The name assigned to this event. For best performance, this
+ * should be a statically-defined String, or a literal.
+ * @param payload1
+ * The first int payload
+ * @param payload2
+ * The second int payload
+ */
+ public static native void
+ tracepointIntInt(String name, int payload1, int payload2);
+
+ /**
+ * Insert a tracepoint with a payload of type Long
+ *
+ * @param name
+ * The name assigned to this event. For best performance, this
+ * should be a statically-defined String, or a literal.
+ * @param payload
+ * The long payload
+ */
+ public static native void tracepointLong(String name, long payload);
+
+ /**
+ * Insert a tracepoint with a payload consisting of two longs.
+ *
+ * @param name
+ * The name assigned to this event. For best performance, this
+ * should be a statically-defined String, or a literal.
+ * @param payload1
+ * The first long payload
+ * @param payload2
+ * The second long payload
+ */
+ public static native void
+ tracepointLongLong(String name, long payload1, long payload2);
+
+ /**
+ * Insert a tracepoint with a String payload.
+ *
+ * @param name
+ * The name assigned to this event. For best performance, this
+ * should be a statically-defined String, or a literal.
+ * @param payload
+ * The String payload
+ */
+ public static native void tracepointString(String name, String payload);
+
+}
+
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-libc-wrapper.la \
+ liblttng-ust-pthread-wrapper.la
+
+liblttng_ust_libc_wrapper_la_SOURCES = \
+ lttng-ust-malloc.c \
+ ust_libc.h
+
+liblttng_ust_libc_wrapper_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_libc_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+liblttng_ust_pthread_wrapper_la_SOURCES = \
+ lttng-ust-pthread.c \
+ ust_pthread.h
+
+liblttng_ust_pthread_wrapper_la_LIBADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
+
+liblttng_ust_pthread_wrapper_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+dist_noinst_SCRIPTS = run
+EXTRA_DIST = README
--- /dev/null
+liblttng-ust-libc is used to instrument some calls to libc in a
+program, without the need to recompile it.
+
+This library defines a malloc() function that is instrumented with a
+tracepoint. It also calls the libc malloc afterwards. When loaded with
+LD_PRELOAD, it replaces the libc malloc() function, in effect
+instrumenting all calls to malloc(). The same is performed for free().
+
+See the "run" script for a usage example.
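+
+For instance, a minimal tracing session could look like this, assuming the
+wrapper library is installed in the dynamic linker's search path and
+`./myapp' stands for the program to trace:
+
+  lttng create
+  lttng enable-event -u 'lttng_ust_libc:*'
+  lttng start
+  LD_PRELOAD=liblttng-ust-libc-wrapper.so ./myapp
+  lttng stop
+  lttng view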
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency loop between this malloc wrapper, liburcu and
+ * libc.
+ */
+#include <ust-dlfcn.h>
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <malloc.h>
+
+#include <urcu/system.h>
+#include <urcu/uatomic.h>
+#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <urcu/arch.h>
+
+#include <lttng/ust-libc-wrapper.h>
+
+#include <ust-helper.h>
+#include "ust-compat.h"
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
+#include "ust_libc.h"
+
+#define STATIC_CALLOC_LEN 4096
+static char static_calloc_buf[STATIC_CALLOC_LEN];
+static unsigned long static_calloc_buf_offset;
+
+struct alloc_functions {
+ void *(*calloc)(size_t nmemb, size_t size);
+ void *(*malloc)(size_t size);
+ void (*free)(void *ptr);
+ void *(*realloc)(void *ptr, size_t size);
+ void *(*memalign)(size_t alignment, size_t size);
+ int (*posix_memalign)(void **memptr, size_t alignment, size_t size);
+};
+
+static
+struct alloc_functions cur_alloc;
+
+/*
+ * Make sure our own use of the urcu TLS compat layer will not cause infinite
+ * recursion by calling calloc.
+ */
+
+static
+void *static_calloc(size_t nmemb, size_t size);
+
+/*
+ * pthread mutex replacement for URCU tls compat layer.
+ */
+static int ust_malloc_lock;
+
+static
+void ust_malloc_spin_lock(pthread_mutex_t *lock)
+ __attribute__((unused));
+static
+void ust_malloc_spin_lock(pthread_mutex_t *lock __attribute__((unused)))
+{
+ /*
+ * The memory barrier within cmpxchg takes care of ordering
+ * memory accesses with respect to the start of the critical
+ * section.
+ */
+ while (uatomic_cmpxchg(&ust_malloc_lock, 0, 1) != 0)
+ caa_cpu_relax();
+}
+
+static
+void ust_malloc_spin_unlock(pthread_mutex_t *lock)
+ __attribute__((unused));
+static
+void ust_malloc_spin_unlock(pthread_mutex_t *lock __attribute__((unused)))
+{
+ /*
+ * Ensure memory accesses within the critical section do not
+ * leak outside.
+ */
+ cmm_smp_mb();
+ uatomic_set(&ust_malloc_lock, 0);
+}
+
+#define calloc static_calloc
+#define pthread_mutex_lock ust_malloc_spin_lock
+#define pthread_mutex_unlock ust_malloc_spin_unlock
+static DEFINE_URCU_TLS(int, malloc_nesting);
+#undef pthread_mutex_unlock
+#undef pthread_mutex_lock
+#undef calloc
+
+/*
+ * Static allocator to use when initially executing dlsym(). It stores
+ * each object's size in a size_t value placed just before the object.
+ */
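+/*
+ * Sketch of the resulting buffer layout for one allocation:
+ *
+ *   ... [ padding ] [ size_t size ] [ object data (nmemb * size) ] ...
+ *                                   ^ returned pointer (aligned)
+ */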
+static
+void *static_calloc_aligned(size_t nmemb, size_t size, size_t alignment)
+{
+ size_t prev_offset, new_offset, res_offset, aligned_offset;
+
+ if (nmemb * size == 0) {
+ return NULL;
+ }
+
+ /*
+ * Protect static_calloc_buf_offset from concurrent updates
+ * using a cmpxchg loop rather than a mutex to remove a
+ * dependency on pthread. This will minimize the risk of bad
+ * interaction between mutex and malloc instrumentation.
+ */
+ res_offset = CMM_LOAD_SHARED(static_calloc_buf_offset);
+ do {
+ prev_offset = res_offset;
+ aligned_offset = LTTNG_UST_ALIGN(prev_offset + sizeof(size_t), alignment);
+ new_offset = aligned_offset + nmemb * size;
+ if (new_offset > sizeof(static_calloc_buf)) {
+ abort();
+ }
+ } while ((res_offset = uatomic_cmpxchg(&static_calloc_buf_offset,
+ prev_offset, new_offset)) != prev_offset);
+ *(size_t *) &static_calloc_buf[aligned_offset - sizeof(size_t)] = size;
+ return &static_calloc_buf[aligned_offset];
+}
+
+static
+void *static_calloc(size_t nmemb, size_t size)
+{
+ void *retval;
+
+ retval = static_calloc_aligned(nmemb, size, 1);
+ return retval;
+}
+
+static
+void *static_malloc(size_t size)
+{
+ void *retval;
+
+ retval = static_calloc_aligned(1, size, 1);
+ return retval;
+}
+
+static
+void static_free(void *ptr __attribute__((unused)))
+{
+ /* no-op. */
+}
+
+static
+void *static_realloc(void *ptr, size_t size)
+{
+ size_t *old_size = NULL;
+ void *retval;
+
+ if (size == 0) {
+ retval = NULL;
+ goto end;
+ }
+
+ if (ptr) {
+ old_size = (size_t *) ptr - 1;
+ if (size <= *old_size) {
+ /* We can re-use the old entry. */
+ *old_size = size;
+ retval = ptr;
+ goto end;
+ }
+ }
+ /* We need to expand. Don't free previous memory location. */
+ retval = static_calloc_aligned(1, size, 1);
+ assert(retval);
+ if (ptr)
+ memcpy(retval, ptr, *old_size);
+end:
+ return retval;
+}
+
+static
+void *static_memalign(size_t alignment, size_t size)
+{
+ void *retval;
+
+ retval = static_calloc_aligned(1, size, alignment);
+ return retval;
+}
+
+static
+int static_posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+	void *ptr;
+
+	/*
+	 * Check that alignment is a power of 2 and at least
+	 * sizeof(void *); otherwise fail with EINVAL, as POSIX requires.
+	 */
+	if (alignment == 0
+			|| (alignment & (alignment - 1))
+			|| alignment < sizeof(void *)) {
+		return EINVAL;
+	}
+	ptr = static_calloc_aligned(1, size, alignment);
+	*memptr = ptr;
+	return 0;
+}
+
+static
+void setup_static_allocator(void)
+{
+ assert(cur_alloc.calloc == NULL);
+ cur_alloc.calloc = static_calloc;
+ assert(cur_alloc.malloc == NULL);
+ cur_alloc.malloc = static_malloc;
+ assert(cur_alloc.free == NULL);
+ cur_alloc.free = static_free;
+ assert(cur_alloc.realloc == NULL);
+ cur_alloc.realloc = static_realloc;
+ assert(cur_alloc.memalign == NULL);
+ cur_alloc.memalign = static_memalign;
+ assert(cur_alloc.posix_memalign == NULL);
+ cur_alloc.posix_memalign = static_posix_memalign;
+}
+
+static
+void lookup_all_symbols(void)
+{
+ struct alloc_functions af;
+
+ /*
+ * Temporarily redirect allocation functions to
+ * static_calloc_aligned, and free function to static_free
+ * (no-op), until the dlsym lookup has completed.
+ */
+ setup_static_allocator();
+
+ /* Perform the actual lookups */
+ af.calloc = dlsym(RTLD_NEXT, "calloc");
+ af.malloc = dlsym(RTLD_NEXT, "malloc");
+ af.free = dlsym(RTLD_NEXT, "free");
+ af.realloc = dlsym(RTLD_NEXT, "realloc");
+ af.memalign = dlsym(RTLD_NEXT, "memalign");
+ af.posix_memalign = dlsym(RTLD_NEXT, "posix_memalign");
+
+ /* Populate the new allocator functions */
+ memcpy(&cur_alloc, &af, sizeof(cur_alloc));
+}
+
+void *malloc(size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.malloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.malloc == NULL) {
+ fprintf(stderr, "mallocwrap: unable to find malloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.malloc(size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, malloc,
+ size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+void free(void *ptr)
+{
+ URCU_TLS(malloc_nesting)++;
+ /*
+ * Check whether the memory was allocated with
+ * static_calloc_align, in which case there is nothing to free.
+ */
+ if (caa_unlikely((char *)ptr >= static_calloc_buf &&
+ (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
+ goto end;
+ }
+
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, free,
+ ptr, LTTNG_UST_CALLER_IP());
+ }
+
+ if (cur_alloc.free == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.free == NULL) {
+ fprintf(stderr, "mallocwrap: unable to find free\n");
+ abort();
+ }
+ }
+ cur_alloc.free(ptr);
+end:
+ URCU_TLS(malloc_nesting)--;
+}
+
+void *calloc(size_t nmemb, size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.calloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.calloc == NULL) {
+ fprintf(stderr, "callocwrap: unable to find calloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.calloc(nmemb, size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, calloc,
+ nmemb, size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+void *realloc(void *ptr, size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ /*
+ * Check whether the memory was allocated with
+ * static_calloc_align, in which case there is nothing
+ * to free, and we need to copy the old data.
+ */
+ if (caa_unlikely((char *)ptr >= static_calloc_buf &&
+ (char *)ptr < static_calloc_buf + STATIC_CALLOC_LEN)) {
+ size_t *old_size;
+
+ old_size = (size_t *) ptr - 1;
+ if (cur_alloc.calloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.calloc == NULL) {
+ fprintf(stderr, "reallocwrap: unable to find calloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.calloc(1, size);
+ if (retval) {
+ memcpy(retval, ptr, *old_size);
+ }
+	/*
+	 * Mimic receiving a NULL pointer, so that memory allocation
+	 * analysis based on the trace doesn't get confused by the
+	 * address from the static allocator.
+	 */
+ ptr = NULL;
+ goto end;
+ }
+
+ if (cur_alloc.realloc == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.realloc == NULL) {
+ fprintf(stderr, "reallocwrap: unable to find realloc\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.realloc(ptr, size);
+end:
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, realloc,
+ ptr, size, retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+void *memalign(size_t alignment, size_t size)
+{
+ void *retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.memalign == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.memalign == NULL) {
+ fprintf(stderr, "memalignwrap: unable to find memalign\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.memalign(alignment, size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, memalign,
+ alignment, size, retval,
+ LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+int posix_memalign(void **memptr, size_t alignment, size_t size)
+{
+ int retval;
+
+ URCU_TLS(malloc_nesting)++;
+ if (cur_alloc.posix_memalign == NULL) {
+ lookup_all_symbols();
+ if (cur_alloc.posix_memalign == NULL) {
+ fprintf(stderr, "posix_memalignwrap: unable to find posix_memalign\n");
+ abort();
+ }
+ }
+ retval = cur_alloc.posix_memalign(memptr, alignment, size);
+ if (URCU_TLS(malloc_nesting) == 1) {
+ tracepoint(lttng_ust_libc, posix_memalign,
+ *memptr, alignment, size,
+ retval, LTTNG_UST_CALLER_IP());
+ }
+ URCU_TLS(malloc_nesting)--;
+ return retval;
+}
+
+static
+void lttng_ust_fixup_malloc_nesting_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(malloc_nesting)));
+}
+
+void lttng_ust_libc_wrapper_malloc_init(void)
+{
+ /* Initialization already done */
+ if (cur_alloc.calloc) {
+ return;
+ }
+ lttng_ust_fixup_malloc_nesting_tls();
+ /*
+ * Ensure the allocator is in place before the process becomes
+ * multithreaded.
+ */
+ lookup_all_symbols();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Mentor Graphics
+ */
+
+/*
+ * Do _not_ define _LGPL_SOURCE because we don't want to create a
+ * circular dependency loop between this pthread wrapper, liburcu and
+ * libc.
+ */
+#include <ust-dlfcn.h>
+#include <ust-helper.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
+#include "ust_pthread.h"
+
+static __thread int thread_in_trace;
+
+int pthread_mutex_lock(pthread_mutex_t *mutex)
+{
+ static int (*mutex_lock)(pthread_mutex_t *);
+ int retval;
+
+ if (!mutex_lock) {
+ mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
+ if (!mutex_lock) {
+ if (thread_in_trace) {
+ abort();
+ }
+ fprintf(stderr, "unable to initialize pthread wrapper library.\n");
+ return EINVAL;
+ }
+ }
+ if (thread_in_trace) {
+ return mutex_lock(mutex);
+ }
+
+ thread_in_trace = 1;
+ tracepoint(lttng_ust_pthread, pthread_mutex_lock_req, mutex,
+ LTTNG_UST_CALLER_IP());
+ retval = mutex_lock(mutex);
+ tracepoint(lttng_ust_pthread, pthread_mutex_lock_acq, mutex,
+ retval, LTTNG_UST_CALLER_IP());
+ thread_in_trace = 0;
+ return retval;
+}
+
+int pthread_mutex_trylock(pthread_mutex_t *mutex)
+{
+ static int (*mutex_trylock)(pthread_mutex_t *);
+ int retval;
+
+ if (!mutex_trylock) {
+ mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
+ if (!mutex_trylock) {
+ if (thread_in_trace) {
+ abort();
+ }
+ fprintf(stderr, "unable to initialize pthread wrapper library.\n");
+ return EINVAL;
+ }
+ }
+ if (thread_in_trace) {
+ return mutex_trylock(mutex);
+ }
+
+ thread_in_trace = 1;
+ retval = mutex_trylock(mutex);
+ tracepoint(lttng_ust_pthread, pthread_mutex_trylock, mutex,
+ retval, LTTNG_UST_CALLER_IP());
+ thread_in_trace = 0;
+ return retval;
+}
+
+int pthread_mutex_unlock(pthread_mutex_t *mutex)
+{
+ static int (*mutex_unlock)(pthread_mutex_t *);
+ int retval;
+
+ if (!mutex_unlock) {
+ mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
+ if (!mutex_unlock) {
+ if (thread_in_trace) {
+ abort();
+ }
+ fprintf(stderr, "unable to initialize pthread wrapper library.\n");
+ return EINVAL;
+ }
+ }
+ if (thread_in_trace) {
+ return mutex_unlock(mutex);
+ }
+
+ thread_in_trace = 1;
+ retval = mutex_unlock(mutex);
+ tracepoint(lttng_ust_pthread, pthread_mutex_unlock, mutex,
+ retval, LTTNG_UST_CALLER_IP());
+ thread_in_trace = 0;
+ return retval;
+}
--- /dev/null
+#!/bin/sh
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+
+LD_VERBOSE=1 LD_PRELOAD=.libs/liblttng-ust-libc-wrapper.so "$@"
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_libc
+
+#if !defined(_TRACEPOINT_UST_LIBC_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_LIBC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_libc, malloc,
+ TP_ARGS(size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, free,
+ TP_ARGS(void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, calloc,
+ TP_ARGS(size_t, nmemb, size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer(size_t, nmemb, nmemb)
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, realloc,
+ TP_ARGS(void *, in_ptr, size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, in_ptr, in_ptr)
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, memalign,
+ TP_ARGS(size_t, alignment, size_t, size, void *, ptr, void *, ip),
+ TP_FIELDS(
+ ctf_integer(size_t, alignment, alignment)
+ ctf_integer(size_t, size, size)
+ ctf_integer_hex(void *, ptr, ptr)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_libc, posix_memalign,
+ TP_ARGS(void *, out_ptr, size_t, alignment, size_t, size, int, result, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, out_ptr, out_ptr)
+ ctf_integer(size_t, alignment, alignment)
+ ctf_integer(size_t, size, size)
+ ctf_integer(int, result, result)
+ ctf_unused(ip)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_LIBC_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_libc.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013 Mentor Graphics
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_pthread
+
+#if !defined(_TRACEPOINT_UST_PTHREAD_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_PTHREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_req,
+ TP_ARGS(pthread_mutex_t *, mutex, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_lock_acq,
+ TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_integer(int, status, status)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_trylock,
+ TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_integer(int, status, status)
+ ctf_unused(ip)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_pthread, pthread_mutex_unlock,
+ TP_ARGS(pthread_mutex_t *, mutex, int, status, void *, ip),
+ TP_FIELDS(
+ ctf_integer_hex(void *, mutex, mutex)
+ ctf_integer(int, status, status)
+ ctf_unused(ip)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_PTHREAD_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_pthread.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+# tracepoint provider: always built/installed (does not depend on Python per se)
+AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
+
+lib_LTLIBRARIES = liblttng-ust-python-agent.la
+
+liblttng_ust_python_agent_la_SOURCES = lttng_ust_python.c lttng_ust_python.h
+liblttng_ust_python_agent_la_LIBADD = \
+ -lc \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la
+
+# Follow the main library soname for co-installability
+liblttng_ust_python_agent_la_LDFLAGS = -version-info $(LTTNG_UST_LIBRARY_VERSION)
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#define TRACEPOINT_DEFINE
+#define TRACEPOINT_CREATE_PROBES
+#include "lttng_ust_python.h"
+
+/*
+ * The tracepoint fired by the agent.
+ */
+
+void py_tracepoint(const char *asctime, const char *msg,
+ const char *logger_name, const char *funcName, unsigned int lineno,
+ unsigned int int_loglevel, unsigned int thread, const char *threadName);
+void py_tracepoint(const char *asctime, const char *msg,
+ const char *logger_name, const char *funcName, unsigned int lineno,
+ unsigned int int_loglevel, unsigned int thread, const char *threadName)
+{
+ tracepoint(lttng_python, event, asctime, msg, logger_name, funcName,
+ lineno, int_loglevel, thread, threadName);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2014 - David Goulet <dgoulet@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_python
+
+#if !defined(_TRACEPOINT_LTTNG_UST_PYTHON_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_PYTHON_H
+
+#include <lttng/tracepoint.h>
+#include <stdbool.h>
+
+TRACEPOINT_EVENT(lttng_python, event,
+ TP_ARGS(
+ const char *, asctime,
+ const char *, msg,
+ const char *, logger_name,
+ const char *, funcName,
+ int, lineno,
+ int, int_loglevel,
+ int, thread,
+ const char *, threadName
+ ),
+ TP_FIELDS(
+ ctf_string(asctime, asctime)
+ ctf_string(msg, msg)
+ ctf_string(logger_name, logger_name)
+ ctf_string(funcName, funcName)
+ ctf_integer(unsigned int, lineno, lineno)
+ ctf_integer(unsigned int, int_loglevel, int_loglevel)
+ ctf_integer(unsigned int, thread, thread)
+ ctf_string(threadName, threadName)
+ )
+)
+
+#endif /* _TRACEPOINT_LTTNG_UST_PYTHON_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng_ust_python.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -I$(srcdir) -fno-strict-aliasing
+
+noinst_LTLIBRARIES = liblttng-ust-runtime.la liblttng-ust-support.la
+
+lib_LTLIBRARIES = liblttng-ust-common.la liblttng-ust-tracepoint.la liblttng-ust.la
+
+liblttng_ust_common_la_SOURCES = \
+ lttng-ust-urcu.c \
+ lttng-ust-urcu-pointer.c
+
+liblttng_ust_common_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+liblttng_ust_tracepoint_la_SOURCES = \
+ tracepoint.c \
+ tracepoint-weak-test.c \
+ tracepoint-internal.h \
+ lttng-tracer-core.h \
+ jhash.h \
+ error.h
+
+liblttng_ust_tracepoint_la_LIBADD = \
+ liblttng-ust-common.la \
+ $(top_builddir)/src/snprintf/libustsnprintf.la \
+ $(DL_LIBS)
+
+liblttng_ust_tracepoint_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+liblttng_ust_tracepoint_la_CFLAGS = -DUST_COMPONENT="liblttng_ust_tracepoint" $(AM_CFLAGS)
+
+liblttng_ust_runtime_la_SOURCES = \
+ bytecode.h \
+ lttng-ust-comm.c \
+ lttng-ust-abi.c \
+ lttng-probes.c \
+ lttng-bytecode.c \
+ lttng-bytecode.h \
+ lttng-bytecode-validator.c \
+ lttng-bytecode-specialize.c \
+ lttng-bytecode-interpreter.c \
+ lttng-context-provider.c \
+ lttng-context-vtid.c \
+ lttng-context-vpid.c \
+ lttng-context-pthread-id.c \
+ lttng-context-procname.c \
+ lttng-context-ip.c \
+ lttng-context-cpu-id.c \
+ lttng-context-cgroup-ns.c \
+ lttng-context-ipc-ns.c \
+ lttng-context-mnt-ns.c \
+ lttng-context-net-ns.c \
+ lttng-context-pid-ns.c \
+ lttng-context-time-ns.c \
+ lttng-context-user-ns.c \
+ lttng-context-uts-ns.c \
+ lttng-context-vuid.c \
+ lttng-context-veuid.c \
+ lttng-context-vsuid.c \
+ lttng-context-vgid.c \
+ lttng-context-vegid.c \
+ lttng-context-vsgid.c \
+ lttng-context.c \
+ lttng-events.c \
+ lttng-hash-helper.h \
+ lttng-ust-elf.c \
+ lttng-ust-elf.h \
+ lttng-ust-statedump.c \
+ lttng-ust-statedump.h \
+ lttng-ust-statedump-provider.h \
+ ust_lib.c \
+ ust_lib.h \
+ context-internal.h \
+ context-provider-internal.h \
+ tracepoint-internal.h \
+ ust-events-internal.h \
+ clock.h \
+ compat.h \
+ wait.h \
+ jhash.h \
+ lttng-ust-uuid.h \
+ error.h \
+ tracef.c \
+ lttng-ust-tracef-provider.h \
+ tracelog.c \
+ lttng-ust-tracelog-provider.h \
+ getenv.h \
+ string-utils.c \
+ string-utils.h \
+ event-notifier-notification.c \
+ ns.h \
+ creds.h \
+ rculfhash.c \
+ rculfhash.h \
+ rculfhash-internal.h \
+ rculfhash-mm-chunk.c \
+ rculfhash-mm-mmap.c \
+ rculfhash-mm-order.c \
+ compat_futex.c \
+ futex.h
+
+if HAVE_PERF_EVENT
+liblttng_ust_runtime_la_SOURCES += \
+ lttng-context-perf-counters.c \
+ perf_event.h
+endif
+
+liblttng_ust_support_la_SOURCES = \
+ lttng-tracer.h \
+ lttng-tracer-core.h \
+ ust-core.c \
+ getenv.h \
+ getenv.c \
+ lttng-ust-dynamic-type.c \
+ lttng-rb-clients.h \
+ lttng-ring-buffer-client-template.h \
+ lttng-ring-buffer-client-discard.c \
+ lttng-ring-buffer-client-discard-rt.c \
+ lttng-ring-buffer-client-overwrite.c \
+ lttng-ring-buffer-client-overwrite-rt.c \
+ lttng-ring-buffer-metadata-client-template.h \
+ lttng-ring-buffer-metadata-client.c \
+ lttng-counter-client.h \
+ lttng-counter-client-percpu-32-modular.c \
+ lttng-counter-client-percpu-64-modular.c \
+ lttng-clock.c lttng-getcpu.c
+
+liblttng_ust_la_SOURCES =
+
+liblttng_ust_la_LDFLAGS = -no-undefined -version-info $(LTTNG_UST_LIBRARY_VERSION)
+
+liblttng_ust_support_la_LIBADD = \
+ $(top_builddir)/src/libringbuffer/libringbuffer.la \
+ $(top_builddir)/src/libcounter/libcounter.la
+
+liblttng_ust_la_LIBADD = \
+ -lrt \
+ liblttng-ust-common.la \
+ $(top_builddir)/src/snprintf/libustsnprintf.la \
+ $(top_builddir)/src/liblttng-ust-comm/liblttng-ust-comm.la \
+ liblttng-ust-tracepoint.la \
+ liblttng-ust-runtime.la liblttng-ust-support.la \
+ $(top_builddir)/src/libmsgpack/libmsgpack.la \
+ $(DL_LIBS)
+
+liblttng_ust_la_CFLAGS = -DUST_COMPONENT="liblttng_ust" $(AM_CFLAGS)
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2012-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _BYTECODE_H
+#define _BYTECODE_H
+
+#include <stdint.h>
+#include <lttng/ust-abi.h>
+
+/*
+ * offsets are absolute from start of bytecode.
+ */
+
+struct field_ref {
+ /* Initially, symbol offset. After link, field offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_symbol {
+ /* Symbol offset. */
+ uint16_t offset;
+} __attribute__((packed));
+
+struct get_index_u16 {
+ uint16_t index;
+} __attribute__((packed));
+
+struct get_index_u64 {
+ uint64_t index;
+} __attribute__((packed));
+
+struct literal_numeric {
+ int64_t v;
+} __attribute__((packed));
+
+struct literal_double {
+ double v;
+} __attribute__((packed));
+
+struct literal_string {
+ char string[0];
+} __attribute__((packed));
+
+enum bytecode_op {
+ BYTECODE_OP_UNKNOWN = 0,
+
+ BYTECODE_OP_RETURN = 1,
+
+ /* binary */
+ BYTECODE_OP_MUL = 2,
+ BYTECODE_OP_DIV = 3,
+ BYTECODE_OP_MOD = 4,
+ BYTECODE_OP_PLUS = 5,
+ BYTECODE_OP_MINUS = 6,
+ BYTECODE_OP_BIT_RSHIFT = 7,
+ BYTECODE_OP_BIT_LSHIFT = 8,
+ BYTECODE_OP_BIT_AND = 9,
+ BYTECODE_OP_BIT_OR = 10,
+ BYTECODE_OP_BIT_XOR = 11,
+
+ /* binary comparators */
+ BYTECODE_OP_EQ = 12,
+ BYTECODE_OP_NE = 13,
+ BYTECODE_OP_GT = 14,
+ BYTECODE_OP_LT = 15,
+ BYTECODE_OP_GE = 16,
+ BYTECODE_OP_LE = 17,
+
+	/* string binary comparators */
+ BYTECODE_OP_EQ_STRING = 18,
+ BYTECODE_OP_NE_STRING = 19,
+ BYTECODE_OP_GT_STRING = 20,
+ BYTECODE_OP_LT_STRING = 21,
+ BYTECODE_OP_GE_STRING = 22,
+ BYTECODE_OP_LE_STRING = 23,
+
+ /* s64 binary comparator */
+ BYTECODE_OP_EQ_S64 = 24,
+ BYTECODE_OP_NE_S64 = 25,
+ BYTECODE_OP_GT_S64 = 26,
+ BYTECODE_OP_LT_S64 = 27,
+ BYTECODE_OP_GE_S64 = 28,
+ BYTECODE_OP_LE_S64 = 29,
+
+ /* double binary comparator */
+ BYTECODE_OP_EQ_DOUBLE = 30,
+ BYTECODE_OP_NE_DOUBLE = 31,
+ BYTECODE_OP_GT_DOUBLE = 32,
+ BYTECODE_OP_LT_DOUBLE = 33,
+ BYTECODE_OP_GE_DOUBLE = 34,
+ BYTECODE_OP_LE_DOUBLE = 35,
+
+ /* Mixed S64-double binary comparators */
+ BYTECODE_OP_EQ_DOUBLE_S64 = 36,
+ BYTECODE_OP_NE_DOUBLE_S64 = 37,
+ BYTECODE_OP_GT_DOUBLE_S64 = 38,
+ BYTECODE_OP_LT_DOUBLE_S64 = 39,
+ BYTECODE_OP_GE_DOUBLE_S64 = 40,
+ BYTECODE_OP_LE_DOUBLE_S64 = 41,
+
+ BYTECODE_OP_EQ_S64_DOUBLE = 42,
+ BYTECODE_OP_NE_S64_DOUBLE = 43,
+ BYTECODE_OP_GT_S64_DOUBLE = 44,
+ BYTECODE_OP_LT_S64_DOUBLE = 45,
+ BYTECODE_OP_GE_S64_DOUBLE = 46,
+ BYTECODE_OP_LE_S64_DOUBLE = 47,
+
+ /* unary */
+ BYTECODE_OP_UNARY_PLUS = 48,
+ BYTECODE_OP_UNARY_MINUS = 49,
+ BYTECODE_OP_UNARY_NOT = 50,
+ BYTECODE_OP_UNARY_PLUS_S64 = 51,
+ BYTECODE_OP_UNARY_MINUS_S64 = 52,
+ BYTECODE_OP_UNARY_NOT_S64 = 53,
+ BYTECODE_OP_UNARY_PLUS_DOUBLE = 54,
+ BYTECODE_OP_UNARY_MINUS_DOUBLE = 55,
+ BYTECODE_OP_UNARY_NOT_DOUBLE = 56,
+
+ /* logical */
+ BYTECODE_OP_AND = 57,
+ BYTECODE_OP_OR = 58,
+
+ /* load field ref */
+ BYTECODE_OP_LOAD_FIELD_REF = 59,
+ BYTECODE_OP_LOAD_FIELD_REF_STRING = 60,
+ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE = 61,
+ BYTECODE_OP_LOAD_FIELD_REF_S64 = 62,
+ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE = 63,
+
+ /* load immediate from operand */
+ BYTECODE_OP_LOAD_STRING = 64,
+ BYTECODE_OP_LOAD_S64 = 65,
+ BYTECODE_OP_LOAD_DOUBLE = 66,
+
+ /* cast */
+ BYTECODE_OP_CAST_TO_S64 = 67,
+ BYTECODE_OP_CAST_DOUBLE_TO_S64 = 68,
+ BYTECODE_OP_CAST_NOP = 69,
+
+ /* get context ref */
+ BYTECODE_OP_GET_CONTEXT_REF = 70,
+ BYTECODE_OP_GET_CONTEXT_REF_STRING = 71,
+ BYTECODE_OP_GET_CONTEXT_REF_S64 = 72,
+ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE = 73,
+
+ /* load userspace field ref */
+ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING = 74,
+ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE = 75,
+
+ /*
+ * load immediate star globbing pattern (literal string)
+ * from immediate
+ */
+ BYTECODE_OP_LOAD_STAR_GLOB_STRING = 76,
+
+	/* globbing pattern binary operators */
+ BYTECODE_OP_EQ_STAR_GLOB_STRING = 77,
+ BYTECODE_OP_NE_STAR_GLOB_STRING = 78,
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ BYTECODE_OP_GET_CONTEXT_ROOT = 79,
+ BYTECODE_OP_GET_APP_CONTEXT_ROOT = 80,
+ BYTECODE_OP_GET_PAYLOAD_ROOT = 81,
+
+ BYTECODE_OP_GET_SYMBOL = 82,
+ BYTECODE_OP_GET_SYMBOL_FIELD = 83,
+ BYTECODE_OP_GET_INDEX_U16 = 84,
+ BYTECODE_OP_GET_INDEX_U64 = 85,
+
+ BYTECODE_OP_LOAD_FIELD = 86,
+ BYTECODE_OP_LOAD_FIELD_S8 = 87,
+ BYTECODE_OP_LOAD_FIELD_S16 = 88,
+ BYTECODE_OP_LOAD_FIELD_S32 = 89,
+ BYTECODE_OP_LOAD_FIELD_S64 = 90,
+ BYTECODE_OP_LOAD_FIELD_U8 = 91,
+ BYTECODE_OP_LOAD_FIELD_U16 = 92,
+ BYTECODE_OP_LOAD_FIELD_U32 = 93,
+ BYTECODE_OP_LOAD_FIELD_U64 = 94,
+ BYTECODE_OP_LOAD_FIELD_STRING = 95,
+ BYTECODE_OP_LOAD_FIELD_SEQUENCE = 96,
+ BYTECODE_OP_LOAD_FIELD_DOUBLE = 97,
+
+ BYTECODE_OP_UNARY_BIT_NOT = 98,
+
+ BYTECODE_OP_RETURN_S64 = 99,
+
+ NR_BYTECODE_OPS,
+};
+
+typedef uint8_t bytecode_opcode_t;
+
+struct load_op {
+ bytecode_opcode_t op;
+	/*
+	 * Data to load. The size is known from the opcode (enum
+	 * bytecode_op) or from the string's null terminator.
+	 */
+ char data[0];
+} __attribute__((packed));
+
+struct binary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct unary_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+/* skip_offset is absolute from start of bytecode */
+struct logical_op {
+ bytecode_opcode_t op;
+	uint16_t skip_offset;	/* bytecode insn to skip to, if skipping the second test */
+} __attribute__((packed));
+
+struct cast_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+struct return_op {
+ bytecode_opcode_t op;
+} __attribute__((packed));
+
+#endif /* _BYTECODE_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010 Pierre-Marc Fournier
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_CLOCK_H
+#define _UST_CLOCK_H
+
+#include <time.h>
+#include <sys/time.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+#include <lttng/ust-clock.h>
+
+#include "lttng-ust-uuid.h"
+
+struct lttng_ust_trace_clock {
+ uint64_t (*read64)(void);
+ uint64_t (*freq)(void);
+ int (*uuid)(char *uuid);
+ const char *(*name)(void);
+ const char *(*description)(void);
+};
+
+extern struct lttng_ust_trace_clock *lttng_ust_trace_clock
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_clock_init(void);
+
+/* Use the kernel MONOTONIC clock. */
+
+static __inline__
+uint64_t trace_clock_read64_monotonic(void)
+{
+ struct timespec ts;
+
+ if (caa_unlikely(clock_gettime(CLOCK_MONOTONIC, &ts))) {
+ ts.tv_sec = 0;
+ ts.tv_nsec = 0;
+ }
+ return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
+}
+
+static __inline__
+uint64_t trace_clock_read64(void)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ return trace_clock_read64_monotonic();
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ return ltc->read64();
+ }
+}
+
+#endif /* _UST_CLOCK_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2016 Raphaël Beamonte <raphael.beamonte@gmail.com>
+ * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _UST_COMPAT_H
+#define _UST_COMPAT_H
+
+#include <pthread.h>
+#include <errno.h>
+#include <string.h>
+
+#ifdef __FreeBSD__
+#include <pthread_np.h>
+#endif
+
+#include <lttng/ust-abi.h>
+
+#define LTTNG_UST_PROCNAME_SUFFIX "-ust"
+
+
+#if defined(HAVE_PTHREAD_SETNAME_NP_WITH_TID)
+static inline
+int lttng_pthread_setname_np(const char *name)
+{
+	/*
+	 * Some implementations don't error out on names that are too
+	 * long; check explicitly so that all platforms consistently
+	 * return ERANGE.
+	 */
+ if (strnlen(name, LTTNG_UST_ABI_PROCNAME_LEN) >= LTTNG_UST_ABI_PROCNAME_LEN) {
+ return ERANGE;
+ }
+
+ return pthread_setname_np(pthread_self(), name);
+}
+#elif defined(HAVE_PTHREAD_SETNAME_NP_WITHOUT_TID)
+static inline
+int lttng_pthread_setname_np(const char *name)
+{
+ return pthread_setname_np(name);
+}
+#elif defined(HAVE_PTHREAD_SET_NAME_NP_WITH_TID)
+
+static inline
+int lttng_pthread_setname_np(const char *name)
+{
+ /* Replicate pthread_setname_np's behavior */
+ if (strnlen(name, LTTNG_UST_ABI_PROCNAME_LEN) >= LTTNG_UST_ABI_PROCNAME_LEN) {
+ return ERANGE;
+ }
+
+ pthread_set_name_np(pthread_self(), name);
+ return 0;
+}
+#elif defined(__linux__)
+
+/* Fall back to prctl() on Linux */
+#include <sys/prctl.h>
+
+static inline
+int lttng_pthread_setname_np(const char *name)
+{
+ /* Replicate pthread_setname_np's behavior */
+ if (strnlen(name, LTTNG_UST_ABI_PROCNAME_LEN) >= LTTNG_UST_ABI_PROCNAME_LEN) {
+ return ERANGE;
+ }
+ return prctl(PR_SET_NAME, name, 0, 0, 0);
+}
+#else
+#error "Please add pthread set name support for your OS."
+#endif
+
+
+#if defined(HAVE_PTHREAD_GETNAME_NP_WITH_TID)
+static inline
+int lttng_pthread_getname_np(char *name, size_t len)
+{
+ return pthread_getname_np(pthread_self(), name, len);
+}
+#elif defined(HAVE_PTHREAD_GETNAME_NP_WITHOUT_TID)
+static inline
+int lttng_pthread_getname_np(char *name, size_t len)
+{
+ return pthread_getname_np(name, len);
+}
+#elif defined(HAVE_PTHREAD_GET_NAME_NP_WITH_TID)
+
+static inline
+int lttng_pthread_getname_np(char *name, size_t len)
+{
+ pthread_get_name_np(pthread_self(), name, len);
+ return 0;
+}
+#elif defined(__linux__)
+
+/* Fall back to prctl() on Linux */
+#include <sys/prctl.h>
+
+static inline
+int lttng_pthread_getname_np(char *name, size_t len)
+{
+ return prctl(PR_GET_NAME, name, 0, 0, 0);
+}
+
+#else
+#error "Please add pthread get name support for your OS."
+#endif
+
+/*
+ * If a pthread setname/set_name function is available, define
+ * lttng_ust_setustprocname(), which appends '-ust' to the current
+ * thread name, truncating that name first if needed.
+ */
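+/*
+ * For example, a thread named "sample" is renamed "sample-ust", while a
+ * name long enough to overflow LTTNG_UST_ABI_PROCNAME_LEN is truncated
+ * before the suffix is appended.
+ */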
+static inline
+int lttng_ust_setustprocname(void)
+{
+ int ret = 0, len;
+ char name[LTTNG_UST_ABI_PROCNAME_LEN];
+ int limit = LTTNG_UST_ABI_PROCNAME_LEN - strlen(LTTNG_UST_PROCNAME_SUFFIX) - 1;
+
+ /*
+ * Get the current thread name.
+ */
+ ret = lttng_pthread_getname_np(name, LTTNG_UST_ABI_PROCNAME_LEN);
+ if (ret) {
+ goto error;
+ }
+
+ len = strlen(name);
+ if (len > limit) {
+ len = limit;
+ }
+
+ ret = sprintf(name + len, LTTNG_UST_PROCNAME_SUFFIX);
+ if (ret != strlen(LTTNG_UST_PROCNAME_SUFFIX)) {
+ goto error;
+ }
+
+ ret = lttng_pthread_setname_np(name);
+
+error:
+ return ret;
+}
+
+#include <errno.h>
+
+#ifndef ENODATA
+#define ENODATA ENOMSG
+#endif
+
+#endif /* _UST_COMPAT_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Userspace RCU library - sys_futex compatibility code
+ */
+
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+#include <assert.h>
+#include <errno.h>
+#include <poll.h>
+#include <stdint.h>
+
+#include <urcu/arch.h>
+#include <urcu/system.h>
+#include "futex.h"
+
+/*
+ * Using attribute "weak" for __lttng_ust_compat_futex_lock and
+ * __lttng_ust_compat_futex_cond. Those are globally visible by the entire
+ * program, even though many shared objects may have their own version.
+ * The first version that gets loaded will be used by the entire program
+ * (executable and all shared objects).
+ */
+
+__attribute__((weak))
+pthread_mutex_t __lttng_ust_compat_futex_lock = PTHREAD_MUTEX_INITIALIZER;
+__attribute__((weak))
+pthread_cond_t __lttng_ust_compat_futex_cond = PTHREAD_COND_INITIALIZER;
+
+/*
+ * _NOT SIGNAL-SAFE_. pthread_cond is not signal-safe anyway.
+ * For now, timeout, uaddr2 and val3 are unused.
+ * The waiter will relinquish the CPU until woken up.
+ */
+
+int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret = 0, lockret;
+
+	/*
+	 * Check that the unused parameters are NULL; don't let users
+	 * expect them to be taken into account.
+	 */
+ assert(!timeout);
+ assert(!uaddr2);
+ assert(!val3);
+
+ /*
+	 * Memory barriers to serialize with the previous uaddr modification.
+ */
+ cmm_smp_mb();
+
+ lockret = pthread_mutex_lock(&__lttng_ust_compat_futex_lock);
+ if (lockret) {
+ errno = lockret;
+ ret = -1;
+ goto end;
+ }
+ switch (op) {
+ case FUTEX_WAIT:
+ /*
+		 * Wait until *uaddr is changed to something other than "val".
+ * Comparing *uaddr content against val figures out which
+ * thread has been awakened.
+ */
+ while (CMM_LOAD_SHARED(*uaddr) == val)
+ pthread_cond_wait(&__lttng_ust_compat_futex_cond,
+ &__lttng_ust_compat_futex_lock);
+ break;
+ case FUTEX_WAKE:
+ /*
+		 * Each wake sends a broadcast, thus attempting to wake up
+		 * all waiting threads, regardless of their respective
+		 * uaddr.
+ */
+ pthread_cond_broadcast(&__lttng_ust_compat_futex_cond);
+ break;
+ default:
+ errno = EINVAL;
+ ret = -1;
+ }
+ lockret = pthread_mutex_unlock(&__lttng_ust_compat_futex_lock);
+ if (lockret) {
+ errno = lockret;
+ ret = -1;
+ }
+end:
+ return ret;
+}
+
+/*
+ * _ASYNC SIGNAL-SAFE_.
+ * For now, timeout, uaddr2 and val3 are unused.
+ * Waiter will busy-loop trying to read the condition.
+ * It is OK to use compat_futex_async() on a futex address on which
+ * futex() WAKE operations are also performed.
+ */
+
+int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret = 0;
+
+	/*
+	 * Check that the unused parameters are NULL; don't let users
+	 * expect them to be taken into account.
+	 */
+ assert(!timeout);
+ assert(!uaddr2);
+ assert(!val3);
+
+ /*
+ * Ensure previous memory operations on uaddr have completed.
+ */
+ cmm_smp_mb();
+
+ switch (op) {
+ case FUTEX_WAIT:
+ while (CMM_LOAD_SHARED(*uaddr) == val) {
+ if (poll(NULL, 0, 10) < 0) {
+ ret = -1;
+ /* Keep poll errno. Caller handles EINTR. */
+ goto end;
+ }
+ }
+ break;
+ case FUTEX_WAKE:
+ break;
+ default:
+ errno = EINVAL;
+ ret = -1;
+ }
+end:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2020 (C) Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_CONTEXT_INTERNAL_H
+#define _LTTNG_UST_CONTEXT_INTERNAL_H
+
+#include <lttng/ust-events.h>
+#include "ust-events-internal.h"
+#include "ust-context-provider.h"
+
+int lttng_context_init_all(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_attach_context(struct lttng_ust_abi_context *context_param,
+ union lttng_ust_abi_args *uargs,
+ struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
+ __attribute__((visibility("hidden")));
+
+int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_destroy_context(struct lttng_ust_ctx *ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+ __attribute__((visibility("hidden")));
+
+int lttng_context_is_app(const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vtid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vpid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_cgroup_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_ipc_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_mnt_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_net_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_pid_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_user_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_uts_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_time_ns_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vuid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_veuid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vsuid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vgid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vegid_reset(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_context_vsgid_reset(void)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_dyntest_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
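+/*
+ * Illustrative sketch (not compiled): a plausible calling sequence for
+ * the helpers above, under the assumption that lttng_context_init_all()
+ * populates the default context fields. Error handling is minimal and
+ * the function name build_ctx_example is hypothetical.
+ */
+#if 0
+static int build_ctx_example(struct lttng_ust_ctx **ctx)
+{
+	int ret;
+
+	ret = lttng_context_init_all(ctx);
+	if (ret)
+		return ret;
+	ret = lttng_add_vtid_to_ctx(ctx);	/* append the vtid context */
+	if (ret) {
+		lttng_destroy_context(*ctx);
+		*ctx = NULL;
+	}
+	return ret;
+}
+#endif
+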
+#endif /* _LTTNG_UST_CONTEXT_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2019 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
+#define _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H
+
+#include <stddef.h>
+#include <lttng/ust-events.h>
+
+void lttng_ust_context_set_event_notifier_group_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv,
+ struct lttng_ust_ctx_value *value),
+ void *priv)
+ __attribute__((visibility("hidden")));
+
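+/*
+ * Illustrative sketch (not compiled): registering a provider through
+ * the declaration above. The callbacks here size and read back a
+ * constant value; a real provider would also implement the record
+ * callback. All example_* names and the "$app.example" key are
+ * hypothetical.
+ */
+#if 0
+static size_t example_get_size(void *priv, size_t offset)
+{
+	return 8;	/* raw u64 payload, alignment ignored for brevity */
+}
+
+static void example_get_value(void *priv, struct lttng_ust_ctx_value *value)
+{
+	value->u.s64 = 42;
+}
+
+static void example_register(void)
+{
+	lttng_ust_context_set_event_notifier_group_provider("$app.example",
+			example_get_size, NULL, example_get_value, NULL);
+}
+#endif
+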
+#endif /* _LTTNG_UST_CONTEXT_PROVIDER_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_CREDS_H
+#define _LTTNG_CREDS_H
+
+/*
+ * These are used in the kernel as invalid values.
+ */
+
+#define INVALID_UID ((uid_t) -1)
+#define INVALID_GID ((gid_t) -1)
+
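+/*
+ * Illustrative usage (not compiled): initialize credentials to the
+ * invalid value and test before use, e.g.:
+ *
+ *	uid_t uid = INVALID_UID;
+ *	...
+ *	if (uid == INVALID_UID)
+ *		return;
+ */
+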
+#endif /* _LTTNG_CREDS_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_ERROR_H
+#define _LTTNG_ERROR_H
+
+#include <urcu/compiler.h>
+#include <unistd.h>
+
+#define MAX_ERRNO 4095
+
+static inline
+int IS_ERR_VALUE(long value)
+{
+ if (caa_unlikely((unsigned long) value >= (unsigned long) -MAX_ERRNO))
+ return 1;
+ else
+ return 0;
+}
+
+static inline
+void *ERR_PTR(long error)
+{
+ return (void *) error;
+}
+
+static inline
+long PTR_ERR(const void *ptr)
+{
+ return (long) ptr;
+}
+
+static inline
+int IS_ERR(const void *ptr)
+{
+ return IS_ERR_VALUE((long) ptr);
+}
+
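+/*
+ * Illustrative usage sketch (not compiled): these helpers follow the
+ * Linux kernel convention of encoding a negative errno in the top
+ * MAX_ERRNO values of the pointer range, so one return value carries
+ * either a valid pointer or an error. create_object() and its callee
+ * alloc_object() are hypothetical.
+ */
+#if 0
+static void *create_object(int arg)
+{
+	if (arg < 0)
+		return ERR_PTR(-EINVAL);	/* encode -errno as pointer */
+	return alloc_object(arg);		/* hypothetical allocator */
+}
+
+static int use_object(int arg)
+{
+	void *obj = create_object(arg);
+
+	if (IS_ERR(obj))
+		return (int) PTR_ERR(obj);	/* decode back to -errno */
+	/* ... use obj ... */
+	return 0;
+}
+#endif
+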
+#endif /* _LTTNG_ERROR_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+
+#include <lttng/ust-endian.h>
+#include <usterr-signal-safe.h>
+#include <urcu/rculist.h>
+
+#include "lttng-tracer-core.h"
+#include "ust-events-internal.h"
+#include "../libmsgpack/msgpack.h"
+#include "lttng-bytecode.h"
+#include "ust-share.h"
+
+/*
+ * We want this write to be atomic AND non-blocking, meaning that we
+ * want to write either everything OR nothing.
+ * According to `pipe(7)`, writes of at most `PIPE_BUF` bytes are atomic,
+ * so we bound the capture buffer size to `PIPE_BUF` minus the size of
+ * the notification struct we are sending alongside the capture buffer.
+ */
+#define CAPTURE_BUFFER_SIZE \
+ (PIPE_BUF - sizeof(struct lttng_ust_abi_event_notifier_notification) - 1)
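+
+/*
+ * Illustrative compile-time check (not compiled): with the definition
+ * above, the notification header plus a full capture buffer fits in a
+ * single PIPE_BUF-sized write, which is what keeps the writev() below
+ * atomic.
+ */
+#if 0
+_Static_assert(sizeof(struct lttng_ust_abi_event_notifier_notification)
+		+ CAPTURE_BUFFER_SIZE < PIPE_BUF,
+	"notification + captures must fit in one atomic pipe write");
+#endif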
+
+struct lttng_event_notifier_notification {
+ int notification_fd;
+ uint64_t event_notifier_token;
+ uint8_t capture_buf[CAPTURE_BUFFER_SIZE];
+ struct lttng_msgpack_writer writer;
+ bool has_captures;
+};
+
+static
+void capture_enum(struct lttng_msgpack_writer *writer,
+ struct lttng_interpreter_output *output)
+{
+ lttng_msgpack_begin_map(writer, 2);
+ lttng_msgpack_write_str(writer, "type");
+ lttng_msgpack_write_str(writer, "enum");
+
+ lttng_msgpack_write_str(writer, "value");
+
+ switch (output->type) {
+ case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+ lttng_msgpack_write_signed_integer(writer, output->u.s);
+ break;
+	case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+		lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+ break;
+ default:
+ abort();
+ }
+
+ lttng_msgpack_end_map(writer);
+}
+
+static
+int64_t capture_sequence_element_signed(uint8_t *ptr,
+ const struct lttng_ust_type_integer *integer_type)
+{
+ int64_t value;
+ unsigned int size = integer_type->size;
+ bool byte_order_reversed = integer_type->reverse_byte_order;
+
+ switch (size) {
+	case 8:
+		value = (int8_t) *ptr;	/* sign-extend the 8-bit element */
+		break;
+ case 16:
+ {
+ int16_t tmp;
+ tmp = *(int16_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_16(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 32:
+ {
+ int32_t tmp;
+ tmp = *(int32_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_32(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 64:
+ {
+ int64_t tmp;
+ tmp = *(int64_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_64(tmp);
+
+ value = tmp;
+ break;
+ }
+ default:
+ abort();
+ }
+
+ return value;
+}
+
+static
+uint64_t capture_sequence_element_unsigned(uint8_t *ptr,
+ const struct lttng_ust_type_integer *integer_type)
+{
+ uint64_t value;
+ unsigned int size = integer_type->size;
+ bool byte_order_reversed = integer_type->reverse_byte_order;
+
+ switch (size) {
+ case 8:
+ value = *ptr;
+ break;
+ case 16:
+ {
+ uint16_t tmp;
+ tmp = *(uint16_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_16(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 32:
+ {
+ uint32_t tmp;
+ tmp = *(uint32_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_32(tmp);
+
+ value = tmp;
+ break;
+ }
+ case 64:
+ {
+ uint64_t tmp;
+ tmp = *(uint64_t *) ptr;
+ if (byte_order_reversed)
+ tmp = bswap_64(tmp);
+
+ value = tmp;
+ break;
+ }
+ default:
+ abort();
+ }
+
+ return value;
+}
+
+static
+void capture_sequence(struct lttng_msgpack_writer *writer,
+ struct lttng_interpreter_output *output)
+{
+ const struct lttng_ust_type_integer *integer_type;
+ const struct lttng_ust_type_common *nested_type;
+ uint8_t *ptr;
+ bool signedness;
+ int i;
+
+ lttng_msgpack_begin_array(writer, output->u.sequence.nr_elem);
+
+ ptr = (uint8_t *) output->u.sequence.ptr;
+ nested_type = output->u.sequence.nested_type;
+ switch (nested_type->type) {
+ case lttng_ust_type_integer:
+ integer_type = lttng_ust_get_type_integer(nested_type);
+ break;
+ case lttng_ust_type_enum:
+ /* Treat enumeration as an integer. */
+ integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_enum(nested_type)->container_type);
+ break;
+ default:
+		/* Capture of arrays of non-integers is not supported. */
+ abort();
+ }
+ signedness = integer_type->signedness;
+ for (i = 0; i < output->u.sequence.nr_elem; i++) {
+ if (signedness) {
+ lttng_msgpack_write_signed_integer(writer,
+ capture_sequence_element_signed(ptr, integer_type));
+ } else {
+ lttng_msgpack_write_unsigned_integer(writer,
+ capture_sequence_element_unsigned(ptr, integer_type));
+ }
+
+		/*
+		 * We assume that the alignment is less than or equal to the
+		 * size. This currently holds true, but if it changes in the
+		 * future, we will want to change the pointer arithmetic below
+		 * to take into account that the next element might be further
+		 * away.
+		 */
+ assert(integer_type->alignment <= integer_type->size);
+
+ /* Size is in number of bits. */
+		ptr += integer_type->size / CHAR_BIT;
+ }
+
+ lttng_msgpack_end_array(writer);
+}
+
+static
+void notification_init(struct lttng_event_notifier_notification *notif,
+ struct lttng_ust_event_notifier *event_notifier)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+
+ notif->event_notifier_token = event_notifier->priv->parent.user_token;
+ notif->notification_fd = event_notifier->priv->group->notification_fd;
+ notif->has_captures = false;
+
+ if (event_notifier->priv->num_captures > 0) {
+ lttng_msgpack_writer_init(writer, notif->capture_buf,
+ CAPTURE_BUFFER_SIZE);
+
+ lttng_msgpack_begin_array(writer, event_notifier->priv->num_captures);
+ notif->has_captures = true;
+ }
+}
+
+static
+void notification_append_capture(
+ struct lttng_event_notifier_notification *notif,
+ struct lttng_interpreter_output *output)
+{
+	struct lttng_msgpack_writer *writer = &notif->writer;
+
+ switch (output->type) {
+ case LTTNG_INTERPRETER_TYPE_S64:
+ lttng_msgpack_write_signed_integer(writer, output->u.s);
+ break;
+ case LTTNG_INTERPRETER_TYPE_U64:
+ lttng_msgpack_write_unsigned_integer(writer, output->u.u);
+ break;
+ case LTTNG_INTERPRETER_TYPE_DOUBLE:
+ lttng_msgpack_write_double(writer, output->u.d);
+ break;
+ case LTTNG_INTERPRETER_TYPE_STRING:
+ lttng_msgpack_write_str(writer, output->u.str.str);
+ break;
+ case LTTNG_INTERPRETER_TYPE_SEQUENCE:
+ capture_sequence(writer, output);
+ break;
+ case LTTNG_INTERPRETER_TYPE_SIGNED_ENUM:
+ case LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM:
+ capture_enum(writer, output);
+ break;
+ default:
+ abort();
+ }
+}
+
+static
+void notification_append_empty_capture(
+ struct lttng_event_notifier_notification *notif)
+{
+	lttng_msgpack_write_nil(&notif->writer);
+}
+
+static void record_error(struct lttng_ust_event_notifier *event_notifier)
+{
+ struct lttng_event_notifier_group *event_notifier_group =
+ event_notifier->priv->group;
+ struct lttng_counter *error_counter;
+ size_t dimension_index[1];
+ int ret;
+
+ error_counter = CMM_LOAD_SHARED(event_notifier_group->error_counter);
+	/*
+	 * This load, paired with the store that publishes the error
+	 * counter, orders the creation of the error counter and the
+	 * setting of error_counter_len before the error_counter is used.
+	 * Currently a full memory barrier is used, which could be
+	 * turned into acquire-release barriers.
+	 */
+ cmm_smp_mb();
+ /* This group may not have an error counter attached to it. */
+ if (!error_counter)
+ return;
+
+ dimension_index[0] = event_notifier->priv->error_counter_index;
+	/*
+	 * Use the counter pointer loaded above rather than re-reading
+	 * the shared pointer, which could race with a concurrent detach.
+	 */
+	ret = error_counter->ops->counter_add(error_counter->counter,
+			dimension_index, 1);
+	if (ret)
+		WARN_ON_ONCE(1);
+}
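+
+/*
+ * Illustrative pairing sketch (not compiled): the publishing side that
+ * the comment in record_error() refers to is expected to look roughly
+ * like this, so that a reader observing the counter pointer also
+ * observes a fully initialized counter and error_counter_len. The
+ * group, counter and counter_len names are hypothetical.
+ */
+#if 0
+	group->error_counter_len = counter_len;	/* init before publish */
+	cmm_smp_mb();				/* pairs with reader barrier */
+	CMM_STORE_SHARED(group->error_counter, counter);
+#endif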
+
+static
+void notification_send(struct lttng_event_notifier_notification *notif,
+ struct lttng_ust_event_notifier *event_notifier)
+{
+ ssize_t ret;
+ size_t content_len;
+ int iovec_count = 1;
+ struct lttng_ust_abi_event_notifier_notification ust_notif = {0};
+ struct iovec iov[2];
+
+ assert(notif);
+
+ ust_notif.token = event_notifier->priv->parent.user_token;
+
+	/*
+	 * Prepare sending the notification from multiple buffers using an
+	 * array of `struct iovec`. The first buffer of the vector is the
+	 * notification structure itself and is always present.
+	 */
+ iov[0].iov_base = &ust_notif;
+ iov[0].iov_len = sizeof(ust_notif);
+
+ if (notif->has_captures) {
+ /*
+ * If captures were requested, the second buffer of the array
+ * is the capture buffer.
+ */
+ assert(notif->writer.buffer);
+ content_len = notif->writer.write_pos - notif->writer.buffer;
+
+ assert(content_len > 0 && content_len <= CAPTURE_BUFFER_SIZE);
+
+ iov[1].iov_base = notif->capture_buf;
+ iov[1].iov_len = content_len;
+
+ iovec_count++;
+ } else {
+ content_len = 0;
+ }
+
+	/*
+	 * Update the capture buffer size so that the receiver of the buffer
+	 * knows how much to expect.
+	 */
+ ust_notif.capture_buf_size = content_len;
+
+ /* Send all the buffers. */
+ ret = ust_patient_writev(notif->notification_fd, iov, iovec_count);
+ if (ret == -1) {
+ if (errno == EAGAIN) {
+ record_error(event_notifier);
+ DBG("Cannot send event_notifier notification without blocking: %s",
+ strerror(errno));
+ } else {
+			DBG("Error sending event notifier notification: %s",
+ strerror(errno));
+ abort();
+ }
+ }
+}
+
+void lttng_event_notifier_notification_send(
+ struct lttng_ust_event_notifier *event_notifier,
+ const char *stack_data,
+ struct lttng_ust_notification_ctx *notif_ctx)
+{
+	/*
+	 * This function is called from the probe, so we must not do
+	 * dynamic allocation in this context.
+	 */
+ struct lttng_event_notifier_notification notif = {0};
+
+	notification_init(&notif, event_notifier);
+
+ if (caa_unlikely(notif_ctx->eval_capture)) {
+ struct lttng_ust_bytecode_runtime *capture_bc_runtime;
+
+ /*
+ * Iterate over all the capture bytecodes. If the interpreter
+ * functions returns successfully, append the value of the
+ * `output` parameter to the capture buffer. If the interpreter
+ * fails, append an empty capture to the buffer.
+ */
+ cds_list_for_each_entry_rcu(capture_bc_runtime,
+ &event_notifier->priv->capture_bytecode_runtime_head, node) {
+ struct lttng_interpreter_output output;
+
+ if (capture_bc_runtime->interpreter_func(capture_bc_runtime,
+ stack_data, &output) == LTTNG_UST_BYTECODE_INTERPRETER_OK)
+				notification_append_capture(&notif, &output);
+ else
+				notification_append_empty_capture(&notif);
+ }
+ }
+
+ /*
+ * Send the notification (including the capture buffer) to the
+ * sessiond.
+ */
+	notification_send(&notif, event_notifier);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Userspace RCU - sys_futex/compat_futex header.
+ */
+
+#ifndef _LTTNG_UST_FUTEX_H
+#define _LTTNG_UST_FUTEX_H
+
+#include <errno.h>
+#include <stdint.h>
+#include <time.h>
+#include <sys/syscall.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FUTEX_WAIT 0
+#define FUTEX_WAKE 1
+
+/*
+ * sys_futex compatibility header.
+ * Use *either* futex_noasync *or* futex_async on a given address, never
+ * both.
+ *
+ * futex_noasync cannot be executed in signal handlers, but ensures that
+ * it will be put in a wait queue even in compatibility mode.
+ *
+ * futex_async is signal-handler safe for the wakeup. It uses polling
+ * on the wait-side in compatibility mode.
+ *
+ * BEWARE: sys_futex() FUTEX_WAIT may return early if interrupted
+ * (returns -1 with errno set to EINTR).
+ */
+
+extern int lttng_ust_compat_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+ __attribute__((visibility("hidden")));
+
+extern int lttng_ust_compat_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+ __attribute__((visibility("hidden")));
+
+#if (defined(__linux__) && defined(__NR_futex))
+
+#include <unistd.h>
+#include <errno.h>
+#include <urcu/compiler.h>
+#include <urcu/arch.h>
+
+static inline int lttng_ust_futex(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return syscall(__NR_futex, uaddr, op, val, timeout,
+ uaddr2, val3);
+}
+
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret;
+
+ ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
+ if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+		/*
+		 * The fallback on ENOSYS is the async-safe version of
+		 * the compat futex implementation, because the
+		 * async-safe compat implementation allows being used
+		 * concurrently with calls to futex(). Indeed, sys_futex
+		 * FUTEX_WAIT, on some architectures (mips and parisc),
+		 * within a given process, spuriously returns ENOSYS due
+		 * to signal restart bugs on some kernel versions.
+		 */
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
+ uaddr2, val3);
+ }
+	return ret;
+}
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int ret;
+
+ ret = lttng_ust_futex(uaddr, op, val, timeout, uaddr2, val3);
+ if (caa_unlikely(ret < 0 && errno == ENOSYS)) {
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout,
+ uaddr2, val3);
+ }
+ return ret;
+}
+
+#elif defined(__FreeBSD__)
+
+#include <sys/types.h>
+#include <sys/umtx.h>
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ int umtx_op;
+ void *umtx_uaddr = NULL, *umtx_uaddr2 = NULL;
+ struct _umtx_time umtx_timeout = {
+ ._flags = UMTX_ABSTIME,
+ ._clockid = CLOCK_MONOTONIC,
+ };
+
+ switch (op) {
+ case FUTEX_WAIT:
+ /* On FreeBSD, a "u_int" is a 32-bit integer. */
+ umtx_op = UMTX_OP_WAIT_UINT;
+ if (timeout != NULL) {
+ umtx_timeout._timeout = *timeout;
+ umtx_uaddr = (void *) sizeof(umtx_timeout);
+ umtx_uaddr2 = (void *) &umtx_timeout;
+ }
+ break;
+ case FUTEX_WAKE:
+ umtx_op = UMTX_OP_WAKE;
+ break;
+ default:
+ errno = EINVAL;
+ return -1;
+ }
+
+ return _umtx_op(uaddr, umtx_op, (uint32_t) val, umtx_uaddr,
+ umtx_uaddr2);
+}
+
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+#elif defined(__CYGWIN__)
+
+/*
+ * The futex_noasync compat code uses a weak symbol to share state across
+ * different shared objects, which is not possible on Windows with the
+ * Portable Executable format. Use the async compat code for both cases.
+ */
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+#else
+
+static inline int lttng_ust_futex_noasync(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_noasync(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+static inline int lttng_ust_futex_async(int32_t *uaddr, int op, int32_t val,
+ const struct timespec *timeout, int32_t *uaddr2, int32_t val3)
+{
+ return lttng_ust_compat_futex_async(uaddr, op, val, timeout, uaddr2, val3);
+}
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LTTNG_UST_FUTEX_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <usterr-signal-safe.h>
+#include <ust-helper.h>
+#include "getenv.h"
+
+enum lttng_env_secure {
+ LTTNG_ENV_SECURE,
+ LTTNG_ENV_NOT_SECURE,
+};
+
+struct lttng_env {
+ const char *key;
+ enum lttng_env_secure secure;
+ char *value;
+};
+
+static struct lttng_env lttng_env[] = {
+ /*
+ * LTTNG_UST_DEBUG is used directly by snprintf, because it
+ * needs to be already set for ERR() used in
+ * lttng_ust_getenv_init().
+ */
+ { "LTTNG_UST_DEBUG", LTTNG_ENV_NOT_SECURE, NULL, },
+
+ /* Env. var. which can be used in setuid/setgid executables. */
+ { "LTTNG_UST_WITHOUT_BADDR_STATEDUMP", LTTNG_ENV_NOT_SECURE, NULL, },
+ { "LTTNG_UST_REGISTER_TIMEOUT", LTTNG_ENV_NOT_SECURE, NULL, },
+
+ /* Env. var. which are not fetched in setuid/setgid executables. */
+ { "LTTNG_UST_CLOCK_PLUGIN", LTTNG_ENV_SECURE, NULL, },
+ { "LTTNG_UST_GETCPU_PLUGIN", LTTNG_ENV_SECURE, NULL, },
+ { "LTTNG_UST_ALLOW_BLOCKING", LTTNG_ENV_SECURE, NULL, },
+ { "HOME", LTTNG_ENV_SECURE, NULL, },
+ { "LTTNG_HOME", LTTNG_ENV_SECURE, NULL, },
+};
+
+static
+int lttng_is_setuid_setgid(void)
+{
+ return geteuid() != getuid() || getegid() != getgid();
+}
+
+char *lttng_ust_getenv(const char *name)
+{
+ size_t i;
+ struct lttng_env *e;
+ bool found = false;
+
+ for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
+		e = &lttng_env[i];
+
+ if (strcmp(e->key, name) == 0) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ return NULL;
+ }
+ return e->value;
+}
+
+void lttng_ust_getenv_init(void)
+{
+ size_t i;
+
+ for (i = 0; i < LTTNG_ARRAY_SIZE(lttng_env); i++) {
+		struct lttng_env *e = &lttng_env[i];
+
+ if (e->secure == LTTNG_ENV_SECURE && lttng_is_setuid_setgid()) {
+ ERR("Getting environment variable '%s' from setuid/setgid binary refused for security reasons.",
+ e->key);
+ continue;
+ }
+ e->value = getenv(e->key);
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _COMPAT_GETENV_H
+#define _COMPAT_GETENV_H
+
+/*
+ * Always add the lttng-ust environment variables using the lttng_ust_getenv()
+ * infrastructure rather than using getenv() directly. This ensures that we
+ * don't trigger races between getenv() invoked by lttng-ust listener threads
+ * and setenv() called by an otherwise single-threaded application thread
+ * (the application is not aware that it runs with lttng-ust).
+ */
+
+char *lttng_ust_getenv(const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_getenv_init(void)
+ __attribute__((visibility("hidden")));
+
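+/*
+ * Illustrative usage sketch (not compiled): fetch values once during
+ * the single-threaded initialization phase, then read the cached
+ * copies from any thread. lttng_ust_getenv() returns NULL both for
+ * keys absent from its table and for secure variables refused in
+ * setuid/setgid binaries. parse_timeout() is hypothetical.
+ */
+#if 0
+	const char *timeout_str;
+
+	lttng_ust_getenv_init();
+	timeout_str = lttng_ust_getenv("LTTNG_UST_REGISTER_TIMEOUT");
+	if (timeout_str)
+		parse_timeout(timeout_str);
+#endif
+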
+#endif /* _COMPAT_GETENV_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <stddef.h>
+#include <stdint.h>
+#include <urcu/compiler.h>
+#include <lttng/ust-endian.h>
+
+/*
+ * Hash function
+ * Source: http://burtleburtle.net/bob/c/lookup3.c
+ * Originally Public Domain
+ */
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+#define mix(a, b, c) \
+do { \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c, 16); c += b; \
+ b -= a; b ^= rot(a, 19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+} while (0)
+
+#define final(a, b, c) \
+{ \
+ c ^= b; c -= rot(b, 14); \
+ a ^= c; a -= rot(c, 11); \
+ b ^= a; b -= rot(a, 25); \
+ c ^= b; c -= rot(b, 16); \
+ a ^= c; a -= rot(c, 4);\
+ b ^= a; b -= rot(a, 14); \
+ c ^= b; c -= rot(b, 24); \
+}
+
+#if (BYTE_ORDER == LITTLE_ENDIAN)
+#define HASH_LITTLE_ENDIAN 1
+#else
+#define HASH_LITTLE_ENDIAN 0
+#endif
+
+/*
+ *
+ * hashlittle() -- hash a variable-length key into a 32-bit value
+ * k : the key (the unaligned variable-length array of bytes)
+ * length : the length of the key, counting by bytes
+ * initval : can be any 4-byte value
+ * Returns a 32-bit value. Every bit of the key affects every bit of
+ * the return value. Two keys differing by one or two bits will have
+ * totally different hash values.
+ *
+ * The best hash table sizes are powers of 2. There is no need to do
+ * mod a prime (mod is sooo slow!). If you need less than 32 bits,
+ * use a bitmask. For example, if you need only 10 bits, do
+ * h = (h & hashmask(10));
+ * In which case, the hash table should have hashsize(10) elements.
+ *
+ * If you are hashing n strings (uint8_t **)k, do it like this:
+ * for (i = 0, h = 0; i < n; ++i) h = hashlittle(k[i], len[i], h);
+ *
+ * By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+ * code any way you wish, private, educational, or commercial. It's free.
+ *
+ * Use for hash table lookup, or anything where one collision in 2^^32 is
+ * acceptable. Do NOT use for cryptographic purposes.
+ */
+static
+uint32_t hashlittle(const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a, b, c; /* internal state */
+ union {
+ const void *ptr;
+ size_t i;
+ } u;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *) key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+		/*
+		 * The original jhash.h reads beyond the end of the string, and
+		 * implements a special code path for VALGRIND. It seems to make
+		 * ASan unhappy too though, so considering that hashing event
+		 * names is not a fast path in lttng-ust, remove the "fast" code
+		 * entirely and use the slower but verifiable VALGRIND version of
+		 * the code, which does not issue out-of-bound reads.
+		 */
+ {
+ const uint8_t *k8;
+
+ k8 = (const uint8_t *) k;
+ switch (length) {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t) k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t) k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t) k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+ }
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *) key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t) k[1])<<16);
+ b += k[2] + (((uint32_t) k[3])<<16);
+ c += k[4] + (((uint32_t) k[5])<<16);
+ mix(a, b, c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *) k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t) k[5])<<16);
+ b+=k[2]+(((uint32_t) k[3])<<16);
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 11: c+=((uint32_t) k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t) k[3])<<16);
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t) k[3])<<16);
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t) k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t) k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t) k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a, b, c) */
+ while (length > 12) {
+ a += k[0];
+ a += ((uint32_t) k[1])<<8;
+ a += ((uint32_t) k[2])<<16;
+ a += ((uint32_t) k[3])<<24;
+ b += k[4];
+ b += ((uint32_t) k[5])<<8;
+ b += ((uint32_t) k[6])<<16;
+ b += ((uint32_t) k[7])<<24;
+ c += k[8];
+ c += ((uint32_t) k[9])<<8;
+ c += ((uint32_t) k[10])<<16;
+ c += ((uint32_t) k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch (length) { /* all the case statements fall through */
+ case 12: c+=((uint32_t) k[11])<<24; /* fall through */
+ case 11: c+=((uint32_t) k[10])<<16; /* fall through */
+ case 10: c+=((uint32_t) k[9])<<8; /* fall through */
+ case 9 : c+=k[8]; /* fall through */
+ case 8 : b+=((uint32_t) k[7])<<24; /* fall through */
+ case 7 : b+=((uint32_t) k[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t) k[5])<<8; /* fall through */
+ case 5 : b+=k[4]; /* fall through */
+ case 4 : a+=((uint32_t) k[3])<<24; /* fall through */
+ case 3 : a+=((uint32_t) k[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t) k[1])<<8; /* fall through */
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a, b, c);
+ return c;
+}
+
+static inline
+uint32_t jhash(const void *key, size_t length, uint32_t seed)
+{
+ return hashlittle(key, length, seed);
+}
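+
+/*
+ * Illustrative usage sketch (not compiled): hashing a name into a
+ * power-of-two table with a bitmask instead of a modulo, as the
+ * comment above recommends. Assumes <string.h>; the EXAMPLE_* names
+ * are hypothetical.
+ */
+#if 0
+#define EXAMPLE_TABLE_BITS	10
+#define EXAMPLE_TABLE_SIZE	(1U << EXAMPLE_TABLE_BITS)
+
+static uint32_t example_bucket(const char *name, uint32_t seed)
+{
+	uint32_t h = jhash(name, strlen(name), seed);
+
+	return h & (EXAMPLE_TABLE_SIZE - 1);	/* mask, no modulo */
+}
+#endif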
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode interpreter.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lttng/urcu/pointer.h>
+#include <urcu/rculist.h>
+#include <lttng/ust-endian.h>
+#include <lttng/ust-events.h>
+#include "ust-events-internal.h"
+
+#include "lttng-bytecode.h"
+#include "string-utils.h"
+
+/*
+ * -1: wildcard found.
+ * -2: unknown escape char.
+ * 0: normal char.
+ */
+
+static
+int parse_char(const char **p)
+{
+ switch (**p) {
+ case '\\':
+ (*p)++;
+ switch (**p) {
+ case '\\':
+ case '*':
+ return 0;
+ default:
+ return -2;
+ }
+ case '*':
+ return -1;
+ default:
+ return 0;
+ }
+}
+
+/*
+ * Returns SIZE_MAX if the string is null-terminated, or the number of
+ * characters if not.
+ */
+static
+size_t get_str_or_seq_len(const struct estack_entry *entry)
+{
+ return entry->u.s.seq_len;
+}
+
+static
+int stack_star_glob_match(struct estack *stack, int top,
+ const char *cmp_type __attribute__((unused)))
+{
+ const char *pattern;
+ const char *candidate;
+ size_t pattern_len;
+ size_t candidate_len;
+
+ /* Find out which side is the pattern vs. the candidate. */
+ if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
+ pattern = estack_ax(stack, top)->u.s.str;
+ pattern_len = get_str_or_seq_len(estack_ax(stack, top));
+ candidate = estack_bx(stack, top)->u.s.str;
+ candidate_len = get_str_or_seq_len(estack_bx(stack, top));
+ } else {
+ pattern = estack_bx(stack, top)->u.s.str;
+ pattern_len = get_str_or_seq_len(estack_bx(stack, top));
+ candidate = estack_ax(stack, top)->u.s.str;
+ candidate_len = get_str_or_seq_len(estack_ax(stack, top));
+ }
+
+ /* Perform the match. Returns 0 when the result is true. */
+ return !strutils_star_glob_match(pattern, pattern_len, candidate,
+ candidate_len);
+}
+
+static
+int stack_strcmp(struct estack *stack, int top, const char *cmp_type __attribute__((unused)))
+{
+ const char *p = estack_bx(stack, top)->u.s.str, *q = estack_ax(stack, top)->u.s.str;
+ int ret;
+ int diff;
+
+ for (;;) {
+ int escaped_r0 = 0;
+
+ if (unlikely(p - estack_bx(stack, top)->u.s.str >= estack_bx(stack, top)->u.s.seq_len || *p == '\0')) {
+ if (q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0') {
+ return 0;
+ } else {
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&q);
+ if (ret == -1)
+ return 0;
+ }
+ return -1;
+ }
+ }
+ if (unlikely(q - estack_ax(stack, top)->u.s.str >= estack_ax(stack, top)->u.s.seq_len || *q == '\0')) {
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&p);
+ if (ret == -1)
+ return 0;
+ }
+ return 1;
+ }
+ if (estack_bx(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&p);
+ if (ret == -1) {
+ return 0;
+ } else if (ret == -2) {
+ escaped_r0 = 1;
+ }
+ /* else compare both char */
+ }
+ if (estack_ax(stack, top)->u.s.literal_type ==
+ ESTACK_STRING_LITERAL_TYPE_PLAIN) {
+ ret = parse_char(&q);
+ if (ret == -1) {
+ return 0;
+ } else if (ret == -2) {
+ if (!escaped_r0)
+ return -1;
+ } else {
+ if (escaped_r0)
+ return 1;
+ }
+ } else {
+ if (escaped_r0)
+ return 1;
+ }
+ diff = *p - *q;
+ if (diff != 0)
+ break;
+ p++;
+ q++;
+ }
+ return diff;
+}
+
+int lttng_bytecode_interpret_error(
+ struct lttng_ust_bytecode_runtime *bytecode_runtime __attribute__((unused)),
+ const char *stack_data __attribute__((unused)),
+ void *ctx __attribute__((unused)))
+{
+ return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
+}
+
+#ifdef INTERPRETER_USE_SWITCH
+
+/*
+ * Fallback for compilers that do not support taking address of labels.
+ */
+
+#define START_OP \
+	start_pc = &bytecode->code[0]; \
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
+ pc = next_pc) { \
+ dbg_printf("Executing op %s (%u)\n", \
+ lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc), \
+ (unsigned int) *(bytecode_opcode_t *) pc); \
+ switch (*(bytecode_opcode_t *) pc) {
+
+#define OP(name) jump_target_##name: __attribute__((unused)); \
+ case name
+
+#define PO break
+
+#define END_OP } \
+ }
+
+#define JUMP_TO(name) \
+ goto jump_target_##name
+
+#else
+
+/*
+ * Dispatch-table based interpreter.
+ */
+
+#define START_OP \
+ start_pc = &bytecode->code[0]; \
+ pc = next_pc = start_pc; \
+ if (unlikely(pc - start_pc >= bytecode->len)) \
+ goto end; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define OP(name) \
+LABEL_##name
+
+#define PO \
+ pc = next_pc; \
+ goto *dispatch[*(bytecode_opcode_t *) pc];
+
+#define END_OP
+
+#define JUMP_TO(name) \
+ goto LABEL_##name
+
+#endif
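+
+/*
+ * Illustrative standalone sketch (not compiled) of the dispatch-table
+ * technique behind the macros above: each opcode indexes an array of
+ * label addresses (a GCC/Clang extension), so dispatching the next
+ * instruction is a single indirect goto rather than a switch per
+ * iteration. Bounds checking is omitted; run() is hypothetical.
+ */
+#if 0
+static int run(const uint8_t *code)
+{
+	static void *dispatch[] = {
+		[0] = &&LABEL_HALT,
+		[1] = &&LABEL_NOP,
+	};
+	const uint8_t *pc = code;
+
+	goto *dispatch[*pc];
+LABEL_NOP:
+	pc++;
+	goto *dispatch[*pc];
+LABEL_HALT:
+	return 0;
+}
+#endif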
+
+#define IS_INTEGER_REGISTER(reg_type) \
+ (reg_type == REG_U64 || reg_type == REG_S64)
+
+static int context_get_index(struct lttng_ust_ctx *ctx,
+ struct load_ptr *ptr,
+ uint32_t idx)
+{
+ const struct lttng_ust_ctx_field *ctx_field;
+ const struct lttng_ust_event_field *field;
+ struct lttng_ust_ctx_value v;
+
+ ctx_field = &ctx->fields[idx];
+ field = ctx_field->event_field;
+ ptr->type = LOAD_OBJECT;
+ ptr->field = field;
+
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ ctx_field->get_value(ctx_field->priv, &v);
+ if (lttng_ust_get_type_integer(field->type)->signedness) {
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ case lttng_ust_type_enum:
+ {
+ const struct lttng_ust_type_integer *itype;
+
+ itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
+ ctx_field->get_value(ctx_field->priv, &v);
+ if (itype->signedness) {
+ ptr->object_type = OBJECT_TYPE_SIGNED_ENUM;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ } else {
+ ptr->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+ ptr->u.u64 = v.u.s64; /* Cast. */
+ ptr->ptr = &ptr->u.u64;
+ }
+ break;
+ }
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
+ ERR("Only string arrays are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
+ ERR("Only string sequences are supported for contexts.");
+ return -EINVAL;
+ }
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case lttng_ust_type_string:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->ptr = v.u.str;
+ break;
+ case lttng_ust_type_float:
+ ptr->object_type = OBJECT_TYPE_DOUBLE;
+ ctx_field->get_value(ctx_field->priv, &v);
+ ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ break;
+ case lttng_ust_type_dynamic:
+ ctx_field->get_value(ctx_field->priv, &v);
+ switch (v.sel) {
+ case LTTNG_UST_DYNAMIC_TYPE_NONE:
+ return -EINVAL;
+ case LTTNG_UST_DYNAMIC_TYPE_U8:
+ case LTTNG_UST_DYNAMIC_TYPE_U16:
+ case LTTNG_UST_DYNAMIC_TYPE_U32:
+ case LTTNG_UST_DYNAMIC_TYPE_U64:
+ ptr->object_type = OBJECT_TYPE_U64;
+ ptr->u.u64 = v.u.u64;
+ ptr->ptr = &ptr->u.u64;
+			dbg_printf("context get index dynamic u64 %" PRIu64 "\n", ptr->u.u64);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_S8:
+ case LTTNG_UST_DYNAMIC_TYPE_S16:
+ case LTTNG_UST_DYNAMIC_TYPE_S32:
+ case LTTNG_UST_DYNAMIC_TYPE_S64:
+ ptr->object_type = OBJECT_TYPE_S64;
+ ptr->u.s64 = v.u.s64;
+ ptr->ptr = &ptr->u.s64;
+ dbg_printf("context get index dynamic s64 %" PRIi64 "\n", ptr->u.s64);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_FLOAT:
+ case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
+ ptr->object_type = OBJECT_TYPE_DOUBLE;
+ ptr->u.d = v.u.d;
+ ptr->ptr = &ptr->u.d;
+ dbg_printf("context get index dynamic double %g\n", ptr->u.d);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_STRING:
+ ptr->object_type = OBJECT_TYPE_STRING;
+ ptr->ptr = v.u.str;
+ dbg_printf("context get index dynamic string %s\n", (const char *) ptr->ptr);
+ break;
+ default:
+ dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
+ return -EINVAL;
+ }
+ break;
+ default:
+ ERR("Unknown type: %d", (int) field->type->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int dynamic_get_index(struct lttng_ust_ctx *ctx,
+ struct bytecode_runtime *runtime,
+ uint64_t index, struct estack_entry *stack_top)
+{
+ int ret;
+ const struct bytecode_get_index_data *gid;
+
+ gid = (const struct bytecode_get_index_data *) &runtime->data[index];
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const char *ptr;
+
+ assert(gid->offset < gid->array_len);
+ /* Skip count (unsigned long) */
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ assert(stack_top->u.ptr.field->type->type == lttng_ust_type_array);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const char *ptr;
+ size_t ptr_seq_len;
+
+ ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
+ ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
+ if (gid->offset >= gid->elem.len * ptr_seq_len) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ptr = ptr + gid->offset;
+ stack_top->u.ptr.ptr = ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ assert(stack_top->u.ptr.field->type->type == lttng_ust_type_sequence);
+ stack_top->u.ptr.field = NULL;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ ERR("Nested structures are not supported yet.");
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_VARIANT:
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->u.ptr.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
+ {
+ ret = context_get_index(ctx,
+ &stack_top->u.ptr,
+ gid->ctx_index);
+ if (ret) {
+ goto end;
+ }
+ break;
+ }
+ case LOAD_ROOT_PAYLOAD:
+ stack_top->u.ptr.ptr += gid->offset;
+ if (gid->elem.type == OBJECT_TYPE_STRING)
+ stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
+ stack_top->u.ptr.object_type = gid->elem.type;
+ stack_top->u.ptr.type = LOAD_OBJECT;
+ stack_top->u.ptr.field = gid->field;
+ stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
+ break;
+ }
+
+ stack_top->type = REG_PTR;
+
+ return 0;
+
+end:
+ return ret;
+}
+
+static int dynamic_load_field(struct estack_entry *stack_top)
+{
+ int ret;
+
+ switch (stack_top->u.ptr.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Interpreter warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_S64;
+ break;
+ case OBJECT_TYPE_S16:
+ {
+ int16_t tmp;
+
+ dbg_printf("op load field s16\n");
+ tmp = *(int16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S32:
+ {
+ int32_t tmp;
+
+ dbg_printf("op load field s32\n");
+ tmp = *(int32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_S64:
+ {
+ int64_t tmp;
+
+ dbg_printf("op load field s64\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_SIGNED_ENUM:
+ {
+ int64_t tmp;
+
+ dbg_printf("op load field signed enumeration\n");
+ tmp = *(int64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_S64;
+ break;
+ }
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
+ stack_top->type = REG_U64;
+ break;
+ case OBJECT_TYPE_U16:
+ {
+ uint16_t tmp;
+
+ dbg_printf("op load field u16\n");
+ tmp = *(uint16_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_16(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U32:
+ {
+ uint32_t tmp;
+
+ dbg_printf("op load field u32\n");
+ tmp = *(uint32_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_32(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_U64:
+ {
+ uint64_t tmp;
+
+ dbg_printf("op load field u64\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ {
+ uint64_t tmp;
+
+ dbg_printf("op load field unsigned enumeration\n");
+ tmp = *(uint64_t *) stack_top->u.ptr.ptr;
+ if (stack_top->u.ptr.rev_bo)
+ tmp = bswap_64(tmp);
+ stack_top->u.v = tmp;
+ stack_top->type = REG_U64;
+ break;
+ }
+ case OBJECT_TYPE_DOUBLE:
+ memcpy(&stack_top->u.d,
+ stack_top->u.ptr.ptr,
+ sizeof(struct literal_double));
+ stack_top->type = REG_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) stack_top->u.ptr.ptr;
+ stack_top->u.s.str = str;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.seq_len = SIZE_MAX;
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ stack_top->type = REG_STRING;
+ break;
+ }
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = stack_top->u.ptr.ptr;
+ stack_top->u.s.seq_len = *(unsigned long *) ptr;
+ stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ stack_top->type = REG_STRING;
+ if (unlikely(!stack_top->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ stack_top->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ break;
+ }
+ case OBJECT_TYPE_DYNAMIC:
+ /*
+ * Dynamic types in context are looked up
+ * by context get index.
+ */
+ ret = -EINVAL;
+ goto end;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
+static
+int lttng_bytecode_interpret_format_output(struct estack_entry *ax,
+ struct lttng_interpreter_output *output)
+{
+ int ret;
+
+again:
+ switch (ax->type) {
+ case REG_S64:
+ output->type = LTTNG_INTERPRETER_TYPE_S64;
+ output->u.s = ax->u.v;
+ break;
+ case REG_U64:
+ output->type = LTTNG_INTERPRETER_TYPE_U64;
+ output->u.u = (uint64_t) ax->u.v;
+ break;
+ case REG_DOUBLE:
+ output->type = LTTNG_INTERPRETER_TYPE_DOUBLE;
+ output->u.d = ax->u.d;
+ break;
+ case REG_STRING:
+ output->type = LTTNG_INTERPRETER_TYPE_STRING;
+ output->u.str.str = ax->u.s.str;
+ output->u.str.len = ax->u.s.seq_len;
+ break;
+ case REG_PTR:
+ switch (ax->u.ptr.object_type) {
+ case OBJECT_TYPE_S8:
+ case OBJECT_TYPE_S16:
+ case OBJECT_TYPE_S32:
+ case OBJECT_TYPE_S64:
+ case OBJECT_TYPE_U8:
+ case OBJECT_TYPE_U16:
+ case OBJECT_TYPE_U32:
+ case OBJECT_TYPE_U64:
+ case OBJECT_TYPE_DOUBLE:
+ case OBJECT_TYPE_STRING:
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ /* Retry after loading ptr into stack top. */
+ goto again;
+ case OBJECT_TYPE_SEQUENCE:
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = *(unsigned long *) ax->u.ptr.ptr;
+ output->u.sequence.nested_type = lttng_ust_get_type_sequence(ax->u.ptr.field->type)->elem_type;
+ break;
+ case OBJECT_TYPE_ARRAY:
+ /* Skip count (unsigned long) */
+ output->type = LTTNG_INTERPRETER_TYPE_SEQUENCE;
+ output->u.sequence.ptr = *(const char **) (ax->u.ptr.ptr + sizeof(unsigned long));
+ output->u.sequence.nr_elem = lttng_ust_get_type_array(ax->u.ptr.field->type)->length;
+ output->u.sequence.nested_type = lttng_ust_get_type_array(ax->u.ptr.field->type)->elem_type;
+ break;
+ case OBJECT_TYPE_SIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_SIGNED_ENUM;
+ output->u.s = ax->u.v;
+ break;
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ ret = dynamic_load_field(ax);
+ if (ret)
+ return ret;
+ output->type = LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM;
+ output->u.u = ax->u.v;
+ break;
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ default:
+ return -EINVAL;
+ }
+
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Return LTTNG_UST_BYTECODE_INTERPRETER_OK on success.
+ * Return LTTNG_UST_BYTECODE_INTERPRETER_ERROR on error.
+ *
+ * For FILTER bytecode: expect a struct lttng_ust_bytecode_filter_ctx *
+ * as @ctx argument.
+ * For CAPTURE bytecode: expect a struct lttng_interpreter_output *
+ * as @ctx argument.
+ */
+int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *ust_bytecode,
+ const char *interpreter_stack_data,
+ void *caller_ctx)
+{
+ struct bytecode_runtime *bytecode = caa_container_of(ust_bytecode, struct bytecode_runtime, p);
+ struct lttng_ust_ctx *ctx = lttng_ust_rcu_dereference(*ust_bytecode->pctx);
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL, retval = 0;
+ struct estack _stack;
+ struct estack *stack = &_stack;
+ register int64_t ax = 0, bx = 0;
+ register enum entry_type ax_t = REG_UNKNOWN, bx_t = REG_UNKNOWN;
+ register int top = INTERPRETER_STACK_EMPTY;
+#ifndef INTERPRETER_USE_SWITCH
+ static void *dispatch[NR_BYTECODE_OPS] = {
+ [ BYTECODE_OP_UNKNOWN ] = &&LABEL_BYTECODE_OP_UNKNOWN,
+
+ [ BYTECODE_OP_RETURN ] = &&LABEL_BYTECODE_OP_RETURN,
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = &&LABEL_BYTECODE_OP_MUL,
+ [ BYTECODE_OP_DIV ] = &&LABEL_BYTECODE_OP_DIV,
+ [ BYTECODE_OP_MOD ] = &&LABEL_BYTECODE_OP_MOD,
+ [ BYTECODE_OP_PLUS ] = &&LABEL_BYTECODE_OP_PLUS,
+ [ BYTECODE_OP_MINUS ] = &&LABEL_BYTECODE_OP_MINUS,
+ [ BYTECODE_OP_BIT_RSHIFT ] = &&LABEL_BYTECODE_OP_BIT_RSHIFT,
+ [ BYTECODE_OP_BIT_LSHIFT ] = &&LABEL_BYTECODE_OP_BIT_LSHIFT,
+ [ BYTECODE_OP_BIT_AND ] = &&LABEL_BYTECODE_OP_BIT_AND,
+ [ BYTECODE_OP_BIT_OR ] = &&LABEL_BYTECODE_OP_BIT_OR,
+ [ BYTECODE_OP_BIT_XOR ] = &&LABEL_BYTECODE_OP_BIT_XOR,
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = &&LABEL_BYTECODE_OP_EQ,
+ [ BYTECODE_OP_NE ] = &&LABEL_BYTECODE_OP_NE,
+ [ BYTECODE_OP_GT ] = &&LABEL_BYTECODE_OP_GT,
+ [ BYTECODE_OP_LT ] = &&LABEL_BYTECODE_OP_LT,
+ [ BYTECODE_OP_GE ] = &&LABEL_BYTECODE_OP_GE,
+ [ BYTECODE_OP_LE ] = &&LABEL_BYTECODE_OP_LE,
+
+ /* string binary comparator */
+ [ BYTECODE_OP_EQ_STRING ] = &&LABEL_BYTECODE_OP_EQ_STRING,
+ [ BYTECODE_OP_NE_STRING ] = &&LABEL_BYTECODE_OP_NE_STRING,
+ [ BYTECODE_OP_GT_STRING ] = &&LABEL_BYTECODE_OP_GT_STRING,
+ [ BYTECODE_OP_LT_STRING ] = &&LABEL_BYTECODE_OP_LT_STRING,
+ [ BYTECODE_OP_GE_STRING ] = &&LABEL_BYTECODE_OP_GE_STRING,
+ [ BYTECODE_OP_LE_STRING ] = &&LABEL_BYTECODE_OP_LE_STRING,
+
+ /* globbing pattern binary comparator */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_EQ_STAR_GLOB_STRING,
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_NE_STAR_GLOB_STRING,
+
+ /* s64 binary comparator */
+ [ BYTECODE_OP_EQ_S64 ] = &&LABEL_BYTECODE_OP_EQ_S64,
+ [ BYTECODE_OP_NE_S64 ] = &&LABEL_BYTECODE_OP_NE_S64,
+ [ BYTECODE_OP_GT_S64 ] = &&LABEL_BYTECODE_OP_GT_S64,
+ [ BYTECODE_OP_LT_S64 ] = &&LABEL_BYTECODE_OP_LT_S64,
+ [ BYTECODE_OP_GE_S64 ] = &&LABEL_BYTECODE_OP_GE_S64,
+ [ BYTECODE_OP_LE_S64 ] = &&LABEL_BYTECODE_OP_LE_S64,
+
+ /* double binary comparator */
+ [ BYTECODE_OP_EQ_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE,
+ [ BYTECODE_OP_NE_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_DOUBLE,
+ [ BYTECODE_OP_GT_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_DOUBLE,
+ [ BYTECODE_OP_LT_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_DOUBLE,
+ [ BYTECODE_OP_GE_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_DOUBLE,
+ [ BYTECODE_OP_LE_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_DOUBLE,
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_EQ_DOUBLE_S64,
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_NE_DOUBLE_S64,
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GT_DOUBLE_S64,
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LT_DOUBLE_S64,
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_GE_DOUBLE_S64,
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = &&LABEL_BYTECODE_OP_LE_DOUBLE_S64,
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_EQ_S64_DOUBLE,
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_NE_S64_DOUBLE,
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GT_S64_DOUBLE,
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LT_S64_DOUBLE,
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_GE_S64_DOUBLE,
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = &&LABEL_BYTECODE_OP_LE_S64_DOUBLE,
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = &&LABEL_BYTECODE_OP_UNARY_PLUS,
+ [ BYTECODE_OP_UNARY_MINUS ] = &&LABEL_BYTECODE_OP_UNARY_MINUS,
+ [ BYTECODE_OP_UNARY_NOT ] = &&LABEL_BYTECODE_OP_UNARY_NOT,
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_S64,
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_S64,
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = &&LABEL_BYTECODE_OP_UNARY_NOT_S64,
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_PLUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_MINUS_DOUBLE,
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = &&LABEL_BYTECODE_OP_UNARY_NOT_DOUBLE,
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = &&LABEL_BYTECODE_OP_AND,
+ [ BYTECODE_OP_OR ] = &&LABEL_BYTECODE_OP_OR,
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF,
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_S64,
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_REF_DOUBLE,
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STRING,
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_BYTECODE_OP_LOAD_STAR_GLOB_STRING,
+ [ BYTECODE_OP_LOAD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_S64,
+ [ BYTECODE_OP_LOAD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_DOUBLE,
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_TO_S64,
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_BYTECODE_OP_CAST_DOUBLE_TO_S64,
+ [ BYTECODE_OP_CAST_NOP ] = &&LABEL_BYTECODE_OP_CAST_NOP,
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF,
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_STRING,
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_S64,
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_REF_DOUBLE,
+
+ /* Instructions for recursive traversal through composed types. */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_BYTECODE_OP_GET_APP_CONTEXT_ROOT,
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = &&LABEL_BYTECODE_OP_GET_PAYLOAD_ROOT,
+
+ [ BYTECODE_OP_GET_SYMBOL ] = &&LABEL_BYTECODE_OP_GET_SYMBOL,
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = &&LABEL_BYTECODE_OP_GET_SYMBOL_FIELD,
+ [ BYTECODE_OP_GET_INDEX_U16 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U16,
+ [ BYTECODE_OP_GET_INDEX_U64 ] = &&LABEL_BYTECODE_OP_GET_INDEX_U64,
+
+ [ BYTECODE_OP_LOAD_FIELD ] = &&LABEL_BYTECODE_OP_LOAD_FIELD,
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S8,
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S16,
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S32,
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_S64,
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U8,
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U16,
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U32,
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_U64,
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_STRING,
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_SEQUENCE,
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_BYTECODE_OP_LOAD_FIELD_DOUBLE,
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = &&LABEL_BYTECODE_OP_UNARY_BIT_NOT,
+
+ [ BYTECODE_OP_RETURN_S64 ] = &&LABEL_BYTECODE_OP_RETURN_S64,
+ };
+#endif /* #ifndef INTERPRETER_USE_SWITCH */
+
+ START_OP
+
+ OP(BYTECODE_OP_UNKNOWN):
+ OP(BYTECODE_OP_LOAD_FIELD_REF):
+#ifdef INTERPRETER_USE_SWITCH
+ default:
+#endif /* INTERPRETER_USE_SWITCH */
+ ERR("unknown bytecode op %u",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN):
+ /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
+ /* Handle dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64:
+ case REG_U64:
+ retval = !!estack_ax_v;
+ break;
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ if (ust_bytecode->type != LTTNG_UST_BYTECODE_TYPE_CAPTURE) {
+ ret = -EINVAL;
+ goto end;
+ }
+ retval = 0;
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ OP(BYTECODE_OP_RETURN_S64):
+ /* LTTNG_UST_BYTECODE_INTERPRETER_ERROR or LTTNG_UST_BYTECODE_INTERPRETER_OK */
+ retval = !!estack_ax_v;
+ ret = 0;
+ goto end;
+
+ /* binary */
+ OP(BYTECODE_OP_MUL):
+ OP(BYTECODE_OP_DIV):
+ OP(BYTECODE_OP_MOD):
+ OP(BYTECODE_OP_PLUS):
+ OP(BYTECODE_OP_MINUS):
+ ERR("unsupported bytecode op %u",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ OP(BYTECODE_OP_EQ):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_EQ_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_EQ_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_EQ_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_EQ_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STRING);
+ case REG_STAR_GLOB_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_EQ_STAR_GLOB_STRING);
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_NE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_NE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_NE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_NE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_NE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STRING);
+ case REG_STAR_GLOB_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ case REG_DOUBLE:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_NE_STAR_GLOB_STRING);
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_GT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GT_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GT_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_GT_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_LT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LT_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LT_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_LT_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_GE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_GE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_GE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_GE_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_LE):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LE_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LE_DOUBLE_S64);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_DOUBLE:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_LE_S64_DOUBLE);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_LE_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case REG_STRING:
+ switch (estack_bx_t) {
+ case REG_S64: /* Fall-through */
+ case REG_U64: /* Fall-through */
+ case REG_DOUBLE: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ JUMP_TO(BYTECODE_OP_LE_STRING);
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_bx_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
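+	/*
+	 * Typed comparators: pop both operands, push the boolean outcome
+	 * as an S64 register. String variants go through stack_strcmp(),
+	 * globbing patterns through stack_star_glob_match().
+	 */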
+ OP(BYTECODE_OP_EQ_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">") > 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<") < 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, ">=") >= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_STRING):
+ {
+ int res;
+
+ res = (stack_strcmp(stack, top, "<=") <= 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "==") == 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_STAR_GLOB_STRING):
+ {
+ int res;
+
+ res = (stack_star_glob_match(stack, top, "!=") != 0);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d == estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d != estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d > estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d < estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d >= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d <= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* Mixed S64-double binary comparators */
+ OP(BYTECODE_OP_EQ_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d == estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d != estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d > estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d < estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d >= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_DOUBLE_S64):
+ {
+ int res;
+
+ res = (estack_bx(stack, top)->u.d <= estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_EQ_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v == estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_NE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v != estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GT_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v > estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LT_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v < estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_GE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v >= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LE_S64_DOUBLE):
+ {
+ int res;
+
+ res = (estack_bx_v <= estack_ax(stack, top)->u.d);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
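+	/*
+	 * Bitwise ops accept integer registers only. Shift amounts are
+	 * range-checked against [0, 64) beforehand, since shifting a
+	 * 64-bit value by 64 or more (or by a negative count) is undefined
+	 * behavior in C. Results are typed REG_U64.
+	 */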
+ OP(BYTECODE_OP_BIT_RSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_LSHIFT):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Catch undefined behavior. */
+ if (caa_unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_AND):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_OR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_BIT_XOR):
+ {
+ int64_t res;
+
+ if (!IS_INTEGER_REGISTER(estack_ax_t) || !IS_INTEGER_REGISTER(estack_bx_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = res;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ PO;
+ }
+
+ /* unary */
+ OP(BYTECODE_OP_UNARY_PLUS):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_PLUS_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_PLUS_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_UNARY_MINUS):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_MINUS_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_MINUS_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ OP(BYTECODE_OP_UNARY_NOT):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64: /* Fall-through. */
+ case REG_U64:
+ JUMP_TO(BYTECODE_OP_UNARY_NOT_S64);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_UNARY_NOT_DOUBLE);
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_UNARY_BIT_NOT):
+ {
+ /* Dynamic typing. */
+ if (!IS_INTEGER_REGISTER(estack_ax_t)) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ estack_ax_v = ~(uint64_t) estack_ax_v;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_UNARY_PLUS_S64):
+ OP(BYTECODE_OP_UNARY_PLUS_DOUBLE):
+ {
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_S64):
+ {
+ estack_ax_v = -estack_ax_v;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_MINUS_DOUBLE):
+ {
+ estack_ax(stack, top)->u.d = -estack_ax(stack, top)->u.d;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_S64):
+ {
+ estack_ax_v = !estack_ax_v;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+ OP(BYTECODE_OP_UNARY_NOT_DOUBLE):
+ {
+ estack_ax_v = !estack_ax(stack, top)->u.d;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ PO;
+ }
+
+ /* logical */
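+	/*
+	 * Short-circuit evaluation: each logical_op embeds a precomputed
+	 * skip_offset. When the left operand alone decides the result
+	 * (0 for AND, nonzero for OR), execution jumps past the right-hand
+	 * side with the result left in AX; otherwise the operand is popped
+	 * and the right-hand side is evaluated.
+	 */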
+ OP(BYTECODE_OP_AND):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
+ ret = -EINVAL;
+ goto end;
+ }
+ /* If AX is 0, skip and evaluate to 0 */
+ if (unlikely(estack_ax_v == 0)) {
+ dbg_printf("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+ OP(BYTECODE_OP_OR):
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (estack_ax_t != REG_S64 && estack_ax_t != REG_U64) {
+ ret = -EINVAL;
+ goto end;
+ }
+ /* If AX is nonzero, skip and evaluate to 1 */
+ if (unlikely(estack_ax_v != 0)) {
+ estack_ax_v = 1;
+ dbg_printf("Jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ next_pc = start_pc + insn->skip_offset;
+ } else {
+ /* Pop 1 when jump not taken */
+ estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ next_pc += sizeof(struct logical_op);
+ }
+ PO;
+ }
+
+ /* load field ref */
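+	/*
+	 * Field refs read at a fixed, pre-relocated byte offset within
+	 * interpreter_stack_data, the serialized event payload handed to
+	 * the interpreter. NULL strings and sequences are rejected.
+	 */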
+ OP(BYTECODE_OP_LOAD_FIELD_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type string\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str =
+ *(const char * const *) &interpreter_stack_data[ref->offset];
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax_t = REG_STRING;
+ dbg_printf("ref load string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type sequence\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.seq_len =
+ *(unsigned long *) &interpreter_stack_data[ref->offset];
+ estack_ax(stack, top)->u.s.str =
+ *(const char **) (&interpreter_stack_data[ref->offset
+ + sizeof(unsigned long)]);
+ estack_ax_t = REG_STRING;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type s64\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v =
+ ((struct literal_numeric *) &interpreter_stack_data[ref->offset])->v;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref load s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_REF_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("load field ref offset %u type double\n",
+ ref->offset);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, &interpreter_stack_data[ref->offset],
+ sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref load double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ /* load from immediate operand */
+ OP(BYTECODE_OP_LOAD_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("load string %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_PLAIN;
+ estack_ax_t = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_STAR_GLOB_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("load globbing pattern %s\n", insn->data);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = insn->data;
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
+ estack_ax_t = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = ((struct literal_numeric *) insn->data)->v;
+ estack_ax_t = REG_S64;
+ dbg_printf("load s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, insn->data,
+ sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("load double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ PO;
+ }
+
+ /* cast */
+ OP(BYTECODE_OP_CAST_TO_S64):
+ {
+ /* Dynamic typing. */
+ switch (estack_ax_t) {
+ case REG_S64:
+ JUMP_TO(BYTECODE_OP_CAST_NOP);
+ case REG_DOUBLE:
+ JUMP_TO(BYTECODE_OP_CAST_DOUBLE_TO_S64);
+ case REG_U64:
+ estack_ax_t = REG_S64;
+			next_pc += sizeof(struct cast_op);
+			PO;
+ case REG_STRING: /* Fall-through */
+ case REG_STAR_GLOB_STRING:
+ ret = -EINVAL;
+ goto end;
+ default:
+ ERR("Unknown interpreter register type (%d)",
+ (int) estack_ax_t);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ OP(BYTECODE_OP_CAST_DOUBLE_TO_S64):
+ {
+ estack_ax_v = (int64_t) estack_ax(stack, top)->u.d;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_CAST_NOP):
+ {
+ next_pc += sizeof(struct cast_op);
+ PO;
+ }
+
+ /* get context ref */
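+	/*
+	 * Context refs fetch the value through the context field's
+	 * get_value() callback and push it with the matching register
+	 * type; the untyped variant switches on the value's dynamic type
+	 * selector at runtime.
+	 */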
+ OP(BYTECODE_OP_GET_CONTEXT_REF):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type dynamic\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ switch (v.sel) {
+ case LTTNG_UST_DYNAMIC_TYPE_NONE:
+ ret = -EINVAL;
+ goto end;
+ case LTTNG_UST_DYNAMIC_TYPE_S64:
+ estack_ax_v = v.u.s64;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref get context dynamic s64 %" PRIi64 "\n", estack_ax_v);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_DOUBLE:
+ estack_ax(stack, top)->u.d = v.u.d;
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref get context dynamic double %g\n", estack_ax(stack, top)->u.d);
+ break;
+ case LTTNG_UST_DYNAMIC_TYPE_STRING:
+ estack_ax(stack, top)->u.s.str = v.u.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ dbg_printf("ref get context dynamic string %s\n", estack_ax(stack, top)->u.s.str);
+ estack_ax_t = REG_STRING;
+ break;
+ default:
+ dbg_printf("Interpreter warning: unknown dynamic type (%d).\n", (int) v.sel);
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_STRING):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type string\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.s.str = v.u.str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax_t = REG_STRING;
+ dbg_printf("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_S64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type s64\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax_v = v.u.s64;
+ estack_ax_t = REG_S64;
+ dbg_printf("ref get context s64 %" PRIi64 "\n", estack_ax_v);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_CONTEXT_REF_DOUBLE):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+ const struct lttng_ust_ctx_field *ctx_field;
+ struct lttng_ust_ctx_value v;
+
+ dbg_printf("get context ref offset %u type double\n",
+ ref->offset);
+ ctx_field = &ctx->fields[ref->offset];
+ ctx_field->get_value(ctx_field->priv, &v);
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ memcpy(&estack_ax(stack, top)->u.d, &v.u.d, sizeof(struct literal_double));
+ estack_ax_t = REG_DOUBLE;
+ dbg_printf("ref get context double %g\n", estack_ax(stack, top)->u.d);
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ PO;
+ }
+
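+	/*
+	 * Traversal roots push a typed pointer (REG_PTR) marking where a
+	 * subsequent get-symbol/get-index/load-field sequence starts:
+	 * static context, application context, or event payload.
+	 */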
+ OP(BYTECODE_OP_GET_CONTEXT_ROOT):
+ {
+ dbg_printf("op get context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_APP_CONTEXT_ROOT):
+ {
+ dbg_printf("op get app context root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_APP_CONTEXT;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_PAYLOAD_ROOT):
+ {
+		dbg_printf("op get payload root\n");
+ estack_push(stack, top, ax, bx, ax_t, bx_t);
+ estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
+ estack_ax(stack, top)->u.ptr.ptr = interpreter_stack_data;
+ /* "field" only needed for variants. */
+ estack_ax(stack, top)->u.ptr.field = NULL;
+ estack_ax_t = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL):
+ {
+ dbg_printf("op get symbol\n");
+ switch (estack_ax(stack, top)->u.ptr.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ /*
+ * symbol lookup is performed by
+ * specialization.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_SYMBOL_FIELD):
+ {
+ /*
+ * Used for first variant encountered in a
+ * traversal. Variants are not implemented yet.
+ */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U16):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ PO;
+ }
+
+ OP(BYTECODE_OP_GET_INDEX_U64):
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ ret = dynamic_get_index(ctx, bytecode, index->index, estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD):
+ {
+ dbg_printf("op load field\n");
+ ret = dynamic_load_field(estack_ax(stack, top));
+ if (ret)
+ goto end;
+ estack_ax_v = estack_ax(stack, top)->u.v;
+ estack_ax_t = estack_ax(stack, top)->type;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_S8):
+ {
+ dbg_printf("op load field s8\n");
+
+ estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S16):
+ {
+ dbg_printf("op load field s16\n");
+
+ estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S32):
+ {
+ dbg_printf("op load field s32\n");
+
+ estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_S64):
+ {
+ dbg_printf("op load field s64\n");
+
+ estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_S64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U8):
+ {
+ dbg_printf("op load field u8\n");
+
+ estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U16):
+ {
+ dbg_printf("op load field u16\n");
+
+ estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U32):
+ {
+ dbg_printf("op load field u32\n");
+
+ estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_U64):
+ {
+ dbg_printf("op load field u64\n");
+
+ estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax_t = REG_U64;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+ OP(BYTECODE_OP_LOAD_FIELD_DOUBLE):
+ {
+ dbg_printf("op load field double\n");
+
+ memcpy(&estack_ax(stack, top)->u.d,
+ estack_ax(stack, top)->u.ptr.ptr,
+ sizeof(struct literal_double));
+ estack_ax(stack, top)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_STRING):
+ {
+ const char *str;
+
+ dbg_printf("op load field string\n");
+ str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.str = str;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL string.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ estack_ax(stack, top)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ OP(BYTECODE_OP_LOAD_FIELD_SEQUENCE):
+ {
+ const char *ptr;
+
+ dbg_printf("op load field string sequence\n");
+ ptr = estack_ax(stack, top)->u.ptr.ptr;
+ estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
+ estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
+ estack_ax(stack, top)->type = REG_STRING;
+ if (unlikely(!estack_ax(stack, top)->u.s.str)) {
+ dbg_printf("Interpreter warning: loading a NULL sequence.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ estack_ax(stack, top)->u.s.literal_type =
+ ESTACK_STRING_LITERAL_TYPE_NONE;
+ next_pc += sizeof(struct load_op);
+ PO;
+ }
+
+ END_OP
+end:
+ /* No need to prepare output if an error occurred. */
+ if (ret)
+ return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
+
+ /* Prepare output. */
+ switch (ust_bytecode->type) {
+ case LTTNG_UST_BYTECODE_TYPE_FILTER:
+ {
+ struct lttng_ust_bytecode_filter_ctx *filter_ctx =
+ (struct lttng_ust_bytecode_filter_ctx *) caller_ctx;
+ if (retval)
+ filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_ACCEPT;
+ else
+ filter_ctx->result = LTTNG_UST_BYTECODE_FILTER_REJECT;
+ break;
+ }
+ case LTTNG_UST_BYTECODE_TYPE_CAPTURE:
+ ret = lttng_bytecode_interpret_format_output(estack_ax(stack, top),
+ (struct lttng_interpreter_output *) caller_ctx);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ return LTTNG_UST_BYTECODE_INTERPRETER_ERROR;
+ else
+ return LTTNG_UST_BYTECODE_INTERPRETER_OK;
+}
+
+/*
+ * Return LTTNG_UST_EVENT_FILTER_ACCEPT or LTTNG_UST_EVENT_FILTER_REJECT.
+ */
+int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
+ const char *interpreter_stack_data,
+ void *event_filter_ctx __attribute__((unused)))
+{
+ struct lttng_ust_bytecode_runtime *filter_bc_runtime;
+ struct cds_list_head *filter_bytecode_runtime_head = &event->priv->filter_bytecode_runtime_head;
+ struct lttng_ust_bytecode_filter_ctx bytecode_filter_ctx;
+ bool filter_record = false;
+
+ cds_list_for_each_entry_rcu(filter_bc_runtime, filter_bytecode_runtime_head, node) {
+ if (caa_likely(filter_bc_runtime->interpreter_func(filter_bc_runtime,
+ interpreter_stack_data, &bytecode_filter_ctx) == LTTNG_UST_BYTECODE_INTERPRETER_OK)) {
+ if (caa_unlikely(bytecode_filter_ctx.result == LTTNG_UST_BYTECODE_FILTER_ACCEPT)) {
+ filter_record = true;
+ break;
+ }
+ }
+ }
+ if (filter_record)
+ return LTTNG_UST_EVENT_FILTER_ACCEPT;
+ else
+ return LTTNG_UST_EVENT_FILTER_REJECT;
+}
+
+#undef START_OP
+#undef OP
+#undef PO
+#undef END_OP
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode specializer.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <lttng/ust-utils.h>
+
+#include "context-internal.h"
+#include "lttng-bytecode.h"
+#include "ust-events-internal.h"
+#include "ust-helper.h"
+
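+/*
+ * lttng_fls(): find-last-set. Locates the most-significant set bit by
+ * binary search over word halves; returns its 1-based position (1..32),
+ * or 0 when no bit is set. get_count_order() derives ceil(log2(count))
+ * from it (e.g. 8 -> 3, 9 -> 4), used to round allocations up to a
+ * power of two.
+ */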
+static int lttng_fls(int val)
+{
+ int r = 32;
+ unsigned int x = (unsigned int) val;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ r -= 1;
+ }
+ return r;
+}
+
+static int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = lttng_fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
+
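+/*
+ * Reserve an aligned slot of len bytes in the runtime data area,
+ * growing the backing buffer geometrically: the new allocation is the
+ * larger of the next power of two and double the old size, keeping
+ * repeated pushes amortized O(1). Newly allocated bytes are zeroed.
+ * Returns the offset of the reserved slot, or a negative error.
+ */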
+static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
+ size_t align, size_t len)
+{
+ ssize_t ret;
+ size_t padding = lttng_ust_offset_align(runtime->data_len, align);
+ size_t new_len = runtime->data_len + padding + len;
+ size_t new_alloc_len = new_len;
+ size_t old_alloc_len = runtime->data_alloc_len;
+
+ if (new_len > BYTECODE_MAX_DATA_LEN)
+ return -EINVAL;
+
+ if (new_alloc_len > old_alloc_len) {
+ char *newptr;
+
+ new_alloc_len =
+ max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
+ newptr = realloc(runtime->data, new_alloc_len);
+ if (!newptr)
+ return -ENOMEM;
+ runtime->data = newptr;
+		/* Zero only the newly allocated region, from the end of the old allocation onward. */
+ memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
+ runtime->data_alloc_len = new_alloc_len;
+ }
+ runtime->data_len += padding;
+ ret = runtime->data_len;
+ runtime->data_len += len;
+ return ret;
+}
+
+static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
+ const void *p, size_t align, size_t len)
+{
+ ssize_t offset;
+
+ offset = bytecode_reserve_data(runtime, align, len);
+ if (offset < 0)
+ return -ENOMEM;
+ memcpy(&runtime->data[offset], p, len);
+ return offset;
+}
+
+static int specialize_load_field(struct vstack_entry *stack_top,
+ struct load_op *insn)
+{
+ int ret;
+
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ default:
+ dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_S8:
+ dbg_printf("op load field s8\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S8;
+ break;
+ case OBJECT_TYPE_S16:
+ dbg_printf("op load field s16\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S16;
+ break;
+ case OBJECT_TYPE_S32:
+ dbg_printf("op load field s32\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S32;
+ break;
+ case OBJECT_TYPE_S64:
+ dbg_printf("op load field s64\n");
+ stack_top->type = REG_S64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_S64;
+ break;
+ case OBJECT_TYPE_SIGNED_ENUM:
+ dbg_printf("op load field signed enumeration\n");
+ stack_top->type = REG_PTR;
+ break;
+ case OBJECT_TYPE_U8:
+ dbg_printf("op load field u8\n");
+ stack_top->type = REG_U64;
+ insn->op = BYTECODE_OP_LOAD_FIELD_U8;
+ break;
+ case OBJECT_TYPE_U16:
+ dbg_printf("op load field u16\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U16;
+ break;
+ case OBJECT_TYPE_U32:
+ dbg_printf("op load field u32\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U32;
+ break;
+ case OBJECT_TYPE_U64:
+ dbg_printf("op load field u64\n");
+ stack_top->type = REG_U64;
+ if (!stack_top->load.rev_bo)
+ insn->op = BYTECODE_OP_LOAD_FIELD_U64;
+ break;
+ case OBJECT_TYPE_UNSIGNED_ENUM:
+ dbg_printf("op load field unsigned enumeration\n");
+ stack_top->type = REG_PTR;
+ break;
+ case OBJECT_TYPE_DOUBLE:
+ stack_top->type = REG_DOUBLE;
+ insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
+ break;
+ case OBJECT_TYPE_STRING:
+ dbg_printf("op load field string\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
+ break;
+ case OBJECT_TYPE_STRING_SEQUENCE:
+ dbg_printf("op load field string sequence\n");
+ stack_top->type = REG_STRING;
+ insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
+ break;
+ case OBJECT_TYPE_DYNAMIC:
+ dbg_printf("op load field dynamic\n");
+ stack_top->type = REG_UNKNOWN;
+ /* Don't specialize load op. */
+ break;
+ case OBJECT_TYPE_SEQUENCE:
+ case OBJECT_TYPE_ARRAY:
+ case OBJECT_TYPE_STRUCT:
+ case OBJECT_TYPE_VARIANT:
+ ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
+ ret = -EINVAL;
+ goto end;
+ }
+ return 0;
+
+end:
+ return ret;
+}
+
+static int specialize_get_index_object_type(enum object_type *otype,
+ int signedness, uint32_t elem_len)
+{
+ switch (elem_len) {
+ case 8:
+ if (signedness)
+ *otype = OBJECT_TYPE_S8;
+ else
+ *otype = OBJECT_TYPE_U8;
+ break;
+ case 16:
+ if (signedness)
+ *otype = OBJECT_TYPE_S16;
+ else
+ *otype = OBJECT_TYPE_U16;
+ break;
+ case 32:
+ if (signedness)
+ *otype = OBJECT_TYPE_S32;
+ else
+ *otype = OBJECT_TYPE_U32;
+ break;
+ case 64:
+ if (signedness)
+ *otype = OBJECT_TYPE_S64;
+ else
+ *otype = OBJECT_TYPE_U64;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
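+/*
+ * Specialize a get-index op: validate that the indexed object is an
+ * integer array or sequence, precompute the element's byte offset and
+ * object type, and store them as a bytecode_get_index_data record in
+ * the runtime data area so the interpreter can load the element
+ * without re-deriving type information.
+ */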
+static int specialize_get_index(struct bytecode_runtime *runtime,
+ struct load_op *insn, uint64_t index,
+ struct vstack_entry *stack_top,
+ int idx_len)
+{
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ memset(&gid, 0, sizeof(gid));
+ switch (stack_top->load.type) {
+ case LOAD_OBJECT:
+ switch (stack_top->load.object_type) {
+ case OBJECT_TYPE_ARRAY:
+ {
+ const struct lttng_ust_type_integer *integer_type;
+ const struct lttng_ust_event_field *field;
+ uint32_t elem_len, num_elems;
+ int signedness;
+
+ field = stack_top->load.field;
+ switch (field->type->type) {
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_array(field->type)->elem_type);
+ num_elems = lttng_ust_get_type_array(field->type)->length;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ if (index >= num_elems) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.array_len = num_elems * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_SEQUENCE:
+ {
+ const struct lttng_ust_type_integer *integer_type;
+ const struct lttng_ust_event_field *field;
+ uint32_t elem_len;
+ int signedness;
+
+ field = stack_top->load.field;
+ switch (field->type->type) {
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ret = -EINVAL;
+ goto end;
+ }
+ integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_sequence(field->type)->elem_type);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ elem_len = integer_type->size;
+ signedness = integer_type->signedness;
+ ret = specialize_get_index_object_type(&stack_top->load.object_type,
+ signedness, elem_len);
+ if (ret)
+ goto end;
+ gid.offset = index * (elem_len / CHAR_BIT);
+ gid.elem.type = stack_top->load.object_type;
+ gid.elem.len = elem_len;
+ if (integer_type->reverse_byte_order)
+ gid.elem.rev_bo = true;
+ stack_top->load.rev_bo = gid.elem.rev_bo;
+ break;
+ }
+ case OBJECT_TYPE_STRUCT:
+ /* Only generated by the specialize phase. */
+ case OBJECT_TYPE_VARIANT: /* Fall-through */
+ default:
+ ERR("Unexpected get index type %d",
+ (int) stack_top->load.object_type);
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ case LOAD_ROOT_CONTEXT:
+ case LOAD_ROOT_APP_CONTEXT:
+ case LOAD_ROOT_PAYLOAD:
+ ERR("Index lookup for root field not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ }
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (idx_len) {
+ case 2:
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ break;
+ case 8:
+ ((struct get_index_u64 *) insn->data)->index = data_offset;
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+
+ return 0;
+
+end:
+ return ret;
+}
+
+static int specialize_context_lookup_name(struct lttng_ust_ctx *ctx,
+ struct bytecode_runtime *bytecode,
+ struct load_op *insn)
+{
+ uint16_t offset;
+ const char *name;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
+ return lttng_get_context_index(ctx, name);
+}
+
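+/*
+ * Map an event field's static type onto the object_type used by load
+ * specialization. Context fields expose arrays and sequences as
+ * strings; integer-element arrays and sequences keep a back-pointer
+ * to the field so a later get-index can be specialized.
+ */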
+static int specialize_load_object(const struct lttng_ust_event_field *field,
+ struct vstack_load *load, bool is_context)
+{
+ load->type = LOAD_OBJECT;
+
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ if (lttng_ust_get_type_integer(field->type)->signedness)
+ load->object_type = OBJECT_TYPE_S64;
+ else
+ load->object_type = OBJECT_TYPE_U64;
+ load->rev_bo = false;
+ break;
+ case lttng_ust_type_enum:
+ {
+ const struct lttng_ust_type_integer *itype;
+
+ itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
+ if (itype->signedness)
+ load->object_type = OBJECT_TYPE_SIGNED_ENUM;
+ else
+ load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
+ load->rev_bo = false;
+ break;
+ }
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Array nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
+ load->object_type = OBJECT_TYPE_ARRAY;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
+ ERR("Sequence nesting only supports integer types.");
+ return -EINVAL;
+ }
+ if (is_context) {
+ load->object_type = OBJECT_TYPE_STRING;
+ } else {
+ if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
+ load->object_type = OBJECT_TYPE_SEQUENCE;
+ load->field = field;
+ } else {
+ load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
+ }
+ }
+ break;
+
+ case lttng_ust_type_string:
+ load->object_type = OBJECT_TYPE_STRING;
+ break;
+ case lttng_ust_type_float:
+ load->object_type = OBJECT_TYPE_DOUBLE;
+ break;
+ case lttng_ust_type_dynamic:
+ load->object_type = OBJECT_TYPE_DYNAMIC;
+ break;
+ default:
+ ERR("Unknown type: %d", (int) field->type->type);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int specialize_context_lookup(struct lttng_ust_ctx *ctx,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ int idx, ret;
+ const struct lttng_ust_ctx_field *ctx_field;
+ const struct lttng_ust_event_field *field;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ idx = specialize_context_lookup_name(ctx, runtime, insn);
+ if (idx < 0) {
+ return -ENOENT;
+ }
+ ctx_field = &ctx->fields[idx];
+ field = ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ return ret;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ return -EINVAL;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ return 0;
+}
+
+static int specialize_app_context_lookup(struct lttng_ust_ctx **pctx,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ uint16_t offset;
+ const char *orig_name;
+ char *name = NULL;
+ int idx, ret;
+ const struct lttng_ust_ctx_field *ctx_field;
+ const struct lttng_ust_event_field *field;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ offset = ((struct get_symbol *) insn->data)->offset;
+ orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
+ if (!name) {
+ ret = -ENOMEM;
+ goto end;
+ }
+ strcpy(name, "$app.");
+ strcat(name, orig_name);
+ idx = lttng_get_context_index(*pctx, name);
+ if (idx < 0) {
+ assert(lttng_context_is_app(name));
+ ret = lttng_ust_add_app_context_to_ctx_rcu(name,
+ pctx);
+		if (ret)
+			goto end;
+		idx = lttng_get_context_index(*pctx, name);
+		if (idx < 0) {
+			ret = -ENOENT;
+			goto end;
+		}
+ }
+ ctx_field = &(*pctx)->fields[idx];
+ field = ctx_field->event_field;
+ ret = specialize_load_object(field, load, true);
+ if (ret)
+ goto end;
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.ctx_index = idx;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ free(name);
+ return ret;
+}
+
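+/*
+ * Resolve a payload field by name against the event descriptor,
+ * accumulating the byte offsets of the fields preceding it using the
+ * same layout the probe serializes into the interpreter stack data.
+ */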
+static int specialize_payload_lookup(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ struct load_op *insn,
+ struct vstack_load *load)
+{
+ const char *name;
+ uint16_t offset;
+ unsigned int i, nr_fields;
+ bool found = false;
+ uint32_t field_offset = 0;
+ const struct lttng_ust_event_field *field;
+ int ret;
+ struct bytecode_get_index_data gid;
+ ssize_t data_offset;
+
+ nr_fields = event_desc->nr_fields;
+ offset = ((struct get_symbol *) insn->data)->offset;
+ name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
+ for (i = 0; i < nr_fields; i++) {
+ field = event_desc->fields[i];
+ if (field->nofilter) {
+ continue;
+ }
+ if (!strcmp(field->name, name)) {
+ found = true;
+ break;
+ }
+ /* compute field offset on stack */
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ field_offset += sizeof(int64_t);
+ break;
+ case lttng_ust_type_array:
+ case lttng_ust_type_sequence:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_string:
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_float:
+ field_offset += sizeof(double);
+ break;
+ default:
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (!found) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = specialize_load_object(field, load, false);
+ if (ret)
+ goto end;
+
+ /* Specialize each get_symbol into a get_index. */
+ insn->op = BYTECODE_OP_GET_INDEX_U16;
+ memset(&gid, 0, sizeof(gid));
+ gid.offset = field_offset;
+ gid.elem.type = load->object_type;
+ gid.elem.rev_bo = load->rev_bo;
+ gid.field = field;
+ data_offset = bytecode_push_data(runtime, &gid,
+ __alignof__(gid), sizeof(gid));
+ if (data_offset < 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+ ((struct get_index_u16 *) insn->data)->index = data_offset;
+ ret = 0;
+end:
+ return ret;
+}
+
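+/*
+ * Specialization pass: walk the bytecode once, tracking operand types
+ * on a virtual stack (vstack), and rewrite each generic opcode in
+ * place to its typed variant whenever operand types are statically
+ * known. REG_UNKNOWN operands leave the generic opcode untouched so
+ * the interpreter resolves typing dynamically.
+ */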
+int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *bytecode)
+{
+ void *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack _stack;
+ struct vstack *stack = &_stack;
+ struct lttng_ust_ctx **pctx = bytecode->p.pctx;
+
+ vstack_init(stack);
+
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_RETURN:
+ if (vstack_ax(stack)->type == REG_S64 ||
+ vstack_ax(stack)->type == REG_U64)
+ *(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
+ ret = 0;
+ goto end;
+
+ case BYTECODE_OP_RETURN_S64:
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ ERR("Unexpected register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ ret = 0;
+ goto end;
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+
+ case BYTECODE_OP_EQ:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+			switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_EQ_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_EQ_S64;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_EQ_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_NE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ else
+ insn->op = BYTECODE_OP_NE_STRING;
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_NE_S64;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_NE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_NE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for > binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_GT_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GT_S64;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GT_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_LT:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for < binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_LT_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LT_S64;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LT_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LT_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_GE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for >= binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_GE_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GE_S64;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_GE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_GE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ struct binary_op *insn = (struct binary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STAR_GLOB_STRING:
+ ERR("invalid register type for <= binary operator\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_STRING:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ insn->op = BYTECODE_OP_LE_STRING;
+ break;
+ case REG_S64:
+ case REG_U64:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LE_S64;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE_S64;
+ break;
+ case REG_DOUBLE:
+ if (vstack_bx(stack)->type == REG_UNKNOWN)
+ break;
+ if (vstack_bx(stack)->type == REG_S64 ||
+ vstack_bx(stack)->type == REG_U64)
+ insn->op = BYTECODE_OP_LE_S64_DOUBLE;
+ else
+ insn->op = BYTECODE_OP_LE_DOUBLE;
+ break;
+ case REG_UNKNOWN:
+ break; /* Dynamic typing. */
+ }
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_PLUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_MINUS_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ struct unary_op *insn = (struct unary_op *) pc;
+
+ switch(vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_S64:
+ case REG_U64:
+ insn->op = BYTECODE_OP_UNARY_NOT_S64;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
+ break;
+ case REG_UNKNOWN: /* Dynamic typing. */
+ break;
+ }
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ break;
+ }
+
+ /* cast */
+ case BYTECODE_OP_CAST_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ insn->op = BYTECODE_OP_CAST_NOP;
+ break;
+ case REG_DOUBLE:
+ insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
+ break;
+ case REG_UNKNOWN:
+ case REG_U64:
+ break;
+ }
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ assert(vstack_ax(stack)->type == REG_PTR);
+ /* Pop 1, push 1 */
+ ret = specialize_load_field(vstack_ax(stack), insn);
+ if (ret)
+ goto end;
+
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ dbg_printf("op get symbol\n");
+ switch (vstack_ax(stack)->load.type) {
+ case LOAD_OBJECT:
+ ERR("Nested fields not implemented yet.");
+ ret = -EINVAL;
+ goto end;
+ case LOAD_ROOT_CONTEXT:
+ /* Lookup context field. */
+ ret = specialize_context_lookup(*pctx,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_APP_CONTEXT:
+ /* Lookup app context field. */
+ ret = specialize_app_context_lookup(pctx,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ case LOAD_ROOT_PAYLOAD:
+ /* Lookup event payload field. */
+ ret = specialize_payload_lookup(event_desc,
+ bytecode, insn,
+ &vstack_ax(stack)->load);
+ if (ret)
+ goto end;
+ break;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Always generated by specialize phase. */
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("op get index u16\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("op get index u64\n");
+ /* Pop 1, push 1 */
+ ret = specialize_get_index(bytecode, insn, index->index,
+ vstack_ax(stack), sizeof(*index));
+ if (ret)
+ goto end;
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+ }
+end:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode validator.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <time.h>
+
+#include "rculfhash.h"
+
+#include "lttng-bytecode.h"
+#include "lttng-hash-helper.h"
+#include "string-utils.h"
+#include "ust-events-internal.h"
+#include "ust-helper.h"
+
+/*
+ * Number of merge points used to size the hash table. The hash table is
+ * initialized to that size and never resized, because resizing would
+ * trigger RCU worker thread execution: if the number of merge points
+ * exceeds this value, lookups fall back on linear traversal within the
+ * buckets.
+ */
+#define DEFAULT_NR_MERGE_POINTS 128
+#define MIN_NR_BUCKETS 128
+#define MAX_NR_BUCKETS 128
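+/*
+ * MIN_NR_BUCKETS == MAX_NR_BUCKETS pins the bucket count, so the table
+ * cannot be resized after creation.
+ */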
+
+/* merge point table node */
+struct lfht_mp_node {
+ struct lttng_ust_lfht_node node;
+
+ /* Context at merge point */
+ struct vstack stack;
+ unsigned long target_pc;
+};
+
+static unsigned long lttng_hash_seed;
+static unsigned int lttng_hash_seed_ready;
+
+static
+int lttng_hash_match(struct lttng_ust_lfht_node *node, const void *key)
+{
+ struct lfht_mp_node *mp_node =
+ caa_container_of(node, struct lfht_mp_node, node);
+ unsigned long key_pc = (unsigned long) key;
+
+ if (mp_node->target_pc == key_pc)
+ return 1;
+ else
+ return 0;
+}
+
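+/*
+ * Compare the saved stack of a merge point with the current stack.
+ * Returns 0 when compatible: same depth, and for each slot the types
+ * either match or at least one of them is REG_UNKNOWN (dynamic typing).
+ * Returns 1 otherwise.
+ */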
+static
+int merge_points_compare(const struct vstack *stacka,
+ const struct vstack *stackb)
+{
+ int i, len;
+
+ if (stacka->top != stackb->top)
+ return 1;
+ len = stacka->top + 1;
+ assert(len >= 0);
+ for (i = 0; i < len; i++) {
+ if (stacka->e[i].type != REG_UNKNOWN
+ && stackb->e[i].type != REG_UNKNOWN
+ && stacka->e[i].type != stackb->e[i].type)
+ return 1;
+ }
+ return 0;
+}
+
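+/*
+ * Record the expected stack state at a branch target. Logical and/or
+ * instructions can skip forward; the validator stores the stack at each
+ * skip_offset so that, when the target is also reached by fall-through,
+ * both paths can be checked to agree on register types. If the target
+ * was already recorded, the stacks are compared instead of re-added.
+ */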
+static
+int merge_point_add_check(struct lttng_ust_lfht *ht, unsigned long target_pc,
+ const struct vstack *stack)
+{
+ struct lfht_mp_node *node;
+ unsigned long hash = lttng_hash_mix((const char *) target_pc,
+ sizeof(target_pc),
+ lttng_hash_seed);
+ struct lttng_ust_lfht_node *ret;
+
+ dbg_printf("Bytecode: adding merge point at offset %lu, hash %lu\n",
+ target_pc, hash);
+ node = zmalloc(sizeof(struct lfht_mp_node));
+ if (!node)
+ return -ENOMEM;
+ node->target_pc = target_pc;
+ memcpy(&node->stack, stack, sizeof(node->stack));
+ ret = lttng_ust_lfht_add_unique(ht, hash, lttng_hash_match,
+ (const char *) target_pc, &node->node);
+ if (ret != &node->node) {
+ struct lfht_mp_node *ret_mp =
+ caa_container_of(ret, struct lfht_mp_node, node);
+
+ /* Key already present */
+ dbg_printf("Bytecode: compare merge points for offset %lu, hash %lu\n",
+ target_pc, hash);
+ free(node);
+ if (merge_points_compare(stack, &ret_mp->stack)) {
+ ERR("Merge points differ for offset %lu\n",
+ target_pc);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * Binary comparators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_compare_check(struct vstack *stack, bytecode_opcode_t opcode,
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ break;
+ case REG_STAR_GLOB_STRING:
+ if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ if (opcode != BYTECODE_OP_EQ && opcode != BYTECODE_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ goto error_mismatch;
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_mismatch:
+ ERR("type mismatch for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack,
+ bytecode_opcode_t opcode __attribute__((unused)),
+ const char *str)
+{
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ case REG_U64:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ case REG_U64:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ return 1;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
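+/*
+ * Check that a get_symbol offset points inside the string data area
+ * (past reloc_offset) and that the symbol name is NUL-terminated before
+ * the end of the bytecode.
+ */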
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+ const struct get_symbol *sym)
+{
+ const char *str, *str_limit;
+ size_t len_limit;
+
+ if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+ return -EINVAL;
+
+ str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+ str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+ len_limit = str_limit - str;
+ if (strnlen(str, len_limit) == len_limit)
+ return -EINVAL;
+ return 0;
+}
+
+/*
+ * Validate bytecode range overflow within the validation pass.
+ * Called for each instruction encountered.
+ */
+static
+int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
+ char *start_pc, char *pc)
+{
+ int ret = 0;
+
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ break;
+ }
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ {
+ if (unlikely(pc + sizeof(struct return_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ break;
+ }
+
+ case BYTECODE_OP_EQ:
+ case BYTECODE_OP_NE:
+ case BYTECODE_OP_GT:
+ case BYTECODE_OP_LT:
+ case BYTECODE_OP_GE:
+ case BYTECODE_OP_LE:
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ if (unlikely(pc + sizeof(struct binary_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ case BYTECODE_OP_UNARY_NOT:
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ if (unlikely(pc + sizeof(struct unary_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ if (unlikely(pc + sizeof(struct logical_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ uint32_t str_len, maxlen;
+
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+
+ maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
+ str_len = strnlen(insn->data, maxlen);
+ if (unlikely(str_len >= maxlen)) {
+ /* Final '\0' not found within range */
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ case BYTECODE_OP_CAST_NOP:
+ {
+ if (unlikely(pc + sizeof(struct cast_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ case BYTECODE_OP_LOAD_FIELD:
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = validate_get_symbol(bytecode, sym);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ ERR("Unexpected get symbol field");
+ ret = -EINVAL;
+ break;
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+ }
+
+ return ret;
+}
+
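+/*
+ * Empty the merge point table, freeing each node, and return the number
+ * of nodes deleted. A nonzero count after a full validation pass
+ * presumably indicates branch targets that were never reached.
+ */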
+static
+unsigned long delete_all_nodes(struct lttng_ust_lfht *ht)
+{
+ struct lttng_ust_lfht_iter iter;
+ struct lfht_mp_node *node;
+ unsigned long nr_nodes = 0;
+
+ lttng_ust_lfht_for_each_entry(ht, &iter, node, node) {
+ int ret;
+
+ ret = lttng_ust_lfht_del(ht, lttng_ust_lfht_iter_get_node(&iter));
+ assert(!ret);
+ /* note: this hash table is never used concurrently */
+ free(node);
+ nr_nodes++;
+ }
+ return nr_nodes;
+}
+
+/*
+ * Return value:
+ * >=0: success
+ * <0: error
+ */
+static
+int validate_instruction_context(
+ struct bytecode_runtime *bytecode __attribute__((unused)),
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret = 0;
+ const bytecode_opcode_t opcode = *(bytecode_opcode_t *) pc;
+
+ switch (opcode) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ case BYTECODE_OP_RETURN_S64:
+ {
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) opcode);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ {
+ ret = bin_op_compare_check(stack, opcode, "==");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_NE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "!=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GT:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LT:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_GE:
+ {
+ ret = bin_op_compare_check(stack, opcode, ">=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+ case BYTECODE_OP_LE:
+ {
+ ret = bin_op_compare_check(stack, opcode, "<=");
+ if (ret < 0)
+ goto end;
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STRING
+ || vstack_bx(stack)->type != REG_STRING) {
+ ERR("Unexpected register type for string comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+ && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+ ERR("Unexpected register type for globbing pattern comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type for s64 comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
+ ERR("Double operator should have two double registers\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Double-S64 operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Double-S64 operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("S64-Double operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_bx(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("S64-Double operator has unexpected register types\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, ">>");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_LSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, "<<");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case BYTECODE_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Unary op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ case REG_U64:
+ break;
+ case REG_DOUBLE:
+ break;
+ case REG_UNKNOWN:
+ break;
+ }
+ break;
+ }
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_DOUBLE:
+ ERR("Unary bitwise op can only be applied to numeric registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ case REG_U64:
+ break;
+ case REG_UNKNOWN:
+ break;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64 &&
+ vstack_ax(stack)->type != REG_U64) {
+ ERR("Invalid register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_DOUBLE) {
+ ERR("Invalid register type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_S64
+ && vstack_ax(stack)->type != REG_U64
+ && vstack_ax(stack)->type != REG_UNKNOWN) {
+ ERR("Logical comparator expects S64, U64 or dynamic register\n");
+ ret = -EINVAL;
+ goto end;
+ }
+
+ dbg_printf("Validate jumping to bytecode offset %u\n",
+ (unsigned int) insn->skip_offset);
+ if (unlikely(start_pc + insn->skip_offset <= pc)) {
+ ERR("Loops are not allowed in bytecode\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate load field ref offset %u type double\n",
+ ref->offset);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ struct cast_op *insn = (struct cast_op *) pc;
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ ERR("Cast op can only be applied to numeric or floating point registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ case REG_U64:
+ break;
+ case REG_DOUBLE:
+ break;
+ case REG_UNKNOWN:
+ break;
+ }
+ if (insn->op == BYTECODE_OP_CAST_DOUBLE_TO_S64) {
+ if (vstack_ax(stack)->type != REG_DOUBLE) {
+ ERR("Cast expects double\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ break;
+ }
+
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type dynamic\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type string\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type s64\n",
+ ref->offset);
+ break;
+ }
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct field_ref *ref = (struct field_ref *) insn->data;
+
+ dbg_printf("Validate get context ref offset %u type double\n",
+ ref->offset);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get app context root\n");
+ break;
+ }
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printf("Validate get payload root\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /*
+ * We tolerate the field type being unknown at validation
+ * time, because load specialization is performed in a
+ * later phase, after validation.
+ */
+ dbg_printf("Validate load field\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ {
+ dbg_printf("Validate load field s8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ {
+ dbg_printf("Validate load field s16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ {
+ dbg_printf("Validate load field s32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ dbg_printf("Validate load field s64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ {
+ dbg_printf("Validate load field u8\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ {
+ dbg_printf("Validate load field u16\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ {
+ dbg_printf("Validate load field u32\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ dbg_printf("Validate load field u64\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ {
+ dbg_printf("Validate load field string\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ dbg_printf("Validate load field sequence\n");
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printf("Validate load field double\n");
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
+ break;
+ }
+ }
+end:
+ return ret;
+}
+
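+/*
+ * Validate the stack state left by the previous instruction for the
+ * current pc, then check it against any merge point recorded for this
+ * offset. A matching merge point is removed once validated.
+ */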
+/*
+ * Return value:
+ * 0: success
+ * <0: error
+ */
+static
+int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
+ struct lttng_ust_lfht *merge_points,
+ struct vstack *stack,
+ char *start_pc,
+ char *pc)
+{
+ int ret;
+ unsigned long target_pc = pc - start_pc;
+ struct lttng_ust_lfht_iter iter;
+ struct lttng_ust_lfht_node *node;
+ struct lfht_mp_node *mp_node;
+ unsigned long hash;
+
+ /* Validate the context resulting from the previous instruction */
+ ret = validate_instruction_context(bytecode, stack, start_pc, pc);
+ if (ret < 0)
+ return ret;
+
+ /* Validate merge points */
+ hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
+ lttng_hash_seed);
+ lttng_ust_lfht_lookup(merge_points, hash, lttng_hash_match,
+ (const char *) target_pc, &iter);
+ node = lttng_ust_lfht_iter_get_node(&iter);
+ if (node) {
+ mp_node = caa_container_of(node, struct lfht_mp_node, node);
+
+ dbg_printf("Bytecode: validate merge point at offset %lu\n",
+ target_pc);
+ if (merge_points_compare(stack, &mp_node->stack)) {
+ ERR("Merge points differ for offset %lu\n",
+ target_pc);
+ return -EINVAL;
+ }
+ /* Once validated, we can remove the merge point */
+ dbg_printf("Bytecode: remove merge point at offset %lu\n",
+ target_pc);
+ ret = lttng_ust_lfht_del(merge_points, node);
+ assert(!ret);
+ }
+ return 0;
+}
+
+/*
+ * Return value:
+ * >0: going to next insn.
+ * 0: success, stop iteration.
+ * <0: error
+ */
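+/*
+ * Symbolically execute one instruction: no event data is touched; only
+ * the effect on virtual-stack register types is simulated and next_pc
+ * is advanced. Logical and/or record a merge point at their skip
+ * target.
+ */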
+static
+int exec_insn(struct bytecode_runtime *bytecode __attribute__((unused)),
+ struct lttng_ust_lfht *merge_points,
+ struct vstack *stack,
+ char **_next_pc,
+ char *pc)
+{
+ int ret = 1;
+ char *next_pc = *_next_pc;
+
+ switch (*(bytecode_opcode_t *) pc) {
+ case BYTECODE_OP_UNKNOWN:
+ default:
+ {
+ ERR("unknown bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_RETURN:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_PTR:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+ case BYTECODE_OP_RETURN_S64:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ case REG_UNKNOWN:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+
+ /* binary */
+ case BYTECODE_OP_MUL:
+ case BYTECODE_OP_DIV:
+ case BYTECODE_OP_MOD:
+ case BYTECODE_OP_PLUS:
+ case BYTECODE_OP_MINUS:
+ {
+ ERR("unsupported bytecode op %u\n",
+ (unsigned int) *(bytecode_opcode_t *) pc);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ case BYTECODE_OP_EQ:
+ case BYTECODE_OP_NE:
+ case BYTECODE_OP_GT:
+ case BYTECODE_OP_LT:
+ case BYTECODE_OP_GE:
+ case BYTECODE_OP_LE:
+ case BYTECODE_OP_EQ_STRING:
+ case BYTECODE_OP_NE_STRING:
+ case BYTECODE_OP_GT_STRING:
+ case BYTECODE_OP_LT_STRING:
+ case BYTECODE_OP_GE_STRING:
+ case BYTECODE_OP_LE_STRING:
+ case BYTECODE_OP_EQ_STAR_GLOB_STRING:
+ case BYTECODE_OP_NE_STAR_GLOB_STRING:
+ case BYTECODE_OP_EQ_S64:
+ case BYTECODE_OP_NE_S64:
+ case BYTECODE_OP_GT_S64:
+ case BYTECODE_OP_LT_S64:
+ case BYTECODE_OP_GE_S64:
+ case BYTECODE_OP_LE_S64:
+ case BYTECODE_OP_EQ_DOUBLE:
+ case BYTECODE_OP_NE_DOUBLE:
+ case BYTECODE_OP_GT_DOUBLE:
+ case BYTECODE_OP_LT_DOUBLE:
+ case BYTECODE_OP_GE_DOUBLE:
+ case BYTECODE_OP_LE_DOUBLE:
+ case BYTECODE_OP_EQ_DOUBLE_S64:
+ case BYTECODE_OP_NE_DOUBLE_S64:
+ case BYTECODE_OP_GT_DOUBLE_S64:
+ case BYTECODE_OP_LT_DOUBLE_S64:
+ case BYTECODE_OP_GE_DOUBLE_S64:
+ case BYTECODE_OP_LE_DOUBLE_S64:
+ case BYTECODE_OP_EQ_S64_DOUBLE:
+ case BYTECODE_OP_NE_S64_DOUBLE:
+ case BYTECODE_OP_GT_S64_DOUBLE:
+ case BYTECODE_OP_LT_S64_DOUBLE:
+ case BYTECODE_OP_GE_S64_DOUBLE:
+ case BYTECODE_OP_LE_S64_DOUBLE:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ case BYTECODE_OP_BIT_RSHIFT:
+ case BYTECODE_OP_BIT_LSHIFT:
+ case BYTECODE_OP_BIT_AND:
+ case BYTECODE_OP_BIT_OR:
+ case BYTECODE_OP_BIT_XOR:
+ {
+ /* Pop 2, push 1 */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct binary_op);
+ break;
+ }
+
+ /* unary */
+ case BYTECODE_OP_UNARY_PLUS:
+ case BYTECODE_OP_UNARY_MINUS:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_S64:
+ case BYTECODE_OP_UNARY_MINUS_S64:
+ case BYTECODE_OP_UNARY_NOT_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_S64:
+ case REG_U64:
+ break;
+ case REG_DOUBLE:
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_NOT_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case BYTECODE_OP_UNARY_PLUS_DOUBLE:
+ case BYTECODE_OP_UNARY_MINUS_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ /* logical */
+ case BYTECODE_OP_AND:
+ case BYTECODE_OP_OR:
+ {
+ struct logical_op *insn = (struct logical_op *) pc;
+ int merge_ret;
+
+ /* Add merge point to table */
+ merge_ret = merge_point_add_check(merge_points,
+ insn->skip_offset, stack);
+ if (merge_ret) {
+ ret = merge_ret;
+ goto end;
+ }
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* There is always a cast-to-s64 operation before an or/and op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Continue to next instruction */
+ /* Pop 1 when jump not taken */
+ if (vstack_pop(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct logical_op);
+ break;
+ }
+
+ /* load field ref */
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ ERR("Unknown field ref type\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* get context ref */
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_STRING:
+ case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
+ case BYTECODE_OP_GET_CONTEXT_REF_STRING:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_S64:
+ case BYTECODE_OP_GET_CONTEXT_REF_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+ case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
+ case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+ break;
+ }
+
+ /* load from immediate operand */
+ case BYTECODE_OP_LOAD_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_S64:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_numeric);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_DOUBLE:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op)
+ + sizeof(struct literal_double);
+ break;
+ }
+
+ case BYTECODE_OP_CAST_TO_S64:
+ case BYTECODE_OP_CAST_DOUBLE_TO_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_U64:
+ case REG_DOUBLE:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+ case BYTECODE_OP_CAST_NOP:
+ {
+ next_pc += sizeof(struct cast_op);
+ break;
+ }
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case BYTECODE_OP_GET_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
+ case BYTECODE_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_S8:
+ case BYTECODE_OP_LOAD_FIELD_S16:
+ case BYTECODE_OP_LOAD_FIELD_S32:
+ case BYTECODE_OP_LOAD_FIELD_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_U8:
+ case BYTECODE_OP_LOAD_FIELD_U16:
+ case BYTECODE_OP_LOAD_FIELD_U32:
+ case BYTECODE_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_U64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_STRING:
+ case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case BYTECODE_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
+ }
+end:
+ *_next_pc = next_pc;
+ return ret;
+}
+
+/*
+ * Never called concurrently (hash seed is shared).
+ */
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+{
+ struct lttng_ust_lfht *merge_points;
+ char *pc, *next_pc, *start_pc;
+ int ret = -EINVAL;
+ struct vstack stack;
+
+ vstack_init(&stack);
+
+ if (!lttng_hash_seed_ready) {
+ lttng_hash_seed = time(NULL);
+ lttng_hash_seed_ready = 1;
+ }
+ /*
+ * Note: merge_points hash table used by single thread, and
+ * never concurrently resized. Therefore, we can use it without
+ * holding RCU read-side lock and free nodes without using
+ * call_rcu.
+ */
+ merge_points = lttng_ust_lfht_new(DEFAULT_NR_MERGE_POINTS,
+ MIN_NR_BUCKETS, MAX_NR_BUCKETS,
+ 0, NULL);
+ if (!merge_points) {
+ ERR("Error allocating hash table for bytecode validation\n");
+ return -ENOMEM;
+ }
+ start_pc = &bytecode->code[0];
+ for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+ pc = next_pc) {
+ ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+ if (ret != 0) {
+ if (ret == -ERANGE)
+ ERR("Bytecode overflow\n");
+ goto end;
+ }
+ dbg_printf("Validating op %s (%u)\n",
+ lttng_bytecode_print_op((unsigned int) *(bytecode_opcode_t *) pc),
+ (unsigned int) *(bytecode_opcode_t *) pc);
+
+ /*
+ * For each instruction, validate the current context
+ * (traversal of entire execution flow), and validate
+ * all merge points targeting this instruction.
+ */
+ ret = validate_instruction_all_contexts(bytecode, merge_points,
+ &stack, start_pc, pc);
+ if (ret)
+ goto end;
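+		/*
+		 * Note (descriptive, inferred from the check below):
+		 * exec_insn() returns a positive value to continue at
+		 * *next_pc, 0 when execution terminates (end of
+		 * bytecode), or a negative error code.
+		 */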
+ ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
+ if (ret <= 0)
+ goto end;
+ }
+end:
+ if (delete_all_nodes(merge_points)) {
+ if (!ret) {
+ ERR("Unexpected merge points\n");
+ ret = -EINVAL;
+ }
+ }
+ if (lttng_ust_lfht_destroy(merge_points)) {
+ ERR("Error destroying hash table\n");
+ }
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode code.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+
+#include <urcu/rculist.h>
+
+#include "context-internal.h"
+#include "lttng-bytecode.h"
+#include "ust-events-internal.h"
+#include "ust-helper.h"
+
+static const char *opnames[] = {
+ [ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
+
+ [ BYTECODE_OP_RETURN ] = "RETURN",
+
+ /* binary */
+ [ BYTECODE_OP_MUL ] = "MUL",
+ [ BYTECODE_OP_DIV ] = "DIV",
+ [ BYTECODE_OP_MOD ] = "MOD",
+ [ BYTECODE_OP_PLUS ] = "PLUS",
+ [ BYTECODE_OP_MINUS ] = "MINUS",
+ [ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
+ [ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
+ [ BYTECODE_OP_BIT_AND ] = "BIT_AND",
+ [ BYTECODE_OP_BIT_OR ] = "BIT_OR",
+ [ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",
+
+ /* binary comparators */
+ [ BYTECODE_OP_EQ ] = "EQ",
+ [ BYTECODE_OP_NE ] = "NE",
+ [ BYTECODE_OP_GT ] = "GT",
+ [ BYTECODE_OP_LT ] = "LT",
+ [ BYTECODE_OP_GE ] = "GE",
+ [ BYTECODE_OP_LE ] = "LE",
+
+ /* string binary comparators */
+ [ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
+ [ BYTECODE_OP_NE_STRING ] = "NE_STRING",
+ [ BYTECODE_OP_GT_STRING ] = "GT_STRING",
+ [ BYTECODE_OP_LT_STRING ] = "LT_STRING",
+ [ BYTECODE_OP_GE_STRING ] = "GE_STRING",
+ [ BYTECODE_OP_LE_STRING ] = "LE_STRING",
+
+ /* s64 binary comparators */
+ [ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
+ [ BYTECODE_OP_NE_S64 ] = "NE_S64",
+ [ BYTECODE_OP_GT_S64 ] = "GT_S64",
+ [ BYTECODE_OP_LT_S64 ] = "LT_S64",
+ [ BYTECODE_OP_GE_S64 ] = "GE_S64",
+ [ BYTECODE_OP_LE_S64 ] = "LE_S64",
+
+ /* double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
+ [ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
+ [ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
+ [ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
+ [ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
+ [ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",
+
+ /* Mixed S64-double binary comparators */
+ [ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
+ [ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
+ [ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
+ [ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
+ [ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
+ [ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",
+
+ [ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
+ [ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
+ [ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
+ [ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
+ [ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
+ [ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",
+
+ /* unary */
+ [ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
+ [ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
+ [ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
+ [ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
+ [ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
+ [ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
+ [ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
+ [ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
+ [ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",
+
+ /* logical */
+ [ BYTECODE_OP_AND ] = "AND",
+ [ BYTECODE_OP_OR ] = "OR",
+
+ /* load field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
+ [ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
+ [ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
+ [ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",
+
+ /* load from immediate operand */
+ [ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
+ [ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
+ [ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",
+
+ /* cast */
+ [ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
+ [ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
+ [ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",
+
+ /* get context ref */
+ [ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
+ [ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
+ [ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
+ [ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",
+
+ /* load userspace field ref */
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
+
+ /*
+ * load immediate star globbing pattern (literal string)
+ * from immediate.
+ */
+ [ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",
+
+ /* globbing pattern binary operator: apply to */
+ [ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
+ [ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",
+
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ [ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
+ [ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
+ [ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",
+
+ [ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
+ [ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
+ [ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
+ [ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",
+
+ [ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
+ [ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
+ [ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
+ [ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
+ [ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
+ [ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
+ [ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
+ [ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
+ [ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
+ [ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
+ [ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
+ [ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
+
+ [ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
+
+ [ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
+};
+
+const char *lttng_bytecode_print_op(enum bytecode_op op)
+{
+ if (op >= NR_BYTECODE_OPS)
+ return "UNKNOWN";
+ else
+ return opnames[op];
+}
+
+static
+int apply_field_reloc(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len __attribute__((unused)),
+ uint32_t reloc_offset,
+ const char *field_name,
+ enum bytecode_op bytecode_op)
+{
+ const struct lttng_ust_event_field **fields, *field = NULL;
+ unsigned int nr_fields, i;
+ struct load_op *op;
+ uint32_t field_offset = 0;
+
+ dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);
+
+ /* Lookup event by name */
+ if (!event_desc)
+ return -EINVAL;
+ fields = event_desc->fields;
+ if (!fields)
+ return -EINVAL;
+ nr_fields = event_desc->nr_fields;
+ for (i = 0; i < nr_fields; i++) {
+ if (fields[i]->nofilter) {
+ continue;
+ }
+ if (!strcmp(fields[i]->name, field_name)) {
+ field = fields[i];
+ break;
+ }
+ /* compute field offset */
+ switch (fields[i]->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ field_offset += sizeof(int64_t);
+ break;
+ case lttng_ust_type_array:
+ case lttng_ust_type_sequence:
+ field_offset += sizeof(unsigned long);
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_string:
+ field_offset += sizeof(void *);
+ break;
+ case lttng_ust_type_float:
+ field_offset += sizeof(double);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ if (!field)
+ return -EINVAL;
+
+ /* Check if field offset is too large for 16-bit offset */
+ if (field_offset > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* set type */
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (field->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
+ break;
+ case lttng_ust_type_array:
+ {
+ struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) field->type;
+
+ if (array->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ }
+ case lttng_ust_type_sequence:
+ {
+ struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) field->type;
+
+ if (sequence->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ }
+ case lttng_ust_type_string:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
+ break;
+ case lttng_ust_type_float:
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset */
+ field_ref->offset = (uint16_t) field_offset;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int apply_context_reloc(struct bytecode_runtime *runtime,
+ uint32_t runtime_len __attribute__((unused)),
+ uint32_t reloc_offset,
+ const char *context_name,
+ enum bytecode_op bytecode_op)
+{
+ struct load_op *op;
+ const struct lttng_ust_ctx_field *ctx_field;
+ int idx;
+ struct lttng_ust_ctx **pctx = runtime->p.pctx;
+
+ dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
+
+ /* Get context index */
+ idx = lttng_get_context_index(*pctx, context_name);
+ if (idx < 0) {
+ if (lttng_context_is_app(context_name)) {
+ int ret;
+
+ ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
+ pctx);
+ if (ret)
+ return ret;
+ idx = lttng_get_context_index(*pctx, context_name);
+ if (idx < 0)
+ return -ENOENT;
+ } else {
+ return -ENOENT;
+ }
+ }
+ /* Check if idx is too large for 16-bit offset */
+ if (idx > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
+ return -EINVAL;
+
+ /* Get context return type */
+ ctx_field = &(*pctx)->fields[idx];
+ op = (struct load_op *) &runtime->code[reloc_offset];
+
+ switch (bytecode_op) {
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ {
+ struct field_ref *field_ref;
+
+ field_ref = (struct field_ref *) op->data;
+ switch (ctx_field->event_field->type->type) {
+ case lttng_ust_type_integer:
+ case lttng_ust_type_enum:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
+ break;
+ /* Sequence and array supported only as string */
+ case lttng_ust_type_array:
+ {
+ struct lttng_ust_type_array *array = (struct lttng_ust_type_array *) ctx_field->event_field->type;
+
+ if (array->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ }
+ case lttng_ust_type_sequence:
+ {
+ struct lttng_ust_type_sequence *sequence = (struct lttng_ust_type_sequence *) ctx_field->event_field->type;
+
+ if (sequence->encoding == lttng_ust_string_encoding_none)
+ return -EINVAL;
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ }
+ case lttng_ust_type_string:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
+ break;
+ case lttng_ust_type_float:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE;
+ break;
+ case lttng_ust_type_dynamic:
+ op->op = BYTECODE_OP_GET_CONTEXT_REF;
+ break;
+ default:
+ return -EINVAL;
+ }
+ /* set offset to context index within channel contexts */
+ field_ref->offset = (uint16_t) idx;
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int apply_reloc(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *runtime,
+ uint32_t runtime_len,
+ uint32_t reloc_offset,
+ const char *name)
+{
+ struct load_op *op;
+
+ dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);
+
+ /* Ensure that the reloc is within the code */
+ if (runtime_len - reloc_offset < sizeof(uint16_t))
+ return -EINVAL;
+
+ op = (struct load_op *) &runtime->code[reloc_offset];
+ switch (op->op) {
+ case BYTECODE_OP_LOAD_FIELD_REF:
+ return apply_field_reloc(event_desc, runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_CONTEXT_REF:
+ return apply_context_reloc(runtime, runtime_len,
+ reloc_offset, name, op->op);
+ case BYTECODE_OP_GET_SYMBOL:
+ case BYTECODE_OP_GET_SYMBOL_FIELD:
+ /*
+		 * Will be handled by the load specialize phase or
+		 * dynamically by the interpreter.
+ */
+ return 0;
+ default:
+ ERR("Unknown reloc op type %u\n", op->op);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static
+int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
+ struct cds_list_head *bytecode_runtime_head)
+{
+ struct lttng_ust_bytecode_runtime *bc_runtime;
+
+ cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
+ if (bc_runtime->bc == bytecode)
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Take a bytecode with reloc table and link it to an event to create a
+ * bytecode runtime.
+ */
+static
+int link_bytecode(const struct lttng_ust_event_desc *event_desc,
+ struct lttng_ust_ctx **ctx,
+ struct lttng_ust_bytecode_node *bytecode,
+ struct cds_list_head *bytecode_runtime_head,
+ struct cds_list_head *insert_loc)
+{
+ int ret, offset, next_offset;
+ struct bytecode_runtime *runtime = NULL;
+ size_t runtime_alloc_len;
+
+ if (!bytecode)
+ return 0;
+ /* Bytecode already linked */
+ if (bytecode_is_linked(bytecode, bytecode_runtime_head))
+ return 0;
+
+ dbg_printf("Linking...\n");
+
+ /* We don't need the reloc table in the runtime */
+ runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
+ runtime = zmalloc(runtime_alloc_len);
+ if (!runtime) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ runtime->p.type = bytecode->type;
+ runtime->p.bc = bytecode;
+ runtime->p.pctx = ctx;
+ runtime->len = bytecode->bc.reloc_offset;
+ /* copy original bytecode */
+ memcpy(runtime->code, bytecode->bc.data, runtime->len);
+ /*
+ * apply relocs. Those are a uint16_t (offset in bytecode)
+ * followed by a string (field name).
+ */
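+	/*
+	 * Layout sketch (illustrative only): for two relocated fields
+	 * named "a" and "b", the reloc table appended after the
+	 * bytecode proper would contain:
+	 *
+	 *   [uint16_t offset of the load insn for "a"]["a\0"]
+	 *   [uint16_t offset of the load insn for "b"]["b\0"]
+	 *
+	 * Each loop iteration below consumes one such variable-length
+	 * entry.
+	 */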
+ for (offset = bytecode->bc.reloc_offset;
+ offset < bytecode->bc.len;
+ offset = next_offset) {
+ uint16_t reloc_offset =
+ *(uint16_t *) &bytecode->bc.data[offset];
+ const char *name =
+ (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];
+
+ ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
+ if (ret) {
+ goto link_error;
+ }
+ next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
+ }
+ /* Validate bytecode */
+ ret = lttng_bytecode_validate(runtime);
+ if (ret) {
+ goto link_error;
+ }
+ /* Specialize bytecode */
+ ret = lttng_bytecode_specialize(event_desc, runtime);
+ if (ret) {
+ goto link_error;
+ }
+
+ runtime->p.interpreter_func = lttng_bytecode_interpret;
+ runtime->p.link_failed = 0;
+ cds_list_add_rcu(&runtime->p.node, insert_loc);
+ dbg_printf("Linking successful.\n");
+ return 0;
+
+link_error:
+ runtime->p.interpreter_func = lttng_bytecode_interpret_error;
+ runtime->p.link_failed = 1;
+ cds_list_add_rcu(&runtime->p.node, insert_loc);
+alloc_error:
+ dbg_printf("Linking failed.\n");
+ return ret;
+}
+
+void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
+{
+ struct lttng_ust_bytecode_node *bc = runtime->bc;
+
+ if (!bc->enabler->enabled || runtime->link_failed)
+ runtime->interpreter_func = lttng_bytecode_interpret_error;
+ else
+ runtime->interpreter_func = lttng_bytecode_interpret;
+}
+
+/*
+ * Given the lists of bytecode programs of an instance (trigger or event) and
+ * of a matching enabler, try to link all the enabler's bytecode programs with
+ * the instance.
+ *
+ * This function is called after we have confirmed that the enabler and
+ * the instance match by name (or by glob pattern).
+ */
+void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
+ struct lttng_ust_ctx **ctx,
+ struct cds_list_head *instance_bytecode_head,
+ struct cds_list_head *enabler_bytecode_head)
+{
+ struct lttng_ust_bytecode_node *enabler_bc;
+ struct lttng_ust_bytecode_runtime *runtime;
+
+ assert(event_desc);
+
+ /* Go over all the bytecode programs of the enabler. */
+ cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
+ int found = 0, ret;
+ struct cds_list_head *insert_loc;
+
+ /*
+ * Check if the current enabler bytecode program is already
+ * linked with the instance.
+ */
+ cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
+ if (runtime->bc == enabler_bc) {
+ found = 1;
+ break;
+ }
+ }
+
+ /*
+ * Skip bytecode already linked, go to the next enabler
+ * bytecode program.
+ */
+ if (found)
+ continue;
+
+ /*
+ * Insert at specified priority (seqnum) in increasing
+ * order. If there already is a bytecode of the same priority,
+ * insert the new bytecode right after it.
+ */
+ cds_list_for_each_entry_reverse(runtime,
+ instance_bytecode_head, node) {
+ if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
+ /* insert here */
+ insert_loc = &runtime->node;
+ goto add_within;
+ }
+ }
+
+		/* Add to head of list */
+ insert_loc = instance_bytecode_head;
+ add_within:
+ dbg_printf("linking bytecode\n");
+ ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
+ if (ret) {
+ dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
+ }
+ }
+}
+
+static
+void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
+{
+ struct bytecode_runtime *runtime, *tmp;
+
+ cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
+ p.node) {
+ free(runtime->data);
+ free(runtime);
+ }
+}
+
+void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
+{
+ free_filter_runtime(&event->priv->filter_bytecode_runtime_head);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST bytecode header.
+ */
+
+#ifndef _LTTNG_BYTECODE_H
+#define _LTTNG_BYTECODE_H
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <ust-helper.h>
+#include <lttng/ust-events.h>
+#include <ust-context-provider.h>
+#include <stdint.h>
+#include <assert.h>
+#include <string.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <usterr-signal-safe.h>
+#include "bytecode.h"
+#include "ust-events-internal.h"
+
+/* Interpreter stack length, in number of entries */
+#define INTERPRETER_STACK_LEN	10	/* includes 2 dummy entries */
+#define INTERPRETER_STACK_EMPTY 1
+
+#define BYTECODE_MAX_DATA_LEN 65536
+
+#ifndef min_t
+#define min_t(type, a, b) \
+ ((type) (a) < (type) (b) ? (type) (a) : (type) (b))
+#endif
+
+#ifndef likely
+#define likely(x) __builtin_expect(!!(x), 1)
+#endif
+
+#ifndef unlikely
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#endif
+
+#ifdef DEBUG
+#define dbg_printf(fmt, args...) \
+ printf("[debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args)
+#else
+#define dbg_printf(fmt, args...) \
+do { \
+ /* do nothing but check printf format */ \
+ if (0) \
+ printf("[debug bytecode in %s:%s@%u] " fmt, \
+ __FILE__, __func__, __LINE__, ## args); \
+} while (0)
+#endif
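+
+/*
+ * dbg_printf() usage sketch (illustrative; "opname" and "op" are
+ * hypothetical locals): it is called like printf(3), and compiles down
+ * to a format check only unless DEBUG is defined:
+ *
+ *   dbg_printf("Validating op %s (%u)\n", opname, (unsigned int) op);
+ */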
+
+/* Linked bytecode. Child of struct lttng_bytecode_runtime. */
+struct bytecode_runtime {
+ struct lttng_ust_bytecode_runtime p;
+ size_t data_len;
+ size_t data_alloc_len;
+ char *data;
+ uint16_t len;
+ char code[0];
+};
+
+enum entry_type {
+ REG_S64,
+ REG_U64,
+ REG_DOUBLE,
+ REG_STRING,
+ REG_STAR_GLOB_STRING,
+ REG_UNKNOWN,
+ REG_PTR,
+};
+
+enum load_type {
+ LOAD_ROOT_CONTEXT,
+ LOAD_ROOT_APP_CONTEXT,
+ LOAD_ROOT_PAYLOAD,
+ LOAD_OBJECT,
+};
+
+enum object_type {
+ OBJECT_TYPE_S8,
+ OBJECT_TYPE_S16,
+ OBJECT_TYPE_S32,
+ OBJECT_TYPE_S64,
+ OBJECT_TYPE_U8,
+ OBJECT_TYPE_U16,
+ OBJECT_TYPE_U32,
+ OBJECT_TYPE_U64,
+
+ OBJECT_TYPE_SIGNED_ENUM,
+ OBJECT_TYPE_UNSIGNED_ENUM,
+
+ OBJECT_TYPE_DOUBLE,
+ OBJECT_TYPE_STRING,
+ OBJECT_TYPE_STRING_SEQUENCE,
+
+ OBJECT_TYPE_SEQUENCE,
+ OBJECT_TYPE_ARRAY,
+ OBJECT_TYPE_STRUCT,
+ OBJECT_TYPE_VARIANT,
+
+ OBJECT_TYPE_DYNAMIC,
+};
+
+struct bytecode_get_index_data {
+ uint64_t offset; /* in bytes */
+ size_t ctx_index;
+ size_t array_len;
+ /*
+ * Field is only populated for LOAD_ROOT_CONTEXT, LOAD_ROOT_APP_CONTEXT
+ * and LOAD_ROOT_PAYLOAD. Left NULL for LOAD_OBJECT, considering that the
+ * interpreter needs to find it from the event fields and types to
+ * support variants.
+ */
+ const struct lttng_ust_event_field *field;
+ struct {
+ size_t len;
+ enum object_type type;
+ bool rev_bo; /* reverse byte order */
+ } elem;
+};
+
+/* Validation stack */
+struct vstack_load {
+ enum load_type type;
+ enum object_type object_type;
+ const struct lttng_ust_event_field *field;
+ bool rev_bo; /* reverse byte order */
+};
+
+struct vstack_entry {
+ enum entry_type type;
+ struct vstack_load load;
+};
+
+struct vstack {
+ int top; /* top of stack */
+ struct vstack_entry e[INTERPRETER_STACK_LEN];
+};
+
+static inline
+void vstack_init(struct vstack *stack)
+{
+ stack->top = -1;
+}
+
+static inline
+struct vstack_entry *vstack_ax(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0))
+ return NULL;
+ return &stack->e[stack->top];
+}
+
+static inline
+struct vstack_entry *vstack_bx(struct vstack *stack)
+{
+ if (unlikely(stack->top < 1))
+ return NULL;
+ return &stack->e[stack->top - 1];
+}
+
+static inline
+int vstack_push(struct vstack *stack)
+{
+ if (stack->top >= INTERPRETER_STACK_LEN - 1) {
+ ERR("Stack full\n");
+ return -EINVAL;
+ }
+ ++stack->top;
+ return 0;
+}
+
+static inline
+int vstack_pop(struct vstack *stack)
+{
+ if (unlikely(stack->top < 0)) {
+ ERR("Stack empty\n");
+ return -EINVAL;
+ }
+ stack->top--;
+ return 0;
+}
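+
+/*
+ * Usage sketch (illustrative; mirrors how the bytecode validator uses
+ * the virtual stack): a LOAD_S64 pushes one entry and tags its type,
+ * while a numeric comparison pops one operand and retypes the new top
+ * of stack as the S64 result:
+ *
+ *   if (vstack_push(stack))
+ *           return -EINVAL;
+ *   vstack_ax(stack)->type = REG_S64;
+ *
+ *   if (vstack_pop(stack))
+ *           return -EINVAL;
+ *   vstack_ax(stack)->type = REG_S64;
+ */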
+
+/* Execution stack */
+enum estack_string_literal_type {
+ ESTACK_STRING_LITERAL_TYPE_NONE,
+ ESTACK_STRING_LITERAL_TYPE_PLAIN,
+ ESTACK_STRING_LITERAL_TYPE_STAR_GLOB,
+};
+
+struct load_ptr {
+ enum load_type type;
+ enum object_type object_type;
+ const void *ptr;
+ size_t nr_elem;
+ bool rev_bo;
+ /* Temporary place-holders for contexts. */
+ union {
+ int64_t s64;
+ uint64_t u64;
+ double d;
+ } u;
+ const struct lttng_ust_event_field *field;
+};
+
+struct estack_entry {
+ enum entry_type type; /* For dynamic typing. */
+ union {
+ int64_t v;
+ double d;
+
+ struct {
+ const char *str;
+ size_t seq_len;
+ enum estack_string_literal_type literal_type;
+ } s;
+ struct load_ptr ptr;
+ } u;
+};
+
+struct estack {
+ int top; /* top of stack */
+ struct estack_entry e[INTERPRETER_STACK_LEN];
+};
+
+/*
+ * Always use aliased type for ax/bx (top of stack).
+ * When ax/bx are S64, use aliased value.
+ */
+#define estack_ax_v ax
+#define estack_bx_v bx
+#define estack_ax_t ax_t
+#define estack_bx_t bx_t
+
+/*
+ * ax and bx registers can hold either integer, double or string.
+ */
+#define estack_ax(stack, top) \
+ ({ \
+ assert((top) > INTERPRETER_STACK_EMPTY); \
+ &(stack)->e[top]; \
+ })
+
+#define estack_bx(stack, top) \
+ ({ \
+ assert((top) > INTERPRETER_STACK_EMPTY + 1); \
+ &(stack)->e[(top) - 1]; \
+ })
+
+/*
+ * Currently, only integers (REG_S64) can be pushed into the stack.
+ */
+#define estack_push(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ assert((top) < INTERPRETER_STACK_LEN - 1); \
+ (stack)->e[(top) - 1].u.v = (bx); \
+ (stack)->e[(top) - 1].type = (bx_t); \
+ (bx) = (ax); \
+ (bx_t) = (ax_t); \
+ ++(top); \
+ } while (0)
+
+#define estack_pop(stack, top, ax, bx, ax_t, bx_t) \
+ do { \
+ assert((top) > INTERPRETER_STACK_EMPTY); \
+ (ax) = (bx); \
+ (ax_t) = (bx_t); \
+ (bx) = (stack)->e[(top) - 2].u.v; \
+ (bx_t) = (stack)->e[(top) - 2].type; \
+ (top)--; \
+ } while (0)
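+
+/*
+ * Interpreter usage sketch (illustrative; the interpreter lives in a
+ * separate compilation unit): with ax/bx caching the two top entries,
+ * an EQ_S64 comparison reads both operands from registers, pops one
+ * entry, and stores the result in the new top of stack:
+ *
+ *   res = (estack_bx_v == estack_ax_v);
+ *   estack_pop(stack, top, ax, bx, ax_t, bx_t);
+ *   estack_ax_v = res;
+ *   estack_ax_t = REG_S64;
+ */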
+
+enum lttng_interpreter_type {
+ LTTNG_INTERPRETER_TYPE_S64,
+ LTTNG_INTERPRETER_TYPE_U64,
+ LTTNG_INTERPRETER_TYPE_SIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_UNSIGNED_ENUM,
+ LTTNG_INTERPRETER_TYPE_DOUBLE,
+ LTTNG_INTERPRETER_TYPE_STRING,
+ LTTNG_INTERPRETER_TYPE_SEQUENCE,
+};
+
+/*
+ * Represents the output parameter of the lttng interpreter.
+ * Currently capturable field classes are integer, double, string and sequence
+ * of integer.
+ */
+struct lttng_interpreter_output {
+ enum lttng_interpreter_type type;
+ union {
+ int64_t s;
+ uint64_t u;
+ double d;
+
+ struct {
+ const char *str;
+ size_t len;
+ } str;
+ struct {
+ const void *ptr;
+ size_t nr_elem;
+
+ /* Inner type. */
+ const struct lttng_ust_type_common *nested_type;
+ } sequence;
+ } u;
+};
+
+const char *lttng_bytecode_print_op(enum bytecode_op op)
+ __attribute__((visibility("hidden")));
+
+void lttng_bytecode_sync_state(struct lttng_ust_bytecode_runtime *runtime)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_validate(struct bytecode_runtime *bytecode)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
+ struct bytecode_runtime *bytecode)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_interpret_error(struct lttng_ust_bytecode_runtime *bytecode_runtime,
+ const char *stack_data,
+ void *ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_bytecode_interpret(struct lttng_ust_bytecode_runtime *bytecode_runtime,
+ const char *stack_data,
+ void *ctx)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_BYTECODE_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <error.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <usterr-signal-safe.h>
+#include <lttng/ust-clock.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+
+#include "clock.h"
+#include "getenv.h"
+
+struct lttng_ust_trace_clock *lttng_ust_trace_clock;
+
+static
+struct lttng_ust_trace_clock user_tc;
+
+static
+void *clock_handle;
+
+static
+uint64_t trace_clock_freq_monotonic(void)
+{
+ return 1000000000ULL;
+}
+
+static
+int trace_clock_uuid_monotonic(char *uuid)
+{
+ int ret = 0;
+ size_t len;
+ FILE *fp;
+
+ /*
+ * boot_id needs to be read once before being used concurrently
+ * to deal with a Linux kernel race. A fix is proposed for
+ * upstream, but the work-around is needed for older kernels.
+ */
+ fp = fopen("/proc/sys/kernel/random/boot_id", "r");
+ if (!fp) {
+ return -ENOENT;
+ }
+ len = fread(uuid, 1, LTTNG_UST_UUID_STR_LEN - 1, fp);
+ if (len < LTTNG_UST_UUID_STR_LEN - 1) {
+ ret = -EINVAL;
+ goto end;
+ }
+ uuid[LTTNG_UST_UUID_STR_LEN - 1] = '\0';
+end:
+ fclose(fp);
+ return ret;
+}
+
+static
+const char *trace_clock_name_monotonic(void)
+{
+ return "monotonic";
+}
+
+static
+const char *trace_clock_description_monotonic(void)
+{
+ return "Monotonic Clock";
+}
+
+int lttng_ust_trace_clock_set_read64_cb(lttng_ust_clock_read64_function read64_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.read64 = read64_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_read64_cb(lttng_ust_clock_read64_function *read64_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *read64_cb = &trace_clock_read64_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *read64_cb = ltc->read64;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_freq_cb(lttng_ust_clock_freq_function freq_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.freq = freq_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_freq_cb(lttng_ust_clock_freq_function *freq_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *freq_cb = &trace_clock_freq_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *freq_cb = ltc->freq;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_uuid_cb(lttng_ust_clock_uuid_function uuid_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.uuid = uuid_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_uuid_cb(lttng_ust_clock_uuid_function *uuid_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *uuid_cb = &trace_clock_uuid_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *uuid_cb = ltc->uuid;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_name_cb(lttng_ust_clock_name_function name_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.name = name_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_name_cb(lttng_ust_clock_name_function *name_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *name_cb = &trace_clock_name_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *name_cb = ltc->name;
+ }
+ return 0;
+}
+
+int lttng_ust_trace_clock_set_description_cb(lttng_ust_clock_description_function description_cb)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ user_tc.description = description_cb;
+ return 0;
+}
+
+int lttng_ust_trace_clock_get_description_cb(lttng_ust_clock_description_function *description_cb)
+{
+ struct lttng_ust_trace_clock *ltc = CMM_LOAD_SHARED(lttng_ust_trace_clock);
+
+ if (caa_likely(!ltc)) {
+ *description_cb = &trace_clock_description_monotonic;
+ } else {
+ cmm_read_barrier_depends(); /* load ltc before content */
+ *description_cb = ltc->description;
+ }
+ return 0;
+}
+
+int lttng_ust_enable_trace_clock_override(void)
+{
+ if (CMM_LOAD_SHARED(lttng_ust_trace_clock))
+ return -EBUSY;
+ if (!user_tc.read64)
+ return -EINVAL;
+ if (!user_tc.freq)
+ return -EINVAL;
+ if (!user_tc.name)
+ return -EINVAL;
+ if (!user_tc.description)
+ return -EINVAL;
+ /* Use default uuid cb when NULL */
+ cmm_smp_mb(); /* Store callbacks before trace clock */
+ CMM_STORE_SHARED(lttng_ust_trace_clock, &user_tc);
+ return 0;
+}
+
+void lttng_ust_clock_init(void)
+{
+ const char *libname;
+ void (*libinit)(void);
+
+ if (clock_handle)
+ return;
+ libname = lttng_ust_getenv("LTTNG_UST_CLOCK_PLUGIN");
+ if (!libname)
+ return;
+ clock_handle = dlopen(libname, RTLD_NOW);
+ if (!clock_handle) {
+ PERROR("Cannot load LTTng UST clock override library %s",
+ libname);
+ return;
+ }
+ dlerror();
+ libinit = (void (*)(void)) dlsym(clock_handle,
+ "lttng_ust_clock_plugin_init");
+ if (!libinit) {
+ PERROR("Cannot find LTTng UST clock override library %s initialization function lttng_ust_clock_plugin_init()",
+ libname);
+ return;
+ }
+ libinit();
+}
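+
+/*
+ * Minimal clock plugin sketch (illustrative; relies only on the setter
+ * API defined above and the lttng_ust_clock_plugin_init() entry point
+ * looked up by lttng_ust_clock_init()). A library passed through
+ * LTTNG_UST_CLOCK_PLUGIN could define:
+ *
+ *   static uint64_t my_read64(void)
+ *   {
+ *           struct timespec ts;
+ *
+ *           clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
+ *           return ((uint64_t) ts.tv_sec * 1000000000ULL) + ts.tv_nsec;
+ *   }
+ *
+ *   static uint64_t my_freq(void) { return 1000000000ULL; }
+ *   static const char *my_name(void) { return "my_monotonic_raw"; }
+ *   static const char *my_desc(void) { return "Monotonic raw clock"; }
+ *
+ *   void lttng_ust_clock_plugin_init(void)
+ *   {
+ *           if (lttng_ust_trace_clock_set_read64_cb(my_read64))
+ *                   return;
+ *           if (lttng_ust_trace_clock_set_freq_cb(my_freq))
+ *                   return;
+ *           if (lttng_ust_trace_clock_set_name_cb(my_name))
+ *                   return;
+ *           if (lttng_ust_trace_clock_set_description_cb(my_desc))
+ *                   return;
+ *           (void) lttng_ust_enable_trace_clock_override();
+ *   }
+ */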
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST cgroup namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <ust-tid.h>
+#include <urcu/tls-compat.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_cgroup_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_cgroup_ns(void)
+{
+ struct stat sb;
+ ino_t cgroup_ns;
+
+ cgroup_ns = CMM_LOAD_SHARED(URCU_TLS(cached_cgroup_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(cgroup_ns != NS_INO_UNINITIALIZED))
+ return cgroup_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ cgroup_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/cgroup", &sb) == 0) {
+ cgroup_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/cgroup",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ cgroup_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), cgroup_ns);
+
+ return cgroup_ns;
+}
+
+/*
+ * The cgroup namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWCGROUP
+ * * setns(2) called with the fd of a different cgroup ns
+ * * unshare(2) called with CLONE_NEWCGROUP
+ */
+void lttng_context_cgroup_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_cgroup_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t cgroup_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void cgroup_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t cgroup_ns;
+
+ cgroup_ns = get_cgroup_ns();
+ chan->ops->event_write(ctx, &cgroup_ns, sizeof(cgroup_ns),
+ lttng_ust_rb_alignof(cgroup_ns));
+}
+
+static
+void cgroup_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_cgroup_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("cgroup_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ cgroup_ns_get_size,
+ cgroup_ns_record,
+ cgroup_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_cgroup_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (implies a TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_cgroup_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_cgroup_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST CPU id context.
+ *
+ * Note: threads can be migrated at any point while executing the
+ * tracepoint probe. This means the CPU id field (and filter) is only
+ * statistical. For instance, even though a user might select a
+ * cpu_id==1 filter, a few events from other CPUs may still be
+ * recorded into the channel, due to migration.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <limits.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include "../libringbuffer/getcpu.h"
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+static
+size_t cpu_id_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(int));
+ size += sizeof(int);
+ return size;
+}
+
+static
+void cpu_id_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ int cpu;
+
+ cpu = lttng_ust_get_cpu();
+ chan->ops->event_write(ctx, &cpu, sizeof(cpu), lttng_ust_rb_alignof(cpu));
+}
+
+static
+void cpu_id_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = lttng_ust_get_cpu();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("cpu_id",
+ lttng_ust_static_type_integer(sizeof(int) * CHAR_BIT,
+ lttng_ust_rb_alignof(int) * CHAR_BIT,
+ lttng_ust_is_signed_type(int),
+ BYTE_ORDER, 10),
+ false, false),
+ cpu_id_get_size,
+ cpu_id_record,
+ cpu_id_get_value,
+ NULL, NULL);
+
+int lttng_add_cpu_id_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST Instruction Pointer Context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+static
+size_t ip_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(void *));
+ size += sizeof(void *);
+ return size;
+}
+
+static
+void ip_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ void *ip;
+
+ ip = ctx->ip;
+ chan->ops->event_write(ctx, &ip, sizeof(ip), lttng_ust_rb_alignof(ip));
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("ip",
+ lttng_ust_static_type_integer(sizeof(void *) * CHAR_BIT,
+ lttng_ust_rb_alignof(void *) * CHAR_BIT,
+ lttng_ust_is_signed_type(void *),
+ BYTE_ORDER, 10),
+ false, false),
+ ip_get_size,
+ ip_record,
+ NULL, NULL, NULL);
+
+int lttng_add_ip_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST ipc namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <ust-tid.h>
+#include <urcu/tls-compat.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_ipc_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_ipc_ns(void)
+{
+ struct stat sb;
+ ino_t ipc_ns;
+
+ ipc_ns = CMM_LOAD_SHARED(URCU_TLS(cached_ipc_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(ipc_ns != NS_INO_UNINITIALIZED))
+ return ipc_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ ipc_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/ipc", &sb) == 0) {
+ ipc_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/ipc",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ ipc_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), ipc_ns);
+
+ return ipc_ns;
+}
+
+/*
+ * The ipc namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWIPC
+ * * setns(2) called with the fd of a different ipc ns
+ * * unshare(2) called with CLONE_NEWIPC
+ */
+void lttng_context_ipc_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_ipc_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t ipc_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void ipc_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t ipc_ns;
+
+ ipc_ns = get_ipc_ns();
+ chan->ops->event_write(ctx, &ipc_ns, sizeof(ipc_ns), lttng_ust_rb_alignof(ipc_ns));
+}
+
+static
+void ipc_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_ipc_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("ipc_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ ipc_ns_get_size,
+ ipc_ns_record,
+ ipc_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_ipc_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (implies a TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_ipc_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_ipc_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST mnt namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event. The mount namespace is global to the process.
+ */
+static ino_t cached_mnt_ns = NS_INO_UNINITIALIZED;
+
+static
+ino_t get_mnt_ns(void)
+{
+ struct stat sb;
+ ino_t mnt_ns;
+
+ mnt_ns = CMM_LOAD_SHARED(cached_mnt_ns);
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(mnt_ns != NS_INO_UNINITIALIZED))
+ return mnt_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ mnt_ns = NS_INO_UNAVAILABLE;
+
+ if (stat("/proc/self/ns/mnt", &sb) == 0) {
+ mnt_ns = sb.st_ino;
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(cached_mnt_ns, mnt_ns);
+
+ return mnt_ns;
+}
+
+/*
+ * The mnt namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWNS
+ * * setns(2) called with the fd of a different mnt ns
+ * * unshare(2) called with CLONE_NEWNS
+ */
+void lttng_context_mnt_ns_reset(void)
+{
+ CMM_STORE_SHARED(cached_mnt_ns, NS_INO_UNINITIALIZED);
+}
+
+static
+size_t mnt_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void mnt_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t mnt_ns;
+
+ mnt_ns = get_mnt_ns();
+ chan->ops->event_write(ctx, &mnt_ns, sizeof(mnt_ns), lttng_ust_rb_alignof(mnt_ns));
+}
+
+static
+void mnt_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_mnt_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("mnt_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ mnt_ns_get_size,
+ mnt_ns_record,
+ mnt_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_mnt_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST net namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <ust-tid.h>
+#include <urcu/tls-compat.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_net_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_net_ns(void)
+{
+ struct stat sb;
+ ino_t net_ns;
+
+ net_ns = CMM_LOAD_SHARED(URCU_TLS(cached_net_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(net_ns != NS_INO_UNINITIALIZED))
+ return net_ns;
+
+ /*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+ */
+ net_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/net", &sb) == 0) {
+ net_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/net",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ net_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_net_ns), net_ns);
+
+ return net_ns;
+}
+
+/*
+ * The net namespace can change for 3 reasons
+ * * clone(2) called with CLONE_NEWNET
+ * * setns(2) called with the fd of a different net ns
+ * * unshare(2) called with CLONE_NEWNET
+ */
+void lttng_context_net_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_net_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t net_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void net_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t net_ns;
+
+ net_ns = get_net_ns();
+ chan->ops->event_write(ctx, &net_ns, sizeof(net_ns), lttng_ust_rb_alignof(net_ns));
+}
+
+static
+void net_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_net_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("net_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ net_ns_get_size,
+ net_ns_record,
+ net_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_net_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (implies a TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_net_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_net_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST performance monitoring counters (perf-counters) integration.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <lttng/ust-arch.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+#include <urcu/rculist.h>
+#include <ust-helper.h>
+#include <urcu/ref.h>
+#include <usterr-signal-safe.h>
+#include <signal.h>
+#include <urcu/tls-compat.h>
+#include "perf_event.h"
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ust-events-internal.h"
+
+/*
+ * We use a global perf counter key and iterate on per-thread RCU lists
+ * of fields in the fast path, even though this is not strictly speaking
+ * what would provide the best fast-path complexity, to ensure teardown
+ * of sessions vs thread exit is handled racelessly.
+ *
+ * Updates and traversals of thread_list are protected by UST lock.
+ * Updates to rcu_field_list are protected by UST lock.
+ */
+
+struct lttng_perf_counter_thread_field {
+ struct lttng_perf_counter_field *field; /* Back reference */
+ struct perf_event_mmap_page *pc;
+ struct cds_list_head thread_field_node; /* Per-field list of thread fields (node) */
+ struct cds_list_head rcu_field_node; /* RCU per-thread list of fields (node) */
+ int fd; /* Perf FD */
+};
+
+struct lttng_perf_counter_thread {
+ struct cds_list_head rcu_field_list; /* RCU per-thread list of fields */
+};
+
+struct lttng_perf_counter_field {
+ struct perf_event_attr attr;
+ struct cds_list_head thread_field_list; /* Per-field list of thread fields */
+ char *name;
+};
+
+static pthread_key_t perf_counter_key;
+
+/*
+ * lttng_perf_lock - Protect lttng-ust perf counter data structures
+ *
+ * Nests within the ust_lock, and therefore within the libc dl lock.
+ * Therefore, we need to fixup the TLS before nesting into this lock.
+ * Nests inside RCU bp read-side lock. Protects against concurrent
+ * fork.
+ */
+static pthread_mutex_t ust_perf_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * Cancel state when grabbing the ust_perf_mutex. Saved when locking,
+ * restored on unlock. Protected by ust_perf_mutex.
+ */
+static int ust_perf_saved_cancelstate;
+
+/*
+ * Track whether we are tracing from a signal handler nested on an
+ * application thread.
+ */
+static DEFINE_URCU_TLS(int, ust_perf_mutex_nest);
+
+/*
+ * Force a read (implies a TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_perf_mutex_nest)));
+}
+
+void lttng_perf_lock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_perf_mutex_nest)++) {
+ /*
+		 * Ensure the compiler does not move the store to after
+		 * the pthread_mutex_lock() call, in case the call were
+		 * marked as a leaf function.
+ */
+ cmm_barrier();
+ pthread_mutex_lock(&ust_perf_mutex);
+ ust_perf_saved_cancelstate = oldstate;
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+void lttng_perf_unlock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, newstate, oldstate;
+ bool restore_cancel = false;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ /*
+	 * Ensure the compiler does not move the store to before the
+	 * pthread_mutex_unlock() call, in case the call were marked
+	 * as a leaf function.
+ */
+ cmm_barrier();
+ if (!--URCU_TLS(ust_perf_mutex_nest)) {
+ newstate = ust_perf_saved_cancelstate;
+ restore_cancel = true;
+ pthread_mutex_unlock(&ust_perf_mutex);
+ }
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (restore_cancel) {
+ ret = pthread_setcancelstate(newstate, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ }
+}
+
+static
+size_t perf_counter_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ size += sizeof(uint64_t);
+ return size;
+}
+
+static
+uint64_t read_perf_counter_syscall(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ uint64_t count;
+
+ if (caa_unlikely(thread_field->fd < 0))
+ return 0;
+
+	if (caa_unlikely(read(thread_field->fd, &count, sizeof(count))
+			!= sizeof(count)))
+		return 0;
+
+ return count;
+}
+
+#if defined(LTTNG_UST_ARCH_X86)
+
+static
+uint64_t rdpmc(unsigned int counter)
+{
+ unsigned int low, high;
+
+ asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
+
+ return low | ((uint64_t) high) << 32;
+}
+
+static
+bool has_rdpmc(struct perf_event_mmap_page *pc)
+{
+ if (caa_unlikely(!pc->cap_bit0_is_deprecated))
+ return false;
+ /* Since Linux kernel 3.12. */
+ return pc->cap_user_rdpmc;
+}
+
+static
+uint64_t arch_read_perf_counter(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ uint32_t seq, idx;
+ uint64_t count;
+ struct perf_event_mmap_page *pc = thread_field->pc;
+
+ if (caa_unlikely(!pc))
+ return 0;
+
+ do {
+ seq = CMM_LOAD_SHARED(pc->lock);
+ cmm_barrier();
+
+ idx = pc->index;
+ if (caa_likely(has_rdpmc(pc) && idx)) {
+ int64_t pmcval;
+
+ pmcval = rdpmc(idx - 1);
+ /* Sign-extend the pmc register result. */
+ pmcval <<= 64 - pc->pmc_width;
+ pmcval >>= 64 - pc->pmc_width;
+ count = pc->offset + pmcval;
+ } else {
+			/* Fall back on the read system call if rdpmc cannot be used. */
+ return read_perf_counter_syscall(thread_field);
+ }
+ cmm_barrier();
+ } while (CMM_LOAD_SHARED(pc->lock) != seq);
+
+ return count;
+}
+
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+ struct perf_event_mmap_page *pc = thread_field->pc;
+
+ if (!pc)
+ return 0;
+ return !has_rdpmc(pc);
+}
+
+#else
+
+/* Generic (slow) implementation using a read system call. */
+static
+uint64_t arch_read_perf_counter(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ return read_perf_counter_syscall(thread_field);
+}
+
+static
+int arch_perf_keep_fd(struct lttng_perf_counter_thread_field *thread_field)
+{
+ return 1;
+}
+
+#endif
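
The x86 fast path reads the counter with rdpmc inside a seqlock-style retry loop (pc->lock is re-checked after the read) and sign-extends the raw pmc_width-bit value to 64 bits. A worked example of that sign extension with an assumed pmc_width of 48; like the original, it relies on arithmetic right shift of negative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int pmc_width = 48;
	/* 48-bit raw counter snapshot with bit 47 set: represents -10. */
	uint64_t raw = 0xFFFFFFFFFFF6ULL;
	int64_t pmcval;

	pmcval = (int64_t) (raw << (64 - pmc_width));	/* Bit 47 becomes the sign bit. */
	pmcval >>= 64 - pmc_width;			/* Arithmetic shift sign-extends. */
	printf("%lld\n", (long long) pmcval);		/* Prints -10. */
	return 0;
}
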
+
+static
+int sys_perf_event_open(struct perf_event_attr *attr,
+ pid_t pid, int cpu, int group_fd,
+ unsigned long flags)
+{
+ return syscall(SYS_perf_event_open, attr, pid, cpu,
+ group_fd, flags);
+}
+
+static
+int open_perf_fd(struct perf_event_attr *attr)
+{
+ int fd;
+
+ fd = sys_perf_event_open(attr, 0, -1, -1, 0);
+ if (fd < 0)
+ return -1;
+
+ return fd;
+}
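
open_perf_fd() passes pid = 0 and cpu = -1, i.e. "this thread, any CPU", matching the per-thread counter model of this file. A sketch of an attr a caller might hand it, using standard perf ABI constants (the helper name is hypothetical):

#include <linux/perf_event.h>
#include <string.h>

/* Hypothetical: open a CPU-cycles counter for the calling thread. */
static int open_cycles_fd(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.exclude_kernel = 1;	/* As perf_get_exclude_kernel() does on most arches. */
	return open_perf_fd(&attr);
}
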
+
+static
+void close_perf_fd(int fd)
+{
+ int ret;
+
+ if (fd < 0)
+ return;
+
+ ret = close(fd);
+ if (ret) {
+ perror("Error closing LTTng-UST perf memory mapping FD");
+ }
+}
+
+static void setup_perf(struct lttng_perf_counter_thread_field *thread_field)
+{
+ void *perf_addr;
+
+ perf_addr = mmap(NULL, sizeof(struct perf_event_mmap_page),
+ PROT_READ, MAP_SHARED, thread_field->fd, 0);
+ if (perf_addr == MAP_FAILED)
+ perf_addr = NULL;
+ thread_field->pc = perf_addr;
+
+ if (!arch_perf_keep_fd(thread_field)) {
+ close_perf_fd(thread_field->fd);
+ thread_field->fd = -1;
+ }
+}
+
+static
+void unmap_perf_page(struct perf_event_mmap_page *pc)
+{
+ int ret;
+
+ if (!pc)
+ return;
+ ret = munmap(pc, sizeof(struct perf_event_mmap_page));
+ if (ret < 0) {
+ PERROR("Error in munmap");
+ abort();
+ }
+}
+
+static
+struct lttng_perf_counter_thread *alloc_perf_counter_thread(void)
+{
+ struct lttng_perf_counter_thread *perf_thread;
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+ /* Check again with signals disabled */
+ perf_thread = pthread_getspecific(perf_counter_key);
+ if (perf_thread)
+ goto skip;
+ perf_thread = zmalloc(sizeof(*perf_thread));
+ if (!perf_thread)
+ abort();
+ CDS_INIT_LIST_HEAD(&perf_thread->rcu_field_list);
+ ret = pthread_setspecific(perf_counter_key, perf_thread);
+ if (ret)
+ abort();
+skip:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ return perf_thread;
+}
+
+static
+struct lttng_perf_counter_thread_field *
+ add_thread_field(struct lttng_perf_counter_field *perf_field,
+ struct lttng_perf_counter_thread *perf_thread)
+{
+ struct lttng_perf_counter_thread_field *thread_field;
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+ /* Check again with signals disabled */
+ cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
+ rcu_field_node) {
+ if (thread_field->field == perf_field)
+ goto skip;
+ }
+ thread_field = zmalloc(sizeof(*thread_field));
+ if (!thread_field)
+ abort();
+ thread_field->field = perf_field;
+ thread_field->fd = open_perf_fd(&perf_field->attr);
+ if (thread_field->fd >= 0)
+ setup_perf(thread_field);
+ /*
+ * Note: thread_field->pc can be NULL if setup_perf() fails.
+ * Also, thread_field->fd can be -1 if open_perf_fd() fails.
+ */
+ lttng_perf_lock();
+ cds_list_add_rcu(&thread_field->rcu_field_node,
+ &perf_thread->rcu_field_list);
+ cds_list_add(&thread_field->thread_field_node,
+ &perf_field->thread_field_list);
+ lttng_perf_unlock();
+skip:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ return thread_field;
+}
+
+static
+struct lttng_perf_counter_thread_field *
+ get_thread_field(struct lttng_perf_counter_field *field)
+{
+ struct lttng_perf_counter_thread *perf_thread;
+ struct lttng_perf_counter_thread_field *thread_field;
+
+ perf_thread = pthread_getspecific(perf_counter_key);
+ if (!perf_thread)
+ perf_thread = alloc_perf_counter_thread();
+ cds_list_for_each_entry_rcu(thread_field, &perf_thread->rcu_field_list,
+ rcu_field_node) {
+ if (thread_field->field == field)
+ return thread_field;
+ }
+ /* perf_counter_thread_field not found, need to add one */
+ return add_thread_field(field, perf_thread);
+}
+
+static
+uint64_t wrapper_perf_counter_read(void *priv)
+{
+ struct lttng_perf_counter_field *perf_field;
+ struct lttng_perf_counter_thread_field *perf_thread_field;
+
+ perf_field = (struct lttng_perf_counter_field *) priv;
+ perf_thread_field = get_thread_field(perf_field);
+ return arch_read_perf_counter(perf_thread_field);
+}
+
+static
+void perf_counter_record(void *priv,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uint64_t value;
+
+ value = wrapper_perf_counter_read(priv);
+ chan->ops->event_write(ctx, &value, sizeof(value), lttng_ust_rb_alignof(value));
+}
+
+static
+void perf_counter_get_value(void *priv,
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = wrapper_perf_counter_read(priv);
+}
+
+/* Called with perf lock held */
+static
+void lttng_destroy_perf_thread_field(
+ struct lttng_perf_counter_thread_field *thread_field)
+{
+ close_perf_fd(thread_field->fd);
+ unmap_perf_page(thread_field->pc);
+ cds_list_del_rcu(&thread_field->rcu_field_node);
+ cds_list_del(&thread_field->thread_field_node);
+ free(thread_field);
+}
+
+static
+void lttng_destroy_perf_thread_key(void *_key)
+{
+ struct lttng_perf_counter_thread *perf_thread = _key;
+ struct lttng_perf_counter_thread_field *pos, *p;
+
+ lttng_perf_lock();
+ cds_list_for_each_entry_safe(pos, p, &perf_thread->rcu_field_list,
+ rcu_field_node)
+ lttng_destroy_perf_thread_field(pos);
+ lttng_perf_unlock();
+ free(perf_thread);
+}
+
+/* Called with UST lock held */
+static
+void lttng_destroy_perf_counter_ctx_field(void *priv)
+{
+ struct lttng_perf_counter_field *perf_field;
+ struct lttng_perf_counter_thread_field *pos, *p;
+
+ perf_field = (struct lttng_perf_counter_field *) priv;
+ free(perf_field->name);
+	/*
+	 * This put is performed when no thread can concurrently
+	 * perform a "get", thanks to the urcu-bp grace period. Holding
+	 * the lttng perf lock protects against concurrent modification
+	 * of the per-thread thread field list.
+	 */
+ lttng_perf_lock();
+ cds_list_for_each_entry_safe(pos, p, &perf_field->thread_field_list,
+ thread_field_node)
+ lttng_destroy_perf_thread_field(pos);
+ lttng_perf_unlock();
+ free(perf_field);
+}
+
+#ifdef LTTNG_UST_ARCH_ARMV7
+
+static
+int perf_get_exclude_kernel(void)
+{
+ return 0;
+}
+
+#else /* LTTNG_UST_ARCH_ARMV7 */
+
+static
+int perf_get_exclude_kernel(void)
+{
+ return 1;
+}
+
+#endif /* LTTNG_UST_ARCH_ARMV7 */
+
+static const struct lttng_ust_type_common *ust_type =
+ lttng_ust_static_type_integer(sizeof(uint64_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uint64_t),
+ BYTE_ORDER, 10);
+
+/* Called with UST lock held */
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+ uint64_t config,
+ const char *name,
+ struct lttng_ust_ctx **ctx)
+{
+ struct lttng_ust_ctx_field ctx_field;
+ struct lttng_ust_event_field *event_field;
+ struct lttng_perf_counter_field *perf_field;
+ char *name_alloc;
+ int ret;
+
+ if (lttng_find_context(*ctx, name)) {
+ ret = -EEXIST;
+ goto find_error;
+ }
+ name_alloc = strdup(name);
+ if (!name_alloc) {
+ ret = -ENOMEM;
+ goto name_alloc_error;
+ }
+ event_field = zmalloc(sizeof(*event_field));
+ if (!event_field) {
+ ret = -ENOMEM;
+ goto event_field_alloc_error;
+ }
+ event_field->name = name_alloc;
+ event_field->type = ust_type;
+
+ perf_field = zmalloc(sizeof(*perf_field));
+ if (!perf_field) {
+ ret = -ENOMEM;
+ goto perf_field_alloc_error;
+ }
+ perf_field->attr.type = type;
+ perf_field->attr.config = config;
+ perf_field->attr.exclude_kernel = perf_get_exclude_kernel();
+ CDS_INIT_LIST_HEAD(&perf_field->thread_field_list);
+ perf_field->name = name_alloc;
+
+ /* Ensure that this perf counter can be used in this process. */
+ ret = open_perf_fd(&perf_field->attr);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto setup_error;
+ }
+ close_perf_fd(ret);
+
+ ctx_field.event_field = event_field;
+ ctx_field.get_size = perf_counter_get_size;
+ ctx_field.record = perf_counter_record;
+ ctx_field.get_value = perf_counter_get_value;
+ ctx_field.destroy = lttng_destroy_perf_counter_ctx_field;
+ ctx_field.priv = perf_field;
+
+ ret = lttng_ust_context_append(ctx, &ctx_field);
+ if (ret) {
+ ret = -ENOMEM;
+ goto append_context_error;
+ }
+ return 0;
+
+append_context_error:
+setup_error:
+ free(perf_field);
+perf_field_alloc_error:
+ free(event_field);
+event_field_alloc_error:
+ free(name_alloc);
+name_alloc_error:
+find_error:
+ return ret;
+}
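
For illustration, a call as the tracer might issue when a per-thread CPU-cycles context is requested; the type and config values are standard perf ABI constants, while the context name string and the wrapper function are assumptions, not taken from this patch:

#include <linux/perf_event.h>

/* Hypothetical wrapper around the entry point above. */
static int add_cycles_context(struct lttng_ust_ctx **ctx)
{
	return lttng_add_perf_counter_to_ctx(PERF_TYPE_HARDWARE,
			PERF_COUNT_HW_CPU_CYCLES,
			"perf:thread:cpu-cycles", ctx);
}
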
+
+int lttng_perf_counter_init(void)
+{
+ int ret;
+
+ ret = pthread_key_create(&perf_counter_key,
+ lttng_destroy_perf_thread_key);
+ if (ret)
+ ret = -ret;
+ return ret;
+}
+
+void lttng_perf_counter_exit(void)
+{
+ int ret;
+
+ ret = pthread_key_delete(perf_counter_key);
+ if (ret) {
+ errno = ret;
+ PERROR("Error in pthread_key_delete");
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST pid namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event. The PID namespace is global to the process.
+ */
+static ino_t cached_pid_ns = NS_INO_UNINITIALIZED;
+
+static
+ino_t get_pid_ns(void)
+{
+ struct stat sb;
+ ino_t pid_ns;
+
+ pid_ns = CMM_LOAD_SHARED(cached_pid_ns);
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(pid_ns != NS_INO_UNINITIALIZED))
+ return pid_ns;
+
+	/*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+	 */
+ pid_ns = NS_INO_UNAVAILABLE;
+
+ if (stat("/proc/self/ns/pid", &sb) == 0) {
+ pid_ns = sb.st_ino;
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(cached_pid_ns, pid_ns);
+
+ return pid_ns;
+}
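
CMM_LOAD_SHARED()/CMM_STORE_SHARED() above are liburcu's annotated accesses to shared memory; for a word-sized cache like this one they behave roughly like relaxed C11 atomics. A comparison sketch under that assumption (names and the EX_* sentinels are illustrative, not from the patch):

#include <stdatomic.h>
#include <sys/stat.h>
#include <sys/types.h>

#define EX_UNINITIALIZED	((ino_t) -1)
#define EX_UNAVAILABLE		((ino_t) 0)

static _Atomic ino_t cached_ns_example = EX_UNINITIALIZED;

static ino_t get_ns_example(void)
{
	ino_t ns = atomic_load_explicit(&cached_ns_example, memory_order_relaxed);
	struct stat sb;

	if (ns != EX_UNINITIALIZED)
		return ns;
	ns = EX_UNAVAILABLE;	/* Cached even on stat(2) failure. */
	if (stat("/proc/self/ns/pid", &sb) == 0)
		ns = sb.st_ino;
	atomic_store_explicit(&cached_ns_example, ns, memory_order_relaxed);
	return ns;
}
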
+
+/*
+ * A process's PID namespace membership is determined when the process is
+ * created and cannot be changed thereafter.
+ *
+ * The pid namespace can change only on clone(2) / fork(2):
+ * - clone(2) with the CLONE_NEWPID flag
+ * - clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWPID flag
+ * - clone(2) / fork(2) after a call to setns(2) with a PID namespace fd
+ */
+void lttng_context_pid_ns_reset(void)
+{
+ CMM_STORE_SHARED(cached_pid_ns, NS_INO_UNINITIALIZED);
+}
+
+static
+size_t pid_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void pid_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t pid_ns;
+
+ pid_ns = get_pid_ns();
+ chan->ops->event_write(ctx, &pid_ns, sizeof(pid_ns), lttng_ust_rb_alignof(pid_ns));
+}
+
+static
+void pid_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_pid_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("pid_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ pid_ns_get_size,
+ pid_ns_record,
+ pid_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_pid_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST procname context.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <urcu/tls-compat.h>
+#include <assert.h>
+#include "compat.h"
+#include "lttng-tracer-core.h"
+
+#include "context-internal.h"
+
+/* Maximum number of nesting levels for the procname cache. */
+#define PROCNAME_NESTING_MAX 2
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event.
+ * Upon exec, procname changes, but exec takes care of throwing away
+ * this cached version.
+ * The procname can also be changed by calling prctl(). The procname
+ * should be set for a thread before the first event is logged within
+ * this thread.
+ */
+typedef char procname_array[PROCNAME_NESTING_MAX][17];
+
+static DEFINE_URCU_TLS(procname_array, cached_procname);
+
+static DEFINE_URCU_TLS(int, procname_nesting);
+
+static inline
+const char *wrapper_getprocname(void)
+{
+ int nesting = CMM_LOAD_SHARED(URCU_TLS(procname_nesting));
+
+ if (caa_unlikely(nesting >= PROCNAME_NESTING_MAX))
+ return "<unknown>";
+ if (caa_unlikely(!URCU_TLS(cached_procname)[nesting][0])) {
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting + 1);
+ /* Increment nesting before updating cache. */
+ cmm_barrier();
+ lttng_pthread_getname_np(URCU_TLS(cached_procname)[nesting], LTTNG_UST_ABI_PROCNAME_LEN);
+ URCU_TLS(cached_procname)[nesting][LTTNG_UST_ABI_PROCNAME_LEN - 1] = '\0';
+ /* Decrement nesting after updating cache. */
+ cmm_barrier();
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), nesting);
+ }
+ return URCU_TLS(cached_procname)[nesting];
+}
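
The nesting level exists because filling a cache slot with lttng_pthread_getname_np() is not atomic: a signal handler that traces an event mid-update must not read the half-written slot. An illustrative interleaving (a sketch, not part of the patch):

/*
 * Interleaving the nesting level guards against:
 *
 *   application thread                signal handler (same thread)
 *   ------------------                ----------------------------
 *   wrapper_getprocname()
 *     nesting: 0 -> 1
 *     filling cached_procname[0] ...
 *                <signal delivered>
 *                                     wrapper_getprocname()
 *                                       reads nesting == 1
 *                                       uses cached_procname[1],
 *                                       never the half-written slot 0
 *     ... finishes slot 0
 *     nesting: 1 -> 0
 */
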
+
+/* Reset should not be called from a signal handler. */
+void lttng_ust_context_procname_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_procname)[1][0], '\0');
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), 1);
+ CMM_STORE_SHARED(URCU_TLS(cached_procname)[0][0], '\0');
+ CMM_STORE_SHARED(URCU_TLS(procname_nesting), 0);
+}
+
+static
+size_t procname_get_size(void *priv __attribute__((unused)),
+ size_t offset __attribute__((unused)))
+{
+ return LTTNG_UST_ABI_PROCNAME_LEN;
+}
+
+static
+void procname_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ const char *procname;
+
+ procname = wrapper_getprocname();
+ chan->ops->event_write(ctx, procname, LTTNG_UST_ABI_PROCNAME_LEN, 1);
+}
+
+static
+void procname_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.str = wrapper_getprocname();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("procname",
+ lttng_ust_static_type_array_text(LTTNG_UST_ABI_PROCNAME_LEN),
+ false, false),
+ procname_get_size,
+ procname_record,
+ procname_get_value,
+ NULL, NULL);
+
+int lttng_add_procname_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_procname_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_procname)[0]));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST application context provider.
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <ust-context-provider.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "jhash.h"
+#include "context-provider-internal.h"
+#include <ust-helper.h>
+
+struct lttng_ust_registered_context_provider {
+ const struct lttng_ust_context_provider *provider;
+
+ struct cds_hlist_node node;
+};
+
+struct lttng_ust_app_ctx {
+ char *name;
+ struct lttng_ust_event_field *event_field;
+ struct lttng_ust_type_common *type;
+};
+
+#define CONTEXT_PROVIDER_HT_BITS 12
+#define CONTEXT_PROVIDER_HT_SIZE (1U << CONTEXT_PROVIDER_HT_BITS)
+struct context_provider_ht {
+ struct cds_hlist_head table[CONTEXT_PROVIDER_HT_SIZE];
+};
+
+static struct context_provider_ht context_provider_ht;
+
+static const struct lttng_ust_context_provider *
+ lookup_provider_by_name(const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct lttng_ust_registered_context_provider *reg_provider;
+ uint32_t hash;
+ const char *end;
+ size_t len;
+
+	/* Lookup using everything before the first ':' as the key. */
+ end = strchr(name, ':');
+ if (end)
+ len = end - name;
+ else
+ len = strlen(name);
+ hash = jhash(name, len, 0);
+ head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
+ cds_hlist_for_each_entry(reg_provider, node, head, node) {
+ if (!strncmp(reg_provider->provider->name, name, len))
+ return reg_provider->provider;
+ }
+ return NULL;
+}
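
Because the lookup key stops at the first ':', a field reference such as "$app.myapp:counter" and the bare provider name "$app.myapp" hash to the same bucket. A standalone illustration of the key derivation (strings hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *name = "$app.myapp:counter";
	const char *end = strchr(name, ':');
	size_t len = end ? (size_t) (end - name) : strlen(name);

	printf("%zu: %.*s\n", len, (int) len, name);	/* Prints "10: $app.myapp". */
	return 0;
}
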
+
+struct lttng_ust_registered_context_provider *lttng_ust_context_provider_register(struct lttng_ust_context_provider *provider)
+{
+ struct lttng_ust_registered_context_provider *reg_provider = NULL;
+ struct cds_hlist_head *head;
+ size_t name_len = strlen(provider->name);
+ uint32_t hash;
+
+ lttng_ust_fixup_tls();
+
+	/* Provider name must start with "$app.". */
+ if (strncmp("$app.", provider->name, strlen("$app.")) != 0)
+ return NULL;
+ /* Provider name cannot contain a colon character. */
+ if (strchr(provider->name, ':'))
+ return NULL;
+ if (ust_lock())
+ goto end;
+ if (lookup_provider_by_name(provider->name))
+ goto end;
+ reg_provider = zmalloc(sizeof(struct lttng_ust_registered_context_provider));
+ if (!reg_provider)
+ goto end;
+ reg_provider->provider = provider;
+ hash = jhash(provider->name, name_len, 0);
+ head = &context_provider_ht.table[hash & (CONTEXT_PROVIDER_HT_SIZE - 1)];
+	cds_hlist_add_head(&reg_provider->node, head);
+
+ lttng_ust_context_set_session_provider(provider->name,
+ provider->get_size, provider->record,
+ provider->get_value, provider->priv);
+
+ lttng_ust_context_set_event_notifier_group_provider(provider->name,
+ provider->get_size, provider->record,
+ provider->get_value, provider->priv);
+end:
+ ust_unlock();
+ return reg_provider;
+}
+
+void lttng_ust_context_provider_unregister(struct lttng_ust_registered_context_provider *reg_provider)
+{
+ lttng_ust_fixup_tls();
+
+ if (ust_lock())
+ goto end;
+ lttng_ust_context_set_session_provider(reg_provider->provider->name,
+ lttng_ust_dummy_get_size, lttng_ust_dummy_record,
+ lttng_ust_dummy_get_value, NULL);
+
+ lttng_ust_context_set_event_notifier_group_provider(reg_provider->provider->name,
+ lttng_ust_dummy_get_size, lttng_ust_dummy_record,
+ lttng_ust_dummy_get_value, NULL);
+
+	cds_hlist_del(&reg_provider->node);
+end:
+ ust_unlock();
+ free(reg_provider);
+}
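
For reference, a hedged sketch of how an application might register a provider against this API. Only the fields this file dereferences (name, get_size, record, get_value, priv) are shown; the callback signatures are inferred from the ctx-field callbacks used elsewhere in this patch, bodies are omitted, and any additional fields of the public struct are left out:

static size_t my_get_size(void *priv, size_t offset);
static void my_record(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
		struct lttng_ust_channel_buffer *chan);
static void my_get_value(void *priv, struct lttng_ust_ctx_value *value);

static struct lttng_ust_context_provider my_provider = {
	.name = "$app.myapp",	/* Must start with "$app." and contain no ':'. */
	.get_size = my_get_size,
	.record = my_record,
	.get_value = my_get_value,
	.priv = NULL,
};

static struct lttng_ust_registered_context_provider *my_reg;

static void my_provider_init(void)
{
	my_reg = lttng_ust_context_provider_register(&my_provider);
}

static void my_provider_fini(void)
{
	if (my_reg)
		lttng_ust_context_provider_unregister(my_reg);
}
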
+
+static void destroy_app_ctx(void *priv)
+{
+ struct lttng_ust_app_ctx *app_ctx = (struct lttng_ust_app_ctx *) priv;
+
+ free(app_ctx->name);
+ free(app_ctx->event_field);
+ free(app_ctx->type);
+ free(app_ctx);
+}
+
+/*
+ * Called with ust mutex held.
+ * Add the application context to the array of contexts, even if the
+ * application context is not currently loaded by the application; the
+ * dummy callbacks are used in that case.
+ * Always performed before tracing is started, since it modifies
+ * metadata describing the context.
+ */
+int lttng_ust_add_app_context_to_ctx_rcu(const char *name,
+ struct lttng_ust_ctx **ctx)
+{
+ const struct lttng_ust_context_provider *provider;
+ struct lttng_ust_ctx_field new_field = { 0 };
+ struct lttng_ust_event_field *event_field = NULL;
+ struct lttng_ust_type_common *type = NULL;
+ struct lttng_ust_app_ctx *app_ctx = NULL;
+ char *ctx_name;
+ int ret;
+
+ if (*ctx && lttng_find_context(*ctx, name))
+ return -EEXIST;
+ event_field = zmalloc(sizeof(struct lttng_ust_event_field));
+ if (!event_field) {
+ ret = -ENOMEM;
+ goto error_event_field_alloc;
+ }
+ ctx_name = strdup(name);
+ if (!ctx_name) {
+ ret = -ENOMEM;
+ goto error_field_name_alloc;
+ }
+ type = zmalloc(sizeof(struct lttng_ust_type_common));
+ if (!type) {
+ ret = -ENOMEM;
+ goto error_field_type_alloc;
+ }
+ app_ctx = zmalloc(sizeof(struct lttng_ust_app_ctx));
+ if (!app_ctx) {
+ ret = -ENOMEM;
+ goto error_app_ctx_alloc;
+ }
+ event_field->name = ctx_name;
+ type->type = lttng_ust_type_dynamic;
+ event_field->type = type;
+ new_field.event_field = event_field;
+	/*
+	 * If the provider is not found, we add the context anyway; the
+	 * dummy callbacks then stand in until the provider is
+	 * registered.
+	 */
+ provider = lookup_provider_by_name(name);
+ if (provider) {
+ new_field.get_size = provider->get_size;
+ new_field.record = provider->record;
+ new_field.get_value = provider->get_value;
+ } else {
+ new_field.get_size = lttng_ust_dummy_get_size;
+ new_field.record = lttng_ust_dummy_record;
+ new_field.get_value = lttng_ust_dummy_get_value;
+ }
+ new_field.destroy = destroy_app_ctx;
+ new_field.priv = app_ctx;
+	/*
+	 * For application contexts, add the field by expanding the
+	 * ctx array.
+	 */
+ ret = lttng_ust_context_append_rcu(ctx, &new_field);
+ if (ret) {
+ destroy_app_ctx(app_ctx);
+ return ret;
+ }
+ return 0;
+
+error_app_ctx_alloc:
+ free(type);
+error_field_type_alloc:
+ free(ctx_name);
+error_field_name_alloc:
+ free(event_field);
+error_event_field_alloc:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST pthread_id context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <pthread.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+static
+size_t pthread_id_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(unsigned long));
+ size += sizeof(unsigned long);
+ return size;
+}
+
+static
+void pthread_id_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ unsigned long pthread_id;
+
+ pthread_id = (unsigned long) pthread_self();
+ chan->ops->event_write(ctx, &pthread_id, sizeof(pthread_id), lttng_ust_rb_alignof(pthread_id));
+}
+
+static
+void pthread_id_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = (unsigned long) pthread_self();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("pthread_id",
+ lttng_ust_static_type_integer(sizeof(unsigned long) * CHAR_BIT,
+ lttng_ust_rb_alignof(unsigned long) * CHAR_BIT,
+ lttng_ust_is_signed_type(unsigned long),
+ BYTE_ORDER, 10),
+ false, false),
+ pthread_id_get_size,
+ pthread_id_record,
+ pthread_id_get_value,
+ NULL, NULL);
+
+int lttng_add_pthread_id_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2020 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST time namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <ust-tid.h>
+#include <urcu/tls-compat.h>
+#include "lttng-tracer-core.h"
+#include "ns.h"
+#include "context-internal.h"
+
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_time_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_time_ns(void)
+{
+ struct stat sb;
+ ino_t time_ns;
+
+ time_ns = CMM_LOAD_SHARED(URCU_TLS(cached_time_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(time_ns != NS_INO_UNINITIALIZED))
+ return time_ns;
+
+	/*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+	 */
+ time_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/time", &sb) == 0) {
+ time_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/time",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ time_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_time_ns), time_ns);
+
+ return time_ns;
+}
+
+/*
+ * The time namespace can change for 2 reasons:
+ * * setns(2) called with the fd of a different time ns
+ * * clone(2) / fork(2) after a call to unshare(2) with the CLONE_NEWTIME flag
+ */
+void lttng_context_time_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_time_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t time_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void time_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t time_ns;
+
+ time_ns = get_time_ns();
+ chan->ops->event_write(ctx, &time_ns, sizeof(time_ns), lttng_ust_rb_alignof(time_ns));
+}
+
+static
+void time_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_time_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("time_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ time_ns_get_size,
+ time_ns_record,
+ time_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_time_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_time_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_time_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST user namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "ns.h"
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event. The user namespace is global to the process.
+ */
+static ino_t cached_user_ns = NS_INO_UNINITIALIZED;
+
+static
+ino_t get_user_ns(void)
+{
+ struct stat sb;
+ ino_t user_ns;
+
+ user_ns = CMM_LOAD_SHARED(cached_user_ns);
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(user_ns != NS_INO_UNINITIALIZED))
+ return user_ns;
+
+	/*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+	 */
+ user_ns = NS_INO_UNAVAILABLE;
+
+ if (stat("/proc/self/ns/user", &sb) == 0) {
+ user_ns = sb.st_ino;
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(cached_user_ns, user_ns);
+
+ return user_ns;
+}
+
+/*
+ * The user namespace can change for 3 reasons:
+ * * clone(2) called with CLONE_NEWUSER
+ * * setns(2) called with the fd of a different user ns
+ * * unshare(2) called with CLONE_NEWUSER
+ */
+void lttng_context_user_ns_reset(void)
+{
+ CMM_STORE_SHARED(cached_user_ns, NS_INO_UNINITIALIZED);
+}
+
+static
+size_t user_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void user_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t user_ns;
+
+ user_ns = get_user_ns();
+ chan->ops->event_write(ctx, &user_ns, sizeof(user_ns), lttng_ust_rb_alignof(user_ns));
+}
+
+static
+void user_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_user_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("user_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ user_ns_get_size,
+ user_ns_record,
+ user_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_user_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST uts namespace context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <ust-tid.h>
+#include <urcu/tls-compat.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+#include "ns.h"
+
+
+/*
+ * We cache the result to ensure we don't stat(2) the proc filesystem on
+ * each event.
+ */
+static DEFINE_URCU_TLS_INIT(ino_t, cached_uts_ns, NS_INO_UNINITIALIZED);
+
+static
+ino_t get_uts_ns(void)
+{
+ struct stat sb;
+ ino_t uts_ns;
+
+ uts_ns = CMM_LOAD_SHARED(URCU_TLS(cached_uts_ns));
+
+ /*
+ * If the cache is populated, do nothing and return the
+ * cached inode number.
+ */
+ if (caa_likely(uts_ns != NS_INO_UNINITIALIZED))
+ return uts_ns;
+
+	/*
+	 * At this point we have to populate the cache. Set the initial
+	 * value to NS_INO_UNAVAILABLE (0): if we fail to get the inode
+	 * number from the proc filesystem, this is the value we will
+	 * cache.
+	 */
+ uts_ns = NS_INO_UNAVAILABLE;
+
+ /*
+ * /proc/thread-self was introduced in kernel v3.17
+ */
+ if (stat("/proc/thread-self/ns/uts", &sb) == 0) {
+ uts_ns = sb.st_ino;
+ } else {
+ char proc_ns_path[LTTNG_PROC_NS_PATH_MAX];
+
+ if (snprintf(proc_ns_path, LTTNG_PROC_NS_PATH_MAX,
+ "/proc/self/task/%d/ns/uts",
+ lttng_gettid()) >= 0) {
+
+ if (stat(proc_ns_path, &sb) == 0) {
+ uts_ns = sb.st_ino;
+ }
+ }
+ }
+
+ /*
+ * And finally, store the inode number in the cache.
+ */
+ CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), uts_ns);
+
+ return uts_ns;
+}
+
+/*
+ * The uts namespace can change for 3 reasons:
+ * * clone(2) called with CLONE_NEWUTS
+ * * setns(2) called with the fd of a different uts ns
+ * * unshare(2) called with CLONE_NEWUTS
+ */
+void lttng_context_uts_ns_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_uts_ns), NS_INO_UNINITIALIZED);
+}
+
+static
+size_t uts_ns_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(ino_t));
+ size += sizeof(ino_t);
+ return size;
+}
+
+static
+void uts_ns_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ ino_t uts_ns;
+
+ uts_ns = get_uts_ns();
+ chan->ops->event_write(ctx, &uts_ns, sizeof(uts_ns), lttng_ust_rb_alignof(uts_ns));
+}
+
+static
+void uts_ns_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_uts_ns();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("uts_ns",
+ lttng_ust_static_type_integer(sizeof(ino_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(ino_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(ino_t),
+ BYTE_ORDER, 10),
+ false, false),
+ uts_ns_get_size,
+ uts_ns_record,
+ uts_ns_get_value,
+ NULL, NULL);
+
+int lttng_add_uts_ns_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_uts_ns_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_uts_ns)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced effective group ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static gid_t cached_vegid = INVALID_GID;
+
+static
+gid_t get_vegid(void)
+{
+ gid_t vegid;
+
+ vegid = CMM_LOAD_SHARED(cached_vegid);
+
+ if (caa_unlikely(vegid == INVALID_GID)) {
+ vegid = getegid();
+ CMM_STORE_SHARED(cached_vegid, vegid);
+ }
+
+ return vegid;
+}
+
+/*
+ * The vegid can change on setgid, setregid, setresgid and setegid.
+ */
+void lttng_context_vegid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vegid, INVALID_GID);
+}
+
+static
+size_t vegid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += sizeof(gid_t);
+ return size;
+}
+
+static
+void vegid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ gid_t vegid;
+
+ vegid = get_vegid();
+ chan->ops->event_write(ctx, &vegid, sizeof(vegid), lttng_ust_rb_alignof(vegid));
+}
+
+static
+void vegid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vegid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vegid",
+ lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(gid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vegid_get_size,
+ vegid_record,
+ vegid_get_value,
+ NULL, NULL);
+
+int lttng_add_vegid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced effective user ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static uid_t cached_veuid = INVALID_UID;
+
+static
+uid_t get_veuid(void)
+{
+ uid_t veuid;
+
+ veuid = CMM_LOAD_SHARED(cached_veuid);
+
+ if (caa_unlikely(veuid == INVALID_UID)) {
+ veuid = geteuid();
+ CMM_STORE_SHARED(cached_veuid, veuid);
+ }
+
+ return veuid;
+}
+
+/*
+ * The veuid can change on setuid, setreuid, setresuid and seteuid.
+ */
+void lttng_context_veuid_reset(void)
+{
+ CMM_STORE_SHARED(cached_veuid, INVALID_UID);
+}
+
+static
+size_t veuid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += sizeof(uid_t);
+ return size;
+}
+
+static
+void veuid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uid_t veuid;
+
+ veuid = get_veuid();
+ chan->ops->event_write(ctx, &veuid, sizeof(veuid), lttng_ust_rb_alignof(veuid));
+}
+
+static
+void veuid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_veuid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("veuid",
+ lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ veuid_get_size,
+ veuid_record,
+ veuid_get_value,
+ NULL, NULL);
+
+int lttng_add_veuid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced real group ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static gid_t cached_vgid = INVALID_GID;
+
+static
+gid_t get_vgid(void)
+{
+ gid_t vgid;
+
+ vgid = CMM_LOAD_SHARED(cached_vgid);
+
+	if (caa_unlikely(vgid == INVALID_GID)) {
+ vgid = getgid();
+ CMM_STORE_SHARED(cached_vgid, vgid);
+ }
+
+ return vgid;
+}
+
+/*
+ * The vgid can change on setgid, setregid and setresgid.
+ */
+void lttng_context_vgid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vgid, INVALID_GID);
+}
+
+static
+size_t vgid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += sizeof(gid_t);
+ return size;
+}
+
+static
+void vgid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ gid_t vgid;
+
+ vgid = get_vgid();
+ chan->ops->event_write(ctx, &vgid, sizeof(vgid), lttng_ust_rb_alignof(vgid));
+}
+
+static
+void vgid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vgid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vgid",
+ lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(gid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vgid_get_size,
+ vgid_record,
+ vgid_get_value,
+ NULL, NULL);
+
+int lttng_add_vgid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST vpid context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event.
+ */
+static pid_t cached_vpid;
+
+static inline
+pid_t wrapper_getvpid(void)
+{
+ pid_t vpid;
+
+ vpid = CMM_LOAD_SHARED(cached_vpid);
+ if (caa_unlikely(!vpid)) {
+ vpid = getpid();
+ CMM_STORE_SHARED(cached_vpid, vpid);
+ }
+ return vpid;
+}
+
+/*
+ * Upon fork or clone, the PID assigned to our thread is not the same
+ * as the one we kept in cache.
+ */
+void lttng_context_vpid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vpid, 0);
+}
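
How this reset gets invoked is outside this hunk; one plausible arrangement (an assumption, not shown in the patch) is a child fork handler, so the next event in the child re-reads its PID:

#include <pthread.h>

/* Hypothetical wiring: run in the child after fork(2). */
static void example_atfork_child(void)
{
	lttng_context_vpid_reset();
}

static void example_install_fork_handler(void)
{
	pthread_atfork(NULL, NULL, example_atfork_child);
}
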
+
+static
+size_t vpid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
+ size += sizeof(pid_t);
+ return size;
+}
+
+static
+void vpid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ pid_t vpid = wrapper_getvpid();
+
+ chan->ops->event_write(ctx, &vpid, sizeof(vpid), lttng_ust_rb_alignof(vpid));
+}
+
+static
+void vpid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = wrapper_getvpid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vpid",
+ lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(pid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vpid_get_size,
+ vpid_record,
+ vpid_get_value,
+ NULL, NULL);
+
+int lttng_add_vpid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced saved set-group ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static gid_t cached_vsgid = INVALID_GID;
+
+static
+gid_t get_vsgid(void)
+{
+ gid_t vsgid;
+
+ vsgid = CMM_LOAD_SHARED(cached_vsgid);
+
+ if (caa_unlikely(vsgid == INVALID_GID)) {
+ gid_t gid, egid, sgid;
+
+ if (getresgid(&gid, &egid, &sgid) == 0) {
+ vsgid = sgid;
+ CMM_STORE_SHARED(cached_vsgid, vsgid);
+ }
+ }
+
+ return vsgid;
+}
+
+/*
+ * The vsgid can change on setgid, setregid and setresgid.
+ */
+void lttng_context_vsgid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vsgid, INVALID_GID);
+}
+
+static
+size_t vsgid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(gid_t));
+ size += sizeof(gid_t);
+ return size;
+}
+
+static
+void vsgid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ gid_t vsgid;
+
+ vsgid = get_vsgid();
+ chan->ops->event_write(ctx, &vsgid, sizeof(vsgid), lttng_ust_rb_alignof(vsgid));
+}
+
+static
+void vsgid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vsgid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vsgid",
+ lttng_ust_static_type_integer(sizeof(gid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(gid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(gid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vsgid_get_size,
+ vsgid_record,
+ vsgid_get_value,
+ NULL, NULL);
+
+int lttng_add_vsgid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced saved set-user ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static uid_t cached_vsuid = INVALID_UID;
+
+static
+uid_t get_vsuid(void)
+{
+ uid_t vsuid;
+
+ vsuid = CMM_LOAD_SHARED(cached_vsuid);
+
+ if (caa_unlikely(vsuid == INVALID_UID)) {
+ uid_t uid, euid, suid;
+
+ if (getresuid(&uid, &euid, &suid) == 0) {
+ vsuid = suid;
+ CMM_STORE_SHARED(cached_vsuid, vsuid);
+ }
+ }
+
+ return vsuid;
+}
+
+/*
+ * The vsuid can change on setuid, setreuid and setresuid.
+ */
+void lttng_context_vsuid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vsuid, INVALID_UID);
+}
+
+static
+size_t vsuid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += sizeof(uid_t);
+ return size;
+}
+
+static
+void vsuid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uid_t vsuid;
+
+ vsuid = get_vsuid();
+ chan->ops->event_write(ctx, &vsuid, sizeof(vsuid), lttng_ust_rb_alignof(vsuid));
+}
+
+static
+void vsuid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vsuid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vsuid",
+ lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vsuid_get_size,
+ vsuid_record,
+ vsuid_get_value,
+ NULL, NULL);
+
+int lttng_add_vsuid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST vtid context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <ust-tid.h>
+#include <urcu/tls-compat.h>
+
+#include "context-internal.h"
+#include "lttng-tracer-core.h"
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event.
+ */
+static DEFINE_URCU_TLS(pid_t, cached_vtid);
+
+/*
+ * Upon fork or clone, the TID assigned to our thread is not the same
+ * as the one we kept in cache. Luckily, we are the only thread
+ * surviving in the child process, so we can simply clear our cached
+ * version.
+ */
+void lttng_context_vtid_reset(void)
+{
+ CMM_STORE_SHARED(URCU_TLS(cached_vtid), 0);
+}
+
+static
+size_t vtid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(pid_t));
+ size += sizeof(pid_t);
+ return size;
+}
+
+static inline
+pid_t wrapper_getvtid(void)
+{
+ pid_t vtid;
+
+ vtid = CMM_LOAD_SHARED(URCU_TLS(cached_vtid));
+ if (caa_unlikely(!vtid)) {
+ vtid = lttng_gettid();
+ CMM_STORE_SHARED(URCU_TLS(cached_vtid), vtid);
+ }
+ return vtid;
+}
+
+static
+void vtid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ pid_t vtid = wrapper_getvtid();
+
+ chan->ops->event_write(ctx, &vtid, sizeof(vtid), lttng_ust_rb_alignof(vtid));
+}
+
+static
+void vtid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = wrapper_getvtid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vtid",
+ lttng_ust_static_type_integer(sizeof(pid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(pid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(pid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vtid_get_size,
+ vtid_record,
+ vtid_get_value,
+ NULL, NULL);
+
+int lttng_add_vtid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_vtid_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(cached_vtid)));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ *
+ * LTTng UST namespaced real user ID context.
+ */
+
+#define _LGPL_SOURCE
+#include <limits.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "context-internal.h"
+#include "creds.h"
+
+
+/*
+ * At the kernel level, user IDs and group IDs are a per-thread attribute.
+ * However, POSIX requires that all threads in a process share the same
+ * credentials. The NPTL threading implementation handles the POSIX
+ * requirements by providing wrapper functions for the various system calls
+ * that change process UIDs and GIDs. These wrapper functions (including those
+ * for setreuid() and setregid()) employ a signal-based technique to ensure
+ * that when one thread changes credentials, all of the other threads in the
+ * process also change their credentials.
+ */
+
+/*
+ * We cache the result to ensure we don't trigger a system call for
+ * each event. User / group IDs are global to the process.
+ */
+static uid_t cached_vuid = INVALID_UID;
+
+static
+uid_t get_vuid(void)
+{
+ uid_t vuid;
+
+ vuid = CMM_LOAD_SHARED(cached_vuid);
+
+ if (caa_unlikely(vuid == INVALID_UID)) {
+ vuid = getuid();
+ CMM_STORE_SHARED(cached_vuid, vuid);
+ }
+
+ return vuid;
+}
+
+/*
+ * The vuid can change on setuid, setreuid and setresuid.
+ */
+void lttng_context_vuid_reset(void)
+{
+ CMM_STORE_SHARED(cached_vuid, INVALID_UID);
+}
+
+static
+size_t vuid_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uid_t));
+ size += sizeof(uid_t);
+ return size;
+}
+
+static
+void vuid_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ uid_t vuid;
+
+ vuid = get_vuid();
+ chan->ops->event_write(ctx, &vuid, sizeof(vuid), lttng_ust_rb_alignof(vuid));
+}
+
+static
+void vuid_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->u.s64 = get_vuid();
+}
+
+static const struct lttng_ust_ctx_field *ctx_field = lttng_ust_static_ctx_field(
+ lttng_ust_static_event_field("vuid",
+ lttng_ust_static_type_integer(sizeof(uid_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uid_t) * CHAR_BIT,
+ lttng_ust_is_signed_type(uid_t),
+ BYTE_ORDER, 10),
+ false, false),
+ vuid_get_size,
+ vuid_record,
+ vuid_get_value,
+ NULL, NULL);
+
+int lttng_add_vuid_to_ctx(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ if (lttng_find_context(*ctx, ctx_field->event_field->name)) {
+ ret = -EEXIST;
+ goto error_find_context;
+ }
+ ret = lttng_ust_context_append(ctx, ctx_field);
+ if (ret)
+ return ret;
+ return 0;
+
+error_find_context:
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST trace/channel/event context management.
+ */
+
+#define _LGPL_SOURCE
+#include <lttng/ust-events.h>
+#include <lttng/ust-tracer.h>
+#include <ust-context-provider.h>
+#include <lttng/urcu/pointer.h>
+#include <lttng/urcu/urcu-ust.h>
+#include <usterr-signal-safe.h>
+#include <ust-helper.h>
+#include <stddef.h>
+#include <string.h>
+#include <assert.h>
+#include <limits.h>
+#include "tracepoint-internal.h"
+
+#include "context-internal.h"
+
+/*
+ * The filter implementation requires that two consecutive "get"
+ * operations for the same context, performed by the same thread, return
+ * the same result.
+ */
+
+int lttng_find_context(struct lttng_ust_ctx *ctx, const char *name)
+{
+ unsigned int i;
+ const char *subname;
+
+ if (!ctx)
+ return 0;
+ if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
+ subname = name + strlen("$ctx.");
+ } else {
+ subname = name;
+ }
+ for (i = 0; i < ctx->nr_fields; i++) {
+ /* Skip allocated (but non-initialized) contexts */
+ if (!ctx->fields[i].event_field->name)
+ continue;
+ if (!strcmp(ctx->fields[i].event_field->name, subname))
+ return 1;
+ }
+ return 0;
+}
+
+int lttng_get_context_index(struct lttng_ust_ctx *ctx, const char *name)
+{
+ unsigned int i;
+ const char *subname;
+
+ if (!ctx)
+ return -1;
+ if (strncmp(name, "$ctx.", strlen("$ctx.")) == 0) {
+ subname = name + strlen("$ctx.");
+ } else {
+ subname = name;
+ }
+ for (i = 0; i < ctx->nr_fields; i++) {
+ /* Skip allocated (but non-initialized) contexts */
+ if (!ctx->fields[i].event_field->name)
+ continue;
+ if (!strcmp(ctx->fields[i].event_field->name, subname))
+ return i;
+ }
+ return -1;
+}
+
+static int lttng_find_context_provider(struct lttng_ust_ctx *ctx, const char *name)
+{
+ unsigned int i;
+
+ for (i = 0; i < ctx->nr_fields; i++) {
+ /* Skip allocated (but non-initialized) contexts */
+ if (!ctx->fields[i].event_field->name)
+ continue;
+ if (!strncmp(ctx->fields[i].event_field->name, name,
+ strlen(name)))
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Note: as we append context information, the pointer location may change.
+ * lttng_ust_context_add_field leaves the new last context field zero-initialized.
+ */
+static
+int lttng_ust_context_add_field(struct lttng_ust_ctx **ctx_p)
+{
+ struct lttng_ust_ctx *ctx;
+
+ if (!*ctx_p) {
+ *ctx_p = zmalloc(sizeof(struct lttng_ust_ctx));
+ if (!*ctx_p)
+ return -ENOMEM;
+ (*ctx_p)->largest_align = 1;
+ }
+ ctx = *ctx_p;
+ if (ctx->nr_fields + 1 > ctx->allocated_fields) {
+ struct lttng_ust_ctx_field *new_fields;
+
+ ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
+ new_fields = zmalloc(ctx->allocated_fields * sizeof(*new_fields));
+ if (!new_fields)
+ return -ENOMEM;
+ /* Copy elements */
+ if (ctx->fields)
+ memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
+ free(ctx->fields);
+ ctx->fields = new_fields;
+ }
+ ctx->nr_fields++;
+ return 0;
+}
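+
+/*
+ * Editorial note: the field array grows geometrically (1, 2, 4, 8, ...
+ * slots), so appending N fields costs O(N) amortized copies. Because
+ * the new storage comes from zmalloc() and only nr_fields elements are
+ * copied, the new last slot is zero-initialized; the append functions
+ * below fill it in.
+ */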
+
+static size_t get_type_max_align(const struct lttng_ust_type_common *type)
+{
+ switch (type->type) {
+ case lttng_ust_type_integer:
+ return lttng_ust_get_type_integer(type)->alignment;
+ case lttng_ust_type_string:
+ return CHAR_BIT;
+ case lttng_ust_type_dynamic:
+ return 0;
+ case lttng_ust_type_enum:
+ return get_type_max_align(lttng_ust_get_type_enum(type)->container_type);
+ case lttng_ust_type_array:
+ return max_t(size_t, get_type_max_align(lttng_ust_get_type_array(type)->elem_type),
+ lttng_ust_get_type_array(type)->alignment);
+ case lttng_ust_type_sequence:
+ return max_t(size_t, get_type_max_align(lttng_ust_get_type_sequence(type)->elem_type),
+ lttng_ust_get_type_sequence(type)->alignment);
+ case lttng_ust_type_struct:
+ {
+ unsigned int i;
+ size_t field_align = 0;
+ const struct lttng_ust_type_struct *struct_type = lttng_ust_get_type_struct(type);
+
+ for (i = 0; i < struct_type->nr_fields; i++) {
+ field_align = max_t(size_t,
+ get_type_max_align(struct_type->fields[i]->type),
+ field_align);
+ }
+ return field_align;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+}
+
+/*
+ * lttng_context_update() should be called at least once between context
+ * modification and trace start.
+ */
+static
+void lttng_context_update(struct lttng_ust_ctx *ctx)
+{
+ int i;
+ size_t largest_align = 8; /* in bits */
+
+ for (i = 0; i < ctx->nr_fields; i++) {
+ size_t field_align = 8;
+
+ field_align = get_type_max_align(ctx->fields[i].event_field->type);
+ largest_align = max_t(size_t, largest_align, field_align);
+ }
+ ctx->largest_align = largest_align >> 3; /* bits to bytes */
+}
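+
+/*
+ * Editorial example: largest_align starts at 8 bits, so the result is
+ * never below one byte. For a hypothetical context holding a string
+ * field (CHAR_BIT alignment) and a 64-bit integer field aligned on 64
+ * bits, the loop computes largest_align = 64 bits, stored as 8 bytes.
+ */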
+
+int lttng_ust_context_append_rcu(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+{
+ struct lttng_ust_ctx *old_ctx = *ctx_p, *new_ctx = NULL;
+ struct lttng_ust_ctx_field *new_fields = NULL;
+ int ret;
+
+ if (old_ctx) {
+ new_ctx = zmalloc(sizeof(struct lttng_ust_ctx));
+ if (!new_ctx)
+ return -ENOMEM;
+ *new_ctx = *old_ctx;
+ new_fields = zmalloc(new_ctx->allocated_fields * sizeof(*new_fields));
+ if (!new_fields) {
+ free(new_ctx);
+ return -ENOMEM;
+ }
+ /* Copy elements */
+ memcpy(new_fields, old_ctx->fields,
+ sizeof(*old_ctx->fields) * old_ctx->nr_fields);
+ new_ctx->fields = new_fields;
+ }
+ ret = lttng_ust_context_add_field(&new_ctx);
+ if (ret) {
+ free(new_fields);
+ free(new_ctx);
+ return ret;
+ }
+ new_ctx->fields[new_ctx->nr_fields - 1] = *f;
+ lttng_context_update(new_ctx);
+ lttng_ust_rcu_assign_pointer(*ctx_p, new_ctx);
+ lttng_ust_urcu_synchronize_rcu();
+ if (old_ctx) {
+ free(old_ctx->fields);
+ free(old_ctx);
+ }
+ return 0;
+}
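+
+/*
+ * Editorial note on the RCU publication above: a private copy of the
+ * context and its field array is built off to the side, the new field
+ * is appended to the copy, and only then is the pointer published with
+ * lttng_ust_rcu_assign_pointer(). lttng_ust_urcu_synchronize_rcu()
+ * waits for pre-existing read-side critical sections to finish before
+ * the old arrays are freed, so a reader sees either the old or the new
+ * context, never a partially updated one.
+ */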
+
+int lttng_ust_context_append(struct lttng_ust_ctx **ctx_p,
+ const struct lttng_ust_ctx_field *f)
+{
+ int ret;
+
+ ret = lttng_ust_context_add_field(ctx_p);
+ if (ret)
+ return ret;
+ (*ctx_p)->fields[(*ctx_p)->nr_fields - 1] = *f;
+ lttng_context_update(*ctx_p);
+ return 0;
+}
+
+void lttng_destroy_context(struct lttng_ust_ctx *ctx)
+{
+ int i;
+
+ if (!ctx)
+ return;
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (ctx->fields[i].destroy)
+ ctx->fields[i].destroy(ctx->fields[i].priv);
+ }
+ free(ctx->fields);
+ free(ctx);
+}
+
+/*
+ * Can be safely performed concurrently with tracing using the struct
+ * lttng_ust_ctx, through an RCU update. Needs to match the RCU
+ * read-side handling of contexts.
+ *
+ * This does not allow adding, removing, or changing the type of the
+ * contexts, since these need to stay invariant for the metadata.
+ * However, it allows updating the handlers associated with all contexts
+ * matching a provider (by name) while tracing is using them, in a way
+ * that ensures a single RCU read-side critical section sees either all
+ * old, or all new handlers.
+ */
+int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
+ const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+{
+ int i, ret;
+ struct lttng_ust_ctx *ctx = *_ctx, *new_ctx;
+ struct lttng_ust_ctx_field *new_fields;
+
+ if (!ctx || !lttng_find_context_provider(ctx, name))
+ return 0;
+ /*
+ * We have at least one instance of context for the provider.
+ */
+ new_ctx = zmalloc(sizeof(*new_ctx));
+ if (!new_ctx)
+ return -ENOMEM;
+ *new_ctx = *ctx;
+ new_fields = zmalloc(sizeof(*new_fields) * ctx->allocated_fields);
+ if (!new_fields) {
+ ret = -ENOMEM;
+ goto field_error;
+ }
+ /* Copy elements */
+ memcpy(new_fields, ctx->fields,
+ sizeof(*new_fields) * ctx->allocated_fields);
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (strncmp(new_fields[i].event_field->name,
+ name, strlen(name)) != 0)
+ continue;
+ new_fields[i].get_size = get_size;
+ new_fields[i].record = record;
+ new_fields[i].get_value = get_value;
+ new_fields[i].priv = priv;
+ }
+ new_ctx->fields = new_fields;
+ lttng_ust_rcu_assign_pointer(*_ctx, new_ctx);
+ lttng_ust_urcu_synchronize_rcu();
+ free(ctx->fields);
+ free(ctx);
+ return 0;
+
+field_error:
+ free(new_ctx);
+ return ret;
+}
+
+int lttng_context_init_all(struct lttng_ust_ctx **ctx)
+{
+ int ret;
+
+ ret = lttng_add_pthread_id_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_pthread_id_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vtid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vtid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vpid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vpid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_procname_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_procname_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_cpu_id_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_cpu_id_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_cgroup_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_cgroup_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_ipc_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_ipc_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_mnt_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_mnt_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_net_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_net_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_pid_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_pid_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_time_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_time_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_user_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_user_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_uts_ns_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_uts_ns_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vuid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vuid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_veuid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_veuid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vsuid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vsuid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vgid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vgid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vegid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vegid_to_ctx");
+ goto error;
+ }
+ ret = lttng_add_vsgid_to_ctx(ctx);
+ if (ret) {
+ WARN("Cannot add context lttng_add_vsgid_to_ctx");
+ goto error;
+ }
+ lttng_context_update(*ctx);
+ return 0;
+
+error:
+ lttng_destroy_context(*ctx);
+ return ret;
+}
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-32-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 32-bit counters in modular
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "ust-events-internal.h"
+#include "../libcounter/counter.h"
+#include "../libcounter/counter-api.h"
+#include "lttng-tracer-core.h"
+#include "lttng-counter-client.h"
+
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_32_BIT,
+};
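+
+/*
+ * Editorial note: COUNTER_ARITHMETIC_MODULAR with COUNTER_SIZE_32_BIT
+ * means increments wrap modulo 2^32 rather than saturating: e.g. a
+ * per-cpu counter holding 0xffffffff incremented by 1 reads back as 0.
+ * This is presumably why counter_create() below rejects dimensions
+ * declaring underflow/overflow semantics.
+ */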
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-32-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+void lttng_counter_client_percpu_32_modular_init(void)
+{
+ lttng_counter_transport_register(&lttng_counter_transport);
+}
+
+void lttng_counter_client_percpu_32_modular_exit(void)
+{
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
--- /dev/null
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * lttng-counter-client-percpu-64-modular.c
+ *
+ * LTTng lib counter client. Per-cpu 64-bit counters in modular
+ * arithmetic.
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "ust-events-internal.h"
+#include "../libcounter/counter.h"
+#include "../libcounter/counter-api.h"
+#include "lttng-tracer-core.h"
+#include "lttng-counter-client.h"
+
+static const struct lib_counter_config client_config = {
+ .alloc = COUNTER_ALLOC_PER_CPU,
+ .sync = COUNTER_SYNC_PER_CPU,
+ .arithmetic = COUNTER_ARITHMETIC_MODULAR,
+ .counter_size = COUNTER_SIZE_64_BIT,
+};
+
+static struct lib_counter *counter_create(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon)
+{
+ size_t max_nr_elem[LTTNG_COUNTER_DIMENSION_MAX], i;
+
+ if (nr_dimensions > LTTNG_COUNTER_DIMENSION_MAX)
+ return NULL;
+ for (i = 0; i < nr_dimensions; i++) {
+ if (dimensions[i].has_underflow || dimensions[i].has_overflow)
+ return NULL;
+ max_nr_elem[i] = dimensions[i].size;
+ }
+ return lttng_counter_create(&client_config, nr_dimensions, max_nr_elem,
+ global_sum_step, global_counter_fd, nr_counter_cpu_fds,
+ counter_cpu_fds, is_daemon);
+}
+
+static void counter_destroy(struct lib_counter *counter)
+{
+ lttng_counter_destroy(counter);
+}
+
+static int counter_add(struct lib_counter *counter, const size_t *dimension_indexes, int64_t v)
+{
+ return lttng_counter_add(&client_config, counter, dimension_indexes, v);
+}
+
+static int counter_read(struct lib_counter *counter, const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_read(&client_config, counter, dimension_indexes, cpu, value,
+ overflow, underflow);
+}
+
+static int counter_aggregate(struct lib_counter *counter, const size_t *dimension_indexes,
+ int64_t *value, bool *overflow, bool *underflow)
+{
+ return lttng_counter_aggregate(&client_config, counter, dimension_indexes, value,
+ overflow, underflow);
+}
+
+static int counter_clear(struct lib_counter *counter, const size_t *dimension_indexes)
+{
+ return lttng_counter_clear(&client_config, counter, dimension_indexes);
+}
+
+static struct lttng_counter_transport lttng_counter_transport = {
+ .name = "counter-per-cpu-64-modular",
+ .ops = {
+ .counter_create = counter_create,
+ .counter_destroy = counter_destroy,
+ .counter_add = counter_add,
+ .counter_read = counter_read,
+ .counter_aggregate = counter_aggregate,
+ .counter_clear = counter_clear,
+ },
+ .client_config = &client_config,
+};
+
+void lttng_counter_client_percpu_64_modular_init(void)
+{
+ lttng_counter_transport_register(&lttng_counter_transport);
+}
+
+void lttng_counter_client_percpu_64_modular_exit(void)
+{
+ lttng_counter_transport_unregister(&lttng_counter_transport);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
+ *
+ * Copyright (C) 2020 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib counter client.
+ */
+
+#ifndef _LTTNG_UST_COUNTER_CLIENT_H
+#define _LTTNG_UST_COUNTER_CLIENT_H
+
+/*
+ * The counter clients init/exit symbols are private ABI for
+ * liblttng-ust-ctl, which is why they are not hidden.
+ */
+
+void lttng_ust_counter_clients_init(void);
+void lttng_ust_counter_clients_exit(void);
+
+void lttng_counter_client_percpu_32_modular_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_client_percpu_32_modular_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_client_percpu_64_modular_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_client_percpu_64_modular_exit(void)
+ __attribute__((visibility("hidden")));
+
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Holds LTTng per-session event registry.
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sys/shm.h>
+#include <sys/ipc.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <inttypes.h>
+#include <time.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <dlfcn.h>
+#include <lttng/ust-endian.h>
+
+#include <urcu/arch.h>
+#include <urcu/compiler.h>
+#include <urcu/hlist.h>
+#include <urcu/list.h>
+#include <urcu/uatomic.h>
+
+#include <lttng/tracepoint.h>
+#include <lttng/ust-events.h>
+
+#include <usterr-signal-safe.h>
+#include <ust-helper.h>
+#include <lttng/ust-ctl.h>
+#include <ust-comm.h>
+#include <ust-fd.h>
+#include <ust-dynamic-type.h>
+#include <ust-context-provider.h>
+#include "error.h"
+#include "compat.h"
+#include "lttng-ust-uuid.h"
+
+#include "tracepoint-internal.h"
+#include "string-utils.h"
+#include "lttng-bytecode.h"
+#include "lttng-tracer.h"
+#include "lttng-tracer-core.h"
+#include "lttng-ust-statedump.h"
+#include "context-internal.h"
+#include "ust-events-internal.h"
+#include "wait.h"
+#include "../libringbuffer/shm.h"
+#include "../libringbuffer/frontend_types.h"
+#include "../libringbuffer/frontend.h"
+#include "../libcounter/counter.h"
+#include "jhash.h"
+#include <lttng/ust-abi.h>
+#include "context-provider-internal.h"
+
+/*
+ * All operations within this file are called by the communication
+ * thread, under ust_lock protection.
+ */
+
+static CDS_LIST_HEAD(sessions);
+static CDS_LIST_HEAD(event_notifier_groups);
+
+struct cds_list_head *lttng_get_sessions(void)
+{
+ return &sessions;
+}
+
+static void _lttng_event_destroy(struct lttng_ust_event_common *event);
+static void _lttng_enum_destroy(struct lttng_enum *_enum);
+
+static
+void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session);
+static
+void lttng_session_sync_event_enablers(struct lttng_ust_session *session);
+static
+void lttng_event_notifier_group_sync_enablers(
+ struct lttng_event_notifier_group *event_notifier_group);
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler);
+
+bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
+{
+ if (strlen(desc->probe_desc->provider_name) + 1 +
+ strlen(desc->event_name) >= LTTNG_UST_ABI_SYM_NAME_LEN)
+ return false;
+ return true;
+}
+
+void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
+ char *name)
+{
+ strcpy(name, desc->probe_desc->provider_name);
+ strcat(name, ":");
+ strcat(name, desc->event_name);
+}
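+
+/*
+ * Editorial note: this writes "provider:event" through unchecked
+ * strcpy()/strcat() into a caller-supplied buffer of
+ * LTTNG_UST_ABI_SYM_NAME_LEN bytes; it is safe only for descriptors
+ * that pass lttng_ust_validate_event_name() above, which callers are
+ * expected to have checked.
+ */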
+
+/*
+ * Called with ust lock held.
+ */
+int lttng_session_active(void)
+{
+ struct lttng_ust_session_private *iter;
+
+ cds_list_for_each_entry(iter, &sessions, node) {
+ if (iter->pub->active)
+ return 1;
+ }
+ return 0;
+}
+
+static
+int lttng_loglevel_match(int loglevel,
+ unsigned int has_loglevel,
+ enum lttng_ust_abi_loglevel_type req_type,
+ int req_loglevel)
+{
+ if (!has_loglevel)
+ loglevel = TRACE_DEFAULT;
+ switch (req_type) {
+ case LTTNG_UST_ABI_LOGLEVEL_RANGE:
+ if (loglevel <= req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
+ return 1;
+ else
+ return 0;
+ case LTTNG_UST_ABI_LOGLEVEL_SINGLE:
+ if (loglevel == req_loglevel
+ || (req_loglevel == -1 && loglevel <= TRACE_DEBUG))
+ return 1;
+ else
+ return 0;
+ case LTTNG_UST_ABI_LOGLEVEL_ALL:
+ default:
+ if (loglevel <= TRACE_DEBUG)
+ return 1;
+ else
+ return 0;
+ }
+}
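+
+/*
+ * Editorial example: LTTng-UST loglevels are ordered from most severe
+ * (TRACE_EMERG == 0) to least severe (TRACE_DEBUG == 14). With
+ * LTTNG_UST_ABI_LOGLEVEL_RANGE and req_loglevel == TRACE_INFO, an event
+ * declared at TRACE_WARNING matches (more severe) while one at
+ * TRACE_DEBUG does not; a req_loglevel of -1 matches every loglevel.
+ */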
+
+struct lttng_ust_session *lttng_session_create(void)
+{
+ struct lttng_ust_session *session;
+ struct lttng_ust_session_private *session_priv;
+ int i;
+
+ session = zmalloc(sizeof(struct lttng_ust_session));
+ if (!session)
+ return NULL;
+ session->struct_size = sizeof(struct lttng_ust_session);
+ session_priv = zmalloc(sizeof(struct lttng_ust_session_private));
+ if (!session_priv) {
+ free(session);
+ return NULL;
+ }
+ session->priv = session_priv;
+ session_priv->pub = session;
+ if (lttng_context_init_all(&session->priv->ctx)) {
+ free(session_priv);
+ free(session);
+ return NULL;
+ }
+ CDS_INIT_LIST_HEAD(&session->priv->chan_head);
+ CDS_INIT_LIST_HEAD(&session->priv->events_head);
+ CDS_INIT_LIST_HEAD(&session->priv->enums_head);
+ CDS_INIT_LIST_HEAD(&session->priv->enablers_head);
+ for (i = 0; i < LTTNG_UST_EVENT_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&session->priv->events_ht.table[i]);
+ for (i = 0; i < LTTNG_UST_ENUM_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&session->priv->enums_ht.table[i]);
+ cds_list_add(&session->priv->node, &sessions);
+ return session;
+}
+
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+{
+ struct lttng_counter_transport *counter_transport = NULL;
+ struct lttng_counter *counter = NULL;
+
+ counter_transport = lttng_counter_transport_find(counter_transport_name);
+ if (!counter_transport)
+ goto notransport;
+ counter = zmalloc(sizeof(struct lttng_counter));
+ if (!counter)
+ goto nomem;
+
+ counter->ops = &counter_transport->ops;
+ counter->transport = counter_transport;
+
+ counter->counter = counter->ops->counter_create(
+ number_dimensions, dimensions, 0,
+ -1, 0, NULL, false);
+ if (!counter->counter) {
+ goto create_error;
+ }
+
+ return counter;
+
+create_error:
+ free(counter);
+nomem:
+notransport:
+ return NULL;
+}
+
+static
+void lttng_ust_counter_destroy(struct lttng_counter *counter)
+{
+ counter->ops->counter_destroy(counter->counter);
+ free(counter);
+}
+
+struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+ int i;
+
+ event_notifier_group = zmalloc(sizeof(struct lttng_event_notifier_group));
+ if (!event_notifier_group)
+ return NULL;
+
+ /* Add all contexts. */
+ if (lttng_context_init_all(&event_notifier_group->ctx)) {
+ free(event_notifier_group);
+ return NULL;
+ }
+
+ CDS_INIT_LIST_HEAD(&event_notifier_group->enablers_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_group->event_notifiers_head);
+ for (i = 0; i < LTTNG_UST_EVENT_NOTIFIER_HT_SIZE; i++)
+ CDS_INIT_HLIST_HEAD(&event_notifier_group->event_notifiers_ht.table[i]);
+
+ cds_list_add(&event_notifier_group->node, &event_notifier_groups);
+
+ return event_notifier_group;
+}
+
+/*
+ * Only used internally at session destruction.
+ */
+static
+void _lttng_channel_unmap(struct lttng_ust_channel_buffer *lttng_chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_shm_handle *handle;
+
+ cds_list_del(&lttng_chan->priv->node);
+ lttng_destroy_context(lttng_chan->priv->ctx);
+ chan = lttng_chan->priv->rb_chan;
+ handle = chan->handle;
+ channel_destroy(chan, handle, 0);
+ free(lttng_chan->parent);
+ free(lttng_chan->priv);
+ free(lttng_chan);
+}
+
+static
+void register_event(struct lttng_ust_event_common *event)
+{
+ int ret;
+ const struct lttng_ust_event_desc *desc;
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
+ assert(event->priv->registered == 0);
+ desc = event->priv->desc;
+ lttng_ust_format_event_name(desc, name);
+ ret = lttng_ust_tp_probe_register_queue_release(name,
+ desc->probe_callback,
+ event, desc->signature);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ event->priv->registered = 1;
+}
+
+static
+void unregister_event(struct lttng_ust_event_common *event)
+{
+ int ret;
+ const struct lttng_ust_event_desc *desc;
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
+ assert(event->priv->registered == 1);
+ desc = event->priv->desc;
+ lttng_ust_format_event_name(desc, name);
+ ret = lttng_ust_tp_probe_unregister_queue_release(name,
+ desc->probe_callback,
+ event);
+ WARN_ON_ONCE(ret);
+ if (!ret)
+ event->priv->registered = 0;
+}
+
+static
+void _lttng_event_unregister(struct lttng_ust_event_common *event)
+{
+ if (event->priv->registered)
+ unregister_event(event);
+}
+
+void lttng_session_destroy(struct lttng_ust_session *session)
+{
+ struct lttng_ust_channel_buffer_private *chan, *tmpchan;
+ struct lttng_ust_event_recorder_private *event_recorder_priv, *tmpevent_recorder_priv;
+ struct lttng_enum *_enum, *tmp_enum;
+ struct lttng_event_enabler *event_enabler, *event_tmpenabler;
+
+ CMM_ACCESS_ONCE(session->active) = 0;
+ cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
+ _lttng_event_unregister(event_recorder_priv->parent.pub);
+ }
+ lttng_ust_urcu_synchronize_rcu(); /* Wait for in-flight events to complete */
+ lttng_ust_tp_probe_prune_release_queue();
+ cds_list_for_each_entry_safe(event_enabler, event_tmpenabler,
+ &session->priv->enablers_head, node)
+ lttng_event_enabler_destroy(event_enabler);
+ cds_list_for_each_entry_safe(event_recorder_priv, tmpevent_recorder_priv,
+ &session->priv->events_head, node)
+ _lttng_event_destroy(event_recorder_priv->parent.pub);
+ cds_list_for_each_entry_safe(_enum, tmp_enum,
+ &session->priv->enums_head, node)
+ _lttng_enum_destroy(_enum);
+ cds_list_for_each_entry_safe(chan, tmpchan, &session->priv->chan_head, node)
+ _lttng_channel_unmap(chan->pub);
+ cds_list_del(&session->priv->node);
+ lttng_destroy_context(session->priv->ctx);
+ free(session->priv);
+ free(session);
+}
+
+void lttng_event_notifier_group_destroy(
+ struct lttng_event_notifier_group *event_notifier_group)
+{
+ int close_ret;
+ struct lttng_event_notifier_enabler *notifier_enabler, *tmpnotifier_enabler;
+ struct lttng_ust_event_notifier_private *event_notifier_priv, *tmpevent_notifier_priv;
+
+ if (!event_notifier_group) {
+ return;
+ }
+
+ cds_list_for_each_entry(event_notifier_priv,
+ &event_notifier_group->event_notifiers_head, node)
+ _lttng_event_unregister(event_notifier_priv->parent.pub);
+
+ lttng_ust_urcu_synchronize_rcu();
+
+ cds_list_for_each_entry_safe(notifier_enabler, tmpnotifier_enabler,
+ &event_notifier_group->enablers_head, node)
+ lttng_event_notifier_enabler_destroy(notifier_enabler);
+
+ cds_list_for_each_entry_safe(event_notifier_priv, tmpevent_notifier_priv,
+ &event_notifier_group->event_notifiers_head, node)
+ _lttng_event_destroy(event_notifier_priv->parent.pub);
+
+ if (event_notifier_group->error_counter)
+ lttng_ust_counter_destroy(event_notifier_group->error_counter);
+
+ /* Close the notification fd to the listener of event_notifiers. */
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(event_notifier_group->notification_fd);
+ if (!close_ret) {
+ lttng_ust_delete_fd_from_tracker(
+ event_notifier_group->notification_fd);
+ } else {
+ PERROR("close");
+ abort();
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ cds_list_del(&event_notifier_group->node);
+
+ free(event_notifier_group);
+}
+
+static
+void lttng_enabler_destroy(struct lttng_enabler *enabler)
+{
+ struct lttng_ust_bytecode_node *filter_node, *tmp_filter_node;
+ struct lttng_ust_excluder_node *excluder_node, *tmp_excluder_node;
+
+ if (!enabler) {
+ return;
+ }
+
+ /* Destroy filter bytecode */
+ cds_list_for_each_entry_safe(filter_node, tmp_filter_node,
+ &enabler->filter_bytecode_head, node) {
+ free(filter_node);
+ }
+
+ /* Destroy excluders */
+ cds_list_for_each_entry_safe(excluder_node, tmp_excluder_node,
+ &enabler->excluder_head, node) {
+ free(excluder_node);
+ }
+}
+
+void lttng_event_notifier_enabler_destroy(struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ if (!event_notifier_enabler) {
+ return;
+ }
+
+ cds_list_del(&event_notifier_enabler->node);
+
+ lttng_enabler_destroy(lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+
+ free(event_notifier_enabler);
+}
+
+static
+int lttng_enum_create(const struct lttng_ust_enum_desc *desc,
+ struct lttng_ust_session *session)
+{
+ const char *enum_name = desc->name;
+ struct lttng_enum *_enum;
+ struct cds_hlist_head *head;
+ int ret = 0;
+ size_t name_len = strlen(enum_name);
+ uint32_t hash;
+ int notify_socket;
+
+ /* Check if this enum is already registered for this session. */
+ hash = jhash(enum_name, name_len, 0);
+ head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
+
+ _enum = lttng_ust_enum_get_from_desc(session, desc);
+ if (_enum) {
+ ret = -EEXIST;
+ goto exist;
+ }
+
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0) {
+ ret = notify_socket;
+ goto socket_error;
+ }
+
+ _enum = zmalloc(sizeof(*_enum));
+ if (!_enum) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+ _enum->session = session;
+ _enum->desc = desc;
+
+ ret = ustcomm_register_enum(notify_socket,
+ session->priv->objd,
+ enum_name,
+ desc->nr_entries,
+ desc->entries,
+ &_enum->id);
+ if (ret < 0) {
+ DBG("Error (%d) registering enumeration to sessiond", ret);
+ goto sessiond_register_error;
+ }
+ cds_list_add(&_enum->node, &session->priv->enums_head);
+ cds_hlist_add_head(&_enum->hlist, head);
+ return 0;
+
+sessiond_register_error:
+ free(_enum);
+cache_error:
+socket_error:
+exist:
+ return ret;
+}
+
+static
+int lttng_create_enum_check(const struct lttng_ust_type_common *type,
+ struct lttng_ust_session *session)
+{
+ switch (type->type) {
+ case lttng_ust_type_enum:
+ {
+ const struct lttng_ust_enum_desc *enum_desc;
+ int ret;
+
+ enum_desc = lttng_ust_get_type_enum(type)->desc;
+ ret = lttng_enum_create(enum_desc, session);
+ if (ret && ret != -EEXIST) {
+ DBG("Unable to create enum error: (%d)", ret);
+ return ret;
+ }
+ break;
+ }
+ case lttng_ust_type_dynamic:
+ {
+ const struct lttng_ust_event_field *tag_field_generic;
+ const struct lttng_ust_enum_desc *enum_desc;
+ int ret;
+
+ tag_field_generic = lttng_ust_dynamic_type_tag_field();
+ enum_desc = lttng_ust_get_type_enum(tag_field_generic->type)->desc;
+ ret = lttng_enum_create(enum_desc, session);
+ if (ret && ret != -EEXIST) {
+ DBG("Unable to create enum error: (%d)", ret);
+ return ret;
+ }
+ break;
+ }
+ default:
+ /* TODO: nested types when they become supported. */
+ break;
+ }
+ return 0;
+}
+
+static
+int lttng_create_all_event_enums(size_t nr_fields,
+ const struct lttng_ust_event_field **event_fields,
+ struct lttng_ust_session *session)
+{
+ size_t i;
+ int ret;
+
+ /* For each field, ensure enum is part of the session. */
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_ust_type_common *type = event_fields[i]->type;
+
+ ret = lttng_create_enum_check(type, session);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static
+int lttng_create_all_ctx_enums(size_t nr_fields,
+ struct lttng_ust_ctx_field *ctx_fields,
+ struct lttng_ust_session *session)
+{
+ size_t i;
+ int ret;
+
+ /* For each field, ensure enum is part of the session. */
+ for (i = 0; i < nr_fields; i++) {
+ const struct lttng_ust_type_common *type = ctx_fields[i].event_field->type;
+
+ ret = lttng_create_enum_check(type, session);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+/*
+ * Ensure that a state-dump will be performed for this session at the end
+ * of the current handle_message().
+ */
+int lttng_session_statedump(struct lttng_ust_session *session)
+{
+ session->priv->statedump_pending = 1;
+ lttng_ust_sockinfo_session_enabled(session->priv->owner);
+ return 0;
+}
+
+int lttng_session_enable(struct lttng_ust_session *session)
+{
+ int ret = 0;
+ struct lttng_ust_channel_buffer_private *chan;
+ int notify_socket;
+
+ if (session->active) {
+ ret = -EBUSY;
+ goto end;
+ }
+
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0)
+ return notify_socket;
+
+ /* Set transient enabler state to "enabled" */
+ session->priv->tstate = 1;
+
+ /* We need to sync enablers with session before activation. */
+ lttng_session_sync_event_enablers(session);
+
+ /*
+ * Snapshot the number of events per channel to know the type of header
+ * we need to use.
+ */
+ cds_list_for_each_entry(chan, &session->priv->chan_head, node) {
+ struct lttng_ust_ctx *ctx;
+ struct lttng_ust_ctx_field *fields = NULL;
+ size_t nr_fields = 0;
+ uint32_t chan_id;
+
+ /* Don't change it on session stop/restart. */
+ if (chan->header_type)
+ continue;
+ ctx = chan->ctx;
+ if (ctx) {
+ nr_fields = ctx->nr_fields;
+ fields = ctx->fields;
+ ret = lttng_create_all_ctx_enums(nr_fields, fields,
+ session);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ return ret;
+ }
+ }
+ ret = ustcomm_register_channel(notify_socket,
+ session,
+ session->priv->objd,
+ chan->parent.objd,
+ nr_fields,
+ fields,
+ &chan_id,
+ &chan->header_type);
+ if (ret) {
+ DBG("Error (%d) registering channel to sessiond", ret);
+ return ret;
+ }
+ if (chan_id != chan->id) {
+ DBG("Error: channel registration id (%u) does not match id assigned at creation (%u)",
+ chan_id, chan->id);
+ return -EINVAL;
+ }
+ }
+
+ /* Set atomically the state to "active" */
+ CMM_ACCESS_ONCE(session->active) = 1;
+ CMM_ACCESS_ONCE(session->priv->been_active) = 1;
+
+ ret = lttng_session_statedump(session);
+ if (ret)
+ return ret;
+end:
+ return ret;
+}
+
+int lttng_session_disable(struct lttng_ust_session *session)
+{
+ int ret = 0;
+
+ if (!session->active) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set atomically the state to "inactive" */
+ CMM_ACCESS_ONCE(session->active) = 0;
+
+ /* Set transient enabler state to "disabled" */
+ session->priv->tstate = 0;
+ lttng_session_sync_event_enablers(session);
+end:
+ return ret;
+}
+
+int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
+{
+ int ret = 0;
+
+ if (lttng_channel->enabled) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set transient enabler state to "enabled" */
+ lttng_channel->priv->tstate = 1;
+ lttng_session_sync_event_enablers(lttng_channel->session);
+ /* Set atomically the state to "enabled" */
+ CMM_ACCESS_ONCE(lttng_channel->enabled) = 1;
+end:
+ return ret;
+}
+
+int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
+{
+ int ret = 0;
+
+ if (!lttng_channel->enabled) {
+ ret = -EBUSY;
+ goto end;
+ }
+ /* Set atomically the state to "disabled" */
+ CMM_ACCESS_ONCE(lttng_channel->enabled) = 0;
+ /* Set transient enabler state to "enabled" */
+ lttng_channel->priv->tstate = 0;
+ lttng_session_sync_event_enablers(lttng_channel->session);
+end:
+ return ret;
+}
+
+static inline
+struct cds_hlist_head *borrow_hash_table_bucket(
+ struct cds_hlist_head *hash_table,
+ unsigned int hash_table_size,
+ const struct lttng_ust_event_desc *desc)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ size_t name_len;
+ uint32_t hash;
+
+ lttng_ust_format_event_name(desc, name);
+ name_len = strlen(name);
+
+ hash = jhash(name, name_len, 0);
+ return &hash_table[hash & (hash_table_size - 1)];
+}
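+
+/*
+ * Editorial note: the masking above (hash & (hash_table_size - 1)) only
+ * selects a valid bucket when hash_table_size is a power of two, which
+ * the LTTNG_UST_EVENT_HT_SIZE and LTTNG_UST_EVENT_NOTIFIER_HT_SIZE
+ * constants used by the callers are expected to satisfy.
+ */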
+
+/*
+ * Supports event creation while tracing session is active.
+ */
+static
+int lttng_event_recorder_create(const struct lttng_ust_event_desc *desc,
+ struct lttng_ust_channel_buffer *chan)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ struct lttng_ust_event_recorder *event_recorder;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ struct lttng_ust_session *session = chan->parent->session;
+ struct cds_hlist_head *head;
+ int ret = 0;
+ int notify_socket, loglevel;
+ const char *uri;
+
+ head = borrow_hash_table_bucket(chan->parent->session->priv->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, desc);
+
+ notify_socket = lttng_get_notify_socket(session->priv->owner);
+ if (notify_socket < 0) {
+ ret = notify_socket;
+ goto socket_error;
+ }
+
+ ret = lttng_create_all_event_enums(desc->nr_fields, desc->fields,
+ session);
+ if (ret < 0) {
+ DBG("Error (%d) adding enum to session", ret);
+ goto create_enum_error;
+ }
+
+ /*
+ * Check if the loglevel matches. Refuse to connect the event if it does not.
+ */
+ event_recorder = zmalloc(sizeof(struct lttng_ust_event_recorder));
+ if (!event_recorder) {
+ ret = -ENOMEM;
+ goto cache_error;
+ }
+ event_recorder->struct_size = sizeof(struct lttng_ust_event_recorder);
+
+ event_recorder->parent = zmalloc(sizeof(struct lttng_ust_event_common));
+ if (!event_recorder->parent) {
+ ret = -ENOMEM;
+ goto parent_error;
+ }
+ event_recorder->parent->struct_size = sizeof(struct lttng_ust_event_common);
+ event_recorder->parent->type = LTTNG_UST_EVENT_TYPE_RECORDER;
+ event_recorder->parent->child = event_recorder;
+
+ event_recorder_priv = zmalloc(sizeof(struct lttng_ust_event_recorder_private));
+ if (!event_recorder_priv) {
+ ret = -ENOMEM;
+ goto priv_error;
+ }
+ event_recorder->priv = event_recorder_priv;
+ event_recorder_priv->pub = event_recorder;
+ event_recorder->parent->priv = &event_recorder_priv->parent;
+ event_recorder_priv->parent.pub = event_recorder->parent;
+
+ event_recorder->chan = chan;
+
+ /* Event will be enabled by enabler sync. */
+ event_recorder->parent->run_filter = lttng_ust_interpret_event_filter;
+ event_recorder->parent->enabled = 0;
+ event_recorder->parent->priv->registered = 0;
+ CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_recorder->parent->priv->enablers_ref_head);
+ event_recorder->parent->priv->desc = desc;
+
+ if (desc->loglevel)
+ loglevel = *(*event_recorder->parent->priv->desc->loglevel);
+ else
+ loglevel = TRACE_DEFAULT;
+ if (desc->model_emf_uri)
+ uri = *(desc->model_emf_uri);
+ else
+ uri = NULL;
+
+ lttng_ust_format_event_name(desc, name);
+
+ /* Fetch event ID from sessiond */
+ ret = ustcomm_register_event(notify_socket,
+ session,
+ session->priv->objd,
+ chan->priv->parent.objd,
+ name,
+ loglevel,
+ desc->signature,
+ desc->nr_fields,
+ desc->fields,
+ uri,
+ &event_recorder->priv->id);
+ if (ret < 0) {
+ DBG("Error (%d) registering event to sessiond", ret);
+ goto sessiond_register_error;
+ }
+
+ cds_list_add(&event_recorder_priv->node, &chan->parent->session->priv->events_head);
+ cds_hlist_add_head(&event_recorder_priv->hlist, head);
+ return 0;
+
+sessiond_register_error:
+ free(event_recorder_priv);
+priv_error:
+ free(event_recorder->parent);
+parent_error:
+ free(event_recorder);
+cache_error:
+create_enum_error:
+socket_error:
+ return ret;
+}
+
+static
+int lttng_event_notifier_create(const struct lttng_ust_event_desc *desc,
+ uint64_t token, uint64_t error_counter_index,
+ struct lttng_event_notifier_group *event_notifier_group)
+{
+ struct lttng_ust_event_notifier *event_notifier;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct cds_hlist_head *head;
+ int ret = 0;
+
+ /*
+ * Get the hashtable bucket into which the created lttng_event_notifier
+ * object should be inserted.
+ */
+ head = borrow_hash_table_bucket(
+ event_notifier_group->event_notifiers_ht.table,
+ LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
+
+ event_notifier = zmalloc(sizeof(struct lttng_ust_event_notifier));
+ if (!event_notifier) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ event_notifier->struct_size = sizeof(struct lttng_ust_event_notifier);
+
+ event_notifier->parent = zmalloc(sizeof(struct lttng_ust_event_common));
+ if (!event_notifier->parent) {
+ ret = -ENOMEM;
+ goto parent_error;
+ }
+ event_notifier->parent->struct_size = sizeof(struct lttng_ust_event_common);
+ event_notifier->parent->type = LTTNG_UST_EVENT_TYPE_NOTIFIER;
+ event_notifier->parent->child = event_notifier;
+
+ event_notifier_priv = zmalloc(sizeof(struct lttng_ust_event_notifier_private));
+ if (!event_notifier_priv) {
+ ret = -ENOMEM;
+ goto priv_error;
+ }
+ event_notifier->priv = event_notifier_priv;
+ event_notifier_priv->pub = event_notifier;
+ event_notifier->parent->priv = &event_notifier_priv->parent;
+ event_notifier_priv->parent.pub = event_notifier->parent;
+
+ event_notifier_priv->group = event_notifier_group;
+ event_notifier_priv->parent.user_token = token;
+ event_notifier_priv->error_counter_index = error_counter_index;
+
+ /* Event notifier will be enabled by enabler sync. */
+ event_notifier->parent->run_filter = lttng_ust_interpret_event_filter;
+ event_notifier->parent->enabled = 0;
+ event_notifier_priv->parent.registered = 0;
+
+ CDS_INIT_LIST_HEAD(&event_notifier->parent->priv->filter_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_notifier->priv->capture_bytecode_runtime_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_priv->parent.enablers_ref_head);
+ event_notifier_priv->parent.desc = desc;
+ event_notifier->notification_send = lttng_event_notifier_notification_send;
+
+ cds_list_add(&event_notifier_priv->node,
+ &event_notifier_group->event_notifiers_head);
+ cds_hlist_add_head(&event_notifier_priv->hlist, head);
+
+ return 0;
+
+priv_error:
+ free(event_notifier->parent);
+parent_error:
+ free(event_notifier);
+error:
+ return ret;
+}
+
+static
+int lttng_desc_match_star_glob_enabler(const struct lttng_ust_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ int loglevel = 0;
+ unsigned int has_loglevel = 0;
+
+ lttng_ust_format_event_name(desc, name);
+ assert(enabler->format_type == LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ if (!strutils_star_glob_match(enabler->event_param.name, SIZE_MAX,
+ name, SIZE_MAX))
+ return 0;
+ if (desc->loglevel) {
+ loglevel = *(*desc->loglevel);
+ has_loglevel = 1;
+ }
+ if (!lttng_loglevel_match(loglevel,
+ has_loglevel,
+ enabler->event_param.loglevel_type,
+ enabler->event_param.loglevel))
+ return 0;
+ return 1;
+}
+
+static
+int lttng_desc_match_event_enabler(const struct lttng_ust_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+ int loglevel = 0;
+ unsigned int has_loglevel = 0;
+
+ lttng_ust_format_event_name(desc, name);
+ assert(enabler->format_type == LTTNG_ENABLER_FORMAT_EVENT);
+ if (strcmp(name, enabler->event_param.name))
+ return 0;
+ if (desc->loglevel) {
+ loglevel = *(*desc->loglevel);
+ has_loglevel = 1;
+ }
+ if (!lttng_loglevel_match(loglevel,
+ has_loglevel,
+ enabler->event_param.loglevel_type,
+ enabler->event_param.loglevel))
+ return 0;
+ return 1;
+}
+
+static
+int lttng_desc_match_enabler(const struct lttng_ust_event_desc *desc,
+ struct lttng_enabler *enabler)
+{
+ switch (enabler->format_type) {
+ case LTTNG_ENABLER_FORMAT_STAR_GLOB:
+ {
+ struct lttng_ust_excluder_node *excluder;
+
+ if (!lttng_desc_match_star_glob_enabler(desc, enabler)) {
+ return 0;
+ }
+
+ /*
+ * If the matching event matches with an excluder,
+ * return 'does not match'
+ */
+ cds_list_for_each_entry(excluder, &enabler->excluder_head, node) {
+ int count;
+
+ for (count = 0; count < excluder->excluder.count; count++) {
+ int len;
+ char *excluder_name;
+
+ excluder_name = (char *) (excluder->excluder.names)
+ + count * LTTNG_UST_ABI_SYM_NAME_LEN;
+ len = strnlen(excluder_name, LTTNG_UST_ABI_SYM_NAME_LEN);
+ if (len > 0) {
+ char name[LTTNG_UST_ABI_SYM_NAME_LEN];
+
+ lttng_ust_format_event_name(desc, name);
+ if (strutils_star_glob_match(excluder_name, len, name, SIZE_MAX)) {
+ return 0;
+ }
+ }
+ }
+ }
+ return 1;
+ }
+ case LTTNG_ENABLER_FORMAT_EVENT:
+ return lttng_desc_match_event_enabler(desc, enabler);
+ default:
+ return -EINVAL;
+ }
+}
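+
+/*
+ * Editorial example: with a (hypothetical) star-glob enabler
+ * "provider:*" and an excluder "provider:internal_*", the event
+ * "provider:internal_foo" first passes the glob match, then is rejected
+ * by the excluder loop above, so the function reports "does not match".
+ */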
+
+static
+int lttng_event_enabler_match_event(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_event_recorder *event_recorder)
+{
+ if (lttng_desc_match_enabler(event_recorder->parent->priv->desc,
+ lttng_event_enabler_as_enabler(event_enabler))
+ && event_recorder->chan == event_enabler->chan)
+ return 1;
+ else
+ return 0;
+}
+
+static
+int lttng_event_notifier_enabler_match_event_notifier(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_event_notifier *event_notifier)
+{
+ int desc_matches = lttng_desc_match_enabler(event_notifier->priv->parent.desc,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+
+ if (desc_matches && event_notifier->priv->group == event_notifier_enabler->group &&
+ event_notifier->priv->parent.user_token == event_notifier_enabler->user_token)
+ return 1;
+ else
+ return 0;
+}
+
+static
+struct lttng_enabler_ref *lttng_enabler_ref(
+ struct cds_list_head *enabler_ref_list,
+ struct lttng_enabler *enabler)
+{
+ struct lttng_enabler_ref *enabler_ref;
+
+ cds_list_for_each_entry(enabler_ref, enabler_ref_list, node) {
+ if (enabler_ref->ref == enabler)
+ return enabler_ref;
+ }
+ return NULL;
+}
+
+/*
+ * Create a struct lttng_event if it is missing and its event
+ * description is present in the list of tracepoint probes.
+ */
+static
+void lttng_create_event_recorder_if_missing(struct lttng_event_enabler *event_enabler)
+{
+ struct lttng_ust_session *session = event_enabler->chan->parent->session;
+ struct lttng_ust_registered_probe *reg_probe;
+ const struct lttng_ust_event_desc *desc;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ int i;
+ struct cds_list_head *probe_list;
+
+ probe_list = lttng_get_probe_list_head();
+ /*
+ * For each probe event description that matches our enabler,
+ * create the associated lttng_event if it is not already
+ * present.
+ */
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int ret;
+ bool found = false;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+
+ desc = probe_desc->event_desc[i];
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_enabler_as_enabler(event_enabler)))
+ continue;
+
+ head = borrow_hash_table_bucket(
+ session->priv->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, desc);
+
+ cds_hlist_for_each_entry(event_recorder_priv, node, head, hlist) {
+ if (event_recorder_priv->parent.desc == desc
+ && event_recorder_priv->pub->chan == event_enabler->chan) {
+ found = true;
+ break;
+ }
+ }
+ if (found)
+ continue;
+
+ /*
+ * We need to create an event for this
+ * event probe.
+ */
+ ret = lttng_event_recorder_create(probe_desc->event_desc[i],
+ event_enabler->chan);
+ if (ret) {
+ DBG("Unable to create event \"%s:%s\", error %d\n",
+ probe_desc->provider_name,
+ probe_desc->event_desc[i]->event_name, ret);
+ }
+ }
+ }
+}
+
+static
+void probe_provider_event_for_each(const struct lttng_ust_probe_desc *provider_desc,
+ void (*event_func)(struct lttng_ust_event_common *event))
+{
+ struct cds_hlist_node *node, *tmp_node;
+ struct cds_list_head *sessionsp;
+ unsigned int i;
+
+ /* Get handle on list of sessions. */
+ sessionsp = lttng_get_sessions();
+
+ /*
+ * Iterate over all events in the probe provider description, and
+ * over all sessions and event notifier groups, to apply event_func
+ * to each matching event.
+ */
+ for (i = 0; i < provider_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc;
+ struct lttng_event_notifier_group *event_notifier_group;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct lttng_ust_session_private *session_priv;
+ struct cds_hlist_head *head;
+
+ event_desc = provider_desc->event_desc[i];
+
+ /*
+ * Iterate over all session to find the current event
+ * description.
+ */
+ cds_list_for_each_entry(session_priv, sessionsp, node) {
+ /*
+ * Get the list of events in the hashtable bucket and
+ * iterate to find the event matching this descriptor.
+ */
+ head = borrow_hash_table_bucket(
+ session_priv->events_ht.table,
+ LTTNG_UST_EVENT_HT_SIZE, event_desc);
+
+ cds_hlist_for_each_entry_safe(event_recorder_priv, node, tmp_node, head, hlist) {
+ if (event_desc == event_recorder_priv->parent.desc) {
+ event_func(event_recorder_priv->parent.pub);
+ break;
+ }
+ }
+ }
+
+ /*
+ * Iterate over all event_notifier groups to find the current event
+ * description.
+ */
+ cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ /*
+ * Get the list of event_notifiers in the hashtable bucket and
+ * iterate to find the event_notifier matching this
+ * descriptor.
+ */
+ head = borrow_hash_table_bucket(
+ event_notifier_group->event_notifiers_ht.table,
+ LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, event_desc);
+
+ cds_hlist_for_each_entry_safe(event_notifier_priv, node, tmp_node, head, hlist) {
+ if (event_desc == event_notifier_priv->parent.desc) {
+ event_func(event_notifier_priv->parent.pub);
+ break;
+ }
+ }
+ }
+ }
+}
+
+static
+void _event_enum_destroy(struct lttng_ust_event_common *event)
+{
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER:
+ {
+ struct lttng_ust_event_recorder *event_recorder = event->child;
+ struct lttng_ust_session *session = event_recorder->chan->parent->session;
+ unsigned int i;
+
+ /* Destroy enums of the current event. */
+ for (i = 0; i < event_recorder->parent->priv->desc->nr_fields; i++) {
+ const struct lttng_ust_enum_desc *enum_desc;
+ const struct lttng_ust_event_field *field;
+ struct lttng_enum *curr_enum;
+
+ field = event_recorder->parent->priv->desc->fields[i];
+ switch (field->type->type) {
+ case lttng_ust_type_enum:
+ enum_desc = lttng_ust_get_type_enum(field->type)->desc;
+ break;
+ default:
+ continue;
+ }
+
+ curr_enum = lttng_ust_enum_get_from_desc(session, enum_desc);
+ if (curr_enum) {
+ _lttng_enum_destroy(curr_enum);
+ }
+ }
+ break;
+ }
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ break;
+ default:
+ abort();
+ }
+ /* Destroy event. */
+ _lttng_event_destroy(event);
+}
+
+/*
+ * Iterate over all the UST sessions to unregister and destroy all events
+ * created from the probe provider descriptor received as argument. Must be
+ * called with the ust_lock held.
+ */
+void lttng_probe_provider_unregister_events(
+ const struct lttng_ust_probe_desc *provider_desc)
+{
+ /*
+ * Iterate over all events in the probe provider descriptions and sessions
+ * to queue the unregistration of the events.
+ */
+ probe_provider_event_for_each(provider_desc, _lttng_event_unregister);
+
+ /* Wait for grace period. */
+ lttng_ust_urcu_synchronize_rcu();
+ /* Prune the unregistration queue. */
+ lttng_ust_tp_probe_prune_release_queue();
+
+ /*
+ * It is now safe to destroy the events and remove them from the event list
+ * and hashtables.
+ */
+ probe_provider_event_for_each(provider_desc, _event_enum_destroy);
+}
+
+/*
+ * Create events associated with an event enabler (if not already present),
+ * and add backward reference from the event to the enabler.
+ */
+static
+int lttng_event_enabler_ref_event_recorders(struct lttng_event_enabler *event_enabler)
+{
+ struct lttng_ust_session *session = event_enabler->chan->parent->session;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+
+ if (!lttng_event_enabler_as_enabler(event_enabler)->enabled)
+ goto end;
+
+ /* First ensure that probe events are created for this enabler. */
+ lttng_create_event_recorder_if_missing(event_enabler);
+
+ /* For each event matching enabler in session event list. */
+ cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_event_enabler_match_event(event_enabler, event_recorder_priv->pub))
+ continue;
+
+ enabler_ref = lttng_enabler_ref(&event_recorder_priv->parent.enablers_ref_head,
+ lttng_event_enabler_as_enabler(event_enabler));
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from event to enabler.
+ */
+ enabler_ref = zmalloc(sizeof(*enabler_ref));
+ if (!enabler_ref)
+ return -ENOMEM;
+ enabler_ref->ref = lttng_event_enabler_as_enabler(
+ event_enabler);
+ cds_list_add(&enabler_ref->node,
+ &event_recorder_priv->parent.enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(event_recorder_priv->parent.desc,
+ &session->priv->ctx,
+ &event_recorder_priv->parent.filter_bytecode_runtime_head,
+ &lttng_event_enabler_as_enabler(event_enabler)->filter_bytecode_head);
+
+ /* TODO: merge event context. */
+ }
+end:
+ return 0;
+}
+
+/*
+ * Called at library load: connect the probe on all enablers matching
+ * this event.
+ * Called with session mutex held.
+ */
+int lttng_fix_pending_events(void)
+{
+ struct lttng_ust_session_private *session_priv;
+
+ cds_list_for_each_entry(session_priv, &sessions, node) {
+ lttng_session_lazy_sync_event_enablers(session_priv->pub);
+ }
+ return 0;
+}
+
+int lttng_fix_pending_event_notifiers(void)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+
+ cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ lttng_event_notifier_group_sync_enablers(event_notifier_group);
+ }
+ return 0;
+}
+
+/*
+ * For each session of the owner thread, execute pending statedump.
+ * Only dump state for the sessions owned by the caller thread, because
+ * we don't keep ust_lock across the entire iteration.
+ */
+void lttng_handle_pending_statedump(void *owner)
+{
+ struct lttng_ust_session_private *session_priv;
+
+ /* Execute state dump */
+ do_lttng_ust_statedump(owner);
+
+ /* Clear pending state dump */
+ if (ust_lock()) {
+ goto end;
+ }
+ cds_list_for_each_entry(session_priv, &sessions, node) {
+ if (session_priv->owner != owner)
+ continue;
+ if (!session_priv->statedump_pending)
+ continue;
+ session_priv->statedump_pending = 0;
+ }
+end:
+ ust_unlock();
+ return;
+}
+
+static
+void _lttng_event_destroy(struct lttng_ust_event_common *event)
+{
+ struct lttng_enabler_ref *enabler_ref, *tmp_enabler_ref;
+
+ lttng_free_event_filter_runtime(event);
+ /* Free event enabler refs */
+ cds_list_for_each_entry_safe(enabler_ref, tmp_enabler_ref,
+ &event->priv->enablers_ref_head, node)
+ free(enabler_ref);
+
+ switch (event->type) {
+ case LTTNG_UST_EVENT_TYPE_RECORDER:
+ {
+ struct lttng_ust_event_recorder *event_recorder = event->child;
+
+ /* Remove from event list. */
+ cds_list_del(&event_recorder->priv->node);
+ /* Remove from event hash table. */
+ cds_hlist_del(&event_recorder->priv->hlist);
+
+ lttng_destroy_context(event_recorder->priv->ctx);
+ free(event_recorder->parent);
+ free(event_recorder->priv);
+ free(event_recorder);
+ break;
+ }
+ case LTTNG_UST_EVENT_TYPE_NOTIFIER:
+ {
+ struct lttng_ust_event_notifier *event_notifier = event->child;
+
+ /* Remove from event list. */
+ cds_list_del(&event_notifier->priv->node);
+ /* Remove from event hash table. */
+ cds_hlist_del(&event_notifier->priv->hlist);
+
+ free(event_notifier->priv);
+ free(event_notifier->parent);
+ free(event_notifier);
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+static
+void _lttng_enum_destroy(struct lttng_enum *_enum)
+{
+ cds_list_del(&_enum->node);
+ cds_hlist_del(&_enum->hlist);
+ free(_enum);
+}
+
+void lttng_ust_abi_events_exit(void)
+{
+ struct lttng_ust_session_private *session_priv, *tmpsession_priv;
+
+ cds_list_for_each_entry_safe(session_priv, tmpsession_priv, &sessions, node)
+ lttng_session_destroy(session_priv->pub);
+}
+
+/*
+ * Enabler management.
+ */
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event *event_param,
+ struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_event_enabler *event_enabler;
+
+ event_enabler = zmalloc(sizeof(*event_enabler));
+ if (!event_enabler)
+ return NULL;
+ event_enabler->base.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_enabler->base.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_enabler->base.excluder_head);
+ memcpy(&event_enabler->base.event_param, event_param,
+ sizeof(event_enabler->base.event_param));
+ event_enabler->chan = chan;
+ /* ctx left NULL */
+ event_enabler->base.enabled = 0;
+ cds_list_add(&event_enabler->node, &event_enabler->chan->parent->session->priv->enablers_head);
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+
+ return event_enabler;
+}
+
+struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
+ struct lttng_event_notifier_group *event_notifier_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event_notifier *event_notifier_param)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+
+ event_notifier_enabler = zmalloc(sizeof(*event_notifier_enabler));
+ if (!event_notifier_enabler)
+ return NULL;
+ event_notifier_enabler->base.format_type = format_type;
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.filter_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->capture_bytecode_head);
+ CDS_INIT_LIST_HEAD(&event_notifier_enabler->base.excluder_head);
+
+ event_notifier_enabler->user_token = event_notifier_param->event.token;
+ event_notifier_enabler->error_counter_index = event_notifier_param->error_counter_index;
+ event_notifier_enabler->num_captures = 0;
+
+ memcpy(&event_notifier_enabler->base.event_param.name,
+ event_notifier_param->event.name,
+ sizeof(event_notifier_enabler->base.event_param.name));
+ event_notifier_enabler->base.event_param.instrumentation =
+ event_notifier_param->event.instrumentation;
+ event_notifier_enabler->base.event_param.loglevel =
+ event_notifier_param->event.loglevel;
+ event_notifier_enabler->base.event_param.loglevel_type =
+ event_notifier_param->event.loglevel_type;
+
+ event_notifier_enabler->base.enabled = 0;
+ event_notifier_enabler->group = event_notifier_group;
+
+ cds_list_add(&event_notifier_enabler->node,
+ &event_notifier_group->enablers_head);
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_group);
+
+ return event_notifier_enabler;
+}
+
+int lttng_event_enabler_enable(struct lttng_event_enabler *event_enabler)
+{
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 1;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+
+ return 0;
+}
+
+int lttng_event_enabler_disable(struct lttng_event_enabler *event_enabler)
+{
+ lttng_event_enabler_as_enabler(event_enabler)->enabled = 0;
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+
+ return 0;
+}
+
+static
+void _lttng_enabler_attach_filter_bytecode(struct lttng_enabler *enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ (*bytecode)->enabler = enabler;
+ cds_list_add_tail(&(*bytecode)->node, &enabler->filter_bytecode_head);
+ /* Take ownership of bytecode */
+ *bytecode = NULL;
+}
+
+int lttng_event_enabler_attach_filter_bytecode(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ _lttng_enabler_attach_filter_bytecode(
+ lttng_event_enabler_as_enabler(event_enabler), bytecode);
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+ return 0;
+}
+
+static
+void _lttng_enabler_attach_exclusion(struct lttng_enabler *enabler,
+ struct lttng_ust_excluder_node **excluder)
+{
+ (*excluder)->enabler = enabler;
+ cds_list_add_tail(&(*excluder)->node, &enabler->excluder_head);
+ /* Take ownership of excluder */
+ *excluder = NULL;
+}
+
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *event_enabler,
+ struct lttng_ust_excluder_node **excluder)
+{
+ _lttng_enabler_attach_exclusion(
+ lttng_event_enabler_as_enabler(event_enabler), excluder);
+
+ lttng_session_lazy_sync_event_enablers(event_enabler->chan->parent->session);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_enable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 1;
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+
+ return 0;
+}
+
+int lttng_event_notifier_enabler_disable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled = 0;
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+
+ return 0;
+}
+
+int lttng_event_notifier_enabler_attach_filter_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ _lttng_enabler_attach_filter_bytecode(
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
+ bytecode);
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_attach_capture_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+{
+ (*bytecode)->enabler = lttng_event_notifier_enabler_as_enabler(
+ event_notifier_enabler);
+ cds_list_add_tail(&(*bytecode)->node,
+ &event_notifier_enabler->capture_bytecode_head);
+ /* Take ownership of bytecode */
+ *bytecode = NULL;
+ event_notifier_enabler->num_captures++;
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ return 0;
+}
+
+int lttng_event_notifier_enabler_attach_exclusion(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_excluder_node **excluder)
+{
+ _lttng_enabler_attach_exclusion(
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler),
+ excluder);
+
+ lttng_event_notifier_group_sync_enablers(event_notifier_enabler->group);
+ return 0;
+}
+
+int lttng_attach_context(struct lttng_ust_abi_context *context_param,
+ union lttng_ust_abi_args *uargs,
+ struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
+{
+ /*
+ * We cannot attach a context after trace has been started for a
+ * session because the metadata does not allow expressing this
+ * information outside of the original channel scope.
+ */
+ if (session->priv->been_active)
+ return -EPERM;
+
+ switch (context_param->ctx) {
+ case LTTNG_UST_ABI_CONTEXT_PTHREAD_ID:
+ return lttng_add_pthread_id_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_PERF_THREAD_COUNTER:
+ {
+ struct lttng_ust_abi_perf_counter_ctx *perf_ctx_param;
+
+ perf_ctx_param = &context_param->u.perf_counter;
+ return lttng_add_perf_counter_to_ctx(
+ perf_ctx_param->type,
+ perf_ctx_param->config,
+ perf_ctx_param->name,
+ ctx);
+ }
+ case LTTNG_UST_ABI_CONTEXT_VTID:
+ return lttng_add_vtid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VPID:
+ return lttng_add_vpid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_PROCNAME:
+ return lttng_add_procname_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_IP:
+ return lttng_add_ip_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_CPU_ID:
+ return lttng_add_cpu_id_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+ return lttng_ust_add_app_context_to_ctx_rcu(uargs->app_context.ctxname,
+ ctx);
+ case LTTNG_UST_ABI_CONTEXT_CGROUP_NS:
+ return lttng_add_cgroup_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_IPC_NS:
+ return lttng_add_ipc_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_MNT_NS:
+ return lttng_add_mnt_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_NET_NS:
+ return lttng_add_net_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_PID_NS:
+ return lttng_add_pid_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_TIME_NS:
+ return lttng_add_time_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_USER_NS:
+ return lttng_add_user_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_UTS_NS:
+ return lttng_add_uts_ns_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VUID:
+ return lttng_add_vuid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VEUID:
+ return lttng_add_veuid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VSUID:
+ return lttng_add_vsuid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VGID:
+ return lttng_add_vgid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VEGID:
+ return lttng_add_vegid_to_ctx(ctx);
+ case LTTNG_UST_ABI_CONTEXT_VSGID:
+ return lttng_add_vsgid_to_ctx(ctx);
+ default:
+ return -EINVAL;
+ }
+}
+
+int lttng_event_enabler_attach_context(
+ struct lttng_event_enabler *enabler __attribute__((unused)),
+ struct lttng_ust_abi_context *context_param __attribute__((unused)))
+{
+ return -ENOSYS;
+}
+
+void lttng_event_enabler_destroy(struct lttng_event_enabler *event_enabler)
+{
+ if (!event_enabler) {
+ return;
+ }
+ cds_list_del(&event_enabler->node);
+
+ lttng_enabler_destroy(lttng_event_enabler_as_enabler(event_enabler));
+
+ lttng_destroy_context(event_enabler->ctx);
+ free(event_enabler);
+}
+
+/*
+ * lttng_session_sync_event_enablers should be called just before starting a
+ * session.
+ */
+static
+void lttng_session_sync_event_enablers(struct lttng_ust_session *session)
+{
+ struct lttng_event_enabler *event_enabler;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+
+ cds_list_for_each_entry(event_enabler, &session->priv->enablers_head, node)
+ lttng_event_enabler_ref_event_recorders(event_enabler);
+ /*
+ * For each event, if at least one of its enablers is enabled,
+ * and its channel and session transient states are enabled, we
+ * enable the event, else we disable it.
+ */
+ cds_list_for_each_entry(event_recorder_priv, &session->priv->events_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_ust_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_filter_bytecode = 0;
+ int nr_filters = 0;
+
+ /* Enable events */
+ cds_list_for_each_entry(enabler_ref,
+ &event_recorder_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+ /*
+ * Enabled state is based on union of enablers, with
+ * intersection of session and channel transient enable
+ * states.
+ */
+ enabled = enabled && session->priv->tstate && event_recorder_priv->pub->chan->priv->parent.tstate;
+
+ CMM_STORE_SHARED(event_recorder_priv->pub->parent->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event enabled
+ * state.
+ */
+ if (enabled) {
+ if (!event_recorder_priv->parent.registered)
+ register_event(event_recorder_priv->parent.pub);
+ } else {
+ if (event_recorder_priv->parent.registered)
+ unregister_event(event_recorder_priv->parent.pub);
+ }
+
+ /* Check if the event has any enabled enabler without filter bytecode */
+ cds_list_for_each_entry(enabler_ref,
+ &event_recorder_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_filter_bytecode = 1;
+ break;
+ }
+ }
+ event_recorder_priv->parent.has_enablers_without_filter_bytecode =
+ has_enablers_without_filter_bytecode;
+
+ /* Enable filters */
+ cds_list_for_each_entry(runtime,
+ &event_recorder_priv->parent.filter_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_filters++;
+ }
+ CMM_STORE_SHARED(event_recorder_priv->parent.pub->eval_filter,
+ !(has_enablers_without_filter_bytecode || !nr_filters));
+ }
+ lttng_ust_tp_probe_prune_release_queue();
+}
+
+/* Support for event notifier is introduced by probe provider major version 2. */
+static
+bool lttng_ust_probe_supports_event_notifier(const struct lttng_ust_probe_desc *probe_desc)
+{
+ return probe_desc->major >= 2;
+}
+
+static
+void lttng_create_event_notifier_if_missing(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+ struct lttng_ust_registered_probe *reg_probe;
+ struct cds_list_head *probe_list;
+ int i;
+
+ probe_list = lttng_get_probe_list_head();
+
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ int ret;
+ bool found = false;
+ const struct lttng_ust_event_desc *desc;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+
+ desc = probe_desc->event_desc[i];
+
+ if (!lttng_desc_match_enabler(desc,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)))
+ continue;
+
+ /*
+ * Given the current event_notifier group, get the bucket that
+ * the target event_notifier would be in if it had already been
+ * created.
+ */
+ head = borrow_hash_table_bucket(
+ event_notifier_group->event_notifiers_ht.table,
+ LTTNG_UST_EVENT_NOTIFIER_HT_SIZE, desc);
+
+ cds_hlist_for_each_entry(event_notifier_priv, node, head, hlist) {
+ /*
+ * Check if the event_notifier already exists by checking
+ * if the event_notifier and enabler share the same
+ * description and user token.
+ */
+ if (event_notifier_priv->parent.desc == desc &&
+ event_notifier_priv->parent.user_token == event_notifier_enabler->user_token) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ /* Check that the probe supports event notifiers, else report the error. */
+ if (!lttng_ust_probe_supports_event_notifier(probe_desc)) {
+ ERR("Probe \"%s\" contains event \"%s:%s\" which matches an enabled event notifier, "
+ "but its version (%u.%u) is too old and does not implement event notifiers. "
+ "It needs to be recompiled against a newer version of LTTng-UST, otherwise "
+ "this event will not generate any notification.",
+ probe_desc->provider_name,
+ probe_desc->provider_name, desc->event_name,
+ probe_desc->major,
+ probe_desc->minor);
+ continue;
+ }
+ /*
+ * We need to create an event_notifier for this event probe.
+ */
+ ret = lttng_event_notifier_create(desc,
+ event_notifier_enabler->user_token,
+ event_notifier_enabler->error_counter_index,
+ event_notifier_group);
+ if (ret) {
+ DBG("Unable to create event_notifier \"%s:%s\", error %d\n",
+ probe_desc->provider_name,
+ probe_desc->event_desc[i]->event_name, ret);
+ }
+ }
+ }
+}
+
+/*
+ * Create event_notifiers associated with an event_notifier enabler (if not already present).
+ */
+static
+int lttng_event_notifier_enabler_ref_event_notifiers(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ struct lttng_event_notifier_group *event_notifier_group = event_notifier_enabler->group;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+
+ /*
+ * Only try to create event_notifiers for enablers that are
+ * enabled; the user might still be attaching filters or
+ * exclusions to the event_notifier_enabler.
+ */
+ if (!lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->enabled)
+ goto end;
+
+ /* First, ensure that probe event_notifiers are created for this enabler. */
+ lttng_create_event_notifier_if_missing(event_notifier_enabler);
+
+ /* Link the created event_notifier with its associated enabler. */
+ cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+
+ if (!lttng_event_notifier_enabler_match_event_notifier(event_notifier_enabler, event_notifier_priv->pub))
+ continue;
+
+ enabler_ref = lttng_enabler_ref(&event_notifier_priv->parent.enablers_ref_head,
+ lttng_event_notifier_enabler_as_enabler(event_notifier_enabler));
+ if (!enabler_ref) {
+ /*
+ * If no backward ref, create it.
+ * Add backward ref from event_notifier to enabler.
+ */
+ enabler_ref = zmalloc(sizeof(*enabler_ref));
+ if (!enabler_ref)
+ return -ENOMEM;
+
+ enabler_ref->ref = lttng_event_notifier_enabler_as_enabler(
+ event_notifier_enabler);
+ cds_list_add(&enabler_ref->node,
+ &event_notifier_priv->parent.enablers_ref_head);
+ }
+
+ /*
+ * Link filter bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
+ &event_notifier_group->ctx,
+ &event_notifier_priv->parent.filter_bytecode_runtime_head,
+ &lttng_event_notifier_enabler_as_enabler(event_notifier_enabler)->filter_bytecode_head);
+
+ /*
+ * Link capture bytecodes if not linked yet.
+ */
+ lttng_enabler_link_bytecode(event_notifier_priv->parent.desc,
+ &event_notifier_group->ctx, &event_notifier_priv->capture_bytecode_runtime_head,
+ &event_notifier_enabler->capture_bytecode_head);
+
+ event_notifier_priv->num_captures = event_notifier_enabler->num_captures;
+ }
+end:
+ return 0;
+}
+
+static
+void lttng_event_notifier_group_sync_enablers(struct lttng_event_notifier_group *event_notifier_group)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+ struct lttng_ust_event_notifier_private *event_notifier_priv;
+
+ cds_list_for_each_entry(event_notifier_enabler, &event_notifier_group->enablers_head, node)
+ lttng_event_notifier_enabler_ref_event_notifiers(event_notifier_enabler);
+
+ /*
+ * For each event_notifier, if at least one of its enablers is enabled,
+ * we enable the event_notifier, else we disable it.
+ */
+ cds_list_for_each_entry(event_notifier_priv, &event_notifier_group->event_notifiers_head, node) {
+ struct lttng_enabler_ref *enabler_ref;
+ struct lttng_ust_bytecode_runtime *runtime;
+ int enabled = 0, has_enablers_without_filter_bytecode = 0;
+ int nr_filters = 0, nr_captures = 0;
+
+ /* Enable event_notifiers */
+ cds_list_for_each_entry(enabler_ref,
+ &event_notifier_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled) {
+ enabled = 1;
+ break;
+ }
+ }
+
+ CMM_STORE_SHARED(event_notifier_priv->pub->parent->enabled, enabled);
+ /*
+ * Sync tracepoint registration with event_notifier enabled
+ * state.
+ */
+ if (enabled) {
+ if (!event_notifier_priv->parent.registered)
+ register_event(event_notifier_priv->parent.pub);
+ } else {
+ if (event_notifier_priv->parent.registered)
+ unregister_event(event_notifier_priv->parent.pub);
+ }
+
+ /* Check if the event_notifier has any enabled enabler without filter bytecode */
+ cds_list_for_each_entry(enabler_ref,
+ &event_notifier_priv->parent.enablers_ref_head, node) {
+ if (enabler_ref->ref->enabled
+ && cds_list_empty(&enabler_ref->ref->filter_bytecode_head)) {
+ has_enablers_without_filter_bytecode = 1;
+ break;
+ }
+ }
+ event_notifier_priv->parent.has_enablers_without_filter_bytecode =
+ has_enablers_without_filter_bytecode;
+
+ /* Enable filters */
+ cds_list_for_each_entry(runtime,
+ &event_notifier_priv->parent.filter_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_filters++;
+ }
+ CMM_STORE_SHARED(event_notifier_priv->parent.pub->eval_filter,
+ !(has_enablers_without_filter_bytecode || !nr_filters));
+
+ /* Enable captures. */
+ cds_list_for_each_entry(runtime,
+ &event_notifier_priv->capture_bytecode_runtime_head, node) {
+ lttng_bytecode_sync_state(runtime);
+ nr_captures++;
+ }
+ CMM_STORE_SHARED(event_notifier_priv->pub->eval_capture,
+ !!nr_captures);
+ }
+ lttng_ust_tp_probe_prune_release_queue();
+}
+
+/*
+ * Apply enablers to session events, adding events to session if need
+ * be. It is required after each modification applied to an active
+ * session, and right before session "start".
+ * "lazy" sync means we only sync if required.
+ */
+static
+void lttng_session_lazy_sync_event_enablers(struct lttng_ust_session *session)
+{
+ /* We can skip if session is not active */
+ if (!session->active)
+ return;
+ lttng_session_sync_event_enablers(session);
+}
+
+/*
+ * Update all sessions with the given app context.
+ * Called with ust lock held.
+ * This is invoked when an application context gets loaded/unloaded. It
+ * ensures the context callbacks are in sync with the application
+ * context (either app context callbacks, or dummy callbacks).
+ */
+void lttng_ust_context_set_session_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+{
+ struct lttng_ust_session_private *session_priv;
+
+ cds_list_for_each_entry(session_priv, &sessions, node) {
+ struct lttng_ust_channel_buffer_private *chan;
+ struct lttng_ust_event_recorder_private *event_recorder_priv;
+ int ret;
+
+ ret = lttng_ust_context_set_provider_rcu(&session_priv->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ cds_list_for_each_entry(chan, &session_priv->chan_head, node) {
+ ret = lttng_ust_context_set_provider_rcu(&chan->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ }
+ cds_list_for_each_entry(event_recorder_priv, &session_priv->events_head, node) {
+ ret = lttng_ust_context_set_provider_rcu(&event_recorder_priv->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ }
+ }
+}
+
+/*
+ * Update all event_notifier groups with the given app context.
+ * Called with ust lock held.
+ * This is invoked when an application context gets loaded/unloaded. It
+ * ensures the context callbacks are in sync with the application
+ * context (either app context callbacks, or dummy callbacks).
+ */
+void lttng_ust_context_set_event_notifier_group_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+
+ cds_list_for_each_entry(event_notifier_group, &event_notifier_groups, node) {
+ int ret;
+
+ ret = lttng_ust_context_set_provider_rcu(
+ &event_notifier_group->ctx,
+ name, get_size, record, get_value, priv);
+ if (ret)
+ abort();
+ }
+}
+
+int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
+ unsigned char *uuid)
+{
+ if (!session)
+ return 0;
+ /* Compare UUID with session. */
+ if (session->priv->uuid_set) {
+ if (memcmp(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN)) {
+ return -1;
+ }
+ } else {
+ memcpy(session->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
+ session->priv->uuid_set = true;
+ }
+ return 0;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <error.h>
+#include <dlfcn.h>
+#include <stdlib.h>
+#include <usterr-signal-safe.h>
+#include <lttng/ust-getcpu.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+
+#include "getenv.h"
+#include "../libringbuffer/getcpu.h"
+
+int (*lttng_get_cpu)(void);
+
+static
+void *getcpu_handle;
+
+int lttng_ust_getcpu_override(int (*getcpu)(void))
+{
+ CMM_STORE_SHARED(lttng_get_cpu, getcpu);
+ return 0;
+}
+
+void lttng_ust_getcpu_init(void)
+{
+ const char *libname;
+ void (*libinit)(void);
+
+ if (getcpu_handle)
+ return;
+ libname = lttng_ust_getenv("LTTNG_UST_GETCPU_PLUGIN");
+ if (!libname)
+ return;
+ getcpu_handle = dlopen(libname, RTLD_NOW);
+ if (!getcpu_handle) {
+ PERROR("Cannot load LTTng UST getcpu override library %s",
+ libname);
+ return;
+ }
+ dlerror();
+ libinit = (void (*)(void)) dlsym(getcpu_handle,
+ "lttng_ust_getcpu_plugin_init");
+ if (!libinit) {
+ PERROR("Cannot find LTTng UST getcpu override library %s initialization function lttng_ust_getcpu_plugin_init()",
+ libname);
+ return;
+ }
+ libinit();
+}
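+
+/*
+ * Illustrative sketch of a getcpu plugin (an assumption for
+ * documentation purposes, not part of this file): a shared object
+ * named by the LTTNG_UST_GETCPU_PLUGIN environment variable, whose
+ * lttng_ust_getcpu_plugin_init() entry point is resolved by dlsym()
+ * above. The constant-zero getcpu below is a placeholder; a real
+ * plugin would return the current CPU number.
+ *
+ *   #include <stdlib.h>
+ *   #include <lttng/ust-getcpu.h>
+ *
+ *   static int plugin_getcpu(void)
+ *   {
+ *           return 0;   // placeholder: pretend all work runs on CPU 0
+ *   }
+ *
+ *   void lttng_ust_getcpu_plugin_init(void)
+ *   {
+ *           if (lttng_ust_getcpu_override(plugin_getcpu))
+ *                   abort();
+ *   }
+ */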
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng hash table helpers.
+ */
+
+#ifndef _LTTNG_HASH_HELPER_H
+#define _LTTNG_HASH_HELPER_H
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <urcu/compiler.h>
+
+/*
+ * Hash function
+ * Source: http://burtleburtle.net/bob/c/lookup3.c
+ * Originally Public Domain
+ */
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+#define mix(a, b, c) \
+do { \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c, 16); c += b; \
+ b -= a; b ^= rot(a, 19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+} while (0)
+
+#define final(a, b, c) \
+{ \
+ c ^= b; c -= rot(b, 14); \
+ a ^= c; a -= rot(c, 11); \
+ b ^= a; b -= rot(a, 25); \
+ c ^= b; c -= rot(b, 16); \
+ a ^= c; a -= rot(c, 4); \
+ b ^= a; b -= rot(a, 14); \
+ c ^= b; c -= rot(b, 24); \
+}
+
+static inline
+uint32_t lttng_hash_u32(const uint32_t *k, size_t length, uint32_t initval)
+ __attribute__((unused));
+static inline
+uint32_t lttng_hash_u32(
+ const uint32_t *k, /* the key, an array of uint32_t values */
+ size_t length, /* the length of the key, in uint32_ts */
+ uint32_t initval) /* the previous hash, or an arbitrary value */
+{
+ uint32_t a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + (((uint32_t) length) << 2) + initval;
+
+ /*----------------------------------------- handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*----------------------------------- handle the last 3 uint32_t's */
+ switch (length) { /* all the case statements fall through */
+ case 3: c += k[2]; /* fall through */
+ case 2: b += k[1]; /* fall through */
+ case 1: a += k[0];
+ final(a, b, c); /* fall through */
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*---------------------------------------------- report the result */
+ return c;
+}
+
+static inline
+void lttng_hashword2(
+ const uint32_t *k, /* the key, an array of uint32_t values */
+ size_t length, /* the length of the key, in uint32_ts */
+ uint32_t *pc, /* IN: seed OUT: primary hash value */
+ uint32_t *pb) /* IN: more seed OUT: secondary hash value */
+{
+ uint32_t a, b, c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t) (length << 2)) + *pc;
+ c += *pb;
+
+ /*----------------------------------------- handle most of the key */
+ while (length > 3) {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a, b, c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*----------------------------------- handle the last 3 uint32_t's */
+ switch (length) { /* all the case statements fall through */
+ case 3: c += k[2]; /* fall through */
+ case 2: b += k[1]; /* fall through */
+ case 1: a += k[0];
+ final(a, b, c); /* fall through */
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*---------------------------------------------- report the result */
+ *pc = c;
+ *pb = b;
+}
+
+#if (CAA_BITS_PER_LONG == 32)
+static inline
+unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
+{
+ unsigned int key = (unsigned int) _key;
+
+ assert(length == sizeof(unsigned int));
+ return lttng_hash_u32(&key, 1, seed);
+}
+#else
+static inline
+unsigned long lttng_hash_mix(const void *_key, size_t length, unsigned long seed)
+{
+ union {
+ uint64_t v64;
+ uint32_t v32[2];
+ } v;
+ union {
+ uint64_t v64;
+ uint32_t v32[2];
+ } key;
+
+ assert(length == sizeof(unsigned long));
+ v.v64 = (uint64_t) seed;
+ key.v64 = (uint64_t) _key;
+ lttng_hashword2(key.v32, 2, &v.v32[0], &v.v32[1]);
+ return v.v64;
+}
+#endif
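+
+/*
+ * Usage sketch (illustrative; the table size and seed below are
+ * hypothetical): hashing a pointer key into a power-of-two bucket
+ * array with lttng_hash_mix(), the way the tracer hash tables use it.
+ *
+ *   #define HT_SIZE 4096   // must be a power of two for the mask
+ *
+ *   static unsigned long bucket_of(const void *key)
+ *   {
+ *           return lttng_hash_mix(key, sizeof(unsigned long), 0)
+ *                   & (HT_SIZE - 1);
+ *   }
+ */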
+
+#endif /* _LTTNG_HASH_HELPER_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright 2010-2012 (C) Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Holds LTTng probes registry.
+ */
+
+#define _LGPL_SOURCE
+#include <string.h>
+#include <errno.h>
+#include <urcu/list.h>
+#include <urcu/hlist.h>
+#include <lttng/ust-events.h>
+#include <lttng/tracepoint.h>
+#include "tracepoint-internal.h"
+#include <assert.h>
+#include <ust-helper.h>
+#include <ctype.h>
+
+#include "lttng-tracer-core.h"
+#include "jhash.h"
+#include "error.h"
+#include "ust-events-internal.h"
+
+/*
+ * probe list is protected by ust_lock()/ust_unlock().
+ */
+static CDS_LIST_HEAD(_probe_list);
+
+/*
+ * List of probes registered but not yet processed.
+ */
+static CDS_LIST_HEAD(lazy_probe_init);
+
+/*
+ * lazy_nesting counter ensures we don't trigger lazy probe registration
+ * fixup while we are performing the fixup. It is protected by the ust
+ * mutex.
+ */
+static int lazy_nesting;
+
+/*
+ * Validate that each event within the probe provider refers to the
+ * right probe, and that the resulting name is not too long.
+ */
+static
+bool check_event_provider(const struct lttng_ust_probe_desc *probe_desc)
+{
+ int i;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc = probe_desc->event_desc[i];
+
+ if (event_desc->probe_desc != probe_desc) {
+ ERR("Error registering probe provider '%s'. Event '%s:%s' refers to the wrong provider descriptor.",
+ probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
+ return false; /* provider mismatch */
+ }
+ if (!lttng_ust_validate_event_name(event_desc)) {
+ ERR("Error registering probe provider '%s'. Event '%s:%s' name is too long.",
+ probe_desc->provider_name, probe_desc->provider_name, event_desc->event_name);
+ return false; /* event name too long */
+ }
+ }
+ return true;
+}
+
+/*
+ * Called under ust lock.
+ */
+static
+void lttng_lazy_probe_register(struct lttng_ust_registered_probe *reg_probe)
+{
+ struct lttng_ust_registered_probe *iter;
+ struct cds_list_head *probe_list;
+
+ /*
+ * The provider ensures there are no duplicate event names.
+ * Duplicated TRACEPOINT_EVENT event names would generate a
+ * compile-time error due to duplicated symbol names.
+ */
+
+ /*
+ * We sort the probes by struct lttng_ust_registered_probe
+ * pointer address.
+ */
+ probe_list = &_probe_list;
+ cds_list_for_each_entry_reverse(iter, probe_list, head) {
+ BUG_ON(iter == reg_probe); /* Should never be in the list twice */
+ if (iter < reg_probe) {
+ /* We belong to the location right after iter. */
+ cds_list_add(&reg_probe->head, &iter->head);
+ goto probe_added;
+ }
+ }
+ /* We should be added at the head of the list */
+ cds_list_add(&reg_probe->head, probe_list);
+probe_added:
+ DBG("just registered probe %s containing %u events",
+ reg_probe->desc->provider_name, reg_probe->desc->nr_events);
+}
+
+/*
+ * Called under ust lock.
+ */
+static
+void fixup_lazy_probes(void)
+{
+ struct lttng_ust_registered_probe *iter, *tmp;
+ int ret;
+
+ lazy_nesting++;
+ cds_list_for_each_entry_safe(iter, tmp,
+ &lazy_probe_init, lazy_init_head) {
+ lttng_lazy_probe_register(iter);
+ iter->lazy = 0;
+ cds_list_del(&iter->lazy_init_head);
+ }
+ ret = lttng_fix_pending_events();
+ assert(!ret);
+ lazy_nesting--;
+}
+
+/*
+ * Called under ust lock.
+ */
+struct cds_list_head *lttng_get_probe_list_head(void)
+{
+ if (!lazy_nesting && !cds_list_empty(&lazy_probe_init))
+ fixup_lazy_probes();
+ return &_probe_list;
+}
+
+static
+int check_provider_version(const struct lttng_ust_probe_desc *desc)
+{
+ /*
+ * Check tracepoint provider version compatibility.
+ */
+ if (desc->major <= LTTNG_UST_PROVIDER_MAJOR) {
+ DBG("Provider \"%s\" accepted, version %u.%u is compatible "
+ "with LTTng UST provider version %u.%u.",
+ desc->provider_name, desc->major, desc->minor,
+ LTTNG_UST_PROVIDER_MAJOR,
+ LTTNG_UST_PROVIDER_MINOR);
+ if (desc->major < LTTNG_UST_PROVIDER_MAJOR) {
+ DBG("However, some LTTng UST features might not be "
+ "available for this provider unless it is "
+ "recompiled against a more recent LTTng UST.");
+ }
+ return 1; /* accept */
+ } else {
+ ERR("Provider \"%s\" rejected, version %u.%u is incompatible "
+ "with LTTng UST provider version %u.%u. Please upgrade "
+ "LTTng UST.",
+ desc->provider_name, desc->major, desc->minor,
+ LTTNG_UST_PROVIDER_MAJOR,
+ LTTNG_UST_PROVIDER_MINOR);
+ return 0; /* reject */
+ }
+}
+
+struct lttng_ust_registered_probe *lttng_ust_probe_register(const struct lttng_ust_probe_desc *desc)
+{
+ struct lttng_ust_registered_probe *reg_probe = NULL;
+
+ lttng_ust_fixup_tls();
+
+ /*
+ * If version mismatch, don't register, but don't trigger assert
+ * on caller. The version check just prints an error.
+ */
+ if (!check_provider_version(desc))
+ return NULL;
+ if (!check_event_provider(desc))
+ return NULL;
+
+ ust_lock_nocheck();
+
+ reg_probe = zmalloc(sizeof(struct lttng_ust_registered_probe));
+ if (!reg_probe)
+ goto end;
+ reg_probe->desc = desc;
+ cds_list_add(&reg_probe->lazy_init_head, &lazy_probe_init);
+ reg_probe->lazy = 1;
+
+ DBG("adding probe %s containing %u events to lazy registration list",
+ desc->provider_name, desc->nr_events);
+ /*
+ * If there is at least one active session, we need to register
+ * the probe immediately, since we cannot delay event
+ * registration because they are needed ASAP.
+ */
+ if (lttng_session_active())
+ fixup_lazy_probes();
+
+ lttng_fix_pending_event_notifiers();
+end:
+ ust_unlock();
+ return reg_probe;
+}
+
+void lttng_ust_probe_unregister(struct lttng_ust_registered_probe *reg_probe)
+{
+ lttng_ust_fixup_tls();
+
+ if (!reg_probe)
+ return;
+ if (!check_provider_version(reg_probe->desc))
+ return;
+
+ ust_lock_nocheck();
+ if (!reg_probe->lazy)
+ cds_list_del(&reg_probe->head);
+ else
+ cds_list_del(&reg_probe->lazy_init_head);
+
+ lttng_probe_provider_unregister_events(reg_probe->desc);
+ DBG("just unregistered probes of provider %s", reg_probe->desc->provider_name);
+ ust_unlock();
+ free(reg_probe);
+}
+
+void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
+{
+ struct tp_list_entry *list_entry, *tmp;
+
+ cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
+ cds_list_del(&list_entry->head);
+ free(list_entry);
+ }
+}
+
+/*
+ * called with UST lock held.
+ */
+int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
+{
+ struct lttng_ust_registered_probe *reg_probe;
+ struct cds_list_head *probe_list;
+ int i;
+
+ probe_list = lttng_get_probe_list_head();
+ CDS_INIT_LIST_HEAD(&list->head);
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc =
+ probe_desc->event_desc[i];
+ struct tp_list_entry *list_entry;
+
+ /* Skip event if name is too long. */
+ if (!lttng_ust_validate_event_name(event_desc))
+ continue;
+ list_entry = zmalloc(sizeof(*list_entry));
+ if (!list_entry)
+ goto err_nomem;
+ cds_list_add(&list_entry->head, &list->head);
+ lttng_ust_format_event_name(event_desc, list_entry->tp.name);
+ if (!event_desc->loglevel) {
+ list_entry->tp.loglevel = TRACE_DEFAULT;
+ } else {
+ list_entry->tp.loglevel = *(*event_desc->loglevel);
+ }
+ }
+ }
+ if (cds_list_empty(&list->head))
+ list->iter = NULL;
+ else
+ list->iter =
+ cds_list_first_entry(&list->head, struct tp_list_entry, head);
+ return 0;
+
+err_nomem:
+ lttng_probes_prune_event_list(list);
+ return -ENOMEM;
+}
+
+/*
+ * Return current iteration position, advance internal iterator to next.
+ * Return NULL if end of list.
+ */
+struct lttng_ust_abi_tracepoint_iter *
+ lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
+{
+ struct tp_list_entry *entry;
+
+ if (!list->iter)
+ return NULL;
+ entry = list->iter;
+ if (entry->head.next == &list->head)
+ list->iter = NULL;
+ else
+ list->iter = cds_list_entry(entry->head.next,
+ struct tp_list_entry, head);
+ return &entry->tp;
+}
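+
+/*
+ * Usage sketch (illustrative): draining the list built by
+ * lttng_probes_get_event_list() with the iterator above, with the UST
+ * lock held. DBG() stands in for whatever a caller does with each
+ * entry.
+ *
+ *   struct lttng_ust_tracepoint_list list;
+ *   struct lttng_ust_abi_tracepoint_iter *iter;
+ *
+ *   if (!lttng_probes_get_event_list(&list)) {
+ *           while ((iter = lttng_ust_tracepoint_list_get_iter_next(&list)) != NULL)
+ *                   DBG("tracepoint: %s", iter->name);
+ *           lttng_probes_prune_event_list(&list);
+ *   }
+ */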
+
+void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
+{
+ struct tp_field_list_entry *list_entry, *tmp;
+
+ cds_list_for_each_entry_safe(list_entry, tmp, &list->head, head) {
+ cds_list_del(&list_entry->head);
+ free(list_entry);
+ }
+}
+
+/*
+ * called with UST lock held.
+ */
+int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
+{
+ struct lttng_ust_registered_probe *reg_probe;
+ struct cds_list_head *probe_list;
+ int i;
+
+ probe_list = lttng_get_probe_list_head();
+ CDS_INIT_LIST_HEAD(&list->head);
+ cds_list_for_each_entry(reg_probe, probe_list, head) {
+ const struct lttng_ust_probe_desc *probe_desc = reg_probe->desc;
+
+ for (i = 0; i < probe_desc->nr_events; i++) {
+ const struct lttng_ust_event_desc *event_desc =
+ probe_desc->event_desc[i];
+ int j;
+
+ if (event_desc->nr_fields == 0) {
+ /* Events without fields. */
+ struct tp_field_list_entry *list_entry;
+
+ /* Skip event if name is too long. */
+ if (!lttng_ust_validate_event_name(event_desc))
+ continue;
+ list_entry = zmalloc(sizeof(*list_entry));
+ if (!list_entry)
+ goto err_nomem;
+ cds_list_add(&list_entry->head, &list->head);
+ lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
+ list_entry->field.field_name[0] = '\0';
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ if (!event_desc->loglevel) {
+ list_entry->field.loglevel = TRACE_DEFAULT;
+ } else {
+ list_entry->field.loglevel = *(*event_desc->loglevel);
+ }
+ list_entry->field.nowrite = 1;
+ }
+
+ for (j = 0; j < event_desc->nr_fields; j++) {
+ const struct lttng_ust_event_field *event_field =
+ event_desc->fields[j];
+ struct tp_field_list_entry *list_entry;
+
+ /* Skip event if name is too long. */
+ if (!lttng_ust_validate_event_name(event_desc))
+ continue;
+ list_entry = zmalloc(sizeof(*list_entry));
+ if (!list_entry)
+ goto err_nomem;
+ cds_list_add(&list_entry->head, &list->head);
+ lttng_ust_format_event_name(event_desc, list_entry->field.event_name);
+ strncpy(list_entry->field.field_name,
+ event_field->name,
+ LTTNG_UST_ABI_SYM_NAME_LEN);
+ list_entry->field.field_name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+ switch (event_field->type->type) {
+ case lttng_ust_type_integer:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_INTEGER;
+ break;
+ case lttng_ust_type_string:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
+ break;
+ case lttng_ust_type_array:
+ if (lttng_ust_get_type_array(event_field->type)->encoding == lttng_ust_string_encoding_none)
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ else
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
+ break;
+ case lttng_ust_type_sequence:
+ if (lttng_ust_get_type_sequence(event_field->type)->encoding == lttng_ust_string_encoding_none)
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ else
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_STRING;
+ break;
+ case lttng_ust_type_float:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_FLOAT;
+ break;
+ case lttng_ust_type_enum:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_ENUM;
+ break;
+ default:
+ list_entry->field.type = LTTNG_UST_ABI_FIELD_OTHER;
+ }
+ if (!event_desc->loglevel) {
+ list_entry->field.loglevel = TRACE_DEFAULT;
+ } else {
+ list_entry->field.loglevel = *(*event_desc->loglevel);
+ }
+ list_entry->field.nowrite = event_field->nowrite;
+ }
+ }
+ }
+ if (cds_list_empty(&list->head))
+ list->iter = NULL;
+ else
+ list->iter =
+ cds_list_first_entry(&list->head,
+ struct tp_field_list_entry, head);
+ return 0;
+
+err_nomem:
+ lttng_probes_prune_field_list(list);
+ return -ENOMEM;
+}
+
+/*
+ * Return current iteration position, advance internal iterator to next.
+ * Return NULL if end of list.
+ */
+struct lttng_ust_abi_field_iter *
+ lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
+{
+ struct tp_field_list_entry *entry;
+
+ if (!list->iter)
+ return NULL;
+ entry = list->iter;
+ if (entry->head.next == &list->head)
+ list->iter = NULL;
+ else
+ list->iter = cds_list_entry(entry->head.next,
+ struct tp_field_list_entry, head);
+ return &entry->field;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2013 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_RB_CLIENT_H
+#define _LTTNG_RB_CLIENT_H
+
+#include <stdint.h>
+#include "../libringbuffer/ringbuffer-config.h"
+
+struct lttng_ust_client_lib_ring_buffer_client_cb {
+ struct lttng_ust_lib_ring_buffer_client_cb parent;
+
+ int (*timestamp_begin) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_begin);
+ int (*timestamp_end) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_end);
+ int (*events_discarded) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *events_discarded);
+ int (*content_size) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *content_size);
+ int (*packet_size) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *packet_size);
+ int (*stream_id) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *stream_id);
+ int (*current_timestamp) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *ts);
+ int (*sequence_number) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *seq);
+ int (*instance_id) (struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan, uint64_t *id);
+};
+
+/*
+ * The ring buffer clients init/exit symbols are private ABI for
+ * liblttng-ust-ctl, which is why they are not hidden.
+ */
+void lttng_ust_ring_buffer_clients_init(void);
+void lttng_ust_ring_buffer_clients_exit(void);
+
+void lttng_ring_buffer_client_overwrite_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_overwrite_rt_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_rt_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_metadata_client_init(void)
+ __attribute__((visibility("hidden")));
+
+
+void lttng_ring_buffer_client_overwrite_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_overwrite_rt_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_client_discard_rt_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ring_buffer_metadata_client_exit(void)
+ __attribute__((visibility("hidden")));
+
+
+void lttng_ust_fixup_ring_buffer_client_overwrite_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_ring_buffer_client_discard_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_ring_buffer_client_discard_rt_tls(void)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_RB_CLIENT_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (discard mode) for RT.
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard-rt"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_discard_rt_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_discard_rt_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_discard_rt_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD_RT
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (discard mode).
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "discard"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_discard_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_discard_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_discard_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_DISCARD
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (overwrite mode) for RT.
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite-rt"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_overwrite_rt_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_overwrite_rt_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE_RT
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_TIMER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client (overwrite mode).
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+#include "lttng-rb-clients.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_OVERWRITE
+#define RING_BUFFER_MODE_TEMPLATE_STRING "overwrite"
+#define RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP \
+ lttng_ust_fixup_ring_buffer_client_overwrite_tls
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_client_overwrite_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_client_overwrite_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_OVERWRITE
+#define LTTNG_CLIENT_WAKEUP RING_BUFFER_WAKEUP_BY_WRITER
+#include "lttng-ring-buffer-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client template.
+ */
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ust-events-internal.h>
+#include <lttng/urcu/pointer.h>
+#include "ust-bitfield.h"
+#include "ust-compat.h"
+#include "clock.h"
+#include "context-internal.h"
+#include "lttng-tracer.h"
+#include "../libringbuffer/frontend_types.h"
+#include <urcu/tls-compat.h>
+
+#define LTTNG_COMPACT_EVENT_BITS 5
+#define LTTNG_COMPACT_TSC_BITS 27
+
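+/*
+ * Compact header layout sketch: the event id and timestamp share a
+ * single 32-bit word, written with bt_bitfield_write() below. Bits
+ * [0, 5) hold the event id (value 31 is reserved to flag the extended
+ * header, see the slow path), and bits [5, 32) hold the low 27 bits
+ * of the timestamp.
+ */
+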
+/*
+ * Keep the natural field alignment for _each field_ within this structure if
+ * you ever add/remove a field from this header. Packed attribute is not used
+ * because gcc generates poor code on at least powerpc and mips. Don't ever
+ * let gcc add padding between the structure elements.
+ */
+
+struct packet_header {
+ /* Trace packet header */
+ uint32_t magic; /*
+ * Trace magic number.
+ * contains endianness information.
+ */
+ uint8_t uuid[LTTNG_UST_UUID_LEN];
+ uint32_t stream_id;
+ uint64_t stream_instance_id;
+
+ struct {
+ /* Stream packet context */
+ uint64_t timestamp_begin; /* Cycle count at subbuffer start */
+ uint64_t timestamp_end; /* Cycle count at subbuffer end */
+ uint64_t content_size; /* Size of data in subbuffer */
+ uint64_t packet_size; /* Subbuffer size (include padding) */
+ uint64_t packet_seq_num; /* Packet sequence number */
+ unsigned long events_discarded; /*
+ * Events lost in this subbuffer since
+ * the beginning of the trace.
+ * (may overflow)
+ */
+ uint32_t cpu_id; /* CPU id associated with stream */
+ uint8_t header_end; /* End of header */
+ } ctx;
+};
+
+struct lttng_client_ctx {
+ size_t packet_context_len;
+ size_t event_context_len;
+ struct lttng_ust_ctx *chan_ctx;
+ struct lttng_ust_ctx *event_ctx;
+};
+
+/*
+ * Indexed by lib_ring_buffer_nesting_count().
+ */
+typedef struct lttng_ust_lib_ring_buffer_ctx_private private_ctx_stack_t[LIB_RING_BUFFER_MAX_NESTING];
+static DEFINE_URCU_TLS(private_ctx_stack_t, private_ctx_stack);
+
+/*
+ * Force a read (imply TLS fixup for dlopen) of TLS variables.
+ */
+void RING_BUFFER_MODE_TEMPLATE_TLS_FIXUP(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(private_ctx_stack)));
+}
+
+static inline uint64_t lib_ring_buffer_clock_read(
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+{
+ return trace_clock_read64();
+}
+
+static inline
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ust_ctx *ctx,
+ size_t ctx_len)
+{
+ size_t orig_offset = offset;
+
+ if (caa_likely(!ctx))
+ return 0;
+ offset += lttng_ust_lib_ring_buffer_align(offset, ctx->largest_align);
+ offset += ctx_len;
+ return offset - orig_offset;
+}
+
+static inline
+void ctx_get_struct_size(struct lttng_ust_ctx *ctx, size_t *ctx_len)
+{
+ int i;
+ size_t offset = 0;
+
+ if (caa_likely(!ctx)) {
+ *ctx_len = 0;
+ return;
+ }
+ for (i = 0; i < ctx->nr_fields; i++)
+ offset += ctx->fields[i].get_size(ctx->fields[i].priv, offset);
+ *ctx_len = offset;
+}
+
+static inline
+void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
+ struct lttng_ust_channel_buffer *chan,
+ struct lttng_ust_ctx *ctx)
+{
+ int i;
+
+ if (caa_likely(!ctx))
+ return;
+ lttng_ust_lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
+ for (i = 0; i < ctx->nr_fields; i++)
+ ctx->fields[i].record(ctx->fields[i].priv, bufctx, chan);
+}
+
+/*
+ * record_header_size - Calculate the header size and padding necessary.
+ * @config: ring buffer instance configuration
+ * @chan: channel
+ * @offset: offset in the write buffer
+ * @pre_header_padding: padding to add before the header (output)
+ * @ctx: reservation context
+ *
+ * Returns the event header size (including padding).
+ *
+ * The payload must itself determine its own alignment from the biggest type it
+ * contains.
+ */
+static __inline__
+size_t record_header_size(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ size_t offset,
+ size_t *pre_header_padding,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx)
+{
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+ size_t orig_offset = offset;
+ size_t padding;
+
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
+ offset += padding;
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ offset += sizeof(uint32_t); /* id and timestamp */
+ } else {
+ /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
+ offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
+ /* Align extended struct on largest member */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint32_t); /* id */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint64_t); /* timestamp */
+ }
+ break;
+ case 2: /* large */
+ padding = lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint16_t));
+ offset += padding;
+ offset += sizeof(uint16_t);
+ if (!(ctx->priv->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint32_t));
+ offset += sizeof(uint32_t); /* timestamp */
+ } else {
+ /* Align extended struct on largest member */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint32_t); /* id */
+ offset += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(uint64_t));
+ offset += sizeof(uint64_t); /* timestamp */
+ }
+ break;
+ default:
+ padding = 0;
+ WARN_ON_ONCE(1);
+ }
+ offset += ctx_get_aligned_size(offset, client_ctx->chan_ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, client_ctx->event_ctx,
+ client_ctx->event_context_len);
+ *pre_header_padding = padding;
+ return offset - orig_offset;
+}
+
+#include "../libringbuffer/api.h"
+#include "lttng-rb-clients.h"
+
+static
+void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx,
+ uint32_t event_id);
+
+/*
+ * lttng_write_event_header
+ *
+ * Writes the event header to the offset (already aligned on 32-bits).
+ *
+ * @config: ring buffer instance configuration
+ * @ctx: reservation context
+ * @event_id: event ID
+ */
+static __inline__
+void lttng_write_event_header(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx,
+ uint32_t event_id)
+{
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
+
+ if (caa_unlikely(ctx->priv->rflags))
+ goto slow_path;
+
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ {
+ uint32_t id_time = 0;
+
+ bt_bitfield_write(&id_time, uint32_t,
+ 0,
+ LTTNG_COMPACT_EVENT_BITS,
+ event_id);
+ bt_bitfield_write(&id_time, uint32_t,
+ LTTNG_COMPACT_EVENT_BITS,
+ LTTNG_COMPACT_TSC_BITS,
+ ctx->priv->tsc);
+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+ break;
+ }
+ case 2: /* large */
+ {
+ uint32_t timestamp = (uint32_t) ctx->priv->tsc;
+ uint16_t id = event_id;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
+ ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+
+ return;
+
+slow_path:
+ lttng_write_event_header_slow(config, ctx, client_ctx, event_id);
+}
+
+static
+void lttng_write_event_header_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx,
+ uint32_t event_id)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(ctx->priv->chan);
+
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t id_time = 0;
+
+ bt_bitfield_write(&id_time, uint32_t,
+ 0,
+ LTTNG_COMPACT_EVENT_BITS,
+ event_id);
+ bt_bitfield_write(&id_time, uint32_t,
+ LTTNG_COMPACT_EVENT_BITS,
+ LTTNG_COMPACT_TSC_BITS,
+ ctx_private->tsc);
+ lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
+ } else {
+ uint8_t id = 0;
+ uint64_t timestamp = ctx_private->tsc;
+
+ bt_bitfield_write(&id, uint8_t,
+ 0,
+ LTTNG_COMPACT_EVENT_BITS,
+ 31);
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ /* Align extended struct on largest member */
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ }
+ break;
+ case 2: /* large */
+ {
+ if (!(ctx_private->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+ uint32_t timestamp = (uint32_t) ctx_private->tsc;
+ uint16_t id = event_id;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint32_t));
+ lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ } else {
+ uint16_t id = 65535;
+ uint64_t timestamp = ctx_private->tsc;
+
+ lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ /* Align extended struct on largest member */
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+ lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, lttng_ust_rb_alignof(uint64_t));
+			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
+ }
+ break;
+ }
+ default:
+ WARN_ON_ONCE(1);
+ }
+ ctx_record(ctx, lttng_chan, client_ctx->chan_ctx);
+ ctx_record(ctx, lttng_chan, client_ctx->event_ctx);
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
+}
+
+static const struct lttng_ust_lib_ring_buffer_config client_config;
+
+static uint64_t client_ring_buffer_clock_read(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return lib_ring_buffer_clock_read(chan);
+}
+
+static
+size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ size_t offset,
+ size_t *pre_header_padding,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+{
+ return record_header_size(config, chan, offset,
+ pre_header_padding, ctx, client_ctx);
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static size_t client_packet_header_size(void)
+{
+ return offsetof(struct packet_header, ctx.header_end);
+}
+
+static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ unsigned int subbuf_idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct packet_header *header =
+ (struct packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+ uint64_t cnt = shmp_index(handle, buf->backend.buf_cnt, subbuf_idx)->seq_cnt;
+
+ assert(header);
+ if (!header)
+ return;
+ header->magic = CTF_MAGIC_NUMBER;
+ memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
+ header->stream_id = lttng_chan->priv->id;
+ header->stream_instance_id = buf->backend.cpu;
+ header->ctx.timestamp_begin = tsc;
+ header->ctx.timestamp_end = 0;
+ header->ctx.content_size = ~0ULL; /* for debugging */
+ header->ctx.packet_size = ~0ULL;
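+	/*
+	 * Derive the packet sequence number from the sub-buffer produce
+	 * count: each ring wrap accounts for num_subbuf packets, plus the
+	 * index within the current wrap.
+	 */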
+ header->ctx.packet_seq_num = chan->backend.num_subbuf * cnt + subbuf_idx;
+ header->ctx.events_discarded = 0;
+ header->ctx.cpu_id = buf->backend.cpu;
+}
+
+/*
+ * offset is assumed never to be 0 here: we never deliver a completely
+ * empty subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ unsigned int subbuf_idx, unsigned long data_size,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct packet_header *header =
+ (struct packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ unsigned long records_lost = 0;
+
+ assert(header);
+ if (!header)
+ return;
+ header->ctx.timestamp_end = tsc;
+ header->ctx.content_size =
+ (uint64_t) data_size * CHAR_BIT; /* in bits */
+ header->ctx.packet_size =
+ (uint64_t) LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+
+ records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
+ header->ctx.events_discarded = records_lost;
+}
+
+static int client_buffer_create(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ const char *name __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+ return 0;
+}
+
+static void client_buffer_finalize(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+}
+
+static void client_content_size_field(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ size_t *offset, size_t *length)
+{
+ *offset = offsetof(struct packet_header, ctx.content_size);
+ *length = sizeof(((struct packet_header *) NULL)->ctx.content_size);
+}
+
+static void client_packet_size_field(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ size_t *offset, size_t *length)
+{
+ *offset = offsetof(struct packet_header, ctx.packet_size);
+ *length = sizeof(((struct packet_header *) NULL)->ctx.packet_size);
+}
+
+static struct packet_header *client_packet_header(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ return lib_ring_buffer_read_offset_address(&buf->backend, 0, handle);
+}
+
+static int client_timestamp_begin(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_begin)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *timestamp_begin = header->ctx.timestamp_begin;
+ return 0;
+}
+
+static int client_timestamp_end(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *timestamp_end)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *timestamp_end = header->ctx.timestamp_end;
+ return 0;
+}
+
+static int client_events_discarded(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *events_discarded)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *events_discarded = header->ctx.events_discarded;
+ return 0;
+}
+
+static int client_content_size(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *content_size)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *content_size = header->ctx.content_size;
+ return 0;
+}
+
+static int client_packet_size(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *packet_size)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *packet_size = header->ctx.packet_size;
+ return 0;
+}
+
+static int client_stream_id(struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *stream_id)
+{
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+
+ *stream_id = lttng_chan->priv->id;
+
+ return 0;
+}
+
+static int client_current_timestamp(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *ts)
+{
+ *ts = client_ring_buffer_clock_read(chan);
+
+ return 0;
+}
+
+static int client_sequence_number(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ uint64_t *seq)
+{
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ if (!header)
+ return -1;
+ *seq = header->ctx.packet_seq_num;
+ return 0;
+}
+
+static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ uint64_t *id)
+{
+ *id = buf->backend.cpu;
+
+ return 0;
+}
+
+static const
+struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
+ .parent = {
+ .ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .record_header_size = client_record_header_size,
+ .subbuffer_header_size = client_packet_header_size,
+ .buffer_begin = client_buffer_begin,
+ .buffer_end = client_buffer_end,
+ .buffer_create = client_buffer_create,
+ .buffer_finalize = client_buffer_finalize,
+ .content_size_field = client_content_size_field,
+ .packet_size_field = client_packet_size_field,
+ },
+ .timestamp_begin = client_timestamp_begin,
+ .timestamp_end = client_timestamp_end,
+ .events_discarded = client_events_discarded,
+ .content_size = client_content_size,
+ .packet_size = client_packet_size,
+ .stream_id = client_stream_id,
+ .current_timestamp = client_current_timestamp,
+ .sequence_number = client_sequence_number,
+ .instance_id = client_instance_id,
+};
+
+static const struct lttng_ust_lib_ring_buffer_config client_config = {
+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .cb.record_header_size = client_record_header_size,
+ .cb.subbuffer_header_size = client_packet_header_size,
+ .cb.buffer_begin = client_buffer_begin,
+ .cb.buffer_end = client_buffer_end,
+ .cb.buffer_create = client_buffer_create,
+ .cb.buffer_finalize = client_buffer_finalize,
+ .cb.content_size_field = client_content_size_field,
+ .cb.packet_size_field = client_packet_size_field,
+
+ .tsc_bits = LTTNG_COMPACT_TSC_BITS,
+ .alloc = RING_BUFFER_ALLOC_PER_CPU,
+ .sync = RING_BUFFER_SYNC_GLOBAL,
+ .mode = RING_BUFFER_MODE_TEMPLATE,
+ .backend = RING_BUFFER_PAGE,
+ .output = RING_BUFFER_MMAP,
+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
+ .ipi = RING_BUFFER_NO_IPI_BARRIER,
+ .wakeup = LTTNG_CLIENT_WAKEUP,
+ .client_type = LTTNG_CLIENT_TYPE,
+
+ .cb_ptr = &client_cb.parent,
+};
+
+static
+struct lttng_ust_channel_buffer *_channel_create(const char *name,
+ void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ unsigned char *uuid,
+ uint32_t chan_id,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout)
+{
+ struct lttng_ust_abi_channel_config chan_priv_init;
+ struct lttng_ust_shm_handle *handle;
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+
+ lttng_chan_buf = lttng_ust_alloc_channel_buffer();
+ if (!lttng_chan_buf)
+ return NULL;
+ memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
+ lttng_chan_buf->priv->id = chan_id;
+
+ memset(&chan_priv_init, 0, sizeof(chan_priv_init));
+ memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
+ chan_priv_init.id = chan_id;
+
+ handle = channel_create(&client_config, name,
+ __alignof__(struct lttng_ust_abi_channel_config),
+ sizeof(struct lttng_ust_abi_channel_config),
+ &chan_priv_init,
+ lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
+ switch_timer_interval, read_timer_interval,
+ stream_fds, nr_stream_fds, blocking_timeout);
+ if (!handle)
+ goto error;
+ lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
+ return lttng_chan_buf;
+
+error:
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+ return NULL;
+}
+
+static
+void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
+{
+ channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+}
+
+static
+int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ struct lttng_ust_event_recorder *event_recorder = ctx->client_priv;
+ struct lttng_ust_channel_buffer *lttng_chan = event_recorder->chan;
+ struct lttng_client_ctx client_ctx;
+ int ret, nesting;
+ struct lttng_ust_lib_ring_buffer_ctx_private *private_ctx;
+ uint32_t event_id;
+
+ event_id = event_recorder->priv->id;
+ client_ctx.chan_ctx = lttng_ust_rcu_dereference(lttng_chan->priv->ctx);
+ client_ctx.event_ctx = lttng_ust_rcu_dereference(event_recorder->priv->ctx);
+ /* Compute internal size of context structures. */
+ ctx_get_struct_size(client_ctx.chan_ctx, &client_ctx.packet_context_len);
+ ctx_get_struct_size(client_ctx.event_ctx, &client_ctx.event_context_len);
+
+ nesting = lib_ring_buffer_nesting_inc(&client_config);
+ if (nesting < 0)
+ return -EPERM;
+
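+	/*
+	 * Each nesting level (e.g. an event fired from a signal handler
+	 * that interrupted another event being recorded) gets its own
+	 * private context from a per-thread stack.
+	 */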
+ private_ctx = &URCU_TLS(private_ctx_stack)[nesting];
+ memset(private_ctx, 0, sizeof(*private_ctx));
+ private_ctx->pub = ctx;
+ private_ctx->chan = lttng_chan->priv->rb_chan;
+
+ ctx->priv = private_ctx;
+
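+	/*
+	 * Event IDs that do not fit the compact (0..30) or large
+	 * (0..65534) ID space fall back to the extended header, which
+	 * carries the full 32-bit event ID.
+	 */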
+ switch (lttng_chan->priv->header_type) {
+ case 1: /* compact */
+ if (event_id > 30)
+ private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ break;
+ case 2: /* large */
+ if (event_id > 65534)
+ private_ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+
+ ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
+ if (caa_unlikely(ret))
+ goto put;
+ if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &private_ctx->backend_pages)) {
+ ret = -EPERM;
+ goto put;
+ }
+ lttng_write_event_header(&client_config, ctx, &client_ctx, event_id);
+ return 0;
+put:
+ lib_ring_buffer_nesting_dec(&client_config);
+ return ret;
+}
+
+static
+void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ lib_ring_buffer_commit(&client_config, ctx);
+ lib_ring_buffer_nesting_dec(&client_config);
+}
+
+static
+void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const void *src, size_t len, size_t alignment)
+{
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
+ lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+void lttng_event_strcpy(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len)
+{
+ lib_ring_buffer_strcpy(&client_config, ctx, src, len, '#');
+}
+
+static
+void lttng_event_pstrcpy_pad(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len)
+{
+ lib_ring_buffer_pstrcpy(&client_config, ctx, src, len, '\0');
+}
+
+static
+int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_finalized(rb_chan);
+}
+
+static
+int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_disabled(rb_chan);
+}
+
+static
+int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+ int cpu;
+
+ for_each_channel_cpu(cpu, rb_chan) {
+ int shm_fd, wait_fd, wakeup_fd;
+ uint64_t memory_map_size;
+
+ buf = channel_get_ring_buffer(&client_config, rb_chan,
+ cpu, rb_chan->handle, &shm_fd, &wait_fd,
+ &wakeup_fd, &memory_map_size);
+ lib_ring_buffer_switch(&client_config, buf,
+ SWITCH_ACTIVE, rb_chan->handle);
+ }
+ return 0;
+}
+
+static struct lttng_transport lttng_relay_transport = {
+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
+ .ops = {
+ .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
+ .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
+		.priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
+			.pub = &lttng_relay_transport.ops,
+ .channel_create = _channel_create,
+ .channel_destroy = lttng_channel_destroy,
+ .packet_avail_size = NULL, /* Would be racy anyway */
+ .is_finalized = lttng_is_finalized,
+ .is_disabled = lttng_is_disabled,
+ .flush_buffer = lttng_flush_buffer,
+ }),
+ .event_reserve = lttng_event_reserve,
+ .event_commit = lttng_event_commit,
+ .event_write = lttng_event_write,
+ .event_strcpy = lttng_event_strcpy,
+ .event_pstrcpy_pad = lttng_event_pstrcpy_pad,
+ },
+ .client_config = &client_config,
+};
+
+void RING_BUFFER_MODE_TEMPLATE_INIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" init\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_register(&lttng_relay_transport);
+}
+
+void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" exit\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_unregister(&lttng_relay_transport);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer client template.
+ */
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <ust-events-internal.h>
+#include "ust-bitfield.h"
+#include "ust-compat.h"
+#include "lttng-tracer.h"
+#include "../libringbuffer/frontend_types.h"
+#include <urcu/tls-compat.h>
+
+struct metadata_packet_header {
+ uint32_t magic; /* 0x75D11D57 */
+ uint8_t uuid[LTTNG_UST_UUID_LEN]; /* Unique Universal Identifier */
+ uint32_t checksum; /* 0 if unused */
+ uint32_t content_size; /* in bits */
+ uint32_t packet_size; /* in bits */
+ uint8_t compression_scheme; /* 0 if unused */
+ uint8_t encryption_scheme; /* 0 if unused */
+ uint8_t checksum_scheme; /* 0 if unused */
+ uint8_t major; /* CTF spec major version number */
+ uint8_t minor; /* CTF spec minor version number */
+ uint8_t header_end[0];
+};
+
+struct metadata_record_header {
+ uint8_t header_end[0]; /* End of header */
+};
+
+static const struct lttng_ust_lib_ring_buffer_config client_config;
+
+/* No nested use supported for metadata ring buffer. */
+static DEFINE_URCU_TLS(struct lttng_ust_lib_ring_buffer_ctx_private, private_ctx);
+
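+/*
+ * The metadata stream carries no timestamps and no per-record header:
+ * the clock read callback always returns 0 and the record header size
+ * is 0.
+ */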
+static inline uint64_t lib_ring_buffer_clock_read(
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+{
+ return 0;
+}
+
+static inline
+size_t record_header_size(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ size_t offset __attribute__((unused)),
+ size_t *pre_header_padding __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
+ void *client_ctx __attribute__((unused)))
+{
+ return 0;
+}
+
+#include "../libringbuffer/api.h"
+#include "lttng-rb-clients.h"
+
+static uint64_t client_ring_buffer_clock_read(
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)))
+{
+ return 0;
+}
+
+static
+size_t client_record_header_size(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ size_t offset __attribute__((unused)),
+ size_t *pre_header_padding __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
+ void *client_ctx __attribute__((unused)))
+{
+ return 0;
+}
+
+/**
+ * client_packet_header_size - called on buffer-switch to a new sub-buffer
+ *
+ * Return header size without padding after the structure. Don't use packed
+ * structure because gcc generates inefficient code on some architectures
+ * (powerpc, mips..)
+ */
+static size_t client_packet_header_size(void)
+{
+ return offsetof(struct metadata_packet_header, header_end);
+}
+
+static void client_buffer_begin(struct lttng_ust_lib_ring_buffer *buf,
+ uint64_t tsc __attribute__((unused)),
+ unsigned int subbuf_idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct metadata_packet_header *header =
+ (struct metadata_packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ struct lttng_ust_channel_buffer *lttng_chan = channel_get_private(chan);
+
+ assert(header);
+ if (!header)
+ return;
+ header->magic = TSDL_MAGIC_NUMBER;
+ memcpy(header->uuid, lttng_chan->priv->uuid, sizeof(lttng_chan->priv->uuid));
+ header->checksum = 0; /* 0 if unused */
+ header->content_size = 0xFFFFFFFF; /* in bits, for debugging */
+ header->packet_size = 0xFFFFFFFF; /* in bits, for debugging */
+ header->compression_scheme = 0; /* 0 if unused */
+ header->encryption_scheme = 0; /* 0 if unused */
+ header->checksum_scheme = 0; /* 0 if unused */
+ header->major = CTF_SPEC_MAJOR;
+ header->minor = CTF_SPEC_MINOR;
+}
+
+/*
+ * offset is assumed never to be 0 here: we never deliver a completely
+ * empty subbuffer. data_size is between 1 and subbuf_size.
+ */
+static void client_buffer_end(struct lttng_ust_lib_ring_buffer *buf,
+ uint64_t tsc __attribute__((unused)),
+ unsigned int subbuf_idx, unsigned long data_size,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+ struct metadata_packet_header *header =
+ (struct metadata_packet_header *)
+ lib_ring_buffer_offset_address(&buf->backend,
+ subbuf_idx * chan->backend.subbuf_size,
+ handle);
+ unsigned long records_lost = 0;
+
+ assert(header);
+ if (!header)
+ return;
+ header->content_size = data_size * CHAR_BIT; /* in bits */
+ header->packet_size = LTTNG_UST_PAGE_ALIGN(data_size) * CHAR_BIT; /* in bits */
+ /*
+	 * We do not care about the records lost count: the metadata
+	 * channel waits and retries when the buffer is full.
+ */
+ (void) lib_ring_buffer_get_records_lost_full(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
+ records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
+ WARN_ON_ONCE(records_lost != 0);
+}
+
+static int client_buffer_create(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ const char *name __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+ return 0;
+}
+
+static void client_buffer_finalize(
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ void *priv __attribute__((unused)),
+ int cpu __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+}
+
+static const
+struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
+ .parent = {
+ .ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .record_header_size = client_record_header_size,
+ .subbuffer_header_size = client_packet_header_size,
+ .buffer_begin = client_buffer_begin,
+ .buffer_end = client_buffer_end,
+ .buffer_create = client_buffer_create,
+ .buffer_finalize = client_buffer_finalize,
+ },
+};
+
+static const struct lttng_ust_lib_ring_buffer_config client_config = {
+ .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+ .cb.record_header_size = client_record_header_size,
+ .cb.subbuffer_header_size = client_packet_header_size,
+ .cb.buffer_begin = client_buffer_begin,
+ .cb.buffer_end = client_buffer_end,
+ .cb.buffer_create = client_buffer_create,
+ .cb.buffer_finalize = client_buffer_finalize,
+
+ .tsc_bits = 0,
+ .alloc = RING_BUFFER_ALLOC_GLOBAL,
+ .sync = RING_BUFFER_SYNC_GLOBAL,
+ .mode = RING_BUFFER_MODE_TEMPLATE,
+ .backend = RING_BUFFER_PAGE,
+ .output = RING_BUFFER_MMAP,
+ .oops = RING_BUFFER_OOPS_CONSISTENCY,
+ .ipi = RING_BUFFER_NO_IPI_BARRIER,
+ .wakeup = RING_BUFFER_WAKEUP_BY_WRITER,
+ .client_type = LTTNG_CLIENT_TYPE,
+
+ .cb_ptr = &client_cb.parent,
+};
+
+static
+struct lttng_ust_channel_buffer *_channel_create(const char *name,
+ void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ unsigned char *uuid,
+ uint32_t chan_id,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout)
+{
+ struct lttng_ust_abi_channel_config chan_priv_init;
+ struct lttng_ust_shm_handle *handle;
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+
+ lttng_chan_buf = lttng_ust_alloc_channel_buffer();
+ if (!lttng_chan_buf)
+ return NULL;
+ memcpy(lttng_chan_buf->priv->uuid, uuid, LTTNG_UST_UUID_LEN);
+ lttng_chan_buf->priv->id = chan_id;
+
+ memset(&chan_priv_init, 0, sizeof(chan_priv_init));
+ memcpy(chan_priv_init.uuid, uuid, LTTNG_UST_UUID_LEN);
+ chan_priv_init.id = chan_id;
+
+ handle = channel_create(&client_config, name,
+ __alignof__(struct lttng_ust_channel_buffer),
+ sizeof(struct lttng_ust_channel_buffer),
+ &chan_priv_init,
+ lttng_chan_buf, buf_addr, subbuf_size, num_subbuf,
+ switch_timer_interval, read_timer_interval,
+ stream_fds, nr_stream_fds, blocking_timeout);
+ if (!handle)
+ goto error;
+ lttng_chan_buf->priv->rb_chan = shmp(handle, handle->chan);
+ return lttng_chan_buf;
+
+error:
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+ return NULL;
+}
+
+static
+void lttng_channel_destroy(struct lttng_ust_channel_buffer *lttng_chan_buf)
+{
+ channel_destroy(lttng_chan_buf->priv->rb_chan, lttng_chan_buf->priv->rb_chan->handle, 1);
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+}
+
+static
+int lttng_event_reserve(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ int ret;
+
+ memset(&URCU_TLS(private_ctx), 0, sizeof(struct lttng_ust_lib_ring_buffer_ctx_private));
+ URCU_TLS(private_ctx).pub = ctx;
+ URCU_TLS(private_ctx).chan = ctx->client_priv;
+ ctx->priv = &URCU_TLS(private_ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
+ if (ret)
+ return ret;
+ if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &ctx->priv->backend_pages))
+ return -EPERM;
+ return 0;
+}
+
+static
+void lttng_event_commit(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ lib_ring_buffer_commit(&client_config, ctx);
+}
+
+static
+void lttng_event_write(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const void *src, size_t len, size_t alignment)
+{
+ lttng_ust_lib_ring_buffer_align_ctx(ctx, alignment);
+ lib_ring_buffer_write(&client_config, ctx, src, len);
+}
+
+static
+size_t lttng_packet_avail_size(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ unsigned long o_begin;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+	buf = shmp(rb_chan->handle, rb_chan->backend.buf[0].shmp);	/* Only for global buffer! */
+ o_begin = v_read(&client_config, &buf->offset);
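+	/*
+	 * Mid-packet, the packet header has already been accounted for;
+	 * at a sub-buffer boundary, reserve room for the header that the
+	 * upcoming packet will need.
+	 */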
+ if (subbuf_offset(o_begin, rb_chan) != 0) {
+ return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan);
+ } else {
+ return rb_chan->backend.subbuf_size - subbuf_offset(o_begin, rb_chan)
+ - sizeof(struct metadata_packet_header);
+ }
+}
+
+static
+int lttng_is_finalized(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_finalized(rb_chan);
+}
+
+static
+int lttng_is_disabled(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+
+ return lib_ring_buffer_channel_is_disabled(rb_chan);
+}
+
+static
+int lttng_flush_buffer(struct lttng_ust_channel_buffer *chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan = chan->priv->rb_chan;
+ struct lttng_ust_lib_ring_buffer *buf;
+ int shm_fd, wait_fd, wakeup_fd;
+ uint64_t memory_map_size;
+
+ buf = channel_get_ring_buffer(&client_config, rb_chan,
+ 0, rb_chan->handle, &shm_fd, &wait_fd, &wakeup_fd,
+ &memory_map_size);
+ lib_ring_buffer_switch(&client_config, buf,
+ SWITCH_ACTIVE, rb_chan->handle);
+ return 0;
+}
+
+static struct lttng_transport lttng_relay_transport = {
+ .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap",
+ .ops = {
+ .struct_size = sizeof(struct lttng_ust_channel_buffer_ops),
+
+ .priv = __LTTNG_COMPOUND_LITERAL(struct lttng_ust_channel_buffer_ops_private, {
+			.pub = &lttng_relay_transport.ops,
+ .channel_create = _channel_create,
+ .channel_destroy = lttng_channel_destroy,
+ .packet_avail_size = lttng_packet_avail_size,
+ .is_finalized = lttng_is_finalized,
+ .is_disabled = lttng_is_disabled,
+ .flush_buffer = lttng_flush_buffer,
+ }),
+ .event_reserve = lttng_event_reserve,
+ .event_commit = lttng_event_commit,
+ .event_write = lttng_event_write,
+ },
+ .client_config = &client_config,
+};
+
+void RING_BUFFER_MODE_TEMPLATE_INIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" init\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_register(&lttng_relay_transport);
+}
+
+void RING_BUFFER_MODE_TEMPLATE_EXIT(void)
+{
+ DBG("LTT : ltt ring buffer client \"%s\" exit\n",
+ "relay-" RING_BUFFER_MODE_TEMPLATE_STRING "-mmap");
+	lttng_transport_unregister(&lttng_relay_transport);
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng lib ring buffer metadata client.
+ */
+
+#define _LGPL_SOURCE
+#include "lttng-tracer.h"
+
+#define RING_BUFFER_MODE_TEMPLATE RING_BUFFER_DISCARD
+#define RING_BUFFER_MODE_TEMPLATE_STRING "metadata"
+#define RING_BUFFER_MODE_TEMPLATE_INIT \
+ lttng_ring_buffer_metadata_client_init
+#define RING_BUFFER_MODE_TEMPLATE_EXIT \
+ lttng_ring_buffer_metadata_client_exit
+#define LTTNG_CLIENT_TYPE LTTNG_CLIENT_METADATA
+#include "lttng-ring-buffer-metadata-client-template.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This contains the core definitions for the Linux Trace Toolkit.
+ */
+
+#ifndef _LTTNG_TRACER_CORE_H
+#define _LTTNG_TRACER_CORE_H
+
+#include <stddef.h>
+#include <urcu/arch.h>
+#include <urcu/list.h>
+#include <lttng/ust-tracer.h>
+#include <lttng/ringbuffer-context.h>
+#include <usterr-signal-safe.h>
+
+/*
+ * The longest possible namespace proc path is with the cgroup ns and
+ * the maximum theoretical Linux PID of 536870912 (35 characters plus
+ * the terminating NUL):
+ *
+ * /proc/self/task/536870912/ns/cgroup
+ */
+#define LTTNG_PROC_NS_PATH_MAX 40
+
+struct lttng_ust_session;
+struct lttng_ust_channel_buffer;
+struct lttng_ust_ctx_field;
+struct lttng_ust_lib_ring_buffer_ctx;
+struct lttng_ust_ctx_value;
+struct lttng_ust_event_recorder;
+struct lttng_ust_event_notifier;
+struct lttng_ust_notification_ctx;
+
+int ust_lock(void) __attribute__ ((warn_unused_result))
+ __attribute__((visibility("hidden")));
+
+void ust_lock_nocheck(void)
+ __attribute__((visibility("hidden")));
+
+void ust_unlock(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_event_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_vtid_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_procname_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_cgroup_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_ipc_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_net_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_time_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_fixup_uts_ns_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_fixup_fd_tracker_tls(void)
+ __attribute__((visibility("hidden")));
+
+const char *lttng_ust_obj_get_name(int id)
+ __attribute__((visibility("hidden")));
+
+int lttng_get_notify_socket(void *owner)
+ __attribute__((visibility("hidden")));
+
+char *lttng_ust_sockinfo_get_procname(void *owner)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_sockinfo_session_enabled(void *owner)
+ __attribute__((visibility("hidden")));
+
+ssize_t lttng_ust_read(int fd, void *buf, size_t len)
+ __attribute__((visibility("hidden")));
+
+size_t lttng_ust_dummy_get_size(void *priv, size_t offset)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_dummy_record(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_dummy_get_value(void *priv, struct lttng_ust_ctx_value *value)
+ __attribute__((visibility("hidden")));
+
+void lttng_event_notifier_notification_send(
+ struct lttng_ust_event_notifier *event_notifier,
+ const char *stack_data,
+ struct lttng_ust_notification_ctx *notif_ctx)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+ __attribute__((visibility("hidden")));
+
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+ __attribute__((visibility("hidden")));
+
+#ifdef HAVE_LINUX_PERF_EVENT_H
+void lttng_ust_fixup_perf_counter_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_perf_lock(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_perf_unlock(void)
+ __attribute__((visibility("hidden")));
+#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
+static inline
+void lttng_ust_fixup_perf_counter_tls(void)
+{
+}
+static inline
+void lttng_perf_lock(void)
+{
+}
+static inline
+void lttng_perf_unlock(void)
+{
+}
+#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
+
+#endif /* _LTTNG_TRACER_CORE_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This contains the definitions for the Linux Trace Toolkit tracer.
+ *
+ * Ported to userspace by Pierre-Marc Fournier.
+ */
+
+#ifndef _LTTNG_TRACER_H
+#define _LTTNG_TRACER_H
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <lttng/ust-events.h>
+#include "lttng-tracer-core.h"
+#include "compat.h"
+
+/* Tracer properties */
+#define CTF_MAGIC_NUMBER 0xC1FC1FC1
+#define TSDL_MAGIC_NUMBER 0x75D11D57
+
+/* CTF specification version followed */
+#define CTF_SPEC_MAJOR 1
+#define CTF_SPEC_MINOR 8
+
+/*
+ * Number of milliseconds during which metadata writes are retried on a
+ * buffer-full condition before failing (10 seconds).
+ */
+#define LTTNG_METADATA_TIMEOUT_MSEC 10000
+
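+/* Record flags: EXTENDED requests the extended event header. */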
+#define LTTNG_RFLAG_EXTENDED RING_BUFFER_RFLAG_END
+#define LTTNG_RFLAG_END (LTTNG_RFLAG_EXTENDED << 1)
+
+/*
+ * LTTng client type enumeration. Used by the consumer to map the
+ * callbacks from its own address space.
+ */
+enum lttng_client_types {
+ LTTNG_CLIENT_METADATA = 0,
+ LTTNG_CLIENT_DISCARD = 1,
+ LTTNG_CLIENT_OVERWRITE = 2,
+ LTTNG_CLIENT_DISCARD_RT = 3,
+ LTTNG_CLIENT_OVERWRITE_RT = 4,
+ LTTNG_NR_CLIENT_TYPES,
+};
+
+#endif /* _LTTNG_TRACER_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * LTTng UST ABI
+ *
+ * Mimic system calls for:
+ * - session creation, returns an object descriptor or failure.
+ * - channel creation, returns an object descriptor or failure.
+ * - Operates on a session object descriptor
+ * - Takes all channel options as parameters.
+ * - stream get, returns an object descriptor or failure.
+ * - Operates on a channel object descriptor.
+ * - stream notifier get, returns an object descriptor or failure.
+ * - Operates on a channel object descriptor.
+ * - event creation, returns an object descriptor or failure.
+ * - Operates on a channel object descriptor
+ * - Takes an event name as parameter
+ * - Takes an instrumentation source as parameter
+ * - e.g. tracepoints, dynamic_probes...
+ * - Takes instrumentation source specific arguments.
+ */
+
+#define _LGPL_SOURCE
+#include <fcntl.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#include <urcu/compiler.h>
+#include <urcu/list.h>
+
+#include <lttng/tracepoint.h>
+#include <lttng/ust-abi.h>
+#include <lttng/ust-error.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-version.h>
+#include <ust-fd.h>
+#include <usterr-signal-safe.h>
+
+#include "../libringbuffer/frontend_types.h"
+#include "../libringbuffer/frontend.h"
+#include "../libringbuffer/shm.h"
+#include "../libcounter/counter.h"
+#include "tracepoint-internal.h"
+#include "lttng-tracer.h"
+#include "string-utils.h"
+#include "ust-events-internal.h"
+#include "context-internal.h"
+#include "ust-helper.h"
+
+#define OBJ_NAME_LEN 16
+
+static int lttng_ust_abi_close_in_progress;
+
+static
+int lttng_abi_tracepoint_list(void *owner);
+static
+int lttng_abi_tracepoint_field_list(void *owner);
+
+/*
+ * Object descriptor table. Should be protected from concurrent access
+ * by the caller.
+ */
+
+struct lttng_ust_abi_obj {
+ union {
+ struct {
+ void *private_data;
+ const struct lttng_ust_abi_objd_ops *ops;
+ int f_count;
+ int owner_ref; /* has ref from owner */
+ void *owner;
+ char name[OBJ_NAME_LEN];
+ } s;
+		int freelist_next;	/* index of next free slot; -1 ends the list */
+ } u;
+};
+
+struct lttng_ust_abi_objd_table {
+ struct lttng_ust_abi_obj *array;
+ unsigned int len, allocated_len;
+	int freelist_head;	/* index of first free slot; -1 when empty */
+};
+
+static struct lttng_ust_abi_objd_table objd_table = {
+ .freelist_head = -1,
+};
+
+static
+int objd_alloc(void *private_data, const struct lttng_ust_abi_objd_ops *ops,
+ void *owner, const char *name)
+{
+ struct lttng_ust_abi_obj *obj;
+
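+	/* Reuse a previously freed slot when one is available. */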
+ if (objd_table.freelist_head != -1) {
+ obj = &objd_table.array[objd_table.freelist_head];
+ objd_table.freelist_head = obj->u.freelist_next;
+ goto end;
+ }
+
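+	/* Grow the table geometrically (doubling its capacity) when full. */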
+ if (objd_table.len >= objd_table.allocated_len) {
+ unsigned int new_allocated_len, old_allocated_len;
+ struct lttng_ust_abi_obj *new_table, *old_table;
+
+ old_allocated_len = objd_table.allocated_len;
+ old_table = objd_table.array;
+ if (!old_allocated_len)
+ new_allocated_len = 1;
+ else
+ new_allocated_len = old_allocated_len << 1;
+ new_table = zmalloc(sizeof(struct lttng_ust_abi_obj) * new_allocated_len);
+ if (!new_table)
+ return -ENOMEM;
+ memcpy(new_table, old_table,
+ sizeof(struct lttng_ust_abi_obj) * old_allocated_len);
+ free(old_table);
+ objd_table.array = new_table;
+ objd_table.allocated_len = new_allocated_len;
+ }
+ obj = &objd_table.array[objd_table.len];
+ objd_table.len++;
+end:
+ obj->u.s.private_data = private_data;
+ obj->u.s.ops = ops;
+ obj->u.s.f_count = 2; /* count == 1 : object is allocated */
+ /* count == 2 : allocated + hold ref */
+ obj->u.s.owner_ref = 1; /* One owner reference */
+ obj->u.s.owner = owner;
+ strncpy(obj->u.s.name, name, OBJ_NAME_LEN);
+ obj->u.s.name[OBJ_NAME_LEN - 1] = '\0';
+ return obj - objd_table.array;
+}
+
+static
+struct lttng_ust_abi_obj *_objd_get(int id)
+{
+ if (id >= objd_table.len)
+ return NULL;
+ if (!objd_table.array[id].u.s.f_count)
+ return NULL;
+ return &objd_table.array[id];
+}
+
+static
+void *objd_private(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+ assert(obj);
+ return obj->u.s.private_data;
+}
+
+static
+void objd_set_private(int id, void *private_data)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+ assert(obj);
+ obj->u.s.private_data = private_data;
+}
+
+const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ if (!obj)
+ return NULL;
+ return obj->u.s.ops;
+}
+
+static
+void objd_free(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ assert(obj);
+ obj->u.freelist_next = objd_table.freelist_head;
+ objd_table.freelist_head = obj - objd_table.array;
+ assert(obj->u.s.f_count == 1);
+ obj->u.s.f_count = 0; /* deallocated */
+}
+
+static
+void objd_ref(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+ assert(obj != NULL);
+ obj->u.s.f_count++;
+}
+
+int lttng_ust_abi_objd_unref(int id, int is_owner)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ if (!obj)
+ return -EINVAL;
+ if (obj->u.s.f_count == 1) {
+ ERR("Reference counting error\n");
+ return -EINVAL;
+ }
+ if (is_owner) {
+ if (!obj->u.s.owner_ref) {
+ ERR("Error decrementing owner reference");
+ return -EINVAL;
+ }
+ obj->u.s.owner_ref--;
+ }
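+	/*
+	 * Dropping to f_count == 1 leaves only the allocation count: run
+	 * the release callback and free the slot.
+	 */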
+ if ((--obj->u.s.f_count) == 1) {
+ const struct lttng_ust_abi_objd_ops *ops = lttng_ust_abi_objd_ops(id);
+
+ if (ops->release)
+ ops->release(id);
+ objd_free(id);
+ }
+ return 0;
+}
+
+static
+void objd_table_destroy(void)
+{
+ int i;
+
+ for (i = 0; i < objd_table.allocated_len; i++) {
+ struct lttng_ust_abi_obj *obj;
+
+ obj = _objd_get(i);
+ if (!obj)
+ continue;
+ if (!obj->u.s.owner_ref)
+ continue; /* only unref owner ref. */
+ (void) lttng_ust_abi_objd_unref(i, 1);
+ }
+ free(objd_table.array);
+ objd_table.array = NULL;
+ objd_table.len = 0;
+ objd_table.allocated_len = 0;
+ objd_table.freelist_head = -1;
+}
+
+const char *lttng_ust_obj_get_name(int id)
+{
+ struct lttng_ust_abi_obj *obj = _objd_get(id);
+
+ if (!obj)
+ return NULL;
+ return obj->u.s.name;
+}
+
+void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
+{
+ int i;
+
+ for (i = 0; i < objd_table.allocated_len; i++) {
+ struct lttng_ust_abi_obj *obj;
+
+ obj = _objd_get(i);
+ if (!obj)
+ continue;
+ if (!obj->u.s.owner)
+ continue; /* skip root handles */
+ if (!obj->u.s.owner_ref)
+ continue; /* only unref owner ref. */
+ if (obj->u.s.owner == owner)
+ (void) lttng_ust_abi_objd_unref(i, 1);
+ }
+}
+
+/*
+ * This is LTTng's own personal way to create an ABI for sessiond.
+ * We send commands over a socket.
+ */
+
+static const struct lttng_ust_abi_objd_ops lttng_ops;
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops;
+static const struct lttng_ust_abi_objd_ops lttng_session_ops;
+static const struct lttng_ust_abi_objd_ops lttng_channel_ops;
+static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops;
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops;
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops;
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops;
+
+int lttng_abi_create_root_handle(void)
+{
+ int root_handle;
+
+ /* root handles have NULL owners */
+	root_handle = objd_alloc(NULL, &lttng_ops, NULL, "root");
+ return root_handle;
+}
+
+static
+int lttng_is_channel_ready(struct lttng_ust_channel_buffer *lttng_chan)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ unsigned int nr_streams, exp_streams;
+
+ chan = lttng_chan->priv->rb_chan;
+ nr_streams = channel_handle_get_nr_streams(lttng_chan->priv->rb_chan->handle);
+ exp_streams = chan->nr_streams;
+ return nr_streams == exp_streams;
+}
+
+static
+int lttng_abi_create_session(void *owner)
+{
+ struct lttng_ust_session *session;
+ int session_objd, ret;
+
+ session = lttng_session_create();
+ if (!session)
+ return -ENOMEM;
+	session_objd = objd_alloc(session, &lttng_session_ops, owner, "session");
+ if (session_objd < 0) {
+ ret = session_objd;
+ goto objd_error;
+ }
+ session->priv->objd = session_objd;
+ session->priv->owner = owner;
+ return session_objd;
+
+objd_error:
+ lttng_session_destroy(session);
+ return ret;
+}
+
+static
+long lttng_abi_tracer_version(int objd __attribute__((unused)),
+ struct lttng_ust_abi_tracer_version *v)
+{
+ v->major = LTTNG_UST_MAJOR_VERSION;
+ v->minor = LTTNG_UST_MINOR_VERSION;
+ v->patchlevel = LTTNG_UST_PATCHLEVEL_VERSION;
+ return 0;
+}
+
+static
+int lttng_abi_event_notifier_send_fd(void *owner, int *event_notifier_notif_fd)
+{
+ struct lttng_event_notifier_group *event_notifier_group;
+ int event_notifier_group_objd, ret, fd_flag;
+
+ event_notifier_group = lttng_event_notifier_group_create();
+ if (!event_notifier_group)
+ return -ENOMEM;
+
+ /*
+ * Set this file descriptor as NON-BLOCKING.
+ */
+ fd_flag = fcntl(*event_notifier_notif_fd, F_GETFL);
+
+ fd_flag |= O_NONBLOCK;
+
+ ret = fcntl(*event_notifier_notif_fd, F_SETFL, fd_flag);
+ if (ret) {
+ ret = -errno;
+ goto fd_error;
+ }
+
+ event_notifier_group_objd = objd_alloc(event_notifier_group,
+		&lttng_event_notifier_group_ops, owner, "event_notifier_group");
+ if (event_notifier_group_objd < 0) {
+ ret = event_notifier_group_objd;
+ goto objd_error;
+ }
+
+ event_notifier_group->objd = event_notifier_group_objd;
+ event_notifier_group->owner = owner;
+ event_notifier_group->notification_fd = *event_notifier_notif_fd;
+ /* Object descriptor takes ownership of notification fd. */
+ *event_notifier_notif_fd = -1;
+
+ return event_notifier_group_objd;
+
+objd_error:
+ lttng_event_notifier_group_destroy(event_notifier_group);
+fd_error:
+ return ret;
+}
+
+static
+long lttng_abi_add_context(int objd __attribute__((unused)),
+ struct lttng_ust_abi_context *context_param,
+ union lttng_ust_abi_args *uargs,
+ struct lttng_ust_ctx **ctx, struct lttng_ust_session *session)
+{
+ return lttng_attach_context(context_param, uargs, ctx, session);
+}
+
+/**
+ * lttng_cmd - lttng control through socket commands
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_ABI_SESSION
+ * Returns a LTTng trace session object descriptor
+ * LTTNG_UST_ABI_TRACER_VERSION
+ *		Returns the LTTng UST tracer version
+ * LTTNG_UST_ABI_TRACEPOINT_LIST
+ * Returns a file descriptor listing available tracepoints
+ * LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST
+ * Returns a file descriptor listing available tracepoint fields
+ * LTTNG_UST_ABI_WAIT_QUIESCENT
+ * Returns after all previously running probes have completed
+ *
+ * The returned session will be deleted when its file descriptor is closed.
+ */
+static
+long lttng_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ switch (cmd) {
+ case LTTNG_UST_ABI_SESSION:
+ return lttng_abi_create_session(owner);
+ case LTTNG_UST_ABI_TRACER_VERSION:
+ return lttng_abi_tracer_version(objd,
+ (struct lttng_ust_abi_tracer_version *) arg);
+ case LTTNG_UST_ABI_TRACEPOINT_LIST:
+ return lttng_abi_tracepoint_list(owner);
+ case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST:
+ return lttng_abi_tracepoint_field_list(owner);
+ case LTTNG_UST_ABI_WAIT_QUIESCENT:
+ lttng_ust_urcu_synchronize_rcu();
+ return 0;
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
+ return lttng_abi_event_notifier_send_fd(owner,
+ &uargs->event_notifier_handle.event_notifier_notif_fd);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_ops = {
+ .cmd = lttng_cmd,
+};
+
+static
+int lttng_abi_map_channel(int session_objd,
+ struct lttng_ust_abi_channel *ust_chan,
+ union lttng_ust_abi_args *uargs,
+ void *owner)
+{
+ struct lttng_ust_session *session = objd_private(session_objd);
+ const char *transport_name;
+ struct lttng_transport *transport;
+ const char *chan_name;
+ int chan_objd;
+ struct lttng_ust_shm_handle *channel_handle;
+ struct lttng_ust_abi_channel_config *lttng_chan_config;
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer_config *config;
+ void *chan_data;
+ int wakeup_fd;
+ uint64_t len;
+ int ret;
+ enum lttng_ust_abi_chan_type type;
+
+ chan_data = uargs->channel.chan_data;
+ wakeup_fd = uargs->channel.wakeup_fd;
+ len = ust_chan->len;
+ type = ust_chan->type;
+
+ switch (type) {
+ case LTTNG_UST_ABI_CHAN_PER_CPU:
+ break;
+ default:
+ ret = -EINVAL;
+ goto invalid;
+ }
+
+ if (session->priv->been_active) {
+ ret = -EBUSY;
+ goto active; /* Refuse to add channel to active session */
+ }
+
+ lttng_chan_buf = lttng_ust_alloc_channel_buffer();
+ if (!lttng_chan_buf) {
+ ret = -ENOMEM;
+ goto lttng_chan_buf_error;
+ }
+
+ channel_handle = channel_handle_create(chan_data, len, wakeup_fd);
+ if (!channel_handle) {
+ ret = -EINVAL;
+ goto handle_error;
+ }
+
+ /* Ownership of chan_data and wakeup_fd taken by channel handle. */
+ uargs->channel.chan_data = NULL;
+ uargs->channel.wakeup_fd = -1;
+
+ chan = shmp(channel_handle, channel_handle->chan);
+ assert(chan);
+ chan->handle = channel_handle;
+ config = &chan->backend.config;
+ lttng_chan_config = channel_get_private_config(chan);
+ if (!lttng_chan_config) {
+ ret = -EINVAL;
+ goto alloc_error;
+ }
+
+ if (lttng_ust_session_uuid_validate(session, lttng_chan_config->uuid)) {
+ ret = -EINVAL;
+ goto uuid_error;
+ }
+
+ /* Lookup transport name */
+ switch (type) {
+ case LTTNG_UST_ABI_CHAN_PER_CPU:
+ if (config->output == RING_BUFFER_MMAP) {
+ if (config->mode == RING_BUFFER_OVERWRITE) {
+ if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
+ transport_name = "relay-overwrite-mmap";
+ } else {
+ transport_name = "relay-overwrite-rt-mmap";
+ }
+ } else {
+ if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER) {
+ transport_name = "relay-discard-mmap";
+ } else {
+ transport_name = "relay-discard-rt-mmap";
+ }
+ }
+ } else {
+ ret = -EINVAL;
+ goto notransport;
+ }
+ chan_name = "channel";
+ break;
+ default:
+ ret = -EINVAL;
+ goto notransport;
+ }
+ transport = lttng_ust_transport_find(transport_name);
+ if (!transport) {
+ DBG("LTTng transport %s not found\n",
+ transport_name);
+ ret = -EINVAL;
+ goto notransport;
+ }
+
+	chan_objd = objd_alloc(NULL, &lttng_channel_ops, owner, chan_name);
+ if (chan_objd < 0) {
+ ret = chan_objd;
+ goto objd_error;
+ }
+
+ /* Initialize our lttng chan */
+ lttng_chan_buf->parent->enabled = 1;
+ lttng_chan_buf->parent->session = session;
+
+ lttng_chan_buf->priv->parent.tstate = 1;
+ lttng_chan_buf->priv->ctx = NULL;
+ lttng_chan_buf->priv->rb_chan = chan;
+
+ lttng_chan_buf->ops = &transport->ops;
+
+ memcpy(&chan->backend.config,
+ transport->client_config,
+ sizeof(chan->backend.config));
+	cds_list_add(&lttng_chan_buf->priv->node, &session->priv->chan_head);
+ lttng_chan_buf->priv->header_type = 0;
+ lttng_chan_buf->priv->type = type;
+ /* Copy fields from lttng ust chan config. */
+ lttng_chan_buf->priv->id = lttng_chan_config->id;
+ memcpy(lttng_chan_buf->priv->uuid, lttng_chan_config->uuid, LTTNG_UST_UUID_LEN);
+ channel_set_private(chan, lttng_chan_buf);
+
+ /*
+ * We tolerate no failure path after channel creation. It will stay
+ * invariant for the rest of the session.
+ */
+ objd_set_private(chan_objd, lttng_chan_buf);
+ lttng_chan_buf->priv->parent.objd = chan_objd;
+ /* The channel created holds a reference on the session */
+ objd_ref(session_objd);
+ return chan_objd;
+
+ /* error path after channel was created */
+objd_error:
+notransport:
+uuid_error:
+alloc_error:
+ channel_destroy(chan, channel_handle, 0);
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+ return ret;
+
+handle_error:
+ lttng_ust_free_channel_common(lttng_chan_buf->parent);
+lttng_chan_buf_error:
+active:
+invalid:
+ return ret;
+}
+
+/**
+ * lttng_session_cmd - lttng session object command
+ *
+ * @obj: the object
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_ABI_CHANNEL
+ * Returns a LTTng channel object descriptor
+ * LTTNG_UST_ABI_ENABLE
+ * Enables tracing for a session (weak enable)
+ * LTTNG_UST_ABI_DISABLE
+ * Disables tracing for a session (strong disable)
+ *
+ * The returned channel will be deleted when its file descriptor is closed.
+ */
+static
+long lttng_session_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ struct lttng_ust_session *session = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_CHANNEL:
+ return lttng_abi_map_channel(objd,
+ (struct lttng_ust_abi_channel *) arg,
+ uargs, owner);
+ case LTTNG_UST_ABI_SESSION_START:
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_session_enable(session);
+ case LTTNG_UST_ABI_SESSION_STOP:
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_session_disable(session);
+ case LTTNG_UST_ABI_SESSION_STATEDUMP:
+ return lttng_session_statedump(session);
+ case LTTNG_UST_ABI_COUNTER:
+ case LTTNG_UST_ABI_COUNTER_GLOBAL:
+ case LTTNG_UST_ABI_COUNTER_CPU:
+ /* Not implemented yet. */
+ return -EINVAL;
+ default:
+ return -EINVAL;
+ }
+}
+
+/*
+ * Called when the last file reference is dropped.
+ *
+ * Big fat note: channels and events are invariant for the whole session after
+ * their creation. So this session destruction also destroys all channel and
+ * event structures specific to this session (they are not destroyed when their
+ * individual file is released).
+ */
+static
+int lttng_release_session(int objd)
+{
+ struct lttng_ust_session *session = objd_private(objd);
+
+ if (session) {
+ lttng_session_destroy(session);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_session_ops = {
+ .release = lttng_release_session,
+ .cmd = lttng_session_cmd,
+};
+
+static int lttng_ust_event_notifier_enabler_create(int event_notifier_group_obj,
+ void *owner, struct lttng_ust_abi_event_notifier *event_notifier_param,
+ enum lttng_enabler_format_type type)
+{
+ struct lttng_event_notifier_group *event_notifier_group =
+ objd_private(event_notifier_group_obj);
+ struct lttng_event_notifier_enabler *event_notifier_enabler;
+ int event_notifier_objd, ret;
+
+ event_notifier_param->event.name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+	event_notifier_objd = objd_alloc(NULL, &lttng_event_notifier_enabler_ops, owner,
+ "event_notifier enabler");
+ if (event_notifier_objd < 0) {
+ ret = event_notifier_objd;
+ goto objd_error;
+ }
+
+ event_notifier_enabler = lttng_event_notifier_enabler_create(
+ event_notifier_group, type, event_notifier_param);
+ if (!event_notifier_enabler) {
+ ret = -ENOMEM;
+ goto event_notifier_error;
+ }
+
+ objd_set_private(event_notifier_objd, event_notifier_enabler);
+ /* The event_notifier holds a reference on the event_notifier group. */
+ objd_ref(event_notifier_enabler->group->objd);
+
+ return event_notifier_objd;
+
+event_notifier_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(event_notifier_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+long lttng_event_notifier_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs __attribute__((unused)),
+ void *owner __attribute__((unused)))
+{
+	struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_FILTER:
+ return lttng_event_notifier_enabler_attach_filter_bytecode(
+ event_notifier_enabler,
+ (struct lttng_ust_bytecode_node **) arg);
+ case LTTNG_UST_ABI_EXCLUSION:
+ return lttng_event_notifier_enabler_attach_exclusion(event_notifier_enabler,
+ (struct lttng_ust_excluder_node **) arg);
+ case LTTNG_UST_ABI_CAPTURE:
+ return lttng_event_notifier_enabler_attach_capture_bytecode(
+ event_notifier_enabler,
+ (struct lttng_ust_bytecode_node **) arg);
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_event_notifier_enabler_enable(event_notifier_enabler);
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_event_notifier_enabler_disable(event_notifier_enabler);
+ default:
+ return -EINVAL;
+ }
+}
+
+/**
+ * lttng_event_notifier_group_error_counter_cmd - lttng event_notifier group error counter object command
+ *
+ * @obj: the object
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This descriptor implements lttng commands:
+ * LTTNG_UST_ABI_COUNTER_GLOBAL
+ * Return negative error code on error, 0 on success.
+ * LTTNG_UST_ABI_COUNTER_CPU
+ * Return negative error code on error, 0 on success.
+ */
+static
+long lttng_event_notifier_group_error_counter_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
+{
+ int ret;
+ struct lttng_counter *counter = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_COUNTER_GLOBAL:
+ ret = -EINVAL; /* Unimplemented. */
+ break;
+ case LTTNG_UST_ABI_COUNTER_CPU:
+ {
+ struct lttng_ust_abi_counter_cpu *counter_cpu =
+ (struct lttng_ust_abi_counter_cpu *)arg;
+
+ ret = lttng_counter_set_cpu_shm(counter->counter,
+ counter_cpu->cpu_nr, uargs->counter_shm.shm_fd);
+ if (!ret) {
+ /* Take ownership of the shm_fd. */
+ uargs->counter_shm.shm_fd = -1;
+ }
+ break;
+ }
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int lttng_release_event_notifier_group_error_counter(int objd)
+ __attribute__((visibility("hidden")));
+int lttng_release_event_notifier_group_error_counter(int objd)
+{
+ struct lttng_counter *counter = objd_private(objd);
+
+ if (counter) {
+ return lttng_ust_abi_objd_unref(counter->event_notifier_group->objd, 0);
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_error_counter_ops = {
+ .release = lttng_release_event_notifier_group_error_counter,
+ .cmd = lttng_event_notifier_group_error_counter_cmd,
+};
+
+static
+int lttng_ust_event_notifier_group_create_error_counter(int event_notifier_group_objd, void *owner,
+ struct lttng_ust_abi_counter_conf *error_counter_conf)
+{
+ const char *counter_transport_name;
+ struct lttng_event_notifier_group *event_notifier_group =
+ objd_private(event_notifier_group_objd);
+ struct lttng_counter *counter;
+ int counter_objd, ret;
+ struct lttng_counter_dimension dimensions[1];
+ size_t counter_len;
+
+ if (event_notifier_group->error_counter)
+ return -EBUSY;
+
+ if (error_counter_conf->arithmetic != LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR)
+ return -EINVAL;
+
+ if (error_counter_conf->number_dimensions != 1)
+ return -EINVAL;
+
+ switch (error_counter_conf->bitness) {
+ case LTTNG_UST_ABI_COUNTER_BITNESS_64:
+ counter_transport_name = "counter-per-cpu-64-modular";
+ break;
+ case LTTNG_UST_ABI_COUNTER_BITNESS_32:
+ counter_transport_name = "counter-per-cpu-32-modular";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+	counter_objd = objd_alloc(NULL, &lttng_event_notifier_group_error_counter_ops, owner,
+ "event_notifier group error counter");
+ if (counter_objd < 0) {
+ ret = counter_objd;
+ goto objd_error;
+ }
+
+ counter_len = error_counter_conf->dimensions[0].size;
+ dimensions[0].size = counter_len;
+ dimensions[0].underflow_index = 0;
+ dimensions[0].overflow_index = 0;
+ dimensions[0].has_underflow = 0;
+ dimensions[0].has_overflow = 0;
+
+ counter = lttng_ust_counter_create(counter_transport_name, 1, dimensions);
+ if (!counter) {
+ ret = -EINVAL;
+ goto create_error;
+ }
+
+ event_notifier_group->error_counter_len = counter_len;
+	/*
+	 * The store-release publishing the error counter matches the
+	 * load-acquire in record_error. It ensures the counter is
+	 * created and error_counter_len is set before they are used.
+	 * Currently a full memory barrier is used, which could be
+	 * turned into acquire-release barriers.
+	 */
+ cmm_smp_mb();
+ CMM_STORE_SHARED(event_notifier_group->error_counter, counter);
+
+ counter->objd = counter_objd;
+ counter->event_notifier_group = event_notifier_group; /* owner */
+
+ objd_set_private(counter_objd, counter);
+ /* The error counter holds a reference on the event_notifier group. */
+ objd_ref(event_notifier_group->objd);
+
+ return counter_objd;
+
+create_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(counter_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
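+
+/*
+ * Illustrative sketch (not part of this patch): the shape of an error
+ * counter configuration that passes the validation above. The size
+ * value and any fields not checked here are hypothetical.
+ *
+ *	struct lttng_ust_abi_counter_conf conf = {
+ *		.arithmetic = LTTNG_UST_ABI_COUNTER_ARITHMETIC_MODULAR,
+ *		.bitness = LTTNG_UST_ABI_COUNTER_BITNESS_64,
+ *		.number_dimensions = 1,
+ *	};
+ *	conf.dimensions[0].size = 4096;
+ *
+ * Any other arithmetic, bitness or dimension count is rejected with
+ * -EINVAL, and a second error counter on the same event notifier
+ * group fails with -EBUSY.
+ */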
+
+static
+long lttng_event_notifier_group_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ switch (cmd) {
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
+ {
+ struct lttng_ust_abi_event_notifier *event_notifier_param =
+ (struct lttng_ust_abi_event_notifier *) arg;
+ if (strutils_is_star_glob_pattern(event_notifier_param->event.name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ return lttng_ust_event_notifier_enabler_create(objd,
+ owner, event_notifier_param,
+ LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ } else {
+ return lttng_ust_event_notifier_enabler_create(objd,
+ owner, event_notifier_param,
+ LTTNG_ENABLER_FORMAT_EVENT);
+ }
+ }
+ case LTTNG_UST_ABI_COUNTER:
+ {
+ struct lttng_ust_abi_counter_conf *counter_conf =
+ (struct lttng_ust_abi_counter_conf *) uargs->counter.counter_data;
+ return lttng_ust_event_notifier_group_create_error_counter(
+ objd, owner, counter_conf);
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_event_notifier_enabler_release(int objd)
+{
+ struct lttng_event_notifier_enabler *event_notifier_enabler = objd_private(objd);
+
+ if (event_notifier_enabler)
+ return lttng_ust_abi_objd_unref(event_notifier_enabler->group->objd, 0);
+ return 0;
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_enabler_ops = {
+ .release = lttng_event_notifier_enabler_release,
+ .cmd = lttng_event_notifier_enabler_cmd,
+};
+
+static
+int lttng_release_event_notifier_group(int objd)
+{
+ struct lttng_event_notifier_group *event_notifier_group = objd_private(objd);
+
+ if (event_notifier_group) {
+ lttng_event_notifier_group_destroy(event_notifier_group);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_notifier_group_ops = {
+ .release = lttng_release_event_notifier_group,
+ .cmd = lttng_event_notifier_group_cmd,
+};
+
+static
+long lttng_tracepoint_list_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs __attribute__((unused)),
+ void *owner __attribute__((unused)))
+{
+ struct lttng_ust_tracepoint_list *list = objd_private(objd);
+ struct lttng_ust_abi_tracepoint_iter *tp =
+ (struct lttng_ust_abi_tracepoint_iter *) arg;
+ struct lttng_ust_abi_tracepoint_iter *iter;
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
+ {
+ iter = lttng_ust_tracepoint_list_get_iter_next(list);
+ if (!iter)
+ return -LTTNG_UST_ERR_NOENT;
+ memcpy(tp, iter, sizeof(*tp));
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_abi_tracepoint_list(void *owner)
+{
+ int list_objd, ret;
+ struct lttng_ust_tracepoint_list *list;
+
+	list_objd = objd_alloc(NULL, &lttng_tracepoint_list_ops, owner, "tp_list");
+ if (list_objd < 0) {
+ ret = list_objd;
+ goto objd_error;
+ }
+ list = zmalloc(sizeof(*list));
+ if (!list) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ objd_set_private(list_objd, list);
+
+ /* populate list by walking on all registered probes. */
+ ret = lttng_probes_get_event_list(list);
+ if (ret) {
+ goto list_error;
+ }
+ return list_objd;
+
+list_error:
+ free(list);
+alloc_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(list_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+int lttng_release_tracepoint_list(int objd)
+{
+ struct lttng_ust_tracepoint_list *list = objd_private(objd);
+
+ if (list) {
+ lttng_probes_prune_event_list(list);
+ free(list);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_list_ops = {
+ .release = lttng_release_tracepoint_list,
+ .cmd = lttng_tracepoint_list_cmd,
+};
+
+static
+long lttng_tracepoint_field_list_cmd(int objd, unsigned int cmd,
+ unsigned long arg __attribute__((unused)), union lttng_ust_abi_args *uargs,
+ void *owner __attribute__((unused)))
+{
+ struct lttng_ust_field_list *list = objd_private(objd);
+ struct lttng_ust_abi_field_iter *tp = &uargs->field_list.entry;
+ struct lttng_ust_abi_field_iter *iter;
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
+ {
+ iter = lttng_ust_field_list_get_iter_next(list);
+ if (!iter)
+ return -LTTNG_UST_ERR_NOENT;
+ memcpy(tp, iter, sizeof(*tp));
+ return 0;
+ }
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_abi_tracepoint_field_list(void *owner)
+{
+ int list_objd, ret;
+ struct lttng_ust_field_list *list;
+
+	list_objd = objd_alloc(NULL, &lttng_tracepoint_field_list_ops, owner,
+ "tp_field_list");
+ if (list_objd < 0) {
+ ret = list_objd;
+ goto objd_error;
+ }
+ list = zmalloc(sizeof(*list));
+ if (!list) {
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ objd_set_private(list_objd, list);
+
+ /* populate list by walking on all registered probes. */
+ ret = lttng_probes_get_field_list(list);
+ if (ret) {
+ goto list_error;
+ }
+ return list_objd;
+
+list_error:
+ free(list);
+alloc_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(list_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+static
+int lttng_release_tracepoint_field_list(int objd)
+{
+ struct lttng_ust_field_list *list = objd_private(objd);
+
+ if (list) {
+ lttng_probes_prune_field_list(list);
+ free(list);
+ return 0;
+ } else {
+ return -EINVAL;
+ }
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_tracepoint_field_list_ops = {
+ .release = lttng_release_tracepoint_field_list,
+ .cmd = lttng_tracepoint_field_list_cmd,
+};
+
+static
+int lttng_abi_map_stream(int channel_objd, struct lttng_ust_abi_stream *info,
+ union lttng_ust_abi_args *uargs, void *owner __attribute__((unused)))
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(channel_objd);
+ int ret;
+
+ ret = channel_handle_add_stream(lttng_chan_buf->priv->rb_chan->handle,
+ uargs->stream.shm_fd, uargs->stream.wakeup_fd,
+ info->stream_nr, info->len);
+ if (ret)
+ goto error_add_stream;
+ /* Take ownership of shm_fd and wakeup_fd. */
+ uargs->stream.shm_fd = -1;
+ uargs->stream.wakeup_fd = -1;
+
+ return 0;
+
+error_add_stream:
+ return ret;
+}
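+
+/*
+ * Sketch of the fd ownership convention used above and by the other
+ * shm/wakeup fd handlers in this file: on success the callee takes
+ * ownership of the fds received from the session daemon and marks
+ * them as consumed by setting the uargs fields to -1. The generic
+ * command path then only closes fds whose ownership was not taken:
+ *
+ *	if (args.stream.shm_fd >= 0) {
+ *		close(args.stream.shm_fd);
+ *		args.stream.shm_fd = -1;
+ *	}
+ */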
+
+static
+int lttng_abi_create_event_enabler(int channel_objd,
+ struct lttng_ust_abi_event *event_param,
+ void *owner,
+ enum lttng_enabler_format_type format_type)
+{
+ struct lttng_ust_channel_buffer *channel = objd_private(channel_objd);
+ struct lttng_event_enabler *enabler;
+ int event_objd, ret;
+
+ event_param->name[LTTNG_UST_ABI_SYM_NAME_LEN - 1] = '\0';
+	event_objd = objd_alloc(NULL, &lttng_event_enabler_ops, owner,
+ "event enabler");
+ if (event_objd < 0) {
+ ret = event_objd;
+ goto objd_error;
+ }
+ /*
+ * We tolerate no failure path after event creation. It will stay
+ * invariant for the rest of the session.
+ */
+ enabler = lttng_event_enabler_create(format_type, event_param, channel);
+ if (!enabler) {
+ ret = -ENOMEM;
+ goto event_error;
+ }
+ objd_set_private(event_objd, enabler);
+ /* The event holds a reference on the channel */
+ objd_ref(channel_objd);
+ return event_objd;
+
+event_error:
+ {
+ int err;
+
+ err = lttng_ust_abi_objd_unref(event_objd, 1);
+ assert(!err);
+ }
+objd_error:
+ return ret;
+}
+
+/**
+ * lttng_channel_cmd - lttng control through object descriptors
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This object descriptor implements lttng commands:
+ * LTTNG_UST_ABI_STREAM
+ * Returns an event stream object descriptor or failure.
+ * (typically, one event stream records events from one CPU)
+ * LTTNG_UST_ABI_EVENT
+ * Returns an event object descriptor or failure.
+ * LTTNG_UST_ABI_CONTEXT
+ * Prepend a context field to each event in the channel
+ * LTTNG_UST_ABI_ENABLE
+ * Enable recording for events in this channel (weak enable)
+ *	LTTNG_UST_ABI_DISABLE
+ *		Disable recording for events in this channel (strong disable)
+ *	LTTNG_UST_ABI_FLUSH_BUFFER
+ *		Flush the channel ring buffer
+ *
+ * Channel and event file descriptors also hold a reference on the session.
+ */
+static
+long lttng_channel_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs, void *owner)
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
+
+ if (cmd != LTTNG_UST_ABI_STREAM) {
+ /*
+ * Check if channel received all streams.
+ */
+ if (!lttng_is_channel_ready(lttng_chan_buf))
+ return -EPERM;
+ }
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_STREAM:
+ {
+ struct lttng_ust_abi_stream *stream;
+
+ stream = (struct lttng_ust_abi_stream *) arg;
+ /* stream used as output */
+ return lttng_abi_map_stream(objd, stream, uargs, owner);
+ }
+ case LTTNG_UST_ABI_EVENT:
+ {
+ struct lttng_ust_abi_event *event_param =
+ (struct lttng_ust_abi_event *) arg;
+
+ if (strutils_is_star_glob_pattern(event_param->name)) {
+ /*
+ * If the event name is a star globbing pattern,
+ * we create the special star globbing enabler.
+ */
+ return lttng_abi_create_event_enabler(objd, event_param,
+ owner, LTTNG_ENABLER_FORMAT_STAR_GLOB);
+ } else {
+ return lttng_abi_create_event_enabler(objd, event_param,
+ owner, LTTNG_ENABLER_FORMAT_EVENT);
+ }
+ }
+ case LTTNG_UST_ABI_CONTEXT:
+ return lttng_abi_add_context(objd,
+ (struct lttng_ust_abi_context *) arg, uargs,
+			&lttng_chan_buf->priv->ctx,
+ lttng_chan_buf->parent->session);
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_channel_enable(lttng_chan_buf->parent);
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_channel_disable(lttng_chan_buf->parent);
+ case LTTNG_UST_ABI_FLUSH_BUFFER:
+ return lttng_chan_buf->ops->priv->flush_buffer(lttng_chan_buf);
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_channel_release(int objd)
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf = objd_private(objd);
+
+ if (lttng_chan_buf)
+ return lttng_ust_abi_objd_unref(lttng_chan_buf->parent->session->priv->objd, 0);
+ return 0;
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_channel_ops = {
+ .release = lttng_channel_release,
+ .cmd = lttng_channel_cmd,
+};
+
+/**
+ * lttng_event_enabler_cmd - lttng control through object descriptors
+ *
+ * @objd: the object descriptor
+ * @cmd: the command
+ * @arg: command arg
+ * @uargs: UST arguments (internal)
+ * @owner: objd owner
+ *
+ * This object descriptor implements lttng commands:
+ * LTTNG_UST_ABI_CONTEXT
+ * Prepend a context field to each record of events of this
+ * enabler.
+ * LTTNG_UST_ABI_ENABLE
+ * Enable recording for this enabler
+ * LTTNG_UST_ABI_DISABLE
+ * Disable recording for this enabler
+ * LTTNG_UST_ABI_FILTER
+ * Attach a filter to an enabler.
+ * LTTNG_UST_ABI_EXCLUSION
+ * Attach exclusions to an enabler.
+ */
+static
+long lttng_event_enabler_cmd(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *uargs __attribute__((unused)),
+ void *owner __attribute__((unused)))
+{
+ struct lttng_event_enabler *enabler = objd_private(objd);
+
+ switch (cmd) {
+ case LTTNG_UST_ABI_CONTEXT:
+ return lttng_event_enabler_attach_context(enabler,
+ (struct lttng_ust_abi_context *) arg);
+ case LTTNG_UST_ABI_ENABLE:
+ return lttng_event_enabler_enable(enabler);
+ case LTTNG_UST_ABI_DISABLE:
+ return lttng_event_enabler_disable(enabler);
+	case LTTNG_UST_ABI_FILTER:
+		return lttng_event_enabler_attach_filter_bytecode(enabler,
+				(struct lttng_ust_bytecode_node **) arg);
+	case LTTNG_UST_ABI_EXCLUSION:
+		return lttng_event_enabler_attach_exclusion(enabler,
+				(struct lttng_ust_excluder_node **) arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int lttng_event_enabler_release(int objd)
+{
+ struct lttng_event_enabler *event_enabler = objd_private(objd);
+
+ if (event_enabler)
+ return lttng_ust_abi_objd_unref(event_enabler->chan->priv->parent.objd, 0);
+
+ return 0;
+}
+
+static const struct lttng_ust_abi_objd_ops lttng_event_enabler_ops = {
+ .release = lttng_event_enabler_release,
+ .cmd = lttng_event_enabler_cmd,
+};
+
+void lttng_ust_abi_exit(void)
+{
+ lttng_ust_abi_close_in_progress = 1;
+ ust_lock_nocheck();
+ objd_table_destroy();
+ ust_unlock();
+ lttng_ust_abi_close_in_progress = 0;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 David Goulet <david.goulet@polymtl.ca>
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <time.h>
+#include <assert.h>
+#include <signal.h>
+#include <limits.h>
+#include <urcu/uatomic.h>
+#include "futex.h"
+#include <urcu/compiler.h>
+#include <lttng/urcu/urcu-ust.h>
+
+#include <lttng/ust-utils.h>
+#include <lttng/ust-events.h>
+#include <lttng/ust-abi.h>
+#include <lttng/ust-fork.h>
+#include <lttng/ust-error.h>
+#include <lttng/ust-ctl.h>
+#include <lttng/ust-libc-wrapper.h>
+#include <lttng/ust-thread.h>
+#include <lttng/ust-tracer.h>
+#include <urcu/tls-compat.h>
+#include <ust-comm.h>
+#include <ust-fd.h>
+#include <usterr-signal-safe.h>
+#include <ust-helper.h>
+#include "tracepoint-internal.h"
+#include "lttng-tracer-core.h"
+#include "compat.h"
+#include "../libringbuffer/rb-init.h"
+#include "lttng-ust-statedump.h"
+#include "clock.h"
+#include "../libringbuffer/getcpu.h"
+#include "getenv.h"
+#include "ust-events-internal.h"
+#include "context-internal.h"
+#include "ust-compat.h"
+#include "lttng-counter-client.h"
+#include "lttng-rb-clients.h"
+
+/*
+ * Has the lttng ust comm constructor been called?
+ */
+static int initialized;
+
+/*
+ * The ust_lock/ust_unlock lock is used as a communication thread mutex.
+ * Held when handling a command, also held by fork() to deal with
+ * removal of threads, and by exit path.
+ *
+ * The UST lock is the centralized mutex across UST tracing control and
+ * probe registration.
+ *
+ * ust_exit_mutex must never nest in ust_mutex.
+ *
+ * ust_fork_mutex must never nest in ust_mutex.
+ *
+ * ust_mutex_nest is a per-thread nesting counter, allowing the perf
+ * counter lazy initialization called by events within the statedump,
+ * which traces while the ust_mutex is held.
+ *
+ * ust_lock nests within the dynamic loader lock (within glibc) because
+ * it is taken within the library constructor.
+ *
+ * The ust fd tracker lock nests within the ust_mutex.
+ */
+static pthread_mutex_t ust_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Allow nesting the ust_mutex within the same thread. */
+static DEFINE_URCU_TLS(int, ust_mutex_nest);
+
+/*
+ * ust_exit_mutex protects thread_active variable wrt thread exit. It
+ * cannot be done by ust_mutex because pthread_cancel(), which takes an
+ * internal libc lock, cannot nest within ust_mutex.
+ *
+ * It never nests within a ust_mutex.
+ */
+static pthread_mutex_t ust_exit_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * ust_fork_mutex protects base address statedump tracing against forks. It
+ * prevents the dynamic loader lock from being taken (by base address statedump
+ * tracing) while a fork is happening, thus preventing deadlock issues with
+ * the dynamic loader lock.
+ */
+static pthread_mutex_t ust_fork_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/* Should the ust comm thread quit? */
+static int lttng_ust_comm_should_quit;
+
+/*
+ * This variable can be tested by applications to check whether
+ * lttng-ust is loaded. They simply have to define their own
+ * "lttng_ust_loaded" weak symbol, and test it. It is set to 1 by the
+ * library constructor.
+ */
+int lttng_ust_loaded __attribute__((weak));
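+
+/*
+ * Illustrative application-side sketch (hypothetical code, not part
+ * of this patch), following the comment above: an application can
+ * detect at run time whether liblttng-ust is loaded by defining the
+ * weak symbol itself and testing it:
+ *
+ *	int lttng_ust_loaded __attribute__((weak));
+ *
+ *	if (lttng_ust_loaded)
+ *		printf("tracing support is present\n");
+ */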
+
+/*
+ * Return 0 on success, -1 if should quit.
+ * The lock is taken in both cases.
+ * Signal-safe.
+ */
+int ust_lock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ if (oldstate != PTHREAD_CANCEL_ENABLE) {
+ ERR("pthread_setcancelstate: unexpected oldstate");
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_mutex_nest)++)
+ pthread_mutex_lock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (lttng_ust_comm_should_quit) {
+ return -1;
+ } else {
+ return 0;
+ }
+}
+
+/*
+ * ust_lock_nocheck() can be used in constructors/destructors, because
+ * they are already nested within the dynamic loader lock, and therefore
+ * have exclusive access against execution of liblttng-ust destructor.
+ * Signal-safe.
+ */
+void ust_lock_nocheck(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ if (oldstate != PTHREAD_CANCEL_ENABLE) {
+ ERR("pthread_setcancelstate: unexpected oldstate");
+ }
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!URCU_TLS(ust_mutex_nest)++)
+ pthread_mutex_lock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+}
+
+/*
+ * Signal-safe.
+ */
+void ust_unlock(void)
+{
+ sigset_t sig_all_blocked, orig_mask;
+ int ret, oldstate;
+
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ if (!--URCU_TLS(ust_mutex_nest))
+ pthread_mutex_unlock(&ust_mutex);
+ ret = pthread_sigmask(SIG_SETMASK, &orig_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+ ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
+ if (ret) {
+ ERR("pthread_setcancelstate: %s", strerror(ret));
+ }
+ if (oldstate != PTHREAD_CANCEL_DISABLE) {
+ ERR("pthread_setcancelstate: unexpected oldstate");
+ }
+}
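+
+/*
+ * Typical usage pattern for the lock pair above, as found in the
+ * command handlers below (a sketch, not new code):
+ *
+ *	if (ust_lock()) {
+ *		ret = -LTTNG_UST_ERR_EXITING;
+ *		goto error;
+ *	}
+ *	...
+ *	error:
+ *		ust_unlock();
+ *
+ * ust_lock() returning nonzero means the library is exiting, but the
+ * lock is taken in both cases, so the error path must still unlock.
+ */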
+
+/*
+ * Wait for any of the following before continuing to the main
+ * program:
+ * - the register_done message from the sessiond daemon
+ *   (lets the sessiond daemon enable sessions before main
+ *   starts),
+ * - the sessiond daemon being unreachable,
+ * - a timeout (ensuring applications are resilient to session
+ *   daemon problems).
+ */
+static sem_t constructor_wait;
+/*
+ * Doing this for both the global and local sessiond.
+ */
+enum {
+ sem_count_initial_value = 4,
+};
+
+static int sem_count = sem_count_initial_value;
+
+/*
+ * Counting nesting within lttng-ust. Used to ensure that calling fork()
+ * from liblttng-ust does not execute the pre/post fork handlers.
+ */
+static DEFINE_URCU_TLS(int, lttng_ust_nest_count);
+
+/*
+ * Info about socket and associated listener thread.
+ */
+struct sock_info {
+ const char *name;
+ pthread_t ust_listener; /* listener thread */
+ int root_handle;
+ int registration_done;
+ int allowed;
+ int global;
+ int thread_active;
+
+ char sock_path[PATH_MAX];
+ int socket;
+ int notify_socket;
+
+ char wait_shm_path[PATH_MAX];
+ char *wait_shm_mmap;
+ /* Keep track of lazy state dump not performed yet. */
+ int statedump_pending;
+ int initial_statedump_done;
+ /* Keep procname for statedump */
+ char procname[LTTNG_UST_ABI_PROCNAME_LEN];
+};
+
+/* Socket from app (connect) to session daemon (listen) for communication */
+struct sock_info global_apps = {
+ .name = "global",
+ .global = 1,
+
+ .root_handle = -1,
+ .registration_done = 0,
+ .allowed = 0,
+ .thread_active = 0,
+
+ .sock_path = LTTNG_DEFAULT_RUNDIR "/" LTTNG_UST_SOCK_FILENAME,
+ .socket = -1,
+ .notify_socket = -1,
+
+ .wait_shm_path = "/" LTTNG_UST_WAIT_FILENAME,
+
+ .statedump_pending = 0,
+ .initial_statedump_done = 0,
+ .procname[0] = '\0'
+};
+
+/* TODO: allow global_apps_sock_path override */
+
+struct sock_info local_apps = {
+ .name = "local",
+ .global = 0,
+ .root_handle = -1,
+ .registration_done = 0,
+ .allowed = 0, /* Check setuid bit first */
+ .thread_active = 0,
+
+ .socket = -1,
+ .notify_socket = -1,
+
+ .statedump_pending = 0,
+ .initial_statedump_done = 0,
+ .procname[0] = '\0'
+};
+
+static int wait_poll_fallback;
+
+static const char *cmd_name_mapping[] = {
+ [ LTTNG_UST_ABI_RELEASE ] = "Release",
+ [ LTTNG_UST_ABI_SESSION ] = "Create Session",
+ [ LTTNG_UST_ABI_TRACER_VERSION ] = "Get Tracer Version",
+
+ [ LTTNG_UST_ABI_TRACEPOINT_LIST ] = "Create Tracepoint List",
+ [ LTTNG_UST_ABI_WAIT_QUIESCENT ] = "Wait for Quiescent State",
+ [ LTTNG_UST_ABI_REGISTER_DONE ] = "Registration Done",
+ [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST ] = "Create Tracepoint Field List",
+
+ [ LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE ] = "Create event notifier group",
+
+ /* Session FD commands */
+ [ LTTNG_UST_ABI_CHANNEL ] = "Create Channel",
+ [ LTTNG_UST_ABI_SESSION_START ] = "Start Session",
+ [ LTTNG_UST_ABI_SESSION_STOP ] = "Stop Session",
+
+ /* Channel FD commands */
+ [ LTTNG_UST_ABI_STREAM ] = "Create Stream",
+ [ LTTNG_UST_ABI_EVENT ] = "Create Event",
+
+ /* Event and Channel FD commands */
+ [ LTTNG_UST_ABI_CONTEXT ] = "Create Context",
+ [ LTTNG_UST_ABI_FLUSH_BUFFER ] = "Flush Buffer",
+
+ /* Event, Channel and Session commands */
+ [ LTTNG_UST_ABI_ENABLE ] = "Enable",
+ [ LTTNG_UST_ABI_DISABLE ] = "Disable",
+
+ /* Tracepoint list commands */
+ [ LTTNG_UST_ABI_TRACEPOINT_LIST_GET ] = "List Next Tracepoint",
+ [ LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET ] = "List Next Tracepoint Field",
+
+ /* Event FD commands */
+ [ LTTNG_UST_ABI_FILTER ] = "Create Filter",
+ [ LTTNG_UST_ABI_EXCLUSION ] = "Add exclusions to event",
+
+ /* Event notifier group commands */
+ [ LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE ] = "Create event notifier",
+
+ /* Session and event notifier group commands */
+ [ LTTNG_UST_ABI_COUNTER ] = "Create Counter",
+
+ /* Counter commands */
+ [ LTTNG_UST_ABI_COUNTER_GLOBAL ] = "Create Counter Global",
+ [ LTTNG_UST_ABI_COUNTER_CPU ] = "Create Counter CPU",
+};
+
+static const char *str_timeout;
+static int got_timeout_env;
+
+static char *get_map_shm(struct sock_info *sock_info);
+
+ssize_t lttng_ust_read(int fd, void *buf, size_t len)
+{
+ ssize_t ret;
+ size_t copied = 0, to_copy = len;
+
+ do {
+ ret = read(fd, buf + copied, to_copy);
+ if (ret > 0) {
+ copied += ret;
+ to_copy -= ret;
+ }
+ } while ((ret > 0 && to_copy > 0)
+ || (ret < 0 && errno == EINTR));
+ if (ret > 0) {
+ ret = copied;
+ }
+ return ret;
+}
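+
+/*
+ * lttng_ust_read() keeps reading until len bytes are copied, EOF is
+ * reached, or a non-EINTR error occurs. A hypothetical caller
+ * (illustration only):
+ *
+ *	int32_t val;
+ *	ssize_t ret = lttng_ust_read(fd, &val, sizeof(val));
+ *
+ *	if (ret != sizeof(val))
+ *		...	short read, EOF or error
+ */
+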
+/*
+ * Returns the HOME directory path. Caller MUST NOT free(3) the returned
+ * pointer.
+ */
+static
+const char *get_lttng_home_dir(void)
+{
+ const char *val;
+
+ val = (const char *) lttng_ust_getenv("LTTNG_HOME");
+ if (val != NULL) {
+ return val;
+ }
+ return (const char *) lttng_ust_getenv("HOME");
+}
+
+/*
+ * Force a read (implies a TLS fixup for dlopen) of TLS variables.
+ */
+static
+void lttng_fixup_nest_count_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(lttng_ust_nest_count)));
+}
+
+static
+void lttng_fixup_ust_mutex_nest_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(ust_mutex_nest)));
+}
+
+/*
+ * Fixup lttng-ust urcu TLS.
+ */
+static
+void lttng_fixup_lttng_ust_urcu_tls(void)
+{
+ (void) lttng_ust_urcu_read_ongoing();
+}
+
+void lttng_ust_fixup_tls(void)
+{
+ lttng_fixup_lttng_ust_urcu_tls();
+ lttng_fixup_ringbuffer_tls();
+ lttng_fixup_vtid_tls();
+ lttng_fixup_nest_count_tls();
+ lttng_fixup_procname_tls();
+ lttng_fixup_ust_mutex_nest_tls();
+ lttng_ust_fixup_perf_counter_tls();
+ lttng_ust_fixup_fd_tracker_tls();
+ lttng_fixup_cgroup_ns_tls();
+ lttng_fixup_ipc_ns_tls();
+ lttng_fixup_net_ns_tls();
+ lttng_fixup_time_ns_tls();
+ lttng_fixup_uts_ns_tls();
+ lttng_ust_fixup_ring_buffer_client_discard_tls();
+ lttng_ust_fixup_ring_buffer_client_discard_rt_tls();
+ lttng_ust_fixup_ring_buffer_client_overwrite_tls();
+ lttng_ust_fixup_ring_buffer_client_overwrite_rt_tls();
+}
+
+/*
+ * LTTng-UST uses Global Dynamic model TLS variables rather than IE
+ * model because many versions of glibc don't preallocate a pool large
+ * enough for TLS variables IE model defined in other shared libraries,
+ * and causes issues when using LTTng-UST for Java tracing.
+ *
+ * Because of this use of Global Dynamic TLS variables, users wishing to
+ * trace from signal handlers need to explicitly trigger the lazy
+ * allocation of those variables for each thread before using them.
+ * This can be triggered by calling lttng_ust_init_thread().
+ */
+void lttng_ust_init_thread(void)
+{
+ /*
+ * Because those TLS variables are global dynamic, we need to
+ * ensure those are initialized before a signal handler nesting over
+ * this thread attempts to use them.
+ */
+ lttng_ust_fixup_tls();
+}
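+
+/*
+ * Illustrative sketch (hypothetical code, not part of this patch): a
+ * thread that may trace from a signal handler should trigger the lazy
+ * TLS allocation early, e.g. at the top of its start routine:
+ *
+ *	static void *worker(void *arg)
+ *	{
+ *		lttng_ust_init_thread();
+ *		install_signal_handlers();	(hypothetical helper)
+ *		...
+ *	}
+ */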
+
+int lttng_get_notify_socket(void *owner)
+{
+ struct sock_info *info = owner;
+
+ return info->notify_socket;
+}
+
+char *lttng_ust_sockinfo_get_procname(void *owner)
+{
+ struct sock_info *info = owner;
+
+ return info->procname;
+}
+
+static
+void print_cmd(int cmd, int handle)
+{
+ const char *cmd_name = "Unknown";
+
+ if (cmd >= 0 && cmd < LTTNG_ARRAY_SIZE(cmd_name_mapping)
+ && cmd_name_mapping[cmd]) {
+ cmd_name = cmd_name_mapping[cmd];
+ }
+ DBG("Message Received \"%s\" (%d), Handle \"%s\" (%d)",
+ cmd_name, cmd,
+ lttng_ust_obj_get_name(handle), handle);
+}
+
+static
+int setup_global_apps(void)
+{
+ int ret = 0;
+
+	assert(!global_apps.wait_shm_mmap);
+
+ global_apps.wait_shm_mmap = get_map_shm(&global_apps);
+ if (!global_apps.wait_shm_mmap) {
+ WARN("Unable to get map shm for global apps. Disabling LTTng-UST global tracing.");
+ global_apps.allowed = 0;
+ ret = -EIO;
+ goto error;
+ }
+
+ global_apps.allowed = 1;
+ lttng_pthread_getname_np(global_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
+error:
+ return ret;
+}
+
+static
+int setup_local_apps(void)
+{
+ int ret = 0;
+ const char *home_dir;
+ uid_t uid;
+
+ assert(!local_apps.wait_shm_mmap);
+
+ uid = getuid();
+ /*
+ * Disallow per-user tracing for setuid binaries.
+ */
+ if (uid != geteuid()) {
+ assert(local_apps.allowed == 0);
+ ret = 0;
+ goto end;
+ }
+ home_dir = get_lttng_home_dir();
+ if (!home_dir) {
+ WARN("HOME environment variable not set. Disabling LTTng-UST per-user tracing.");
+ assert(local_apps.allowed == 0);
+ ret = -ENOENT;
+ goto end;
+ }
+ local_apps.allowed = 1;
+ snprintf(local_apps.sock_path, PATH_MAX, "%s/%s/%s",
+ home_dir,
+ LTTNG_DEFAULT_HOME_RUNDIR,
+ LTTNG_UST_SOCK_FILENAME);
+ snprintf(local_apps.wait_shm_path, PATH_MAX, "/%s-%u",
+ LTTNG_UST_WAIT_FILENAME,
+ uid);
+
+ local_apps.wait_shm_mmap = get_map_shm(&local_apps);
+ if (!local_apps.wait_shm_mmap) {
+ WARN("Unable to get map shm for local apps. Disabling LTTng-UST per-user tracing.");
+ local_apps.allowed = 0;
+ ret = -EIO;
+ goto end;
+ }
+
+ lttng_pthread_getname_np(local_apps.procname, LTTNG_UST_ABI_PROCNAME_LEN);
+end:
+ return ret;
+}
+
+/*
+ * Get socket timeout, in ms.
+ * -1: wait forever. 0: don't wait. >0: timeout, in ms.
+ */
+static
+long get_timeout(void)
+{
+ long constructor_delay_ms = LTTNG_UST_DEFAULT_CONSTRUCTOR_TIMEOUT_MS;
+
+ if (!got_timeout_env) {
+ str_timeout = lttng_ust_getenv("LTTNG_UST_REGISTER_TIMEOUT");
+ got_timeout_env = 1;
+ }
+ if (str_timeout)
+ constructor_delay_ms = strtol(str_timeout, NULL, 10);
+ /* All negative values are considered as "-1". */
+ if (constructor_delay_ms < -1)
+ constructor_delay_ms = -1;
+ return constructor_delay_ms;
+}
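+
+/*
+ * Examples of LTTNG_UST_REGISTER_TIMEOUT values, following the
+ * semantics above:
+ *
+ *	LTTNG_UST_REGISTER_TIMEOUT=-1	wait forever
+ *	LTTNG_UST_REGISTER_TIMEOUT=0	do not wait
+ *	LTTNG_UST_REGISTER_TIMEOUT=3000	wait up to 3000 ms
+ *
+ * Values below -1 are clamped to -1. The environment variable is read
+ * once and cached for the process lifetime.
+ */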
+
+/* Timeout for notify socket send and recv. */
+static
+long get_notify_sock_timeout(void)
+{
+ return get_timeout();
+}
+
+/* Timeout for connecting to cmd and notify sockets. */
+static
+long get_connect_sock_timeout(void)
+{
+ return get_timeout();
+}
+
+/*
+ * Return values: -1: wait forever. 0: don't wait. 1: timeout wait.
+ */
+static
+int get_constructor_timeout(struct timespec *constructor_timeout)
+{
+ long constructor_delay_ms;
+ int ret;
+
+ constructor_delay_ms = get_timeout();
+
+ switch (constructor_delay_ms) {
+ case -1:/* fall-through */
+ case 0:
+ return constructor_delay_ms;
+ default:
+ break;
+ }
+
+ /*
+ * If we are unable to find the current time, don't wait.
+ */
+ ret = clock_gettime(CLOCK_REALTIME, constructor_timeout);
+ if (ret) {
+ /* Don't wait. */
+ return 0;
+ }
+ constructor_timeout->tv_sec += constructor_delay_ms / 1000UL;
+ constructor_timeout->tv_nsec +=
+ (constructor_delay_ms % 1000UL) * 1000000UL;
+ if (constructor_timeout->tv_nsec >= 1000000000UL) {
+ constructor_timeout->tv_sec++;
+ constructor_timeout->tv_nsec -= 1000000000UL;
+ }
+ /* Timeout wait (constructor_delay_ms). */
+ return 1;
+}
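+
+/*
+ * Worked example of the timespec arithmetic above: with a 3500 ms
+ * timeout and tv_nsec initially at 900000000, the code adds 3 s and
+ * 500000000 ns, then carries the overflowing 1400000000 ns into one
+ * extra second, leaving tv_nsec = 400000000.
+ */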
+
+static
+void get_allow_blocking(void)
+{
+ const char *str_allow_blocking =
+ lttng_ust_getenv("LTTNG_UST_ALLOW_BLOCKING");
+
+ if (str_allow_blocking) {
+ DBG("%s environment variable is set",
+ "LTTNG_UST_ALLOW_BLOCKING");
+ lttng_ust_ringbuffer_set_allow_blocking();
+ }
+}
+
+static
+int register_to_sessiond(int socket, enum ustctl_socket_type type)
+{
+ return ustcomm_send_reg_msg(socket,
+ type,
+ CAA_BITS_PER_LONG,
+ lttng_ust_rb_alignof(uint8_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint16_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint32_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(uint64_t) * CHAR_BIT,
+ lttng_ust_rb_alignof(unsigned long) * CHAR_BIT);
+}
+
+static
+int send_reply(int sock, struct ustcomm_ust_reply *lur)
+{
+ ssize_t len;
+
+ len = ustcomm_send_unix_sock(sock, lur, sizeof(*lur));
+ switch (len) {
+ case sizeof(*lur):
+ DBG("message successfully sent");
+ return 0;
+ default:
+ if (len == -ECONNRESET) {
+ DBG("remote end closed connection");
+ return 0;
+ }
+ if (len < 0)
+ return len;
+ DBG("incorrect message size: %zd", len);
+ return -EINVAL;
+ }
+}
+
+static
+void decrement_sem_count(unsigned int count)
+{
+ int ret;
+
+ assert(uatomic_read(&sem_count) >= count);
+
+ if (uatomic_read(&sem_count) <= 0) {
+ return;
+ }
+
+ ret = uatomic_add_return(&sem_count, -count);
+ if (ret == 0) {
+ ret = sem_post(&constructor_wait);
+ assert(!ret);
+ }
+}
+
+static
+int handle_register_done(struct sock_info *sock_info)
+{
+ if (sock_info->registration_done)
+ return 0;
+ sock_info->registration_done = 1;
+
+ decrement_sem_count(1);
+ if (!sock_info->statedump_pending) {
+ sock_info->initial_statedump_done = 1;
+ decrement_sem_count(1);
+ }
+
+ return 0;
+}
+
+static
+int handle_register_failed(struct sock_info *sock_info)
+{
+ if (sock_info->registration_done)
+ return 0;
+ sock_info->registration_done = 1;
+ sock_info->initial_statedump_done = 1;
+
+ decrement_sem_count(2);
+
+ return 0;
+}
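+
+/*
+ * Constructor semaphore accounting (see sem_count_initial_value
+ * above): each of the two session daemons (global and local) must
+ * account for two events before the constructor may proceed, hence
+ * the initial count of 2 x 2 = 4:
+ *
+ *	registration done	-> decrement_sem_count(1)
+ *	initial statedump done	-> decrement_sem_count(1)
+ *
+ * A failed registration decrements both at once, and sem_post() fires
+ * only when the count reaches zero.
+ */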
+
+/*
+ * Only execute pending statedump after the constructor semaphore has
+ * been posted by the current listener thread. This means statedump will
+ * only be performed after the "registration done" command is received
+ * from this thread's session daemon.
+ *
+ * This ensures we don't run into deadlock issues with the dynamic
+ * loader mutex, which is held while the constructor is called and
+ * waiting on the constructor semaphore. All operations requiring this
+ * dynamic loader lock need to be postponed using this mechanism.
+ *
+ * In a scenario with two session daemons connected to the application,
+ * it is possible that the first listener thread which receives the
+ * registration done command issues its statedump while the dynamic
+ * loader lock is still held by the application constructor waiting on
+ * the semaphore. It will however be allowed to proceed when the
+ * second session daemon sends the registration done command to the
+ * second listener thread. This situation therefore does not produce
+ * a deadlock.
+ */
+static
+void handle_pending_statedump(struct sock_info *sock_info)
+{
+ if (sock_info->registration_done && sock_info->statedump_pending) {
+ sock_info->statedump_pending = 0;
+ pthread_mutex_lock(&ust_fork_mutex);
+ lttng_handle_pending_statedump(sock_info);
+ pthread_mutex_unlock(&ust_fork_mutex);
+
+ if (!sock_info->initial_statedump_done) {
+ sock_info->initial_statedump_done = 1;
+ decrement_sem_count(1);
+ }
+ }
+}
+
+static inline
+const char *bytecode_type_str(uint32_t cmd)
+{
+ switch (cmd) {
+ case LTTNG_UST_ABI_CAPTURE:
+ return "capture";
+ case LTTNG_UST_ABI_FILTER:
+ return "filter";
+ default:
+ abort();
+ }
+}
+
+static
+int handle_bytecode_recv(struct sock_info *sock_info,
+ int sock, struct ustcomm_ust_msg *lum)
+{
+ struct lttng_ust_bytecode_node *bytecode = NULL;
+ enum lttng_ust_bytecode_type type;
+ const struct lttng_ust_abi_objd_ops *ops;
+ uint32_t data_size, data_size_max, reloc_offset;
+ uint64_t seqnum;
+ ssize_t len;
+ int ret = 0;
+
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_FILTER:
+ type = LTTNG_UST_BYTECODE_TYPE_FILTER;
+ data_size = lum->u.filter.data_size;
+ data_size_max = LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN;
+ reloc_offset = lum->u.filter.reloc_offset;
+ seqnum = lum->u.filter.seqnum;
+ break;
+ case LTTNG_UST_ABI_CAPTURE:
+ type = LTTNG_UST_BYTECODE_TYPE_CAPTURE;
+ data_size = lum->u.capture.data_size;
+ data_size_max = LTTNG_UST_ABI_CAPTURE_BYTECODE_MAX_LEN;
+ reloc_offset = lum->u.capture.reloc_offset;
+ seqnum = lum->u.capture.seqnum;
+ break;
+ default:
+ abort();
+ }
+
+ if (data_size > data_size_max) {
+ ERR("Bytecode %s data size is too large: %u bytes",
+ bytecode_type_str(lum->cmd), data_size);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ if (reloc_offset > data_size) {
+ ERR("Bytecode %s reloc offset %u is not within data",
+ bytecode_type_str(lum->cmd), reloc_offset);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ /* Allocate the structure AND the `data[]` field. */
+ bytecode = zmalloc(sizeof(*bytecode) + data_size);
+ if (!bytecode) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ bytecode->bc.len = data_size;
+ bytecode->bc.reloc_offset = reloc_offset;
+ bytecode->bc.seqnum = seqnum;
+ bytecode->type = type;
+
+ len = ustcomm_recv_unix_sock(sock, bytecode->bc.data, bytecode->bc.len);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto end;
+ default:
+ if (len == bytecode->bc.len) {
+ DBG("Bytecode %s data received",
+ bytecode_type_str(lum->cmd));
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d",
+ (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection",
+ sock_info->name);
+ ret = len;
+ goto end;
+ }
+ ret = len;
+ goto end;
+ } else {
+ DBG("Incorrect %s bytecode data message size: %zd",
+ bytecode_type_str(lum->cmd), len);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+
+ ops = lttng_ust_abi_objd_ops(lum->handle);
+ if (!ops) {
+ ret = -ENOENT;
+ goto end;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &bytecode,
+ NULL, sock_info);
+ else
+ ret = -ENOSYS;
+
+end:
+ free(bytecode);
+ return ret;
+}
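+
+/*
+ * Note on the allocation in handle_bytecode_recv(): struct
+ * lttng_ust_bytecode_node ends with the flexible data[] field, so a
+ * single zmalloc() of sizeof(*bytecode) + data_size covers both the
+ * header and the bytecode payload later received into
+ * bytecode->bc.data.
+ */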
+
+static
+int handle_message(struct sock_info *sock_info,
+ int sock, struct ustcomm_ust_msg *lum)
+{
+ int ret = 0;
+ const struct lttng_ust_abi_objd_ops *ops;
+ struct ustcomm_ust_reply lur;
+ union lttng_ust_abi_args args;
+ char ctxstr[LTTNG_UST_ABI_SYM_NAME_LEN]; /* App context string. */
+ ssize_t len;
+
+ memset(&lur, 0, sizeof(lur));
+
+ if (ust_lock()) {
+ ret = -LTTNG_UST_ERR_EXITING;
+ goto error;
+ }
+
+ ops = lttng_ust_abi_objd_ops(lum->handle);
+ if (!ops) {
+ ret = -ENOENT;
+ goto error;
+ }
+
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_REGISTER_DONE:
+ if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
+ ret = handle_register_done(sock_info);
+ else
+ ret = -EINVAL;
+ break;
+ case LTTNG_UST_ABI_RELEASE:
+ if (lum->handle == LTTNG_UST_ABI_ROOT_HANDLE)
+ ret = -EPERM;
+ else
+ ret = lttng_ust_abi_objd_unref(lum->handle, 1);
+ break;
+ case LTTNG_UST_ABI_CAPTURE:
+ case LTTNG_UST_ABI_FILTER:
+ ret = handle_bytecode_recv(sock_info, sock, lum);
+ if (ret)
+ goto error;
+ break;
+ case LTTNG_UST_ABI_EXCLUSION:
+ {
+ /* Receive exclusion names */
+ struct lttng_ust_excluder_node *node;
+ unsigned int count;
+
+ count = lum->u.exclusion.count;
+ if (count == 0) {
+ /* There are no names to read */
+ ret = 0;
+ goto error;
+ }
+ node = zmalloc(sizeof(*node) +
+ count * LTTNG_UST_ABI_SYM_NAME_LEN);
+ if (!node) {
+ ret = -ENOMEM;
+ goto error;
+ }
+ node->excluder.count = count;
+ len = ustcomm_recv_unix_sock(sock, node->excluder.names,
+ count * LTTNG_UST_ABI_SYM_NAME_LEN);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ free(node);
+ goto error;
+ default:
+ if (len == count * LTTNG_UST_ABI_SYM_NAME_LEN) {
+ DBG("Exclusion data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ free(node);
+ goto error;
+ }
+ ret = len;
+ free(node);
+ goto error;
+ } else {
+ DBG("Incorrect exclusion data message size: %zd", len);
+ ret = -EINVAL;
+ free(node);
+ goto error;
+ }
+ }
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &node,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ free(node);
+ break;
+ }
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_GROUP_CREATE:
+ {
+ int event_notifier_notif_fd, close_ret;
+
+ len = ustcomm_recv_event_notifier_notif_fd_from_sessiond(sock,
+ &event_notifier_notif_fd);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ case 1:
+ break;
+ default:
+ if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d",
+ (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection",
+ sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("Incorrect event notifier fd message size: %zd",
+ len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.event_notifier_handle.event_notifier_notif_fd =
+ event_notifier_notif_fd;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.event_notifier_handle.event_notifier_notif_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.event_notifier_handle.event_notifier_notif_fd);
+ lttng_ust_unlock_fd_tracker();
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_CHANNEL:
+ {
+ void *chan_data;
+ int wakeup_fd;
+
+ len = ustcomm_recv_channel_from_sessiond(sock,
+ &chan_data, lum->u.channel.len,
+ &wakeup_fd);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == lum->u.channel.len) {
+ DBG("channel data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect channel data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.channel.chan_data = chan_data;
+ args.channel.wakeup_fd = wakeup_fd;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.channel.wakeup_fd >= 0) {
+ int close_ret;
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.channel.wakeup_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.channel.wakeup_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ free(args.channel.chan_data);
+ break;
+ }
+ case LTTNG_UST_ABI_STREAM:
+ {
+ int close_ret;
+
+ /* Receive shm_fd, wakeup_fd */
+ ret = ustcomm_recv_stream_from_sessiond(sock,
+ NULL,
+ &args.stream.shm_fd,
+ &args.stream.wakeup_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.stream.shm_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.stream.shm_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.stream.shm_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ if (args.stream.wakeup_fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.stream.wakeup_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.stream.wakeup_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_CONTEXT:
+ switch (lum->u.context.ctx) {
+ case LTTNG_UST_ABI_CONTEXT_APP_CONTEXT:
+ {
+ char *p;
+ size_t ctxlen, recvlen;
+
+ ctxlen = strlen("$app.") + lum->u.context.u.app_ctx.provider_name_len - 1
+ + strlen(":") + lum->u.context.u.app_ctx.ctx_name_len;
+ if (ctxlen >= LTTNG_UST_ABI_SYM_NAME_LEN) {
+ ERR("Application context string length size is too large: %zu bytes",
+ ctxlen);
+ ret = -EINVAL;
+ goto error;
+ }
+ strcpy(ctxstr, "$app.");
+ p = &ctxstr[strlen("$app.")];
+ recvlen = ctxlen - strlen("$app.");
+ len = ustcomm_recv_unix_sock(sock, p, recvlen);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == recvlen) {
+ DBG("app context data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect app context data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ /* Put : between provider and ctxname. */
+ p[lum->u.context.u.app_ctx.provider_name_len - 1] = ':';
+ args.app_context.ctxname = ctxstr;
+ break;
+ }
+ default:
+ break;
+ }
+ if (ops->cmd) {
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ } else {
+ ret = -ENOSYS;
+ }
+ break;
+ case LTTNG_UST_ABI_COUNTER:
+ {
+ void *counter_data;
+
+ len = ustcomm_recv_counter_from_sessiond(sock,
+ &counter_data, lum->u.counter.len);
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == lum->u.counter.len) {
+ DBG("counter data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect counter data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ args.counter.counter_data = counter_data;
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ free(args.counter.counter_data);
+ break;
+ }
+ case LTTNG_UST_ABI_COUNTER_GLOBAL:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.counter_shm.shm_fd >= 0) {
+ int close_ret;
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.counter_shm.shm_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.counter_shm.shm_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_COUNTER_CPU:
+ {
+ /* Receive shm_fd */
+ ret = ustcomm_recv_counter_shm_from_sessiond(sock,
+ &args.counter_shm.shm_fd);
+ if (ret) {
+ goto error;
+ }
+
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ if (args.counter_shm.shm_fd >= 0) {
+ int close_ret;
+
+ lttng_ust_lock_fd_tracker();
+ close_ret = close(args.counter_shm.shm_fd);
+ lttng_ust_unlock_fd_tracker();
+ args.counter_shm.shm_fd = -1;
+ if (close_ret)
+ PERROR("close");
+ }
+ break;
+ }
+ case LTTNG_UST_ABI_EVENT_NOTIFIER_CREATE:
+ {
+ /* Receive struct lttng_ust_event_notifier */
+ struct lttng_ust_abi_event_notifier event_notifier;
+
+ if (sizeof(event_notifier) != lum->u.event_notifier.len) {
+ DBG("incorrect event notifier data message size: %u", lum->u.event_notifier.len);
+ ret = -EINVAL;
+ goto error;
+ }
+ len = ustcomm_recv_unix_sock(sock, &event_notifier, sizeof(event_notifier));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ ret = 0;
+ goto error;
+ default:
+ if (len == sizeof(event_notifier)) {
+ DBG("event notifier data received");
+ break;
+ } else if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ if (len == -ECONNRESET) {
+ ERR("%s remote end closed connection", sock_info->name);
+ ret = len;
+ goto error;
+ }
+ ret = len;
+ goto error;
+ } else {
+ DBG("incorrect event notifier data message size: %zd", len);
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &event_notifier,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+
+ default:
+ if (ops->cmd)
+ ret = ops->cmd(lum->handle, lum->cmd,
+ (unsigned long) &lum->u,
+ &args, sock_info);
+ else
+ ret = -ENOSYS;
+ break;
+ }
+
+ lur.handle = lum->handle;
+ lur.cmd = lum->cmd;
+ lur.ret_val = ret;
+ if (ret >= 0) {
+ lur.ret_code = LTTNG_UST_OK;
+ } else {
+ /*
+		 * Use -LTTNG_UST_ERR as a wildcard for UST internal
+		 * errors that are not caused by the transport, except if
+ * we already have a more precise error message to
+ * report.
+ */
+ if (ret > -LTTNG_UST_ERR) {
+ /* Translate code to UST error. */
+ switch (ret) {
+ case -EEXIST:
+ lur.ret_code = -LTTNG_UST_ERR_EXIST;
+ break;
+ case -EINVAL:
+ lur.ret_code = -LTTNG_UST_ERR_INVAL;
+ break;
+ case -ENOENT:
+ lur.ret_code = -LTTNG_UST_ERR_NOENT;
+ break;
+ case -EPERM:
+ lur.ret_code = -LTTNG_UST_ERR_PERM;
+ break;
+ case -ENOSYS:
+ lur.ret_code = -LTTNG_UST_ERR_NOSYS;
+ break;
+ default:
+ lur.ret_code = -LTTNG_UST_ERR;
+ break;
+ }
+ } else {
+ lur.ret_code = ret;
+ }
+ }
+ if (ret >= 0) {
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_TRACER_VERSION:
+ lur.u.version = lum->u.version;
+ break;
+ case LTTNG_UST_ABI_TRACEPOINT_LIST_GET:
+ memcpy(&lur.u.tracepoint, &lum->u.tracepoint, sizeof(lur.u.tracepoint));
+ break;
+ }
+ }
+ DBG("Return value: %d", lur.ret_val);
+
+ ust_unlock();
+
+ /*
+	 * Perform delayed statedump operations outside of the UST
+ * lock. We need to take the dynamic loader lock before we take
+ * the UST lock internally within handle_pending_statedump().
+ */
+ handle_pending_statedump(sock_info);
+
+ if (ust_lock()) {
+ ret = -LTTNG_UST_ERR_EXITING;
+ goto error;
+ }
+
+ ret = send_reply(sock, &lur);
+ if (ret < 0) {
+ DBG("error sending reply");
+ goto error;
+ }
+
+ /*
+	 * LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET needs to send the field
+ * after the reply.
+ */
+ if (lur.ret_code == LTTNG_UST_OK) {
+ switch (lum->cmd) {
+ case LTTNG_UST_ABI_TRACEPOINT_FIELD_LIST_GET:
+ len = ustcomm_send_unix_sock(sock,
+ &args.field_list.entry,
+ sizeof(args.field_list.entry));
+ if (len < 0) {
+ ret = len;
+ goto error;
+ }
+ if (len != sizeof(args.field_list.entry)) {
+ ret = -EINVAL;
+ goto error;
+ }
+ }
+ }
+
+error:
+ ust_unlock();
+
+ return ret;
+}
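+
+/*
+ * Error code translation performed by handle_message(): negative
+ * errno-style codes returned by command handlers are mapped into the
+ * LTTNG_UST_ERR_* space before the reply is sent, e.g. -EPERM becomes
+ * -LTTNG_UST_ERR_PERM, and anything unrecognized collapses into the
+ * -LTTNG_UST_ERR wildcard. Codes already at or below -LTTNG_UST_ERR
+ * are assumed to be precise UST error codes and pass through
+ * unchanged.
+ */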
+
+static
+void cleanup_sock_info(struct sock_info *sock_info, int exiting)
+{
+ int ret;
+
+ if (sock_info->root_handle != -1) {
+ ret = lttng_ust_abi_objd_unref(sock_info->root_handle, 1);
+ if (ret) {
+ ERR("Error unref root handle");
+ }
+ sock_info->root_handle = -1;
+ }
+ sock_info->registration_done = 0;
+ sock_info->initial_statedump_done = 0;
+
+ /*
+ * wait_shm_mmap, socket and notify socket are used by listener
+ * threads outside of the ust lock, so we cannot tear them down
+ * ourselves, because we cannot join on these threads. Leave
+ * responsibility of cleaning up these resources to the OS
+ * process exit.
+ */
+ if (exiting)
+ return;
+
+ if (sock_info->socket != -1) {
+ ret = ustcomm_close_unix_sock(sock_info->socket);
+ if (ret) {
+ ERR("Error closing ust cmd socket");
+ }
+ sock_info->socket = -1;
+ }
+ if (sock_info->notify_socket != -1) {
+ ret = ustcomm_close_unix_sock(sock_info->notify_socket);
+ if (ret) {
+ ERR("Error closing ust notify socket");
+ }
+ sock_info->notify_socket = -1;
+ }
+ if (sock_info->wait_shm_mmap) {
+ long page_size;
+
+ page_size = LTTNG_UST_PAGE_SIZE;
+ if (page_size <= 0) {
+ if (!page_size) {
+ errno = EINVAL;
+ }
+ PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+ } else {
+ ret = munmap(sock_info->wait_shm_mmap, page_size);
+ if (ret) {
+ ERR("Error unmapping wait shm");
+ }
+ }
+ sock_info->wait_shm_mmap = NULL;
+ }
+}
+
+/*
+ * Using fork to set umask in the child process (not multi-thread safe).
+ * We deal with the shm_open vs ftruncate race (happening when the
+ * sessiond owns the shm and does not let everybody modify it, to ensure
+ * safety against shm_unlink) by simply letting the mmap fail and
+ * retrying after a few seconds.
+ * For global shm, everybody has rw access to it until the sessiond
+ * starts.
+ */
+static
+int get_wait_shm(struct sock_info *sock_info, size_t mmap_size)
+{
+ int wait_shm_fd, ret;
+ pid_t pid;
+
+ /*
+ * Try to open read-only.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
+ if (wait_shm_fd >= 0) {
+ int32_t tmp_read;
+ ssize_t len;
+ size_t bytes_read = 0;
+
+ /*
+ * Try to read the fd. If unable to do so, try opening
+ * it in write mode.
+ */
+ do {
+ len = read(wait_shm_fd,
+ &((char *) &tmp_read)[bytes_read],
+ sizeof(tmp_read) - bytes_read);
+ if (len > 0) {
+ bytes_read += len;
+ }
+ } while ((len < 0 && errno == EINTR)
+ || (len > 0 && bytes_read < sizeof(tmp_read)));
+ if (bytes_read != sizeof(tmp_read)) {
+ ret = close(wait_shm_fd);
+ if (ret) {
+ ERR("close wait_shm_fd");
+ }
+ goto open_write;
+ }
+ goto end;
+ } else if (wait_shm_fd < 0 && errno != ENOENT) {
+ /*
+ * Real-only open did not work, and it's not because the
+ * entry was not present. It's a failure that prohibits
+ * using shm.
+ */
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ goto end;
+ }
+
+open_write:
+ /*
+ * If the open failed because the file did not exist, or because
+	 * the file was not truncated yet, try creating it ourselves.
+ */
+ URCU_TLS(lttng_ust_nest_count)++;
+ pid = fork();
+ URCU_TLS(lttng_ust_nest_count)--;
+ if (pid > 0) {
+ int status;
+
+ /*
+ * Parent: wait for child to return, in which case the
+ * shared memory map will have been created.
+ */
+ pid = wait(&status);
+ if (pid < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != 0) {
+ wait_shm_fd = -1;
+ goto end;
+ }
+ /*
+ * Try to open read-only again after creation.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path, O_RDONLY, 0);
+ if (wait_shm_fd < 0) {
+ /*
+			 * Read-only open did not work. It's a failure
+ * that prohibits using shm.
+ */
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ goto end;
+ }
+ goto end;
+ } else if (pid == 0) {
+ int create_mode;
+
+ /* Child */
+ create_mode = S_IRUSR | S_IWUSR | S_IRGRP;
+ if (sock_info->global)
+ create_mode |= S_IROTH | S_IWGRP | S_IWOTH;
+ /*
+ * We're alone in a child process, so we can modify the
+ * process-wide umask.
+ */
+ umask(~create_mode);
+ /*
+ * Try creating shm (or get rw access).
+ * We don't do an exclusive open, because we allow other
+ * processes to create+ftruncate it concurrently.
+ */
+ wait_shm_fd = shm_open(sock_info->wait_shm_path,
+ O_RDWR | O_CREAT, create_mode);
+ if (wait_shm_fd >= 0) {
+ ret = ftruncate(wait_shm_fd, mmap_size);
+ if (ret) {
+ PERROR("ftruncate");
+ _exit(EXIT_FAILURE);
+ }
+ _exit(EXIT_SUCCESS);
+ }
+ /*
+ * For local shm, we need to have rw access to accept
+ * opening it: this means the local sessiond will be
+ * able to wake us up. For global shm, we open it even
+ * if rw access is not granted, because the root.root
+ * sessiond will be able to override all rights and wake
+ * us up.
+ */
+ if (!sock_info->global && errno != EACCES) {
+ ERR("Error opening shm %s", sock_info->wait_shm_path);
+ _exit(EXIT_FAILURE);
+ }
+ /*
+ * The shm exists, but we cannot open it RW. Report
+ * success.
+ */
+ _exit(EXIT_SUCCESS);
+ } else {
+ return -1;
+ }
+end:
+ if (wait_shm_fd >= 0 && !sock_info->global) {
+ struct stat statbuf;
+
+ /*
+ * Ensure that our user is the owner of the shm file for
+ * local shm. If we do not own the file, it means our
+ * sessiond will not have access to wake us up (there is
+ * probably a rogue process trying to fake our
+ * sessiond). Fallback to polling method in this case.
+ */
+ ret = fstat(wait_shm_fd, &statbuf);
+ if (ret) {
+ PERROR("fstat");
+ goto error_close;
+ }
+ if (statbuf.st_uid != getuid())
+ goto error_close;
+ }
+ return wait_shm_fd;
+
+error_close:
+ ret = close(wait_shm_fd);
+ if (ret) {
+ PERROR("Error closing fd");
+ }
+ return -1;
+}
+
+static
+char *get_map_shm(struct sock_info *sock_info)
+{
+ long page_size;
+ int wait_shm_fd, ret;
+ char *wait_shm_mmap;
+
+ page_size = sysconf(_SC_PAGE_SIZE);
+ if (page_size <= 0) {
+ if (!page_size) {
+ errno = EINVAL;
+ }
+ PERROR("Error in sysconf(_SC_PAGE_SIZE)");
+ goto error;
+ }
+
+ lttng_ust_lock_fd_tracker();
+ wait_shm_fd = get_wait_shm(sock_info, page_size);
+ if (wait_shm_fd < 0) {
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(wait_shm_fd);
+ if (ret < 0) {
+ ret = close(wait_shm_fd);
+		if (ret) {
+ PERROR("Error closing fd");
+ }
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ wait_shm_fd = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ wait_shm_mmap = mmap(NULL, page_size, PROT_READ,
+ MAP_SHARED, wait_shm_fd, 0);
+
+ /* close shm fd immediately after taking the mmap reference */
+ lttng_ust_lock_fd_tracker();
+ ret = close(wait_shm_fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(wait_shm_fd);
+ } else {
+ PERROR("Error closing fd");
+ }
+ lttng_ust_unlock_fd_tracker();
+
+ if (wait_shm_mmap == MAP_FAILED) {
+ DBG("mmap error (can be caused by race with sessiond). Fallback to poll mode.");
+ goto error;
+ }
+ return wait_shm_mmap;
+
+error:
+ return NULL;
+}
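+
+/*
+ * fd tracker discipline followed in get_map_shm(): every fd obtained
+ * while the tracker lock is held is either handed over to the tracker
+ * (lttng_ust_add_fd_to_tracker) or closed before the lock is
+ * released. The shm fd itself is closed right after mmap() takes its
+ * own reference on the mapping, so only the mapping outlives this
+ * function.
+ */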
+
+static
+void wait_for_sessiond(struct sock_info *sock_info)
+{
+ /* Use ust_lock to check if we should quit. */
+ if (ust_lock()) {
+ goto quit;
+ }
+ if (wait_poll_fallback) {
+ goto error;
+ }
+ ust_unlock();
+
+ assert(sock_info->wait_shm_mmap);
+
+ DBG("Waiting for %s apps sessiond", sock_info->name);
+ /* Wait for futex wakeup */
+ if (uatomic_read((int32_t *) sock_info->wait_shm_mmap))
+ goto end_wait;
+
+ while (lttng_ust_futex_async((int32_t *) sock_info->wait_shm_mmap,
+ FUTEX_WAIT, 0, NULL, NULL, 0)) {
+ switch (errno) {
+ case EWOULDBLOCK:
+ /* Value already changed. */
+ goto end_wait;
+ case EINTR:
+ /* Retry if interrupted by signal. */
+ break; /* Get out of switch. */
+ case EFAULT:
+ wait_poll_fallback = 1;
+ DBG(
+"Linux kernels 2.6.33 to 3.0 (with the exception of stable versions) "
+"do not support FUTEX_WAKE on read-only memory mappings correctly. "
+"Please upgrade your kernel "
+"(fix is commit 9ea71503a8ed9184d2d0b8ccc4d269d05f7940ae in Linux kernel "
+"mainline). LTTng-UST will use polling mode fallback.");
+ if (ust_err_debug_enabled())
+ PERROR("futex");
+ goto end_wait;
+ }
+ }
+end_wait:
+ return;
+
+quit:
+ ust_unlock();
+ return;
+
+error:
+ ust_unlock();
+ return;
+}
+
+/*
+ * This thread does not allocate any resource, except within
+ * handle_message, within mutex protection. This mutex protects against
+ * fork and exit.
+ * It also allocates resources at socket connection time, which is
+ * likewise protected by the mutex.
+ */
+static
+void *ust_listener_thread(void *arg)
+{
+ struct sock_info *sock_info = arg;
+ int sock, ret, prev_connect_failed = 0, has_waited = 0, fd;
+ long timeout;
+
+ lttng_ust_fixup_tls();
+ /*
+ * If available, add '-ust' to the end of this thread's
+ * process name
+ */
+ ret = lttng_ust_setustprocname();
+ if (ret) {
+ ERR("Unable to set UST process name");
+ }
+
+ /* Restart trying to connect to the session daemon */
+restart:
+ if (prev_connect_failed) {
+		/* Wait for sessiond availability with the wait shm futex */
+ wait_for_sessiond(sock_info);
+ if (has_waited) {
+ has_waited = 0;
+ /*
+ * Sleep for 5 seconds before retrying after a
+ * sequence of failure / wait / failure. This
+ * deals with a killed or broken session daemon.
+ */
+ sleep(5);
+ } else {
+ has_waited = 1;
+ }
+ prev_connect_failed = 0;
+ }
+
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ if (sock_info->socket != -1) {
+ /* FD tracker is updated by ustcomm_close_unix_sock() */
+ ret = ustcomm_close_unix_sock(sock_info->socket);
+ if (ret) {
+ ERR("Error closing %s ust cmd socket",
+ sock_info->name);
+ }
+ sock_info->socket = -1;
+ }
+ if (sock_info->notify_socket != -1) {
+ /* FD tracker is updated by ustcomm_close_unix_sock() */
+ ret = ustcomm_close_unix_sock(sock_info->notify_socket);
+ if (ret) {
+ ERR("Error closing %s ust notify socket",
+ sock_info->name);
+ }
+ sock_info->notify_socket = -1;
+ }
+
+
+ /*
+ * Register. We need to perform both connect and sending
+ * registration message before doing the next connect otherwise
+ * we may reach unix socket connect queue max limits and block
+ * on the 2nd connect while the session daemon is awaiting the
+ * first connect registration message.
+ */
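+ /*
+ * The resulting handshake with sessiond, as implemented below:
+ *
+ *	1) connect cmd socket
+ *	2) send registration message on cmd socket
+ *	3) connect notify socket
+ *	4) send registration message on notify socket
+ */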
+ /* Connect cmd socket */
+ lttng_ust_lock_fd_tracker();
+ ret = ustcomm_connect_unix_sock(sock_info->sock_path,
+ get_connect_sock_timeout());
+ if (ret < 0) {
+ lttng_ust_unlock_fd_tracker();
+ DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
+ prev_connect_failed = 1;
+
+ /*
+ * If we cannot find the sessiond daemon, don't delay
+ * constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+ fd = ret;
+ ret = lttng_ust_add_fd_to_tracker(fd);
+ if (ret < 0) {
+ ret = close(fd);
+ if (ret) {
+ PERROR("close on sock_info->socket");
+ }
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ ust_unlock();
+ goto quit;
+ }
+
+ sock_info->socket = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ /*
+ * Create only one root handle per listener thread for the whole
+ * process lifetime, so we ensure we get the ID which is statically
+ * assigned to the root handle.
+ */
+ if (sock_info->root_handle == -1) {
+ ret = lttng_abi_create_root_handle();
+ if (ret < 0) {
+ ERR("Error creating root handle");
+ goto quit;
+ }
+ sock_info->root_handle = ret;
+ }
+
+ ret = register_to_sessiond(sock_info->socket, USTCTL_SOCKET_CMD);
+ if (ret < 0) {
+ ERR("Error registering to %s ust cmd socket",
+ sock_info->name);
+ prev_connect_failed = 1;
+ /*
+ * If we cannot register to the sessiond daemon, don't
+ * delay constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ /* Connect notify socket */
+ lttng_ust_lock_fd_tracker();
+ ret = ustcomm_connect_unix_sock(sock_info->sock_path,
+ get_connect_sock_timeout());
+ if (ret < 0) {
+ lttng_ust_unlock_fd_tracker();
+ DBG("Info: sessiond not accepting connections to %s apps socket", sock_info->name);
+ prev_connect_failed = 1;
+
+ /*
+ * If we cannot find the sessiond daemon, don't delay
+ * constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+
+ fd = ret;
+ ret = lttng_ust_add_fd_to_tracker(fd);
+ if (ret < 0) {
+ ret = close(fd);
+ if (ret) {
+ PERROR("close on sock_info->notify_socket");
+ }
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ ust_unlock();
+ goto quit;
+ }
+
+ sock_info->notify_socket = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ ust_unlock();
+ /*
+ * Unlock/relock ust lock because connect is blocking (with
+ * timeout). Don't delay constructors on the ust lock for too
+ * long.
+ */
+ if (ust_lock()) {
+ goto quit;
+ }
+
+ timeout = get_notify_sock_timeout();
+ if (timeout >= 0) {
+ /*
+ * Give at least 10ms to sessiond to reply to
+ * notifications.
+ */
+ if (timeout < 10)
+ timeout = 10;
+ ret = ustcomm_setsockopt_rcv_timeout(sock_info->notify_socket,
+ timeout);
+ if (ret < 0) {
+ WARN("Error setting socket receive timeout");
+ }
+ ret = ustcomm_setsockopt_snd_timeout(sock_info->notify_socket,
+ timeout);
+ if (ret < 0) {
+ WARN("Error setting socket send timeout");
+ }
+ } else if (timeout < -1) {
+ WARN("Unsupported timeout value %ld", timeout);
+ }
+
+ ret = register_to_sessiond(sock_info->notify_socket,
+ USTCTL_SOCKET_NOTIFY);
+ if (ret < 0) {
+ ERR("Error registering to %s ust notify socket",
+ sock_info->name);
+ prev_connect_failed = 1;
+ /*
+ * If we cannot register to the sessiond daemon, don't
+ * delay constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto restart;
+ }
+ sock = sock_info->socket;
+
+ ust_unlock();
+
+ for (;;) {
+ ssize_t len;
+ struct ustcomm_ust_msg lum;
+
+ len = ustcomm_recv_unix_sock(sock, &lum, sizeof(lum));
+ switch (len) {
+ case 0: /* orderly shutdown */
+ DBG("%s lttng-sessiond has performed an orderly shutdown", sock_info->name);
+ if (ust_lock()) {
+ goto quit;
+ }
+ /*
+ * Either sessiond has shut down or refused us by closing the socket.
+ * In either case, we don't want to delay constructor execution,
+ * and we need to wait before retrying.
+ */
+ prev_connect_failed = 1;
+ /*
+ * If we cannot register to the sessiond daemon, don't
+ * delay constructor execution.
+ */
+ ret = handle_register_failed(sock_info);
+ assert(!ret);
+ ust_unlock();
+ goto end;
+ case sizeof(lum):
+ print_cmd(lum.cmd, lum.handle);
+ ret = handle_message(sock_info, sock, &lum);
+ if (ret) {
+ ERR("Error handling message for %s socket",
+ sock_info->name);
+ /*
+ * Close socket if protocol error is
+ * detected.
+ */
+ goto end;
+ }
+ continue;
+ default:
+ if (len < 0) {
+ DBG("Receive failed from lttng-sessiond with errno %d", (int) -len);
+ } else {
+ DBG("incorrect message size (%s socket): %zd", sock_info->name, len);
+ }
+ if (len == -ECONNRESET) {
+ DBG("%s remote end closed connection", sock_info->name);
+ }
+ goto end;
+ }
+
+ }
+end:
+ if (ust_lock()) {
+ goto quit;
+ }
+ /* Cleanup socket handles before trying to reconnect */
+ lttng_ust_abi_objd_table_owner_cleanup(sock_info);
+ ust_unlock();
+ goto restart; /* try to reconnect */
+
+quit:
+ ust_unlock();
+
+ pthread_mutex_lock(&ust_exit_mutex);
+ sock_info->thread_active = 0;
+ pthread_mutex_unlock(&ust_exit_mutex);
+ return NULL;
+}
+
+/*
+ * Weak symbol to call when the ust malloc wrapper is not loaded.
+ */
+__attribute__((weak))
+void lttng_ust_libc_wrapper_malloc_init(void)
+{
+}
+
+/*
+ * sessiond monitoring thread: monitor presence of global and per-user
+ * sessiond by polling the application common named pipe.
+ */
+static
+void lttng_ust_init(void)
+ __attribute__((constructor));
+static
+void lttng_ust_init(void)
+{
+ struct timespec constructor_timeout;
+ sigset_t sig_all_blocked, orig_parent_mask;
+ pthread_attr_t thread_attr;
+ int timeout_mode;
+ int ret;
+ void *handle;
+
+ if (uatomic_xchg(&initialized, 1) == 1)
+ return;
+
+ /*
+ * Fixup interdependency between TLS fixup mutex (which happens
+ * to be the dynamic linker mutex) and ust_lock, taken within
+ * the ust lock.
+ */
+ lttng_ust_fixup_tls();
+
+ lttng_ust_loaded = 1;
+
+ /*
+ * We need to ensure that the liblttng-ust library is not unloaded to avoid
+ * the unloading of code used by the ust_listener_threads as we cannot
+ * reliably know when they exited. To do that, manually load
+ * liblttng-ust.so to increment the dynamic loader's internal refcount for
+ * this library so it never becomes zero, thus never gets unloaded from the
+ * address space of the process. Since we are already running in the
+ * constructor of the LTTNG_UST_LIB_SONAME library, calling dlopen will
+ * simply increment the refcount and no additional work is needed by the
+ * dynamic loader as the shared library is already loaded in the address
+ * space. As a safeguard, we use the RTLD_NODELETE flag to prevent
+ * unloading of the UST library if its refcount becomes zero (which should
+ * never happen). Do the return value check but discard the handle at the
+ * end of the function as it's not needed.
+ */
+ handle = dlopen(LTTNG_UST_LIB_SONAME, RTLD_LAZY | RTLD_NODELETE);
+ if (!handle) {
+ ERR("dlopen of liblttng-ust shared library (%s).", LTTNG_UST_LIB_SONAME);
+ }
+
+ /*
+ * We want precise control over the order in which we construct
+ * our sub-libraries vs starting to receive commands from
+ * sessiond (otherwise we could get errors when handling commands
+ * from sessiond before the init functions have completed).
+ */
+ ust_err_init();
+ lttng_ust_getenv_init(); /* Needs ust_err_init() to be completed. */
+ lttng_ust_tp_init();
+ lttng_ust_init_fd_tracker();
+ lttng_ust_clock_init();
+ lttng_ust_getcpu_init();
+ lttng_ust_statedump_init();
+ lttng_ust_ring_buffer_clients_init();
+ lttng_ust_counter_clients_init();
+ lttng_perf_counter_init();
+ /*
+ * Invoke ust malloc wrapper init before starting other threads.
+ */
+ lttng_ust_libc_wrapper_malloc_init();
+
+ timeout_mode = get_constructor_timeout(&constructor_timeout);
+
+ get_allow_blocking();
+
+ ret = sem_init(&constructor_wait, 0, 0);
+ if (ret) {
+ PERROR("sem_init");
+ }
+
+ ret = setup_global_apps();
+ if (ret) {
+ assert(global_apps.allowed == 0);
+ DBG("global apps setup returned %d", ret);
+ }
+
+ ret = setup_local_apps();
+ if (ret) {
+ assert(local_apps.allowed == 0);
+ DBG("local apps setup returned %d", ret);
+ }
+
+ /*
+ * A new thread created by pthread_create inherits the signal mask
+ * from the parent. To avoid any signal being received by the
+ * listener thread, we block all signals temporarily in the parent,
+ * while we create the listener thread.
+ */
+ sigfillset(&sig_all_blocked);
+ ret = pthread_sigmask(SIG_SETMASK, &sig_all_blocked, &orig_parent_mask);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+
+ ret = pthread_attr_init(&thread_attr);
+ if (ret) {
+ ERR("pthread_attr_init: %s", strerror(ret));
+ }
+ ret = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_DETACHED);
+ if (ret) {
+ ERR("pthread_attr_setdetachstate: %s", strerror(ret));
+ }
+
+ if (global_apps.allowed) {
+ pthread_mutex_lock(&ust_exit_mutex);
+ ret = pthread_create(&global_apps.ust_listener, &thread_attr,
+ ust_listener_thread, &global_apps);
+ if (ret) {
+ ERR("pthread_create global: %s", strerror(ret));
+ }
+ global_apps.thread_active = 1;
+ pthread_mutex_unlock(&ust_exit_mutex);
+ } else {
+ handle_register_done(&global_apps);
+ }
+
+ if (local_apps.allowed) {
+ pthread_mutex_lock(&ust_exit_mutex);
+ ret = pthread_create(&local_apps.ust_listener, &thread_attr,
+ ust_listener_thread, &local_apps);
+ if (ret) {
+ ERR("pthread_create local: %s", strerror(ret));
+ }
+ local_apps.thread_active = 1;
+ pthread_mutex_unlock(&ust_exit_mutex);
+ } else {
+ handle_register_done(&local_apps);
+ }
+ ret = pthread_attr_destroy(&thread_attr);
+ if (ret) {
+ ERR("pthread_attr_destroy: %s", strerror(ret));
+ }
+
+ /* Restore original signal mask in parent */
+ ret = pthread_sigmask(SIG_SETMASK, &orig_parent_mask, NULL);
+ if (ret) {
+ ERR("pthread_sigmask: %s", strerror(ret));
+ }
+
+ switch (timeout_mode) {
+ case 1: /* timeout wait */
+ do {
+ ret = sem_timedwait(&constructor_wait,
+ &constructor_timeout);
+ } while (ret < 0 && errno == EINTR);
+ if (ret < 0) {
+ switch (errno) {
+ case ETIMEDOUT:
+ ERR("Timed out waiting for lttng-sessiond");
+ break;
+ case EINVAL:
+ PERROR("sem_timedwait");
+ break;
+ default:
+ ERR("Unexpected error \"%s\" returned by sem_timedwait",
+ strerror(errno));
+ }
+ }
+ break;
+ case -1:/* wait forever */
+ do {
+ ret = sem_wait(&constructor_wait);
+ } while (ret < 0 && errno == EINTR);
+ if (ret < 0) {
+ switch (errno) {
+ case EINVAL:
+ PERROR("sem_wait");
+ break;
+ default:
+ ERR("Unexpected error \"%s\" returned by sem_wait",
+ strerror(errno));
+ }
+ }
+ break;
+ case 0: /* no timeout */
+ break;
+ }
+}
+
+static
+void lttng_ust_cleanup(int exiting)
+{
+ cleanup_sock_info(&global_apps, exiting);
+ cleanup_sock_info(&local_apps, exiting);
+ local_apps.allowed = 0;
+ global_apps.allowed = 0;
+ /*
+ * The teardown in this function all affect data structures
+ * accessed under the UST lock by the listener thread. This
+ * lock, along with the lttng_ust_comm_should_quit flag, ensure
+ * that none of these threads are accessing this data at this
+ * point.
+ */
+ lttng_ust_abi_exit();
+ lttng_ust_abi_events_exit();
+ lttng_perf_counter_exit();
+ lttng_ust_ring_buffer_clients_exit();
+ lttng_ust_counter_clients_exit();
+ lttng_ust_statedump_destroy();
+ lttng_ust_tp_exit();
+ if (!exiting) {
+ /* Reinitialize values for fork */
+ sem_count = sem_count_initial_value;
+ lttng_ust_comm_should_quit = 0;
+ initialized = 0;
+ }
+}
+
+static
+void lttng_ust_exit(void)
+ __attribute__((destructor));
+static
+void lttng_ust_exit(void)
+{
+ int ret;
+
+ /*
+ * Using pthread_cancel here because:
+ * A) we don't want to hang application teardown.
+ * B) the thread is not allocating any resource.
+ */
+
+ /*
+ * Require the communication thread to quit. Synchronize with
+ * mutexes to ensure it is not in a mutex critical section when
+ * pthread_cancel is later called.
+ */
+ ust_lock_nocheck();
+ lttng_ust_comm_should_quit = 1;
+ ust_unlock();
+
+ pthread_mutex_lock(&ust_exit_mutex);
+ /* cancel threads */
+ if (global_apps.thread_active) {
+ ret = pthread_cancel(global_apps.ust_listener);
+ if (ret) {
+ ERR("Error cancelling global ust listener thread: %s",
+ strerror(ret));
+ } else {
+ global_apps.thread_active = 0;
+ }
+ }
+ if (local_apps.thread_active) {
+ ret = pthread_cancel(local_apps.ust_listener);
+ if (ret) {
+ ERR("Error cancelling local ust listener thread: %s",
+ strerror(ret));
+ } else {
+ local_apps.thread_active = 0;
+ }
+ }
+ pthread_mutex_unlock(&ust_exit_mutex);
+
+ /*
+ * Do NOT join threads: use of sys_futex makes it impossible to
+ * join the threads without using async-cancel, but async-cancel
+ * is delivered by a signal, which could hit the target thread
+ * anywhere in its code path, including while the ust_lock() is
+ * held, causing a deadlock for the other thread. Let the OS
+ * clean up the threads if they are stalled in a syscall.
+ */
+ lttng_ust_cleanup(1);
+}
+
+static
+void ust_context_ns_reset(void)
+{
+ lttng_context_pid_ns_reset();
+ lttng_context_cgroup_ns_reset();
+ lttng_context_ipc_ns_reset();
+ lttng_context_mnt_ns_reset();
+ lttng_context_net_ns_reset();
+ lttng_context_user_ns_reset();
+ lttng_context_time_ns_reset();
+ lttng_context_uts_ns_reset();
+}
+
+static
+void ust_context_vuids_reset(void)
+{
+ lttng_context_vuid_reset();
+ lttng_context_veuid_reset();
+ lttng_context_vsuid_reset();
+}
+
+static
+void ust_context_vgids_reset(void)
+{
+ lttng_context_vgid_reset();
+ lttng_context_vegid_reset();
+ lttng_context_vsgid_reset();
+}
+
+/*
+ * We exclude the worker threads across fork and clone (except
+ * CLONE_VM), because these system calls only keep the forking thread
+ * running in the child. Therefore, we don't want to call fork or clone
+ * in the middle of a tracepoint or UST tracing state modification.
+ * Holding this mutex protects these structures across fork and clone.
+ */
+void lttng_ust_before_fork(sigset_t *save_sigset)
+{
+ /*
+ * Disable signals. This is to avoid that the child intervenes
+ * before it is properly setup for tracing. It is safer to
+ * disable all signals, because then we know we are not breaking
+ * anything by restoring the original mask.
+ */
+ sigset_t all_sigs;
+ int ret;
+
+ /* Fixup lttng-ust TLS. */
+ lttng_ust_fixup_tls();
+
+ if (URCU_TLS(lttng_ust_nest_count))
+ return;
+ /* Disable signals */
+ sigfillset(&all_sigs);
+ ret = sigprocmask(SIG_BLOCK, &all_sigs, save_sigset);
+ if (ret == -1) {
+ PERROR("sigprocmask");
+ }
+
+ pthread_mutex_lock(&ust_fork_mutex);
+
+ ust_lock_nocheck();
+ lttng_ust_urcu_before_fork();
+ lttng_ust_lock_fd_tracker();
+ lttng_perf_lock();
+}
+
+static void ust_after_fork_common(sigset_t *restore_sigset)
+{
+ int ret;
+
+ DBG("process %d", getpid());
+ lttng_perf_unlock();
+ lttng_ust_unlock_fd_tracker();
+ ust_unlock();
+
+ pthread_mutex_unlock(&ust_fork_mutex);
+
+ /* Restore signals */
+ ret = sigprocmask(SIG_SETMASK, restore_sigset, NULL);
+ if (ret == -1) {
+ PERROR("sigprocmask");
+ }
+}
+
+void lttng_ust_after_fork_parent(sigset_t *restore_sigset)
+{
+ if (URCU_TLS(lttng_ust_nest_count))
+ return;
+ DBG("process %d", getpid());
+ lttng_ust_urcu_after_fork_parent();
+ /* Release mutexes and reenable signals */
+ ust_after_fork_common(restore_sigset);
+}
+
+/*
+ * After fork, in the child, we need to clean up all the leftover state,
+ * except the worker thread which already magically disappeared thanks
+ * to the weird Linux fork semantics. After tidying up, we call
+ * lttng_ust_init() again to start over as a new PID.
+ *
+ * This is meant for fork() calls that trace in the child between the
+ * fork and the following exec call (if any).
+ */
+void lttng_ust_after_fork_child(sigset_t *restore_sigset)
+{
+ if (URCU_TLS(lttng_ust_nest_count))
+ return;
+ lttng_context_vpid_reset();
+ lttng_context_vtid_reset();
+ lttng_ust_context_procname_reset();
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+ DBG("process %d", getpid());
+ /* Release urcu mutexes */
+ lttng_ust_urcu_after_fork_child();
+ lttng_ust_cleanup(0);
+ /* Release mutexes and reenable signals */
+ ust_after_fork_common(restore_sigset);
+ lttng_ust_init();
+}
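+
+/*
+ * A minimal sketch of how a fork wrapper is expected to use the hooks
+ * above (editor's example; everything except the lttng_ust_* hooks is
+ * hypothetical):
+ *
+ *	pid_t traced_fork(void)
+ *	{
+ *		sigset_t sigset;
+ *		pid_t pid;
+ *
+ *		lttng_ust_before_fork(&sigset);
+ *		pid = fork();
+ *		if (pid == 0)
+ *			lttng_ust_after_fork_child(&sigset);
+ *		else
+ *			lttng_ust_after_fork_parent(&sigset);
+ *		return pid;
+ *	}
+ */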
+
+void lttng_ust_after_setns(void)
+{
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_unshare(void)
+{
+ ust_context_ns_reset();
+ ust_context_vuids_reset();
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_seteuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_setreuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_setresuid(void)
+{
+ ust_context_vuids_reset();
+}
+
+void lttng_ust_after_setgid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setegid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setregid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_after_setresgid(void)
+{
+ ust_context_vgids_reset();
+}
+
+void lttng_ust_sockinfo_session_enabled(void *owner)
+{
+ struct sock_info *sock_info = owner;
+ sock_info->statedump_pending = 1;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * UST dynamic type implementation.
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <inttypes.h>
+
+#include <ust-helper.h>
+#include <ust-dynamic-type.h>
+
+#define ctf_enum_value(_string, _value) \
+ __LTTNG_COMPOUND_LITERAL(struct lttng_ust_enum_entry, { \
+ .struct_size = sizeof(struct lttng_ust_enum_entry), \
+ .start = { \
+ .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
+ .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
+ (long long) (_value) : (_value), \
+ }, \
+ .end = { \
+ .signedness = lttng_ust_is_signed_type(__typeof__(_value)), \
+ .value = lttng_ust_is_signed_type(__typeof__(_value)) ? \
+ (long long) (_value) : (_value), \
+ }, \
+ .string = (_string), \
+ }),
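+
+/*
+ * For instance, ctf_enum_value("_none", 0) expands (roughly) to a
+ * pointer to the compound literal
+ *
+ *	(struct lttng_ust_enum_entry) {
+ *		.struct_size = sizeof(struct lttng_ust_enum_entry),
+ *		.start = { .signedness = 1, .value = 0 },
+ *		.end = { .signedness = 1, .value = 0 },
+ *		.string = "_none",
+ *	}
+ *
+ * followed by a trailing comma, which is why the dt_enum entries below
+ * need no explicit separators.
+ */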
+
+static const struct lttng_ust_enum_entry *dt_enum[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
+ [LTTNG_UST_DYNAMIC_TYPE_NONE] = ctf_enum_value("_none", 0)
+ [LTTNG_UST_DYNAMIC_TYPE_S8] = ctf_enum_value("_int8", 1)
+ [LTTNG_UST_DYNAMIC_TYPE_S16] = ctf_enum_value("_int16", 2)
+ [LTTNG_UST_DYNAMIC_TYPE_S32] = ctf_enum_value("_int32", 3)
+ [LTTNG_UST_DYNAMIC_TYPE_S64] = ctf_enum_value("_int64", 4)
+ [LTTNG_UST_DYNAMIC_TYPE_U8] = ctf_enum_value("_uint8", 5)
+ [LTTNG_UST_DYNAMIC_TYPE_U16] = ctf_enum_value("_uint16", 6)
+ [LTTNG_UST_DYNAMIC_TYPE_U32] = ctf_enum_value("_uint32", 7)
+ [LTTNG_UST_DYNAMIC_TYPE_U64] = ctf_enum_value("_uint64", 8)
+ [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = ctf_enum_value("_float", 9)
+ [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = ctf_enum_value("_double", 10)
+ [LTTNG_UST_DYNAMIC_TYPE_STRING] = ctf_enum_value("_string", 11)
+};
+
+static struct lttng_ust_enum_desc dt_enum_desc = {
+ .name = "dynamic_type_enum",
+ .entries = dt_enum,
+ .nr_entries = LTTNG_ARRAY_SIZE(dt_enum),
+};
+
+const struct lttng_ust_event_field *dt_var_fields[_NR_LTTNG_UST_DYNAMIC_TYPES] = {
+ [LTTNG_UST_DYNAMIC_TYPE_NONE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "none",
+ .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_struct, {
+ .parent = {
+ .type = lttng_ust_type_struct,
+ },
+ .struct_size = sizeof(struct lttng_ust_type_struct),
+ .nr_fields = 0, /* empty struct */
+ .alignment = 0,
+ }),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int8",
+ .type = lttng_ust_type_integer_define(int8_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int16",
+ .type = lttng_ust_type_integer_define(int16_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int32",
+ .type = lttng_ust_type_integer_define(int32_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_S64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "int64",
+ .type = lttng_ust_type_integer_define(int64_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U8] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint8",
+ .type = lttng_ust_type_integer_define(uint8_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U16] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint16",
+ .type = lttng_ust_type_integer_define(uint16_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U32] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint32",
+ .type = lttng_ust_type_integer_define(uint32_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_U64] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "uint64",
+ .type = lttng_ust_type_integer_define(uint64_t, BYTE_ORDER, 10),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_FLOAT] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "float",
+ .type = lttng_ust_type_float_define(float),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_DOUBLE] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "double",
+ .type = lttng_ust_type_float_define(double),
+ .nowrite = 0,
+ }),
+ [LTTNG_UST_DYNAMIC_TYPE_STRING] = __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = "string",
+ .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_string, {
+ .parent = {
+ .type = lttng_ust_type_string,
+ },
+ .struct_size = sizeof(struct lttng_ust_type_string),
+ .encoding = lttng_ust_string_encoding_UTF8,
+ }),
+ .nowrite = 0,
+ }),
+};
+
+static const struct lttng_ust_event_field dt_enum_field = {
+ .struct_size = sizeof(struct lttng_ust_event_field),
+ .name = NULL,
+ .type = (struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(struct lttng_ust_type_enum, {
+ .parent = {
+ .type = lttng_ust_type_enum,
+ },
+ .struct_size = sizeof(struct lttng_ust_type_enum),
+ .desc = &dt_enum_desc,
+ .container_type = lttng_ust_type_integer_define(char, BYTE_ORDER, 10),
+ }),
+ .nowrite = 0,
+};
+
+const struct lttng_ust_event_field *lttng_ust_dynamic_type_field(int64_t value)
+{
+ if (value >= _NR_LTTNG_UST_DYNAMIC_TYPES || value < 0)
+ return NULL;
+ return dt_var_fields[value];
+}
+
+int lttng_ust_dynamic_type_choices(size_t *nr_choices, const struct lttng_ust_event_field ***choices)
+{
+ *nr_choices = _NR_LTTNG_UST_DYNAMIC_TYPES;
+ *choices = dt_var_fields;
+ return 0;
+}
+
+const struct lttng_ust_event_field *lttng_ust_dynamic_type_tag_field(void)
+{
+ return &dt_enum_field;
+}
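+
+/*
+ * Usage sketch (editor's example): mapping a dynamic type tag to its
+ * variant field description:
+ *
+ *	const struct lttng_ust_event_field *field;
+ *
+ *	field = lttng_ust_dynamic_type_field(LTTNG_UST_DYNAMIC_TYPE_S32);
+ *	if (field)
+ *		printf("selected field: %s\n", field->name);	// prints "int32"
+ */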
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <fcntl.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <lttng/ust-utils.h>
+
+#include <ust-elf.h>
+#include <ust-fd.h>
+
+#include "lttng-tracer-core.h"
+#include "lttng-ust-elf.h"
+#include "ust-helper.h"
+
+#define BUF_LEN 4096
+
+#ifndef NT_GNU_BUILD_ID
+# define NT_GNU_BUILD_ID 3
+#endif
+
+/*
+ * Retrieve the nth (where n is the `index` argument) phdr (program
+ * header) from the given elf instance.
+ *
+ * A pointer to the phdr is returned on success, NULL on failure.
+ */
+static
+struct lttng_ust_elf_phdr *lttng_ust_elf_get_phdr(struct lttng_ust_elf *elf,
+ uint16_t index)
+{
+ struct lttng_ust_elf_phdr *phdr = NULL;
+ off_t offset;
+
+ if (!elf) {
+ goto error;
+ }
+
+ if (index >= elf->ehdr->e_phnum) {
+ goto error;
+ }
+
+ phdr = zmalloc(sizeof(struct lttng_ust_elf_phdr));
+ if (!phdr) {
+ goto error;
+ }
+
+ offset = (off_t) elf->ehdr->e_phoff
+ + (off_t) index * elf->ehdr->e_phentsize;
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ if (is_elf_32_bit(elf)) {
+ Elf32_Phdr elf_phdr;
+
+ if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
+ < sizeof(elf_phdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_phdr(elf_phdr);
+ }
+ copy_phdr(elf_phdr, *phdr);
+ } else {
+ Elf64_Phdr elf_phdr;
+
+ if (lttng_ust_read(elf->fd, &elf_phdr, sizeof(elf_phdr))
+ < sizeof(elf_phdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_phdr(elf_phdr);
+ }
+ copy_phdr(elf_phdr, *phdr);
+ }
+
+ return phdr;
+
+error:
+ free(phdr);
+ return NULL;
+}
+
+/*
+ * Retrieve the nth (where n is the `index` argument) shdr (section
+ * header) from the given elf instance.
+ *
+ * A pointer to the shdr is returned on success, NULL on failure.
+ */
+static
+struct lttng_ust_elf_shdr *lttng_ust_elf_get_shdr(struct lttng_ust_elf *elf,
+ uint16_t index)
+{
+ struct lttng_ust_elf_shdr *shdr = NULL;
+ off_t offset;
+
+ if (!elf) {
+ goto error;
+ }
+
+ if (index >= elf->ehdr->e_shnum) {
+ goto error;
+ }
+
+ shdr = zmalloc(sizeof(struct lttng_ust_elf_shdr));
+ if (!shdr) {
+ goto error;
+ }
+
+ offset = (off_t) elf->ehdr->e_shoff
+ + (off_t) index * elf->ehdr->e_shentsize;
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ if (is_elf_32_bit(elf)) {
+ Elf32_Shdr elf_shdr;
+
+ if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
+ < sizeof(elf_shdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_shdr(elf_shdr);
+ }
+ copy_shdr(elf_shdr, *shdr);
+ } else {
+ Elf64_Shdr elf_shdr;
+
+ if (lttng_ust_read(elf->fd, &elf_shdr, sizeof(elf_shdr))
+ < sizeof(elf_shdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_shdr(elf_shdr);
+ }
+ copy_shdr(elf_shdr, *shdr);
+ }
+
+ return shdr;
+
+error:
+ free(shdr);
+ return NULL;
+}
+
+/*
+ * Lookup a section's name from a given offset (usually from an shdr's
+ * sh_name value) in bytes relative to the beginning of the section
+ * names string table.
+ *
+ * If no name is found, NULL is returned.
+ */
+static
+char *lttng_ust_elf_get_section_name(struct lttng_ust_elf *elf, off_t offset)
+{
+ char *name = NULL;
+ size_t len = 0, to_read; /* len does not include \0 */
+
+ if (!elf) {
+ goto error;
+ }
+
+ if (offset >= elf->section_names_size) {
+ goto error;
+ }
+
+ if (lseek(elf->fd, elf->section_names_offset + offset, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ to_read = elf->section_names_size - offset;
+
+ /* Find first \0 after or at current location, remember len. */
+ for (;;) {
+ char buf[BUF_LEN];
+ ssize_t read_len;
+ size_t i;
+
+ if (!to_read) {
+ goto error;
+ }
+ read_len = lttng_ust_read(elf->fd, buf,
+ min_t(size_t, BUF_LEN, to_read));
+ if (read_len <= 0) {
+ goto error;
+ }
+ for (i = 0; i < read_len; i++) {
+ if (buf[i] == '\0') {
+ len += i;
+ goto end;
+ }
+ }
+ len += read_len;
+ to_read -= read_len;
+ }
+end:
+ name = zmalloc(sizeof(char) * (len + 1)); /* + 1 for \0 */
+ if (!name) {
+ goto error;
+ }
+ if (lseek(elf->fd, elf->section_names_offset + offset,
+ SEEK_SET) < 0) {
+ goto error;
+ }
+ if (lttng_ust_read(elf->fd, name, len + 1) < len + 1) {
+ goto error;
+ }
+
+ return name;
+
+error:
+ free(name);
+ return NULL;
+}
+
+/*
+ * Create an instance of lttng_ust_elf for the ELF file located at
+ * `path`.
+ *
+ * Return a pointer to the instance on success, NULL on failure.
+ */
+struct lttng_ust_elf *lttng_ust_elf_create(const char *path)
+{
+ uint8_t e_ident[EI_NIDENT];
+ struct lttng_ust_elf_shdr *section_names_shdr;
+ struct lttng_ust_elf *elf = NULL;
+ int ret, fd;
+
+ elf = zmalloc(sizeof(struct lttng_ust_elf));
+ if (!elf) {
+ goto error;
+ }
+
+ /* Initialize fd field to -1. 0 is a valid fd number */
+ elf->fd = -1;
+
+ elf->path = strdup(path);
+ if (!elf->path) {
+ goto error;
+ }
+
+ lttng_ust_lock_fd_tracker();
+ fd = open(elf->path, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+
+ ret = lttng_ust_add_fd_to_tracker(fd);
+ if (ret < 0) {
+ ret = close(fd);
+ if (ret) {
+ PERROR("close on elf->fd");
+ }
+ ret = -1;
+ lttng_ust_unlock_fd_tracker();
+ goto error;
+ }
+ elf->fd = ret;
+ lttng_ust_unlock_fd_tracker();
+
+ if (lttng_ust_read(elf->fd, e_ident, EI_NIDENT) < EI_NIDENT) {
+ goto error;
+ }
+ elf->bitness = e_ident[EI_CLASS];
+ elf->endianness = e_ident[EI_DATA];
+
+ if (lseek(elf->fd, 0, SEEK_SET) < 0) {
+ goto error;
+ }
+
+ elf->ehdr = zmalloc(sizeof(struct lttng_ust_elf_ehdr));
+ if (!elf->ehdr) {
+ goto error;
+ }
+
+ if (is_elf_32_bit(elf)) {
+ Elf32_Ehdr elf_ehdr;
+
+ if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
+ < sizeof(elf_ehdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_ehdr(elf_ehdr);
+ }
+ copy_ehdr(elf_ehdr, *(elf->ehdr));
+ } else {
+ Elf64_Ehdr elf_ehdr;
+
+ if (lttng_ust_read(elf->fd, &elf_ehdr, sizeof(elf_ehdr))
+ < sizeof(elf_ehdr)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ bswap_ehdr(elf_ehdr);
+ }
+ copy_ehdr(elf_ehdr, *(elf->ehdr));
+ }
+
+ section_names_shdr = lttng_ust_elf_get_shdr(elf, elf->ehdr->e_shstrndx);
+ if (!section_names_shdr) {
+ goto error;
+ }
+
+ elf->section_names_offset = section_names_shdr->sh_offset;
+ elf->section_names_size = section_names_shdr->sh_size;
+
+ free(section_names_shdr);
+ return elf;
+
+error:
+ lttng_ust_elf_destroy(elf);
+ return NULL;
+}
+
+/*
+ * Test whether the ELF file is position independent code (PIC).
+ */
+uint8_t lttng_ust_elf_is_pic(struct lttng_ust_elf *elf)
+{
+ /*
+ * PIC has an e_type value of ET_DYN, see ELF specification
+ * version 1.1 p. 1-3.
+ */
+ return elf->ehdr->e_type == ET_DYN;
+}
+
+/*
+ * Destroy the given lttng_ust_elf instance.
+ */
+void lttng_ust_elf_destroy(struct lttng_ust_elf *elf)
+{
+ int ret;
+
+ if (!elf) {
+ return;
+ }
+
+ if (elf->fd >= 0) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(elf->fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(elf->fd);
+ } else {
+ PERROR("close");
+ abort();
+ }
+ lttng_ust_unlock_fd_tracker();
+ }
+
+ free(elf->ehdr);
+ free(elf->path);
+ free(elf);
+}
+
+/*
+ * Compute the total in-memory size of the ELF file, in bytes.
+ *
+ * Returns 0 if successful, -1 if not. On success, the memory size is
+ * returned through the out parameter `memsz`.
+ */
+int lttng_ust_elf_get_memsz(struct lttng_ust_elf *elf, uint64_t *memsz)
+{
+ uint16_t i;
+ uint64_t low_addr = UINT64_MAX, high_addr = 0;
+
+ if (!elf || !memsz) {
+ goto error;
+ }
+
+ for (i = 0; i < elf->ehdr->e_phnum; ++i) {
+ struct lttng_ust_elf_phdr *phdr;
+
+ phdr = lttng_ust_elf_get_phdr(elf, i);
+ if (!phdr) {
+ goto error;
+ }
+
+ /*
+ * Only PT_LOAD segments contribute to memsz. Skip
+ * other segments.
+ */
+ if (phdr->p_type != PT_LOAD) {
+ goto next_loop;
+ }
+
+ low_addr = min_t(uint64_t, low_addr, phdr->p_vaddr);
+ high_addr = max_t(uint64_t, high_addr,
+ phdr->p_vaddr + phdr->p_memsz);
+ next_loop:
+ free(phdr);
+ }
+
+ if (high_addr < low_addr) {
+ /* No PT_LOAD segments or corrupted data. */
+ goto error;
+ }
+
+ *memsz = high_addr - low_addr;
+ return 0;
+error:
+ return -1;
+}
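+
+/*
+ * Worked example: for an ELF file with two PT_LOAD segments, one with
+ * p_vaddr = 0x0 and p_memsz = 0x1000 and one with p_vaddr = 0x200000
+ * and p_memsz = 0x800, the loop above computes low_addr = 0x0 and
+ * high_addr = 0x200800, hence *memsz = 0x200800 bytes.
+ */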
+
+/*
+ * Internal method used to try and get the build_id from a PT_NOTE
+ * segment ranging from `offset` to `segment_end`.
+ *
+ * If the function returns successfully and the build id was found in
+ * the segment, the out parameters `build_id` and `length` will have
+ * been set with the retrieved information; otherwise they are left
+ * untouched.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+static
+int lttng_ust_elf_get_build_id_from_segment(
+ struct lttng_ust_elf *elf, uint8_t **build_id, size_t *length,
+ off_t offset, off_t segment_end)
+{
+ uint8_t *_build_id = NULL; /* Silence old gcc warning. */
+ size_t _length = 0; /* Silence old gcc warning. */
+
+ while (offset < segment_end) {
+ struct lttng_ust_elf_nhdr nhdr;
+ size_t read_len;
+
+ /* Align start of note entry */
+ offset += lttng_ust_offset_align(offset, ELF_NOTE_ENTRY_ALIGN);
+ if (offset >= segment_end) {
+ break;
+ }
+ /*
+ * We seek manually because if the note isn't the
+ * build id the data following the header will not
+ * have been read.
+ */
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+ if (lttng_ust_read(elf->fd, &nhdr, sizeof(nhdr))
+ < sizeof(nhdr)) {
+ goto error;
+ }
+
+ if (!is_elf_native_endian(elf)) {
+ nhdr.n_namesz = bswap_32(nhdr.n_namesz);
+ nhdr.n_descsz = bswap_32(nhdr.n_descsz);
+ nhdr.n_type = bswap_32(nhdr.n_type);
+ }
+
+ offset += sizeof(nhdr) + nhdr.n_namesz;
+ /* Align start of desc entry */
+ offset += lttng_ust_offset_align(offset, ELF_NOTE_DESC_ALIGN);
+
+ if (nhdr.n_type != NT_GNU_BUILD_ID) {
+ /*
+ * Ignore non build id notes but still
+ * increase the offset.
+ */
+ offset += nhdr.n_descsz;
+ continue;
+ }
+
+ _length = nhdr.n_descsz;
+ _build_id = zmalloc(sizeof(uint8_t) * _length);
+ if (!_build_id) {
+ goto error;
+ }
+
+ if (lseek(elf->fd, offset, SEEK_SET) < 0) {
+ goto error;
+ }
+ read_len = sizeof(*_build_id) * _length;
+ if (lttng_ust_read(elf->fd, _build_id, read_len) < read_len) {
+ goto error;
+ }
+
+ break;
+ }
+
+ if (_build_id) {
+ *build_id = _build_id;
+ *length = _length;
+ }
+
+ return 0;
+error:
+ free(_build_id);
+ return -1;
+}
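+
+/*
+ * On-disk layout of each note parsed above, with 4-byte alignment
+ * between the variable-size parts:
+ *
+ *	| n_namesz | n_descsz | n_type |   <- struct lttng_ust_elf_nhdr
+ *	| name ("GNU\0"), padded        |
+ *	| desc (the build id bytes)     |
+ *
+ * The build id note has n_type == NT_GNU_BUILD_ID; its desc is 16 or
+ * 20 bytes depending on how it was generated (e.g. md5 vs sha1).
+ */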
+
+/*
+ * Retrieve a build ID (an array of bytes) from the corresponding
+ * section in the ELF file. The length of the build ID can be either
+ * 16 or 20 bytes depending on the method used to generate it, hence
+ * the length out parameter.
+ *
+ * If the function returns successfully, the out parameter `found`
+ * indicates whether the build id information was present in the ELF
+ * file or not. If `found` is not 0, the out parameters `build_id` and
+ * `length` will both have been set with the retrieved information.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+int lttng_ust_elf_get_build_id(struct lttng_ust_elf *elf, uint8_t **build_id,
+ size_t *length, int *found)
+{
+ uint16_t i;
+ uint8_t *_build_id = NULL; /* Silence old gcc warning. */
+ size_t _length = 0; /* Silence old gcc warning. */
+
+ if (!elf || !build_id || !length || !found) {
+ goto error;
+ }
+
+ for (i = 0; i < elf->ehdr->e_phnum; ++i) {
+ off_t offset, segment_end;
+ struct lttng_ust_elf_phdr *phdr;
+ int ret = 0;
+
+ phdr = lttng_ust_elf_get_phdr(elf, i);
+ if (!phdr) {
+ goto error;
+ }
+
+ /* Build ID will be contained in a PT_NOTE segment. */
+ if (phdr->p_type != PT_NOTE) {
+ goto next_loop;
+ }
+
+ offset = phdr->p_offset;
+ segment_end = offset + phdr->p_filesz;
+ ret = lttng_ust_elf_get_build_id_from_segment(
+ elf, &_build_id, &_length, offset, segment_end);
+ next_loop:
+ free(phdr);
+ if (ret) {
+ goto error;
+ }
+ if (_build_id) {
+ break;
+ }
+ }
+
+ if (_build_id) {
+ *build_id = _build_id;
+ *length = _length;
+ *found = 1;
+ } else {
+ *found = 0;
+ }
+
+ return 0;
+error:
+ free(_build_id);
+ return -1;
+}
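+
+/*
+ * Usage sketch (editor's example) tying the API together:
+ *
+ *	struct lttng_ust_elf *elf;
+ *	uint8_t *build_id = NULL;
+ *	size_t length = 0;
+ *	int found = 0;
+ *
+ *	elf = lttng_ust_elf_create("/usr/bin/some-binary");
+ *	if (elf && !lttng_ust_elf_get_build_id(elf, &build_id, &length,
+ *			&found) && found) {
+ *		// build_id points to `length` bytes owned by the caller.
+ *		free(build_id);
+ *	}
+ *	lttng_ust_elf_destroy(elf);	// accepts NULL
+ */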
+
+/*
+ * Try to retrieve filename and CRC from given ELF section `shdr`.
+ *
+ * If the function returns successfully and the debug link was found in
+ * the section, the out parameters `filename` and `crc` will have been
+ * set with the retrieved information; otherwise they are left untouched.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+static
+int lttng_ust_elf_get_debug_link_from_section(struct lttng_ust_elf *elf,
+ char **filename, uint32_t *crc,
+ struct lttng_ust_elf_shdr *shdr)
+{
+ char *_filename = NULL; /* Silence old gcc warning. */
+ size_t filename_len;
+ char *section_name = NULL;
+ uint32_t _crc = 0; /* Silence old gcc warning. */
+
+ if (!elf || !filename || !crc || !shdr) {
+ goto error;
+ }
+
+ /*
+ * The .gnu_debuglink section is of type SHT_PROGBITS,
+ * skip the other sections.
+ */
+ if (shdr->sh_type != SHT_PROGBITS) {
+ goto end;
+ }
+
+ section_name = lttng_ust_elf_get_section_name(elf,
+ shdr->sh_name);
+ if (!section_name) {
+ goto end;
+ }
+ if (strcmp(section_name, ".gnu_debuglink")) {
+ goto end;
+ }
+
+ /*
+ * The length of the filename is the sh_size excluding the CRC
+ * which comes after it in the section.
+ */
+ _filename = zmalloc(sizeof(char) * (shdr->sh_size - ELF_CRC_SIZE));
+ if (!_filename) {
+ goto error;
+ }
+ if (lseek(elf->fd, shdr->sh_offset, SEEK_SET) < 0) {
+ goto error;
+ }
+ filename_len = sizeof(*_filename) * (shdr->sh_size - ELF_CRC_SIZE);
+ if (lttng_ust_read(elf->fd, _filename, filename_len) < filename_len) {
+ goto error;
+ }
+ if (lttng_ust_read(elf->fd, &_crc, sizeof(_crc)) < sizeof(_crc)) {
+ goto error;
+ }
+ if (!is_elf_native_endian(elf)) {
+ _crc = bswap_32(_crc);
+ }
+
+end:
+ free(section_name);
+ if (_filename) {
+ *filename = _filename;
+ *crc = _crc;
+ }
+
+ return 0;
+
+error:
+ free(_filename);
+ free(section_name);
+ return -1;
+}
+
+/*
+ * Retrieve filename and CRC from ELF's .gnu_debuglink section, if any.
+ *
+ * If the function returns successfully, the out parameter `found`
+ * indicates whether the debug link information was present in the ELF
+ * file or not. If `found` is not 0, the out parameters `filename` and
+ * `crc` will both have been set with the retrieved information.
+ *
+ * Returns 0 on success, -1 if an error occurred.
+ */
+int lttng_ust_elf_get_debug_link(struct lttng_ust_elf *elf, char **filename,
+ uint32_t *crc, int *found)
+{
+ int ret;
+ uint16_t i;
+ char *_filename = NULL; /* Silence old gcc warning. */
+ uint32_t _crc = 0; /* Silence old gcc warning. */
+
+ if (!elf || !filename || !crc || !found) {
+ goto error;
+ }
+
+ for (i = 0; i < elf->ehdr->e_shnum; ++i) {
+ struct lttng_ust_elf_shdr *shdr = NULL;
+
+ shdr = lttng_ust_elf_get_shdr(elf, i);
+ if (!shdr) {
+ goto error;
+ }
+
+ ret = lttng_ust_elf_get_debug_link_from_section(
+ elf, &_filename, &_crc, shdr);
+ free(shdr);
+
+ if (ret) {
+ goto error;
+ }
+ if (_filename) {
+ break;
+ }
+ }
+
+ if (_filename) {
+ *filename = _filename;
+ *crc = _crc;
+ *found = 1;
+ } else {
+ *found = 0;
+ }
+
+ return 0;
+
+error:
+ free(_filename);
+ return -1;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#ifndef _LIB_LTTNG_UST_ELF_H
+#define _LIB_LTTNG_UST_ELF_H
+
+#include <elf.h>
+#include <lttng/ust-endian.h>
+
+/*
+ * Determine native endianness in order to convert when reading an ELF
+ * file if there is a mismatch.
+ */
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define NATIVE_ELF_ENDIANNESS ELFDATA2LSB
+#else
+#define NATIVE_ELF_ENDIANNESS ELFDATA2MSB
+#endif
+
+/*
+ * The size in bytes of the debug link CRC as contained in an ELF
+ * section.
+ */
+#define ELF_CRC_SIZE 4
+/*
+ * ELF notes are aligned on 4 bytes. ref: ELF specification version
+ * 1.1 p. 2-5.
+ */
+#define ELF_NOTE_ENTRY_ALIGN 4
+/*
+ * Within an ELF note, the `desc` field is also aligned on 4
+ * bytes. ref: ELF specification version 1.1 p. 2-5.
+ */
+#define ELF_NOTE_DESC_ALIGN 4
+
+#define bswap(x) \
+ do { \
+ switch (sizeof(x)) { \
+ case 8: \
+ x = bswap_64(x); \
+ break; \
+ case 4: \
+ x = bswap_32(x); \
+ break; \
+ case 2: \
+ x = bswap_16(x); \
+ break; \
+ case 1: \
+ break; \
+ default: \
+ abort(); \
+ } \
+ } while (0)
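+
+/*
+ * Since bswap() dispatches on sizeof(x), the same macro handles every
+ * header field width. For example, with a 16-bit field read from a
+ * foreign-endian file:
+ *
+ *	uint16_t e_type = 0x0300;
+ *	bswap(e_type);	// e_type == 0x0003 (ET_DYN)
+ */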
+
+#define bswap_phdr(phdr) \
+ do { \
+ bswap((phdr).p_type); \
+ bswap((phdr).p_offset); \
+ bswap((phdr).p_filesz); \
+ bswap((phdr).p_memsz); \
+ bswap((phdr).p_align); \
+ bswap((phdr).p_vaddr); \
+ } while (0)
+
+#define bswap_shdr(shdr) \
+ do { \
+ bswap((shdr).sh_name); \
+ bswap((shdr).sh_type); \
+ bswap((shdr).sh_flags); \
+ bswap((shdr).sh_addr); \
+ bswap((shdr).sh_offset); \
+ bswap((shdr).sh_size); \
+ bswap((shdr).sh_link); \
+ bswap((shdr).sh_info); \
+ bswap((shdr).sh_addralign); \
+ bswap((shdr).sh_entsize); \
+ } while (0)
+
+#define bswap_ehdr(ehdr) \
+ do { \
+ bswap((ehdr).e_type); \
+ bswap((ehdr).e_machine); \
+ bswap((ehdr).e_version); \
+ bswap((ehdr).e_entry); \
+ bswap((ehdr).e_phoff); \
+ bswap((ehdr).e_shoff); \
+ bswap((ehdr).e_flags); \
+ bswap((ehdr).e_ehsize); \
+ bswap((ehdr).e_phentsize); \
+ bswap((ehdr).e_phnum); \
+ bswap((ehdr).e_shentsize); \
+ bswap((ehdr).e_shnum); \
+ bswap((ehdr).e_shstrndx); \
+ } while (0)
+
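+/*
+ * The copy_*() macros below copy header fields one by one rather than
+ * with a single memcpy() because the Elf32_* and Elf64_* structures
+ * differ in field widths (and, for program headers, field order);
+ * per-field assignment lets the compiler apply the 32-bit to 64-bit
+ * conversions.
+ */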
+#define copy_phdr(src_phdr, dst_phdr) \
+ do { \
+ (dst_phdr).p_type = (src_phdr).p_type; \
+ (dst_phdr).p_offset = (src_phdr).p_offset; \
+ (dst_phdr).p_filesz = (src_phdr).p_filesz; \
+ (dst_phdr).p_memsz = (src_phdr).p_memsz; \
+ (dst_phdr).p_align = (src_phdr).p_align; \
+ (dst_phdr).p_vaddr = (src_phdr).p_vaddr; \
+ } while (0)
+
+#define copy_shdr(src_shdr, dst_shdr) \
+ do { \
+ (dst_shdr).sh_name = (src_shdr).sh_name; \
+ (dst_shdr).sh_type = (src_shdr).sh_type; \
+ (dst_shdr).sh_flags = (src_shdr).sh_flags; \
+ (dst_shdr).sh_addr = (src_shdr).sh_addr; \
+ (dst_shdr).sh_offset = (src_shdr).sh_offset; \
+ (dst_shdr).sh_size = (src_shdr).sh_size; \
+ (dst_shdr).sh_link = (src_shdr).sh_link; \
+ (dst_shdr).sh_info = (src_shdr).sh_info; \
+ (dst_shdr).sh_addralign = (src_shdr).sh_addralign; \
+ (dst_shdr).sh_entsize = (src_shdr).sh_entsize; \
+ } while (0)
+
+#define copy_ehdr(src_ehdr, dst_ehdr) \
+ do { \
+ (dst_ehdr).e_type = (src_ehdr).e_type; \
+ (dst_ehdr).e_machine = (src_ehdr).e_machine; \
+ (dst_ehdr).e_version = (src_ehdr).e_version; \
+ (dst_ehdr).e_entry = (src_ehdr).e_entry; \
+ (dst_ehdr).e_phoff = (src_ehdr).e_phoff; \
+ (dst_ehdr).e_shoff = (src_ehdr).e_shoff; \
+ (dst_ehdr).e_flags = (src_ehdr).e_flags; \
+ (dst_ehdr).e_ehsize = (src_ehdr).e_ehsize; \
+ (dst_ehdr).e_phentsize = (src_ehdr).e_phentsize; \
+ (dst_ehdr).e_phnum = (src_ehdr).e_phnum; \
+ (dst_ehdr).e_shentsize = (src_ehdr).e_shentsize; \
+ (dst_ehdr).e_shnum = (src_ehdr).e_shnum; \
+ (dst_ehdr).e_shstrndx = (src_ehdr).e_shstrndx; \
+ } while (0)
+
+static inline
+int is_elf_32_bit(struct lttng_ust_elf *elf)
+{
+ return elf->bitness == ELFCLASS32;
+}
+
+static inline
+int is_elf_native_endian(struct lttng_ust_elf *elf)
+{
+ return elf->endianness == NATIVE_ELF_ENDIANNESS;
+}
+
+#endif /* _LIB_LTTNG_UST_ELF_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_statedump
+
+#if !defined(_TRACEPOINT_LTTNG_UST_STATEDUMP_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_STATEDUMP_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <lttng/ust-events.h>
+#include "compat.h"
+
+#define LTTNG_UST_STATEDUMP_PROVIDER
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_statedump, start,
+ TP_ARGS(struct lttng_ust_session *, session),
+ TP_FIELDS(
+ ctf_unused(session)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, bin_info,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ void *, baddr,
+ const char *, path,
+ uint64_t, memsz,
+ uint8_t, is_pic,
+ uint8_t, has_build_id,
+ uint8_t, has_debug_link
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint64_t, memsz, memsz)
+ ctf_string(path, path)
+ ctf_integer(uint8_t, is_pic, is_pic)
+ ctf_integer(uint8_t, has_build_id, has_build_id)
+ ctf_integer(uint8_t, has_debug_link, has_debug_link)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, build_id,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ void *, baddr,
+ uint8_t *, build_id,
+ size_t, build_id_len
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_sequence_hex(uint8_t, build_id, build_id,
+ size_t, build_id_len)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, debug_link,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ void *, baddr,
+ char *, filename,
+ uint32_t, crc
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint32_t, crc, crc)
+ ctf_string(filename, filename)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, procname,
+ TP_ARGS(
+ struct lttng_ust_session *, session,
+ char *, name
+ ),
+ TP_FIELDS(
+ ctf_unused(session)
+ ctf_array_text(char, procname, name, LTTNG_UST_ABI_PROCNAME_LEN)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_statedump, end,
+ TP_ARGS(struct lttng_ust_session *, session),
+ TP_FIELDS(
+ ctf_unused(session)
+ )
+)
+
+#endif /* _TRACEPOINT_LTTNG_UST_STATEDUMP_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./lttng-ust-statedump-provider.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <link.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include <ust-elf.h>
+#include <ust-helper.h>
+#include "lttng-tracer-core.h"
+#include "lttng-ust-statedump.h"
+#include "jhash.h"
+#include "getenv.h"
+#include "compat.h"
+#include "ust-events-internal.h"
+
+#define TRACEPOINT_DEFINE
+#include "ust_lib.h" /* Only define. */
+
+#define TRACEPOINT_CREATE_PROBES
+#define TP_SESSION_CHECK
+#include "lttng-ust-statedump-provider.h" /* Define and create probes. */
+
+struct dl_iterate_data {
+ int exec_found;
+ bool first;
+ bool cancel;
+};
+
+struct bin_info_data {
+ void *base_addr_ptr;
+ char resolved_path[PATH_MAX];
+ char *dbg_file;
+ uint8_t *build_id;
+ uint64_t memsz;
+ size_t build_id_len;
+ int vdso;
+ uint32_t crc;
+ uint8_t is_pic;
+ uint8_t has_build_id;
+ uint8_t has_debug_link;
+};
+
+struct lttng_ust_dl_node {
+ struct bin_info_data bin_data;
+ struct cds_hlist_node node;
+ bool traced;
+ bool marked;
+};
+
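+/*
+ * Hash table of shared objects seen by the phdr iteration below, keyed
+ * by base address (see find_or_create_dl_node()). The `marked` flag
+ * drives the mark-and-sweep pass in iter_end(): entries still reported
+ * by the dynamic loader stay marked, unmarked entries are swept as
+ * unloaded libraries.
+ */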
+#define UST_DL_STATE_HASH_BITS 8
+#define UST_DL_STATE_TABLE_SIZE (1 << UST_DL_STATE_HASH_BITS)
+struct cds_hlist_head dl_state_table[UST_DL_STATE_TABLE_SIZE];
+
+typedef void (*tracepoint_cb)(struct lttng_ust_session *session, void *priv);
+
+static
+struct lttng_ust_dl_node *alloc_dl_node(const struct bin_info_data *bin_data)
+{
+ struct lttng_ust_dl_node *e;
+
+ e = zmalloc(sizeof(struct lttng_ust_dl_node));
+ if (!e)
+ return NULL;
+ if (bin_data->dbg_file) {
+ e->bin_data.dbg_file = strdup(bin_data->dbg_file);
+ if (!e->bin_data.dbg_file)
+ goto error;
+ }
+ if (bin_data->build_id) {
+ e->bin_data.build_id = zmalloc(bin_data->build_id_len);
+ if (!e->bin_data.build_id)
+ goto error;
+ memcpy(e->bin_data.build_id, bin_data->build_id,
+ bin_data->build_id_len);
+ }
+ e->bin_data.base_addr_ptr = bin_data->base_addr_ptr;
+ memcpy(e->bin_data.resolved_path, bin_data->resolved_path, PATH_MAX);
+ e->bin_data.memsz = bin_data->memsz;
+ e->bin_data.build_id_len = bin_data->build_id_len;
+ e->bin_data.vdso = bin_data->vdso;
+ e->bin_data.crc = bin_data->crc;
+ e->bin_data.is_pic = bin_data->is_pic;
+ e->bin_data.has_build_id = bin_data->has_build_id;
+ e->bin_data.has_debug_link = bin_data->has_debug_link;
+ return e;
+
+error:
+ free(e->bin_data.build_id);
+ free(e->bin_data.dbg_file);
+ free(e);
+ return NULL;
+}
+
+static
+void free_dl_node(struct lttng_ust_dl_node *e)
+{
+ free(e->bin_data.build_id);
+ free(e->bin_data.dbg_file);
+ free(e);
+}
+
+/* Return 0 if same, nonzero if not. */
+static
+int compare_bin_data(const struct bin_info_data *a,
+ const struct bin_info_data *b)
+{
+ if (a->base_addr_ptr != b->base_addr_ptr)
+ return -1;
+ if (strcmp(a->resolved_path, b->resolved_path) != 0)
+ return -1;
+ if (a->dbg_file && !b->dbg_file)
+ return -1;
+ if (!a->dbg_file && b->dbg_file)
+ return -1;
+ if (a->dbg_file && strcmp(a->dbg_file, b->dbg_file) != 0)
+ return -1;
+ if (a->build_id && !b->build_id)
+ return -1;
+ if (!a->build_id && b->build_id)
+ return -1;
+ if (a->build_id_len != b->build_id_len)
+ return -1;
+ if (a->build_id &&
+ memcmp(a->build_id, b->build_id, a->build_id_len) != 0)
+ return -1;
+ if (a->memsz != b->memsz)
+ return -1;
+ if (a->vdso != b->vdso)
+ return -1;
+ if (a->crc != b->crc)
+ return -1;
+ if (a->is_pic != b->is_pic)
+ return -1;
+ if (a->has_build_id != b->has_build_id)
+ return -1;
+ if (a->has_debug_link != b->has_debug_link)
+ return -1;
+ return 0;
+}
+
+static
+struct lttng_ust_dl_node *find_or_create_dl_node(struct bin_info_data *bin_data)
+{
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+ unsigned int hash;
+ bool found = false;
+
+ hash = jhash(&bin_data->base_addr_ptr,
+ sizeof(bin_data->base_addr_ptr), 0);
+ head = &dl_state_table[hash & (UST_DL_STATE_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry_2(e, head, node) {
+ if (compare_bin_data(&e->bin_data, bin_data) != 0)
+ continue;
+ found = true;
+ break;
+ }
+ if (!found) {
+ /* Create */
+ e = alloc_dl_node(bin_data);
+ if (!e)
+ return NULL;
+ cds_hlist_add_head(&e->node, head);
+ }
+ return e;
+}
+
+static
+void remove_dl_node(struct lttng_ust_dl_node *e)
+{
+ cds_hlist_del(&e->node);
+}
+
+/*
+ * Trace statedump event into all sessions owned by the caller thread
+ * for which statedump is pending.
+ */
+static
+void trace_statedump_event(tracepoint_cb tp_cb, void *owner, void *priv)
+{
+ struct cds_list_head *sessionsp;
+ struct lttng_ust_session_private *session_priv;
+
+ sessionsp = lttng_get_sessions();
+ cds_list_for_each_entry(session_priv, sessionsp, node) {
+ if (session_priv->owner != owner)
+ continue;
+ if (!session_priv->statedump_pending)
+ continue;
+ tp_cb(session_priv->pub, priv);
+ }
+}
+
+static
+void trace_bin_info_cb(struct lttng_ust_session *session, void *priv)
+{
+ struct bin_info_data *bin_data = (struct bin_info_data *) priv;
+
+ tracepoint(lttng_ust_statedump, bin_info,
+ session, bin_data->base_addr_ptr,
+ bin_data->resolved_path, bin_data->memsz,
+ bin_data->is_pic, bin_data->has_build_id,
+ bin_data->has_debug_link);
+}
+
+static
+void trace_build_id_cb(struct lttng_ust_session *session, void *priv)
+{
+ struct bin_info_data *bin_data = (struct bin_info_data *) priv;
+
+ tracepoint(lttng_ust_statedump, build_id,
+ session, bin_data->base_addr_ptr,
+ bin_data->build_id, bin_data->build_id_len);
+}
+
+static
+void trace_debug_link_cb(struct lttng_ust_session *session, void *priv)
+{
+ struct bin_info_data *bin_data = (struct bin_info_data *) priv;
+
+ tracepoint(lttng_ust_statedump, debug_link,
+ session, bin_data->base_addr_ptr,
+ bin_data->dbg_file, bin_data->crc);
+}
+
+static
+void procname_cb(struct lttng_ust_session *session, void *priv)
+{
+ char *procname = (char *) priv;
+
+ tracepoint(lttng_ust_statedump, procname, session, procname);
+}
+
+static
+void trace_start_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
+{
+ tracepoint(lttng_ust_statedump, start, session);
+}
+
+static
+void trace_end_cb(struct lttng_ust_session *session, void *priv __attribute__((unused)))
+{
+ tracepoint(lttng_ust_statedump, end, session);
+}
+
+static
+int get_elf_info(struct bin_info_data *bin_data)
+{
+ struct lttng_ust_elf *elf;
+ int ret = 0, found;
+
+ elf = lttng_ust_elf_create(bin_data->resolved_path);
+ if (!elf) {
+ ret = -1;
+ goto end;
+ }
+
+ ret = lttng_ust_elf_get_memsz(elf, &bin_data->memsz);
+ if (ret) {
+ goto end;
+ }
+
+ found = 0;
+ ret = lttng_ust_elf_get_build_id(elf, &bin_data->build_id,
+ &bin_data->build_id_len,
+ &found);
+ if (ret) {
+ goto end;
+ }
+ bin_data->has_build_id = !!found;
+ found = 0;
+ ret = lttng_ust_elf_get_debug_link(elf, &bin_data->dbg_file,
+ &bin_data->crc,
+ &found);
+ if (ret) {
+ goto end;
+ }
+ bin_data->has_debug_link = !!found;
+
+ bin_data->is_pic = lttng_ust_elf_is_pic(elf);
+
+end:
+ lttng_ust_elf_destroy(elf);
+ return ret;
+}
+
+static
+void trace_baddr(struct bin_info_data *bin_data, void *owner)
+{
+ trace_statedump_event(trace_bin_info_cb, owner, bin_data);
+
+ if (bin_data->has_build_id)
+ trace_statedump_event(trace_build_id_cb, owner, bin_data);
+
+ if (bin_data->has_debug_link)
+ trace_statedump_event(trace_debug_link_cb, owner, bin_data);
+}
+
+static
+int extract_baddr(struct bin_info_data *bin_data)
+{
+ int ret = 0;
+ struct lttng_ust_dl_node *e;
+
+ if (!bin_data->vdso) {
+ ret = get_elf_info(bin_data);
+ if (ret) {
+ goto end;
+ }
+ } else {
+ bin_data->memsz = 0;
+ bin_data->has_build_id = 0;
+ bin_data->has_debug_link = 0;
+ }
+
+ e = find_or_create_dl_node(bin_data);
+ if (!e) {
+ ret = -1;
+ goto end;
+ }
+ e->marked = true;
+end:
+ free(bin_data->build_id);
+ bin_data->build_id = NULL;
+ free(bin_data->dbg_file);
+ bin_data->dbg_file = NULL;
+ return ret;
+}
+
+static
+void trace_statedump_start(void *owner)
+{
+ trace_statedump_event(trace_start_cb, owner, NULL);
+}
+
+static
+void trace_statedump_end(void *owner)
+{
+ trace_statedump_event(trace_end_cb, owner, NULL);
+}
+
+static
+void iter_begin(struct dl_iterate_data *data)
+{
+ unsigned int i;
+
+ /*
+ * UST lock nests within dynamic loader lock.
+ *
+ * Hold this lock across handling of the module listing to
+ * protect memory allocation at early process start, due to
+ * interactions with libc-wrapper lttng malloc instrumentation.
+ */
+ if (ust_lock()) {
+ data->cancel = true;
+ return;
+ }
+
+ /* Ensure all entries are unmarked. */
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_2(e, head, node)
+ assert(!e->marked);
+ }
+}
+
+static
+void trace_lib_load(const struct bin_info_data *bin_data, void *ip)
+{
+ tracepoint(lttng_ust_lib, load,
+ ip, bin_data->base_addr_ptr, bin_data->resolved_path,
+ bin_data->memsz, bin_data->has_build_id,
+ bin_data->has_debug_link);
+
+ if (bin_data->has_build_id) {
+ tracepoint(lttng_ust_lib, build_id,
+ ip, bin_data->base_addr_ptr, bin_data->build_id,
+ bin_data->build_id_len);
+ }
+
+ if (bin_data->has_debug_link) {
+ tracepoint(lttng_ust_lib, debug_link,
+ ip, bin_data->base_addr_ptr, bin_data->dbg_file,
+ bin_data->crc);
+ }
+}
+
+static
+void trace_lib_unload(const struct bin_info_data *bin_data, void *ip)
+{
+ tracepoint(lttng_ust_lib, unload, ip, bin_data->base_addr_ptr);
+}
+
+static
+void iter_end(struct dl_iterate_data *data, void *ip)
+{
+ unsigned int i;
+
+ if (data->cancel)
+ goto end;
+ /*
+ * Iterate on hash table.
+ * For each marked, traced, do nothing.
+ * For each marked, not traced, trace lib open event. traced = true.
+ * For each unmarked, traced, trace lib close event. remove node.
+ * For each unmarked, not traced, remove node.
+ */
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_2(e, head, node) {
+ if (e->marked) {
+ if (!e->traced) {
+ trace_lib_load(&e->bin_data, ip);
+ e->traced = true;
+ }
+ e->marked = false;
+ } else {
+ if (e->traced)
+ trace_lib_unload(&e->bin_data, ip);
+ remove_dl_node(e);
+ free_dl_node(e);
+ }
+ }
+ }
+end:
+ ust_unlock();
+}
+
+static
+int extract_bin_info_events(struct dl_phdr_info *info, size_t size __attribute__((unused)), void *_data)
+{
+ int j, ret = 0;
+ struct dl_iterate_data *data = _data;
+
+ if (data->first) {
+ iter_begin(data);
+ data->first = false;
+ }
+
+ if (data->cancel)
+ goto end;
+
+ for (j = 0; j < info->dlpi_phnum; j++) {
+ struct bin_info_data bin_data;
+
+ if (info->dlpi_phdr[j].p_type != PT_LOAD)
+ continue;
+
+ memset(&bin_data, 0, sizeof(bin_data));
+
+ /* Calculate virtual memory address of the loadable segment */
+ bin_data.base_addr_ptr = (void *) info->dlpi_addr +
+ info->dlpi_phdr[j].p_vaddr;
+
+ if (info->dlpi_name == NULL || info->dlpi_name[0] == 0) {
+ /*
+ * Only the first phdr without a dlpi_name
+ * encountered is considered the program
+ * executable. The rest are vdsos.
+ */
+ if (!data->exec_found) {
+ ssize_t path_len;
+ data->exec_found = 1;
+
+ /*
+ * Use /proc/self/exe to resolve the
+ * executable's full path.
+ */
+ path_len = readlink("/proc/self/exe",
+ bin_data.resolved_path,
+ PATH_MAX - 1);
+ if (path_len <= 0)
+ break;
+
+ bin_data.resolved_path[path_len] = '\0';
+ bin_data.vdso = 0;
+ } else {
+ snprintf(bin_data.resolved_path,
+ PATH_MAX - 1, "[vdso]");
+ bin_data.vdso = 1;
+ }
+ } else {
+ /*
+ * For regular dl_phdr_info entries check if
+ * the path to the binary really exists. If not,
+ * treat as vdso and use dlpi_name as 'path'.
+ */
+ if (!realpath(info->dlpi_name,
+ bin_data.resolved_path)) {
+ snprintf(bin_data.resolved_path,
+ PATH_MAX - 1, "[%s]",
+ info->dlpi_name);
+ bin_data.vdso = 1;
+ } else {
+ bin_data.vdso = 0;
+ }
+ }
+
+ ret = extract_baddr(&bin_data);
+ break;
+ }
+end:
+ return ret;
+}
+
+static
+void ust_dl_table_statedump(void *owner)
+{
+ unsigned int i;
+
+ if (ust_lock())
+ goto end;
+
+ /* Statedump each traced table entry into session for owner. */
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_2(e, head, node) {
+ if (e->traced)
+ trace_baddr(&e->bin_data, owner);
+ }
+ }
+
+end:
+ ust_unlock();
+}
+
+void lttng_ust_dl_update(void *ip)
+{
+ struct dl_iterate_data data;
+
+ if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
+ return;
+
+ /*
+ * Fixup lttng-ust TLS when called from dlopen/dlclose
+ * instrumentation.
+ */
+ lttng_ust_fixup_tls();
+
+ data.exec_found = 0;
+ data.first = true;
+ data.cancel = false;
+ /*
+ * Iterate through the list of currently loaded shared objects and
+ * generate table entries for loadable segments using
+ * extract_bin_info_events.
+ * Removed libraries are detected by mark-and-sweep: marking is
+ * done in the iteration over libraries, and sweeping is
+ * performed by iter_end().
+ */
+ dl_iterate_phdr(extract_bin_info_events, &data);
+ if (data.first)
+ iter_begin(&data);
+ iter_end(&data, ip);
+}
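+
+/*
+ * Usage sketch (illustrative; "libc_dlopen" is a hypothetical helper
+ * standing in for the chained loader call): the dlopen()
+ * instrumentation mentioned above would refresh the table after the
+ * loader returns, along these lines:
+ *
+ *   void *dlopen(const char *filename, int flags)
+ *   {
+ *           void *handle = libc_dlopen(filename, flags);
+ *
+ *           if (handle)
+ *                   lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+ *           return handle;
+ *   }
+ */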
+
+/*
+ * Generate a statedump of base addresses of all shared objects loaded
+ * by the traced application, as well as for the application's
+ * executable itself.
+ */
+static
+int do_baddr_statedump(void *owner)
+{
+ if (lttng_ust_getenv("LTTNG_UST_WITHOUT_BADDR_STATEDUMP"))
+ return 0;
+ lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+ ust_dl_table_statedump(owner);
+ return 0;
+}
+
+static
+int do_procname_statedump(void *owner)
+{
+ if (lttng_ust_getenv("LTTNG_UST_WITHOUT_PROCNAME_STATEDUMP"))
+ return 0;
+
+ trace_statedump_event(procname_cb, owner, lttng_ust_sockinfo_get_procname(owner));
+ return 0;
+}
+
+/*
+ * Generate a statedump of a given traced application. A statedump is
+ * delimited by start and end events. For a given (process, session)
+ * pair, start/end events are serialized and will match. However, in a
+ * session, statedumps from different processes may be
+ * interleaved. The vpid context should be used to identify which
+ * events belong to which process.
+ *
+ * Grab the ust_lock outside of the RCU read-side lock because we
+ * perform synchronize_rcu with the ust_lock held, which can trigger
+ * deadlocks otherwise.
+ */
+int do_lttng_ust_statedump(void *owner)
+{
+ ust_lock_nocheck();
+ trace_statedump_start(owner);
+ ust_unlock();
+
+ do_procname_statedump(owner);
+ do_baddr_statedump(owner);
+
+ ust_lock_nocheck();
+ trace_statedump_end(owner);
+ ust_unlock();
+
+ return 0;
+}
+
+void lttng_ust_statedump_init(void)
+{
+ __tracepoints__init();
+ __tracepoints__ptrs_init();
+ __lttng_events_init__lttng_ust_statedump();
+ lttng_ust_dl_update(LTTNG_UST_CALLER_IP());
+}
+
+static
+void ust_dl_state_destroy(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < UST_DL_STATE_TABLE_SIZE; i++) {
+ struct cds_hlist_head *head;
+ struct lttng_ust_dl_node *e, *tmp;
+
+ head = &dl_state_table[i];
+ cds_hlist_for_each_entry_safe_2(e, tmp, head, node)
+ free_dl_node(e);
+ CDS_INIT_HLIST_HEAD(head);
+ }
+}
+
+void lttng_ust_statedump_destroy(void)
+{
+ __lttng_events_exit__lttng_ust_statedump();
+ __tracepoints__ptrs_destroy();
+ __tracepoints__destroy();
+ ust_dl_state_destroy();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ */
+
+#ifndef LTTNG_UST_STATEDUMP_H
+#define LTTNG_UST_STATEDUMP_H
+
+#include <lttng/ust-events.h>
+
+void lttng_ust_statedump_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_statedump_destroy(void)
+ __attribute__((visibility("hidden")));
+
+int do_lttng_ust_statedump(void *owner)
+ __attribute__((visibility("hidden")));
+
+#endif /* LTTNG_UST_STATEDUMP_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_tracef
+
+#if !defined(_TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H
+
+#include <lttng/tp/lttng-ust-tracef.h>
+
+#endif /* _TRACEPOINT_LTTNG_UST_TRACEF_PROVIDER_H */
+
+#define TP_IP_PARAM ip /* IP context received as parameter */
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracef.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011-2015 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_tracelog
+
+#if !defined(_TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H
+
+#include <lttng/tp/lttng-ust-tracelog.h>
+
+#endif /* _TRACEPOINT_LTTNG_UST_TRACELOG_PROVIDER_H */
+
+#define TP_IP_PARAM ip /* IP context received as parameter */
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./tp/lttng-ust-tracelog.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * library wrappers to be used by non-LGPL compatible source code.
+ */
+
+#include <urcu/uatomic.h>
+
+#include <lttng/urcu/static/pointer.h>
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#include <lttng/urcu/pointer.h>
+
+void *lttng_ust_rcu_dereference_sym(void *p)
+{
+ return _lttng_ust_rcu_dereference(p);
+}
+
+void *lttng_ust_rcu_set_pointer_sym(void **p, void *v)
+{
+ cmm_wmb();
+ uatomic_set(p, v);
+ return v;
+}
+
+void *lttng_ust_rcu_xchg_pointer_sym(void **p, void *v)
+{
+ cmm_wmb();
+ return uatomic_xchg(p, v);
+}
+
+void *lttng_ust_rcu_cmpxchg_pointer_sym(void **p, void *old, void *_new)
+{
+ cmm_wmb();
+ return uatomic_cmpxchg(p, old, _new);
+}
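+
+/*
+ * Usage sketch (illustrative; "head", "node", "key" and "value" are
+ * placeholders): publish a fully initialized structure so concurrent
+ * readers observe either the previous pointer or the new, fully
+ * initialized one:
+ *
+ *   node->key = key;
+ *   node->value = value;
+ *   lttng_ust_rcu_set_pointer_sym((void **) &head, node);
+ */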
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
+ *
+ * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <poll.h>
+#include <unistd.h>
+#include <stdbool.h>
+#include <sys/mman.h>
+
+#include <urcu/arch.h>
+#include <urcu/wfcqueue.h>
+#include <lttng/urcu/static/urcu-ust.h>
+#include <lttng/urcu/pointer.h>
+#include <urcu/tls-compat.h>
+
+/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
+#undef _LGPL_SOURCE
+#include <lttng/urcu/urcu-ust.h>
+#define _LGPL_SOURCE
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+#ifdef __linux__
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ return mremap(old_address, old_size, new_size, flags);
+}
+#else
+
+#define MREMAP_MAYMOVE 1
+#define MREMAP_FIXED 2
+
+/*
+ * mremap wrapper for non-Linux systems not allowing MAYMOVE.
+ * This is not generic.
+ */
+static
+void *mremap_wrapper(void *old_address, size_t old_size,
+ size_t new_size, int flags)
+{
+ assert(!(flags & MREMAP_MAYMOVE));
+
+ return MAP_FAILED;
+}
+#endif
+
+/* Sleep delay in ms */
+#define RCU_SLEEP_DELAY_MS 10
+#define INIT_NR_THREADS 8
+#define ARENA_INIT_ALLOC \
+ sizeof(struct registry_chunk) \
+ + INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)
+
+/*
+ * Active attempts to check for reader Q.S. before calling sleep().
+ */
+#define RCU_QS_ACTIVE_ATTEMPTS 100
+
+static
+int lttng_ust_urcu_refcount;
+
+/* If the headers do not support the membarrier system call, fall back on smp_mb. */
+#ifdef __NR_membarrier
+# define membarrier(...) syscall(__NR_membarrier, __VA_ARGS__)
+#else
+# define membarrier(...) -ENOSYS
+#endif
+
+enum membarrier_cmd {
+ MEMBARRIER_CMD_QUERY = 0,
+ MEMBARRIER_CMD_SHARED = (1 << 0),
+ /* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
+ /* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
+ MEMBARRIER_CMD_PRIVATE_EXPEDITED = (1 << 3),
+ MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED = (1 << 4),
+};
+
+static
+void _lttng_ust_urcu_init(void)
+ __attribute__((constructor));
+static
+void lttng_ust_urcu_exit(void)
+ __attribute__((destructor));
+
+#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
+int lttng_ust_urcu_has_sys_membarrier;
+#endif
+
+/*
+ * rcu_gp_lock ensures mutual exclusion between threads calling
+ * synchronize_rcu().
+ */
+static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
+/*
+ * rcu_registry_lock ensures mutual exclusion between threads
+ * registering and unregistering themselves to/from the registry, and
+ * with threads reading that registry from synchronize_rcu(). However,
+ * this lock is not held all the way through the completion of awaiting
+ * for the grace period. It is sporadically released between iterations
+ * on the registry.
+ * rcu_registry_lock may nest inside rcu_gp_lock.
+ */
+static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
+static int initialized;
+
+static pthread_key_t lttng_ust_urcu_key;
+
+struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };
+
+/*
+ * Pointer to registry elements. Written to only by each individual reader. Read
+ * by both the reader and the writers.
+ */
+DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);
+
+static CDS_LIST_HEAD(registry);
+
+struct registry_chunk {
+ size_t data_len; /* data length */
+ size_t used; /* amount of data used */
+ struct cds_list_head node; /* chunk_list node */
+ char data[];
+};
+
+struct registry_arena {
+ struct cds_list_head chunk_list;
+};
+
+static struct registry_arena registry_arena = {
+ .chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
+};
+
+/* Saved fork signal mask, protected by rcu_gp_lock */
+static sigset_t saved_fork_signal_mask;
+
+static void mutex_lock(pthread_mutex_t *mutex)
+{
+ int ret;
+
+#ifndef DISTRUST_SIGNALS_EXTREME
+ ret = pthread_mutex_lock(mutex);
+ if (ret)
+ abort();
+#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
+ while ((ret = pthread_mutex_trylock(mutex)) != 0) {
+ if (ret != EBUSY && ret != EINTR)
+ abort();
+ poll(NULL, 0, 10);
+ }
+#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
+}
+
+static void mutex_unlock(pthread_mutex_t *mutex)
+{
+ int ret;
+
+ ret = pthread_mutex_unlock(mutex);
+ if (ret)
+ abort();
+}
+
+static void smp_mb_master(void)
+{
+ if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
+ if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
+ abort();
+ } else {
+ cmm_smp_mb();
+ }
+}
+
+/*
+ * Always called with rcu_registry_lock held. Releases this lock between
+ * iterations and grabs it again. Holds the lock when it returns.
+ */
+static void wait_for_readers(struct cds_list_head *input_readers,
+ struct cds_list_head *cur_snap_readers,
+ struct cds_list_head *qsreaders)
+{
+ unsigned int wait_loops = 0;
+ struct lttng_ust_urcu_reader *index, *tmp;
+
+ /*
+ * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
+ * indicate quiescence (not nested), or observe the current
+ * rcu_gp.ctr value.
+ */
+ for (;;) {
+ if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
+ wait_loops++;
+
+ cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
+ switch (lttng_ust_urcu_reader_state(&index->ctr)) {
+ case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
+ if (cur_snap_readers) {
+ cds_list_move(&index->node,
+ cur_snap_readers);
+ break;
+ }
+ /* Fall-through */
+ case LTTNG_UST_URCU_READER_INACTIVE:
+ cds_list_move(&index->node, qsreaders);
+ break;
+ case LTTNG_UST_URCU_READER_ACTIVE_OLD:
+ /*
+ * Old snapshot. Leaving node in
+ * input_readers will make us busy-loop
+ * until the snapshot becomes current or
+ * the reader becomes inactive.
+ */
+ break;
+ }
+ }
+
+ if (cds_list_empty(input_readers)) {
+ break;
+ } else {
+ /* Temporarily unlock the registry lock. */
+ mutex_unlock(&rcu_registry_lock);
+ if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
+ (void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
+ else
+ caa_cpu_relax();
+ /* Re-lock the registry lock before the next loop. */
+ mutex_lock(&rcu_registry_lock);
+ }
+ }
+}
+
+void lttng_ust_urcu_synchronize_rcu(void)
+{
+ CDS_LIST_HEAD(cur_snap_readers);
+ CDS_LIST_HEAD(qsreaders);
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ assert(!ret);
+
+ mutex_lock(&rcu_gp_lock);
+
+ mutex_lock(&rcu_registry_lock);
+
+ if (cds_list_empty(&registry))
+ goto out;
+
+ /*
+ * All threads should read qparity before accessing the data
+ * structure pointed to by the new ptr.
+ * Write the new ptr before changing the qparity.
+ */
+ smp_mb_master();
+
+ /*
+ * Wait for readers to observe original parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
+ */
+ wait_for_readers(&registry, &cur_snap_readers, &qsreaders);
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /* Switch parity: 0 -> 1, 1 -> 0 */
+ CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr, lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);
+
+ /*
+ * Must commit qparity update to memory before waiting for other parity
+ * quiescent state. Failure to do so could result in the writer waiting
+ * forever while new readers are always accessing data (no progress).
+ * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
+ */
+
+ /*
+ * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
+ * model easier to understand. It does not have a big performance impact
+ * anyway, given this is the write-side.
+ */
+ cmm_smp_mb();
+
+ /*
+ * Wait for readers to observe new parity or be quiescent.
+ * wait_for_readers() can release and re-acquire rcu_registry_lock
+ * internally.
+ */
+ wait_for_readers(&cur_snap_readers, NULL, &qsreaders);
+
+ /*
+ * Put quiescent reader list back into registry.
+ */
+ cds_list_splice(&qsreaders, &registry);
+
+ /*
+ * Finish waiting for reader threads before letting the old ptr be
+ * freed.
+ */
+ smp_mb_master();
+out:
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
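+
+/*
+ * Writer-side usage sketch (illustrative; "head" and "old" are
+ * placeholders): unpublish a node, wait for a grace period, then
+ * reclaim it:
+ *
+ *   old = lttng_ust_rcu_xchg_pointer_sym((void **) &head, NULL);
+ *   lttng_ust_urcu_synchronize_rcu();
+ *   free(old);
+ */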
+
+/*
+ * library wrappers to be used by non-LGPL compatible source code.
+ */
+
+void lttng_ust_urcu_read_lock(void)
+{
+ _lttng_ust_urcu_read_lock();
+}
+
+void lttng_ust_urcu_read_unlock(void)
+{
+ _lttng_ust_urcu_read_unlock();
+}
+
+int lttng_ust_urcu_read_ongoing(void)
+{
+ return _lttng_ust_urcu_read_ongoing();
+}
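+
+/*
+ * Reader-side usage sketch (illustrative; "head", "p" and "use" are
+ * placeholders): a thread registers once, then brackets accesses to
+ * RCU-protected pointers with the read-side lock:
+ *
+ *   lttng_ust_urcu_register_thread();
+ *   lttng_ust_urcu_read_lock();
+ *   p = lttng_ust_rcu_dereference_sym(head);
+ *   if (p)
+ *           use(p);
+ *   lttng_ust_urcu_read_unlock();
+ */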
+
+/*
+ * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
+ * Else, try expanding the last chunk. If this fails, allocate a new
+ * chunk twice as big as the last chunk.
+ * Memory used by chunks _never_ moves. A chunk could theoretically be
+ * freed when all "used" slots are released, but we don't do it at this
+ * point.
+ */
+static
+void expand_arena(struct registry_arena *arena)
+{
+ struct registry_chunk *new_chunk, *last_chunk;
+ size_t old_chunk_len, new_chunk_len;
+
+ /* No chunk. */
+ if (cds_list_empty(&arena->chunk_list)) {
+ assert(ARENA_INIT_ALLOC >=
+ sizeof(struct registry_chunk)
+ + sizeof(struct lttng_ust_urcu_reader));
+ new_chunk_len = ARENA_INIT_ALLOC;
+ new_chunk = (struct registry_chunk *) mmap(NULL,
+ new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ memset(new_chunk, 0, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+ return; /* We're done. */
+ }
+
+ /* Try expanding last chunk. */
+ last_chunk = cds_list_entry(arena->chunk_list.prev,
+ struct registry_chunk, node);
+ old_chunk_len =
+ last_chunk->data_len + sizeof(struct registry_chunk);
+ new_chunk_len = old_chunk_len << 1;
+
+ /* Don't allow memory mapping to move, just expand. */
+ new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
+ new_chunk_len, 0);
+ if (new_chunk != MAP_FAILED) {
+ /* Should not have moved. */
+ assert(new_chunk == last_chunk);
+ memset((char *) last_chunk + old_chunk_len, 0,
+ new_chunk_len - old_chunk_len);
+ last_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ return; /* We're done. */
+ }
+
+ /* Remap did not succeed, we need to add a new chunk. */
+ new_chunk = (struct registry_chunk *) mmap(NULL,
+ new_chunk_len,
+ PROT_READ | PROT_WRITE,
+ MAP_ANONYMOUS | MAP_PRIVATE,
+ -1, 0);
+ if (new_chunk == MAP_FAILED)
+ abort();
+ memset(new_chunk, 0, new_chunk_len);
+ new_chunk->data_len =
+ new_chunk_len - sizeof(struct registry_chunk);
+ cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
+}
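+
+/*
+ * Illustrative sizing: with INIT_NR_THREADS == 8, the first chunk is
+ * ARENA_INIT_ALLOC == sizeof(struct registry_chunk)
+ *                     + 8 * sizeof(struct lttng_ust_urcu_reader)
+ * bytes. Each expansion doubles the previous chunk length
+ * (new_chunk_len = old_chunk_len << 1), so slot capacity grows
+ * geometrically while live chunks never move.
+ */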
+
+static
+struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
+{
+ struct registry_chunk *chunk;
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+ int expand_done = 0; /* Only allow expanding once per alloc */
+ size_t len = sizeof(struct lttng_ust_urcu_reader);
+
+retry:
+ cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
+ if (chunk->data_len - chunk->used < len)
+ continue;
+ /* Find spot */
+ for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc) {
+ rcu_reader_reg->alloc = 1;
+ chunk->used += len;
+ return rcu_reader_reg;
+ }
+ }
+ }
+
+ if (!expand_done) {
+ expand_arena(arena);
+ expand_done = 1;
+ goto retry;
+ }
+
+ return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void add_thread(void)
+{
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+ int ret;
+
+ rcu_reader_reg = arena_alloc(&registry_arena);
+ if (!rcu_reader_reg)
+ abort();
+ ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
+ if (ret)
+ abort();
+
+ /* Add to registry */
+ rcu_reader_reg->tid = pthread_self();
+ assert(rcu_reader_reg->ctr == 0);
+ cds_list_add(&rcu_reader_reg->node, &registry);
+ /*
+ * Reader threads are pointing to the reader registry. This is
+ * why its memory should never be relocated.
+ */
+ URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
+}
+
+/* Called with mutex locked */
+static
+void cleanup_thread(struct registry_chunk *chunk,
+ struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ rcu_reader_reg->ctr = 0;
+ cds_list_del(&rcu_reader_reg->node);
+ rcu_reader_reg->tid = 0;
+ rcu_reader_reg->alloc = 0;
+ chunk->used -= sizeof(struct lttng_ust_urcu_reader);
+}
+
+static
+struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ struct registry_chunk *chunk;
+
+ cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
+ continue;
+ if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
+ continue;
+ return chunk;
+ }
+ return NULL;
+}
+
+/* Called with signals off and mutex locked */
+static
+void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
+ URCU_TLS(lttng_ust_urcu_reader) = NULL;
+}
+
+/* Disable signals, take mutex, add to registry */
+void lttng_ust_urcu_register(void)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+
+ /*
+ * Check if a signal handler concurrently registered our thread since
+ * the check in rcu_read_lock().
+ */
+ if (URCU_TLS(lttng_ust_urcu_reader))
+ goto end;
+
+ /*
+ * Take care of early registration before lttng_ust_urcu constructor.
+ */
+ _lttng_ust_urcu_init();
+
+ mutex_lock(&rcu_registry_lock);
+ add_thread();
+ mutex_unlock(&rcu_registry_lock);
+end:
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+}
+
+void lttng_ust_urcu_register_thread(void)
+{
+ if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
+ lttng_ust_urcu_register(); /* If not yet registered. */
+}
+
+/* Disable signals, take mutex, remove from registry */
+static
+void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ if (ret)
+ abort();
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ if (ret)
+ abort();
+
+ mutex_lock(&rcu_registry_lock);
+ remove_thread(rcu_reader_reg);
+ mutex_unlock(&rcu_registry_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ if (ret)
+ abort();
+ lttng_ust_urcu_exit();
+}
+
+/*
+ * Remove thread from the registry when it exits, and flag it as
+ * destroyed so garbage collection can take care of it.
+ */
+static
+void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
+{
+ lttng_ust_urcu_unregister(rcu_key);
+}
+
+#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
+static
+void lttng_ust_urcu_sys_membarrier_status(bool available)
+{
+ if (!available)
+ abort();
+}
+#else
+static
+void lttng_ust_urcu_sys_membarrier_status(bool available)
+{
+ if (!available)
+ return;
+ lttng_ust_urcu_has_sys_membarrier = 1;
+}
+#endif
+
+static
+void lttng_ust_urcu_sys_membarrier_init(void)
+{
+ bool available = false;
+ int mask;
+
+ mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
+ if (mask >= 0) {
+ if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
+ if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
+ abort();
+ available = true;
+ }
+ }
+ lttng_ust_urcu_sys_membarrier_status(available);
+}
+
+static
+void _lttng_ust_urcu_init(void)
+{
+ mutex_lock(&init_lock);
+ if (!lttng_ust_urcu_refcount++) {
+ int ret;
+
+ ret = pthread_key_create(&lttng_ust_urcu_key,
+ lttng_ust_urcu_thread_exit_notifier);
+ if (ret)
+ abort();
+ lttng_ust_urcu_sys_membarrier_init();
+ initialized = 1;
+ }
+ mutex_unlock(&init_lock);
+}
+
+static
+void lttng_ust_urcu_exit(void)
+{
+ mutex_lock(&init_lock);
+ if (!--lttng_ust_urcu_refcount) {
+ struct registry_chunk *chunk, *tmp;
+ int ret;
+
+ cds_list_for_each_entry_safe(chunk, tmp,
+ &registry_arena.chunk_list, node) {
+ munmap((void *) chunk, chunk->data_len
+ + sizeof(struct registry_chunk));
+ }
+ CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
+ ret = pthread_key_delete(lttng_ust_urcu_key);
+ if (ret)
+ abort();
+ }
+ mutex_unlock(&init_lock);
+}
+
+/*
+ * Holding the rcu_gp_lock and rcu_registry_lock across fork makes
+ * sure fork() does not race with a concurrent thread executing with
+ * any of those locks held. This ensures that the registry and data
+ * protected by rcu_gp_lock are in a coherent state in the child.
+ */
+void lttng_ust_urcu_before_fork(void)
+{
+ sigset_t newmask, oldmask;
+ int ret;
+
+ ret = sigfillset(&newmask);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
+ assert(!ret);
+ mutex_lock(&rcu_gp_lock);
+ mutex_lock(&rcu_registry_lock);
+ saved_fork_signal_mask = oldmask;
+}
+
+void lttng_ust_urcu_after_fork_parent(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
+
+/*
+ * Prune all entries from registry except our own thread. Fits the Linux
+ * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
+ */
+static
+void lttng_ust_urcu_prune_registry(void)
+{
+ struct registry_chunk *chunk;
+ struct lttng_ust_urcu_reader *rcu_reader_reg;
+
+ cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
+ for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
+ rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
+ rcu_reader_reg++) {
+ if (!rcu_reader_reg->alloc)
+ continue;
+ if (rcu_reader_reg->tid == pthread_self())
+ continue;
+ cleanup_thread(chunk, rcu_reader_reg);
+ }
+ }
+}
+
+void lttng_ust_urcu_after_fork_child(void)
+{
+ sigset_t oldmask;
+ int ret;
+
+ lttng_ust_urcu_prune_registry();
+ oldmask = saved_fork_signal_mask;
+ mutex_unlock(&rcu_registry_lock);
+ mutex_unlock(&rcu_gp_lock);
+ ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
+ assert(!ret);
+}
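+
+/*
+ * Usage sketch (illustrative): the three fork hooks above are meant
+ * to be wired through pthread_atfork(), so that both locks bracket
+ * the fork:
+ *
+ *   ret = pthread_atfork(lttng_ust_urcu_before_fork,
+ *                        lttng_ust_urcu_after_fork_parent,
+ *                        lttng_ust_urcu_after_fork_child);
+ */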
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_UUID_H
+#define _LTTNG_UST_UUID_H
+
+#include <lttng/ust-events.h> /* For LTTNG_UST_UUID_LEN */
+#include <lttng/ust-clock.h>
+
+#endif /* _LTTNG_UST_UUID_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#ifndef _LTTNG_NS_H
+#define _LTTNG_NS_H
+
+/*
+ * The lowest valid inode number that can be allocated in the proc filesystem
+ * is 0xF0000000. Any number below that can be used internally as an error code.
+ *
+ * Zero is used in the kernel as an error code; it's the value we return
+ * when we fail to read the proper inode number.
+ *
+ * One is used internally to identify an uninitialized cache entry; it should
+ * never be returned.
+ */
+
+enum ns_ino_state {
+ NS_INO_UNAVAILABLE = 0x0,
+ NS_INO_UNINITIALIZED = 0x1,
+ NS_INO_MIN = 0xF0000000,
+};
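+
+/*
+ * Minimal sketch (illustrative, not part of this header; "get_ns_ino"
+ * is a hypothetical helper) of reading a namespace inode number and
+ * checking it against the values above, assuming a Linux /proc
+ * filesystem:
+ *
+ *   #include <sys/stat.h>
+ *
+ *   static ino_t get_ns_ino(const char *path) // e.g. "/proc/self/ns/net"
+ *   {
+ *           struct stat sb;
+ *
+ *           if (stat(path, &sb) < 0 || sb.st_ino < NS_INO_MIN)
+ *                   return NS_INO_UNAVAILABLE;
+ *           return sb.st_ino;
+ *   }
+ */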
+
+#endif /* _LTTNG_NS_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: GPL-2.0-only
+ *
+ * Performance events:
+ *
+ * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * Header copied from Linux kernel v4.7 installed headers.
+ */
+
+#ifndef _UAPI_LINUX_PERF_EVENT_H
+#define _UAPI_LINUX_PERF_EVENT_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/byteorder.h>
+
+/*
+ * User-space ABI bits:
+ */
+
+/*
+ * attr.type
+ */
+enum perf_type_id {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+ PERF_TYPE_BREAKPOINT = 5,
+
+ PERF_TYPE_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized performance event event_id types, used by the
+ * attr.event_id parameter of the sys_perf_event_open()
+ * syscall:
+ */
+enum perf_hw_id {
+ /*
+ * Common hardware events, generalized by the kernel:
+ */
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+ PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
+ PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 8,
+ PERF_COUNT_HW_REF_CPU_CYCLES = 9,
+
+ PERF_COUNT_HW_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized hardware cache events:
+ *
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
+ * { read, write, prefetch } x
+ * { accesses, misses }
+ */
+enum perf_hw_cache_id {
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ PERF_COUNT_HW_CACHE_LL = 2,
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ PERF_COUNT_HW_CACHE_BPU = 5,
+ PERF_COUNT_HW_CACHE_NODE = 6,
+
+ PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_id {
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
+
+ PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_result_id {
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
+
+ PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
+};
+
+/*
+ * Special "software" events provided by the kernel, even if the hardware
+ * does not support performance events. These events measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+ PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
+ PERF_COUNT_SW_EMULATION_FAULTS = 8,
+ PERF_COUNT_SW_DUMMY = 9,
+ PERF_COUNT_SW_BPF_OUTPUT = 10,
+
+ PERF_COUNT_SW_MAX, /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_event_sample_format {
+ PERF_SAMPLE_IP = 1U << 0,
+ PERF_SAMPLE_TID = 1U << 1,
+ PERF_SAMPLE_TIME = 1U << 2,
+ PERF_SAMPLE_ADDR = 1U << 3,
+ PERF_SAMPLE_READ = 1U << 4,
+ PERF_SAMPLE_CALLCHAIN = 1U << 5,
+ PERF_SAMPLE_ID = 1U << 6,
+ PERF_SAMPLE_CPU = 1U << 7,
+ PERF_SAMPLE_PERIOD = 1U << 8,
+ PERF_SAMPLE_STREAM_ID = 1U << 9,
+ PERF_SAMPLE_RAW = 1U << 10,
+ PERF_SAMPLE_BRANCH_STACK = 1U << 11,
+ PERF_SAMPLE_REGS_USER = 1U << 12,
+ PERF_SAMPLE_STACK_USER = 1U << 13,
+ PERF_SAMPLE_WEIGHT = 1U << 14,
+ PERF_SAMPLE_DATA_SRC = 1U << 15,
+ PERF_SAMPLE_IDENTIFIER = 1U << 16,
+ PERF_SAMPLE_TRANSACTION = 1U << 17,
+ PERF_SAMPLE_REGS_INTR = 1U << 18,
+
+ PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */
+};
+
+/*
+ * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK is set.
+ *
+ * If the user does not pass priv level information via branch_sample_type,
+ * the kernel uses the event's priv level. Branch and event priv levels do
+ * not have to match. Branch priv level is checked for permissions.
+ *
+ * The branch types can be combined, however BRANCH_ANY covers all types
+ * of branches and therefore it supersedes all the other types.
+ */
+enum perf_branch_sample_type_shift {
+ PERF_SAMPLE_BRANCH_USER_SHIFT = 0, /* user branches */
+ PERF_SAMPLE_BRANCH_KERNEL_SHIFT = 1, /* kernel branches */
+ PERF_SAMPLE_BRANCH_HV_SHIFT = 2, /* hypervisor branches */
+
+ PERF_SAMPLE_BRANCH_ANY_SHIFT = 3, /* any branch types */
+ PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT = 4, /* any call branch */
+ PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT = 5, /* any return branch */
+ PERF_SAMPLE_BRANCH_IND_CALL_SHIFT = 6, /* indirect calls */
+ PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT = 7, /* transaction aborts */
+ PERF_SAMPLE_BRANCH_IN_TX_SHIFT = 8, /* in transaction */
+ PERF_SAMPLE_BRANCH_NO_TX_SHIFT = 9, /* not in transaction */
+ PERF_SAMPLE_BRANCH_COND_SHIFT = 10, /* conditional branches */
+
+ PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT = 11, /* call/ret stack */
+ PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT = 12, /* indirect jumps */
+ PERF_SAMPLE_BRANCH_CALL_SHIFT = 13, /* direct call */
+
+ PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT = 14, /* no flags */
+ PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT = 15, /* no cycles */
+
+ PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
+};
+
+enum perf_branch_sample_type {
+ PERF_SAMPLE_BRANCH_USER = 1U << PERF_SAMPLE_BRANCH_USER_SHIFT,
+ PERF_SAMPLE_BRANCH_KERNEL = 1U << PERF_SAMPLE_BRANCH_KERNEL_SHIFT,
+ PERF_SAMPLE_BRANCH_HV = 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
+
+ PERF_SAMPLE_BRANCH_ANY = 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_CALL = 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_CALL = 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+ PERF_SAMPLE_BRANCH_ABORT_TX = 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_IN_TX = 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_TX = 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
+ PERF_SAMPLE_BRANCH_COND = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
+
+ PERF_SAMPLE_BRANCH_CALL_STACK = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+ PERF_SAMPLE_BRANCH_IND_JUMP = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
+ PERF_SAMPLE_BRANCH_CALL = 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
+
+ PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
+ PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+
+ PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
+};
+
+#define PERF_SAMPLE_BRANCH_PLM_ALL \
+ (PERF_SAMPLE_BRANCH_USER|\
+ PERF_SAMPLE_BRANCH_KERNEL|\
+ PERF_SAMPLE_BRANCH_HV)
+
+/*
+ * Values to determine ABI of the registers dump.
+ */
+enum perf_sample_regs_abi {
+ PERF_SAMPLE_REGS_ABI_NONE = 0,
+ PERF_SAMPLE_REGS_ABI_32 = 1,
+ PERF_SAMPLE_REGS_ABI_64 = 2,
+};
+
+/*
+ * Values for the memory transaction event qualifier, mostly for
+ * abort events. Multiple bits can be set.
+ */
+enum {
+ PERF_TXN_ELISION = (1 << 0), /* From elision */
+ PERF_TXN_TRANSACTION = (1 << 1), /* From transaction */
+ PERF_TXN_SYNC = (1 << 2), /* Instruction is related */
+ PERF_TXN_ASYNC = (1 << 3), /* Instruction not related */
+ PERF_TXN_RETRY = (1 << 4), /* Retry possible */
+ PERF_TXN_CONFLICT = (1 << 5), /* Conflict abort */
+ PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
+ PERF_TXN_CAPACITY_READ = (1 << 7), /* Capacity read abort */
+
+ PERF_TXN_MAX = (1 << 8), /* non-ABI */
+
+ /* bits 32..63 are reserved for the abort code */
+
+ PERF_TXN_ABORT_MASK = (0xffffffffULL << 32),
+ PERF_TXN_ABORT_SHIFT = 32,
+};
+
+/*
+ * The format of the data returned by read() on a perf event fd,
+ * as specified by attr.read_format:
+ *
+ * struct read_format {
+ * { u64 value;
+ * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 id; } && PERF_FORMAT_ID
+ * } && !PERF_FORMAT_GROUP
+ *
+ * { u64 nr;
+ * { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
+ * { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
+ * { u64 value;
+ * { u64 id; } && PERF_FORMAT_ID
+ * } cntr[nr];
+ * } && PERF_FORMAT_GROUP
+ * };
+ */
+enum perf_event_read_format {
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2,
+ PERF_FORMAT_GROUP = 1U << 3,
+
+ PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
+};
+
+#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+#define PERF_ATTR_SIZE_VER1 72 /* add: config2 */
+#define PERF_ATTR_SIZE_VER2 80 /* add: branch_sample_type */
+#define PERF_ATTR_SIZE_VER3 96 /* add: sample_regs_user */
+ /* add: sample_stack_user */
+#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
+#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
+
+/*
+ * Hardware event_id to monitor via a performance monitoring event:
+ */
+struct perf_event_attr {
+
+ /*
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ __u32 type;
+
+ /*
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ __u32 size;
+
+ /*
+ * Type specific configuration information.
+ */
+ __u64 config;
+
+ union {
+ __u64 sample_period;
+ __u64 sample_freq;
+ };
+
+ __u64 sample_type;
+ __u64 read_format;
+
+ __u64 disabled : 1, /* off by default */
+ inherit : 1, /* children inherit it */
+ pinned : 1, /* must always be on PMU */
+ exclusive : 1, /* only group on PMU */
+ exclude_user : 1, /* don't count user */
+ exclude_kernel : 1, /* ditto kernel */
+ exclude_hv : 1, /* ditto hypervisor */
+ exclude_idle : 1, /* don't count when idle */
+ mmap : 1, /* include mmap data */
+ comm : 1, /* include comm data */
+ freq : 1, /* use freq, not period */
+ inherit_stat : 1, /* per task counts */
+ enable_on_exec : 1, /* next exec enables */
+ task : 1, /* trace fork/exit */
+ watermark : 1, /* wakeup_watermark */
+ /*
+ * precise_ip:
+ *
+ * 0 - SAMPLE_IP can have arbitrary skid
+ * 1 - SAMPLE_IP must have constant skid
+ * 2 - SAMPLE_IP requested to have 0 skid
+ * 3 - SAMPLE_IP must have 0 skid
+ *
+ * See also PERF_RECORD_MISC_EXACT_IP
+ */
+ precise_ip : 2, /* skid constraint */
+ mmap_data : 1, /* non-exec mmap data */
+ sample_id_all : 1, /* sample_type all events */
+
+ exclude_host : 1, /* don't count in host */
+ exclude_guest : 1, /* don't count in guest */
+
+ exclude_callchain_kernel : 1, /* exclude kernel callchains */
+ exclude_callchain_user : 1, /* exclude user callchains */
+ mmap2 : 1, /* include mmap with inode data */
+ comm_exec : 1, /* flag comm events that are due to an exec */
+ use_clockid : 1, /* use @clockid for time fields */
+ context_switch : 1, /* context switch data */
+ write_backward : 1, /* Write ring buffer from end to beginning */
+ __reserved_1 : 36;
+
+ union {
+ __u32 wakeup_events; /* wakeup every n events */
+ __u32 wakeup_watermark; /* bytes before wakeup */
+ };
+
+ __u32 bp_type;
+ union {
+ __u64 bp_addr;
+ __u64 config1; /* extension of config */
+ };
+ union {
+ __u64 bp_len;
+ __u64 config2; /* extension of config1 */
+ };
+ __u64 branch_sample_type; /* enum perf_branch_sample_type */
+
+ /*
+ * Defines set of user regs to dump on samples.
+ * See asm/perf_regs.h for details.
+ */
+ __u64 sample_regs_user;
+
+ /*
+ * Defines size of the user stack to dump on samples.
+ */
+ __u32 sample_stack_user;
+
+ __s32 clockid;
+ /*
+ * Defines set of regs to dump for each sample
+ * state captured on:
+ * - precise = 0: PMU interrupt
+ * - precise > 0: sampled instruction
+ *
+ * See asm/perf_regs.h for details.
+ */
+ __u64 sample_regs_intr;
+
+ /*
+ * Wakeup watermark for AUX area
+ */
+ __u32 aux_watermark;
+ __u32 __reserved_2; /* align to __u64 */
+};
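+
+/*
+ * Usage sketch (illustrative, not part of this header): counting CPU
+ * cycles for the calling thread. perf_event_open() has no glibc
+ * wrapper, so it is invoked through syscall(2):
+ *
+ *   struct perf_event_attr attr;
+ *   int fd;
+ *
+ *   memset(&attr, 0, sizeof(attr));
+ *   attr.type = PERF_TYPE_HARDWARE;
+ *   attr.size = sizeof(attr);
+ *   attr.config = PERF_COUNT_HW_CPU_CYCLES;
+ *   attr.disabled = 1;
+ *   fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1,
+ *                PERF_FLAG_FD_CLOEXEC);
+ */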
+
+#define perf_flags(attr) (*(&(attr)->read_format + 1))
+
+/*
+ * Ioctls that can be done on a perf event fd:
+ */
+#define PERF_EVENT_IOC_ENABLE _IO ('$', 0)
+#define PERF_EVENT_IOC_DISABLE _IO ('$', 1)
+#define PERF_EVENT_IOC_REFRESH _IO ('$', 2)
+#define PERF_EVENT_IOC_RESET _IO ('$', 3)
+#define PERF_EVENT_IOC_PERIOD _IOW('$', 4, __u64)
+#define PERF_EVENT_IOC_SET_OUTPUT _IO ('$', 5)
+#define PERF_EVENT_IOC_SET_FILTER _IOW('$', 6, char *)
+#define PERF_EVENT_IOC_ID _IOR('$', 7, __u64 *)
+#define PERF_EVENT_IOC_SET_BPF _IOW('$', 8, __u32)
+#define PERF_EVENT_IOC_PAUSE_OUTPUT _IOW('$', 9, __u32)
+
+enum perf_event_ioc_flags {
+ PERF_IOC_FLAG_GROUP = 1U << 0,
+};
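+
+/*
+ * Example (illustrative): resetting, enabling, and reading a counter
+ * fd obtained from perf_event_open():
+ *
+ *   __u64 count;
+ *
+ *   ioctl(fd, PERF_EVENT_IOC_RESET, 0);
+ *   ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
+ *   // ... workload under measurement ...
+ *   ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
+ *   read(fd, &count, sizeof(count));
+ */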
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_event_mmap_page {
+ __u32 version; /* version number of this structure */
+ __u32 compat_version; /* lowest version this is compat with */
+
+ /*
+ * Bits needed to read the hw events in user-space.
+ *
+ * u32 seq, time_mult, time_shift, index, width;
+ * u64 count, enabled, running;
+ * u64 cyc, time_offset;
+ * s64 pmc = 0;
+ *
+ * do {
+ * seq = pc->lock;
+ * barrier()
+ *
+ * enabled = pc->time_enabled;
+ * running = pc->time_running;
+ *
+ * if (pc->cap_usr_time && enabled != running) {
+ * cyc = rdtsc();
+ * time_offset = pc->time_offset;
+ * time_mult = pc->time_mult;
+ * time_shift = pc->time_shift;
+ * }
+ *
+ * index = pc->index;
+ * count = pc->offset;
+ * if (pc->cap_user_rdpmc && index) {
+ * width = pc->pmc_width;
+ * pmc = rdpmc(index - 1);
+ * }
+ *
+ * barrier();
+ * } while (pc->lock != seq);
+ *
+ * NOTE: for obvious reasons this only works on self-monitoring
+ * processes.
+ */
+ __u32 lock; /* seqlock for synchronization */
+ __u32 index; /* hardware event identifier */
+ __s64 offset; /* add to hardware event value */
+ __u64 time_enabled; /* time event active */
+ __u64 time_running; /* time event on cpu */
+ union {
+ __u64 capabilities;
+ struct {
+ __u64 cap_bit0 : 1, /* Always 0, deprecated, see commit 860f085b74e9 */
+ cap_bit0_is_deprecated : 1, /* Always 1, signals that bit 0 is zero */
+
+ cap_user_rdpmc : 1, /* The RDPMC instruction can be used to read counts */
+ cap_user_time : 1, /* The time_* fields are used */
+ cap_user_time_zero : 1, /* The time_zero field is used */
+ cap_____res : 59;
+ };
+ };
+
+ /*
+ * If cap_user_rdpmc this field provides the bit-width of the value
+ * read using the rdpmc() or equivalent instruction. This can be used
+ * to sign extend the result like:
+ *
+ * pmc <<= 64 - width;
+ * pmc >>= 64 - width; // signed shift right
+ * count += pmc;
+ */
+ __u16 pmc_width;
+
+ /*
+ * If cap_usr_time the below fields can be used to compute the time
+ * delta since time_enabled (in ns) using rdtsc or similar.
+ *
+ * u64 quot, rem;
+ * u64 delta;
+ *
+ * quot = (cyc >> time_shift);
+ * rem = cyc & (((u64)1 << time_shift) - 1);
+ * delta = time_offset + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ *
+ * Where time_offset,time_mult,time_shift and cyc are read in the
+ * seqcount loop described above. This delta can then be added to
+ * enabled and possible running (if index), improving the scaling:
+ *
+ * enabled += delta;
+ * if (index)
+ * running += delta;
+ *
+ * quot = count / running;
+ * rem = count % running;
+ * count = quot * enabled + (rem * enabled) / running;
+ */
+ __u16 time_shift;
+ __u32 time_mult;
+ __u64 time_offset;
+ /*
+ * If cap_usr_time_zero, the hardware clock (e.g. TSC) can be calculated
+ * from sample timestamps.
+ *
+ * time = timestamp - time_zero;
+ * quot = time / time_mult;
+ * rem = time % time_mult;
+ * cyc = (quot << time_shift) + (rem << time_shift) / time_mult;
+ *
+ * And vice versa:
+ *
+ * quot = cyc >> time_shift;
+ * rem = cyc & (((u64)1 << time_shift) - 1);
+ * timestamp = time_zero + quot * time_mult +
+ * ((rem * time_mult) >> time_shift);
+ */
+ __u64 time_zero;
+ __u32 size; /* Header size up to __reserved[] fields. */
+
+ /*
+ * Hole for extension of the self monitor capabilities
+ */
+
+ __u8 __reserved[118*8+4]; /* align to 1k. */
+
+ /*
+ * Control data for the mmap() data buffer.
+ *
+ * User-space reading the @data_head value should issue an smp_rmb()
+ * after reading it.
+ *
+ * When the mapping is PROT_WRITE the @data_tail value should be
+ * written by userspace to reflect the last read data, after issuing
+ * an smp_mb() to separate the data read from the ->data_tail store.
+ * In this case the kernel will not over-write unread data.
+ *
+ * See perf_output_put_handle() for the data ordering.
+ *
+ * data_{offset,size} indicate the location and size of the perf record
+ * buffer within the mmapped area.
+ */
+ __u64 data_head; /* head in the data section */
+ __u64 data_tail; /* user-space written tail */
+ __u64 data_offset; /* where the buffer starts */
+ __u64 data_size; /* data buffer size */
+
+ /*
+ * AUX area is defined by aux_{offset,size} fields that should be set
+ * by userspace, so that
+ *
+ * aux_offset >= data_offset + data_size
+ *
+ * prior to mmap()ing it. Size of the mmap()ed area should be aux_size.
+ *
+ * Ring buffer pointers aux_{head,tail} have the same semantics as
+ * data_{head,tail} and same ordering rules apply.
+ */
+ __u64 aux_head;
+ __u64 aux_tail;
+ __u64 aux_offset;
+ __u64 aux_size;
+};
+
+#define PERF_RECORD_MISC_CPUMODE_MASK (7 << 0)
+#define PERF_RECORD_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_RECORD_MISC_KERNEL (1 << 0)
+#define PERF_RECORD_MISC_USER (2 << 0)
+#define PERF_RECORD_MISC_HYPERVISOR (3 << 0)
+#define PERF_RECORD_MISC_GUEST_KERNEL (4 << 0)
+#define PERF_RECORD_MISC_GUEST_USER (5 << 0)
+
+/*
+ * Indicates that /proc/PID/maps parsing was truncated by a timeout.
+ */
+#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT (1 << 12)
+/*
+ * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
+ * different events, so they can reuse the same bit position.
+ * Ditto PERF_RECORD_MISC_SWITCH_OUT.
+ */
+#define PERF_RECORD_MISC_MMAP_DATA (1 << 13)
+#define PERF_RECORD_MISC_COMM_EXEC (1 << 13)
+#define PERF_RECORD_MISC_SWITCH_OUT (1 << 13)
+/*
+ * Indicates that the content of PERF_SAMPLE_IP points to
+ * the actual instruction that triggered the event. See also
+ * perf_event_attr::precise_ip.
+ */
+#define PERF_RECORD_MISC_EXACT_IP (1 << 14)
+/*
+ * Reserve the last bit to indicate some extended misc field
+ */
+#define PERF_RECORD_MISC_EXT_RESERVED (1 << 15)
+
+struct perf_event_header {
+ __u32 type;
+ __u16 misc;
+ __u16 size;
+};
+
+enum perf_event_type {
+
+ /*
+ * If perf_event_attr.sample_id_all is set then all event types will
+ * have the sample_type selected fields related to where/when
+ * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
+ * IDENTIFIER) described in PERF_RECORD_SAMPLE below; they are stashed
+ * just after the perf_event_header and the fields already present for
+ * the existing record types, i.e. at the end of the payload. That way
+ * a newer perf.data file will be supported by older perf tools, with
+ * these new optional fields being ignored.
+ *
+ * struct sample_id {
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
+ * } && perf_event_attr::sample_id_all
+ *
+ * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
+ * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
+ * relative to header.size.
+ */
+
+ /*
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_MMAP = 1,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 id;
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_LOST = 2,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * char comm[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_COMM = 3,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * u64 time;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_EXIT = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * u64 stream_id;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_THROTTLE = 5,
+ PERF_RECORD_UNTHROTTLE = 6,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * u32 tid, ptid;
+ * u64 time;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_FORK = 7,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, tid;
+ *
+ * struct read_format values;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_READ = 8,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * #
+ * # Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
+ * # The advantage of PERF_SAMPLE_IDENTIFIER is that its position
+ * # is fixed relative to header.
+ * #
+ *
+ * { u64 id; } && PERF_SAMPLE_IDENTIFIER
+ * { u64 ip; } && PERF_SAMPLE_IP
+ * { u32 pid, tid; } && PERF_SAMPLE_TID
+ * { u64 time; } && PERF_SAMPLE_TIME
+ * { u64 addr; } && PERF_SAMPLE_ADDR
+ * { u64 id; } && PERF_SAMPLE_ID
+ * { u64 stream_id;} && PERF_SAMPLE_STREAM_ID
+ * { u32 cpu, res; } && PERF_SAMPLE_CPU
+ * { u64 period; } && PERF_SAMPLE_PERIOD
+ *
+ * { struct read_format values; } && PERF_SAMPLE_READ
+ *
+ * { u64 nr,
+ * u64 ips[nr]; } && PERF_SAMPLE_CALLCHAIN
+ *
+ * #
+ * # The RAW record below is opaque data wrt the ABI
+ * #
+ * # That is, the ABI doesn't make any promises wrt to
+ * # the stability of its content, it may vary depending
+ * # on event, hardware, kernel version and phase of
+ * # the moon.
+ * #
+ * # In other words, PERF_SAMPLE_RAW contents are not an ABI.
+ * #
+ *
+ * { u32 size;
+ * char data[size];}&& PERF_SAMPLE_RAW
+ *
+ * { u64 nr;
+ * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ *
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
+ *
+ * { u64 size;
+ * char data[size];
+ * u64 dyn_size; } && PERF_SAMPLE_STACK_USER
+ *
+ * { u64 weight; } && PERF_SAMPLE_WEIGHT
+ * { u64 data_src; } && PERF_SAMPLE_DATA_SRC
+ * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
+ * { u64 abi; # enum perf_sample_regs_abi
+ * u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
+ * };
+ */
+ PERF_RECORD_SAMPLE = 9,
+
+ /*
+ * The MMAP2 records are an augmented version of MMAP; they add
+ * maj, min, and ino numbers used to uniquely identify each mapping.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * u32 maj;
+ * u32 min;
+ * u64 ino;
+ * u64 ino_generation;
+ * u32 prot, flags;
+ * char filename[];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_MMAP2 = 10,
+
+ /*
+ * Records that new data landed in the AUX buffer part.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 aux_offset;
+ * u64 aux_size;
+ * u64 flags;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_AUX = 11,
+
+ /*
+ * Indicates that instruction trace has started
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * };
+ */
+ PERF_RECORD_ITRACE_START = 12,
+
+ /*
+ * Records the dropped/lost sample number.
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u64 lost;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_LOST_SAMPLES = 13,
+
+ /*
+ * Records a context switch in or out (flagged by
+ * PERF_RECORD_MISC_SWITCH_OUT). See also
+ * PERF_RECORD_SWITCH_CPU_WIDE.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_SWITCH = 14,
+
+ /*
+ * CPU-wide version of PERF_RECORD_SWITCH with next_prev_pid and
+ * next_prev_tid that are the next (switching out) or previous
+ * (switching in) pid/tid.
+ *
+ * struct {
+ * struct perf_event_header header;
+ * u32 next_prev_pid;
+ * u32 next_prev_tid;
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_SWITCH_CPU_WIDE = 15,
+
+ PERF_RECORD_MAX, /* non-ABI */
+};
+
+#define PERF_MAX_STACK_DEPTH 127
+#define PERF_MAX_CONTEXTS_PER_STACK 8
+
+enum perf_callchain_context {
+ PERF_CONTEXT_HV = (__u64)-32,
+ PERF_CONTEXT_KERNEL = (__u64)-128,
+ PERF_CONTEXT_USER = (__u64)-512,
+
+ PERF_CONTEXT_GUEST = (__u64)-2048,
+ PERF_CONTEXT_GUEST_KERNEL = (__u64)-2176,
+ PERF_CONTEXT_GUEST_USER = (__u64)-2560,
+
+ PERF_CONTEXT_MAX = (__u64)-4095,
+};
+
+/**
+ * PERF_RECORD_AUX::flags bits
+ */
+#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
+#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+
+#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
+#define PERF_FLAG_FD_OUTPUT (1UL << 1)
+#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
+#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
+
+union perf_mem_data_src {
+ __u64 val;
+ struct {
+ __u64 mem_op:5, /* type of opcode */
+ mem_lvl:14, /* memory hierarchy level */
+ mem_snoop:5, /* snoop mode */
+ mem_lock:2, /* lock instr */
+ mem_dtlb:7, /* tlb access */
+ mem_rsvd:31;
+ };
+};
+
+/* type of opcode (load/store/prefetch,code) */
+#define PERF_MEM_OP_NA 0x01 /* not available */
+#define PERF_MEM_OP_LOAD 0x02 /* load instruction */
+#define PERF_MEM_OP_STORE 0x04 /* store instruction */
+#define PERF_MEM_OP_PFETCH 0x08 /* prefetch */
+#define PERF_MEM_OP_EXEC 0x10 /* code (execution) */
+#define PERF_MEM_OP_SHIFT 0
+
+/* memory hierarchy (memory level, hit or miss) */
+#define PERF_MEM_LVL_NA 0x01 /* not available */
+#define PERF_MEM_LVL_HIT 0x02 /* hit level */
+#define PERF_MEM_LVL_MISS 0x04 /* miss level */
+#define PERF_MEM_LVL_L1 0x08 /* L1 */
+#define PERF_MEM_LVL_LFB 0x10 /* Line Fill Buffer */
+#define PERF_MEM_LVL_L2 0x20 /* L2 */
+#define PERF_MEM_LVL_L3 0x40 /* L3 */
+#define PERF_MEM_LVL_LOC_RAM 0x80 /* Local DRAM */
+#define PERF_MEM_LVL_REM_RAM1 0x100 /* Remote DRAM (1 hop) */
+#define PERF_MEM_LVL_REM_RAM2 0x200 /* Remote DRAM (2 hops) */
+#define PERF_MEM_LVL_REM_CCE1 0x400 /* Remote Cache (1 hop) */
+#define PERF_MEM_LVL_REM_CCE2 0x800 /* Remote Cache (2 hops) */
+#define PERF_MEM_LVL_IO 0x1000 /* I/O memory */
+#define PERF_MEM_LVL_UNC 0x2000 /* Uncached memory */
+#define PERF_MEM_LVL_SHIFT 5
+
+/* snoop mode */
+#define PERF_MEM_SNOOP_NA 0x01 /* not available */
+#define PERF_MEM_SNOOP_NONE 0x02 /* no snoop */
+#define PERF_MEM_SNOOP_HIT 0x04 /* snoop hit */
+#define PERF_MEM_SNOOP_MISS 0x08 /* snoop miss */
+#define PERF_MEM_SNOOP_HITM 0x10 /* snoop hit modified */
+#define PERF_MEM_SNOOP_SHIFT 19
+
+/* locked instruction */
+#define PERF_MEM_LOCK_NA 0x01 /* not available */
+#define PERF_MEM_LOCK_LOCKED 0x02 /* locked transaction */
+#define PERF_MEM_LOCK_SHIFT 24
+
+/* TLB access */
+#define PERF_MEM_TLB_NA 0x01 /* not available */
+#define PERF_MEM_TLB_HIT 0x02 /* hit level */
+#define PERF_MEM_TLB_MISS 0x04 /* miss level */
+#define PERF_MEM_TLB_L1 0x08 /* L1 */
+#define PERF_MEM_TLB_L2 0x10 /* L2 */
+#define PERF_MEM_TLB_WK 0x20 /* Hardware Walker */
+#define PERF_MEM_TLB_OS 0x40 /* OS fault handler */
+#define PERF_MEM_TLB_SHIFT 26
+
+#define PERF_MEM_S(a, s) \
+ (((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
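+
+/*
+ * Composition example (illustrative): a data_src value describing an
+ * L1 load hit shifts each flag into its field:
+ *
+ *   val = PERF_MEM_S(OP, LOAD) | PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1);
+ */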
+
+/*
+ * single taken branch record layout:
+ *
+ * from: source instruction (may not always be a branch insn)
+ * to: branch target
+ * mispred: branch target was mispredicted
+ * predicted: branch target was predicted
+ *
+ * support for mispred, predicted is optional. If it
+ * is not supported, mispred = predicted = 0.
+ *
+ * in_tx: running in a hardware transaction
+ * abort: aborting a hardware transaction
+ * cycles: cycles from last branch (or 0 if not supported)
+ */
+struct perf_branch_entry {
+ __u64 from;
+ __u64 to;
+ __u64 mispred:1, /* target mispredicted */
+ predicted:1,/* target predicted */
+ in_tx:1, /* in transaction */
+ abort:1, /* transaction abort */
+ cycles:16, /* cycle count to last branch */
+ reserved:44;
+};
+
+#endif /* _UAPI_LINUX_PERF_EVENT_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Internal header for Lock-Free RCU Hash Table
+ */
+
+#ifndef _LTTNG_UST_RCULFHASH_INTERNAL_H
+#define _LTTNG_UST_RCULFHASH_INTERNAL_H
+
+#include "rculfhash.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#ifdef DEBUG
+#define dbg_printf(fmt, args...) printf("[debug lttng-ust rculfhash] " fmt, ## args)
+#else
+#define dbg_printf(fmt, args...) \
+do { \
+ /* do nothing but check printf format */ \
+ if (0) \
+ printf("[debug lttng-ust rculfhash] " fmt, ## args); \
+} while (0)
+#endif
+
+#if (CAA_BITS_PER_LONG == 32)
+#define MAX_TABLE_ORDER 32
+#else
+#define MAX_TABLE_ORDER 64
+#endif
+
+#define MAX_CHUNK_TABLE (1UL << 10)
+
+#ifndef min
+#define min(a, b) ((a) < (b) ? (a) : (b))
+#endif
+
+#ifndef max
+#define max(a, b) ((a) > (b) ? (a) : (b))
+#endif
+
+/*
+ * lttng_ust_lfht: Top-level data structure representing a lock-free hash
+ * table. Defined in the implementation file to make it an opaque
+ * cookie to users.
+ *
+ * The fields used in fast-paths are placed near the end of the
+ * structure, because we need to have a variable-sized union to contain
+ * the mm plugin fields, which are used in the fast path.
+ */
+struct lttng_ust_lfht {
+ /* Initial configuration items */
+ unsigned long max_nr_buckets;
+ const struct lttng_ust_lfht_mm_type *mm; /* memory management plugin */
+ const struct rcu_flavor_struct *flavor; /* RCU flavor */
+
+ /*
+ * We need to put the work threads offline (QSBR) when taking this
+ * mutex, because we use synchronize_rcu within this mutex critical
+ * section, which waits on read-side critical sections, and could
+ * therefore cause grace-period deadlock if we hold off RCU G.P.
+ * completion.
+ */
+ pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
+ unsigned int in_progress_destroy;
+ unsigned long resize_target;
+ int resize_initiated;
+
+ /*
+ * Variables needed for add and remove fast-paths.
+ */
+ int flags;
+ unsigned long min_alloc_buckets_order;
+ unsigned long min_nr_alloc_buckets;
+
+ /*
+ * Variables needed for the lookup, add and remove fast-paths.
+ */
+ unsigned long size; /* always a power of 2, shared (RCU) */
+ /*
+ * bucket_at pointer is kept here to skip the extra level of
+ * dereference needed to get to "mm" (this is a fast-path).
+ */
+ struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
+ unsigned long index);
+ /*
+ * Dynamic length "tbl_chunk" needs to be at the end of
+ * lttng_ust_lfht.
+ */
+ union {
+ /*
+ * Contains the per order-index-level bucket node table.
+ * The size of each bucket node table is half the number
+ * of hashes contained in this order (except for order 0).
+ * The minimum allocation buckets size parameter allows
+ * combining the bucket node arrays of the lowermost
+ * levels to improve cache locality for small index orders.
+ */
+ struct lttng_ust_lfht_node *tbl_order[MAX_TABLE_ORDER];
+
+ /*
+ * Contains the bucket node chunks. The size of each
+ * bucket node chunk is ->min_nr_alloc_buckets buckets (we avoid
+ * allocating chunks of different sizes). Chunks improve
+ * cache locality for small index orders, and are more
+ * friendly with environments where allocation of large
+ * contiguous memory areas is challenging due to memory
+ * fragmentation concerns or inability to use virtual
+ * memory addressing.
+ */
+ struct lttng_ust_lfht_node *tbl_chunk[0];
+
+ /*
+ * Memory mapping with room for all possible buckets.
+ * Their memory is allocated when needed.
+ */
+ struct lttng_ust_lfht_node *tbl_mmap;
+ };
+ /*
+ * End of variables needed for the lookup, add and remove
+ * fast-paths.
+ */
+};
+
+extern unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
+ __attribute__((visibility("hidden")));
+
+extern int lttng_ust_lfht_get_count_order_u32(uint32_t x)
+ __attribute__((visibility("hidden")));
+
+extern int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
+ __attribute__((visibility("hidden")));
+
+#ifdef POISON_FREE
+#define poison_free(ptr) \
+ do { \
+ if (ptr) { \
+ memset(ptr, 0x42, sizeof(*(ptr))); \
+ free(ptr); \
+ } \
+ } while (0)
+#else
+#define poison_free(ptr) free(ptr)
+#endif
+
+static inline
+struct lttng_ust_lfht *__default_alloc_lttng_ust_lfht(
+ const struct lttng_ust_lfht_mm_type *mm,
+ unsigned long lttng_ust_lfht_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ struct lttng_ust_lfht *ht;
+
+ ht = calloc(1, lttng_ust_lfht_size);
+ assert(ht);
+
+ ht->mm = mm;
+ ht->bucket_at = mm->bucket_at;
+ ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
+ ht->min_alloc_buckets_order =
+ lttng_ust_lfht_get_count_order_ulong(min_nr_alloc_buckets);
+ ht->max_nr_buckets = max_nr_buckets;
+
+ return ht;
+}
+
+#endif /* _LTTNG_UST_RCULFHASH_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Chunk based memory management for Lock-Free RCU Hash Table
+ */
+
+#include <stddef.h>
+#include "rculfhash-internal.h"
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ ht->tbl_chunk[0] = calloc(ht->min_nr_alloc_buckets,
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_chunk[0]);
+ } else if (order > ht->min_alloc_buckets_order) {
+ unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
+
+ for (i = len; i < 2 * len; i++) {
+ ht->tbl_chunk[i] = calloc(ht->min_nr_alloc_buckets,
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_chunk[i]);
+ }
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0)
+ poison_free(ht->tbl_chunk[0]);
+ else if (order > ht->min_alloc_buckets_order) {
+ unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
+
+ for (i = len; i < 2 * len; i++)
+ poison_free(ht->tbl_chunk[i]);
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+static
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ unsigned long chunk, offset;
+
+ chunk = index >> ht->min_alloc_buckets_order;
+ offset = index & (ht->min_nr_alloc_buckets - 1);
+ return &ht->tbl_chunk[chunk][offset];
+}
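+
+/*
+ * Worked example (illustrative): with min_nr_alloc_buckets = 8, hence
+ * min_alloc_buckets_order = 3, index 21 resolves to chunk 21 >> 3 = 2
+ * and offset 21 & 7 = 5, i.e. &ht->tbl_chunk[2][5].
+ */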
+
+static
+struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ unsigned long nr_chunks, lttng_ust_lfht_size;
+
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets,
+ max_nr_buckets / MAX_CHUNK_TABLE);
+ nr_chunks = max_nr_buckets / min_nr_alloc_buckets;
+ lttng_ust_lfht_size = offsetof(struct lttng_ust_lfht, tbl_chunk) +
+ sizeof(struct lttng_ust_lfht_node *) * nr_chunks;
+ lttng_ust_lfht_size = max(lttng_ust_lfht_size, sizeof(struct lttng_ust_lfht));
+
+ return __default_alloc_lttng_ust_lfht(
+ <tng_ust_lfht_mm_chunk, lttng_ust_lfht_size,
+ min_nr_alloc_buckets, max_nr_buckets);
+}
+
+const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk = {
+ .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
+ .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
+ .free_bucket_table = lttng_ust_lfht_free_bucket_table,
+ .bucket_at = bucket_at,
+};
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * mmap/reservation based memory management for Lock-Free RCU Hash Table
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include "rculfhash-internal.h"
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+/*
+ * The allocation scheme used by the mmap based RCU hash table is to make a
+ * large inaccessible mapping to reserve memory without allocating it.
+ * Then smaller chunks are allocated by overlapping read/write mappings which
+ * do allocate memory. Deallocation is done by an overlapping unaccessible
+ * mapping.
+ *
+ * This scheme was tested on Linux, macOS and Solaris. However, on Cygwin the
+ * mmap wrapper is based on the Windows NtMapViewOfSection API which doesn't
+ * support overlapping mappings.
+ *
+ * An alternative to the overlapping mappings is to use mprotect to change the
+ * protection on chunks of the large mapping, read/write to allocate and none
+ * to deallocate. This works perfectly on Cygwin and Solaris, but on Linux a
+ * call to madvise is also required to deallocate and it just doesn't work on
+ * macOS.
+ *
+ * For this reason, we keep the original scheme on all platforms except Cygwin.
+ */
+
+
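+/*
+ * Illustrative lifecycle of the scheme above (sketch, using the helpers
+ * defined below; max_len and chunk_len are placeholder lengths):
+ *
+ *   void *tbl = memory_map(max_len);     reserve, PROT_NONE
+ *   memory_populate(tbl, chunk_len);     commit a chunk (read/write)
+ *   memory_discard(tbl, chunk_len);      give it back, keep reserved
+ *   memory_unmap(tbl, max_len);          final teardown
+ */
+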
+/* Reserve inaccessible memory space without allocating it */
+static
+void *memory_map(size_t length)
+{
+ void *ret;
+
+ ret = mmap(NULL, length, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if (ret == MAP_FAILED) {
+ perror("mmap");
+ abort();
+ }
+ return ret;
+}
+
+static
+void memory_unmap(void *ptr, size_t length)
+{
+ if (munmap(ptr, length)) {
+ perror("munmap");
+ abort();
+ }
+}
+
+#ifdef __CYGWIN__
+/* Set protection to read/write to allocate a memory chunk */
+static
+void memory_populate(void *ptr, size_t length)
+{
+ if (mprotect(ptr, length, PROT_READ | PROT_WRITE)) {
+ perror("mprotect");
+ abort();
+ }
+}
+
+/* Set protection to none to deallocate a memory chunk */
+static
+void memory_discard(void *ptr, size_t length)
+{
+ if (mprotect(ptr, length, PROT_NONE)) {
+ perror("mprotect");
+ abort();
+ }
+}
+
+#else /* __CYGWIN__ */
+
+static
+void memory_populate(void *ptr, size_t length)
+{
+ if (mmap(ptr, length, PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0) != ptr) {
+ perror("mmap");
+ abort();
+ }
+}
+
+/*
+ * Discard the garbage memory so the system does not try to save it
+ * when swapping it out, while keeping the range reserved and
+ * inaccessible.
+ */
+static
+void memory_discard(void *ptr, size_t length)
+{
+ if (mmap(ptr, length, PROT_NONE,
+ MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
+ -1, 0) != ptr) {
+ perror("mmap");
+ abort();
+ }
+}
+#endif /* __CYGWIN__ */
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
+ /* small table */
+ ht->tbl_mmap = calloc(ht->max_nr_buckets,
+ sizeof(*ht->tbl_mmap));
+ assert(ht->tbl_mmap);
+ return;
+ }
+ /* large table */
+ ht->tbl_mmap = memory_map(ht->max_nr_buckets
+ * sizeof(*ht->tbl_mmap));
+ memory_populate(ht->tbl_mmap,
+ ht->min_nr_alloc_buckets * sizeof(*ht->tbl_mmap));
+ } else if (order > ht->min_alloc_buckets_order) {
+ /* large table */
+ unsigned long len = 1UL << (order - 1);
+
+ assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
+ memory_populate(ht->tbl_mmap + len,
+ len * sizeof(*ht->tbl_mmap));
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
+ /* small table */
+ poison_free(ht->tbl_mmap);
+ return;
+ }
+ /* large table */
+ memory_unmap(ht->tbl_mmap,
+ ht->max_nr_buckets * sizeof(*ht->tbl_mmap));
+ } else if (order > ht->min_alloc_buckets_order) {
+ /* large table */
+ unsigned long len = 1UL << (order - 1);
+
+ assert(ht->min_nr_alloc_buckets < ht->max_nr_buckets);
+ memory_discard(ht->tbl_mmap + len, len * sizeof(*ht->tbl_mmap));
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+static
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ return &ht->tbl_mmap[index];
+}
+
+static
+struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ unsigned long page_bucket_size;
+
+ page_bucket_size = getpagesize() / sizeof(struct lttng_ust_lfht_node);
+ if (max_nr_buckets <= page_bucket_size) {
+ /* small table */
+ min_nr_alloc_buckets = max_nr_buckets;
+ } else {
+ /* large table */
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets,
+ page_bucket_size);
+ }
+
+ return __default_alloc_lttng_ust_lfht(
+ <tng_ust_lfht_mm_mmap, sizeof(struct lttng_ust_lfht),
+ min_nr_alloc_buckets, max_nr_buckets);
+}
+
+const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap = {
+ .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
+ .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
+ .free_bucket_table = lttng_ust_lfht_free_bucket_table,
+ .bucket_at = bucket_at,
+};
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Order based memory management for Lock-Free RCU Hash Table
+ */
+
+#include <rculfhash-internal.h>
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0) {
+ ht->tbl_order[0] = calloc(ht->min_nr_alloc_buckets,
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_order[0]);
+ } else if (order > ht->min_alloc_buckets_order) {
+ ht->tbl_order[order] = calloc(1UL << (order - 1),
+ sizeof(struct lttng_ust_lfht_node));
+ assert(ht->tbl_order[order]);
+ }
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ if (order == 0)
+ poison_free(ht->tbl_order[0]);
+ else if (order > ht->min_alloc_buckets_order)
+ poison_free(ht->tbl_order[order]);
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
+}
+
+static
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ unsigned long order;
+
+ if (index < ht->min_nr_alloc_buckets) {
+ dbg_printf("bucket index %lu order 0 aridx 0\n", index);
+ return &ht->tbl_order[0][index];
+ }
+ /*
+ * equivalent to lttng_ust_lfht_get_count_order_ulong(index + 1), but
+ * optimizes away the non-existing 0 special-case for
+ * lttng_ust_lfht_get_count_order_ulong.
+ */
+ order = lttng_ust_lfht_fls_ulong(index);
+ dbg_printf("bucket index %lu order %lu aridx %lu\n",
+ index, order, index & ((1UL << (order - 1)) - 1));
+ return &ht->tbl_order[order][index & ((1UL << (order - 1)) - 1)];
+}
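+
+/*
+ * Worked example (illustrative): with min_nr_alloc_buckets = 2 and
+ * index = 5, order = fls(5) = 3 and the array index is
+ * 5 & ((1UL << 2) - 1) = 1, i.e. &ht->tbl_order[3][1].
+ */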
+
+static
+struct lttng_ust_lfht *alloc_lttng_ust_lfht(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets)
+{
+ return __default_alloc_lttng_ust_lfht(
+ <tng_ust_lfht_mm_order, sizeof(struct lttng_ust_lfht),
+ min_nr_alloc_buckets, max_nr_buckets);
+}
+
+const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order = {
+ .alloc_lttng_ust_lfht = alloc_lttng_ust_lfht,
+ .alloc_bucket_table = lttng_ust_lfht_alloc_bucket_table,
+ .free_bucket_table = lttng_ust_lfht_free_bucket_table,
+ .bucket_at = bucket_at,
+};
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2010-2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Userspace RCU library - Lock-Free Resizable RCU Hash Table
+ */
+
+/*
+ * Based on the following articles:
+ * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
+ * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
+ * - Michael, M. M. High performance dynamic lock-free hash tables
+ * and list-based sets. In Proceedings of the fourteenth annual ACM
+ * symposium on Parallel algorithms and architectures, ACM Press,
+ * (2002), 73-82.
+ *
+ * Some specificities of this Lock-Free Resizable RCU Hash Table
+ * implementation:
+ *
+ * - RCU read-side critical section allows readers to perform hash
+ * table lookups, as well as traversals, and use the returned objects
+ * safely by allowing memory reclaim to take place only after a grace
+ * period.
+ * - Add and remove operations are lock-free, and do not need to
+ * allocate memory. They need to be executed within RCU read-side
+ * critical section to ensure the objects they read are valid and to
+ * deal with the cmpxchg ABA problem.
+ * - add and add_unique operations are supported. add_unique checks
+ * whether the node key is already present in the hash table, and
+ * refuses to insert a duplicate key if it is.
+ * - The resize operation executes concurrently with
+ * add/add_unique/add_replace/remove/lookup/traversal.
+ * - Hash table nodes are contained within a split-ordered list. This
+ * list is ordered by incrementing reversed-bits-hash value.
+ * - An index of bucket nodes is kept. These bucket nodes are the hash
+ * table "buckets". These buckets are internal nodes that allow
+ * performing a fast hash lookup, similarly to a skip list. These
+ * buckets are chained together in the split-ordered list, which
+ * allows recursive expansion: new buckets can be inserted between
+ * existing buckets as the table needs to grow.
+ * - The resize operation for small tables only allows expanding the
+ * hash table. It is triggered automatically by detecting long chains
+ * in the add operation.
+ * - The resize operation for larger tables (and available through an
+ * API) allows both expanding and shrinking the hash table.
+ * - Split-counters are used to keep track of the number of
+ * nodes within the hash table for automatic resize triggering.
+ * - Resize operation initiated by long chain detection is executed by a
+ * worker thread, which keeps lock-freedom of add and remove.
+ * - Resize operations are protected by a mutex.
+ * - The removal operation is split in two parts: first, a "removed"
+ * flag is set in the next pointer within the node to remove. Then,
+ * a "garbage collection" is performed in the bucket containing the
+ * removed node (from the start of the bucket up to the removed node).
+ * All encountered nodes with "removed" flag set in their next
+ * pointers are removed from the linked-list. If the cmpxchg used for
+ * removal fails (due to concurrent garbage-collection or concurrent
+ * add), we retry from the beginning of the bucket. This ensures that
+ * the node with "removed" flag set is removed from the hash table
+ * (not visible to lookups anymore) before the RCU read-side critical
+ * section held across removal ends. Furthermore, this ensures that
+ * the node with "removed" flag set is removed from the linked-list
+ * before its memory is reclaimed. After setting the "removal" flag,
+ * only the deleting thread that is first to set the "removal
+ * owner" flag (with an xchg) in the node's next pointer is
+ * considered to have succeeded its removal (and thus owns the
+ * node to reclaim).
+ * Because we garbage-collect starting from an invariant node (the
+ * start-of-bucket bucket node) up to the "removed" node (or find a
+ * reverse-hash that is higher), we are sure that a successful
+ * traversal of the chain leads to a chain that is present in the
+ * linked-list (the start node is never removed) and that it does not
+ * contain the "removed" node anymore, even if concurrent delete/add
+ * operations are changing the structure of the list concurrently.
+ * - The add operations perform garbage collection of buckets if they
+ * encounter nodes with removed flag set in the bucket where they want
+ * to add their new node. This ensures lock-freedom of the add
+ * operation by helping the remover unlink nodes from the list rather
+ * than waiting for it to do so.
+ * - There are three memory backends for the hash table buckets: the
+ * "order table", the "chunks", and the "mmap".
+ * - These bucket containers contain a compact version of the hash table
+ * nodes.
+ * - The RCU "order table":
+ * - has a first level table indexed by log2(hash index) which is
+ * copied and expanded by the resize operation. This order table
+ * allows finding the "bucket node" tables.
+ * - There is one bucket node table per hash index order. The size of
+ * each bucket node table is half the number of hashes contained in
+ * this order (except for order 0).
+ * - The RCU "chunks" is best suited for close interaction with a page
+ * allocator. It uses a linear array as index to "chunks" containing
+ * each the same number of buckets.
+ * - The RCU "mmap" memory backend uses a single memory map to hold
+ * all buckets.
+ * - synchronize_rcu is used to garbage-collect the old bucket node table.
+ *
+ * Ordering Guarantees:
+ *
+ * To discuss these guarantees, we first define "read" operation as any
+ * of the basic lttng_ust_lfht_lookup, lttng_ust_lfht_next_duplicate,
+ * lttng_ust_lfht_first, lttng_ust_lfht_next operation, as well as
+ * lttng_ust_lfht_add_unique (failure).
+ *
+ * We define "read traversal" operation as any of the following
+ * group of operations
+ * - lttng_ust_lfht_lookup followed by iteration with lttng_ust_lfht_next_duplicate
+ * (and/or lttng_ust_lfht_next, although less common).
+ * - lttng_ust_lfht_add_unique (failure) followed by iteration with
+ * lttng_ust_lfht_next_duplicate (and/or lttng_ust_lfht_next, although less
+ * common).
+ * - lttng_ust_lfht_first followed by iteration with lttng_ust_lfht_next (and/or
+ * lttng_ust_lfht_next_duplicate, although less common).
+ *
+ * We define "write" operations as any of lttng_ust_lfht_add, lttng_ust_lfht_replace,
+ * lttng_ust_lfht_add_unique (success), lttng_ust_lfht_add_replace, lttng_ust_lfht_del.
+ *
+ * When lttng_ust_lfht_add_unique succeeds (returns the node passed as
+ * parameter), it acts as a "write" operation. When lttng_ust_lfht_add_unique
+ * fails (returns a node different from the one passed as parameter), it
+ * acts as a "read" operation. A lttng_ust_lfht_add_unique failure is a
+ * lttng_ust_lfht_lookup "read" operation; therefore, any ordering guarantee
+ * referring to "lookup" implies any of "lookup" or lttng_ust_lfht_add_unique
+ * (failure).
+ *
+ * We define "prior" and "later" node as nodes observable by reads and
+ * read traversals respectively before and after a write or sequence of
+ * write operations.
+ *
+ * Hash-table operations are often cascaded, for example, the pointer
+ * returned by a lttng_ust_lfht_lookup() might be passed to a lttng_ust_lfht_next(),
+ * whose return value might in turn be passed to another hash-table
+ * operation. This entire cascaded series of operations must be enclosed
+ * by a pair of matching rcu_read_lock() and rcu_read_unlock()
+ * operations.
+ *
+ * The following ordering guarantees are offered by this hash table:
+ *
+ * A.1) "read" after "write": if there is ordering between a write and a
+ * later read, then the read is guaranteed to see the write or some
+ * later write.
+ * A.2) "read traversal" after "write": given that there is dependency
+ * ordering between reads in a "read traversal", if there is
+ * ordering between a write and the first read of the traversal,
+ * then the "read traversal" is guaranteed to see the write or
+ * some later write.
+ * B.1) "write" after "read": if there is ordering between a read and a
+ * later write, then the read will never see the write.
+ * B.2) "write" after "read traversal": given that there is dependency
+ * ordering between reads in a "read traversal", if there is
+ * ordering between the last read of the traversal and a later
+ * write, then the "read traversal" will never see the write.
+ * C) "write" while "read traversal": if a write occurs during a "read
+ * traversal", the traversal may, or may not, see the write.
+ * D.1) "write" after "write": if there is ordering between a write and
+ * a later write, then the later write is guaranteed to see the
+ * effects of the first write.
+ * D.2) Concurrent "write" pairs: The system will assign an arbitrary
+ * order to any pair of concurrent conflicting writes.
+ * Non-conflicting writes (for example, to different keys) are
+ * unordered.
+ * E) If a grace period separates a "del" or "replace" operation
+ * and a subsequent operation, then that subsequent operation is
+ * guaranteed not to see the removed item.
+ * F) Uniqueness guarantee: given a hash table that does not contain
+ * duplicate items for a given key, there will only be one item in
+ * the hash table after an arbitrary sequence of add_unique and/or
+ * add_replace operations. Note, however, that a pair of
+ * concurrent read operations might well access two different items
+ * with that key.
+ * G.1) If a pair of lookups for a given key are ordered (e.g. by a
+ * memory barrier), then the second lookup will return the same
+ * node as the previous lookup, or some later node.
+ * G.2) A "read traversal" that starts after the end of a prior "read
+ * traversal" (ordered by memory barriers) is guaranteed to see the
+ * same nodes as the previous traversal, or some later nodes.
+ * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
+ * example, if a pair of reads to the same key run concurrently
+ * with an insertion of that same key, the reads remain unordered
+ * regardless of their return values. In other words, you cannot
+ * rely on the values returned by the reads to deduce ordering.
+ *
+ * Progress guarantees:
+ *
+ * * Reads are wait-free. These operations always move forward in the
+ * hash table linked list, and this list has no loop.
+ * * Writes are lock-free. Any retry loop performed by a write operation
+ * is triggered by progress made within another update operation.
+ *
+ * Bucket node tables:
+ *
+ * hash table hash table the last all bucket node tables
+ * order size bucket node 0 1 2 3 4 5 6(index)
+ * table size
+ * 0 1 1 1
+ * 1 2 1 1 1
+ * 2 4 2 1 1 2
+ * 3 8 4 1 1 2 4
+ * 4 16 8 1 1 2 4 8
+ * 5 32 16 1 1 2 4 8 16
+ * 6 64 32 1 1 2 4 8 16 32
+ *
+ * When growing/shrinking, we only focus on the last bucket node table,
+ * whose size is (!order ? 1 : (1 << (order - 1))).
+ *
+ * Example for growing/shrinking:
+ * grow hash table from order 5 to 6: init the index=6 bucket node table
+ * shrink hash table from order 6 to 5: fini the index=6 bucket node table
+ *
+ * A bit of ascii art explanation:
+ *
+ * The order index is off by one compared to the actual power of 2,
+ * because we use index 0 to deal with the 0 special-case.
+ *
+ * This shows the nodes for a small table ordered by reversed bits:
+ *
+ * bits reverse
+ * 0 000 000
+ * 4 100 001
+ * 2 010 010
+ * 6 110 011
+ * 1 001 100
+ * 5 101 101
+ * 3 011 110
+ * 7 111 111
+ *
+ * This shows the nodes in order of non-reversed bits, linked by
+ * reversed-bit order.
+ *
+ * order bits reverse
+ * 0 0 000 000
+ * 1 | 1 001 100 <-
+ * 2 | | 2 010 010 <- |
+ * | | | 3 011 110 | <- |
+ * 3 -> | | | 4 100 001 | |
+ * -> | | 5 101 101 |
+ * -> | 6 110 011
+ * -> 7 111 111
+ */
+
+/*
+ * Note on port to lttng-ust: auto-resize and accounting features are
+ * removed.
+ */
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <errno.h>
+#include <assert.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <sched.h>
+#include <unistd.h>
+
+#include <lttng/ust-arch.h>
+#include <lttng/urcu/pointer.h>
+#include <urcu/arch.h>
+#include <urcu/uatomic.h>
+#include <urcu/compiler.h>
+#include "rculfhash.h"
+#include "rculfhash-internal.h"
+#include <stdio.h>
+#include <pthread.h>
+#include <signal.h>
+
+/*
+ * Split-counters lazily update the global counter every 1024
+ * additions/removals. They automatically keep track of whether a
+ * resize is required. We use the bucket length as an indicator of the
+ * need to expand, for small tables and machines lacking per-cpu data
+ * support.
+ */
+#define COUNT_COMMIT_ORDER 10
+
+/*
+ * Define the minimum table size.
+ */
+#define MIN_TABLE_ORDER 0
+#define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
+
+/*
+ * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
+ */
+#define MIN_PARTITION_PER_THREAD_ORDER 12
+#define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
+
+/*
+ * The removed flag needs to be updated atomically with the pointer.
+ * It indicates that no node must attach to the node scheduled for
+ * removal, and that node garbage collection must be performed.
+ * The bucket flag does not need to be updated atomically with the
+ * pointer, but it is added as a pointer low bit flag to save space.
+ * The "removal owner" flag is used to detect which of the "del"
+ * operations that set the "removed" flag gets to return the removed
+ * node to its caller. Note that the replace operation does not need to
+ * interact with the "removal owner" flag, because it validates that
+ * the "removed" flag is not set before performing its cmpxchg.
+ */
+#define REMOVED_FLAG (1UL << 0)
+#define BUCKET_FLAG (1UL << 1)
+#define REMOVAL_OWNER_FLAG (1UL << 2)
+#define FLAGS_MASK ((1UL << 3) - 1)
+
+/* Value of the end pointer. Should not interact with flags. */
+#define END_VALUE NULL
+
+/*
+ * ht_items_count: Split-counters counting the number of node addition
+ * and removal in the table. Only used if the LTTNG_UST_LFHT_ACCOUNTING flag
+ * is set at hash table creation.
+ *
+ * These are free-running counters, never reset to zero. They count the
+ * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
+ * operations to update the global counter. We choose a power-of-2 value
+ * for the trigger to deal with 32 or 64-bit overflow of the counter.
+ */
+struct ht_items_count {
+ unsigned long add, del;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
+
+static
+void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+{
+ iter->lfht = ht;
+}
+
+#define lttng_ust_lfht_iter_debug_assert(...) assert(__VA_ARGS__)
+
+#else
+
+static
+void lttng_ust_lfht_iter_debug_set_ht(struct lttng_ust_lfht *ht __attribute__((unused)),
+ struct lttng_ust_lfht_iter *iter __attribute__((unused)))
+{
+}
+
+#define lttng_ust_lfht_iter_debug_assert(...)
+
+#endif
+
+/*
+ * Algorithm to reverse bits in a word by lookup table, extended to
+ * 64-bit words.
+ * Source:
+ * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+ * The original is in the public domain.
+ */
+
+static const uint8_t BitReverseTable256[256] =
+{
+#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
+#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
+#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
+ R6(0), R6(2), R6(1), R6(3)
+};
+#undef R2
+#undef R4
+#undef R6
+
+static
+uint8_t bit_reverse_u8(uint8_t v)
+{
+ return BitReverseTable256[v];
+}
+
+#if (CAA_BITS_PER_LONG == 32)
+static
+uint32_t bit_reverse_u32(uint32_t v)
+{
+ return ((uint32_t) bit_reverse_u8(v) << 24) |
+ ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
+ ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
+ ((uint32_t) bit_reverse_u8(v >> 24));
+}
+#else
+static
+uint64_t bit_reverse_u64(uint64_t v)
+{
+ return ((uint64_t) bit_reverse_u8(v) << 56) |
+ ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
+ ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
+ ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
+ ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
+ ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
+ ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
+ ((uint64_t) bit_reverse_u8(v >> 56));
+}
+#endif
+
+static
+unsigned long bit_reverse_ulong(unsigned long v)
+{
+#if (CAA_BITS_PER_LONG == 32)
+ return bit_reverse_u32(v);
+#else
+ return bit_reverse_u64(v);
+#endif
+}
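+
+/*
+ * Examples: bit_reverse_u8(0x01) == 0x80 and bit_reverse_u8(0xA0) ==
+ * 0x05; the word-sized variants apply the same lookup table byte by
+ * byte.
+ */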
+
+/*
+ * fls: returns the position of the most significant bit.
+ * Returns 0 if no bit is set, else returns the position of the most
+ * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
+ */
+#if defined(LTTNG_UST_ARCH_X86)
+static inline
+unsigned int fls_u32(uint32_t x)
+{
+ int r;
+
+ __asm__ ("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n\t"
+ "1:\n\t"
+ : "=r" (r) : "rm" (x));
+ return r + 1;
+}
+#define HAS_FLS_U32
+#endif
+
+#if defined(LTTNG_UST_ARCH_AMD64)
+static inline
+unsigned int fls_u64(uint64_t x)
+{
+ long r;
+
+ __asm__ ("bsrq %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movq $-1,%0\n\t"
+ "1:\n\t"
+ : "=r" (r) : "rm" (x));
+ return r + 1;
+}
+#define HAS_FLS_U64
+#endif
+
+#ifndef HAS_FLS_U64
+static
+unsigned int fls_u64(uint64_t x)
+ __attribute__((unused));
+static
+unsigned int fls_u64(uint64_t x)
+{
+ unsigned int r = 64;
+
+ if (!x)
+ return 0;
+
+ if (!(x & 0xFFFFFFFF00000000ULL)) {
+ x <<= 32;
+ r -= 32;
+ }
+ if (!(x & 0xFFFF000000000000ULL)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF00000000000000ULL)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF000000000000000ULL)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC000000000000000ULL)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x8000000000000000ULL)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+#endif
+
+#ifndef HAS_FLS_U32
+static
+unsigned int fls_u32(uint32_t x)
+ __attribute__((unused));
+static
+unsigned int fls_u32(uint32_t x)
+{
+ unsigned int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+#endif
+
+unsigned int lttng_ust_lfht_fls_ulong(unsigned long x)
+{
+#if (CAA_BITS_PER_LONG == 32)
+ return fls_u32(x);
+#else
+ return fls_u64(x);
+#endif
+}
+
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
+int lttng_ust_lfht_get_count_order_u32(uint32_t x)
+{
+ if (!x)
+ return -1;
+
+ return fls_u32(x - 1);
+}
+
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
+int lttng_ust_lfht_get_count_order_ulong(unsigned long x)
+{
+ if (!x)
+ return -1;
+
+ return lttng_ust_lfht_fls_ulong(x - 1);
+}
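+
+/*
+ * Examples: lttng_ust_lfht_get_count_order_ulong(8) == 3, since
+ * 8 == (1UL << 3), and lttng_ust_lfht_get_count_order_ulong(9) == 4,
+ * since 9 <= (1UL << 4).
+ */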
+
+static
+struct lttng_ust_lfht_node *clear_flag(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
+}
+
+static
+int is_removed(const struct lttng_ust_lfht_node *node)
+{
+ return ((unsigned long) node) & REMOVED_FLAG;
+}
+
+static
+int is_bucket(struct lttng_ust_lfht_node *node)
+{
+ return ((unsigned long) node) & BUCKET_FLAG;
+}
+
+static
+struct lttng_ust_lfht_node *flag_bucket(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
+}
+
+static
+int is_removal_owner(struct lttng_ust_lfht_node *node)
+{
+ return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
+}
+
+static
+struct lttng_ust_lfht_node *flag_removal_owner(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
+}
+
+static
+struct lttng_ust_lfht_node *flag_removed_or_removal_owner(struct lttng_ust_lfht_node *node)
+{
+ return (struct lttng_ust_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
+}
+
+static
+struct lttng_ust_lfht_node *get_end(void)
+{
+ return (struct lttng_ust_lfht_node *) END_VALUE;
+}
+
+static
+int is_end(struct lttng_ust_lfht_node *node)
+{
+ return clear_flag(node) == (struct lttng_ust_lfht_node *) END_VALUE;
+}
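+
+/*
+ * Illustrative flag arithmetic: for an 8-byte-aligned node pointer p,
+ * flag_bucket(p) sets bit 1, so is_bucket(flag_bucket(p)) is non-zero
+ * and clear_flag(flag_bucket(p)) == p.
+ */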
+
+static
+void lttng_ust_lfht_alloc_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ return ht->mm->alloc_bucket_table(ht, order);
+}
+
+/*
+ * lttng_ust_lfht_free_bucket_table() should be called with decreasing order.
+ * When lttng_ust_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void lttng_ust_lfht_free_bucket_table(struct lttng_ust_lfht *ht, unsigned long order)
+{
+ return ht->mm->free_bucket_table(ht, order);
+}
+
+static inline
+struct lttng_ust_lfht_node *bucket_at(struct lttng_ust_lfht *ht, unsigned long index)
+{
+ return ht->bucket_at(ht, index);
+}
+
+static inline
+struct lttng_ust_lfht_node *lookup_bucket(struct lttng_ust_lfht *ht, unsigned long size,
+ unsigned long hash)
+{
+ assert(size > 0);
+ return bucket_at(ht, hash & (size - 1));
+}
+
+/*
+ * Remove all logically deleted nodes from a bucket up to a certain node key.
+ */
+static
+void _lttng_ust_lfht_gc_bucket(struct lttng_ust_lfht_node *bucket, struct lttng_ust_lfht_node *node)
+{
+ struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_next;
+
+ assert(!is_bucket(bucket));
+ assert(!is_removed(bucket));
+ assert(!is_removal_owner(bucket));
+ assert(!is_bucket(node));
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+ for (;;) {
+ iter_prev = bucket;
+ /* We can always skip the bucket node initially */
+ iter = lttng_ust_rcu_dereference(iter_prev->next);
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
+ /*
+ * We should never be called with bucket (start of chain)
+ * and logically removed node (end of path compression
+ * marker) being the actual same node. This would be a
+ * bug in the algorithm implementation.
+ */
+ assert(bucket != node);
+ for (;;) {
+ if (caa_unlikely(is_end(iter)))
+ return;
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
+ return;
+ next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
+ if (caa_likely(is_removed(next)))
+ break;
+ iter_prev = clear_flag(iter);
+ iter = next;
+ }
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ if (is_bucket(iter))
+ new_next = flag_bucket(clear_flag(next));
+ else
+ new_next = clear_flag(next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
+ }
+}
+
+static
+int _lttng_ust_lfht_replace(struct lttng_ust_lfht *ht, unsigned long size,
+ struct lttng_ust_lfht_node *old_node,
+ struct lttng_ust_lfht_node *old_next,
+ struct lttng_ust_lfht_node *new_node)
+{
+ struct lttng_ust_lfht_node *bucket, *ret_next;
+
+ if (!old_node) /* Return -ENOENT if asked to replace NULL node */
+ return -ENOENT;
+
+ assert(!is_removed(old_node));
+ assert(!is_removal_owner(old_node));
+ assert(!is_bucket(old_node));
+ assert(!is_removed(new_node));
+ assert(!is_removal_owner(new_node));
+ assert(!is_bucket(new_node));
+ assert(new_node != old_node);
+ for (;;) {
+ /* Insert after node to be replaced */
+ if (is_removed(old_next)) {
+ /*
+ * Too late, the old node has been removed under us
+ * between lookup and replace. Fail.
+ */
+ return -ENOENT;
+ }
+ assert(old_next == clear_flag(old_next));
+ assert(new_node != old_next);
+ /*
+ * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
+ * flag. It is either set atomically at the same time
+ * (replace) or after (del).
+ */
+ assert(!is_removal_owner(old_next));
+ new_node->next = old_next;
+ /*
+ * Here is the whole trick for lock-free replace: we add
+ * the replacement node _after_ the node we want to
+ * replace by atomically setting its next pointer at the
+ * same time we set its removal flag. Given that
+ * the lookups/get next use an iterator aware of the
+ * next pointer, they will either skip the old node due
+ * to the removal flag and see the new node, or use
+ * the old node, but will not see the new one.
+ * This is a replacement of a node with another node
+ * that has the same value: we are therefore not
+ * removing a value from the hash table. We set both the
+ * REMOVED and REMOVAL_OWNER flags atomically so we own
+ * the node after successful cmpxchg.
+ */
+ ret_next = uatomic_cmpxchg(&old_node->next,
+ old_next, flag_removed_or_removal_owner(new_node));
+ if (ret_next == old_next)
+ break; /* We performed the replacement. */
+ old_next = ret_next;
+ }
+
+ /*
+ * Ensure that the old node is not visible to readers anymore:
+ * lookup for the node, and remove it (along with any other
+ * logically removed node) if found.
+ */
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
+ _lttng_ust_lfht_gc_bucket(bucket, new_node);
+
+ assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
+ return 0;
+}
+
+/*
+ * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
+ * mode. A NULL unique_ret allows creation of duplicate keys.
+ */
+static
+void _lttng_ust_lfht_add(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ unsigned long size,
+ struct lttng_ust_lfht_node *node,
+ struct lttng_ust_lfht_iter *unique_ret,
+ int bucket_flag)
+{
+ struct lttng_ust_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
+ *return_node;
+ struct lttng_ust_lfht_node *bucket;
+
+ assert(!is_bucket(node));
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+ bucket = lookup_bucket(ht, size, hash);
+ for (;;) {
+ /*
+ * iter_prev points to the non-removed node prior to the
+ * insert location.
+ */
+ iter_prev = bucket;
+ /* We can always skip the bucket node initially */
+ iter = lttng_ust_rcu_dereference(iter_prev->next);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
+ for (;;) {
+ if (caa_unlikely(is_end(iter)))
+ goto insert;
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
+ goto insert;
+
+ /* bucket node is the first node of the identical-hash-value chain */
+ if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
+ goto insert;
+
+ next = lttng_ust_rcu_dereference(clear_flag(iter)->next);
+ if (caa_unlikely(is_removed(next)))
+ goto gc_node;
+
+ /* uniquely add */
+ if (unique_ret
+ && !is_bucket(next)
+ && clear_flag(iter)->reverse_hash == node->reverse_hash) {
+ struct lttng_ust_lfht_iter d_iter = {
+ .node = node,
+ .next = iter,
+#ifdef CONFIG_LTTNG_UST_LFHT_ITER_DEBUG
+ .lfht = ht,
+#endif
+ };
+
+ /*
+ * uniquely adding inserts the node as the first
+ * node of the identical-hash-value node chain.
+ *
+ * This semantic ensures no duplicated keys
+ * should ever be observable in the table
+ * (including traversing the table node by
+ * node with forward iterations).
+ */
+ lttng_ust_lfht_next_duplicate(ht, match, key, &d_iter);
+ if (!d_iter.node)
+ goto insert;
+
+ *unique_ret = d_iter;
+ return;
+ }
+
+ iter_prev = clear_flag(iter);
+ iter = next;
+ }
+
+ insert:
+ assert(node != clear_flag(iter));
+ assert(!is_removed(iter_prev));
+ assert(!is_removal_owner(iter_prev));
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ assert(iter_prev != node);
+ if (!bucket_flag)
+ node->next = clear_flag(iter);
+ else
+ node->next = flag_bucket(clear_flag(iter));
+ if (is_bucket(iter))
+ new_node = flag_bucket(node);
+ else
+ new_node = node;
+ if (uatomic_cmpxchg(&iter_prev->next, iter,
+ new_node) != iter) {
+ continue; /* retry */
+ } else {
+ return_node = node;
+ goto end;
+ }
+
+ gc_node:
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ if (is_bucket(iter))
+ new_next = flag_bucket(clear_flag(next));
+ else
+ new_next = clear_flag(next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
+ /* retry */
+ }
+end:
+ if (unique_ret) {
+ unique_ret->node = return_node;
+ /* unique_ret->next left unset, never used. */
+ }
+}
+
+static
+int _lttng_ust_lfht_del(struct lttng_ust_lfht *ht, unsigned long size,
+ struct lttng_ust_lfht_node *node)
+{
+ struct lttng_ust_lfht_node *bucket, *next;
+
+ if (!node) /* Return -ENOENT if asked to delete NULL node */
+ return -ENOENT;
+
+ /* logically delete the node */
+ assert(!is_bucket(node));
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+
+ /*
+ * We are first checking if the node had previously been
+ * logically removed (this check is not atomic with setting the
+ * logical removal flag). Return -ENOENT if the node had
+ * previously been removed.
+ */
+ next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
+ if (caa_unlikely(is_removed(next)))
+ return -ENOENT;
+ assert(!is_bucket(next));
+ /*
+ * The del operation semantic guarantees a full memory barrier
+ * before the uatomic_or atomic commit of the deletion flag.
+ */
+ cmm_smp_mb__before_uatomic_or();
+ /*
+ * We set the REMOVED_FLAG unconditionally. Note that there may
+ * be more than one concurrent thread setting this flag.
+ * Which thread wins the race will be known after the garbage
+ * collection phase, stay tuned!
+ */
+ uatomic_or(&node->next, REMOVED_FLAG);
+ /* We performed the (logical) deletion. */
+
+ /*
+ * Ensure that the node is not visible to readers anymore: lookup for
+ * the node, and remove it (along with any other logically removed node)
+ * if found.
+ */
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
+ _lttng_ust_lfht_gc_bucket(bucket, node);
+
+ assert(is_removed(CMM_LOAD_SHARED(node->next)));
+ /*
+ * Last phase: atomically exchange node->next with a version
+ * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
+ * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
+ * the node and win the removal race.
+ * It is interesting to note that all "add" paths are forbidden
+ * from changing the next pointer starting from the point where
+ * the REMOVED_FLAG is set, so here a read followed by an
+ * xchg() suffices to guarantee that the xchg() will only ever
+ * set the "REMOVAL_OWNER_FLAG" (or change nothing if the flag
+ * was already set).
+ */
+ if (!is_removal_owner(uatomic_xchg(&node->next,
+ flag_removal_owner(node->next))))
+ return 0;
+ else
+ return -ENOENT;
+}
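+
+/*
+ * Illustrative outcome of the two-phase removal above: if two threads
+ * concurrently call del on the same node, both may set REMOVED_FLAG,
+ * but only the first xchg to set REMOVAL_OWNER_FLAG returns 0 (and
+ * owns the node); the other thread gets -ENOENT.
+ */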
+
+/*
+ * Never called with size < 1.
+ */
+static
+void lttng_ust_lfht_create_bucket(struct lttng_ust_lfht *ht, unsigned long size)
+{
+ struct lttng_ust_lfht_node *prev, *node;
+ unsigned long order, len, i;
+ int bucket_order;
+
+ lttng_ust_lfht_alloc_bucket_table(ht, 0);
+
+ dbg_printf("create bucket: order 0 index 0 hash 0\n");
+ node = bucket_at(ht, 0);
+ node->next = flag_bucket(get_end());
+ node->reverse_hash = 0;
+
+ bucket_order = lttng_ust_lfht_get_count_order_ulong(size);
+ assert(bucket_order >= 0);
+
+ for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
+ len = 1UL << (order - 1);
+ lttng_ust_lfht_alloc_bucket_table(ht, order);
+
+ for (i = 0; i < len; i++) {
+ /*
+ * We initialize the node with hash=(len+i), which
+ * is also the bucket with index=(len+i), and insert
+ * it into the hash table. It therefore has to be
+ * inserted after the bucket with index
+ * (len+i)&(len-1)=i. Because no other non-bucket or
+ * bucket node with a larger index/hash has been
+ * inserted yet, the bucket node being inserted is
+ * linked directly after the bucket node with
+ * index=i.
+ */
+ prev = bucket_at(ht, i);
+ node = bucket_at(ht, len + i);
+
+ dbg_printf("create bucket: order %lu index %lu hash %lu\n",
+ order, len + i, len + i);
+ node->reverse_hash = bit_reverse_ulong(len + i);
+
+ /* insert after prev */
+ assert(is_bucket(prev->next));
+ node->next = prev->next;
+ prev->next = flag_bucket(node);
+ }
+ }
+}
+
+#if (CAA_BITS_PER_LONG > 32)
+/*
+ * For 64-bit architectures, when the maximum number of buckets is small
+ * enough not to use the entire 64-bit memory mapping space (while still
+ * allowing a fair number of hash table instances), use the mmap
+ * allocator, which is faster. Otherwise, fall back to the order
+ * allocator.
+ */
+static
+const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+ if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
+ return <tng_ust_lfht_mm_mmap;
+ else
+ return <tng_ust_lfht_mm_order;
+}
+#else
+/*
+ * For 32-bit architectures, use the order allocator.
+ */
+static
+const struct lttng_ust_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+ return <tng_ust_lfht_mm_order;
+}
+#endif
+
+struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
+ int flags,
+ const struct lttng_ust_lfht_mm_type *mm)
+{
+ struct lttng_ust_lfht *ht;
+ unsigned long order;
+
+ /* min_nr_alloc_buckets must be power of two */
+ if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
+ return NULL;
+
+ /* init_size must be power of two */
+ if (!init_size || (init_size & (init_size - 1)))
+ return NULL;
+
+ /*
+ * Memory management plugin default.
+ */
+ if (!mm)
+ mm = get_mm_type(max_nr_buckets);
+
+ /* max_nr_buckets == 0 for order based mm means infinite */
+ if (mm == <tng_ust_lfht_mm_order && !max_nr_buckets)
+ max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
+
+ /* max_nr_buckets must be power of two */
+ if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
+ return NULL;
+
+ if (flags & LTTNG_UST_LFHT_AUTO_RESIZE)
+ return NULL;
+
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
+ init_size = max(init_size, MIN_TABLE_SIZE);
+ max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
+ init_size = min(init_size, max_nr_buckets);
+
+ ht = mm->alloc_lttng_ust_lfht(min_nr_alloc_buckets, max_nr_buckets);
+ assert(ht);
+ assert(ht->mm == mm);
+ assert(ht->bucket_at == mm->bucket_at);
+
+ ht->flags = flags;
+ /* this mutex should not nest in read-side C.S. */
+ pthread_mutex_init(&ht->resize_mutex, NULL);
+ order = lttng_ust_lfht_get_count_order_ulong(init_size);
+ ht->resize_target = 1UL << order;
+ lttng_ust_lfht_create_bucket(ht, 1UL << order);
+ ht->size = 1UL << order;
+ return ht;
+}
+
+void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
+ lttng_ust_lfht_match_fct match, const void *key,
+ struct lttng_ust_lfht_iter *iter)
+{
+ struct lttng_ust_lfht_node *node, *next, *bucket;
+ unsigned long reverse_hash, size;
+
+ lttng_ust_lfht_iter_debug_set_ht(ht, iter);
+
+ reverse_hash = bit_reverse_ulong(hash);
+
+ size = lttng_ust_rcu_dereference(ht->size);
+ bucket = lookup_bucket(ht, size, hash);
+ /* We can always skip the bucket node initially */
+ node = lttng_ust_rcu_dereference(bucket->next);
+ node = clear_flag(node);
+ for (;;) {
+ if (caa_unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
+ node = next = NULL;
+ break;
+ }
+ next = lttng_ust_rcu_dereference(node->next);
+ assert(node == clear_flag(node));
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)
+ && node->reverse_hash == reverse_hash
+ && caa_likely(match(node, key))) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht __attribute__((unused)),
+ lttng_ust_lfht_match_fct match,
+ const void *key, struct lttng_ust_lfht_iter *iter)
+{
+ struct lttng_ust_lfht_node *node, *next;
+ unsigned long reverse_hash;
+
+ lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
+ node = iter->node;
+ reverse_hash = node->reverse_hash;
+ next = iter->next;
+ node = clear_flag(next);
+
+ for (;;) {
+ if (caa_unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
+ node = next = NULL;
+ break;
+ }
+ next = lttng_ust_rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)
+ && caa_likely(match(node, key))) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void lttng_ust_lfht_next(struct lttng_ust_lfht *ht __attribute__((unused)),
+ struct lttng_ust_lfht_iter *iter)
+{
+ struct lttng_ust_lfht_node *node, *next;
+
+ lttng_ust_lfht_iter_debug_assert(ht == iter->lfht);
+ node = clear_flag(iter->next);
+ for (;;) {
+ if (caa_unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ next = lttng_ust_rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+{
+ lttng_ust_lfht_iter_debug_set_ht(ht, iter);
+ /*
+ * Get next after first bucket node. The first bucket node is the
+ * first node of the linked list.
+ */
+ iter->next = bucket_at(ht, 0)->next;
+ lttng_ust_lfht_next(ht, iter);
+}
+
+void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
+ struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = lttng_ust_rcu_dereference(ht->size);
+ _lttng_ust_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
+}
+
+struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+ struct lttng_ust_lfht_iter iter;
+
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = lttng_ust_rcu_dereference(ht->size);
+ _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
+ return iter.node;
+}
+
+struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+ struct lttng_ust_lfht_iter iter;
+
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = lttng_ust_rcu_dereference(ht->size);
+ for (;;) {
+ _lttng_ust_lfht_add(ht, hash, match, key, size, node, &iter, 0);
+ if (iter.node == node) {
+ return NULL;
+ }
+
+ if (!_lttng_ust_lfht_replace(ht, size, iter.node, iter.next, node))
+ return iter.node;
+ }
+}
+
+int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
+ struct lttng_ust_lfht_iter *old_iter,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *new_node)
+{
+ unsigned long size;
+
+ new_node->reverse_hash = bit_reverse_ulong(hash);
+ if (!old_iter->node)
+ return -ENOENT;
+ if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
+ return -EINVAL;
+ if (caa_unlikely(!match(old_iter->node, key)))
+ return -EINVAL;
+ size = lttng_ust_rcu_dereference(ht->size);
+ return _lttng_ust_lfht_replace(ht, size, old_iter->node, old_iter->next,
+ new_node);
+}
+
+int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
+{
+ unsigned long size;
+
+ size = lttng_ust_rcu_dereference(ht->size);
+ return _lttng_ust_lfht_del(ht, size, node);
+}
+
+int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
+{
+ return is_removed(CMM_LOAD_SHARED(node->next));
+}
+
+static
+int lttng_ust_lfht_delete_bucket(struct lttng_ust_lfht *ht)
+{
+ struct lttng_ust_lfht_node *node;
+ unsigned long order, i, size;
+
+ /* Check that the table is empty */
+ node = bucket_at(ht, 0);
+ do {
+ node = clear_flag(node)->next;
+ if (!is_bucket(node))
+ return -EPERM;
+ assert(!is_removed(node));
+ assert(!is_removal_owner(node));
+ } while (!is_end(node));
+ /*
+ * size accessed without lttng_ust_rcu_dereference because hash table is
+ * being destroyed.
+ */
+ size = ht->size;
+ /* Internal sanity check: all nodes left should be buckets */
+ for (i = 0; i < size; i++) {
+ node = bucket_at(ht, i);
+ dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
+ i, i, bit_reverse_ulong(node->reverse_hash));
+ assert(is_bucket(node->next));
+ }
+
+ for (order = lttng_ust_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
+ lttng_ust_lfht_free_bucket_table(ht, order);
+
+ return 0;
+}
+
+/*
+ * Should only be called when no more concurrent readers nor writers can
+ * possibly access the table.
+ */
+int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
+{
+ int ret;
+
+ ret = lttng_ust_lfht_delete_bucket(ht);
+ if (ret)
+ return ret;
+ ret = pthread_mutex_destroy(&ht->resize_mutex);
+ if (ret)
+ ret = -EBUSY;
+ poison_free(ht);
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 Lai Jiangshan <laijs@cn.fujitsu.com>
+ *
+ * Userspace RCU library - Lock-Free RCU Hash Table
+ */
+
+#ifndef _LTTNG_UST_RCULFHASH_H
+#define _LTTNG_UST_RCULFHASH_H
+
+#include <stdint.h>
+#include <pthread.h>
+#include <urcu/compiler.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct lttng_ust_lfht;
+
+/*
+ * lttng_ust_lfht_node: Contains the next pointers and reverse-hash
+ * value required for lookup and traversal of the hash table.
+ *
+ * struct lttng_ust_lfht_node should be aligned on 8-bytes boundaries because
+ * the three lower bits are used as flags. It is worth noting that the
+ * information contained within these three bits could be represented in
+ * two bits by re-using the same bit for REMOVAL_OWNER_FLAG and
+ * BUCKET_FLAG. This can be done if we ensure that no iterator nor
+ * updater checks the BUCKET_FLAG after it detects that the REMOVED_FLAG
+ * is set. Given that the minimum size of struct lttng_ust_lfht_node is 8 bytes on
+ * 32-bit architectures, we choose to go for simplicity and reserve
+ * three bits.
+ *
+ * struct lttng_ust_lfht_node can be embedded into a structure (as a field).
+ * caa_container_of() can be used to get the structure from the struct
+ * lttng_ust_lfht_node after a lookup.
+ *
+ * The structure which embeds it typically holds the key (or key-value
+ * pair) of the object. The caller code is responsible for calculation
+ * of the hash value for lttng_ust_lfht APIs.
+ */
+struct lttng_ust_lfht_node {
+ struct lttng_ust_lfht_node *next; /* ptr | REMOVAL_OWNER_FLAG | BUCKET_FLAG | REMOVED_FLAG */
+ unsigned long reverse_hash;
+} __attribute__((aligned(8)));
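+
+/*
+ * Embedding sketch (illustrative): the node is meant to be a field of
+ * a user-defined structure, e.g.:
+ *
+ *   struct mynode {
+ *           int key;
+ *           struct lttng_ust_lfht_node node;
+ *   };
+ *
+ * After a lookup, caa_container_of(ht_node, struct mynode, node)
+ * recovers the enclosing object.
+ */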
+
+/* lttng_ust_lfht_iter: Used to track state while traversing a hash chain. */
+struct lttng_ust_lfht_iter {
+ struct lttng_ust_lfht_node *node, *next;
+};
+
+static inline
+struct lttng_ust_lfht_node *lttng_ust_lfht_iter_get_node(struct lttng_ust_lfht_iter *iter)
+{
+ return iter->node;
+}
+
+struct rcu_flavor_struct;
+
+/*
+ * Caution!
+ * Ensure reader and writer threads are registered as urcu readers.
+ */
+
+typedef int (*lttng_ust_lfht_match_fct)(struct lttng_ust_lfht_node *node, const void *key);
+
+/*
+ * lttng_ust_lfht_node_init - initialize a hash table node
+ * @node: the node to initialize.
+ *
+ * This function is kept to be eventually used for debugging purposes
+ * (detection of memory corruption).
+ */
+static inline
+void lttng_ust_lfht_node_init(struct lttng_ust_lfht_node *node __attribute__((unused)))
+{
+}
+
+/*
+ * Hash table creation flags.
+ */
+enum {
+ LTTNG_UST_LFHT_AUTO_RESIZE = (1U << 0),
+ LTTNG_UST_LFHT_ACCOUNTING = (1U << 1),
+};
+
+struct lttng_ust_lfht_mm_type {
+ struct lttng_ust_lfht *(*alloc_lttng_ust_lfht)(unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets);
+ void (*alloc_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
+ void (*free_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
+ struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
+ unsigned long index);
+};
+
+extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order
+ __attribute__((visibility("hidden")));
+
+extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk
+ __attribute__((visibility("hidden")));
+
+extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_new - allocate a hash table.
+ * @init_size: number of buckets to allocate initially. Must be power of two.
+ * @min_nr_alloc_buckets: the minimum number of allocated buckets.
+ * (must be power of two)
+ * @max_nr_buckets: the maximum number of hash table buckets allowed.
+ * (must be power of two, 0 is accepted, means
+ * "infinite")
+ * @flags: hash table creation flags (can be combined with bitwise or: '|').
+ * 0: no flags.
+ * LTTNG_UST_LFHT_AUTO_RESIZE: automatically resize hash table.
+ * LTTNG_UST_LFHT_ACCOUNTING: count the number of node addition
+ * and removal in the table
+ *
+ * Return NULL on error.
+ * Note: the RCU flavor must already be included before the hash table header.
+ */
+extern struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
+ int flags,
+ const struct lttng_ust_lfht_mm_type *mm)
+ __attribute__((visibility("hidden")));
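+
+/*
+ * Illustrative creation sketch (hypothetical usage, error handling
+ * elided; 1 is a valid power-of-two initial size):
+ *
+ *	struct lttng_ust_lfht *ht;
+ *
+ *	ht = lttng_ust_lfht_new(1, 1, 0,
+ *			LTTNG_UST_LFHT_AUTO_RESIZE | LTTNG_UST_LFHT_ACCOUNTING,
+ *			&lttng_ust_lfht_mm_order);
+ *	if (!ht)
+ *		abort();
+ */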
+
+/*
+ * lttng_ust_lfht_destroy - destroy a hash table.
+ * @ht: the hash table to destroy.
+ *
+ * Return 0 on success, negative error value on error.
+ *
+ * Prior to liburcu 0.10:
+ * - Threads calling this API need to be registered RCU read-side
+ * threads.
+ * - lttng_ust_lfht_destroy should *not* be called from a RCU read-side
+ * critical section. It should *not* be called from a call_rcu thread
+ * context neither.
+ *
+ * Starting from liburcu 0.10, rculfhash implements its own worker
+ * thread to handle resize operations, which removes RCU requirements on
+ * lttng_ust_lfht_destroy.
+ */
+extern int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_count_nodes - count the number of nodes in the hash table.
+ * @ht: the hash table.
+ * @split_count_before: sample the node count split-counter before traversal.
+ * @count: traverse the hash table, count the number of nodes observed.
+ * @split_count_after: sample the node count split-counter after traversal.
+ *
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ */
+extern void lttng_ust_lfht_count_nodes(struct lttng_ust_lfht *ht,
+ long *split_count_before,
+ unsigned long *count,
+ long *split_count_after)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_lookup - lookup a node by key.
+ * @ht: the hash table.
+ * @hash: the key hash.
+ * @match: the key match function.
+ * @key: the key to look up.
+ * @iter: node, if found (output). *iter->node set to NULL if not found.
+ *
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
+ lttng_ust_lfht_match_fct match, const void *key,
+ struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
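+
+/*
+ * Illustrative lookup sketch (a sketch only; hash_key(), match_entry()
+ * and struct my_entry are hypothetical, and the RCU read-side lock is
+ * assumed to be held):
+ *
+ *	struct lttng_ust_lfht_iter iter;
+ *	struct lttng_ust_lfht_node *node;
+ *	unsigned long key = 42;
+ *
+ *	lttng_ust_lfht_lookup(ht, hash_key(key), match_entry, &key, &iter);
+ *	node = lttng_ust_lfht_iter_get_node(&iter);
+ *	if (node) {
+ *		struct my_entry *e = caa_container_of(node,
+ *				struct my_entry, node);
+ *		// use e
+ *	}
+ */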
+
+/*
+ * lttng_ust_lfht_next_duplicate - get the next item with same key, after iterator.
+ * @ht: the hash table.
+ * @match: the key match function.
+ * @key: the current node key.
+ * @iter: input: current iterator.
+ * output: node, if found. *iter->node set to NULL if not found.
+ *
+ * Uses an iterator initialized by a lookup or traversal. Important: the
+ * iterator _needs_ to be initialized before calling
+ * lttng_ust_lfht_next_duplicate.
+ * Sets *iter->node to the following node with the same key.
+ * Sets *iter->node to NULL if no following node exists with same key.
+ * RCU read-side lock must be held across lttng_ust_lfht_lookup and
+ * lttng_ust_lfht_next calls, and also between lttng_ust_lfht_next calls using the
+ * node returned by a previous lttng_ust_lfht_next.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht,
+ lttng_ust_lfht_match_fct match, const void *key,
+ struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_first - get the first node in the table.
+ * @ht: the hash table.
+ * @iter: first node, if it exists (output). *iter->node set to NULL if not found.
+ *
+ * Output in "*iter". *iter->node set to NULL if table is empty.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_next - get the next node in the table.
+ * @ht: the hash table.
+ * @iter: input: current iterator.
+ * output: next node, if it exists. *iter->node set to NULL if not found.
+ *
+ * Input/Output in "*iter". *iter->node set to NULL if *iter was
+ * pointing to the last table node.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function acts as a rcu_dereference() to read the node pointer.
+ */
+extern void lttng_ust_lfht_next(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_add - add a node to the hash table.
+ * @ht: the hash table.
+ * @hash: the key hash.
+ * @node: the node to add.
+ *
+ * This function supports adding redundant keys into the table.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function issues a full memory barrier before and after its
+ * atomic commit.
+ */
+extern void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
+ struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_add_unique - add a node to hash table, if key is not present.
+ * @ht: the hash table.
+ * @hash: the node's hash.
+ * @match: the key match function.
+ * @key: the node's key.
+ * @node: the node to try adding.
+ *
+ * Return the node added upon success.
+ * Return the unique node already present upon failure. If
+ * lttng_ust_lfht_add_unique fails, the node passed as parameter should be
+ * freed by the caller. In this case, the caller does NOT need to wait
+ * for a grace period before freeing or re-using the node.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ *
+ * The semantics of this function are as follows: if only this function
+ * is used to add keys into the table, no duplicated keys should ever be
+ * observable in the table. The same guarantee applies to combinations of
+ * add_unique and add_replace (see below).
+ *
+ * Upon success, this function issues a full memory barrier before and
+ * after its atomic commit. Upon failure, this function acts like a
+ * simple lookup operation: it acts as a rcu_dereference() to read the
+ * node pointer. The failure case does not guarantee any other memory
+ * barrier.
+ */
+extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
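+
+/*
+ * Illustrative sketch of the documented failure semantics (e is a
+ * hypothetical, caller-allocated struct my_entry; hash_key() and
+ * match_entry() are hypothetical as well):
+ *
+ *	struct lttng_ust_lfht_node *ret_node;
+ *
+ *	ret_node = lttng_ust_lfht_add_unique(ht, hash_key(e->key),
+ *			match_entry, &e->key, &e->node);
+ *	if (ret_node != &e->node) {
+ *		// Key already present: free immediately, no grace period needed.
+ *		free(e);
+ *	}
+ */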
+
+/*
+ * lttng_ust_lfht_add_replace - replace or add a node within hash table.
+ * @ht: the hash table.
+ * @hash: the node's hash.
+ * @match: the key match function.
+ * @key: the node's key.
+ * @node: the node to add.
+ *
+ * Return the node replaced upon success. If no node matching the key
+ * was present, return NULL, which also means the operation succeeded.
+ * This replacement operation should never fail.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful replacement, a grace period must be waited for before
+ * freeing or re-using the memory reserved for the returned node.
+ *
+ * The semantics of replacement vs lookups and traversals are the
+ * following: if lookups and traversals are performed between a key
+ * unique insertion and its removal, we guarantee that the lookups and
+ * traversals will always find exactly one instance of the key if it is
+ * replaced concurrently with the lookups.
+ *
+ * Providing this semantic allows us to ensure that replacement-only
+ * schemes will never generate duplicated keys. It also allows us to
+ * guarantee that a combination of add_replace and add_unique updates
+ * will never generate duplicated keys.
+ *
+ * This function issues a full memory barrier before and after its
+ * atomic commit.
+ */
+extern struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_replace - replace a node pointed to by iter within hash table.
+ * @ht: the hash table.
+ * @old_iter: the iterator position of the node to replace.
+ * @hash: the node's hash.
+ * @match: the key match function.
+ * @key: the node's key.
+ * @new_node: the new node to use as replacement.
+ *
+ * Return 0 if replacement is successful, negative value otherwise.
+ * Replacing a NULL old node or an already removed node will fail with
+ * -ENOENT.
+ * If the hash or value of the node to replace and the new node differ,
+ * this function returns -EINVAL without proceeding to the replacement.
+ * Old node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next.
+ * RCU read-side lock must be held between lookup and replacement.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful replacement, a grace period must be waited for before
+ * freeing or re-using the memory reserved for the old node (which can
+ * be accessed with lttng_ust_lfht_iter_get_node).
+ *
+ * The semantics of replacement vs lookups are the same as
+ * lttng_ust_lfht_add_replace().
+ *
+ * Upon success, this function issues a full memory barrier before and
+ * after its atomic commit. Upon failure, this function does not issue
+ * any memory barrier.
+ */
+extern int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
+ struct lttng_ust_lfht_iter *old_iter,
+ unsigned long hash,
+ lttng_ust_lfht_match_fct match,
+ const void *key,
+ struct lttng_ust_lfht_node *new_node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_del - remove node pointed to by iterator from hash table.
+ * @ht: the hash table.
+ * @node: the node to delete.
+ *
+ * Return 0 if the node is successfully removed, negative value
+ * otherwise.
+ * Deleting a NULL node or an already removed node will fail with a
+ * negative value.
+ * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
+ * followed by use of lttng_ust_lfht_iter_get_node.
+ * RCU read-side lock must be held between lookup and removal.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * After successful removal, a grace period must be waited for before
+ * freeing or re-using the memory reserved for old node (which can be
+ * accessed with lttng_ust_lfht_iter_get_node).
+ * Upon success, this function issues a full memory barrier before and
+ * after its atomic commit. Upon failure, this function does not issue
+ * any memory barrier.
+ */
+extern int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_is_node_deleted - query whether a node is removed from hash table.
+ *
+ * Return non-zero if the node is deleted from the hash table, 0
+ * otherwise.
+ * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
+ * followed by use of lttng_ust_lfht_iter_get_node.
+ * RCU read-side lock must be held between lookup and call to this
+ * function.
+ * Call with rcu_read_lock held.
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function does not issue any memory barrier.
+ */
+extern int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lttng_ust_lfht_resize - Force a hash table resize
+ * @ht: the hash table.
+ * @new_size: update to this hash table size.
+ *
+ * Threads calling this API need to be registered RCU read-side threads.
+ * This function does not (necessarily) issue memory barriers.
+ * lttng_ust_lfht_resize should *not* be called from a RCU read-side critical
+ * section.
+ */
+extern void lttng_ust_lfht_resize(struct lttng_ust_lfht *ht, unsigned long new_size)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Note: it is safe to perform element removal (del), replacement, or
+ * any hash table update operation during any of the following hash
+ * table traversals.
+ * These functions act as rcu_dereference() to read the node pointers.
+ */
+#define lttng_ust_lfht_for_each(ht, iter, node) \
+ for (lttng_ust_lfht_first(ht, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter); \
+ node != NULL; \
+ lttng_ust_lfht_next(ht, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter))
+
+#define lttng_ust_lfht_for_each_duplicate(ht, hash, match, key, iter, node) \
+ for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter); \
+ node != NULL; \
+ lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
+ node = lttng_ust_lfht_iter_get_node(iter))
+
+#define lttng_ust_lfht_for_each_entry(ht, iter, pos, member) \
+ for (lttng_ust_lfht_first(ht, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member); \
+ lttng_ust_lfht_iter_get_node(iter) != NULL; \
+ lttng_ust_lfht_next(ht, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member))
+
+#define lttng_ust_lfht_for_each_entry_duplicate(ht, hash, match, key, \
+ iter, pos, member) \
+ for (lttng_ust_lfht_lookup(ht, hash, match, key, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member); \
+ lttng_ust_lfht_iter_get_node(iter) != NULL; \
+ lttng_ust_lfht_next_duplicate(ht, match, key, iter), \
+ pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
+ __typeof__(*(pos)), member))
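+
+/*
+ * Illustrative traversal sketch (under the RCU read-side lock; struct
+ * my_entry and its node member are hypothetical):
+ *
+ *	struct lttng_ust_lfht_iter iter;
+ *	struct my_entry *pos;
+ *
+ *	lttng_ust_lfht_for_each_entry(ht, &iter, pos, node) {
+ *		// pos points to each entry in turn.
+ *	}
+ */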
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _LTTNG_UST_RCULFHASH_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <assert.h>
+
+#include "string-utils.h"
+
+enum star_glob_pattern_type_flags {
+ STAR_GLOB_PATTERN_TYPE_FLAG_NONE = 0,
+ STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN = 1,
+ STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY = 2,
+};
+
+static
+enum star_glob_pattern_type_flags strutils_test_glob_pattern(const char *pattern)
+{
+ enum star_glob_pattern_type_flags ret =
+ STAR_GLOB_PATTERN_TYPE_FLAG_NONE;
+ const char *p;
+
+ assert(pattern);
+
+ for (p = pattern; *p != '\0'; p++) {
+ switch (*p) {
+ case '*':
+ ret = STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
+
+ if (p[1] == '\0') {
+ ret |= STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
+ }
+
+ goto end;
+ case '\\':
+ p++;
+
+ if (*p == '\0') {
+ goto end;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+end:
+ return ret;
+}
+
+/*
+ * Returns true if `pattern` is a star-only globbing pattern, that is,
+ * it contains at least one non-escaped `*`.
+ */
+bool strutils_is_star_glob_pattern(const char *pattern)
+{
+ return strutils_test_glob_pattern(pattern) &
+ STAR_GLOB_PATTERN_TYPE_FLAG_PATTERN;
+}
+
+/*
+ * Returns true if `pattern` is a star globbing pattern whose only
+ * non-escaped star is at its very end.
+ */
+bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
+{
+ return strutils_test_glob_pattern(pattern) &
+ STAR_GLOB_PATTERN_TYPE_FLAG_END_ONLY;
+}
+
+static inline
+bool at_end_of_pattern(const char *p, const char *pattern, size_t pattern_len)
+{
+ return (p - pattern) == pattern_len || *p == '\0';
+}
+
+/*
+ * Globbing matching function with the star feature only (`?` and
+ * character sets are not supported). This matches `candidate` (plain
+ * string) against `pattern`. A literal star can be escaped with `\` in
+ * `pattern`.
+ *
+ * `pattern_len` or `candidate_len` can be greater than the actual
+ * string length of `pattern` or `candidate` if the string is
+ * null-terminated.
+ */
+bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
+ const char *candidate, size_t candidate_len)
+{
+ const char *retry_c = candidate, *retry_p = pattern, *c, *p;
+ bool got_a_star = false;
+
+retry:
+ c = retry_c;
+ p = retry_p;
+
+ /*
+ * The concept here is to retry a match in the specific case
+ * where we already got a star. The retry position for the
+ * pattern is just after the most recent star, and the retry
+ * position for the candidate is the character following the
+ * last try's first character.
+ *
+ * Example:
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^ MISMATCH
+ *
+ * candidate: hi ev every onyx one
+ * ^
+ * pattern: hi*every*one
+ * ^
+ *
+ * candidate: hi ev every onyx one
+ * ^^
+ * pattern: hi*every*one
+ * ^^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^
+ *
+ * candidate: hi ev every onyx one
+ * ^ ^
+ * pattern: hi*every*one
+ * ^ ^ SUCCESS
+ */
+ while ((c - candidate) < candidate_len && *c != '\0') {
+ assert(*c);
+
+ if (at_end_of_pattern(p, pattern, pattern_len)) {
+ goto end_of_pattern;
+ }
+
+ switch (*p) {
+ case '*':
+ got_a_star = true;
+
+ /*
+ * Our first try starts at the current candidate
+ * character and after the star in the pattern.
+ */
+ retry_c = c;
+ retry_p = p + 1;
+
+ if (at_end_of_pattern(retry_p, pattern, pattern_len)) {
+ /*
+ * Star at the end of the pattern at
+ * this point: automatic match.
+ */
+ return true;
+ }
+
+ goto retry;
+ case '\\':
+ /* Go to escaped character. */
+ p++;
+
+ /*
+ * Fall through to the default case, which compares
+ * the escaped character now.
+ */
+ /* Fallthrough */
+ default:
+ if (at_end_of_pattern(p, pattern, pattern_len) ||
+ *c != *p) {
+end_of_pattern:
+ /* Character mismatch OR end of pattern. */
+ if (!got_a_star) {
+ /*
+ * We didn't get any star yet,
+ * so this first mismatch
+ * automatically makes the whole
+ * test fail.
+ */
+ return false;
+ }
+
+ /*
+ * Next try: next candidate character,
+ * original pattern character (following
+ * the most recent star).
+ */
+ retry_c++;
+ goto retry;
+ }
+ break;
+ }
+
+ /* Next pattern and candidate characters. */
+ c++;
+ p++;
+ }
+
+ /*
+ * We checked every candidate character and we're still in a
+ * success state: the only pattern character allowed to remain
+ * is a star.
+ */
+ if (at_end_of_pattern(p, pattern, pattern_len)) {
+ return true;
+ }
+
+ p++;
+ return p[-1] == '*' && at_end_of_pattern(p, pattern, pattern_len);
+}
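+
+/*
+ * Illustrative calls (a sketch; SIZE_MAX, from <stdint.h>, is a valid
+ * length here because lengths may exceed the actual null-terminated
+ * string lengths):
+ *
+ *	strutils_star_glob_match("hi*one", SIZE_MAX,
+ *		"hi ev every onyx one", SIZE_MAX);	// true
+ *	strutils_star_glob_match("hi\\*one", SIZE_MAX,
+ *		"hi*one", SIZE_MAX);			// true: escaped star is literal
+ *	strutils_star_glob_match("hi*one", SIZE_MAX,
+ *		"hi one!", SIZE_MAX);			// false
+ */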
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2017 Philippe Proulx <pproulx@efficios.com>
+ */
+
+#ifndef _STRING_UTILS_H
+#define _STRING_UTILS_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+bool strutils_is_star_glob_pattern(const char *pattern)
+ __attribute__((visibility("hidden")));
+
+bool strutils_is_star_at_the_end_only_glob_pattern(const char *pattern)
+ __attribute__((visibility("hidden")));
+
+bool strutils_star_glob_match(const char *pattern, size_t pattern_len,
+ const char *candidate, size_t candidate_len)
+ __attribute__((visibility("hidden")));
+
+#endif /* _STRING_UTILS_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <ust-helper.h>
+
+#define TRACEPOINT_CREATE_PROBES
+#define TRACEPOINT_DEFINE
+#include "lttng-ust-tracef-provider.h"
+
+static inline
+void __lttng_ust_vtracef(const char *fmt, va_list ap)
+ __attribute__((always_inline, format(printf, 1, 0)));
+static inline
+void __lttng_ust_vtracef(const char *fmt, va_list ap)
+{
+ char *msg;
+ const int len = vasprintf(&msg, fmt, ap);
+
+ /* len does not include the final \0 */
+ if (len < 0)
+ goto end;
+ __tracepoint_cb_lttng_ust_tracef___event(msg, len,
+ LTTNG_UST_CALLER_IP());
+ free(msg);
+end:
+ return;
+}
+
+/*
+ * FIXME: We should include <lttng/tracef.h> for the declarations here,
+ * but doing so fails due to tracepoint magic above my pay grade.
+ */
+
+void _lttng_ust_vtracef(const char *fmt, va_list ap)
+ __attribute__((format(printf, 1, 0)));
+void _lttng_ust_vtracef(const char *fmt, va_list ap)
+{
+ __lttng_ust_vtracef(fmt, ap);
+}
+
+void _lttng_ust_tracef(const char *fmt, ...)
+ __attribute__((format(printf, 1, 2)));
+void _lttng_ust_tracef(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ __lttng_ust_vtracef(fmt, ap);
+ va_end(ap);
+}
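+
+/*
+ * Illustrative application-side usage, assuming the tracef() macro from
+ * <lttng/tracef.h> expands to the entry points above (a sketch, not a
+ * normative example):
+ *
+ *	#include <lttng/tracef.h>
+ *
+ *	tracef("connection %d established with %s", fd, hostname);
+ */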
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdio.h>
+#include <ust-helper.h>
+
+#define TRACEPOINT_CREATE_PROBES
+#define TRACEPOINT_DEFINE
+#include "lttng-ust-tracelog-provider.h"
+
+#define TRACELOG_CB(level) \
+ static inline \
+ void __lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ __attribute__((always_inline, format(printf, 4, 0))); \
+ \
+ static inline \
+ void __lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ { \
+ char *msg; \
+ const int len = vasprintf(&msg, fmt, ap); \
+ \
+ /* len does not include the final \0 */ \
+ if (len < 0) \
+ goto end; \
+ __tracepoint_cb_lttng_ust_tracelog___##level(file, \
+ line, func, msg, len, \
+ LTTNG_UST_CALLER_IP()); \
+ free(msg); \
+ end: \
+ return; \
+ } \
+ \
+ void _lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ __attribute__ ((format(printf, 4, 0))); \
+ \
+ void _lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap); \
+ void _lttng_ust_vtracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, va_list ap) \
+ { \
+ __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
+ } \
+ \
+ void _lttng_ust_tracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, ...) \
+ __attribute__ ((format(printf, 4, 5))); \
+ \
+ void _lttng_ust_tracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, ...); \
+ void _lttng_ust_tracelog_##level(const char *file, \
+ int line, const char *func, \
+ const char *fmt, ...) \
+ { \
+ va_list ap; \
+ \
+ va_start(ap, fmt); \
+ __lttng_ust_vtracelog_##level(file, line, func, fmt, ap); \
+ va_end(ap); \
+ }
+
+TRACELOG_CB(TRACE_EMERG)
+TRACELOG_CB(TRACE_ALERT)
+TRACELOG_CB(TRACE_CRIT)
+TRACELOG_CB(TRACE_ERR)
+TRACELOG_CB(TRACE_WARNING)
+TRACELOG_CB(TRACE_NOTICE)
+TRACELOG_CB(TRACE_INFO)
+TRACELOG_CB(TRACE_DEBUG_SYSTEM)
+TRACELOG_CB(TRACE_DEBUG_PROGRAM)
+TRACELOG_CB(TRACE_DEBUG_PROCESS)
+TRACELOG_CB(TRACE_DEBUG_MODULE)
+TRACELOG_CB(TRACE_DEBUG_UNIT)
+TRACELOG_CB(TRACE_DEBUG_FUNCTION)
+TRACELOG_CB(TRACE_DEBUG_LINE)
+TRACELOG_CB(TRACE_DEBUG)
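+
+/*
+ * Illustrative application-side usage, assuming the tracelog() macro
+ * from <lttng/tracelog.h> dispatches to the per-level functions
+ * instantiated above (a sketch only):
+ *
+ *	#include <lttng/tracelog.h>
+ *
+ *	tracelog(TRACE_WARNING, "queue depth %d above threshold", depth);
+ */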
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_TRACEPOINT_INTERNAL_H
+#define _LTTNG_TRACEPOINT_INTERNAL_H
+
+#include <urcu/list.h>
+#include <lttng/tracepoint-types.h>
+#include <lttng/ust-events.h>
+
+#define TRACE_DEFAULT TRACE_DEBUG_LINE
+
+struct tracepoint_lib {
+ struct cds_list_head list; /* list of registered libs */
+ struct lttng_ust_tracepoint * const *tracepoints_start;
+ int tracepoints_count;
+ struct cds_list_head callsites;
+};
+
+int tracepoint_probe_register_noupdate(const char *name,
+ void (*callback)(void), void *priv,
+ const char *signature)
+ __attribute__((visibility("hidden")));
+
+int tracepoint_probe_unregister_noupdate(const char *name,
+ void (*callback)(void), void *priv)
+ __attribute__((visibility("hidden")));
+
+void tracepoint_probe_update_all(void)
+ __attribute__((visibility("hidden")));
+
+void *lttng_ust_tp_check_weak_hidden1(void)
+ __attribute__((visibility("hidden")));
+
+void *lttng_ust_tp_check_weak_hidden2(void)
+ __attribute__((visibility("hidden")));
+
+void *lttng_ust_tp_check_weak_hidden3(void)
+ __attribute__((visibility("hidden")));
+
+/*
+ * These symbols are ABI between liblttng-ust-tracepoint and liblttng-ust,
+ * which is why they are not hidden and not part of the public API.
+ */
+int lttng_ust_tp_probe_register_queue_release(const char *name,
+ void (*func)(void), void *data, const char *signature);
+int lttng_ust_tp_probe_unregister_queue_release(const char *name,
+ void (*func)(void), void *data);
+void lttng_ust_tp_probe_prune_release_queue(void);
+
+void lttng_ust_tp_init(void);
+void lttng_ust_tp_exit(void);
+
+#endif /* _LTTNG_TRACEPOINT_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include "tracepoint-internal.h"
+
+/* Test compiler support for weak symbols with hidden visibility. */
+int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
+void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
+struct {
+ char a[24];
+} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
+
+void *lttng_ust_tp_check_weak_hidden1(void)
+{
+ return &__tracepoint_test_symbol1;
+}
+
+void *lttng_ust_tp_check_weak_hidden2(void)
+{
+ return &__tracepoint_test_symbol2;
+}
+
+void *lttng_ust_tp_check_weak_hidden3(void)
+{
+ return &__tracepoint_test_symbol3;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2008-2011 Mathieu Desnoyers
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ *
+ * Ported to userspace by Pierre-Marc Fournier.
+ */
+
+#define _LGPL_SOURCE
+#include <errno.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <stdio.h>
+
+#include <urcu/arch.h>
+#include <lttng/urcu/urcu-ust.h>
+#include <urcu/hlist.h>
+#include <urcu/uatomic.h>
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+
+#include <lttng/tracepoint.h>
+#include <lttng/ust-abi.h> /* for LTTNG_UST_ABI_SYM_NAME_LEN */
+
+#include <usterr-signal-safe.h>
+#include <ust-helper.h>
+
+#include "tracepoint-internal.h"
+#include "lttng-tracer-core.h"
+#include "jhash.h"
+#include "error.h"
+
+/* Test compiler support for weak symbols with hidden visibility. */
+int __tracepoint_test_symbol1 __attribute__((weak, visibility("hidden")));
+void *__tracepoint_test_symbol2 __attribute__((weak, visibility("hidden")));
+struct {
+ char a[24];
+} __tracepoint_test_symbol3 __attribute__((weak, visibility("hidden")));
+
+/* Set to 1 to enable tracepoint debug output */
+static const int tracepoint_debug;
+static int initialized;
+
+/*
+ * If tracepoint_destructors_state = 1, tracepoint destructors are
+ * enabled. They are disabled otherwise.
+ */
+static int tracepoint_destructors_state = 1;
+
+static void (*new_tracepoint_cb)(struct lttng_ust_tracepoint *);
+
+/*
+ * tracepoint_mutex nests inside UST mutex.
+ *
+ * Note about interaction with fork/clone: UST does not hold the
+ * tracepoint mutex across fork/clone because it is either:
+ * - nested within UST mutex, in which case holding the UST mutex across
+ * fork/clone suffice,
+ * - taken by a library constructor, which should never race with a
+ * fork/clone if the application is expected to continue running with
+ * the same memory layout (no following exec()).
+ */
+static pthread_mutex_t tracepoint_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/*
+ * libraries that contain tracepoints (struct tracepoint_lib).
+ * Protected by tracepoint mutex.
+ */
+static CDS_LIST_HEAD(libs);
+
+/*
+ * The tracepoint mutex protects the library tracepoints, the hash table, and
+ * the library list.
+ * All calls to the tracepoint API must be protected by the tracepoint mutex,
+ * except calls to tracepoint_register_lib and
+ * tracepoint_unregister_lib, which take the tracepoint mutex themselves.
+ */
+
+/*
+ * Tracepoint hash table, containing the active tracepoints.
+ * Protected by tracepoint mutex.
+ */
+#define TRACEPOINT_HASH_BITS 12
+#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
+static struct cds_hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
+
+static CDS_LIST_HEAD(old_probes);
+static int need_update;
+
+static CDS_LIST_HEAD(release_queue);
+static int release_queue_need_update;
+
+/*
+ * Note about RCU:
+ * It is used to delay freeing the multiple-probes arrays until a
+ * quiescent state is reached.
+ * Tracepoint entries modifications are protected by the tracepoint mutex.
+ */
+struct tracepoint_entry {
+ struct cds_hlist_node hlist;
+ struct lttng_ust_tracepoint_probe *probes;
+ int refcount; /* Number of times armed. 0 if disarmed. */
+ int callsite_refcount; /* how many libs use this tracepoint */
+ char *signature;
+ char *name;
+};
+
+struct tp_probes {
+ union {
+ struct cds_list_head list;
+ /* Field below only used for call_rcu scheme */
+ /* struct rcu_head head; */
+ } u;
+ struct lttng_ust_tracepoint_probe probes[0];
+};
+
+/*
+ * Callsite hash table, containing the tracepoint call sites.
+ * Protected by tracepoint mutex.
+ */
+#define CALLSITE_HASH_BITS 12
+#define CALLSITE_TABLE_SIZE (1 << CALLSITE_HASH_BITS)
+static struct cds_hlist_head callsite_table[CALLSITE_TABLE_SIZE];
+
+struct callsite_entry {
+ struct cds_hlist_node hlist; /* hash table node */
+ struct cds_list_head node; /* lib list of callsites node */
+ struct lttng_ust_tracepoint *tp;
+ bool tp_entry_callsite_ref; /* Whether a tp_entry holds a ref on this callsite */
+};
+
+/* coverity[+alloc] */
+static void *allocate_probes(int count)
+{
+ struct tp_probes *p =
+ zmalloc(count * sizeof(struct lttng_ust_tracepoint_probe)
+ + sizeof(struct tp_probes));
+ return p == NULL ? NULL : p->probes;
+}
+
+/* coverity[+free : arg-0] */
+static void release_probes(void *old)
+{
+ if (old) {
+ struct tp_probes *tp_probes = caa_container_of(old,
+ struct tp_probes, probes[0]);
+ lttng_ust_urcu_synchronize_rcu();
+ free(tp_probes);
+ }
+}
+
+static void debug_print_probes(struct tracepoint_entry *entry)
+{
+ int i;
+
+ if (!tracepoint_debug || !entry->probes)
+ return;
+
+ for (i = 0; entry->probes[i].func; i++)
+ DBG("Probe %d : %p", i, entry->probes[i].func);
+}
+
+static void *
+tracepoint_entry_add_probe(struct tracepoint_entry *entry,
+ void (*probe)(void), void *data)
+{
+ int nr_probes = 0;
+ struct lttng_ust_tracepoint_probe *old, *new;
+
+ if (!probe) {
+ WARN_ON(1);
+ return ERR_PTR(-EINVAL);
+ }
+ debug_print_probes(entry);
+ old = entry->probes;
+ if (old) {
+ /* (N -> N+1), (N != 0, 1) probes */
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++)
+ if (old[nr_probes].func == probe &&
+ old[nr_probes].data == data)
+ return ERR_PTR(-EEXIST);
+ }
+ /* + 2 : one for new probe, one for NULL func */
+ new = allocate_probes(nr_probes + 2);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ if (old)
+ memcpy(new, old,
+ nr_probes * sizeof(struct lttng_ust_tracepoint_probe));
+ new[nr_probes].func = probe;
+ new[nr_probes].data = data;
+ new[nr_probes + 1].func = NULL;
+ entry->refcount = nr_probes + 1;
+ entry->probes = new;
+ debug_print_probes(entry);
+ return old;
+}
+
+static void *
+tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
+ void (*probe)(void), void *data)
+{
+ int nr_probes = 0, nr_del = 0, i;
+ struct lttng_ust_tracepoint_probe *old, *new;
+
+ old = entry->probes;
+
+ if (!old)
+ return ERR_PTR(-ENOENT);
+
+ debug_print_probes(entry);
+ /* (N -> M), (N > 1, M >= 0) probes */
+ if (probe) {
+ for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
+ if (old[nr_probes].func == probe &&
+ old[nr_probes].data == data)
+ nr_del++;
+ }
+ }
+
+ if (nr_probes - nr_del == 0) {
+ /* N -> 0, (N > 1) */
+ entry->probes = NULL;
+ entry->refcount = 0;
+ debug_print_probes(entry);
+ return old;
+ } else {
+ int j = 0;
+ /* N -> M, (N > 1, M > 0) */
+ /* + 1 for NULL */
+ new = allocate_probes(nr_probes - nr_del + 1);
+ if (new == NULL)
+ return ERR_PTR(-ENOMEM);
+ for (i = 0; old[i].func; i++)
+ if (old[i].func != probe || old[i].data != data)
+ new[j++] = old[i];
+ new[nr_probes - nr_del].func = NULL;
+ entry->refcount = nr_probes - nr_del;
+ entry->probes = new;
+ }
+ debug_print_probes(entry);
+ return old;
+}
+
+/*
+ * Get tracepoint if the tracepoint is present in the tracepoint hash table.
+ * Must be called with tracepoint mutex held.
+ * Returns NULL if not present.
+ */
+static struct tracepoint_entry *get_tracepoint(const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct tracepoint_entry *e;
+ size_t name_len = strlen(name);
+ uint32_t hash;
+
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
+ return e;
+ }
+ return NULL;
+}
+
+/*
+ * Add the tracepoint to the tracepoint hash table. Must be called with
+ * tracepoint mutex held.
+ */
+static struct tracepoint_entry *add_tracepoint(const char *name,
+ const char *signature)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct tracepoint_entry *e;
+ size_t name_len = strlen(name);
+ size_t sig_len = strlen(signature);
+ size_t sig_off, name_off;
+ uint32_t hash;
+
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ if (!strncmp(name, e->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1)) {
+ DBG("tracepoint %s busy", name);
+ return ERR_PTR(-EEXIST); /* Already there */
+ }
+ }
+
+ /*
+ * Using zmalloc here to allocate variable-length elements: name and
+ * signature. Could cause some memory fragmentation if overused.
+ */
+ name_off = sizeof(struct tracepoint_entry);
+ sig_off = name_off + name_len + 1;
+
+ e = zmalloc(sizeof(struct tracepoint_entry) + name_len + 1 + sig_len + 1);
+ if (!e)
+ return ERR_PTR(-ENOMEM);
+ e->name = (char *) e + name_off;
+ memcpy(e->name, name, name_len + 1);
+ e->name[name_len] = '\0';
+
+ e->signature = (char *) e + sig_off;
+ memcpy(e->signature, signature, sig_len + 1);
+ e->signature[sig_len] = '\0';
+
+ e->probes = NULL;
+ e->refcount = 0;
+ e->callsite_refcount = 0;
+
+ cds_hlist_add_head(&e->hlist, head);
+ return e;
+}
+
+/*
+ * Remove the tracepoint from the tracepoint hash table. Must be called with
+ * tracepoint mutex held.
+ */
+static void remove_tracepoint(struct tracepoint_entry *e)
+{
+ cds_hlist_del(&e->hlist);
+ free(e);
+}
+
+/*
+ * Sets the probe callback corresponding to one tracepoint.
+ */
+static void set_tracepoint(struct tracepoint_entry **entry,
+ struct lttng_ust_tracepoint *elem, int active)
+{
+ WARN_ON(strncmp((*entry)->name, elem->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1) != 0);
+ /*
+ * Check that signatures match before connecting a probe to a
+ * tracepoint. Warn the user if they don't.
+ */
+ if (strcmp(elem->signature, (*entry)->signature) != 0) {
+ static int warned = 0;
+
+ /* Only print once, don't flood console. */
+ if (!warned) {
+ WARN("Tracepoint signature mismatch, not enabling one or more tracepoints. Ensure that the tracepoint probes prototypes match the application.");
+ WARN("Tracepoint \"%s\" signatures: call: \"%s\" vs probe: \"%s\".",
+ elem->name, elem->signature, (*entry)->signature);
+ warned = 1;
+ }
+ /* Don't accept connecting non-matching signatures. */
+ return;
+ }
+
+ /*
+ * rcu_assign_pointer has a cmm_smp_wmb() which makes sure that the new
+ * probe callbacks array is consistent before setting a pointer to it.
+ * This array is referenced by __DO_TRACE from
+ * include/linux/tracepoints.h. A matching cmm_smp_read_barrier_depends()
+ * is used.
+ */
+ lttng_ust_rcu_assign_pointer(elem->probes, (*entry)->probes);
+ CMM_STORE_SHARED(elem->state, active);
+}
+
+/*
+ * Disable a tracepoint and its probe callback.
+ * Note: only waiting for an RCU period after setting elem->call to the
+ * empty function ensures that the original callback is not used anymore.
+ * This is ensured by preempt_disable around the call site.
+ */
+static void disable_tracepoint(struct lttng_ust_tracepoint *elem)
+{
+ CMM_STORE_SHARED(elem->state, 0);
+ lttng_ust_rcu_assign_pointer(elem->probes, NULL);
+}
+
+/*
+ * Add the callsite to the callsite hash table. Must be called with
+ * tracepoint mutex held.
+ */
+static void add_callsite(struct tracepoint_lib *lib, struct lttng_ust_tracepoint *tp)
+{
+ struct cds_hlist_head *head;
+ struct callsite_entry *e;
+ const char *name = tp->name;
+ size_t name_len = strlen(name);
+ uint32_t hash;
+ struct tracepoint_entry *tp_entry;
+
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
+ e = zmalloc(sizeof(struct callsite_entry));
+ if (!e) {
+ PERROR("Unable to add callsite for tracepoint \"%s\"", name);
+ return;
+ }
+ cds_hlist_add_head(&e->hlist, head);
+ e->tp = tp;
+ cds_list_add(&e->node, &lib->callsites);
+
+ tp_entry = get_tracepoint(name);
+ if (!tp_entry)
+ return;
+ tp_entry->callsite_refcount++;
+ e->tp_entry_callsite_ref = true;
+}
+
+/*
+ * Remove the callsite from the callsite hash table and from lib
+ * callsite list. Must be called with tracepoint mutex held.
+ */
+static void remove_callsite(struct callsite_entry *e)
+{
+ struct tracepoint_entry *tp_entry;
+
+ tp_entry = get_tracepoint(e->tp->name);
+ if (tp_entry) {
+ if (e->tp_entry_callsite_ref)
+ tp_entry->callsite_refcount--;
+ if (tp_entry->callsite_refcount == 0)
+ disable_tracepoint(e->tp);
+ }
+ cds_hlist_del(&e->hlist);
+ cds_list_del(&e->node);
+ free(e);
+}
+
+/*
+ * Enable/disable all callsites based on the state of a specific
+ * tracepoint entry.
+ * Must be called with tracepoint mutex held.
+ */
+static void tracepoint_sync_callsites(const char *name)
+{
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ struct callsite_entry *e;
+ size_t name_len = strlen(name);
+ uint32_t hash;
+ struct tracepoint_entry *tp_entry;
+
+ tp_entry = get_tracepoint(name);
+ if (name_len > LTTNG_UST_ABI_SYM_NAME_LEN - 1) {
+ WARN("Truncating tracepoint name %s which exceeds size limits of %u chars", name, LTTNG_UST_ABI_SYM_NAME_LEN - 1);
+ name_len = LTTNG_UST_ABI_SYM_NAME_LEN - 1;
+ }
+ hash = jhash(name, name_len, 0);
+ head = &callsite_table[hash & (CALLSITE_TABLE_SIZE - 1)];
+ cds_hlist_for_each_entry(e, node, head, hlist) {
+ struct lttng_ust_tracepoint *tp = e->tp;
+
+ if (strncmp(name, tp->name, LTTNG_UST_ABI_SYM_NAME_LEN - 1))
+ continue;
+ if (tp_entry) {
+ if (!e->tp_entry_callsite_ref) {
+ tp_entry->callsite_refcount++;
+ e->tp_entry_callsite_ref = true;
+ }
+ set_tracepoint(&tp_entry, tp,
+ !!tp_entry->refcount);
+ } else {
+ disable_tracepoint(tp);
+ e->tp_entry_callsite_ref = false;
+ }
+ }
+}
+
+/**
+ * tracepoint_update_probe_range - Update a probe range
+ * @begin: beginning of the range
+ * @end: end of the range
+ *
+ * Updates the probe callback corresponding to a range of tracepoints.
+ */
+static
+void tracepoint_update_probe_range(struct lttng_ust_tracepoint * const *begin,
+ struct lttng_ust_tracepoint * const *end)
+{
+ struct lttng_ust_tracepoint * const *iter;
+ struct tracepoint_entry *mark_entry;
+
+ for (iter = begin; iter < end; iter++) {
+ if (!*iter)
+ continue; /* skip dummy */
+ if (!(*iter)->name) {
+ disable_tracepoint(*iter);
+ continue;
+ }
+ mark_entry = get_tracepoint((*iter)->name);
+ if (mark_entry) {
+ set_tracepoint(&mark_entry, *iter,
+ !!mark_entry->refcount);
+ } else {
+ disable_tracepoint(*iter);
+ }
+ }
+}
+
+static void lib_update_tracepoints(struct tracepoint_lib *lib)
+{
+ tracepoint_update_probe_range(lib->tracepoints_start,
+ lib->tracepoints_start + lib->tracepoints_count);
+}
+
+static void lib_register_callsites(struct tracepoint_lib *lib)
+{
+ struct lttng_ust_tracepoint * const *begin;
+ struct lttng_ust_tracepoint * const *end;
+ struct lttng_ust_tracepoint * const *iter;
+
+ begin = lib->tracepoints_start;
+ end = lib->tracepoints_start + lib->tracepoints_count;
+
+ for (iter = begin; iter < end; iter++) {
+ if (!*iter)
+ continue; /* skip dummy */
+ if (!(*iter)->name) {
+ continue;
+ }
+ add_callsite(lib, *iter);
+ }
+}
+
+static void lib_unregister_callsites(struct tracepoint_lib *lib)
+{
+ struct callsite_entry *callsite, *tmp;
+
+ cds_list_for_each_entry_safe(callsite, tmp, &lib->callsites, node)
+ remove_callsite(callsite);
+}
+
+/*
+ * Update probes, removing the faulty probes.
+ */
+static void tracepoint_update_probes(void)
+{
+ struct tracepoint_lib *lib;
+
+ /* tracepoints registered from libraries and executable. */
+ cds_list_for_each_entry(lib, &libs, list)
+ lib_update_tracepoints(lib);
+}
+
+static struct lttng_ust_tracepoint_probe *
+tracepoint_add_probe(const char *name, void (*probe)(void), void *data,
+ const char *signature)
+{
+ struct tracepoint_entry *entry;
+ struct lttng_ust_tracepoint_probe *old;
+
+ entry = get_tracepoint(name);
+ if (entry) {
+ if (strcmp(entry->signature, signature) != 0) {
+ ERR("Tracepoint and probe signature do not match.");
+ return ERR_PTR(-EINVAL);
+ }
+ } else {
+ entry = add_tracepoint(name, signature);
+ if (IS_ERR(entry))
+ return (struct lttng_ust_tracepoint_probe *)entry;
+ }
+ old = tracepoint_entry_add_probe(entry, probe, data);
+ if (IS_ERR(old) && !entry->refcount)
+ remove_tracepoint(entry);
+ return old;
+}
+
+static void tracepoint_release_queue_add_old_probes(void *old)
+{
+ release_queue_need_update = 1;
+ if (old) {
+ struct tp_probes *tp_probes = caa_container_of(old,
+ struct tp_probes, probes[0]);
+ cds_list_add(&tp_probes->u.list, &release_queue);
+ }
+}
+
+/**
+ * __tracepoint_probe_register - Connect a probe to a tracepoint
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * Returns 0 if ok, error value on error.
+ * The probe address must at least be aligned on the architecture pointer size.
+ * Takes the tracepoint mutex internally.
+ */
+int __tracepoint_probe_register(const char *name, void (*probe)(void),
+ void *data, const char *signature)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Registering probe to tracepoint %s", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_add_probe(name, probe, data, signature);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+
+ tracepoint_sync_callsites(name);
+ release_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/*
+ * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
+ * calling lttng_ust_tp_probe_register_queue_release() one or multiple
+ * times to ensure it does not leak memory.
+ */
+int lttng_ust_tp_probe_register_queue_release(const char *name,
+ void (*probe)(void), void *data, const char *signature)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Registering probe to tracepoint %s. Queuing release.", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_add_probe(name, probe, data, signature);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+
+ tracepoint_sync_callsites(name);
+ tracepoint_release_queue_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+static void *tracepoint_remove_probe(const char *name, void (*probe)(void),
+ void *data)
+{
+ struct tracepoint_entry *entry;
+ void *old;
+
+ entry = get_tracepoint(name);
+ if (!entry)
+ return ERR_PTR(-ENOENT);
+ old = tracepoint_entry_remove_probe(entry, probe, data);
+ if (IS_ERR(old))
+ return old;
+ if (!entry->refcount)
+ remove_tracepoint(entry);
+ return old;
+}
+
+/**
+ * __tracepoint_probe_unregister - Disconnect a probe from a tracepoint
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ * @data: probe data pointer
+ */
+int __tracepoint_probe_unregister(const char *name, void (*probe)(void),
+ void *data)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Un-registering probe from tracepoint %s", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_remove_probe(name, probe, data);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_sync_callsites(name);
+ release_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/*
+ * Caller needs to invoke lttng_ust_tp_probe_prune_release_queue() after
+ * calling lttng_ust_tp_probe_unregister_queue_release() one or multiple
+ * times to ensure it does not leak memory.
+ */
+int lttng_ust_tp_probe_unregister_queue_release(const char *name,
+ void (*probe)(void), void *data)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Un-registering probe from tracepoint %s. Queuing release.", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_remove_probe(name, probe, data);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_sync_callsites(name);
+ tracepoint_release_queue_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+void lttng_ust_tp_probe_prune_release_queue(void)
+{
+ CDS_LIST_HEAD(release_probes);
+ struct tp_probes *pos, *next;
+
+ DBG("Release queue of unregistered tracepoint probes.");
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ if (!release_queue_need_update)
+ goto end;
+ if (!cds_list_empty(&release_queue))
+ cds_list_replace_init(&release_queue, &release_probes);
+ release_queue_need_update = 0;
+
+ /* Wait for grace period between all sync_callsites and free. */
+ lttng_ust_urcu_synchronize_rcu();
+
+ cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+ cds_list_del(&pos->u.list);
+ free(pos);
+ }
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+}
+
+static void tracepoint_add_old_probes(void *old)
+{
+ need_update = 1;
+ if (old) {
+ struct tp_probes *tp_probes = caa_container_of(old,
+ struct tp_probes, probes[0]);
+ cds_list_add(&tp_probes->u.list, &old_probes);
+ }
+}
+
+/**
+ * tracepoint_probe_register_noupdate - register a probe without connecting it
+ * @name: tracepoint name
+ * @probe: probe handler
+ *
+ * caller must call tracepoint_probe_update_all()
+ */
+int tracepoint_probe_register_noupdate(const char *name, void (*probe)(void),
+ void *data, const char *signature)
+{
+ void *old;
+ int ret = 0;
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_add_probe(name, probe, data, signature);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/**
+ * tracepoint_probe_unregister_noupdate - remove a probe without disconnecting it
+ * @name: tracepoint name
+ * @probe: probe function pointer
+ *
+ * caller must call tracepoint_probe_update_all()
+ * Takes the tracepoint mutex internally.
+ */
+int tracepoint_probe_unregister_noupdate(const char *name, void (*probe)(void),
+ void *data)
+{
+ void *old;
+ int ret = 0;
+
+ DBG("Un-registering probe from tracepoint %s", name);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ old = tracepoint_remove_probe(name, probe, data);
+ if (IS_ERR(old)) {
+ ret = PTR_ERR(old);
+ goto end;
+ }
+ tracepoint_add_old_probes(old);
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return ret;
+}
+
+/**
+ * tracepoint_probe_update_all - update tracepoints
+ */
+void tracepoint_probe_update_all(void)
+{
+ CDS_LIST_HEAD(release_probes);
+ struct tp_probes *pos, *next;
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ if (!need_update) {
+ goto end;
+ }
+ if (!cds_list_empty(&old_probes))
+ cds_list_replace_init(&old_probes, &release_probes);
+ need_update = 0;
+
+ tracepoint_update_probes();
+ /* Wait for grace period between update_probes and free. */
+ lttng_ust_urcu_synchronize_rcu();
+ cds_list_for_each_entry_safe(pos, next, &release_probes, u.list) {
+ cds_list_del(&pos->u.list);
+ free(pos);
+ }
+end:
+ pthread_mutex_unlock(&tracepoint_mutex);
+}
+
+static void new_tracepoints(struct lttng_ust_tracepoint * const *start,
+ struct lttng_ust_tracepoint * const *end)
+{
+ if (new_tracepoint_cb) {
+ struct lttng_ust_tracepoint * const *t;
+
+ for (t = start; t < end; t++) {
+ if (*t)
+ new_tracepoint_cb(*t);
+ }
+ }
+}
+
+/*
+ * tracepoint_{un,}register_lib is meant to be looked up by instrumented
+ * applications through dlsym(). If found, applications can register
+ * their tracepoints; otherwise, those tracepoints will not be available
+ * for tracing. The number at the end of those symbols acts as a major
+ * version for tracepoints.
+ *
+ * Older instrumented applications should still work with newer
+ * liblttng-ust, but it is fine that instrumented applications compiled
+ * against recent liblttng-ust headers require a recent liblttng-ust
+ * runtime for those tracepoints to be taken into account.
+ */
+int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
+ int tracepoints_count);
+int tracepoint_register_lib(struct lttng_ust_tracepoint * const *tracepoints_start,
+ int tracepoints_count)
+{
+ struct tracepoint_lib *pl, *iter;
+
+ lttng_ust_tp_init();
+
+ pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
+ if (!pl) {
+ PERROR("Unable to register tracepoint lib");
+ return -1;
+ }
+ pl->tracepoints_start = tracepoints_start;
+ pl->tracepoints_count = tracepoints_count;
+ CDS_INIT_LIST_HEAD(&pl->callsites);
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ /*
+ * We sort the libs by struct lib pointer address.
+ */
+ cds_list_for_each_entry_reverse(iter, &libs, list) {
+ BUG_ON(iter == pl); /* Should never be in the list twice */
+ if (iter < pl) {
+ /* We belong to the location right after iter. */
+ cds_list_add(&pl->list, &iter->list);
+ goto lib_added;
+ }
+ }
+ /* We should be added at the head of the list */
+ cds_list_add(&pl->list, &libs);
+lib_added:
+ new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);
+ lib_register_callsites(pl);
+ lib_update_tracepoints(pl);
+ pthread_mutex_unlock(&tracepoint_mutex);
+
+ DBG("just registered a tracepoints section from %p and having %d tracepoints",
+ tracepoints_start, tracepoints_count);
+ if (ust_err_debug_enabled()) {
+ int i;
+
+ for (i = 0; i < tracepoints_count; i++) {
+ DBG("registered tracepoint: %s", tracepoints_start[i]->name);
+ }
+ }
+
+ return 0;
+}
+
+int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start);
+int tracepoint_unregister_lib(struct lttng_ust_tracepoint * const *tracepoints_start)
+{
+ struct tracepoint_lib *lib;
+
+ pthread_mutex_lock(&tracepoint_mutex);
+ cds_list_for_each_entry(lib, &libs, list) {
+ if (lib->tracepoints_start != tracepoints_start)
+ continue;
+
+ cds_list_del(&lib->list);
+ /*
+ * Unregistering a callsite also decreases the
+ * callsite reference count of the corresponding
+ * tracepoint, and disables the tracepoint if
+ * the reference count drops to zero.
+ */
+ lib_unregister_callsites(lib);
+ DBG("just unregistered a tracepoints section from %p",
+ lib->tracepoints_start);
+ free(lib);
+ break;
+ }
+ pthread_mutex_unlock(&tracepoint_mutex);
+ return 0;
+}
+
+/*
+ * Report in debug message whether the compiler correctly supports weak
+ * hidden symbols. This test checks that the address associated with two
+ * weak symbols with hidden visibility is the same when declared within
+ * two compile units part of the same module.
+ */
+static void check_weak_hidden(void)
+{
+ DBG("Your compiler treats weak symbols with hidden visibility for integer objects as %s between compile units part of the same module.",
+ &__tracepoint_test_symbol1 == lttng_ust_tp_check_weak_hidden1() ?
+ "SAME address" :
+ "DIFFERENT addresses");
+ DBG("Your compiler treats weak symbols with hidden visibility for pointer objects as %s between compile units part of the same module.",
+ &__tracepoint_test_symbol2 == lttng_ust_tp_check_weak_hidden2() ?
+ "SAME address" :
+ "DIFFERENT addresses");
+ DBG("Your compiler treats weak symbols with hidden visibility for 24-byte structure objects as %s between compile units part of the same module.",
+ &__tracepoint_test_symbol3 == lttng_ust_tp_check_weak_hidden3() ?
+ "SAME address" :
+ "DIFFERENT addresses");
+}
+
+void lttng_ust_tp_init(void)
+{
+ if (uatomic_xchg(&initialized, 1) == 1)
+ return;
+ ust_err_init();
+ check_weak_hidden();
+}
+
+void lttng_ust_tp_exit(void)
+{
+ initialized = 0;
+}
+
+/*
+ * Create the wrapper symbols.
+ */
+#undef tp_rcu_read_lock
+#undef tp_rcu_read_unlock
+#undef tp_rcu_dereference
+
+void tp_rcu_read_lock(void);
+void tp_rcu_read_lock(void)
+{
+ lttng_ust_urcu_read_lock();
+}
+
+void tp_rcu_read_unlock(void);
+void tp_rcu_read_unlock(void)
+{
+ lttng_ust_urcu_read_unlock();
+}
+
+void *tp_rcu_dereference_sym(void *p);
+void *tp_rcu_dereference_sym(void *p)
+{
+ return lttng_ust_rcu_dereference(p);
+}
+
+/*
+ * Programs with threads that are still running at program exit, and that
+ * therefore run library destructors, should disable the tracepoint
+ * destructors by calling tp_disable_destructors(). This intentionally
+ * leaks the tracepoint instrumentation library shared object, leaving its
+ * teardown to the operating system process teardown.
+ *
+ * To access and/or modify this value, users need to use a combination of
+ * dlopen(3) and dlsym(3) to get a handle on the
+ * tp_disable_destructors and tp_get_destructors_state symbols below.
+ */
+void tp_disable_destructors(void);
+void tp_disable_destructors(void)
+{
+ uatomic_set(&tracepoint_destructors_state, 0);
+}
+
+/*
+ * Returns 1 if the destructors are enabled and should be executed.
+ * Returns 0 if the destructors are disabled.
+ */
+int tp_get_destructors_state(void);
+int tp_get_destructors_state(void)
+{
+ return uatomic_read(&tracepoint_destructors_state);
+}
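+
+/*
+ * Illustrative usage sketch (not part of this change): an application
+ * with threads surviving past exit could disable the destructors through
+ * dlsym(3), as described above. The shared object name below is an
+ * assumption made for the example.
+ *
+ *   void (*disable_destructors)(void);
+ *   void *handle = dlopen("liblttng-ust-tracepoint.so.1", RTLD_NOW);
+ *
+ *   if (handle) {
+ *           disable_destructors = (void (*)(void))
+ *                   dlsym(handle, "tp_disable_destructors");
+ *           if (disable_destructors)
+ *                   disable_destructors();
+ *   }
+ */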
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stdint.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <assert.h>
+
+#include "context-internal.h"
+#include "ust-events-internal.h"
+#include <usterr-signal-safe.h>
+#include "lttng-tracer-core.h"
+#include "lttng-rb-clients.h"
+#include "lttng-counter-client.h"
+#include "jhash.h"
+
+static CDS_LIST_HEAD(lttng_transport_list);
+static CDS_LIST_HEAD(lttng_counter_transport_list);
+
+struct lttng_transport *lttng_ust_transport_find(const char *name)
+{
+ struct lttng_transport *transport;
+
+	cds_list_for_each_entry(transport, &lttng_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
+struct lttng_counter_transport *lttng_counter_transport_find(const char *name)
+{
+ struct lttng_counter_transport *transport;
+
+	cds_list_for_each_entry(transport, &lttng_counter_transport_list, node) {
+ if (!strcmp(transport->name, name))
+ return transport;
+ }
+ return NULL;
+}
+
+/**
+ * lttng_transport_register - LTT transport registration
+ * @transport: transport structure
+ *
+ * Registers a transport which can be used as output to extract the data out of
+ * LTTng. Called with ust_lock held.
+ */
+void lttng_transport_register(struct lttng_transport *transport)
+{
+	cds_list_add_tail(&transport->node, &lttng_transport_list);
+}
+
+/**
+ * lttng_transport_unregister - LTT transport unregistration
+ * @transport: transport structure
+ * Called with ust_lock held.
+ */
+void lttng_transport_unregister(struct lttng_transport *transport)
+{
+ cds_list_del(&transport->node);
+}
+
+/**
+ * lttng_counter_transport_register - LTTng counter transport registration
+ * @transport: transport structure
+ *
+ * Registers a counter transport which can be used as output to extract
+ * the data out of LTTng. Called with ust_lock held.
+ */
+void lttng_counter_transport_register(struct lttng_counter_transport *transport)
+{
+	cds_list_add_tail(&transport->node, &lttng_counter_transport_list);
+}
+
+/**
+ * lttng_counter_transport_unregister - LTTng counter transport unregistration
+ * @transport: transport structure
+ * Called with ust_lock held.
+ */
+void lttng_counter_transport_unregister(struct lttng_counter_transport *transport)
+{
+ cds_list_del(&transport->node);
+}
+
+/*
+ * Needed by comm layer.
+ */
+struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
+ const struct lttng_ust_enum_desc *enum_desc)
+{
+ struct lttng_enum *_enum;
+ struct cds_hlist_head *head;
+ struct cds_hlist_node *node;
+ size_t name_len = strlen(enum_desc->name);
+ uint32_t hash;
+
+ hash = jhash(enum_desc->name, name_len, 0);
+ head = &session->priv->enums_ht.table[hash & (LTTNG_UST_ENUM_HT_SIZE - 1)];
+ cds_hlist_for_each_entry(_enum, node, head, hlist) {
+ assert(_enum->desc);
+ if (_enum->desc == enum_desc)
+ return _enum;
+ }
+ return NULL;
+}
+
+size_t lttng_ust_dummy_get_size(void *priv __attribute__((unused)),
+ size_t offset)
+{
+ size_t size = 0;
+
+ size += lttng_ust_lib_ring_buffer_align(offset, lttng_ust_rb_alignof(char));
+ size += sizeof(char); /* tag */
+ return size;
+}
+
+void lttng_ust_dummy_record(void *priv __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan)
+{
+ char sel_char = (char) LTTNG_UST_DYNAMIC_TYPE_NONE;
+
+ chan->ops->event_write(ctx, &sel_char, sizeof(sel_char), lttng_ust_rb_alignof(sel_char));
+}
+
+void lttng_ust_dummy_get_value(void *priv __attribute__((unused)),
+ struct lttng_ust_ctx_value *value)
+{
+ value->sel = LTTNG_UST_DYNAMIC_TYPE_NONE;
+}
+
+int lttng_context_is_app(const char *name)
+{
+ if (strncmp(name, "$app.", strlen("$app.")) != 0) {
+ return 0;
+ }
+ return 1;
+}
+
+struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
+{
+ struct lttng_ust_channel_buffer *lttng_chan_buf;
+ struct lttng_ust_channel_common *lttng_chan_common;
+ struct lttng_ust_channel_buffer_private *lttng_chan_buf_priv;
+
+ lttng_chan_buf = zmalloc(sizeof(struct lttng_ust_channel_buffer));
+ if (!lttng_chan_buf)
+ goto lttng_chan_buf_error;
+ lttng_chan_buf->struct_size = sizeof(struct lttng_ust_channel_buffer);
+ lttng_chan_common = zmalloc(sizeof(struct lttng_ust_channel_common));
+ if (!lttng_chan_common)
+ goto lttng_chan_common_error;
+ lttng_chan_common->struct_size = sizeof(struct lttng_ust_channel_common);
+ lttng_chan_buf_priv = zmalloc(sizeof(struct lttng_ust_channel_buffer_private));
+ if (!lttng_chan_buf_priv)
+ goto lttng_chan_buf_priv_error;
+ lttng_chan_buf->parent = lttng_chan_common;
+ lttng_chan_common->type = LTTNG_UST_CHANNEL_TYPE_BUFFER;
+ lttng_chan_common->child = lttng_chan_buf;
+ lttng_chan_buf->priv = lttng_chan_buf_priv;
+	lttng_chan_common->priv = &lttng_chan_buf_priv->parent;
+ lttng_chan_buf_priv->pub = lttng_chan_buf;
+ lttng_chan_buf_priv->parent.pub = lttng_chan_common;
+
+ return lttng_chan_buf;
+
+lttng_chan_buf_priv_error:
+ free(lttng_chan_common);
+lttng_chan_common_error:
+ free(lttng_chan_buf);
+lttng_chan_buf_error:
+ return NULL;
+}
+
+void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
+{
+ switch (chan->type) {
+ case LTTNG_UST_CHANNEL_TYPE_BUFFER:
+ {
+ struct lttng_ust_channel_buffer *chan_buf;
+
+ chan_buf = (struct lttng_ust_channel_buffer *)chan->child;
+ free(chan_buf->parent);
+ free(chan_buf->priv);
+ free(chan_buf);
+ break;
+ }
+ default:
+ abort();
+ }
+}
+
+void lttng_ust_ring_buffer_clients_init(void)
+{
+ lttng_ring_buffer_metadata_client_init();
+ lttng_ring_buffer_client_overwrite_init();
+ lttng_ring_buffer_client_overwrite_rt_init();
+ lttng_ring_buffer_client_discard_init();
+ lttng_ring_buffer_client_discard_rt_init();
+}
+
+void lttng_ust_ring_buffer_clients_exit(void)
+{
+ lttng_ring_buffer_client_discard_rt_exit();
+ lttng_ring_buffer_client_discard_exit();
+ lttng_ring_buffer_client_overwrite_rt_exit();
+ lttng_ring_buffer_client_overwrite_exit();
+ lttng_ring_buffer_metadata_client_exit();
+}
+
+void lttng_ust_counter_clients_init(void)
+{
+ lttng_counter_client_percpu_64_modular_init();
+ lttng_counter_client_percpu_32_modular_init();
+}
+
+void lttng_ust_counter_clients_exit(void)
+{
+ lttng_counter_client_percpu_32_modular_exit();
+ lttng_counter_client_percpu_64_modular_exit();
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright 2019 (c) Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_EVENTS_INTERNAL_H
+#define _LTTNG_UST_EVENTS_INTERNAL_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include <urcu/list.h>
+#include <urcu/hlist.h>
+
+#include <lttng/ust-events.h>
+
+#include <ust-helper.h>
+#include "ust-context-provider.h"
+
+struct lttng_ust_abi_obj;
+struct lttng_event_notifier_group;
+
+union lttng_ust_abi_args {
+ struct {
+ void *chan_data;
+ int wakeup_fd;
+ } channel;
+ struct {
+ int shm_fd;
+ int wakeup_fd;
+ } stream;
+ struct {
+ struct lttng_ust_abi_field_iter entry;
+ } field_list;
+ struct {
+ char *ctxname;
+ } app_context;
+ struct {
+ int event_notifier_notif_fd;
+ } event_notifier_handle;
+ struct {
+ void *counter_data;
+ } counter;
+ struct {
+ int shm_fd;
+ } counter_shm;
+};
+
+struct lttng_ust_abi_objd_ops {
+ long (*cmd)(int objd, unsigned int cmd, unsigned long arg,
+ union lttng_ust_abi_args *args, void *owner);
+ int (*release)(int objd);
+};
+
+enum lttng_enabler_format_type {
+ LTTNG_ENABLER_FORMAT_STAR_GLOB,
+ LTTNG_ENABLER_FORMAT_EVENT,
+};
+
+/*
+ * Enabler field, within whatever object is enabling an event. Target of
+ * backward reference.
+ */
+struct lttng_enabler {
+ enum lttng_enabler_format_type format_type;
+
+	/* head list of struct lttng_ust_bytecode_node */
+ struct cds_list_head filter_bytecode_head;
+ /* head list of struct lttng_ust_excluder_node */
+ struct cds_list_head excluder_head;
+
+ struct lttng_ust_abi_event event_param;
+ unsigned int enabled:1;
+};
+
+struct lttng_event_enabler {
+ struct lttng_enabler base;
+ struct cds_list_head node; /* per-session list of enablers */
+ struct lttng_ust_channel_buffer *chan;
+ /*
+ * Unused, but kept around to make it explicit that the tracer can do
+ * it.
+ */
+ struct lttng_ust_ctx *ctx;
+};
+
+struct lttng_event_notifier_enabler {
+ struct lttng_enabler base;
+ uint64_t error_counter_index;
+ struct cds_list_head node; /* per-app list of event_notifier enablers */
+ struct cds_list_head capture_bytecode_head;
+ struct lttng_event_notifier_group *group; /* weak ref */
+ uint64_t user_token; /* User-provided token */
+ uint64_t num_captures;
+};
+
+enum lttng_ust_bytecode_type {
+ LTTNG_UST_BYTECODE_TYPE_FILTER,
+ LTTNG_UST_BYTECODE_TYPE_CAPTURE,
+};
+
+struct lttng_ust_bytecode_node {
+ enum lttng_ust_bytecode_type type;
+ struct cds_list_head node;
+ struct lttng_enabler *enabler;
+ struct {
+ uint32_t len;
+ uint32_t reloc_offset;
+ uint64_t seqnum;
+ char data[];
+ } bc;
+};
+
+/*
+ * Bytecode interpreter return value.
+ */
+enum lttng_ust_bytecode_interpreter_ret {
+ LTTNG_UST_BYTECODE_INTERPRETER_ERROR = -1,
+ LTTNG_UST_BYTECODE_INTERPRETER_OK = 0,
+};
+
+struct lttng_interpreter_output;
+struct lttng_ust_bytecode_runtime_private;
+
+enum lttng_ust_bytecode_filter_result {
+ LTTNG_UST_BYTECODE_FILTER_ACCEPT = 0,
+ LTTNG_UST_BYTECODE_FILTER_REJECT = 1,
+};
+
+struct lttng_ust_bytecode_filter_ctx {
+ enum lttng_ust_bytecode_filter_result result;
+};
+
+struct lttng_ust_excluder_node {
+ struct cds_list_head node;
+ struct lttng_enabler *enabler;
+ /*
+	 * struct lttng_ust_abi_event_exclusion has a variable-sized
+	 * array, so it must be the last field.
+ */
+ struct lttng_ust_abi_event_exclusion excluder;
+};
+
+/* Data structures used by the tracer. */
+
+struct tp_list_entry {
+ struct lttng_ust_abi_tracepoint_iter tp;
+ struct cds_list_head head;
+};
+
+struct lttng_ust_tracepoint_list {
+ struct tp_list_entry *iter;
+ struct cds_list_head head;
+};
+
+struct tp_field_list_entry {
+ struct lttng_ust_abi_field_iter field;
+ struct cds_list_head head;
+};
+
+struct lttng_ust_field_list {
+ struct tp_field_list_entry *iter;
+ struct cds_list_head head;
+};
+
+/*
+ * Objects in a linked-list of enablers, owned by an event or event_notifier.
+ * This is used because an event (or an event_notifier) can be enabled by more
+ * than one enabler and we want a quick way to iterate over all enablers of an
+ * object.
+ *
+ * For example, event rules "my_app:a*" and "my_app:ab*" will both match the
+ * event with the name "my_app:abc".
+ */
+struct lttng_enabler_ref {
+ struct cds_list_head node; /* enabler ref list */
+ struct lttng_enabler *ref; /* backward ref */
+};
+
+#define LTTNG_COUNTER_DIMENSION_MAX 8
+struct lttng_counter_dimension {
+ uint64_t size;
+ uint64_t underflow_index;
+ uint64_t overflow_index;
+ uint8_t has_underflow;
+ uint8_t has_overflow;
+};
+
+struct lttng_counter_ops {
+ struct lib_counter *(*counter_create)(size_t nr_dimensions,
+ const struct lttng_counter_dimension *dimensions,
+ int64_t global_sum_step,
+ int global_counter_fd,
+ int nr_counter_cpu_fds,
+ const int *counter_cpu_fds,
+ bool is_daemon);
+ void (*counter_destroy)(struct lib_counter *counter);
+ int (*counter_add)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t v);
+ int (*counter_read)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int cpu,
+ int64_t *value, bool *overflow, bool *underflow);
+ int (*counter_aggregate)(struct lib_counter *counter,
+ const size_t *dimension_indexes, int64_t *value,
+ bool *overflow, bool *underflow);
+ int (*counter_clear)(struct lib_counter *counter, const size_t *dimension_indexes);
+};
+
+struct lttng_counter {
+ int objd;
+ struct lttng_event_notifier_group *event_notifier_group; /* owner */
+ struct lttng_counter_transport *transport;
+ struct lib_counter *counter;
+ struct lttng_counter_ops *ops;
+};
+
+#define LTTNG_UST_EVENT_HT_BITS 12
+#define LTTNG_UST_EVENT_HT_SIZE (1U << LTTNG_UST_EVENT_HT_BITS)
+
+struct lttng_ust_event_ht {
+ struct cds_hlist_head table[LTTNG_UST_EVENT_HT_SIZE];
+};
+
+#define LTTNG_UST_EVENT_NOTIFIER_HT_BITS 12
+#define LTTNG_UST_EVENT_NOTIFIER_HT_SIZE (1U << LTTNG_UST_EVENT_NOTIFIER_HT_BITS)
+struct lttng_ust_event_notifier_ht {
+ struct cds_hlist_head table[LTTNG_UST_EVENT_NOTIFIER_HT_SIZE];
+};
+
+#define LTTNG_UST_ENUM_HT_BITS 12
+#define LTTNG_UST_ENUM_HT_SIZE (1U << LTTNG_UST_ENUM_HT_BITS)
+
+struct lttng_ust_enum_ht {
+ struct cds_hlist_head table[LTTNG_UST_ENUM_HT_SIZE];
+};
+
+struct lttng_event_notifier_group {
+ int objd;
+ void *owner;
+ int notification_fd;
+ struct cds_list_head node; /* Event notifier group handle list */
+ struct cds_list_head enablers_head;
+ struct cds_list_head event_notifiers_head; /* list of event_notifiers */
+ struct lttng_ust_event_notifier_ht event_notifiers_ht; /* hashtable of event_notifiers */
+ struct lttng_ust_ctx *ctx; /* contexts for filters. */
+
+ struct lttng_counter *error_counter;
+ size_t error_counter_len;
+};
+
+struct lttng_transport {
+ const char *name;
+ struct cds_list_head node;
+ struct lttng_ust_channel_buffer_ops ops;
+ const struct lttng_ust_lib_ring_buffer_config *client_config;
+};
+
+struct lttng_counter_transport {
+ const char *name;
+ struct cds_list_head node;
+ struct lttng_counter_ops ops;
+ const struct lib_counter_config *client_config;
+};
+
+struct lttng_ust_event_common_private {
+ struct lttng_ust_event_common *pub; /* Public event interface */
+
+ const struct lttng_ust_event_desc *desc;
+ /* Backward references: list of lttng_enabler_ref (ref to enablers) */
+ struct cds_list_head enablers_ref_head;
+	int registered;			/* has registered tracepoint probe */
+ uint64_t user_token;
+
+ int has_enablers_without_filter_bytecode;
+ /* list of struct lttng_ust_bytecode_runtime, sorted by seqnum */
+ struct cds_list_head filter_bytecode_runtime_head;
+};
+
+struct lttng_ust_event_recorder_private {
+ struct lttng_ust_event_common_private parent;
+
+ struct lttng_ust_event_recorder *pub; /* Public event interface */
+ struct cds_list_head node; /* Event recorder list */
+ struct cds_hlist_node hlist; /* Hash table of event recorders */
+ struct lttng_ust_ctx *ctx;
+ unsigned int id;
+};
+
+struct lttng_ust_event_notifier_private {
+ struct lttng_ust_event_common_private parent;
+
+ struct lttng_ust_event_notifier *pub; /* Public event notifier interface */
+ struct lttng_event_notifier_group *group; /* weak ref */
+ size_t num_captures; /* Needed to allocate the msgpack array. */
+ uint64_t error_counter_index;
+ struct cds_list_head node; /* Event notifier list */
+ struct cds_hlist_node hlist; /* Hash table of event notifiers */
+ struct cds_list_head capture_bytecode_runtime_head;
+};
+
+struct lttng_ust_bytecode_runtime {
+ enum lttng_ust_bytecode_type type;
+ struct lttng_ust_bytecode_node *bc;
+ int link_failed;
+ int (*interpreter_func)(struct lttng_ust_bytecode_runtime *bytecode_runtime,
+ const char *interpreter_stack_data,
+ void *ctx);
+ struct cds_list_head node; /* list of bytecode runtime in event */
+ /*
+	 * Pointer to a URCU-protected pointer owned by a `struct
+	 * lttng_session` or `struct lttng_event_notifier_group`.
+ */
+ struct lttng_ust_ctx **pctx;
+};
+
+struct lttng_ust_session_private {
+ struct lttng_ust_session *pub; /* Public session interface */
+
+	int been_active;		/* Been active? */
+ int objd; /* Object associated */
+ struct cds_list_head chan_head; /* Channel list head */
+ struct cds_list_head events_head; /* list of events */
+ struct cds_list_head node; /* Session list */
+
+ /* List of enablers */
+ struct cds_list_head enablers_head;
+ struct lttng_ust_event_ht events_ht; /* ht of events */
+ void *owner; /* object owner */
+ int tstate:1; /* Transient enable state */
+
+ int statedump_pending:1;
+
+ struct lttng_ust_enum_ht enums_ht; /* ht of enumerations */
+ struct cds_list_head enums_head;
+ struct lttng_ust_ctx *ctx; /* contexts for filters. */
+
+ unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
+	bool uuid_set;			/* Is uuid set? */
+};
+
+struct lttng_enum {
+ const struct lttng_ust_enum_desc *desc;
+ struct lttng_ust_session *session;
+ struct cds_list_head node; /* Enum list in session */
+ struct cds_hlist_node hlist; /* Session ht of enums */
+ uint64_t id; /* Enumeration ID in sessiond */
+};
+
+struct lttng_ust_shm_handle;
+
+struct lttng_ust_channel_buffer_ops_private {
+ struct lttng_ust_channel_buffer_ops *pub; /* Public channel buffer ops interface */
+
+ struct lttng_ust_channel_buffer *(*channel_create)(const char *name,
+ void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ unsigned char *uuid,
+ uint32_t chan_id,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout);
+ void (*channel_destroy)(struct lttng_ust_channel_buffer *chan);
+ /*
+ * packet_avail_size returns the available size in the current
+ * packet. Note that the size returned is only a hint, since it
+ * may change due to concurrent writes.
+ */
+ size_t (*packet_avail_size)(struct lttng_ust_channel_buffer *chan);
+ int (*is_finalized)(struct lttng_ust_channel_buffer *chan);
+ int (*is_disabled)(struct lttng_ust_channel_buffer *chan);
+ int (*flush_buffer)(struct lttng_ust_channel_buffer *chan);
+};
+
+struct lttng_ust_channel_common_private {
+ struct lttng_ust_channel_common *pub; /* Public channel interface */
+
+ int objd; /* Object associated with channel. */
+ int tstate:1; /* Transient enable state */
+};
+
+struct lttng_ust_channel_buffer_private {
+ struct lttng_ust_channel_common_private parent;
+
+ struct lttng_ust_channel_buffer *pub; /* Public channel buffer interface */
+ struct cds_list_head node; /* Channel list in session */
+ int header_type; /* 0: unset, 1: compact, 2: large */
+ unsigned int id; /* Channel ID */
+ enum lttng_ust_abi_chan_type type;
+ struct lttng_ust_ctx *ctx;
+ struct lttng_ust_lib_ring_buffer_channel *rb_chan; /* Ring buffer channel */
+ unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
+};
+
+/*
+ * IMPORTANT: this structure is part of the ABI between the consumer
+ * daemon and the UST library within traced applications. Changing it
+ * breaks the UST communication protocol.
+ *
+ * TODO: remove unused fields on next UST communication protocol
+ * breaking update.
+ */
+struct lttng_ust_abi_channel_config {
+ void *unused1;
+ int unused2;
+ void *unused3;
+ void *unused4;
+ int unused5;
+ struct cds_list_head unused6;
+ void *unused7;
+ int unused8;
+ void *unused9;
+
+ /* Channel ID */
+ unsigned int id;
+ enum lttng_ust_abi_chan_type unused10;
+ unsigned char uuid[LTTNG_UST_UUID_LEN]; /* Trace session unique ID */
+ int unused11:1;
+};
+
+/* Global (filter), event and channel contexts. */
+struct lttng_ust_ctx {
+ struct lttng_ust_ctx_field *fields;
+ unsigned int nr_fields;
+ unsigned int allocated_fields;
+ unsigned int largest_align;
+};
+
+struct lttng_ust_registered_probe {
+ const struct lttng_ust_probe_desc *desc;
+
+ struct cds_list_head head; /* chain registered probes */
+ struct cds_list_head lazy_init_head;
+ int lazy; /* lazy registration */
+};
+
+/*
+ * Context field
+ */
+
+struct lttng_ust_ctx_field {
+ const struct lttng_ust_event_field *event_field;
+ size_t (*get_size)(void *priv, size_t offset);
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan);
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value);
+ void (*destroy)(void *priv);
+ void *priv;
+};
+
+static inline
+const struct lttng_ust_type_integer *lttng_ust_get_type_integer(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_integer)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_integer, parent);
+}
+
+static inline
+const struct lttng_ust_type_float *lttng_ust_get_type_float(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_float)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_float, parent);
+}
+
+static inline
+const struct lttng_ust_type_string *lttng_ust_get_type_string(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_string)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_string, parent);
+}
+
+static inline
+const struct lttng_ust_type_enum *lttng_ust_get_type_enum(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_enum)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_enum, parent);
+}
+
+static inline
+const struct lttng_ust_type_array *lttng_ust_get_type_array(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_array)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_array, parent);
+}
+
+static inline
+const struct lttng_ust_type_sequence *lttng_ust_get_type_sequence(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_sequence)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_sequence, parent);
+}
+
+static inline
+const struct lttng_ust_type_struct *lttng_ust_get_type_struct(const struct lttng_ust_type_common *type)
+{
+ if (type->type != lttng_ust_type_struct)
+ return NULL;
+ return caa_container_of(type, const struct lttng_ust_type_struct, parent);
+}
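+
+/*
+ * Usage sketch for the downcast helpers above (illustrative only): each
+ * helper checks the type tag before the caa_container_of() downcast and
+ * returns NULL on mismatch, so callers can branch safely:
+ *
+ *   const struct lttng_ust_type_integer *itype =
+ *           lttng_ust_get_type_integer(field->type);
+ *
+ *   if (itype)
+ *           size = itype->size;
+ */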
+
+#define lttng_ust_static_type_integer(_size, _alignment, _signedness, _byte_order, _base) \
+ ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_integer, { \
+ .parent = { \
+ .type = lttng_ust_type_integer, \
+ }, \
+ .struct_size = sizeof(struct lttng_ust_type_integer), \
+ .size = (_size), \
+ .alignment = (_alignment), \
+ .signedness = (_signedness), \
+ .reverse_byte_order = (_byte_order) != BYTE_ORDER, \
+ .base = (_base), \
+ }))
+
+#define lttng_ust_static_type_array_text(_length) \
+ ((const struct lttng_ust_type_common *) __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_type_array, { \
+ .parent = { \
+ .type = lttng_ust_type_array, \
+ }, \
+ .struct_size = sizeof(struct lttng_ust_type_array), \
+ .length = (_length), \
+ .alignment = 0, \
+ .encoding = lttng_ust_string_encoding_UTF8, \
+ .elem_type = lttng_ust_static_type_integer(sizeof(char) * CHAR_BIT, \
+ lttng_ust_rb_alignof(char) * CHAR_BIT, lttng_ust_is_signed_type(char), \
+ BYTE_ORDER, 10), \
+ }))
+
+#define lttng_ust_static_event_field(_name, _type, _nowrite, _nofilter) \
+ __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_event_field, { \
+ .struct_size = sizeof(struct lttng_ust_event_field), \
+ .name = (_name), \
+ .type = (_type), \
+ .nowrite = (_nowrite), \
+ .nofilter = (_nofilter), \
+ })
+
+#define lttng_ust_static_ctx_field(_event_field, _get_size, _record, _get_value, _destroy, _priv) \
+ __LTTNG_COMPOUND_LITERAL(const struct lttng_ust_ctx_field, { \
+ .event_field = (_event_field), \
+ .get_size = (_get_size), \
+ .record = (_record), \
+ .get_value = (_get_value), \
+ .destroy = (_destroy), \
+ .priv = (_priv), \
+ })
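+
+/*
+ * Illustrative example (the field name is hypothetical) combining the
+ * macros above to declare a 16-byte text array event field:
+ *
+ *   lttng_ust_static_event_field("procname",
+ *           lttng_ust_static_type_array_text(16),
+ *           0, 0)
+ */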
+
+static inline
+struct lttng_enabler *lttng_event_enabler_as_enabler(
+ struct lttng_event_enabler *event_enabler)
+{
+ return &event_enabler->base;
+}
+
+static inline
+struct lttng_enabler *lttng_event_notifier_enabler_as_enabler(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+{
+ return &event_notifier_enabler->base;
+}
+
+/*
+ * Allocate and initialize a `struct lttng_event_enabler` object.
+ *
+ * On success, returns a `struct lttng_event_enabler`;
+ * on memory error, returns NULL.
+ */
+struct lttng_event_enabler *lttng_event_enabler_create(
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event *event_param,
+ struct lttng_ust_channel_buffer *chan)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Destroy a `struct lttng_event_enabler` object.
+ */
+void lttng_event_enabler_destroy(struct lttng_event_enabler *enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Enable a `struct lttng_event_enabler` object and all events related to this
+ * enabler.
+ */
+int lttng_event_enabler_enable(struct lttng_event_enabler *enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Disable a `struct lttng_event_enabler` object and all events related to this
+ * enabler.
+ */
+int lttng_event_enabler_disable(struct lttng_event_enabler *enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach filter bytecode program to `struct lttng_event_enabler` and all
+ * events related to this enabler.
+ */
+int lttng_event_enabler_attach_filter_bytecode(
+ struct lttng_event_enabler *enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach an application context to an event enabler.
+ *
+ * Not implemented.
+ */
+int lttng_event_enabler_attach_context(struct lttng_event_enabler *enabler,
+ struct lttng_ust_abi_context *ctx)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach exclusion list to `struct lttng_event_enabler` and all
+ * events related to this enabler.
+ */
+int lttng_event_enabler_attach_exclusion(struct lttng_event_enabler *enabler,
+ struct lttng_ust_excluder_node **excluder)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Synchronize bytecodes for the enabler and the instance (event or
+ * event_notifier).
+ *
+ * This function goes over all bytecode programs of the enabler (event or
+ * event_notifier enabler) to ensure each is linked to the provided instance.
+ */
+void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
+ struct lttng_ust_ctx **ctx,
+ struct cds_list_head *instance_bytecode_runtime_head,
+ struct cds_list_head *enabler_bytecode_runtime_head)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Allocate and initialize a `struct lttng_event_notifier_group` object.
+ *
+ * On success, returns a `struct lttng_event_notifier_group`;
+ * on memory error, returns NULL.
+ */
+struct lttng_event_notifier_group *lttng_event_notifier_group_create(void)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Destroy a `struct lttng_event_notifier_group` object.
+ */
+void lttng_event_notifier_group_destroy(
+ struct lttng_event_notifier_group *event_notifier_group)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Allocate and initialize a `struct lttng_event_notifier_enabler` object.
+ *
+ * On success, returns a `struct lttng_event_notifier_enabler`;
+ * on memory error, returns NULL.
+ */
+struct lttng_event_notifier_enabler *lttng_event_notifier_enabler_create(
+ struct lttng_event_notifier_group *event_notifier_group,
+ enum lttng_enabler_format_type format_type,
+ struct lttng_ust_abi_event_notifier *event_notifier_param)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Destroy a `struct lttng_event_notifier_enabler` object.
+ */
+void lttng_event_notifier_enabler_destroy(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Enable a `struct lttng_event_notifier_enabler` object and all event
+ * notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_enable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Disable a `struct lttng_event_notifier_enabler` object and all event
+ * notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_disable(
+ struct lttng_event_notifier_enabler *event_notifier_enabler)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach filter bytecode program to `struct lttng_event_notifier_enabler` and
+ * all event notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_attach_filter_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach capture bytecode program to `struct lttng_event_notifier_enabler` and
+ * all event_notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_attach_capture_bytecode(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_bytecode_node **bytecode)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Attach exclusion list to `struct lttng_event_notifier_enabler` and all
+ * event notifiers related to this enabler.
+ */
+int lttng_event_notifier_enabler_attach_exclusion(
+ struct lttng_event_notifier_enabler *event_notifier_enabler,
+ struct lttng_ust_excluder_node **excluder)
+ __attribute__((visibility("hidden")));
+
+void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Connect the probe on all enablers matching this event description.
+ * Called on library load.
+ */
+int lttng_fix_pending_event_notifiers(void)
+ __attribute__((visibility("hidden")));
+
+struct lttng_counter *lttng_ust_counter_create(
+ const char *counter_transport_name,
+ size_t number_dimensions, const struct lttng_counter_dimension *dimensions)
+ __attribute__((visibility("hidden")));
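+
+/*
+ * Illustrative sketch of a one-dimension counter creation through the
+ * declaration above; the transport name string is an assumption made for
+ * the example:
+ *
+ *   struct lttng_counter_dimension dim = {
+ *           .size = 128,
+ *           .has_underflow = 0,
+ *           .has_overflow = 0,
+ *   };
+ *   struct lttng_counter *counter;
+ *
+ *   counter = lttng_ust_counter_create("counter-per-cpu-64-modular",
+ *           1, &dim);
+ */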
+
+#ifdef HAVE_LINUX_PERF_EVENT_H
+
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+ uint64_t config,
+ const char *name,
+ struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_perf_counter_init(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_perf_counter_exit(void)
+ __attribute__((visibility("hidden")));
+
+#else /* #ifdef HAVE_LINUX_PERF_EVENT_H */
+
+static inline
+int lttng_add_perf_counter_to_ctx(uint32_t type,
+ uint64_t config,
+ const char *name,
+ struct lttng_ust_ctx **ctx)
+{
+ return -ENOSYS;
+}
+static inline
+int lttng_perf_counter_init(void)
+{
+ return 0;
+}
+static inline
+void lttng_perf_counter_exit(void)
+{
+}
+#endif /* #else #ifdef HAVE_LINUX_PERF_EVENT_H */
+
+int lttng_probes_get_event_list(struct lttng_ust_tracepoint_list *list)
+ __attribute__((visibility("hidden")));
+
+void lttng_probes_prune_event_list(struct lttng_ust_tracepoint_list *list)
+ __attribute__((visibility("hidden")));
+
+int lttng_probes_get_field_list(struct lttng_ust_field_list *list)
+ __attribute__((visibility("hidden")));
+
+void lttng_probes_prune_field_list(struct lttng_ust_field_list *list)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_abi_tracepoint_iter *
+ lttng_ust_tracepoint_list_get_iter_next(struct lttng_ust_tracepoint_list *list)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_abi_field_iter *
+ lttng_ust_field_list_get_iter_next(struct lttng_ust_field_list *list)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_session *lttng_session_create(void)
+ __attribute__((visibility("hidden")));
+
+int lttng_session_enable(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+int lttng_session_disable(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+int lttng_session_statedump(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+void lttng_session_destroy(struct lttng_ust_session *session)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Called with ust lock held.
+ */
+int lttng_session_active(void)
+ __attribute__((visibility("hidden")));
+
+struct cds_list_head *lttng_get_sessions(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_handle_pending_statedump(void *owner)
+ __attribute__((visibility("hidden")));
+
+int lttng_channel_enable(struct lttng_ust_channel_common *lttng_channel)
+ __attribute__((visibility("hidden")));
+
+int lttng_channel_disable(struct lttng_ust_channel_common *lttng_channel)
+ __attribute__((visibility("hidden")));
+
+void lttng_transport_register(struct lttng_transport *transport)
+ __attribute__((visibility("hidden")));
+
+void lttng_transport_unregister(struct lttng_transport *transport)
+ __attribute__((visibility("hidden")));
+
+/* This is ABI between liblttng-ust and liblttng-ust-ctl */
+struct lttng_transport *lttng_ust_transport_find(const char *name);
+
+/* This is ABI between liblttng-ust and liblttng-ust-dl */
+void lttng_ust_dl_update(void *ip);
+
+void lttng_probe_provider_unregister_events(const struct lttng_ust_probe_desc *desc)
+ __attribute__((visibility("hidden")));
+
+int lttng_fix_pending_events(void)
+ __attribute__((visibility("hidden")));
+
+struct cds_list_head *lttng_get_probe_list_head(void)
+ __attribute__((visibility("hidden")));
+
+struct lttng_enum *lttng_ust_enum_get_from_desc(struct lttng_ust_session *session,
+ const struct lttng_ust_enum_desc *enum_desc)
+ __attribute__((visibility("hidden")));
+
+int lttng_abi_create_root_handle(void)
+ __attribute__((visibility("hidden")));
+
+const struct lttng_ust_abi_objd_ops *lttng_ust_abi_objd_ops(int id)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_abi_objd_unref(int id, int is_owner)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_abi_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_abi_events_exit(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_abi_objd_table_owner_cleanup(void *owner)
+ __attribute__((visibility("hidden")));
+
+struct lttng_ust_channel_buffer *lttng_ust_alloc_channel_buffer(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_free_channel_common(struct lttng_ust_channel_common *chan)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_interpret_event_filter(struct lttng_ust_event_common *event,
+ const char *interpreter_stack_data,
+ void *filter_ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_session_uuid_validate(struct lttng_ust_session *session,
+ unsigned char *uuid)
+ __attribute__((visibility("hidden")));
+
+bool lttng_ust_validate_event_name(const struct lttng_ust_event_desc *desc)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_format_event_name(const struct lttng_ust_event_desc *desc,
+ char *name)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_add_app_context_to_ctx_rcu(const char *name, struct lttng_ust_ctx **ctx)
+ __attribute__((visibility("hidden")));
+
+int lttng_ust_context_set_provider_rcu(struct lttng_ust_ctx **_ctx,
+ const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_context_set_session_provider(const char *name,
+ size_t (*get_size)(void *priv, size_t offset),
+ void (*record)(void *priv, struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_channel_buffer *chan),
+ void (*get_value)(void *priv, struct lttng_ust_ctx_value *value),
+ void *priv)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_UST_EVENTS_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#define TRACEPOINT_CREATE_PROBES
+#define TP_IP_PARAM ip
+#include "ust_lib.h"
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2013 Paul Woegerer <paul_woegerer@mentor.com>
+ * Copyright (C) 2015 Antoine Busque <abusque@efficios.com>
+ * Copyright (C) 2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#undef TRACEPOINT_PROVIDER
+#define TRACEPOINT_PROVIDER lttng_ust_lib
+
+#if !defined(_TRACEPOINT_UST_LIB_H) || defined(TRACEPOINT_HEADER_MULTI_READ)
+#define _TRACEPOINT_UST_LIB_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+
+#define LTTNG_UST_LIB_PROVIDER
+#include <lttng/tracepoint.h>
+
+TRACEPOINT_EVENT(lttng_ust_lib, load,
+	TP_ARGS(void *, ip, void *, baddr, const char *, path,
+ uint64_t, memsz, uint8_t, has_build_id,
+ uint8_t, has_debug_link),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint64_t, memsz, memsz)
+ ctf_string(path, path)
+ ctf_integer(uint8_t, has_build_id, has_build_id)
+ ctf_integer(uint8_t, has_debug_link, has_debug_link)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_lib, build_id,
+ TP_ARGS(
+ void *, ip,
+ void *, baddr,
+ uint8_t *, build_id,
+ size_t, build_id_len
+ ),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_sequence_hex(uint8_t, build_id, build_id,
+ size_t, build_id_len)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_lib, debug_link,
+ TP_ARGS(
+ void *, ip,
+ void *, baddr,
+ char *, filename,
+ uint32_t, crc
+ ),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ ctf_integer(uint32_t, crc, crc)
+ ctf_string(filename, filename)
+ )
+)
+
+TRACEPOINT_EVENT(lttng_ust_lib, unload,
+ TP_ARGS(void *, ip, void *, baddr),
+ TP_FIELDS(
+ ctf_unused(ip)
+ ctf_integer_hex(void *, baddr, baddr)
+ )
+)
+
+#endif /* _TRACEPOINT_UST_LIB_H */
+
+#undef TRACEPOINT_INCLUDE
+#define TRACEPOINT_INCLUDE "./ust_lib.h"
+
+/* This part must be outside ifdef protection */
+#include <lttng/tracepoint-event.h>
+
+#ifdef __cplusplus
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _UST_WAIT_H
+#define _UST_WAIT_H
+
+#include <poll.h>
+
+/*
+ * Wait until "cond" gets true or timeout (in ms).
+ */
+#define wait_cond_interruptible_timeout(_cond, _timeout) \
+ ({ \
+ int __ret = 0, __pollret; \
+ int __timeout = _timeout; \
+ \
+ for (;;) { \
+ if (_cond) \
+ break; \
+ if (__timeout <= 0) { \
+ __ret = -ETIMEDOUT; \
+ break; \
+ } \
+ __pollret = poll(NULL, 0, 10); /* wait 10ms */ \
+ if (__pollret < 0) { \
+ __ret = -errno; \
+ break; \
+ } \
+ __timeout -= 10; \
+ } \
+ __ret; \
+ })
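+
+/*
+ * Usage sketch (illustrative; `ready_flag` and `handle_timeout` are
+ * hypothetical): poll a shared flag for up to one second, in 10ms
+ * increments:
+ *
+ *   ret = wait_cond_interruptible_timeout(
+ *           uatomic_read(&ready_flag), 1000);
+ *   if (ret == -ETIMEDOUT)
+ *           handle_timeout();
+ */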
+
+#endif /* _UST_WAIT_H */
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -fno-strict-aliasing
+
+noinst_LTLIBRARIES = libmsgpack.la
+
+libmsgpack_la_SOURCES = \
+ msgpack.c msgpack.h
+
+libmsgpack_la_CFLAGS = -DUST_COMPONENT="libmsgpack" $(AM_CFLAGS)
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+
+#define MSGPACK_FIXSTR_ID_MASK 0xA0
+#define MSGPACK_FIXMAP_ID_MASK 0x80
+#define MSGPACK_FIXARRAY_ID_MASK 0x90
+
+#define MSGPACK_NIL_ID 0xC0
+#define MSGPACK_FALSE_ID 0xC2
+#define MSGPACK_TRUE_ID 0xC3
+#define MSGPACK_MAP16_ID 0xDE
+#define MSGPACK_ARRAY16_ID 0xDC
+
+#define MSGPACK_UINT8_ID 0xCC
+#define MSGPACK_UINT16_ID 0xCD
+#define MSGPACK_UINT32_ID 0xCE
+#define MSGPACK_UINT64_ID 0xCF
+
+#define MSGPACK_INT8_ID 0xD0
+#define MSGPACK_INT16_ID 0xD1
+#define MSGPACK_INT32_ID 0xD2
+#define MSGPACK_INT64_ID 0xD3
+
+#define MSGPACK_FLOAT64_ID 0xCB
+#define MSGPACK_STR16_ID 0xDA
+
+#define MSGPACK_FIXINT_MAX ((1 << 7) - 1)
+#define MSGPACK_FIXINT_MIN -(1 << 5)
+#define MSGPACK_FIXMAP_MAX_COUNT 15
+#define MSGPACK_FIXARRAY_MAX_COUNT 15
+#define MSGPACK_FIXSTR_MAX_LENGTH 31
+
+#ifdef __KERNEL__
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+#include <lttng/msgpack.h>
+
+#define INT8_MIN (-128)
+#define INT16_MIN (-32767-1)
+#define INT32_MIN (-2147483647-1)
+#define INT8_MAX (127)
+#define INT16_MAX (32767)
+#define INT32_MAX (2147483647)
+#define UINT8_MAX (255)
+#define UINT16_MAX (65535)
+#define UINT32_MAX (4294967295U)
+
+#define byteswap_host_to_be16(_tmp) cpu_to_be16(_tmp)
+#define byteswap_host_to_be32(_tmp) cpu_to_be32(_tmp)
+#define byteswap_host_to_be64(_tmp) cpu_to_be64(_tmp)
+
+#define lttng_msgpack_assert(cond) WARN_ON(!(cond))
+
+#else /* __KERNEL__ */
+
+#include <lttng/ust-endian.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "msgpack.h"
+
+#define byteswap_host_to_be16(_tmp) htobe16(_tmp)
+#define byteswap_host_to_be32(_tmp) htobe32(_tmp)
+#define byteswap_host_to_be64(_tmp) htobe64(_tmp)
+
+#define lttng_msgpack_assert(cond) ({ \
+ if (!(cond)) \
+ fprintf(stderr, "Assertion failed. %s:%d\n", __FILE__, __LINE__); \
+ })
+#endif /* __KERNEL__ */
+
+static inline int lttng_msgpack_append_buffer(
+ struct lttng_msgpack_writer *writer,
+ const uint8_t *buf,
+ size_t length)
+{
+ int ret = 0;
+
+ lttng_msgpack_assert(buf);
+
+ /* Ensure we are not trying to write after the end of the buffer. */
+ if (writer->write_pos + length > writer->end_write_pos) {
+ ret = -1;
+ goto end;
+ }
+
+ memcpy(writer->write_pos, buf, length);
+ writer->write_pos += length;
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_append_u8(
+ struct lttng_msgpack_writer *writer, uint8_t value)
+{
+ return lttng_msgpack_append_buffer(writer, &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u16(
+ struct lttng_msgpack_writer *writer, uint16_t value)
+{
+ value = byteswap_host_to_be16(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u32(
+ struct lttng_msgpack_writer *writer, uint32_t value)
+{
+ value = byteswap_host_to_be32(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_u64(
+ struct lttng_msgpack_writer *writer, uint64_t value)
+{
+ value = byteswap_host_to_be64(value);
+
+ return lttng_msgpack_append_buffer(writer, (uint8_t *) &value, sizeof(value));
+}
+
+static inline int lttng_msgpack_append_f64(
+ struct lttng_msgpack_writer *writer, double value)
+{
+ union {
+ double d;
+ uint64_t u;
+ } u;
+
+ u.d = value;
+
+ return lttng_msgpack_append_u64(writer, u.u);
+}
+
+static inline int lttng_msgpack_append_i8(
+ struct lttng_msgpack_writer *writer, int8_t value)
+{
+ return lttng_msgpack_append_u8(writer, (uint8_t) value);
+}
+
+static inline int lttng_msgpack_append_i16(
+ struct lttng_msgpack_writer *writer, int16_t value)
+{
+ return lttng_msgpack_append_u16(writer, (uint16_t) value);
+}
+
+static inline int lttng_msgpack_append_i32(
+ struct lttng_msgpack_writer *writer, int32_t value)
+{
+ return lttng_msgpack_append_u32(writer, (uint32_t) value);
+}
+
+static inline int lttng_msgpack_append_i64(
+ struct lttng_msgpack_writer *writer, int64_t value)
+{
+ return lttng_msgpack_append_u64(writer, (uint64_t) value);
+}
+
+static inline int lttng_msgpack_encode_f64(
+ struct lttng_msgpack_writer *writer, double value)
+{
+ int ret;
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FLOAT64_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_f64(writer, value);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_fixmap(
+ struct lttng_msgpack_writer *writer, uint8_t count)
+{
+ int ret = 0;
+
+ lttng_msgpack_assert(count <= MSGPACK_FIXMAP_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXMAP_ID_MASK | count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_map16(
+ struct lttng_msgpack_writer *writer, uint16_t count)
+{
+ int ret;
+
+ lttng_msgpack_assert(count > MSGPACK_FIXMAP_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_MAP16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_fixarray(
+ struct lttng_msgpack_writer *writer, uint8_t count)
+{
+ int ret = 0;
+
+ lttng_msgpack_assert(count <= MSGPACK_FIXARRAY_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXARRAY_ID_MASK | count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_array16(
+ struct lttng_msgpack_writer *writer, uint16_t count)
+{
+ int ret;
+
+ lttng_msgpack_assert(count > MSGPACK_FIXARRAY_MAX_COUNT);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_ARRAY16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, count);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_fixstr(
+ struct lttng_msgpack_writer *writer,
+ const char *str,
+ uint8_t len)
+{
+ int ret;
+
+ lttng_msgpack_assert(len <= MSGPACK_FIXSTR_MAX_LENGTH);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_FIXSTR_ID_MASK | len);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+static inline int lttng_msgpack_encode_str16(
+ struct lttng_msgpack_writer *writer,
+ const char *str,
+ uint16_t len)
+{
+ int ret;
+
+ lttng_msgpack_assert(len > MSGPACK_FIXSTR_MAX_LENGTH);
+
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_STR16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, len);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_buffer(writer, (uint8_t *) str, len);
+ if (ret)
+ goto end;
+
+end:
+ return ret;
+}
+
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count)
+{
+ int ret;
+
+ if (count >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (count <= MSGPACK_FIXMAP_MAX_COUNT)
+ ret = lttng_msgpack_encode_fixmap(writer, count);
+ else
+ ret = lttng_msgpack_encode_map16(writer, count);
+
+ writer->map_nesting++;
+end:
+ return ret;
+}
+
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer)
+{
+ lttng_msgpack_assert(writer->map_nesting > 0);
+ writer->map_nesting--;
+ return 0;
+}
+
+int lttng_msgpack_begin_array(
+ struct lttng_msgpack_writer *writer, size_t count)
+{
+ int ret;
+
+ if (count >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (count <= MSGPACK_FIXARRAY_MAX_COUNT)
+ ret = lttng_msgpack_encode_fixarray(writer, count);
+ else
+ ret = lttng_msgpack_encode_array16(writer, count);
+
+ writer->array_nesting++;
+end:
+ return ret;
+}
+
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer)
+{
+ lttng_msgpack_assert(writer->array_nesting > 0);
+ writer->array_nesting--;
+ return 0;
+}
+
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+ const char *str)
+{
+ int ret;
+ size_t length = strlen(str);
+
+ if (length >= (1 << 16)) {
+ ret = -1;
+ goto end;
+ }
+
+ if (length <= MSGPACK_FIXSTR_MAX_LENGTH)
+ ret = lttng_msgpack_encode_fixstr(writer, str, length);
+ else
+ ret = lttng_msgpack_encode_str16(writer, str, length);
+
+end:
+ return ret;
+}
+
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_NIL_ID);
+}
+
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_TRUE_ID);
+}
+
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer)
+{
+ return lttng_msgpack_append_u8(writer, MSGPACK_FALSE_ID);
+}
+
+int lttng_msgpack_write_unsigned_integer(
+ struct lttng_msgpack_writer *writer, uint64_t value)
+{
+ int ret = 0;
+
+ if (value <= MSGPACK_FIXINT_MAX) {
+ ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+ if (ret)
+ goto end;
+ } else if (value <= UINT8_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT8_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u8(writer, (uint8_t) value);
+ if (ret)
+ goto end;
+ } else if (value <= UINT16_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u16(writer, (uint16_t) value);
+ if (ret)
+ goto end;
+ } else if (value <= UINT32_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT32_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u32(writer, (uint32_t) value);
+ if (ret)
+ goto end;
+ } else {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_UINT64_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_u64(writer, value);
+ if (ret)
+ goto end;
+ }
+
+end:
+ return ret;
+}
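+
+/*
+ * Worked examples of the size selection above:
+ * - 100 fits the positive fixint range: single byte 0x64;
+ * - 300 needs uint16: 0xCD 0x01 0x2C;
+ * - 70000 needs uint32: 0xCE 0x00 0x01 0x11 0x70.
+ */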
+
+int lttng_msgpack_write_signed_integer(struct lttng_msgpack_writer *writer, int64_t value)
+{
+ int ret;
+
+ if (value >= MSGPACK_FIXINT_MIN && value <= MSGPACK_FIXINT_MAX){
+ ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+ if (ret)
+ goto end;
+ } else if (value >= INT8_MIN && value <= INT8_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT8_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i8(writer, (int8_t) value);
+ if (ret)
+ goto end;
+ } else if (value >= INT16_MIN && value <= INT16_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT16_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i16(writer, (int16_t) value);
+ if (ret)
+ goto end;
+ } else if (value >= INT32_MIN && value <= INT32_MAX) {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT32_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i32(writer, (int32_t) value);
+ if (ret)
+ goto end;
+ } else {
+ ret = lttng_msgpack_append_u8(writer, MSGPACK_INT64_ID);
+ if (ret)
+ goto end;
+
+ ret = lttng_msgpack_append_i64(writer, value);
+ if (ret)
+ goto end;
+ }
+
+end:
+ return ret;
+}
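+
+/*
+ * Worked examples of the signed encoding above:
+ * - -1 fits the negative fixint range: single byte 0xFF;
+ * - -200 needs int16: 0xD1 0xFF 0x38.
+ */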
+
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value)
+{
+ return lttng_msgpack_encode_f64(writer, value);
+}
+
+void lttng_msgpack_writer_init(struct lttng_msgpack_writer *writer,
+ uint8_t *buffer, size_t size)
+{
+ lttng_msgpack_assert(buffer);
+
+ writer->buffer = buffer;
+ writer->write_pos = buffer;
+ writer->end_write_pos = buffer + size;
+
+ writer->array_nesting = 0;
+ writer->map_nesting = 0;
+}
+
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer)
+{
+ memset(writer, 0, sizeof(*writer));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2020 Francis Deslauriers <francis.deslauriers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_MSGPACK_H
+#define _LTTNG_UST_MSGPACK_H
+
+#include <stddef.h>
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else /* __KERNEL__ */
+#include <stdint.h>
+#endif /* __KERNEL__ */
+
+struct lttng_msgpack_writer {
+ uint8_t *buffer;
+ uint8_t *write_pos;
+ const uint8_t *end_write_pos;
+ uint8_t array_nesting;
+ uint8_t map_nesting;
+};
+
+void lttng_msgpack_writer_init(
+ struct lttng_msgpack_writer *writer,
+ uint8_t *buffer, size_t size)
+ __attribute__((visibility("hidden")));
+
+void lttng_msgpack_writer_fini(struct lttng_msgpack_writer *writer)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_write_nil(struct lttng_msgpack_writer *writer)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_write_true(struct lttng_msgpack_writer *writer)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_write_false(struct lttng_msgpack_writer *writer)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_write_unsigned_integer(
+ struct lttng_msgpack_writer *writer, uint64_t value)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_write_signed_integer(
+ struct lttng_msgpack_writer *writer, int64_t value)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_write_double(struct lttng_msgpack_writer *writer, double value)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_write_str(struct lttng_msgpack_writer *writer,
+ const char *value)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_begin_map(struct lttng_msgpack_writer *writer, size_t count)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_end_map(struct lttng_msgpack_writer *writer)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_begin_array(
+ struct lttng_msgpack_writer *writer, size_t count)
+ __attribute__((visibility("hidden")));
+
+int lttng_msgpack_end_array(struct lttng_msgpack_writer *writer)
+ __attribute__((visibility("hidden")));
+
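+/*
+ * Usage sketch (illustrative): serialize the map {"msg": "ok", "count": 2}
+ * into a stack buffer; error handling elided for brevity.
+ *
+ *   uint8_t buf[256];
+ *   struct lttng_msgpack_writer writer;
+ *
+ *   lttng_msgpack_writer_init(&writer, buf, sizeof(buf));
+ *   lttng_msgpack_begin_map(&writer, 2);
+ *   lttng_msgpack_write_str(&writer, "msg");
+ *   lttng_msgpack_write_str(&writer, "ok");
+ *   lttng_msgpack_write_str(&writer, "count");
+ *   lttng_msgpack_write_unsigned_integer(&writer, 2);
+ *   lttng_msgpack_end_map(&writer);
+ *   lttng_msgpack_writer_fini(&writer);
+ */
+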
+#endif /* _LTTNG_UST_MSGPACK_H */
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CFLAGS += -fno-strict-aliasing
+
+noinst_LTLIBRARIES = libringbuffer.la
+
+libringbuffer_la_SOURCES = \
+ smp.h smp.c getcpu.h \
+ shm.c shm.h shm_types.h shm_internal.h \
+ ring_buffer_backend.c \
+ ring_buffer_frontend.c \
+ api.h mmap.h \
+ backend.h backend_internal.h backend_types.h \
+ frontend_api.h frontend.h frontend_internal.h frontend_types.h \
+ nohz.h vatomic.h rb-init.h ringbuffer-config.h
+
+libringbuffer_la_LIBADD = \
+ -lrt
+
+if ENABLE_NUMA
+libringbuffer_la_LIBADD += -lnuma
+endif
+
+libringbuffer_la_CFLAGS = -DUST_COMPONENT="libringbuffer" $(AM_CFLAGS)
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring Buffer API.
+ */
+
+#ifndef _LTTNG_RING_BUFFER_API_H
+#define _LTTNG_RING_BUFFER_API_H
+
+#include "backend.h"
+#include "frontend.h"
+#include <lttng/ringbuffer-abi.h>
+
+/*
+ * ring_buffer_frontend_api.h contains static inline functions that depend on
+ * client static inlines. Hence the inclusion of this "api" header only
+ * within the client.
+ */
+#include "frontend_api.h"
+
+#endif /* _LTTNG_RING_BUFFER_API_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring buffer backend (API).
+ *
+ * Credits to Steven Rostedt for proposing to use an extra-subbuffer owned by
+ * the reader in flight recorder mode.
+ */
+
+#ifndef _LTTNG_RING_BUFFER_BACKEND_H
+#define _LTTNG_RING_BUFFER_BACKEND_H
+
+#include <stddef.h>
+#include <unistd.h>
+
+/* Internal helpers */
+#include "backend_internal.h"
+#include "frontend_internal.h"
+
+/* Ring buffer backend API */
+
+/* Ring buffer backend access (read/write) */
+
+extern size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset, void *dest, size_t len,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset, void *dest, size_t len,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to write directly to this address,
+ * as long as the write is never bigger than a page size.
+ */
+extern void *
+lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern void *
+lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/**
+ * lib_ring_buffer_write - write data to a buffer backend
+ * @config: ring buffer instance configuration
+ * @ctx: ring buffer context (input arguments only)
+ * @src: source pointer to copy from
+ * @len: length of data to copy
+ *
+ * This function copies "len" bytes of data from a source pointer to a buffer
+ * backend, at the current context offset. This is more or less a buffer
+ * backend-specific memcpy() operation. Calls the slow path (_ring_buffer_write)
+ * if copy is crossing a page boundary.
+ */
+static inline
+void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const void *src, size_t len)
+ __attribute__((always_inline));
+static inline
+void lib_ring_buffer_write(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const void *src, size_t len)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
+ size_t offset = ctx_private->buf_offset;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ void *p;
+
+ if (caa_unlikely(!len))
+ return;
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+ backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ if (caa_unlikely(!backend_pages)) {
+ if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+ return;
+ }
+ p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!p))
+ return;
+ lib_ring_buffer_do_copy(config, p, src, len);
+ ctx_private->buf_offset += len;
+}
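+
+/*
+ * Usage sketch (illustrative only, not part of this header): once a
+ * slot has been reserved and the context initialized (see
+ * frontend_api.h), event fields are serialized back-to-back at the
+ * current context offset; "config" and "ctx" stand for client-provided
+ * objects.
+ *
+ *   uint32_t id = 7;
+ *   uint64_t payload = 42;
+ *
+ *   lib_ring_buffer_write(config, ctx, &id, sizeof(id));
+ *   lib_ring_buffer_write(config, ctx, &payload, sizeof(payload));
+ */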
+
+/*
+ * Copy up to @len string bytes from @src to @dest. Stops whenever a
+ * terminating '\0' character is found in @src. Returns the number of
+ * bytes copied. Does *not* terminate @dest with a '\0' character.
+ */
+static inline
+size_t lib_ring_buffer_do_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+ char *dest, const char *src, size_t len)
+ __attribute__((always_inline));
+static inline
+size_t lib_ring_buffer_do_strcpy(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ char *dest, const char *src, size_t len)
+{
+ size_t count;
+
+ for (count = 0; count < len; count++) {
+ char c;
+
+ /*
+ * Only read source character once, in case it is
+ * modified concurrently.
+ */
+ c = CMM_LOAD_SHARED(src[count]);
+ if (!c)
+ break;
+ lib_ring_buffer_do_copy(config, &dest[count], &c, 1);
+ }
+ return count;
+}
+
+/**
+ * lib_ring_buffer_strcpy - write string data to a buffer backend
+ * @config: ring buffer instance configuration
+ * @ctx: ring buffer context (input arguments only)
+ * @src: source pointer to copy from
+ * @len: length of data to copy
+ * @pad: character to use for padding
+ *
+ * This function copies @len - 1 bytes of string data from a source
+ * pointer to a buffer backend, followed by a terminating '\0'
+ * character, at the current context offset. This is more or less a
+ * buffer backend-specific strncpy() operation. If a terminating '\0'
+ * character is found in @src before @len - 1 characters are copied, the
+ * buffer is padded with @pad characters (e.g. '#').
+ */
+static inline
+void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len, char pad)
+ __attribute__((always_inline));
+static inline
+void lib_ring_buffer_strcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len, char pad)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
+ size_t count;
+ size_t offset = ctx_private->buf_offset;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ void *p;
+
+ if (caa_unlikely(!len))
+ return;
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+ backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ if (caa_unlikely(!backend_pages)) {
+ if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+ return;
+ }
+ p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!p))
+ return;
+
+ count = lib_ring_buffer_do_strcpy(config, p, src, len - 1);
+ offset += count;
+ /* Padding */
+ if (caa_unlikely(count < len - 1)) {
+ size_t pad_len = len - 1 - count;
+
+ p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!p))
+ return;
+ lib_ring_buffer_do_memset(p, pad, pad_len);
+ offset += pad_len;
+ }
+ /* Final '\0' */
+ p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!p))
+ return;
+ lib_ring_buffer_do_memset(p, '\0', 1);
+ ctx_private->buf_offset += len;
+}
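+
+/*
+ * Worked example: with src = "hi", len = 8 and pad = '#', the backend
+ * receives exactly 8 bytes: 'h', 'i', five '#' padding characters, then
+ * the terminating '\0'. buf_offset always advances by the full @len, so
+ * the record field keeps a fixed size regardless of the source string
+ * length.
+ */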
+
+/**
+ * lib_ring_buffer_pstrcpy - write a P-string to a buffer backend
+ * @config: ring buffer instance configuration
+ * @ctx: ring buffer context (input arguments only)
+ * @src: source pointer to copy from
+ * @len: length of data to copy
+ * @pad: character to use for padding
+ *
+ * This function copies up to @len bytes of data from a source pointer
+ * into a Pascal string in the buffer backend. If a terminating '\0'
+ * character is found in @src before @len characters are copied, the
+ * buffer is padded with @pad characters (e.g. '\0').
+ *
+ * The length of Pascal strings in the ring buffer is explicit: it is
+ * either the array or the sequence length.
+ */
+static inline
+void lib_ring_buffer_pstrcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len, char pad)
+ __attribute__((always_inline));
+static inline
+void lib_ring_buffer_pstrcpy(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ const char *src, size_t len, char pad)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
+ size_t count;
+ size_t offset = ctx_private->buf_offset;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ void *p;
+
+ if (caa_unlikely(!len))
+ return;
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, (offset & (chanb->buf_size - 1)) + len > chanb->buf_size);
+ backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ if (caa_unlikely(!backend_pages)) {
+ if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+ return;
+ }
+ p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!p))
+ return;
+
+ count = lib_ring_buffer_do_strcpy(config, p, src, len);
+ offset += count;
+ /* Padding */
+ if (caa_unlikely(count < len)) {
+ size_t pad_len = len - count;
+
+ p = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!p))
+ return;
+ lib_ring_buffer_do_memset(p, pad, pad_len);
+ }
+ ctx_private->buf_offset += len;
+}
+
+/*
+ * This accessor counts the number of unread records in a buffer.
+ * It only provides a consistent value if neither reads nor writes are
+ * performed concurrently.
+ */
+static inline
+unsigned long lib_ring_buffer_get_records_unread(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+ unsigned long records_unread = 0, sb_bindex;
+ unsigned int i;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return 0;
+ for (i = 0; i < chan->backend.num_subbuf; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, i);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ records_unread += v_read(config, &backend_pages->records_unread);
+ }
+ if (config->mode == RING_BUFFER_OVERWRITE) {
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ records_unread += v_read(config, &backend_pages->records_unread);
+ }
+ return records_unread;
+}
+
+#endif /* _LTTNG_RING_BUFFER_BACKEND_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring buffer backend (internal helpers).
+ */
+
+#ifndef _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <urcu/compiler.h>
+
+#include <lttng/ringbuffer-context.h>
+#include "ringbuffer-config.h"
+#include "backend_types.h"
+#include "frontend_types.h"
+#include "shm.h"
+
+/* Ring buffer backend API presented to the frontend */
+
+/* Ring buffer and channel backend create/free */
+
+int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct channel_backend *chan,
+ int cpu,
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
+ __attribute__((visibility("hidden")));
+
+void channel_backend_unregister_notifiers(struct channel_backend *chanb)
+ __attribute__((visibility("hidden")));
+
+void lib_ring_buffer_backend_free(struct lttng_ust_lib_ring_buffer_backend *bufb)
+ __attribute__((visibility("hidden")));
+
+int channel_backend_init(struct channel_backend *chanb,
+ const char *name,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ size_t subbuf_size,
+ size_t num_subbuf, struct lttng_ust_shm_handle *handle,
+ const int *stream_fds)
+ __attribute__((visibility("hidden")));
+
+void channel_backend_free(struct channel_backend *chanb,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+void channel_backend_reset(struct channel_backend *chanb)
+ __attribute__((visibility("hidden")));
+
+int lib_ring_buffer_backend_init(void)
+ __attribute__((visibility("hidden")));
+
+void lib_ring_buffer_backend_exit(void)
+ __attribute__((visibility("hidden")));
+
+extern void _lib_ring_buffer_write(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset, const void *src, size_t len,
+ ssize_t pagecpy)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Subbuffer ID bits for overwrite mode. Need to fit within a single word to be
+ * exchanged atomically.
+ *
+ * Top half word, except lowest bit, belongs to "offset", which is used to
+ * count the produced buffers. For overwrite mode, this provides the consumer
+ * with the capacity to read subbuffers in order, handling the situation where
+ * producers would write up to 2^15 buffers (or 2^31 for 64-bit systems)
+ * concurrently with a single execution of get_subbuf (between offset sampling
+ * and subbuffer ID exchange).
+ */
+
+#define HALF_ULONG_BITS (CAA_BITS_PER_LONG >> 1)
+
+#define SB_ID_OFFSET_SHIFT (HALF_ULONG_BITS + 1)
+#define SB_ID_OFFSET_COUNT (1UL << SB_ID_OFFSET_SHIFT)
+#define SB_ID_OFFSET_MASK (~(SB_ID_OFFSET_COUNT - 1))
+/*
+ * Lowest bit of the top half word belongs to noref. Used only for overwrite
+ * mode.
+ */
+#define SB_ID_NOREF_SHIFT (SB_ID_OFFSET_SHIFT - 1)
+#define SB_ID_NOREF_COUNT (1UL << SB_ID_NOREF_SHIFT)
+#define SB_ID_NOREF_MASK SB_ID_NOREF_COUNT
+/*
+ * In overwrite mode: lowest half of word is used for index.
+ * Limit of 2^16 subbuffers per buffer on 32-bit, 2^32 on 64-bit.
+ * In producer-consumer mode: whole word used for index.
+ */
+#define SB_ID_INDEX_SHIFT 0
+#define SB_ID_INDEX_COUNT (1UL << SB_ID_INDEX_SHIFT)
+#define SB_ID_INDEX_MASK (SB_ID_NOREF_COUNT - 1)
+
+/*
+ * Construct the subbuffer id from offset, index and noref. Use only the index
+ * for producer-consumer mode (offset and noref are only used in overwrite
+ * mode).
+ */
+static inline
+unsigned long subbuffer_id(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned long offset, unsigned long noref,
+ unsigned long index)
+{
+ if (config->mode == RING_BUFFER_OVERWRITE)
+ return (offset << SB_ID_OFFSET_SHIFT)
+ | (noref << SB_ID_NOREF_SHIFT)
+ | index;
+ else
+ return index;
+}
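+
+/*
+ * Worked example on a 32-bit system (CAA_BITS_PER_LONG = 32):
+ * HALF_ULONG_BITS = 16, hence SB_ID_OFFSET_SHIFT = 17 and
+ * SB_ID_NOREF_SHIFT = 16. In overwrite mode, subbuffer_id(config, 3, 1, 5)
+ * yields (3 << 17) | (1 << 16) | 5 = 0x70005: offset count 3 in the top
+ * 15 bits, noref flag set, and backend pages index 5 in the low 16 bits.
+ */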
+
+/*
+ * Compare offset with the offset contained within id. Return 1 if the offset
+ * bits are identical, else 0.
+ */
+static inline
+int subbuffer_id_compare_offset(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ unsigned long id, unsigned long offset)
+{
+ return (id & SB_ID_OFFSET_MASK) == (offset << SB_ID_OFFSET_SHIFT);
+}
+
+static inline
+unsigned long subbuffer_id_get_index(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned long id)
+{
+ if (config->mode == RING_BUFFER_OVERWRITE)
+ return id & SB_ID_INDEX_MASK;
+ else
+ return id;
+}
+
+static inline
+unsigned long subbuffer_id_is_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned long id)
+{
+ if (config->mode == RING_BUFFER_OVERWRITE)
+ return !!(id & SB_ID_NOREF_MASK);
+ else
+ return 1;
+}
+
+/*
+ * Only used by reader on subbuffer ID it has exclusive access to. No volatile
+ * needed.
+ */
+static inline
+void subbuffer_id_set_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned long *id)
+{
+ if (config->mode == RING_BUFFER_OVERWRITE)
+ *id |= SB_ID_NOREF_MASK;
+}
+
+static inline
+void subbuffer_id_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned long *id, unsigned long offset)
+{
+ unsigned long tmp;
+
+ if (config->mode == RING_BUFFER_OVERWRITE) {
+ tmp = *id;
+ tmp &= ~SB_ID_OFFSET_MASK;
+ tmp |= offset << SB_ID_OFFSET_SHIFT;
+ tmp |= SB_ID_NOREF_MASK;
+ /* Volatile store, read concurrently by readers. */
+ CMM_ACCESS_ONCE(*id) = tmp;
+ }
+}
+
+/* No volatile access, since already used locally */
+static inline
+void subbuffer_id_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned long *id)
+{
+ if (config->mode == RING_BUFFER_OVERWRITE)
+ *id &= ~SB_ID_NOREF_MASK;
+}
+
+/*
+ * For overwrite mode, cap the number of subbuffers per buffer to:
+ * 2^16 on 32-bit architectures
+ * 2^32 on 64-bit architectures
+ * This is required to fit in the index part of the ID. Return 0 on success,
+ * -EPERM on failure.
+ */
+static inline
+int subbuffer_id_check_index(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned long num_subbuf)
+{
+ if (config->mode == RING_BUFFER_OVERWRITE)
+ return (num_subbuf > (1UL << HALF_ULONG_BITS)) ? -EPERM : 0;
+ else
+ return 0;
+}
+
+static inline
+int lib_ring_buffer_backend_get_pages(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_backend_pages **backend_pages)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &ctx_private->buf->backend;
+ struct channel_backend *chanb = &ctx_private->chan->backend;
+ struct lttng_ust_shm_handle *handle = ctx_private->chan->handle;
+ size_t sbidx;
+ size_t offset = ctx_private->buf_offset;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ unsigned long sb_bindex, id;
+ struct lttng_ust_lib_ring_buffer_backend_pages *_backend_pages;
+
+ offset &= chanb->buf_size - 1;
+ sbidx = offset >> chanb->subbuf_size_order;
+ wsb = shmp_index(handle, bufb->buf_wsb, sbidx);
+ if (caa_unlikely(!wsb))
+ return -1;
+ id = wsb->id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (caa_unlikely(!rpages))
+ return -1;
+ CHAN_WARN_ON(ctx_private->chan,
+ config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ _backend_pages = shmp(handle, rpages->shmp);
+ if (caa_unlikely(!_backend_pages))
+ return -1;
+ *backend_pages = _backend_pages;
+ return 0;
+}
+
+/* Get backend pages from cache. */
+static inline
+struct lttng_ust_lib_ring_buffer_backend_pages *
+ lib_ring_buffer_get_backend_pages_from_ctx(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ return ctx->priv->backend_pages;
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
+static inline
+void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx, struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ backend_pages = lib_ring_buffer_get_backend_pages_from_ctx(config, ctx);
+ if (caa_unlikely(!backend_pages)) {
+ if (lib_ring_buffer_backend_get_pages(config, ctx, &backend_pages))
+ return;
+ }
+ v_inc(config, &backend_pages->records_commit);
+}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static inline
+void subbuffer_count_record(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_backend *bufb __attribute__((unused)),
+ unsigned long idx __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
+
+/*
+ * Reader has exclusive subbuffer access for record consumption. No need to
+ * perform the decrement atomically.
+ */
+static inline
+void subbuffer_consume_record(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+ if (!pages_shmp)
+ return;
+ backend_pages = shmp(handle, pages_shmp->shmp);
+ if (!backend_pages)
+ return;
+ CHAN_WARN_ON(chan, !v_read(config, &backend_pages->records_unread));
+ /* Non-atomic decrement protected by exclusive subbuffer access */
+ _v_dec(config, &backend_pages->records_unread);
+ v_inc(config, &bufb->records_read);
+}
+
+static inline
+unsigned long subbuffer_get_records_count(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ return v_read(config, &backend_pages->records_commit);
+}
+
+/*
+ * Must be executed at subbuffer delivery when the writer has _exclusive_
+ * subbuffer access. See lib_ring_buffer_check_deliver() for details.
+ * subbuffer_get_records_count() must be called to get the records
+ * count before this function, because it resets the records_commit
+ * count.
+ */
+static inline
+unsigned long subbuffer_count_records_overrun(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long overruns, sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ overruns = v_read(config, &backend_pages->records_unread);
+ v_set(config, &backend_pages->records_unread,
+ v_read(config, &backend_pages->records_commit));
+ v_set(config, &backend_pages->records_commit, 0);
+
+ return overruns;
+}
+
+static inline
+void subbuffer_set_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx,
+ unsigned long data_size,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return;
+ backend_pages->data_size = data_size;
+}
+
+static inline
+unsigned long subbuffer_get_read_data_size(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *pages_shmp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+ pages_shmp = shmp_index(handle, bufb->array, sb_bindex);
+ if (!pages_shmp)
+ return 0;
+ backend_pages = shmp(handle, pages_shmp->shmp);
+ if (!backend_pages)
+ return 0;
+ return backend_pages->data_size;
+}
+
+static inline
+unsigned long subbuffer_get_data_size(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long sb_bindex;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return 0;
+ sb_bindex = subbuffer_id_get_index(config, wsb->id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ return backend_pages->data_size;
+}
+
+static inline
+void subbuffer_inc_packet_count(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx, struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend_counts *counts;
+
+ counts = shmp_index(handle, bufb->buf_cnt, idx);
+ if (!counts)
+ return;
+ counts->seq_cnt++;
+}
+
+/**
+ * lib_ring_buffer_clear_noref - Clear the noref subbuffer flag, called by
+ * writer.
+ */
+static inline
+void lib_ring_buffer_clear_noref(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long id, new_id;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+
+ if (config->mode != RING_BUFFER_OVERWRITE)
+ return;
+
+ /*
+ * Performing a volatile access to read the sb_pages, because we want to
+ * read a coherent version of the pointer and the associated noref flag.
+ */
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
+ id = CMM_ACCESS_ONCE(wsb->id);
+ for (;;) {
+ /* This check is called on the fast path for each record. */
+ if (caa_likely(!subbuffer_id_is_noref(config, id))) {
+ /*
+ * The ordering of writes to the subbuffer after the load and
+ * test of the noref flag matches the memory barrier implied
+ * by the cmpxchg() in update_read_sb_index().
+ */
+ return; /* Already writing to this buffer */
+ }
+ new_id = id;
+ subbuffer_id_clear_noref(config, &new_id);
+ new_id = uatomic_cmpxchg(&wsb->id, id, new_id);
+ if (caa_likely(new_id == id))
+ break;
+ id = new_id;
+ }
+}
+
+/**
+ * lib_ring_buffer_set_noref_offset - Set the noref subbuffer flag and offset,
+ * called by writer.
+ */
+static inline
+void lib_ring_buffer_set_noref_offset(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ unsigned long idx, unsigned long offset,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+
+ if (config->mode != RING_BUFFER_OVERWRITE)
+ return;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, idx);
+ if (!wsb)
+ return;
+ /*
+ * Because ring_buffer_set_noref() is only called by a single thread
+ * (the one which updated the cc_sb value), there are no concurrent
+ * updates to take care of: other writers have not updated cc_sb, so
+ * they cannot set the noref flag, and concurrent readers cannot modify
+ * the pointer because the noref flag is not set yet.
+ * The smp_wmb() in ring_buffer_commit() takes care of ordering writes
+ * to the subbuffer before this set noref operation.
+ * subbuffer_set_noref() uses a volatile store to deal with concurrent
+ * readers of the noref flag.
+ */
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ CHAN_WARN_ON(chan, subbuffer_id_is_noref(config, wsb->id));
+ /*
+ * Memory barrier that ensures counter stores are ordered before set
+ * noref and offset.
+ */
+ cmm_smp_mb();
+ subbuffer_id_set_noref_offset(config, &wsb->id, offset);
+}
+
+/**
+ * update_read_sb_index - Read-side subbuffer index update.
+ */
+static inline
+int update_read_sb_index(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct channel_backend *chanb __attribute__((unused)),
+ unsigned long consumed_idx,
+ unsigned long consumed_count,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ unsigned long old_id, new_id;
+
+ wsb = shmp_index(handle, bufb->buf_wsb, consumed_idx);
+ if (caa_unlikely(!wsb))
+ return -EPERM;
+
+ if (config->mode == RING_BUFFER_OVERWRITE) {
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+
+ /*
+ * Exchange the target writer subbuffer with our own unused
+ * subbuffer. No need to use CMM_ACCESS_ONCE() here to read the
+ * old_wpage, because the value read will be confirmed by the
+ * following cmpxchg().
+ */
+ old_id = wsb->id;
+ if (caa_unlikely(!subbuffer_id_is_noref(config, old_id)))
+ return -EAGAIN;
+ /*
+ * Make sure the offset count we are expecting matches the one
+ * indicated by the writer.
+ */
+ if (caa_unlikely(!subbuffer_id_compare_offset(config, old_id,
+ consumed_count)))
+ return -EAGAIN;
+ chan = shmp(handle, bufb->chan);
+ if (caa_unlikely(!chan))
+ return -EPERM;
+ CHAN_WARN_ON(chan, !subbuffer_id_is_noref(config, bufb->buf_rsb.id));
+ subbuffer_id_set_noref_offset(config, &bufb->buf_rsb.id,
+ consumed_count);
+ new_id = uatomic_cmpxchg(&wsb->id, old_id, bufb->buf_rsb.id);
+ if (caa_unlikely(old_id != new_id))
+ return -EAGAIN;
+ bufb->buf_rsb.id = new_id;
+ } else {
+ /* No page exchange, use the writer page directly */
+ bufb->buf_rsb.id = wsb->id;
+ }
+ return 0;
+}
+
+#ifndef inline_memcpy
+#define inline_memcpy(dest, src, n) memcpy(dest, src, n)
+#endif
+
+static inline
+void lttng_inline_memcpy(void *dest, const void *src,
+ unsigned long len)
+ __attribute__((always_inline));
+static inline
+void lttng_inline_memcpy(void *dest, const void *src,
+ unsigned long len)
+{
+ switch (len) {
+ case 1:
+ *(uint8_t *) dest = *(const uint8_t *) src;
+ break;
+ case 2:
+ *(uint16_t *) dest = *(const uint16_t *) src;
+ break;
+ case 4:
+ *(uint32_t *) dest = *(const uint32_t *) src;
+ break;
+ case 8:
+ *(uint64_t *) dest = *(const uint64_t *) src;
+ break;
+ default:
+ inline_memcpy(dest, src, len);
+ }
+}
+
+/*
+ * Use memcpy() for constant-sized copies, which the compiler specializes
+ * into inline moves, but rely on lttng_inline_memcpy() when the length is
+ * not statically known. A function call to memcpy() is just way too
+ * expensive for a fast path.
+ */
+#define lib_ring_buffer_do_copy(config, dest, src, len) \
+do { \
+ size_t __len = (len); \
+ if (__builtin_constant_p(len)) \
+ memcpy(dest, src, __len); \
+ else \
+ lttng_inline_memcpy(dest, src, __len); \
+} while (0)
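+
+/*
+ * Illustrative expansion (not normative): with a compile-time constant
+ * length, as in
+ *
+ *   lib_ring_buffer_do_copy(config, p, &payload, sizeof(uint64_t));
+ *
+ * __builtin_constant_p(len) is true and the compiler can specialize the
+ * memcpy() call into a few move instructions. With a runtime length,
+ * lttng_inline_memcpy() is used instead, which still turns the common
+ * 1/2/4/8-byte cases into single loads and stores.
+ */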
+
+/*
+ * Write @len bytes of character @c to @dest.
+ */
+static inline
+void lib_ring_buffer_do_memset(char *dest, char c, unsigned long len)
+{
+ unsigned long i;
+
+ for (i = 0; i < len; i++)
+ dest[i] = c;
+}
+
+/* Architecture-agnostic fls() implementation: find the last bit set. */
+
+static inline int lttng_ust_fls(unsigned int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xFFFF0000U)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xFF000000U)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xF0000000U)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xC0000000U)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000U)) {
+ /* No need to bit shift on last operation */
+ r -= 1;
+ }
+ return r;
+}
+
+static inline int get_count_order(unsigned int count)
+{
+ int order;
+
+ order = lttng_ust_fls(count) - 1;
+ if (count & (count - 1))
+ order++;
+ return order;
+}
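+
+/*
+ * Worked examples: lttng_ust_fls(1) = 1, lttng_ust_fls(0x80000000U) = 32,
+ * and lttng_ust_fls(0) = 0. get_count_order() computes ceil(log2(count)):
+ * get_count_order(16) = 4 and get_count_order(24) = 5, as used to derive
+ * power-of-two orders such as subbuf_size_order.
+ */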
+
+#endif /* _LTTNG_RING_BUFFER_BACKEND_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2008-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring buffer backend (types).
+ */
+
+#ifndef _LTTNG_RING_BUFFER_BACKEND_TYPES_H
+#define _LTTNG_RING_BUFFER_BACKEND_TYPES_H
+
+#include <limits.h>
+#include <stdint.h>
+#include "shm_internal.h"
+#include "vatomic.h"
+
+#define RB_BACKEND_PAGES_PADDING 16
+struct lttng_ust_lib_ring_buffer_backend_pages {
+ unsigned long mmap_offset; /* offset of the subbuffer in mmap */
+ union v_atomic records_commit; /* current records committed count */
+ union v_atomic records_unread; /* records to read */
+ unsigned long data_size; /* Amount of data to read from subbuf */
+ DECLARE_SHMP(char, p); /* Backing memory map */
+ char padding[RB_BACKEND_PAGES_PADDING];
+};
+
+struct lttng_ust_lib_ring_buffer_backend_subbuffer {
+ /* Identifier for subbuf backend pages. Exchanged atomically. */
+ unsigned long id; /* backend subbuffer identifier */
+};
+
+struct lttng_ust_lib_ring_buffer_backend_counts {
+ /*
+ * Counter specific to the sub-buffer location within the ring buffer.
+ * The actual sequence number of the packet within the entire ring
+ * buffer can be derived from the formula nr_subbuffers * seq_cnt +
+ * subbuf_idx.
+ */
+ uint64_t seq_cnt; /* packet sequence number */
+};
+
+/*
+ * Forward declaration of frontend-specific channel and ring_buffer.
+ */
+struct lttng_ust_lib_ring_buffer_channel;
+struct lttng_ust_lib_ring_buffer;
+
+struct lttng_ust_lib_ring_buffer_backend_pages_shmp {
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages, shmp);
+};
+
+#define RB_BACKEND_RING_BUFFER_PADDING 64
+struct lttng_ust_lib_ring_buffer_backend {
+ /* Array of ring_buffer_backend_subbuffer for writer */
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_subbuffer, buf_wsb);
+ /* ring_buffer_backend_subbuffer for reader */
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer buf_rsb;
+ /* Array of lib_ring_buffer_backend_counts for the packet counter */
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_counts, buf_cnt);
+ /*
+ * Pointer array of backend pages, for whole buffer.
+ * Indexed by ring_buffer_backend_subbuffer identifier (id) index.
+ */
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_backend_pages_shmp, array);
+ DECLARE_SHMP(char, memory_map); /* memory mapping */
+
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_channel, chan); /* Associated channel */
+ int cpu; /* This buffer's cpu. -1 if global. */
+ union v_atomic records_read; /* Number of records read */
+ unsigned int allocated:1; /* is buffer allocated ? */
+ char padding[RB_BACKEND_RING_BUFFER_PADDING];
+};
+
+struct lttng_ust_lib_ring_buffer_shmp {
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, shmp); /* Channel per-cpu buffers */
+};
+
+#define RB_BACKEND_CHANNEL_PADDING 64
+struct channel_backend {
+ unsigned long buf_size; /* Size of the buffer */
+ unsigned long subbuf_size; /* Sub-buffer size */
+ unsigned int subbuf_size_order; /* Order of sub-buffer size */
+ unsigned int num_subbuf_order; /*
+ * Order of number of sub-buffers/buffer
+ * for writer.
+ */
+ unsigned int buf_size_order; /* Order of buffer size */
+ unsigned int extra_reader_sb:1; /* has extra reader subbuffer ? */
+ unsigned long num_subbuf; /* Number of sub-buffers for writer */
+ uint64_t start_tsc; /* Channel creation TSC value */
+ DECLARE_SHMP(void *, priv_data);/* Client-specific information */
+ struct lttng_ust_lib_ring_buffer_config config; /* Ring buffer configuration */
+ char name[NAME_MAX]; /* Channel name */
+ char padding[RB_BACKEND_CHANNEL_PADDING];
+ struct lttng_ust_lib_ring_buffer_shmp buf[];
+};
+
+#endif /* _LTTNG_RING_BUFFER_BACKEND_TYPES_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring Buffer Library Synchronization Header (API).
+ *
+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
+ */
+
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_H
+#define _LTTNG_RING_BUFFER_FRONTEND_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <urcu/compiler.h>
+#include <urcu/uatomic.h>
+
+#include "smp.h"
+
+/* Internal helpers */
+#include "frontend_internal.h"
+
+/* Buffer creation/removal and setup operations */
+
+/*
+ * switch_timer_interval is the time interval (in us) to fill sub-buffers with
+ * padding to let readers get those sub-buffers. Used for live streaming.
+ *
+ * read_timer_interval is the time interval (in us) to wake up pending readers.
+ *
+ * buf_addr is a pointer to the beginning of the preallocated buffer contiguous
+ * address mapping. It is used only by the RING_BUFFER_STATIC configuration. It
+ * can be set to NULL for other backends.
+ *
+ * priv_data is a memory area for configuration data. This memory is
+ * managed by the ring buffer library. priv_data_align is the alignment
+ * required for the private data area.
+ */
+
+extern
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+ const char *name,
+ size_t priv_data_align,
+ size_t priv_data_size,
+ void *priv_data_init,
+ void *priv,
+ void *buf_addr,
+ size_t subbuf_size, size_t num_subbuf,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout)
+ __attribute__((visibility("hidden")));
+
+/*
+ * channel_destroy finalizes all of the channel's buffers, waits for readers
+ * to release all their references, and destroys the channel.
+ */
+void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle,
+ int consumer)
+ __attribute__((visibility("hidden")));
+
+
+/* Buffer read operations */
+
+/*
+ * Iteration on channel cpumask needs to issue a read barrier to match the write
+ * barrier in cpu hotplug. It orders the cpumask read before read of per-cpu
+ * buffer data. The per-cpu buffer is never removed by cpu hotplug; teardown is
+ * only performed at channel destruction.
+ */
+#define for_each_channel_cpu(cpu, chan) \
+ for_each_possible_cpu(cpu)
+
+extern struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
+ struct lttng_ust_shm_handle *handle,
+ int *shm_fd, int *wait_fd,
+ int *wakeup_fd,
+ uint64_t *memory_map_size)
+ __attribute__((visibility("hidden")));
+
+extern
+int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern
+int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern
+int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle,
+ int cpu)
+ __attribute__((visibility("hidden")));
+
+extern
+int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle,
+ int cpu)
+ __attribute__((visibility("hidden")));
+
+extern int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Initialize signals for the ring buffer. Should be called early, e.g. by
+ * main() in the program, to affect all threads.
+ */
+void lib_ringbuffer_signal_init(void)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Read sequence: snapshot, many get_subbuf/put_subbuf, move_consumer.
+ */
+extern int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long *consumed,
+ unsigned long *produced,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern int lib_ring_buffer_snapshot_sample_positions(
+ struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long *consumed,
+ unsigned long *produced,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long consumed_new,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long consumed,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+extern void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/*
+ * lib_ring_buffer_get_next_subbuf/lib_ring_buffer_put_next_subbuf are helpers
+ * to read sub-buffers sequentially.
+ */
+static inline int lib_ring_buffer_get_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ int ret;
+
+ ret = lib_ring_buffer_snapshot(buf, &buf->cons_snapshot,
+ &buf->prod_snapshot, handle);
+ if (ret)
+ return ret;
+ ret = lib_ring_buffer_get_subbuf(buf, buf->cons_snapshot, handle);
+ return ret;
+}
+
+static inline
+void lib_ring_buffer_put_next_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
+ lib_ring_buffer_put_subbuf(buf, handle);
+ lib_ring_buffer_move_consumer(buf, subbuf_align(buf->cons_snapshot, chan),
+ handle);
+}
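+
+/*
+ * Consumer-side sketch (illustrative only, not part of this header): a
+ * reader typically drains all currently readable sub-buffers in a loop,
+ * where my_process_subbuf() is a hypothetical client callback.
+ *
+ *   while (lib_ring_buffer_get_next_subbuf(buf, handle) == 0) {
+ *           my_process_subbuf(buf, handle);
+ *           lib_ring_buffer_put_next_subbuf(buf, handle);
+ *   }
+ */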
+
+extern void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
+ __attribute__((visibility("hidden")));
+
+extern void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+static inline
+unsigned long lib_ring_buffer_get_offset(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return v_read(config, &buf->offset);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_consumed(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return uatomic_read(&buf->consumed);
+}
+
+/*
+ * Must call lib_ring_buffer_is_finalized before reading counters (memory
+ * ordering enforced with respect to trace teardown).
+ */
+static inline
+int lib_ring_buffer_is_finalized(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ int finalized = CMM_ACCESS_ONCE(buf->finalized);
+ /*
+ * Read finalized before counters.
+ */
+ cmm_smp_rmb();
+ return finalized;
+}
+
+static inline
+int lib_ring_buffer_channel_is_finalized(const struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return chan->finalized;
+}
+
+static inline
+int lib_ring_buffer_channel_is_disabled(const struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return uatomic_read(&chan->record_disabled);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_read_data_size(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ return subbuffer_get_read_data_size(config, &buf->backend, handle);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_count(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return v_read(config, &buf->records_count);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_overrun(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return v_read(config, &buf->records_overrun);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_lost_full(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return v_read(config, &buf->records_lost_full);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_lost_wrap(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return v_read(config, &buf->records_lost_wrap);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_lost_big(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return v_read(config, &buf->records_lost_big);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_records_read(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ return v_read(config, &buf->backend.records_read);
+}
+
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * See ring_buffer_frontend.c for more information on wait-free
+ * algorithms.
+ * See frontend.h for channel allocation and read-side API.
+ */
+
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_API_H
+#define _LTTNG_RING_BUFFER_FRONTEND_API_H
+
+#include <stddef.h>
+
+#include <urcu/compiler.h>
+
+#include "frontend.h"
+
+/**
+ * lib_ring_buffer_nesting_inc - Ring buffer recursive use protection.
+ *
+ * The ring buffer nesting count is a safety net to ensure tracer
+ * client code will never trigger an endless recursion.
+ * Returns a nesting level >= 0 on success, -EPERM on failure (nesting
+ * count too high).
+ *
+ * asm volatile and the "memory" clobber prevent the compiler from moving
+ * instructions out of the nesting count section. This is required to ensure
+ * that probe side-effects which can cause recursion (e.g. unforeseen traps,
+ * divisions by 0, ...) are triggered within the incremented nesting count
+ * section.
+ */
+static inline
+int lib_ring_buffer_nesting_inc(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+{
+ int nesting;
+
+ nesting = ++URCU_TLS(lib_ring_buffer_nesting);
+ cmm_barrier();
+ if (caa_unlikely(nesting >= LIB_RING_BUFFER_MAX_NESTING)) {
+ WARN_ON_ONCE(1);
+ URCU_TLS(lib_ring_buffer_nesting)--;
+ return -EPERM;
+ }
+ return nesting - 1;
+}
+
+static inline
+int lib_ring_buffer_nesting_count(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+{
+ return URCU_TLS(lib_ring_buffer_nesting);
+}
+
+static inline
+void lib_ring_buffer_nesting_dec(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+{
+ cmm_barrier();
+ URCU_TLS(lib_ring_buffer_nesting)--; /* TLS */
+}
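+
+/*
+ * Usage sketch (illustrative only): probe code brackets the tracing
+ * fast path with the nesting guard, so that an unexpected recursive
+ * call into the tracer aborts instead of recursing endlessly.
+ *
+ *   if (lib_ring_buffer_nesting_inc(config) < 0)
+ *           return;
+ *   ... reserve, write and commit the record ...
+ *   lib_ring_buffer_nesting_dec(config);
+ */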
+
+/*
+ * lib_ring_buffer_try_reserve is called by lib_ring_buffer_reserve(). It is
+ * not part of the API per se.
+ *
+ * Returns 0 if the reservation succeeded, or 1 if the slow path must be
+ * taken.
+ */
+static inline
+int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx,
+ unsigned long *o_begin, unsigned long *o_end,
+ unsigned long *o_old, size_t *before_hdr_pad)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ *o_begin = v_read(config, &buf->offset);
+ *o_old = *o_begin;
+
+ ctx_private->tsc = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->tsc == -EIO)
+ return 1;
+
+ /*
+ * Prefetch cacheline for read because we have to read the previous
+ * commit counter to increment it and commit seq value to compare it to
+ * the commit counter.
+ */
+ //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
+
+ if (last_tsc_overflow(config, buf, ctx_private->tsc))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+
+ if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
+ return 1;
+
+ ctx_private->slot_size = record_header_size(config, chan, *o_begin,
+ before_hdr_pad, ctx, client_ctx);
+ ctx_private->slot_size +=
+ lttng_ust_lib_ring_buffer_align(*o_begin + ctx_private->slot_size,
+ ctx->largest_align) + ctx->data_size;
+ if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx_private->slot_size)
+ > chan->backend.subbuf_size))
+ return 1;
+
+ /*
+ * Record fits in the current buffer and we are not on a switch
+ * boundary. It's safe to write.
+ */
+ *o_end = *o_begin + ctx_private->slot_size;
+
+ if (caa_unlikely((subbuf_offset(*o_end, chan)) == 0))
+ /*
+ * The offset_end will fall at the very beginning of the next
+ * subbuffer.
+ */
+ return 1;
+
+ return 0;
+}
+
+/**
+ * lib_ring_buffer_reserve - Reserve space in a ring buffer.
+ * @config: ring buffer instance configuration.
+ * @ctx: ring buffer context. (input and output) Must be already initialized.
+ *
+ * Atomic wait-free slot reservation. The reserved space starts at the context
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ *
+ * Return :
+ * 0 on success.
+ * -EAGAIN if channel is disabled.
+ * -ENOSPC if event size is too large for packet.
+ * -ENOBUFS if there is currently not enough space in buffer for the event.
+ * -EIO if data cannot be written into the buffer for any other reason.
+ */
+
+static inline
+int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct lttng_ust_lib_ring_buffer *buf;
+ unsigned long o_begin, o_end, o_old;
+ size_t before_hdr_pad = 0;
+
+ if (caa_unlikely(uatomic_read(&chan->record_disabled)))
+ return -EAGAIN;
+
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+ ctx_private->reserve_cpu = lttng_ust_get_cpu();
+ buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
+ } else {
+ buf = shmp(handle, chan->backend.buf[0].shmp);
+ }
+ if (caa_unlikely(!buf))
+ return -EIO;
+ if (caa_unlikely(uatomic_read(&buf->record_disabled)))
+ return -EAGAIN;
+ ctx_private->buf = buf;
+
+ /*
+ * Perform retryable operations.
+ */
+ if (caa_unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
+ &o_end, &o_old, &before_hdr_pad)))
+ goto slow_path;
+
+ if (caa_unlikely(v_cmpxchg(config, &buf->offset, o_old, o_end)
+ != o_old))
+ goto slow_path;
+
+ /*
+ * Atomically update last_tsc. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full TSC
+ * record headers, never the opposite (missing a full TSC record header
+ * when it would be needed).
+ */
+ save_last_tsc(config, buf, ctx_private->tsc);
+
+ /*
+ * Push the reader if necessary
+ */
+ lib_ring_buffer_reserve_push_reader(buf, chan, o_end - 1);
+
+ /*
+ * Clear noref flag for this subbuffer.
+ */
+ lib_ring_buffer_clear_noref(config, &buf->backend,
+ subbuf_index(o_end - 1, chan), handle);
+
+ ctx_private->pre_offset = o_begin;
+ ctx_private->buf_offset = o_begin + before_hdr_pad;
+ return 0;
+slow_path:
+ return lib_ring_buffer_reserve_slow(ctx, client_ctx);
+}
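+
+/*
+ * Writer-side sketch (illustrative only, not part of this header): one
+ * record is serialized per reserve/commit pair. This assumes ctx has
+ * already been initialized by the client (see lttng/ringbuffer-context.h);
+ * the payload is a placeholder.
+ *
+ *   uint64_t payload = 42;
+ *
+ *   if (lib_ring_buffer_reserve(config, &ctx, NULL))
+ *           return;  (channel disabled, buffer full, or record too large)
+ *   lib_ring_buffer_write(config, &ctx, &payload, sizeof(payload));
+ *   lib_ring_buffer_commit(config, &ctx);
+ */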
+
+/**
+ * lib_ring_buffer_switch - Perform a sub-buffer switch for a per-cpu buffer.
+ * @config: ring buffer instance configuration.
+ * @buf: buffer
+ * @mode: buffer switch mode (SWITCH_ACTIVE or SWITCH_FLUSH)
+ *
+ * This operation is completely reentrant: it can be called while tracing is
+ * active with absolutely no lock held.
+ *
+ * Note, however, that as a v_cmpxchg is used for some atomic operations and
+ * must be executed locally for per-CPU buffers, this function must be called
+ * from the CPU which owns the buffer for an ACTIVE flush, with preemption
+ * disabled, for the RING_BUFFER_SYNC_PER_CPU configuration.
+ */
+static inline
+void lib_ring_buffer_switch(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ struct lttng_ust_shm_handle *handle)
+{
+ lib_ring_buffer_switch_slow(buf, mode, handle);
+}
+
+/**
+ * lib_ring_buffer_commit - Commit a record.
+ * @config: ring buffer instance configuration.
+ * @ctx: ring buffer context. (input arguments only)
+ *
+ * Atomic unordered slot commit. Increments the commit count in the
+ * specified sub-buffer, and delivers it if necessary.
+ */
+static inline
+void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ unsigned long offset_end = ctx_private->buf_offset;
+ unsigned long endidx = subbuf_index(offset_end - 1, chan);
+ unsigned long commit_count;
+ struct commit_counters_hot *cc_hot = shmp_index(handle,
+ buf->commit_hot, endidx);
+
+ if (caa_unlikely(!cc_hot))
+ return;
+
+ /*
+ * Must count record before incrementing the commit count.
+ */
+ subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
+
+ /*
+ * Order all writes to buffer before the commit count update that will
+ * determine that the subbuffer is full.
+ */
+ cmm_smp_wmb();
+
+ v_add(config, ctx_private->slot_size, &cc_hot->cc);
+
+ /*
+ * The commit count read can race with concurrent out-of-order commit
+ * count updates. This is only needed for lib_ring_buffer_check_deliver
+ * (for non-polling delivery only) and for
+ * lib_ring_buffer_write_commit_counter. The race can only cause the
+ * counter to be read with the same value more than once, which could
+ * cause:
+ * - Multiple delivery for the same sub-buffer (which is handled
+ * gracefully by the reader code) if the value is for a full
+ * sub-buffer. It's important that we can never miss a sub-buffer
+ * delivery. Re-reading the value after the v_add ensures this.
+ * - Reading a commit_count with a higher value than what was actually
+ * added to it for the lib_ring_buffer_write_commit_counter call
+ * (again caused by a concurrent committer). It does not matter,
+ * because this function is interested in the fact that the commit
+ * count catches up with the reserve offset for a specific sub-buffer,
+ * which is completely independent of the order.
+ */
+ commit_count = v_read(config, &cc_hot->cc);
+
+ lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
+ commit_count, endidx, handle, ctx_private->tsc);
+ /*
+ * Update used size at each commit. It's needed only for extracting
+ * ring_buffer buffers from vmcore, after a crash.
+ */
+ lib_ring_buffer_write_commit_counter(config, buf, chan,
+ offset_end, commit_count, handle, cc_hot);
+}
+
+/**
+ * lib_ring_buffer_try_discard_reserve - Try discarding a record.
+ * @config: ring buffer instance configuration.
+ * @ctx: ring buffer context. (input arguments only)
+ *
+ * Only succeeds if no other record has been written after the record to
+ * discard. If discard fails, the record must be committed to the buffer.
+ *
+ * Returns 0 upon success, -EPERM if the record cannot be discarded.
+ */
+static inline
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
+ const struct lttng_ust_lib_ring_buffer_ctx *ctx)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ unsigned long end_offset = ctx_private->pre_offset + ctx_private->slot_size;
+
+ /*
+ * We need to ensure that if the cmpxchg succeeds and discards the
+ * record, the next record will record a full TSC, because it cannot
+ * rely on the last_tsc associated with the discarded record to detect
+ * overflows. The only way to ensure this is to set the last_tsc to 0
+ * (assuming no 64-bit TSC overflow), which forces writing a full
+ * 64-bit timestamp in the next record.
+ *
+ * Note: if the discard fails, we must leave the TSC in the record
+ * header. It is needed to keep track of TSC overflows for the
+ * following records.
+ */
+ save_last_tsc(config, buf, 0ULL);
+
+ if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
+ != end_offset))
+ return -EPERM;
+ else
+ return 0;
+}
+
+static inline
+void channel_record_disable(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ uatomic_inc(&chan->record_disabled);
+}
+
+static inline
+void channel_record_enable(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ uatomic_dec(&chan->record_disabled);
+}
+
+static inline
+void lib_ring_buffer_record_disable(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ uatomic_inc(&buf->record_disabled);
+}
+
+static inline
+void lib_ring_buffer_record_enable(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer *buf)
+{
+ uatomic_dec(&buf->record_disabled);
+}
+
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_API_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring Buffer Library Synchronization Header (internal helpers).
+ *
+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
+ */
+
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+
+#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <signal.h>
+#include <stdint.h>
+#include <pthread.h>
+
+#include <lttng/ringbuffer-context.h>
+#include "ringbuffer-config.h"
+#include "backend_types.h"
+#include "backend_internal.h"
+#include "frontend_types.h"
+#include "shm.h"
+
+/* Buffer offset macros */
+
+/* buf_trunc mask selects only the buffer number. */
+static inline
+unsigned long buf_trunc(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return offset & ~(chan->backend.buf_size - 1);
+}
+
+/* Select the buffer number value (counter). */
+static inline
+unsigned long buf_trunc_val(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return buf_trunc(offset, chan) >> chan->backend.buf_size_order;
+}
+
+/* buf_offset mask selects only the offset within the current buffer. */
+static inline
+unsigned long buf_offset(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return offset & (chan->backend.buf_size - 1);
+}
+
+/* subbuf_offset mask selects the offset within the current subbuffer. */
+static inline
+unsigned long subbuf_offset(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return offset & (chan->backend.subbuf_size - 1);
+}
+
+/* subbuf_trunc mask selects the subbuffer number. */
+static inline
+unsigned long subbuf_trunc(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return offset & ~(chan->backend.subbuf_size - 1);
+}
+
+/* subbuf_align aligns the offset to the next subbuffer. */
+static inline
+unsigned long subbuf_align(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return (offset + chan->backend.subbuf_size)
+ & ~(chan->backend.subbuf_size - 1);
+}
+
+/* subbuf_index returns the index of the current subbuffer within the buffer. */
+static inline
+unsigned long subbuf_index(unsigned long offset,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return buf_offset(offset, chan) >> chan->backend.subbuf_size_order;
+}
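+
+/*
+ * Worked example (illustrative): with buf_size = 16384 (4 sub-buffers
+ * of subbuf_size = 4096) and offset = 37000:
+ *
+ *   buf_trunc(37000)     = 32768  start of the current buffer "lap"
+ *   buf_trunc_val(37000) = 2      lap counter
+ *   buf_offset(37000)    = 4232   offset within the buffer
+ *   subbuf_trunc(37000)  = 36864  start of the current sub-buffer
+ *   subbuf_offset(37000) = 136    offset within the sub-buffer
+ *   subbuf_align(37000)  = 40960  start of the next sub-buffer
+ *   subbuf_index(37000)  = 1      sub-buffer index within the buffer
+ */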
+
+/*
+ * Last TSC comparison functions. Check if the current TSC overflows tsc_bits
+ * bits from the last TSC read. When overflows are detected, the full 64-bit
+ * timestamp counter should be written in the record header. Reads and writes
+ * last_tsc atomically.
+ */
+
+#if (CAA_BITS_PER_LONG == 32)
+static inline
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+{
+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ return;
+
+ /*
+ * Ensure the compiler performs this update in a single instruction.
+ */
+ v_set(config, &buf->last_tsc, (unsigned long)(tsc >> config->tsc_bits));
+}
+
+static inline
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+{
+ unsigned long tsc_shifted;
+
+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ return 0;
+
+ tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
+ if (caa_unlikely(tsc_shifted
+ - (unsigned long)v_read(config, &buf->last_tsc)))
+ return 1;
+ else
+ return 0;
+}
+#else
+static inline
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+{
+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ return;
+
+ v_set(config, &buf->last_tsc, (unsigned long)tsc);
+}
+
+static inline
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
+{
+ if (config->tsc_bits == 0 || config->tsc_bits == 64)
+ return 0;
+
+ if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
+ >> config->tsc_bits))
+ return 1;
+ else
+ return 0;
+}
+#endif
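+
+/*
+ * Example (illustrative, assuming config->tsc_bits = 27): record
+ * headers then carry only the low 27 timestamp bits. last_tsc_overflow()
+ * returns nonzero as soon as the new timestamp differs from the last
+ * written one in bits 27 and above, and the client code then emits a
+ * full 64-bit timestamp in that record header.
+ */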
+
+extern
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+ __attribute__((visibility("hidden")));
+
+extern
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
+ enum switch_mode mode,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ unsigned long offset,
+ unsigned long commit_count,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle,
+ uint64_t tsc)
+ __attribute__((visibility("hidden")));
+
+/* Buffer write helpers */
+
+static inline
+void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ unsigned long offset)
+{
+ unsigned long consumed_old, consumed_new;
+
+ do {
+ consumed_old = uatomic_read(&buf->consumed);
+ /*
+ * If buffer is in overwrite mode, push the reader consumed
+ * count if the write position has reached it and we are not
+ * at the first iteration (don't push the reader farther than
+ * the writer). This operation can be done concurrently by many
+ * writers in the same buffer, the writer being at the farthest
+ * write position sub-buffer index in the buffer being the one
+ * which will win this loop.
+ */
+ if (caa_unlikely(subbuf_trunc(offset, chan)
+ - subbuf_trunc(consumed_old, chan)
+ >= chan->backend.buf_size))
+ consumed_new = subbuf_align(consumed_old, chan);
+ else
+ return;
+ } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+ consumed_new) != consumed_old));
+}
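+
+/*
+ * Worked example (illustrative): with 4 sub-buffers of 4096 bytes
+ * (buf_size = 16384), offset = 20480 and consumed_old = 4096:
+ * subbuf_trunc(20480) - subbuf_trunc(4096) = 16384 >= buf_size, so the
+ * consumed position is pushed to subbuf_align(4096) = 8192, the start
+ * of the next sub-buffer, keeping the reader out of the writer's way.
+ */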
+
+/*
+ * Move consumed position to the beginning of subbuffer in which the
+ * write offset is. Should only be used on ring buffers that are not
+ * actively being written into, because clear_reader does not take into
+ * account the commit counters when moving the consumed position, which
+ * can make concurrent trace producers or consumers observe consumed
+ * position further than the write offset, which breaks ring buffer
+ * algorithm guarantees.
+ */
+static inline
+void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long offset, consumed_old, consumed_new;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
+ config = &chan->backend.config;
+
+ do {
+ offset = v_read(config, &buf->offset);
+ consumed_old = uatomic_read(&buf->consumed);
+ CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
+ - subbuf_trunc(consumed_old, chan))
+ < 0);
+ consumed_new = subbuf_trunc(offset, chan);
+ } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+ consumed_new) != consumed_old));
+}
+
+static inline
+int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return !!subbuf_offset(v_read(config, &buf->offset), chan);
+}
+
+static inline
+unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ return subbuffer_get_data_size(config, &buf->backend, idx, handle);
+}
+
+/*
+ * Check if all space reservations in a buffer have been committed. This
+ * helps determine whether an execution context is nested (for per-cpu
+ * buffers only). This is a very specific ftrace use-case, so we keep this
+ * as an "internal" API.
+ */
+static inline
+int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long offset, idx, commit_count;
+ struct commit_counters_hot *cc_hot;
+
+ CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
+ CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
+
+ /*
+ * Read offset and commit count in a loop so they are both read
+	 * atomically wrt interrupts. We deal with interrupt concurrency by
+ * restarting both reads if the offset has been pushed. Note that given
+ * we only have to deal with interrupt concurrency here, an interrupt
+ * modifying the commit count will also modify "offset", so it is safe
+ * to only check for offset modifications.
+ */
+ do {
+ offset = v_read(config, &buf->offset);
+ idx = subbuf_index(offset, chan);
+ cc_hot = shmp_index(handle, buf->commit_hot, idx);
+ if (caa_unlikely(!cc_hot))
+ return 0;
+ commit_count = v_read(config, &cc_hot->cc);
+ } while (offset != v_read(config, &buf->offset));
+
+ return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+ - (commit_count & chan->commit_count_mask) == 0);
+}
+
+/*
+ * Receive end of subbuffer TSC as parameter. It has been read in the
+ * space reservation loop of either reserve or switch, which ensures it
+ * progresses monotonically with event records in the buffer. Therefore,
+ * it ensures that the end timestamp of a subbuffer is <= begin
+ * timestamp of the following subbuffers.
+ */
+static inline
+void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ unsigned long offset,
+ unsigned long commit_count,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle,
+ uint64_t tsc)
+{
+ unsigned long old_commit_count = commit_count
+ - chan->backend.subbuf_size;
+
+ /* Check if all commits have been done */
+ if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+ - (old_commit_count & chan->commit_count_mask) == 0))
+ lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
+ commit_count, idx, handle, tsc);
+}
+
+/*
+ * lib_ring_buffer_write_commit_counter
+ *
+ * For flight recording: must be called after commit.
+ * This function increments the subbuffer's commit_seq counter each time the
+ * commit count catches up to the reserve offset (modulo sub-buffer size). It
+ * is useful for crash dumps.
+ */
+static inline
+void lib_ring_buffer_write_commit_counter(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ unsigned long buf_offset,
+ unsigned long commit_count,
+ struct lttng_ust_shm_handle *handle __attribute__((unused)),
+ struct commit_counters_hot *cc_hot)
+{
+ unsigned long commit_seq_old;
+
+ if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
+ return;
+
+ /*
+ * subbuf_offset includes commit_count_mask. We can simply
+ * compare the offsets within the subbuffer without caring about
+ * buffer full/empty mismatch because offset is never zero here
+ * (subbuffer header and record headers have non-zero length).
+ */
+ if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
+ return;
+
+ commit_seq_old = v_read(config, &cc_hot->seq);
+ if (caa_likely((long) (commit_seq_old - commit_count) < 0))
+ v_set(config, &cc_hot->seq, commit_count);
+}
+
+extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
+ struct channel_backend *chanb, int cpu,
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
+ __attribute__((visibility("hidden")));
+
+extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/* Keep track of trap nesting inside ring buffer code */
+extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring Buffer Library Synchronization Header (types).
+ *
+ * See ring_buffer_frontend.c for more information on wait-free algorithms.
+ */
+
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
+#define _LTTNG_RING_BUFFER_FRONTEND_TYPES_H
+
+#include <stdint.h>
+#include <string.h>
+#include <time.h> /* for timer_t */
+
+#include <urcu/list.h>
+#include <urcu/uatomic.h>
+
+#include <lttng/ringbuffer-context.h>
+#include "ringbuffer-config.h"
+#include <usterr-signal-safe.h>
+#include "backend_types.h"
+#include "shm_internal.h"
+#include "shm_types.h"
+#include "vatomic.h"
+
+#define LIB_RING_BUFFER_MAX_NESTING 5
+
+/*
+ * A switch is done during tracing or as a final flush after tracing (so it
+ * won't write in the new sub-buffer).
+ */
+enum switch_mode { SWITCH_ACTIVE, SWITCH_FLUSH };
+
+/* channel: collection of per-cpu ring buffers. */
+#define RB_CHANNEL_PADDING 32
+struct lttng_ust_lib_ring_buffer_channel {
+ int record_disabled;
+ unsigned long commit_count_mask; /*
+ * Commit count mask, removing
+ * the MSBs corresponding to
+ * bits used to represent the
+ * subbuffer index.
+ */
+
+ unsigned long switch_timer_interval; /* Buffer flush (us) */
+ timer_t switch_timer;
+ int switch_timer_enabled;
+
+ unsigned long read_timer_interval; /* Reader wakeup (us) */
+ timer_t read_timer;
+ int read_timer_enabled;
+
+ int finalized; /* Has channel been finalized */
+ size_t priv_data_offset; /* Offset of private data channel config */
+ unsigned int nr_streams; /* Number of streams */
+ struct lttng_ust_shm_handle *handle;
+ /* Extended options. */
+ union {
+ struct {
+ int32_t blocking_timeout_ms;
+ void *priv; /* Private data pointer. */
+ } s;
+ char padding[RB_CHANNEL_PADDING];
+ } u;
+ /*
+ * Associated backend contains a variable-length array. Needs to
+ * be last member.
+ */
+ struct channel_backend backend; /* Associated backend */
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+/* Per-subbuffer commit counters used on the hot path */
+#define RB_COMMIT_COUNT_HOT_PADDING 16
+struct commit_counters_hot {
+ union v_atomic cc; /* Commit counter */
+ union v_atomic seq; /* Consecutive commits */
+ char padding[RB_COMMIT_COUNT_HOT_PADDING];
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+/* Per-subbuffer commit counters used only on cold paths */
+#define RB_COMMIT_COUNT_COLD_PADDING 24
+struct commit_counters_cold {
+ union v_atomic cc_sb; /* Incremented _once_ at sb switch */
+ char padding[RB_COMMIT_COUNT_COLD_PADDING];
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+/* ring buffer state */
+#define RB_CRASH_DUMP_ABI_LEN 256
+#define RB_RING_BUFFER_PADDING 60
+
+#define RB_CRASH_DUMP_ABI_MAGIC_LEN 16
+
+/*
+ * The 128-bit magic number is xor'd in the process data so it does not
+ * cause a false positive when searching for buffers by scanning memory.
+ * The actual magic number is:
+ * 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17, 0x7B, 0xF1,
+ * 0x77, 0xBF, 0x17, 0x7B, 0xF1, 0x77, 0xBF, 0x17,
+ */
+#define RB_CRASH_DUMP_ABI_MAGIC_XOR \
+ { \
+ 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, 0x77 ^ 0xFF, \
+ 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, 0xF1 ^ 0xFF, \
+ 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, 0x7B ^ 0xFF, \
+ 0xF1 ^ 0xFF, 0x77 ^ 0xFF, 0xBF ^ 0xFF, 0x17 ^ 0xFF, \
+ }
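+
+/*
+ * A crash-dump extraction tool recovers the actual magic number by
+ * xor'ing each stored byte back with 0xFF, e.g. (illustrative sketch,
+ * mirroring init_crash_abi()):
+ *
+ *   for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
+ *       magic[i] = stored_magic[i] ^ 0xFF;
+ */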
+
+#define RB_CRASH_ENDIAN 0x1234
+
+#define RB_CRASH_DUMP_ABI_MAJOR 0
+#define RB_CRASH_DUMP_ABI_MINOR 0
+
+enum lttng_crash_type {
+ LTTNG_CRASH_TYPE_UST = 0,
+ LTTNG_CRASH_TYPE_KERNEL = 1,
+};
+
+struct lttng_crash_abi {
+ uint8_t magic[RB_CRASH_DUMP_ABI_MAGIC_LEN];
+	uint64_t mmap_length;	/* Overall length of crash record */
+ uint16_t endian; /*
+ * { 0x12, 0x34 }: big endian
+ * { 0x34, 0x12 }: little endian
+ */
+ uint16_t major; /* Major number. */
+ uint16_t minor; /* Minor number. */
+ uint8_t word_size; /* Word size (bytes). */
+ uint8_t layout_type; /* enum lttng_crash_type */
+
+ struct {
+ uint32_t prod_offset;
+ uint32_t consumed_offset;
+ uint32_t commit_hot_array;
+ uint32_t commit_hot_seq;
+ uint32_t buf_wsb_array;
+ uint32_t buf_wsb_id;
+ uint32_t sb_array;
+ uint32_t sb_array_shmp_offset;
+ uint32_t sb_backend_p_offset;
+ uint32_t content_size;
+ uint32_t packet_size;
+ } __attribute__((packed)) offset;
+ struct {
+ uint8_t prod_offset;
+ uint8_t consumed_offset;
+ uint8_t commit_hot_seq;
+ uint8_t buf_wsb_id;
+ uint8_t sb_array_shmp_offset;
+ uint8_t sb_backend_p_offset;
+ uint8_t content_size;
+ uint8_t packet_size;
+ } __attribute__((packed)) length;
+ struct {
+ uint32_t commit_hot_array;
+ uint32_t buf_wsb_array;
+ uint32_t sb_array;
+ } __attribute__((packed)) stride;
+
+ uint64_t buf_size; /* Size of the buffer */
+ uint64_t subbuf_size; /* Sub-buffer size */
+ uint64_t num_subbuf; /* Number of sub-buffers for writer */
+ uint32_t mode; /* Buffer mode: 0: overwrite, 1: discard */
+} __attribute__((packed));
+
+struct lttng_ust_lib_ring_buffer {
+ /* First 32 bytes are for the buffer crash dump ABI */
+ struct lttng_crash_abi crash_abi;
+
+ /* 32 bytes cache-hot cacheline */
+ union v_atomic __attribute__((aligned(32))) offset;
+ /* Current offset in the buffer */
+ DECLARE_SHMP(struct commit_counters_hot, commit_hot);
+ /* Commit count per sub-buffer */
+ long consumed; /*
+				 * Consumed (read) position in the buffer;
+				 * standard atomic access (shared)
+ */
+ int record_disabled;
+ /* End of cache-hot 32 bytes cacheline */
+
+ union v_atomic last_tsc; /*
+ * Last timestamp written in the buffer.
+ */
+
+ struct lttng_ust_lib_ring_buffer_backend backend;
+ /* Associated backend */
+
+ DECLARE_SHMP(struct commit_counters_cold, commit_cold);
+ /* Commit count per sub-buffer */
+ DECLARE_SHMP(uint64_t, ts_end); /*
+ * timestamp_end per sub-buffer.
+ * Time is sampled by the
+ * switch_*_end() callbacks
+ * which are the last space
+ * reservation performed in the
+ * sub-buffer before it can be
+ * fully committed and
+ * delivered. This time value is
+ * then read by the deliver
+ * callback, performed by the
+ * last commit before the buffer
+ * becomes readable.
+ */
+ long active_readers; /*
+ * Active readers count
+ * standard atomic access (shared)
+ */
+ /* Dropped records */
+ union v_atomic records_lost_full; /* Buffer full */
+ union v_atomic records_lost_wrap; /* Nested wrap-around */
+ union v_atomic records_lost_big; /* Events too big */
+ union v_atomic records_count; /* Number of records written */
+ union v_atomic records_overrun; /* Number of overwritten records */
+ //wait_queue_head_t read_wait; /* reader buffer-level wait queue */
+ int finalized; /* buffer has been finalized */
+ unsigned long get_subbuf_consumed; /* Read-side consumed */
+ unsigned long prod_snapshot; /* Producer count snapshot */
+ unsigned long cons_snapshot; /* Consumer count snapshot */
+ unsigned int get_subbuf:1; /* Sub-buffer being held by reader */
+ /* shmp pointer to self */
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer, self);
+ char padding[RB_RING_BUFFER_PADDING];
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
+/*
+ * ring buffer private context
+ *
+ * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lttng_ust_lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ *
+ * This context is allocated on an internal shadow-stack by a successful reserve
+ * operation, used by align/write, and freed by commit.
+ */
+
+struct lttng_ust_lib_ring_buffer_ctx_private {
+ /* input received by lib_ring_buffer_reserve(). */
+ struct lttng_ust_lib_ring_buffer_ctx *pub;
+ struct lttng_ust_lib_ring_buffer_channel *chan; /* channel */
+
+ /* output from lib_ring_buffer_reserve() */
+ int reserve_cpu; /* processor id updated by the reserve */
+ size_t slot_size; /* size of the reserved slot */
+ unsigned long buf_offset; /* offset following the record header */
+ unsigned long pre_offset; /*
+ * Initial offset position _before_
+ * the record is written. Positioned
+ * prior to record header alignment
+ * padding.
+ */
+ uint64_t tsc; /* time-stamp counter value */
+ unsigned int rflags; /* reservation flags */
+ void *ip; /* caller ip address */
+
+ struct lttng_ust_lib_ring_buffer *buf; /*
+ * buffer corresponding to processor id
+ * for this channel
+ */
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+};
+
+static inline
+void *channel_get_private_config(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return ((char *) chan) + chan->priv_data_offset;
+}
+
+static inline
+void *channel_get_private(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ return chan->u.s.priv;
+}
+
+static inline
+void channel_set_private(struct lttng_ust_lib_ring_buffer_channel *chan, void *priv)
+{
+ chan->u.s.priv = priv;
+}
+
+#ifndef __rb_same_type
+#define __rb_same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
+/*
+ * Issue warnings and disable channels upon internal error.
+ * Can receive struct lttng_ust_lib_ring_buffer_channel or struct
+ * channel_backend parameters.
+ */
+#define CHAN_WARN_ON(c, cond) \
+ ({ \
+ struct lttng_ust_lib_ring_buffer_channel *__chan; \
+ int _____ret = caa_unlikely(cond); \
+ if (_____ret) { \
+ if (__rb_same_type(*(c), struct channel_backend)) \
+ __chan = caa_container_of((void *) (c), \
+ struct lttng_ust_lib_ring_buffer_channel, \
+ backend); \
+ else if (__rb_same_type(*(c), \
+ struct lttng_ust_lib_ring_buffer_channel)) \
+ __chan = (void *) (c); \
+ else \
+ BUG_ON(1); \
+ uatomic_inc(&__chan->record_disabled); \
+ WARN_ON(1); \
+ } \
+ _____ret = _____ret; /* For clang "unused result". */ \
+ })
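+
+/*
+ * Usage example (illustrative), as seen in the frontend helpers:
+ *
+ *   CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
+ *
+ * On an internal invariant violation this warns once and disables the
+ * channel rather than letting it corrupt trace data.
+ */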
+
+/**
+ * lttng_ust_lib_ring_buffer_align_ctx - Align context offset on "alignment"
+ * @ctx: ring buffer context.
+ * @alignment: alignment (in bytes, power of 2) to apply to the buffer offset.
+ */
+static inline
+void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ size_t alignment)
+ lttng_ust_notrace;
+static inline
+void lttng_ust_lib_ring_buffer_align_ctx(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ size_t alignment)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+
+ ctx_private->buf_offset += lttng_ust_lib_ring_buffer_align(ctx_private->buf_offset,
+ alignment);
+}
+
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_TYPES_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_GETCPU_H
+#define _LTTNG_GETCPU_H
+
+#include <urcu/compiler.h>
+#include <urcu/system.h>
+#include <urcu/arch.h>
+
+void lttng_ust_getcpu_init(void)
+ __attribute__((visibility("hidden")));
+
+extern int (*lttng_get_cpu)(void)
+ __attribute__((visibility("hidden")));
+
+#ifdef LTTNG_UST_DEBUG_VALGRIND
+
+/*
+ * Fallback on cpu 0 if liblttng-ust is built with Valgrind support.
+ * get_cpu() returns the current CPU number. It may change due to
+ * migration, so it is only statistically accurate.
+ */
+static inline
+int lttng_ust_get_cpu_internal(void)
+{
+ return 0;
+}
+
+#else
+
+/*
+ * sched_getcpu.
+ */
+#ifdef __linux__
+
+#if !HAVE_SCHED_GETCPU
+#include <sys/syscall.h>
+#define __getcpu(cpu, node, cache) syscall(__NR_getcpu, cpu, node, cache)
+/*
+ * If getcpu is not implemented in the kernel, use cpu 0 as fallback.
+ */
+static inline
+int lttng_ust_get_cpu_internal(void)
+{
+ int cpu, ret;
+
+ ret = __getcpu(&cpu, NULL, NULL);
+ if (caa_unlikely(ret < 0))
+ return 0;
+ return cpu;
+}
+#else /* HAVE_SCHED_GETCPU */
+#include <sched.h>
+
+/*
+ * If getcpu is not implemented in the kernel, use cpu 0 as fallback.
+ */
+static inline
+int lttng_ust_get_cpu_internal(void)
+{
+ int cpu;
+
+ cpu = sched_getcpu();
+ if (caa_unlikely(cpu < 0))
+ return 0;
+ return cpu;
+}
+#endif /* HAVE_SCHED_GETCPU */
+
+#elif (defined(__FreeBSD__) || defined(__CYGWIN__))
+
+/*
+ * FreeBSD and Cygwin do not allow querying the CPU ID. Always use CPU
+ * number 0, with the associated performance degradation on SMP.
+ */
+static inline
+int lttng_ust_get_cpu_internal(void)
+{
+ return 0;
+}
+
+#else
+#error "Please add support for your OS into liblttng-ust/compat.h."
+#endif
+
+#endif
+
+static inline
+int lttng_ust_get_cpu(void)
+{
+ int (*getcpu)(void) = CMM_LOAD_SHARED(lttng_get_cpu);
+
+ if (caa_likely(!getcpu)) {
+ return lttng_ust_get_cpu_internal();
+ } else {
+ return getcpu();
+ }
+}
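+
+/*
+ * Note: a user-provided getcpu callback (e.g. one installed through the
+ * public lttng_ust_getcpu_override() interface) takes precedence over
+ * the default sched_getcpu()-based implementation when set.
+ */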
+
+#endif /* _LTTNG_GETCPU_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2019 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
+ */
+
+#ifndef _LTTNG_MMAP_H
+#define _LTTNG_MMAP_H
+
+#include <sys/mman.h>
+
+#if defined(__linux__) && defined(MAP_POPULATE)
+# define LTTNG_MAP_POPULATE MAP_POPULATE
+#else
+# define LTTNG_MAP_POPULATE 0
+#endif /* __linux__ && MAP_POPULATE */
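+
+/*
+ * Usage sketch (illustrative): LTTNG_MAP_POPULATE is meant to be or'd
+ * into the mmap(2) flags so pages are pre-faulted where supported, and
+ * expands to 0 (a no-op) elsewhere:
+ *
+ *   p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ *            MAP_SHARED | LTTNG_MAP_POPULATE, fd, 0);
+ */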
+
+#endif /* _LTTNG_MMAP_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_RING_BUFFER_NOHZ_H
+#define _LTTNG_RING_BUFFER_NOHZ_H
+
+#ifdef CONFIG_LIB_RING_BUFFER
+void lib_ring_buffer_tick_nohz_flush(void)
+ __attribute__((visibility("hidden")));
+
+void lib_ring_buffer_tick_nohz_stop(void)
+ __attribute__((visibility("hidden")));
+
+void lib_ring_buffer_tick_nohz_restart(void)
+ __attribute__((visibility("hidden")));
+
+#else
+
+static inline void lib_ring_buffer_tick_nohz_flush(void)
+{
+}
+
+static inline void lib_ring_buffer_tick_nohz_stop(void)
+{
+}
+
+static inline void lib_ring_buffer_tick_nohz_restart(void)
+{
+}
+#endif
+
+#endif /* _LTTNG_RING_BUFFER_NOHZ_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2012-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H
+#define _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H
+
+void lttng_fixup_ringbuffer_tls(void)
+ __attribute__((visibility("hidden")));
+
+void lttng_ust_ringbuffer_set_allow_blocking(void)
+ __attribute__((visibility("hidden")));
+
+#endif /* _LTTNG_UST_LIB_RINGBUFFER_RB_INIT_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <urcu/arch.h>
+#include <limits.h>
+
+#include <lttng/ust-utils.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "ringbuffer-config.h"
+#include "vatomic.h"
+#include "backend.h"
+#include "frontend.h"
+#include "smp.h"
+#include "shm.h"
+#include "ust-compat.h"
+
+/**
+ * lib_ring_buffer_backend_allocate - allocate a channel buffer
+ * @config: ring buffer instance configuration
+ * @bufb: buffer backend structure
+ * @size: total size of the buffer
+ * @num_subbuf: number of subbuffers
+ * @extra_reader_sb: need extra subbuffer for reader
+ */
+static
+int lib_ring_buffer_backend_allocate(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t size __attribute__((unused)), size_t num_subbuf,
+ int extra_reader_sb,
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
+{
+ struct channel_backend *chanb;
+ unsigned long subbuf_size, mmap_offset = 0;
+ unsigned long num_subbuf_alloc;
+ unsigned long i;
+ long page_size;
+
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return -EINVAL;
+
+ subbuf_size = chanb->subbuf_size;
+ num_subbuf_alloc = num_subbuf;
+
+ if (extra_reader_sb)
+ num_subbuf_alloc++;
+
+ page_size = LTTNG_UST_PAGE_SIZE;
+ if (page_size <= 0) {
+ goto page_size_error;
+ }
+
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ set_shmp(bufb->array, zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc));
+ if (caa_unlikely(!shmp(handle, bufb->array)))
+ goto array_error;
+
+ /*
+ * This is the largest element (the buffer pages) which needs to
+ * be aligned on page size.
+ */
+ align_shm(shmobj, page_size);
+ set_shmp(bufb->memory_map, zalloc_shm(shmobj,
+ subbuf_size * num_subbuf_alloc));
+ if (caa_unlikely(!shmp(handle, bufb->memory_map)))
+ goto memory_map_error;
+
+ /* Allocate backend pages array elements */
+ for (i = 0; i < num_subbuf_alloc; i++) {
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ set_shmp(shmp_index(handle, bufb->array, i)->shmp,
+ zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages)));
+ if (!shmp(handle, shmp_index(handle, bufb->array, i)->shmp))
+ goto free_array;
+ }
+
+ /* Allocate write-side subbuffer table */
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ set_shmp(bufb->buf_wsb, zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer)
+ * num_subbuf));
+ if (caa_unlikely(!shmp(handle, bufb->buf_wsb)))
+ goto free_array;
+
+ for (i = 0; i < num_subbuf; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+
+ sb = shmp_index(handle, bufb->buf_wsb, i);
+ if (!sb)
+ goto free_array;
+ sb->id = subbuffer_id(config, 0, 1, i);
+ }
+
+ /* Assign read-side subbuffer table */
+ if (extra_reader_sb)
+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
+ num_subbuf_alloc - 1);
+ else
+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+
+ /* Allocate subbuffer packet counter table */
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
+ set_shmp(bufb->buf_cnt, zalloc_shm(shmobj,
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_counts)
+ * num_subbuf));
+ if (caa_unlikely(!shmp(handle, bufb->buf_cnt)))
+ goto free_wsb;
+
+ /* Assign pages to page index */
+ for (i = 0; i < num_subbuf_alloc; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+ struct shm_ref ref;
+
+ ref.index = bufb->memory_map._ref.index;
+ ref.offset = bufb->memory_map._ref.offset;
+ ref.offset += i * subbuf_size;
+
+ sbp = shmp_index(handle, bufb->array, i);
+ if (!sbp)
+ goto free_array;
+ pages = shmp(handle, sbp->shmp);
+ if (!pages)
+ goto free_array;
+ set_shmp(pages->p, ref);
+ if (config->output == RING_BUFFER_MMAP) {
+ pages->mmap_offset = mmap_offset;
+ mmap_offset += subbuf_size;
+ }
+ }
+ return 0;
+
+free_wsb:
+ /* bufb->buf_wsb will be freed by shm teardown */
+free_array:
+ /* bufb->array[i] will be freed by shm teardown */
+memory_map_error:
+ /* bufb->array will be freed by shm teardown */
+array_error:
+page_size_error:
+ return -ENOMEM;
+}
+
+int lib_ring_buffer_backend_create(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct channel_backend *chanb, int cpu,
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
+
+ set_shmp(bufb->chan, handle->chan._ref);
+ bufb->cpu = cpu;
+
+ return lib_ring_buffer_backend_allocate(config, bufb, chanb->buf_size,
+ chanb->num_subbuf,
+ chanb->extra_reader_sb,
+ handle, shmobj);
+}
+
+void lib_ring_buffer_backend_reset(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long num_subbuf_alloc;
+ unsigned int i;
+
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return;
+ config = &chanb->config;
+
+ num_subbuf_alloc = chanb->num_subbuf;
+ if (chanb->extra_reader_sb)
+ num_subbuf_alloc++;
+
+ for (i = 0; i < chanb->num_subbuf; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+
+ sb = shmp_index(handle, bufb->buf_wsb, i);
+ if (!sb)
+ return;
+ sb->id = subbuffer_id(config, 0, 1, i);
+ }
+ if (chanb->extra_reader_sb)
+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1,
+ num_subbuf_alloc - 1);
+ else
+ bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
+
+ for (i = 0; i < num_subbuf_alloc; i++) {
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *sbp;
+ struct lttng_ust_lib_ring_buffer_backend_pages *pages;
+
+ sbp = shmp_index(handle, bufb->array, i);
+ if (!sbp)
+ return;
+ pages = shmp(handle, sbp->shmp);
+ if (!pages)
+ return;
+ /* Don't reset mmap_offset */
+ v_set(config, &pages->records_commit, 0);
+ v_set(config, &pages->records_unread, 0);
+ pages->data_size = 0;
+ /* Don't reset backend page and virt addresses */
+ }
+ /* Don't reset num_pages_per_subbuf, cpu, allocated */
+ v_set(config, &bufb->records_read, 0);
+}
+
+/*
+ * The frontend is responsible for also calling ring_buffer_backend_reset for
+ * each buffer when calling channel_backend_reset.
+ */
+void channel_backend_reset(struct channel_backend *chanb)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
+ struct lttng_ust_lib_ring_buffer_channel, backend);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
+
+ /*
+ * Don't reset buf_size, subbuf_size, subbuf_size_order,
+ * num_subbuf_order, buf_size_order, extra_reader_sb, num_subbuf,
+ * priv, notifiers, config, cpumask and name.
+ */
+ chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+}
+
+/**
+ * channel_backend_init - initialize a channel backend
+ * @chanb: channel backend
+ * @name: channel name
+ * @config: client ring buffer configuration
+ * @subbuf_size: size of sub-buffers (at least a page, power of 2)
+ * @num_subbuf: number of sub-buffers (power of 2)
+ * @handle: shared memory handle
+ * @stream_fds: stream file descriptors.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * Creates per-cpu channel buffers using the sizes and attributes
+ * specified.
+ *
+ * Called with CPU hotplug disabled.
+ */
+int channel_backend_init(struct channel_backend *chanb,
+ const char *name,
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ size_t subbuf_size, size_t num_subbuf,
+ struct lttng_ust_shm_handle *handle,
+ const int *stream_fds)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
+ struct lttng_ust_lib_ring_buffer_channel, backend);
+ unsigned int i;
+ int ret;
+ size_t shmsize = 0, num_subbuf_alloc;
+ long page_size;
+
+ if (!name)
+ return -EPERM;
+
+ page_size = LTTNG_UST_PAGE_SIZE;
+ if (page_size <= 0) {
+ return -ENOMEM;
+ }
+	/* Check that the subbuffer size is at least as large as a page. */
+ if (subbuf_size < page_size)
+ return -EINVAL;
+
+ /*
+ * Make sure the number of subbuffers and subbuffer size are
+ * power of 2, and nonzero.
+ */
+ if (!subbuf_size || (subbuf_size & (subbuf_size - 1)))
+ return -EINVAL;
+ if (!num_subbuf || (num_subbuf & (num_subbuf - 1)))
+ return -EINVAL;
+ /*
+ * Overwrite mode buffers require at least 2 subbuffers per
+ * buffer.
+ */
+ if (config->mode == RING_BUFFER_OVERWRITE && num_subbuf < 2)
+ return -EINVAL;
+
+ ret = subbuffer_id_check_index(config, num_subbuf);
+ if (ret)
+ return ret;
+
+ chanb->buf_size = num_subbuf * subbuf_size;
+ chanb->subbuf_size = subbuf_size;
+ chanb->buf_size_order = get_count_order(chanb->buf_size);
+ chanb->subbuf_size_order = get_count_order(subbuf_size);
+ chanb->num_subbuf_order = get_count_order(num_subbuf);
+ chanb->extra_reader_sb =
+ (config->mode == RING_BUFFER_OVERWRITE) ? 1 : 0;
+ chanb->num_subbuf = num_subbuf;
+ strncpy(chanb->name, name, NAME_MAX);
+ chanb->name[NAME_MAX - 1] = '\0';
+ memcpy(&chanb->config, config, sizeof(*config));
+
+ /* Per-cpu buffer size: control (prior to backend) */
+ shmsize = lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer);
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_hot));
+ shmsize += sizeof(struct commit_counters_hot) * num_subbuf;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct commit_counters_cold));
+ shmsize += sizeof(struct commit_counters_cold) * num_subbuf;
+ /* Sampled timestamp end */
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(uint64_t));
+ shmsize += sizeof(uint64_t) * num_subbuf;
+
+ /* Per-cpu buffer size: backend */
+	/* num_subbuf + 1 is the worst case */
+ num_subbuf_alloc = num_subbuf + 1;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages_shmp));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp) * num_subbuf_alloc;
+ shmsize += lttng_ust_offset_align(shmsize, page_size);
+ shmsize += subbuf_size * num_subbuf_alloc;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_pages));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_pages) * num_subbuf_alloc;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_subbuffer));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer) * num_subbuf;
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_backend_counts));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_backend_counts) * num_subbuf;
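+
+	/*
+	 * Resulting layout of each per-stream shm object (illustrative),
+	 * in allocation order: ring buffer struct, hot and cold commit
+	 * counters, ts_end array, backend pages shmp array, page-aligned
+	 * sub-buffer data, backend pages structs, write-side sub-buffer
+	 * table, packet counts table.
+	 */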
+
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+ struct lttng_ust_lib_ring_buffer *buf;
+ /*
+ * We need to allocate for all possible cpus.
+ */
+ for_each_possible_cpu(i) {
+ struct shm_object *shmobj;
+
+ shmobj = shm_object_table_alloc(handle->table, shmsize,
+ SHM_OBJECT_SHM, stream_fds[i], i);
+ if (!shmobj)
+ goto end;
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+ set_shmp(chanb->buf[i].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
+ buf = shmp(handle, chanb->buf[i].shmp);
+ if (!buf)
+ goto end;
+ set_shmp(buf->self, chanb->buf[i].shmp._ref);
+ ret = lib_ring_buffer_create(buf, chanb, i,
+ handle, shmobj);
+ if (ret)
+ goto free_bufs; /* cpu hotplug locked */
+ }
+ } else {
+ struct shm_object *shmobj;
+ struct lttng_ust_lib_ring_buffer *buf;
+
+ shmobj = shm_object_table_alloc(handle->table, shmsize,
+ SHM_OBJECT_SHM, stream_fds[0], -1);
+ if (!shmobj)
+ goto end;
+ align_shm(shmobj, __alignof__(struct lttng_ust_lib_ring_buffer));
+ set_shmp(chanb->buf[0].shmp, zalloc_shm(shmobj, sizeof(struct lttng_ust_lib_ring_buffer)));
+ buf = shmp(handle, chanb->buf[0].shmp);
+ if (!buf)
+ goto end;
+ set_shmp(buf->self, chanb->buf[0].shmp._ref);
+ ret = lib_ring_buffer_create(buf, chanb, -1,
+ handle, shmobj);
+ if (ret)
+ goto free_bufs;
+ }
+ chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
+
+ return 0;
+
+free_bufs:
+ /* We only free the buffer data upon shm teardown */
+end:
+ return -ENOMEM;
+}
+
+/**
+ * channel_backend_free - destroy the channel
+ * @chanb: channel backend
+ * @handle: shared memory handle
+ *
+ * Destroys all channel buffers and frees the channel.
+ */
+void channel_backend_free(struct channel_backend *chanb __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+ /* SHM teardown takes care of everything */
+}
+
+/**
+ * lib_ring_buffer_read - read data from ring_buffer_buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @dest : destination address
+ * @len : length to copy to destination
+ *
+ * Should be protected by get_subbuf/put_subbuf.
+ * Returns the length copied.
+ */
+size_t lib_ring_buffer_read(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+ void *dest, size_t len, struct lttng_ust_shm_handle *handle)
+{
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ ssize_t orig_len;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ unsigned long sb_bindex, id;
+ void *src;
+
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return 0;
+ config = &chanb->config;
+ orig_len = len;
+ offset &= chanb->buf_size - 1;
+
+ if (caa_unlikely(!len))
+ return 0;
+ id = bufb->buf_rsb.id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return 0;
+ /*
+ * Underlying layer should never ask for reads across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return 0;
+ src = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!src))
+ return 0;
+ memcpy(dest, src, len);
+ return orig_len;
+}
+
+/**
+ * lib_ring_buffer_read_cstr - read a C-style string from ring_buffer.
+ * @bufb : buffer backend
+ * @offset : offset within the buffer
+ * @dest : destination address
+ * @len : destination's length
+ *
+ * Return the string's length (possibly truncated to fit @len), or -EINVAL on error.
+ * Should be protected by get_subbuf/put_subbuf.
+ * Destination length should be at least 1 to hold '\0'.
+ */
+int lib_ring_buffer_read_cstr(struct lttng_ust_lib_ring_buffer_backend *bufb, size_t offset,
+ void *dest, size_t len, struct lttng_ust_shm_handle *handle)
+{
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+	ssize_t string_len;
+ char *str;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ unsigned long sb_bindex, id;
+
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return -EINVAL;
+ config = &chanb->config;
+ if (caa_unlikely(!len))
+ return -EINVAL;
+ offset &= chanb->buf_size - 1;
+ id = bufb->buf_rsb.id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return -EINVAL;
+ /*
+ * Underlying layer should never ask for reads across
+ * subbuffers.
+ */
+ CHAN_WARN_ON(chanb, offset >= chanb->buf_size);
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return -EINVAL;
+ str = shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+ if (caa_unlikely(!str))
+ return -EINVAL;
+	string_len = strnlen(str, len);
+	if (dest && len) {
+		memcpy(dest, str, string_len);
+		/* Always null-terminate within the destination's bounds. */
+		if ((size_t) string_len == len)
+			string_len--;
+		((char *) dest)[string_len] = '\0';
+	}
+	return string_len;
+}
+
+/**
+ * lib_ring_buffer_read_offset_address - get address of a buffer location
+ * @bufb : buffer backend
+ * @offset : offset within the buffer.
+ *
+ * Return the address where a given offset is located (for read).
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's never on a page boundary, it's safe to read/write directly
+ * from/to this address, as long as the read/write is never bigger than
+ * a page size.
+ */
+void *lib_ring_buffer_read_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long sb_bindex, id;
+
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return NULL;
+ config = &chanb->config;
+ offset &= chanb->buf_size - 1;
+ id = bufb->buf_rsb.id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return NULL;
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return NULL;
+ return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+}
+
+/**
+ * lib_ring_buffer_offset_address - get address of a location within the buffer
+ * @bufb : buffer backend
+ * @offset : offset within the buffer.
+ *
+ * Return the address where a given offset is located.
+ * Should be used to get the current subbuffer header pointer. Given we know
+ * it's always at the beginning of a page, it's safe to write directly to this
+ * address, as long as the write is never bigger than a page size.
+ */
+void *lib_ring_buffer_offset_address(struct lttng_ust_lib_ring_buffer_backend *bufb,
+ size_t offset,
+ struct lttng_ust_shm_handle *handle)
+{
+ size_t sbidx;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+ struct channel_backend *chanb;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long sb_bindex, id;
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *sb;
+
+ chanb = &shmp(handle, bufb->chan)->backend;
+ if (!chanb)
+ return NULL;
+ config = &chanb->config;
+ offset &= chanb->buf_size - 1;
+ sbidx = offset >> chanb->subbuf_size_order;
+ sb = shmp_index(handle, bufb->buf_wsb, sbidx);
+ if (!sb)
+ return NULL;
+ id = sb->id;
+ sb_bindex = subbuffer_id_get_index(config, id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return NULL;
+ CHAN_WARN_ON(chanb, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, id));
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return NULL;
+ return shmp_index(handle, backend_pages->p, offset & (chanb->subbuf_size - 1));
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
+ * recorder (overwrite) modes. See thesis:
+ *
+ * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
+ * dissertation, Ecole Polytechnique de Montreal.
+ * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
+ *
+ * - Algorithm presentation in Chapter 5:
+ * "Lockless Multi-Core High-Throughput Buffering".
+ * - Algorithm formal verification in Section 8.6:
+ * "Formal verification of LTTng"
+ *
+ * Author:
+ * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Inspired from LTT and RelayFS:
+ * Karim Yaghmour <karim@opersys.com>
+ * Tom Zanussi <zanussi@us.ibm.com>
+ * Bob Wisniewski <bob@watson.ibm.com>
+ * And from K42 :
+ * Bob Wisniewski <bob@watson.ibm.com>
+ *
+ * Buffer reader semantic :
+ *
+ * - get_subbuf_size
+ * while buffer is not finalized and empty
+ * - get_subbuf
+ * - if return value != 0, continue
+ * - splice one subbuffer worth of data to a pipe
+ * - splice the data from pipe to disk/network
+ * - put_subbuf
+ */
+
+#define _LGPL_SOURCE
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <time.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <urcu/compiler.h>
+#include <urcu/ref.h>
+#include <urcu/tls-compat.h>
+#include <poll.h>
+#include <ust-helper.h>
+
+#include <lttng/ust-utils.h>
+#include <lttng/ringbuffer-context.h>
+
+#include "smp.h"
+#include "ringbuffer-config.h"
+#include "vatomic.h"
+#include "backend.h"
+#include "frontend.h"
+#include "shm.h"
+#include "rb-init.h"
+#include "../liblttng-ust/compat.h" /* For ENODATA */
+
+/* Print DBG() messages about events lost only every 1048576 hits */
+#define DBG_PRINT_NR_LOST (1UL << 20)
+
+#define LTTNG_UST_RB_SIG_FLUSH SIGRTMIN
+#define LTTNG_UST_RB_SIG_READ SIGRTMIN + 1
+#define LTTNG_UST_RB_SIG_TEARDOWN SIGRTMIN + 2
+#define CLOCKID CLOCK_MONOTONIC
+#define LTTNG_UST_RING_BUFFER_GET_RETRY 10
+#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS 10
+#define RETRY_DELAY_MS 100 /* 100 ms. */
+
+/*
+ * Non-static to ensure the compiler does not optimize away the xor.
+ */
+uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;
+
+/*
+ * Use POSIX SHM: shm_open(3) and shm_unlink(3).
+ * close(2) to close the fd returned by shm_open.
+ * shm_unlink releases the shared memory object name.
+ * ftruncate(2) sets the size of the memory object.
+ * mmap/munmap maps the shared memory object to a virtual address in the
+ * calling process (done both in libust and the consumer).
+ * See shm_overview(7) for details.
+ * Pass file descriptor returned by shm_open(3) to ltt-sessiond through
+ * a UNIX socket.
+ *
+ * Since we don't need to access the object using its name, we can
+ * immediately shm_unlink(3) it, and only keep the handle with its file
+ * descriptor.
+ */
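+
+/*
+ * Minimal sketch of the sequence described above (illustrative only;
+ * the actual allocation path lives in the shm helpers):
+ *
+ *   fd = shm_open("/name", O_CREAT | O_EXCL | O_RDWR, 0600);
+ *   shm_unlink("/name");    // name no longer needed, keep only the fd
+ *   ret = ftruncate(fd, len);
+ *   p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ */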
+
+/*
+ * Internal structure representing offsets to use at a sub-buffer switch.
+ */
+struct switch_offsets {
+ unsigned long begin, end, old;
+ size_t pre_header_padding, size;
+ unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
+ switch_old_end:1;
+};
+
+DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
+
+/*
+ * wakeup_fd_mutex protects wakeup fd use by timer from concurrent
+ * close.
+ */
+static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+static
+void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_shm_handle *handle);
+
+/*
+ * Handle the timer teardown race (wrt freeing the private data memory):
+ * ring buffer signals are handled by a single thread, which permits a
+ * synchronization point between the handling of each signal.
+ * Protected by the lock within the structure.
+ */
+struct timer_signal_data {
+ pthread_t tid; /* thread id managing signals */
+ int setup_done;
+ int qs_done;
+ pthread_mutex_t lock;
+};
+
+static struct timer_signal_data timer_signal = {
+ .tid = 0,
+ .setup_done = 0,
+ .qs_done = 0,
+ .lock = PTHREAD_MUTEX_INITIALIZER,
+};
+
+static bool lttng_ust_allow_blocking;
+
+void lttng_ust_ringbuffer_set_allow_blocking(void)
+{
+ lttng_ust_allow_blocking = true;
+}
+
+/* Get blocking timeout, in ms */
+static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ if (!lttng_ust_allow_blocking)
+ return 0;
+ return chan->u.s.blocking_timeout_ms;
+}
+
+/**
+ * lib_ring_buffer_reset - Reset ring buffer to initial values.
+ * @buf: Ring buffer.
+ *
+ * Effectively empty the ring buffer. Should be called when the buffer is not
+ * used for writing. The ring buffer can be opened for reading, but the reader
+ * should not be using the iterator concurrently with reset. The previous
+ * current iterator record is reset.
+ */
+void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned int i;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
+ config = &chan->backend.config;
+ /*
+ * Reset iterator first. It will put the subbuffer if it currently holds
+ * it.
+ */
+ v_set(config, &buf->offset, 0);
+ for (i = 0; i < chan->backend.num_subbuf; i++) {
+ struct commit_counters_hot *cc_hot;
+ struct commit_counters_cold *cc_cold;
+ uint64_t *ts_end;
+
+ cc_hot = shmp_index(handle, buf->commit_hot, i);
+ if (!cc_hot)
+ return;
+ cc_cold = shmp_index(handle, buf->commit_cold, i);
+ if (!cc_cold)
+ return;
+ ts_end = shmp_index(handle, buf->ts_end, i);
+ if (!ts_end)
+ return;
+ v_set(config, &cc_hot->cc, 0);
+ v_set(config, &cc_hot->seq, 0);
+ v_set(config, &cc_cold->cc_sb, 0);
+ *ts_end = 0;
+ }
+ uatomic_set(&buf->consumed, 0);
+ uatomic_set(&buf->record_disabled, 0);
+ v_set(config, &buf->last_tsc, 0);
+ lib_ring_buffer_backend_reset(&buf->backend, handle);
+ /* Don't reset number of active readers */
+ v_set(config, &buf->records_lost_full, 0);
+ v_set(config, &buf->records_lost_wrap, 0);
+ v_set(config, &buf->records_lost_big, 0);
+ v_set(config, &buf->records_count, 0);
+ v_set(config, &buf->records_overrun, 0);
+ buf->finalized = 0;
+}
+
+/**
+ * channel_reset - Reset channel to initial values.
+ * @chan: Channel.
+ *
+ * Effectively empty the channel. Should be called when the channel is not used
+ * for writing. The channel can be opened for reading, but the reader should not
+ * be using the iterator concurrently with reset. The previous current iterator
+ * record is reset.
+ */
+void channel_reset(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ /*
+ * Reset iterators first. Will put the subbuffer if held for reading.
+ */
+ uatomic_set(&chan->record_disabled, 0);
+ /* Don't reset commit_count_mask, still valid */
+ channel_backend_reset(&chan->backend);
+ /* Don't reset switch/read timer interval */
+ /* Don't reset notifiers and notifier enable bits */
+ /* Don't reset reader reference count */
+}
+
+static
+void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_crash_abi *crash_abi,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct channel_backend *chanb,
+ struct shm_object *shmobj,
+ struct lttng_ust_shm_handle *handle)
+{
+ int i;
+
+ for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
+ crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
+ crash_abi->mmap_length = shmobj->memory_map_size;
+ crash_abi->endian = RB_CRASH_ENDIAN;
+ crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
+ crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
+ crash_abi->word_size = sizeof(unsigned long);
+ crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;
+
+ /* Offset of fields */
+ crash_abi->offset.prod_offset =
+ (uint32_t) ((char *) &buf->offset - (char *) buf);
+ crash_abi->offset.consumed_offset =
+ (uint32_t) ((char *) &buf->consumed - (char *) buf);
+ crash_abi->offset.commit_hot_array =
+ (uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
+ crash_abi->offset.commit_hot_seq =
+ offsetof(struct commit_counters_hot, seq);
+ crash_abi->offset.buf_wsb_array =
+ (uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
+ crash_abi->offset.buf_wsb_id =
+ offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id);
+ crash_abi->offset.sb_array =
+ (uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
+ crash_abi->offset.sb_array_shmp_offset =
+ offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp,
+ shmp._ref.offset);
+ crash_abi->offset.sb_backend_p_offset =
+ offsetof(struct lttng_ust_lib_ring_buffer_backend_pages,
+ p._ref.offset);
+
+ /* Field length */
+ crash_abi->length.prod_offset = sizeof(buf->offset);
+ crash_abi->length.consumed_offset = sizeof(buf->consumed);
+ crash_abi->length.commit_hot_seq =
+ sizeof(((struct commit_counters_hot *) NULL)->seq);
+ crash_abi->length.buf_wsb_id =
+ sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id);
+ crash_abi->length.sb_array_shmp_offset =
+ sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
+ crash_abi->length.sb_backend_p_offset =
+ sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset);
+
+ /* Array stride */
+ crash_abi->stride.commit_hot_array =
+ sizeof(struct commit_counters_hot);
+ crash_abi->stride.buf_wsb_array =
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer);
+ crash_abi->stride.sb_array =
+ sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp);
+
+ /* Buffer constants */
+ crash_abi->buf_size = chanb->buf_size;
+ crash_abi->subbuf_size = chanb->subbuf_size;
+ crash_abi->num_subbuf = chanb->num_subbuf;
+ crash_abi->mode = (uint32_t) chanb->config.mode;
+
+ if (config->cb.content_size_field) {
+ size_t offset, length;
+
+ config->cb.content_size_field(config, &offset, &length);
+ crash_abi->offset.content_size = offset;
+ crash_abi->length.content_size = length;
+ } else {
+ crash_abi->offset.content_size = 0;
+ crash_abi->length.content_size = 0;
+ }
+ if (config->cb.packet_size_field) {
+ size_t offset, length;
+
+ config->cb.packet_size_field(config, &offset, &length);
+ crash_abi->offset.packet_size = offset;
+ crash_abi->length.packet_size = length;
+ } else {
+ crash_abi->offset.packet_size = 0;
+ crash_abi->length.packet_size = 0;
+ }
+}
+
+/*
+ * Must be called under cpu hotplug protection.
+ */
+int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
+ struct channel_backend *chanb, int cpu,
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
+ struct lttng_ust_lib_ring_buffer_channel *chan = caa_container_of(chanb,
+ struct lttng_ust_lib_ring_buffer_channel, backend);
+ struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+ struct lttng_ust_lib_ring_buffer_channel *shmp_chan;
+ struct commit_counters_hot *cc_hot;
+ void *priv = channel_get_private_config(chan);
+ size_t subbuf_header_size;
+ uint64_t tsc;
+ int ret;
+
+ /* Test for cpu hotplug */
+ if (buf->backend.allocated)
+ return 0;
+
+ align_shm(shmobj, __alignof__(struct commit_counters_hot));
+ set_shmp(buf->commit_hot,
+ zalloc_shm(shmobj,
+ sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
+ if (!shmp(handle, buf->commit_hot)) {
+ return -ENOMEM;
+ }
+
+ align_shm(shmobj, __alignof__(struct commit_counters_cold));
+ set_shmp(buf->commit_cold,
+ zalloc_shm(shmobj,
+ sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
+ if (!shmp(handle, buf->commit_cold)) {
+ ret = -ENOMEM;
+ goto free_commit;
+ }
+
+ align_shm(shmobj, __alignof__(uint64_t));
+ set_shmp(buf->ts_end,
+ zalloc_shm(shmobj,
+ sizeof(uint64_t) * chan->backend.num_subbuf));
+ if (!shmp(handle, buf->ts_end)) {
+ ret = -ENOMEM;
+ goto free_commit_cold;
+ }
+
+
+ ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
+ cpu, handle, shmobj);
+ if (ret) {
+ goto free_init;
+ }
+
+ /*
+ * Write the subbuffer header for first subbuffer so we know the total
+ * duration of data gathering.
+ */
+ subbuf_header_size = config->cb.subbuffer_header_size();
+ v_set(config, &buf->offset, subbuf_header_size);
+ wsb = shmp_index(handle, buf->backend.buf_wsb, 0);
+ if (!wsb) {
+ ret = -EPERM;
+ goto free_chanbuf;
+ }
+ subbuffer_id_clear_noref(config, &wsb->id);
+ shmp_chan = shmp(handle, buf->backend.chan);
+ if (!shmp_chan) {
+ ret = -EPERM;
+ goto free_chanbuf;
+ }
+ tsc = config->cb.ring_buffer_clock_read(shmp_chan);
+ config->cb.buffer_begin(buf, tsc, 0, handle);
+ cc_hot = shmp_index(handle, buf->commit_hot, 0);
+ if (!cc_hot) {
+ ret = -EPERM;
+ goto free_chanbuf;
+ }
+ v_add(config, subbuf_header_size, &cc_hot->cc);
+ v_add(config, subbuf_header_size, &cc_hot->seq);
+
+ if (config->cb.buffer_create) {
+ ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
+ if (ret)
+ goto free_chanbuf;
+ }
+
+ init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);
+
+ buf->backend.allocated = 1;
+ return 0;
+
+ /* Error handling */
+free_init:
+ /* ts_end will be freed by shm teardown */
+free_commit_cold:
+ /* commit_cold will be freed by shm teardown */
+free_commit:
+ /* commit_hot will be freed by shm teardown */
+free_chanbuf:
+ return ret;
+}
+
+static
+void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
+ siginfo_t *si, void *uc __attribute__((unused)))
+{
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_shm_handle *handle;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ int cpu;
+
+ assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
+
+ chan = si->si_value.sival_ptr;
+ handle = chan->handle;
+ config = &chan->backend.config;
+
+ DBG("Switch timer for channel %p\n", chan);
+
+ /*
+ * Only flush buffers periodically if readers are active.
+ */
+ pthread_mutex_lock(&wakeup_fd_mutex);
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+ for_each_possible_cpu(cpu) {
+ struct lttng_ust_lib_ring_buffer *buf =
+ shmp(handle, chan->backend.buf[cpu].shmp);
+
+ if (!buf)
+ goto end;
+ if (uatomic_read(&buf->active_readers))
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
+ chan->handle);
+ }
+ } else {
+ struct lttng_ust_lib_ring_buffer *buf =
+ shmp(handle, chan->backend.buf[0].shmp);
+
+ if (!buf)
+ goto end;
+ if (uatomic_read(&buf->active_readers))
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
+ chan->handle);
+ }
+end:
+ pthread_mutex_unlock(&wakeup_fd_mutex);
+ return;
+}
+
+static
+int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle)
+{
+ unsigned long consumed_old, consumed_idx, commit_count, write_offset;
+ struct commit_counters_cold *cc_cold;
+
+ consumed_old = uatomic_read(&buf->consumed);
+ consumed_idx = subbuf_index(consumed_old, chan);
+ cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
+ if (!cc_cold)
+ return 0;
+ commit_count = v_read(config, &cc_cold->cc_sb);
+ /*
+ * No memory barrier here, since we are only interested
+ * in a statistically correct polling result. The next poll will
+ * get the data if we are racing. The mb() that ensures correct
+ * memory order is in get_subbuf.
+ */
+ write_offset = v_read(config, &buf->offset);
+
+ /*
+ * Check that the subbuffer we are trying to consume has been
+ * already fully committed.
+ */
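+ /*
+ * Worked example (hypothetical values): with subbuf_size = 4096 and
+ * num_subbuf = 4 (num_subbuf_order = 2, buf_size = 16384), a reader
+ * at consumed_old = 16384 (subbuffer 0, second lap) computes
+ * buf_trunc(consumed_old, chan) >> num_subbuf_order = 4096. That
+ * subbuffer is fully committed once cc_sb reaches 2 * 4096 = 8192,
+ * at which point commit_count - subbuf_size = 4096 matches and the
+ * check below passes.
+ */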
+
+ if (((commit_count - chan->backend.subbuf_size)
+ & chan->commit_count_mask)
+ - (buf_trunc(consumed_old, chan)
+ >> chan->backend.num_subbuf_order)
+ != 0)
+ return 0;
+
+ /*
+ * Check that we are not about to read the same subbuffer in
+ * which the writer head is.
+ */
+ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
+ == 0)
+ return 0;
+
+ return 1;
+}
+
+static
+void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
+ sigset_t sigpipe_set, pending_set, old_set;
+ int ret, sigpipe_was_pending = 0;
+
+ if (wakeup_fd < 0)
+ return;
+
+ /*
+ * Wake-up the other end by writing a null byte in the pipe
+ * (non-blocking). Important note: Because writing into the
+ * pipe is non-blocking (and therefore we allow dropping wakeup
+ * data, as long as there is wakeup data present in the pipe
+ * buffer to wake up the consumer), the consumer should perform
+ * the following sequence for waiting:
+ * 1) empty the pipe (reads).
+ * 2) check if there is data in the buffer.
+ * 3) wait on the pipe (poll).
+ *
+ * Discard the SIGPIPE from write(), not disturbing any SIGPIPE
+ * that might be already pending. If a bogus SIGPIPE is sent to
+ * the entire process concurrently by a malicious user, it may
+ * be simply discarded.
+ */
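+ /*
+ * A minimal sketch of that consumer-side sequence (hypothetical
+ * consumer code, not part of this file):
+ *
+ *	char drain[32];
+ *
+ *	while (read(wakeup_fd, drain, sizeof(drain)) > 0)
+ *		;				1) empty the pipe
+ *	if (data_pending(chan))			2) check for data
+ *		consume_data(chan);
+ *	else
+ *		poll(&wakeup_pollfd, 1, -1);	3) wait on the pipe
+ *
+ * data_pending() and consume_data() are hypothetical helpers standing
+ * in for the consumer daemon's own logic.
+ */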
+ ret = sigemptyset(&pending_set);
+ assert(!ret);
+ /*
+ * sigpending returns the mask of signals that are _both_
+ * blocked for the thread _and_ pending for either the thread or
+ * the entire process.
+ */
+ ret = sigpending(&pending_set);
+ assert(!ret);
+ sigpipe_was_pending = sigismember(&pending_set, SIGPIPE);
+ /*
+ * If sigpipe was pending, it means it was already blocked, so
+ * no need to block it.
+ */
+ if (!sigpipe_was_pending) {
+ ret = sigemptyset(&sigpipe_set);
+ assert(!ret);
+ ret = sigaddset(&sigpipe_set, SIGPIPE);
+ assert(!ret);
+ ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set);
+ assert(!ret);
+ }
+ do {
+ ret = write(wakeup_fd, "", 1);
+ } while (ret == -1L && errno == EINTR);
+ if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) {
+ struct timespec timeout = { 0, 0 };
+ do {
+ ret = sigtimedwait(&sigpipe_set, NULL,
+ &timeout);
+ } while (ret == -1L && errno == EINTR);
+ }
+ if (!sigpipe_was_pending) {
+ ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
+ assert(!ret);
+ }
+}
+
+static
+void lib_ring_buffer_channel_do_read(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ struct lttng_ust_shm_handle *handle;
+ int cpu;
+
+ handle = chan->handle;
+ config = &chan->backend.config;
+
+ /*
+ * Only flush buffers periodically if readers are active.
+ */
+ pthread_mutex_lock(&wakeup_fd_mutex);
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+ for_each_possible_cpu(cpu) {
+ struct lttng_ust_lib_ring_buffer *buf =
+ shmp(handle, chan->backend.buf[cpu].shmp);
+
+ if (!buf)
+ goto end;
+ if (uatomic_read(&buf->active_readers)
+ && lib_ring_buffer_poll_deliver(config, buf,
+ chan, handle)) {
+ lib_ring_buffer_wakeup(buf, handle);
+ }
+ }
+ } else {
+ struct lttng_ust_lib_ring_buffer *buf =
+ shmp(handle, chan->backend.buf[0].shmp);
+
+ if (!buf)
+ goto end;
+ if (uatomic_read(&buf->active_readers)
+ && lib_ring_buffer_poll_deliver(config, buf,
+ chan, handle)) {
+ lib_ring_buffer_wakeup(buf, handle);
+ }
+ }
+end:
+ pthread_mutex_unlock(&wakeup_fd_mutex);
+}
+
+static
+void lib_ring_buffer_channel_read_timer(int sig __attribute__((unused)),
+ siginfo_t *si, void *uc __attribute__((unused)))
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+
+ assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
+ chan = si->si_value.sival_ptr;
+ DBG("Read timer for channel %p\n", chan);
+ lib_ring_buffer_channel_do_read(chan);
+ return;
+}
+
+static
+void rb_setmask(sigset_t *mask)
+{
+ int ret;
+
+ ret = sigemptyset(mask);
+ if (ret) {
+ PERROR("sigemptyset");
+ }
+ ret = sigaddset(mask, LTTNG_UST_RB_SIG_FLUSH);
+ if (ret) {
+ PERROR("sigaddset");
+ }
+ ret = sigaddset(mask, LTTNG_UST_RB_SIG_READ);
+ if (ret) {
+ PERROR("sigaddset");
+ }
+ ret = sigaddset(mask, LTTNG_UST_RB_SIG_TEARDOWN);
+ if (ret) {
+ PERROR("sigaddset");
+ }
+}
+
+static
+void *sig_thread(void *arg __attribute__((unused)))
+{
+ sigset_t mask;
+ siginfo_t info;
+ int signr;
+
+ /* Only this thread will receive the signals in this mask. */
+ rb_setmask(&mask);
+ CMM_STORE_SHARED(timer_signal.tid, pthread_self());
+
+ for (;;) {
+ signr = sigwaitinfo(&mask, &info);
+ if (signr == -1) {
+ if (errno != EINTR)
+ PERROR("sigwaitinfo");
+ continue;
+ }
+ if (signr == LTTNG_UST_RB_SIG_FLUSH) {
+ lib_ring_buffer_channel_switch_timer(info.si_signo,
+ &info, NULL);
+ } else if (signr == LTTNG_UST_RB_SIG_READ) {
+ lib_ring_buffer_channel_read_timer(info.si_signo,
+ &info, NULL);
+ } else if (signr == LTTNG_UST_RB_SIG_TEARDOWN) {
+ cmm_smp_mb();
+ CMM_STORE_SHARED(timer_signal.qs_done, 1);
+ cmm_smp_mb();
+ } else {
+ ERR("Unexpected signal %d\n", info.si_signo);
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Ensure only a single thread listens on the timer signal.
+ */
+static
+void lib_ring_buffer_setup_timer_thread(void)
+{
+ pthread_t thread;
+ int ret;
+
+ pthread_mutex_lock(&timer_signal.lock);
+ if (timer_signal.setup_done)
+ goto end;
+
+ ret = pthread_create(&thread, NULL, &sig_thread, NULL);
+ if (ret) {
+ errno = ret;
+ PERROR("pthread_create");
+ }
+ ret = pthread_detach(thread);
+ if (ret) {
+ errno = ret;
+ PERROR("pthread_detach");
+ }
+ timer_signal.setup_done = 1;
+end:
+ pthread_mutex_unlock(&timer_signal.lock);
+}
+
+/*
+ * Wait for signal-handling thread quiescent state.
+ */
+static
+void lib_ring_buffer_wait_signal_thread_qs(unsigned int signr)
+{
+ sigset_t pending_set;
+ int ret;
+
+ /*
+ * We need to be the only thread interacting with the thread
+ * that manages signals for teardown synchronization.
+ */
+ pthread_mutex_lock(&timer_signal.lock);
+
+ /*
+ * Ensure we don't have any signal queued for this channel.
+ */
+ for (;;) {
+ ret = sigemptyset(&pending_set);
+ if (ret == -1) {
+ PERROR("sigemptyset");
+ }
+ ret = sigpending(&pending_set);
+ if (ret == -1) {
+ PERROR("sigpending");
+ }
+ if (!sigismember(&pending_set, signr))
+ break;
+ caa_cpu_relax();
+ }
+
+ /*
+ * From this point, no new signal handler will be fired that
+ * would try to access "chan". However, we still need to wait
+ * for any currently executing handler to complete.
+ */
+ cmm_smp_mb();
+ CMM_STORE_SHARED(timer_signal.qs_done, 0);
+ cmm_smp_mb();
+
+ /*
+ * Kill with LTTNG_UST_RB_SIG_TEARDOWN, so signal management
+ * thread wakes up.
+ */
+ kill(getpid(), LTTNG_UST_RB_SIG_TEARDOWN);
+
+ while (!CMM_LOAD_SHARED(timer_signal.qs_done))
+ caa_cpu_relax();
+ cmm_smp_mb();
+
+ pthread_mutex_unlock(&timer_signal.lock);
+}
+
+static
+void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ struct sigevent sev;
+ struct itimerspec its;
+ int ret;
+
+ if (!chan->switch_timer_interval || chan->switch_timer_enabled)
+ return;
+
+ chan->switch_timer_enabled = 1;
+
+ lib_ring_buffer_setup_timer_thread();
+
+ memset(&sev, 0, sizeof(sev));
+ sev.sigev_notify = SIGEV_SIGNAL;
+ sev.sigev_signo = LTTNG_UST_RB_SIG_FLUSH;
+ sev.sigev_value.sival_ptr = chan;
+ ret = timer_create(CLOCKID, &sev, &chan->switch_timer);
+ if (ret == -1) {
+ PERROR("timer_create");
+ }
+
+ its.it_value.tv_sec = chan->switch_timer_interval / 1000000;
+ its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000;
+ its.it_interval.tv_sec = its.it_value.tv_sec;
+ its.it_interval.tv_nsec = its.it_value.tv_nsec;
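+ /*
+ * Example (hypothetical value): a switch_timer_interval of 1500000 us
+ * yields it_value = { .tv_sec = 1, .tv_nsec = 500000000 }, re-armed
+ * at the same period through it_interval.
+ */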
+
+ ret = timer_settime(chan->switch_timer, 0, &its, NULL);
+ if (ret == -1) {
+ PERROR("timer_settime");
+ }
+}
+
+static
+void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ int ret;
+
+ if (!chan->switch_timer_interval || !chan->switch_timer_enabled)
+ return;
+
+ ret = timer_delete(chan->switch_timer);
+ if (ret == -1) {
+ PERROR("timer_delete");
+ }
+
+ lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_FLUSH);
+
+ chan->switch_timer = 0;
+ chan->switch_timer_enabled = 0;
+}
+
+static
+void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct sigevent sev;
+ struct itimerspec its;
+ int ret;
+
+ if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
+ || !chan->read_timer_interval || chan->read_timer_enabled)
+ return;
+
+ chan->read_timer_enabled = 1;
+
+ lib_ring_buffer_setup_timer_thread();
+
+ memset(&sev, 0, sizeof(sev));
+ sev.sigev_notify = SIGEV_SIGNAL;
+ sev.sigev_signo = LTTNG_UST_RB_SIG_READ;
+ sev.sigev_value.sival_ptr = chan;
+ ret = timer_create(CLOCKID, &sev, &chan->read_timer);
+ if (ret == -1) {
+ PERROR("timer_create");
+ }
+
+ its.it_value.tv_sec = chan->read_timer_interval / 1000000;
+ its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000;
+ its.it_interval.tv_sec = its.it_value.tv_sec;
+ its.it_interval.tv_nsec = its.it_value.tv_nsec;
+
+ ret = timer_settime(chan->read_timer, 0, &its, NULL);
+ if (ret == -1) {
+ PERROR("timer_settime");
+ }
+}
+
+static
+void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_lib_ring_buffer_channel *chan)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ int ret;
+
+ if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
+ || !chan->read_timer_interval || !chan->read_timer_enabled)
+ return;
+
+ ret = timer_delete(chan->read_timer);
+ if (ret == -1) {
+ PERROR("timer_delete");
+ }
+
+ /*
+ * Do one more check to catch data that has been written in the last
+ * timer period.
+ */
+ lib_ring_buffer_channel_do_read(chan);
+
+ lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_READ);
+
+ chan->read_timer = 0;
+ chan->read_timer_enabled = 0;
+}
+
+static void channel_unregister_notifiers(struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+ lib_ring_buffer_channel_switch_timer_stop(chan);
+ lib_ring_buffer_channel_read_timer_stop(chan);
+}
+
+static void channel_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config =
+ &chan->backend.config;
+ int cpu;
+
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
+ for_each_possible_cpu(cpu) {
+ struct lttng_ust_lib_ring_buffer *buf =
+ shmp(handle, chan->backend.buf[cpu].shmp);
+ if (buf)
+ lib_ring_buffer_print_errors(chan, buf, cpu, handle);
+ }
+ } else {
+ struct lttng_ust_lib_ring_buffer *buf =
+ shmp(handle, chan->backend.buf[0].shmp);
+
+ if (buf)
+ lib_ring_buffer_print_errors(chan, buf, -1, handle);
+ }
+}
+
+static void channel_free(struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle,
+ int consumer)
+{
+ channel_backend_free(&chan->backend, handle);
+ /* chan is freed by shm teardown */
+ shm_object_table_destroy(handle->table, consumer);
+ free(handle);
+}
+
+/**
+ * channel_create - Create channel.
+ * @config: ring buffer instance configuration
+ * @name: name of the channel
+ * @priv_data_align: alignment, in bytes, of the private data area. (config)
+ * @priv_data_size: length, in bytes, of the private data area. (config)
+ * @priv_data_init: initialization data for private data. (config)
+ * @priv: local private data (memory owned by caller)
+ * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
+ * address mapping. It is used only by RING_BUFFER_STATIC
+ * configuration. It can be set to NULL for other backends.
+ * @subbuf_size: subbuffer size
+ * @num_subbuf: number of subbuffers
+ * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
+ * padding to let readers get those sub-buffers.
+ * Used for live streaming.
+ * @read_timer_interval: Time interval (in us) to wake up pending readers.
+ * @stream_fds: array of stream file descriptors.
+ * @nr_stream_fds: number of file descriptors in array.
+ *
+ * Holds cpu hotplug.
+ * Returns NULL on failure.
+ */
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+ const char *name,
+ size_t priv_data_align,
+ size_t priv_data_size,
+ void *priv_data_init,
+ void *priv,
+ void *buf_addr __attribute__((unused)), size_t subbuf_size,
+ size_t num_subbuf, unsigned int switch_timer_interval,
+ unsigned int read_timer_interval,
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout)
+{
+ int ret;
+ size_t shmsize, chansize;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ struct lttng_ust_shm_handle *handle;
+ struct shm_object *shmobj;
+ unsigned int nr_streams;
+ int64_t blocking_timeout_ms;
+
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ nr_streams = num_possible_cpus();
+ else
+ nr_streams = 1;
+
+ if (nr_stream_fds != nr_streams)
+ return NULL;
+
+ if (blocking_timeout < -1) {
+ return NULL;
+ }
+ /* usec to msec */
+ if (blocking_timeout == -1) {
+ blocking_timeout_ms = -1;
+ } else {
+ blocking_timeout_ms = blocking_timeout / 1000;
+ if (blocking_timeout_ms != (int32_t) blocking_timeout_ms) {
+ return NULL;
+ }
+ }
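+ /*
+ * Example (hypothetical value): a blocking_timeout of 10000000000 us
+ * (10000 s) converts to 10000000 ms, which fits in int32_t; anything
+ * above INT32_MAX ms (about 24.8 days) fails the round-trip cast
+ * above and is rejected.
+ */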
+
+ if (lib_ring_buffer_check_config(config, switch_timer_interval,
+ read_timer_interval))
+ return NULL;
+
+ handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ if (!handle)
+ return NULL;
+
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
+
+ /* Calculate the shm allocation layout */
+ shmsize = sizeof(struct lttng_ust_lib_ring_buffer_channel);
+ shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
+ chansize = shmsize;
+ if (priv_data_align)
+ shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
+ shmsize += priv_data_size;
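+ /*
+ * Resulting layout (sketch): [ channel header | per-stream shmp array
+ * (nr_streams entries) | optional padding to priv_data_align |
+ * private data (priv_data_size bytes) ]. chansize covers the channel
+ * header and the shmp array only.
+ */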
+
+ /* Allocate normal memory for channel (not shared) */
+ shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
+ -1, -1);
+ if (!shmobj)
+ goto error_append;
+ /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+ set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
+ assert(handle->chan._ref.index == 0);
+ assert(handle->chan._ref.offset == 0);
+ chan = shmp(handle, handle->chan);
+ if (!chan)
+ goto error_append;
+ chan->nr_streams = nr_streams;
+
+ /* space for private data */
+ if (priv_data_size) {
+ void *priv_config;
+
+ DECLARE_SHMP(void, priv_data_alloc);
+
+ align_shm(shmobj, priv_data_align);
+ chan->priv_data_offset = shmobj->allocated_len;
+ set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
+ if (!shmp(handle, priv_data_alloc))
+ goto error_append;
+ priv_config = channel_get_private_config(chan);
+ memcpy(priv_config, priv_data_init, priv_data_size);
+ } else {
+ chan->priv_data_offset = -1;
+ }
+
+ chan->u.s.blocking_timeout_ms = (int32_t) blocking_timeout_ms;
+
+ channel_set_private(chan, priv);
+
+ ret = channel_backend_init(&chan->backend, name, config,
+ subbuf_size, num_subbuf, handle,
+ stream_fds);
+ if (ret)
+ goto error_backend_init;
+
+ chan->handle = handle;
+ chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
+
+ chan->switch_timer_interval = switch_timer_interval;
+ chan->read_timer_interval = read_timer_interval;
+ lib_ring_buffer_channel_switch_timer_start(chan);
+ lib_ring_buffer_channel_read_timer_start(chan);
+
+ return handle;
+
+error_backend_init:
+error_append:
+ shm_object_table_destroy(handle->table, 1);
+error_table_alloc:
+ free(handle);
+ return NULL;
+}
+
+struct lttng_ust_shm_handle *channel_handle_create(void *data,
+ uint64_t memory_map_size,
+ int wakeup_fd)
+{
+ struct lttng_ust_shm_handle *handle;
+ struct shm_object *object;
+
+ handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ if (!handle)
+ return NULL;
+
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
+ /* Add channel object */
+ object = shm_object_table_append_mem(handle->table, data,
+ memory_map_size, wakeup_fd);
+ if (!object)
+ goto error_table_object;
+ /* struct lttng_ust_lib_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
+ handle->chan._ref.index = 0;
+ handle->chan._ref.offset = 0;
+ return handle;
+
+error_table_object:
+ shm_object_table_destroy(handle->table, 0);
+error_table_alloc:
+ free(handle);
+ return NULL;
+}
+
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+ int shm_fd, int wakeup_fd, uint32_t stream_nr,
+ uint64_t memory_map_size)
+{
+ struct shm_object *object;
+
+ /* Add stream object */
+ object = shm_object_table_append_shm(handle->table,
+ shm_fd, wakeup_fd, stream_nr,
+ memory_map_size);
+ if (!object)
+ return -EINVAL;
+ return 0;
+}
+
+unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
+{
+ assert(handle->table);
+ return handle->table->allocated_len - 1;
+}
+
+static
+void channel_release(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+ int consumer)
+{
+ channel_free(chan, handle, consumer);
+}
+
+/**
+ * channel_destroy - Finalize, wait for q.s. and destroy channel.
+ * @chan: channel to destroy
+ *
+ * Holds cpu hotplug.
+ * Call "destroy" callback, finalize channels, decrement the channel
+ * reference count. Note that when readers have completed data
+ * consumption of finalized channels, get_subbuf() will return -ENODATA.
+ * They should release their handle at that point.
+ */
+void channel_destroy(struct lttng_ust_lib_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
+ int consumer)
+{
+ if (consumer) {
+ /*
+ * Note: the consumer takes care of finalizing and
+ * switching the buffers.
+ */
+ channel_unregister_notifiers(chan, handle);
+ /*
+ * The consumer prints errors.
+ */
+ channel_print_errors(chan, handle);
+ }
+
+ /*
+ * sessiond/consumer are keeping a reference on the shm file
+ * descriptor directly. No need to refcount.
+ */
+ channel_release(chan, handle, consumer);
+ return;
+}
+
+struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan, int cpu,
+ struct lttng_ust_shm_handle *handle,
+ int *shm_fd, int *wait_fd,
+ int *wakeup_fd,
+ uint64_t *memory_map_size)
+{
+ struct shm_ref *ref;
+
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+ cpu = 0;
+ } else {
+ if (cpu >= num_possible_cpus())
+ return NULL;
+ }
+ ref = &chan->backend.buf[cpu].shmp._ref;
+ *shm_fd = shm_get_shm_fd(handle, ref);
+ *wait_fd = shm_get_wait_fd(handle, ref);
+ *wakeup_fd = shm_get_wakeup_fd(handle, ref);
+ if (shm_get_shm_size(handle, ref, memory_map_size))
+ return NULL;
+ return shmp(handle, chan->backend.buf[cpu].shmp);
+}
+
+int ring_buffer_channel_close_wait_fd(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle)
+{
+ struct shm_ref *ref;
+
+ ref = &handle->chan._ref;
+ return shm_close_wait_fd(handle, ref);
+}
+
+int ring_buffer_channel_close_wakeup_fd(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer_channel *chan __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle)
+{
+ struct shm_ref *ref;
+
+ ref = &handle->chan._ref;
+ return shm_close_wakeup_fd(handle, ref);
+}
+
+int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle,
+ int cpu)
+{
+ struct shm_ref *ref;
+
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+ cpu = 0;
+ } else {
+ if (cpu >= num_possible_cpus())
+ return -EINVAL;
+ }
+ ref = &chan->backend.buf[cpu].shmp._ref;
+ return shm_close_wait_fd(handle, ref);
+}
+
+int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_shm_handle *handle,
+ int cpu)
+{
+ struct shm_ref *ref;
+ int ret;
+
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+ cpu = 0;
+ } else {
+ if (cpu >= num_possible_cpus())
+ return -EINVAL;
+ }
+ ref = &chan->backend.buf[cpu].shmp._ref;
+ pthread_mutex_lock(&wakeup_fd_mutex);
+ ret = shm_close_wakeup_fd(handle, ref);
+ pthread_mutex_unlock(&wakeup_fd_mutex);
+ return ret;
+}
+
+int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+ if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
+ return -EBUSY;
+ cmm_smp_mb();
+ return 0;
+}
+
+void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
+
+ if (!chan)
+ return;
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ cmm_smp_mb();
+ uatomic_dec(&buf->active_readers);
+}
+
+/**
+ * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
+ * @buf: ring buffer
+ * @consumed: consumed count indicating the position where to read
+ * @produced: produced count, indicates position where to stop reading
+ *
+ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
+ * data to read at consumed position, or 0 if the get operation succeeds.
+ */
+
+int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long *consumed, unsigned long *produced,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long consumed_cur, write_offset;
+ int finalized;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return -EPERM;
+ config = &chan->backend.config;
+ finalized = CMM_ACCESS_ONCE(buf->finalized);
+ /*
+ * Read finalized before counters.
+ */
+ cmm_smp_rmb();
+ consumed_cur = uatomic_read(&buf->consumed);
+ /*
+ * No need to issue a memory barrier between consumed count read and
+ * write offset read, because consumed count can only change
+ * concurrently in overwrite mode, and we keep a sequence counter
+ * identifier derived from the write offset to check we are getting
+ * the same sub-buffer we are expecting (the sub-buffers are atomically
+ * "tagged" upon writes, tags are checked upon read).
+ */
+ write_offset = v_read(config, &buf->offset);
+
+ /*
+ * Check that we are not about to read the same subbuffer in
+ * which the writer head is.
+ */
+ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
+ == 0)
+ goto nodata;
+
+ *consumed = consumed_cur;
+ *produced = subbuf_trunc(write_offset, chan);
+
+ return 0;
+
+nodata:
+ /*
+ * The memory barriers __wait_event()/wake_up_interruptible() take care
+ * of "raw_spin_is_locked" memory ordering.
+ */
+ if (finalized)
+ return -ENODATA;
+ else
+ return -EAGAIN;
+}
+
+/**
+ * lib_ring_buffer_snapshot_sample_positions - sample current positions
+ *
+ * Performs the same function as lib_ring_buffer_snapshot(), but the positions
+ * are saved regardless of whether the consumed and produced positions are
+ * in the same subbuffer.
+ * @buf: ring buffer
+ * @consumed: consumed byte count indicating the last position read
+ * @produced: produced byte count indicating the last position written
+ *
+ * This function is meant to provide information on the exact producer and
+ * consumer positions without regard for the "snapshot" feature.
+ */
+int lib_ring_buffer_snapshot_sample_positions(
+ struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long *consumed, unsigned long *produced,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return -EPERM;
+ config = &chan->backend.config;
+ cmm_smp_rmb();
+ *consumed = uatomic_read(&buf->consumed);
+ /*
+ * No need to issue a memory barrier between consumed count read and
+ * write offset read, because consumed count can only change
+ * concurrently in overwrite mode, and we keep a sequence counter
+ * identifier derived from the write offset to check we are getting
+ * the same sub-buffer we are expecting (the sub-buffers are atomically
+ * "tagged" upon writes, tags are checked upon read).
+ */
+ *produced = v_read(config, &buf->offset);
+ return 0;
+}
+
+/**
+ * lib_ring_buffer_move_consumer - move consumed counter forward
+ * @buf: ring buffer
+ * @consumed_new: new consumed count value
+ */
+void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long consumed_new,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ unsigned long consumed;
+
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+
+ /*
+ * Only push the consumed value forward.
+ * If the consumed cmpxchg fails, this is because we have been pushed by
+ * the writer in flight recorder mode.
+ */
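+ /*
+ * For example (hypothetical values): with consumed = 8192 and
+ * consumed_new = 4096, the loop below never executes and the counter
+ * is left untouched; the consumed count only ever moves forward.
+ */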
+ consumed = uatomic_read(&buf->consumed);
+ while ((long) consumed - (long) consumed_new < 0)
+ consumed = uatomic_cmpxchg(&buf->consumed, consumed,
+ consumed_new);
+}
+
+/**
+ * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
+ * @buf: ring buffer
+ * @consumed: consumed count indicating the position where to read
+ *
+ * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
+ * data to read at consumed position, or 0 if the get operation succeeds.
+ */
+int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long consumed,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
+ int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
+ struct commit_counters_cold *cc_cold;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return -EPERM;
+ config = &chan->backend.config;
+retry:
+ finalized = CMM_ACCESS_ONCE(buf->finalized);
+ /*
+ * Read finalized before counters.
+ */
+ cmm_smp_rmb();
+ consumed_cur = uatomic_read(&buf->consumed);
+ consumed_idx = subbuf_index(consumed, chan);
+ cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
+ if (!cc_cold)
+ return -EPERM;
+ commit_count = v_read(config, &cc_cold->cc_sb);
+ /*
+ * Make sure we read the commit count before reading the buffer
+ * data and the write offset. Correct consumed offset ordering
+ * wrt commit count is ensured by the use of cmpxchg to update
+ * the consumed offset.
+ */
+ /*
+ * Local rmb to match the remote wmb to read the commit count
+ * before the buffer data and the write offset.
+ */
+ cmm_smp_rmb();
+
+ write_offset = v_read(config, &buf->offset);
+
+ /*
+ * Check that the buffer we are getting is after or at consumed_cur
+ * position.
+ */
+ if ((long) subbuf_trunc(consumed, chan)
+ - (long) subbuf_trunc(consumed_cur, chan) < 0)
+ goto nodata;
+
+ /*
+ * Check that the subbuffer we are trying to consume has been
+ * already fully committed. There are a few causes that can make
+ * this unavailability situation occur:
+ *
+ * Temporary (short-term) situation:
+ * - Application is running on a different CPU, between reserve
+ * and commit ring buffer operations,
+ * - Application is preempted between reserve and commit ring
+ * buffer operations,
+ *
+ * Long-term situation:
+ * - Application is stopped (SIGSTOP) between reserve and commit
+ * ring buffer operations. Could eventually be resumed by
+ * SIGCONT.
+ * - Application is killed (SIGTERM, SIGINT, SIGKILL) between
+ * reserve and commit ring buffer operation.
+ *
+ * From a consumer perspective, short-term unavailability
+ * situations are handled by retrying a few times after a delay.
+ * Long-term unavailability situations are handled by failing to
+ * get the sub-buffer.
+ *
+ * In all of those situations, if the application is taking a
+ * long time to perform its commit after ring buffer space
+ * reservation, we can end up in a situation where the producer
+ * will fill the ring buffer and try to write into the same
+ * sub-buffer again (which has a missing commit). This is
+ * handled by the producer in the sub-buffer switch handling
+ * code of the reserve routine by detecting unbalanced
+ * reserve/commit counters and discarding all further events
+ * until the situation is resolved. Two scenarios can occur:
+ *
+ * 1) The application causing the reserve/commit counters to be
+ * unbalanced has been terminated. In this situation, all
+ * further events will be discarded in the buffers, and no
+ * further buffer data will be readable by the consumer
+ * daemon. Tearing down the UST tracing session and starting
+ * anew is a work-around for those situations. Note that this
+ * only affects per-UID tracing. In per-PID tracing, the
+ * application vanishes with the termination, and therefore
+ * no more data needs to be written to the buffers.
+ * 2) The application causing the unbalance has been delayed for
+ * a long time, but will eventually try to increment the
+ * commit counter after eventually writing to the sub-buffer.
+ * This situation can cause events to be discarded until the
+ * application resumes its operations.
+ */
+ if (((commit_count - chan->backend.subbuf_size)
+ & chan->commit_count_mask)
+ - (buf_trunc(consumed, chan)
+ >> chan->backend.num_subbuf_order)
+ != 0) {
+ if (nr_retry-- > 0) {
+ if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+ (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+ goto retry;
+ } else {
+ goto nodata;
+ }
+ }
+
+ /*
+ * Check that we are not about to read the same subbuffer in
+ * which the writer head is.
+ */
+ if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
+ == 0)
+ goto nodata;
+
+ /*
+ * Failure to get the subbuffer causes a busy-loop retry without going
+ * to a wait queue. These are caused by short-lived race windows where
+ * the writer is getting access to a subbuffer we were trying to get
+ * access to. Also checks that the "consumed" buffer count we are
+ * looking for matches the one contained in the subbuffer id.
+ *
+ * application signals and preemption, thus requiring the loop to
+ * be bounded by a maximum number of retries.
+ * the loop to a maximum number of retry.
+ */
+ ret = update_read_sb_index(config, &buf->backend, &chan->backend,
+ consumed_idx, buf_trunc_val(consumed, chan),
+ handle);
+ if (ret) {
+ if (nr_retry-- > 0) {
+ if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
+ (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
+ goto retry;
+ } else {
+ goto nodata;
+ }
+ }
+ subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
+
+ buf->get_subbuf_consumed = consumed;
+ buf->get_subbuf = 1;
+
+ return 0;
+
+nodata:
+ /*
+ * The memory barriers __wait_event()/wake_up_interruptible() take care
+ * of "raw_spin_is_locked" memory ordering.
+ */
+ if (finalized)
+ return -ENODATA;
+ else
+ return -EAGAIN;
+}
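+
+/*
+ * Typical consumer-side use of the snapshot/get/put primitives (a
+ * hedged sketch, not part of this file; error handling elided):
+ *
+ *	unsigned long consumed, produced;
+ *
+ *	if (!lib_ring_buffer_snapshot(buf, &consumed, &produced, handle)
+ *			&& !lib_ring_buffer_get_subbuf(buf, consumed, handle)) {
+ *		... read the sub-buffer payload ...
+ *		lib_ring_buffer_put_subbuf(buf, handle);
+ *		lib_ring_buffer_move_consumer(buf,
+ *				subbuf_align(consumed, chan), handle);
+ *	}
+ */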
+
+/**
+ * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
+ * @buf: ring buffer
+ */
+void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ unsigned long sb_bindex, consumed_idx, consumed;
+ struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+ struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
+
+ chan = shmp(handle, bufb->chan);
+ if (!chan)
+ return;
+ config = &chan->backend.config;
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+
+ if (!buf->get_subbuf) {
+ /*
+ * Reader puts a subbuffer it did not get.
+ */
+ CHAN_WARN_ON(chan, 1);
+ return;
+ }
+ consumed = buf->get_subbuf_consumed;
+ buf->get_subbuf = 0;
+
+ /*
+ * Clear the records_unread counter (overruns counter).
+ * Can still be non-zero if a file reader simply grabbed the data
+ * without using iterators.
+ * Can be below zero if an iterator is used on a snapshot more than
+ * once.
+ */
+ sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+ rpages = shmp_index(handle, bufb->array, sb_bindex);
+ if (!rpages)
+ return;
+ backend_pages = shmp(handle, rpages->shmp);
+ if (!backend_pages)
+ return;
+ v_add(config, v_read(config, &backend_pages->records_unread),
+ &bufb->records_read);
+ v_set(config, &backend_pages->records_unread, 0);
+ CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
+ && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
+ subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
+
+ /*
+ * Exchange the reader subbuffer with the one we put in its place in the
+ * writer subbuffer table. Expect the original consumed count. If
+ * update_read_sb_index fails, this is because the writer updated the
+ * subbuffer concurrently. We should therefore keep the subbuffer we
+ * currently have: it has become invalid to try reading this sub-buffer
+ * consumed count value anyway.
+ */
+ consumed_idx = subbuf_index(consumed, chan);
+ update_read_sb_index(config, &buf->backend, &chan->backend,
+ consumed_idx, buf_trunc_val(consumed, chan),
+ handle);
+ /*
+ * update_read_sb_index return value ignored. Don't exchange sub-buffer
+ * if the writer concurrently updated it.
+ */
+}
+
+/*
+ * cons_offset is an iterator on all subbuffer offsets between the reader
+ * position and the writer position. (inclusive)
+ */
+static
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ unsigned long cons_offset,
+ int cpu,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ unsigned long cons_idx, commit_count, commit_count_sb;
+ struct commit_counters_hot *cc_hot;
+ struct commit_counters_cold *cc_cold;
+
+ cons_idx = subbuf_index(cons_offset, chan);
+ cc_hot = shmp_index(handle, buf->commit_hot, cons_idx);
+ if (!cc_hot)
+ return;
+ cc_cold = shmp_index(handle, buf->commit_cold, cons_idx);
+ if (!cc_cold)
+ return;
+ commit_count = v_read(config, &cc_hot->cc);
+ commit_count_sb = v_read(config, &cc_cold->cc_sb);
+
+ if (subbuf_offset(commit_count, chan) != 0)
+ DBG("ring buffer %s, cpu %d: "
+ "commit count in subbuffer %lu,\n"
+ "expecting multiples of %lu bytes\n"
+ " [ %lu bytes committed, %lu bytes reader-visible ]\n",
+ chan->backend.name, cpu, cons_idx,
+ chan->backend.subbuf_size,
+ commit_count, commit_count_sb);
+
+ DBG("ring buffer: %s, cpu %d: %lu bytes committed\n",
+ chan->backend.name, cpu, commit_count);
+}
+
+static
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ int cpu, struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ unsigned long write_offset, cons_offset;
+
+ /*
+ * No need to order commit_count, write_offset and cons_offset reads
+ * because we execute at teardown when no more writer nor reader
+ * references are left.
+ */
+ write_offset = v_read(config, &buf->offset);
+ cons_offset = uatomic_read(&buf->consumed);
+ if (write_offset != cons_offset)
+ DBG("ring buffer %s, cpu %d: "
+ "non-consumed data\n"
+ " [ %lu bytes written, %lu bytes read ]\n",
+ chan->backend.name, cpu, write_offset, cons_offset);
+
+ for (cons_offset = uatomic_read(&buf->consumed);
+ (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
+ chan)
+ - cons_offset) > 0;
+ cons_offset = subbuf_align(cons_offset, chan))
+ lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
+ cpu, handle);
+}
+
+static
+void lib_ring_buffer_print_errors(struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+
+ if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
+ DBG("ring buffer %s: %lu records written, "
+ "%lu records overrun\n",
+ chan->backend.name,
+ v_read(config, &buf->records_count),
+ v_read(config, &buf->records_overrun));
+ } else {
+ DBG("ring buffer %s, cpu %d: %lu records written, "
+ "%lu records overrun\n",
+ chan->backend.name, cpu,
+ v_read(config, &buf->records_count),
+ v_read(config, &buf->records_overrun));
+
+ if (v_read(config, &buf->records_lost_full)
+ || v_read(config, &buf->records_lost_wrap)
+ || v_read(config, &buf->records_lost_big))
+ DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
+ " [ %lu buffer full, %lu nested buffer wrap-around, "
+ "%lu event too big ]\n",
+ chan->backend.name, cpu,
+ v_read(config, &buf->records_lost_full),
+ v_read(config, &buf->records_lost_wrap),
+ v_read(config, &buf->records_lost_big));
+ }
+ lib_ring_buffer_print_buffer_errors(buf, chan, cpu, handle);
+}
+
+/*
+ * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
+ *
+ * Only executed by SWITCH_FLUSH, which can be issued while tracing is
+ * active or at buffer finalization (destroy).
+ */
+static
+void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct switch_offsets *offsets,
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ unsigned long oldidx = subbuf_index(offsets->old, chan);
+ unsigned long commit_count;
+ struct commit_counters_hot *cc_hot;
+
+ config->cb.buffer_begin(buf, tsc, oldidx, handle);
+
+ /*
+ * Order all writes to buffer before the commit count update that will
+ * determine that the subbuffer is full.
+ */
+ cmm_smp_wmb();
+ cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
+ if (!cc_hot)
+ return;
+ v_add(config, config->cb.subbuffer_header_size(),
+ &cc_hot->cc);
+ commit_count = v_read(config, &cc_hot->cc);
+ /* Check if the written buffer has to be delivered */
+ lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
+ commit_count, oldidx, handle, tsc);
+ lib_ring_buffer_write_commit_counter(config, buf, chan,
+ offsets->old + config->cb.subbuffer_header_size(),
+ commit_count, handle, cc_hot);
+}
+
+/*
+ * lib_ring_buffer_switch_old_end: switch old subbuffer
+ *
+ * Note : offset_old should never be 0 here. It is ok, because we never perform
+ * buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The caller
+ * increments the offset_old value when doing a SWITCH_FLUSH on an empty
+ * subbuffer.
+ */
+static
+void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct switch_offsets *offsets,
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
+ unsigned long commit_count, padding_size, data_size;
+ struct commit_counters_hot *cc_hot;
+ uint64_t *ts_end;
+
+ data_size = subbuf_offset(offsets->old - 1, chan) + 1;
+ padding_size = chan->backend.subbuf_size - data_size;
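+ /*
+ * Example (hypothetical values): with subbuf_size = 4096 and the
+ * switch taken at offset 3000 within the sub-buffer, data_size = 3000
+ * and padding_size = 1096; the padding bytes are accounted for in the
+ * commit counter below.
+ */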
+ subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
+ handle);
+
+ ts_end = shmp_index(handle, buf->ts_end, oldidx);
+ if (!ts_end)
+ return;
+ /*
+ * This is the last space reservation in that sub-buffer before
+ * it gets delivered. This provides exclusive access to write to
+ * this sub-buffer's ts_end. There are also no concurrent
+ * readers of that ts_end because delivery of that sub-buffer is
+ * postponed until the commit counter is incremented for the
+ * current space reservation.
+ */
+ *ts_end = tsc;
+
+ /*
+ * Order all writes to buffer and store to ts_end before the commit
+ * count update that will determine that the subbuffer is full.
+ */
+ cmm_smp_wmb();
+ cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
+ if (!cc_hot)
+ return;
+ v_add(config, padding_size, &cc_hot->cc);
+ commit_count = v_read(config, &cc_hot->cc);
+ lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
+ commit_count, oldidx, handle, tsc);
+ lib_ring_buffer_write_commit_counter(config, buf, chan,
+ offsets->old + padding_size, commit_count, handle,
+ cc_hot);
+}
+
+/*
+ * lib_ring_buffer_switch_new_start: Populate new subbuffer.
+ *
+ * This code can be executed unordered : writers may already have written to the
+ * sub-buffer before this code gets executed, so use caution. The commit makes sure
+ * that this code is executed before the deliver of this sub-buffer.
+ */
+static
+void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct switch_offsets *offsets,
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ unsigned long beginidx = subbuf_index(offsets->begin, chan);
+ unsigned long commit_count;
+ struct commit_counters_hot *cc_hot;
+
+ config->cb.buffer_begin(buf, tsc, beginidx, handle);
+
+ /*
+ * Order all writes to buffer before the commit count update that will
+ * determine that the subbuffer is full.
+ */
+ cmm_smp_wmb();
+ cc_hot = shmp_index(handle, buf->commit_hot, beginidx);
+ if (!cc_hot)
+ return;
+ v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
+ commit_count = v_read(config, &cc_hot->cc);
+ /* Check if the written buffer has to be delivered */
+ lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
+ commit_count, beginidx, handle, tsc);
+ lib_ring_buffer_write_commit_counter(config, buf, chan,
+ offsets->begin + config->cb.subbuffer_header_size(),
+ commit_count, handle, cc_hot);
+}
+
+/*
+ * lib_ring_buffer_switch_new_end: finish switching current subbuffer
+ *
+ * Calls subbuffer_set_data_size() to set the data size of the current
+ * sub-buffer. We do not need to perform check_deliver nor commit here,
+ * since this task will be done by the "commit" of the event for which
+ * we are currently doing the space reservation.
+ */
+static
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct switch_offsets *offsets,
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ unsigned long endidx, data_size;
+ uint64_t *ts_end;
+
+ endidx = subbuf_index(offsets->end - 1, chan);
+ data_size = subbuf_offset(offsets->end - 1, chan) + 1;
+ subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
+ handle);
+ ts_end = shmp_index(handle, buf->ts_end, endidx);
+ if (!ts_end)
+ return;
+ /*
+ * This is the last space reservation in that sub-buffer before
+ * it gets delivered. This provides exclusive access to write to
+ * this sub-buffer's ts_end. There are also no concurrent
+ * readers of that ts_end because delivery of that sub-buffer is
+ * postponed until the commit counter is incremented for the
+ * current space reservation.
+ */
+ *ts_end = tsc;
+}
+
+/*
+ * Returns :
+ * 0 if ok
+ * !0 if execution must be aborted.
+ */
+static
+int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct switch_offsets *offsets,
+ uint64_t *tsc,
+ struct lttng_ust_shm_handle *handle)
+{
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ unsigned long off, reserve_commit_diff;
+
+ offsets->begin = v_read(config, &buf->offset);
+ offsets->old = offsets->begin;
+ offsets->switch_old_start = 0;
+ off = subbuf_offset(offsets->begin, chan);
+
+ *tsc = config->cb.ring_buffer_clock_read(chan);
+
+ /*
+ * Ensure we flush the header of an empty subbuffer when doing the
+ * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
+ * total data gathering duration even if there were no records saved
+ * after the last buffer switch.
+ * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
+ * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
+ * subbuffer header as appropriate.
+ * The next record that reserves space will be responsible for
+ * populating the following subbuffer header. We choose not to populate
+ * the next subbuffer header here because we want to be able to use
+ * SWITCH_ACTIVE for periodic buffer flush, which must
+ * guarantee that all the buffer content (records and header
+ * timestamps) are visible to the reader. This is required for
+ * quiescence guarantees for the fusion merge.
+ */
+ if (mode != SWITCH_FLUSH && !off)
+ return -1; /* we do not have to switch : buffer is empty */
+
+ if (caa_unlikely(off == 0)) {
+ unsigned long sb_index, commit_count;
+ struct commit_counters_cold *cc_cold;
+
+ /*
+ * We are performing a SWITCH_FLUSH. There may be concurrent
+ * writes into the buffer if e.g. invoked while performing a
+ * snapshot on an active trace.
+ *
+ * If the client does not save any header information
+ * (sub-buffer header size == 0), don't switch empty subbuffer
+ * on finalize, because it is invalid to deliver a completely
+ * empty subbuffer.
+ */
+ if (!config->cb.subbuffer_header_size())
+ return -1;
+
+ /* Test new buffer integrity */
+ sb_index = subbuf_index(offsets->begin, chan);
+ cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
+ if (!cc_cold)
+ return -1;
+ commit_count = v_read(config, &cc_cold->cc_sb);
+ reserve_commit_diff =
+ (buf_trunc(offsets->begin, chan)
+ >> chan->backend.num_subbuf_order)
+ - (commit_count & chan->commit_count_mask);
+ if (caa_likely(reserve_commit_diff == 0)) {
+ /* Next subbuffer not being written to. */
+ if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+ subbuf_trunc(offsets->begin, chan)
+ - subbuf_trunc((unsigned long)
+ uatomic_read(&buf->consumed), chan)
+ >= chan->backend.buf_size)) {
+ /*
+ * We do not overwrite non-consumed buffers
+ * and we are full : don't switch.
+ */
+ return -1;
+ } else {
+ /*
+ * Next subbuffer not being written to, and we
+ * are either in overwrite mode or the buffer is
+ * not full. It's safe to write in this new
+ * subbuffer.
+ */
+ }
+ } else {
+ /*
+ * Next subbuffer reserve offset does not match the
+ * commit offset. Don't perform switch in
+ * producer-consumer and overwrite mode. Caused by
+ * either a writer OOPS or too many nested writes over a
+ * reserve/commit pair.
+ */
+ return -1;
+ }
+
+ /*
+ * Need to write the subbuffer start header on finalize.
+ */
+ offsets->switch_old_start = 1;
+ }
+ offsets->begin = subbuf_align(offsets->begin, chan);
+ /* Note: old points to the next subbuf at offset 0 */
+ offsets->end = offsets->begin;
+ return 0;
+}
+
+/*
+ * Force a sub-buffer switch. This operation is completely reentrant : can be
+ * called while tracing is active with absolutely no lock held.
+ *
+ * For RING_BUFFER_SYNC_PER_CPU ring buffers, as a v_cmpxchg is used for
+ * some atomic operations, this function must be called from the CPU
+ * which owns the buffer for an ACTIVE flush. However, for
+ * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
+ * from any CPU.
+ */
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct lttng_ust_lib_ring_buffer_channel *chan;
+ const struct lttng_ust_lib_ring_buffer_config *config;
+ struct switch_offsets offsets;
+ unsigned long oldidx;
+ uint64_t tsc;
+
+ chan = shmp(handle, buf->backend.chan);
+ if (!chan)
+ return;
+ config = &chan->backend.config;
+
+ offsets.size = 0;
+
+ /*
+ * Perform retryable operations.
+ */
+ do {
+ if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
+ &tsc, handle))
+ return; /* Switch not needed */
+ } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
+ != offsets.old);
+
+ /*
+ * Atomically update last_tsc. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full TSC
+ * records, never the opposite (missing a full TSC record when it would
+ * be needed).
+ */
+ save_last_tsc(config, buf, tsc);
+
+ /*
+ * Push the reader if necessary
+ */
+ lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
+
+ oldidx = subbuf_index(offsets.old, chan);
+ lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);
+
+ /*
+ * May need to populate header start on SWITCH_FLUSH.
+ */
+ if (offsets.switch_old_start) {
+ lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
+ offsets.old += config->cb.subbuffer_header_size();
+ }
+
+ /*
+ * Switch old subbuffer.
+ */
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
+}
+
+static
+bool handle_blocking_retry(int *timeout_left_ms)
+{
+ int timeout = *timeout_left_ms, delay;
+
+ if (caa_likely(!timeout))
+ return false; /* Do not retry, discard event. */
+ if (timeout < 0) /* Wait forever. */
+ delay = RETRY_DELAY_MS;
+ else
+ delay = min_t(int, timeout, RETRY_DELAY_MS);
+ (void) poll(NULL, 0, delay);
+ if (timeout > 0)
+ *timeout_left_ms -= delay;
+ return true; /* Retry. */
+}
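+
+/*
+ * For example (hypothetical values): with *timeout_left_ms == 250 and
+ * RETRY_DELAY_MS == 100, three successive calls sleep 100, 100 and
+ * 50 ms; the timeout then reaches 0 and the next call discards the
+ * event. A timeout of -1 retries forever in RETRY_DELAY_MS slices.
+ */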
+
+/*
+ * Returns :
+ * 0 if ok
+ * -ENOSPC if event size is too large for packet.
+ * -ENOBUFS if there is currently not enough space in buffer for the event.
+ * -EIO if data cannot be written into the buffer for any other reason.
+ */
+static
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct switch_offsets *offsets,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ unsigned long reserve_commit_diff, offset_cmp;
+ int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
+
+retry:
+ offsets->begin = offset_cmp = v_read(config, &buf->offset);
+ offsets->old = offsets->begin;
+ offsets->switch_new_start = 0;
+ offsets->switch_new_end = 0;
+ offsets->switch_old_end = 0;
+ offsets->pre_header_padding = 0;
+
+ ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->tsc == -EIO)
+ return -EIO;
+
+ if (last_tsc_overflow(config, buf, ctx_private->tsc))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
+ offsets->switch_new_start = 1; /* For offsets->begin */
+ } else {
+ offsets->size = config->cb.record_header_size(config, chan,
+ offsets->begin,
+ &offsets->pre_header_padding,
+ ctx, client_ctx);
+ offsets->size +=
+ lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+ ctx->largest_align)
+ + ctx->data_size;
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
+ offsets->size > chan->backend.subbuf_size)) {
+ offsets->switch_old_end = 1; /* For offsets->old */
+ offsets->switch_new_start = 1; /* For offsets->begin */
+ }
+ }
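+ /*
+ * Example (hypothetical values): a 16-byte record header followed by
+ * a 24-byte payload aligned on 8 bytes, reserved at offset 4100,
+ * gives offsets->size = 16 + 4 (alignment of 4116 up to 4120) + 24 =
+ * 44 bytes.
+ */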
+ if (caa_unlikely(offsets->switch_new_start)) {
+ unsigned long sb_index, commit_count;
+ struct commit_counters_cold *cc_cold;
+
+ /*
+ * We are typically not filling the previous buffer completely.
+ */
+ if (caa_likely(offsets->switch_old_end))
+ offsets->begin = subbuf_align(offsets->begin, chan);
+ offsets->begin = offsets->begin
+ + config->cb.subbuffer_header_size();
+ /* Test new buffer integrity */
+ sb_index = subbuf_index(offsets->begin, chan);
+ /*
+ * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
+ * lib_ring_buffer_check_deliver() has the matching
+ * memory barriers required around commit_cold cc_sb
+ * updates to ensure reserve and commit counter updates
+ * are not seen reordered when updated by another CPU.
+ */
+ cmm_smp_rmb();
+ cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
+ if (!cc_cold)
+ return -1;
+ commit_count = v_read(config, &cc_cold->cc_sb);
+ /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
+ cmm_smp_rmb();
+ if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
+ /*
+ * The reserve counter has been concurrently updated
+ * while we read the commit counter. This means the
+ * commit counter we read might not match buf->offset
+ * due to concurrent update. We therefore need to retry.
+ */
+ goto retry;
+ }
+ reserve_commit_diff =
+ (buf_trunc(offsets->begin, chan)
+ >> chan->backend.num_subbuf_order)
+ - (commit_count & chan->commit_count_mask);
+ if (caa_likely(reserve_commit_diff == 0)) {
+ /* Next subbuffer not being written to. */
+ if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+ subbuf_trunc(offsets->begin, chan)
+ - subbuf_trunc((unsigned long)
+ uatomic_read(&buf->consumed), chan)
+ >= chan->backend.buf_size)) {
+ unsigned long nr_lost;
+
+ if (handle_blocking_retry(&timeout_left_ms))
+ goto retry;
+
+ /*
+ * We do not overwrite non-consumed buffers
+ * and we are full : record is lost.
+ */
+ nr_lost = v_read(config, &buf->records_lost_full);
+ v_inc(config, &buf->records_lost_full);
+ if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+ DBG("%lu or more records lost in (%s:%d) (buffer full)\n",
+ nr_lost + 1, chan->backend.name,
+ buf->backend.cpu);
+ }
+ return -ENOBUFS;
+ } else {
+ /*
+ * Next subbuffer not being written to, and we
+ * are either in overwrite mode or the buffer is
+ * not full. It's safe to write in this new
+ * subbuffer.
+ */
+ }
+ } else {
+ unsigned long nr_lost;
+
+ /*
+ * Next subbuffer reserve offset does not match the
+ * commit offset, and this did not involve update to the
+ * reserve counter. Drop record in producer-consumer and
+ * overwrite mode. Caused by either a writer OOPS or too
+ * many nested writes over a reserve/commit pair.
+ */
+ nr_lost = v_read(config, &buf->records_lost_wrap);
+ v_inc(config, &buf->records_lost_wrap);
+ if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+ DBG("%lu or more records lost in (%s:%d) (wrap-around)\n",
+ nr_lost + 1, chan->backend.name,
+ buf->backend.cpu);
+ }
+ return -EIO;
+ }
+ offsets->size =
+ config->cb.record_header_size(config, chan,
+ offsets->begin,
+ &offsets->pre_header_padding,
+ ctx, client_ctx);
+ offsets->size +=
+ lttng_ust_lib_ring_buffer_align(offsets->begin + offsets->size,
+ ctx->largest_align)
+ + ctx->data_size;
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan)
+ + offsets->size > chan->backend.subbuf_size)) {
+ unsigned long nr_lost;
+
+ /*
+ * Record too big for subbuffers, report error, don't
+ * complete the sub-buffer switch.
+ */
+ nr_lost = v_read(config, &buf->records_lost_big);
+ v_inc(config, &buf->records_lost_big);
+ if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+ DBG("%lu or more records lost in (%s:%d) record size "
+ " of %zu bytes is too large for buffer\n",
+ nr_lost + 1, chan->backend.name,
+ buf->backend.cpu, offsets->size);
+ }
+ return -ENOSPC;
+ } else {
+ /*
+ * We just made a successful buffer switch and the
+ * record fits in the new subbuffer. Let's write.
+ */
+ }
+ } else {
+ /*
+ * Record fits in the current buffer and we are not on a switch
+ * boundary. It's safe to write.
+ */
+ }
+ offsets->end = offsets->begin + offsets->size;
+
+ if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+ /*
+ * The offset_end will fall at the very beginning of the next
+ * subbuffer.
+ */
+ offsets->switch_new_end = 1; /* For offsets->begin */
+ }
+ return 0;
+}
+
+/**
+ * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
+ * @ctx: ring buffer context.
+ *
+ * Returns: -ENOBUFS if not enough space, -ENOSPC if event size too large,
+ * -EIO for other errors, else returns 0.
+ * It will take care of sub-buffer switching.
+ */
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
+{
+ struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_shm_handle *handle = chan->handle;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_lib_ring_buffer *buf;
+ struct switch_offsets offsets;
+ int ret;
+
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
+ else
+ buf = shmp(handle, chan->backend.buf[0].shmp);
+ if (!buf)
+ return -EIO;
+ ctx_private->buf = buf;
+
+ offsets.size = 0;
+
+ do {
+ ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
+ ctx, client_ctx);
+ if (caa_unlikely(ret))
+ return ret;
+ } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+ offsets.end)
+ != offsets.old));
+
+ /*
+ * Atomically update last_tsc. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full TSC
+ * records, never the opposite (missing a full TSC record when it would
+ * be needed).
+ */
+ save_last_tsc(config, buf, ctx_private->tsc);
+
+ /*
+ * Push the reader if necessary
+ */
+ lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
+
+ /*
+ * Clear noref flag for this subbuffer.
+ */
+ lib_ring_buffer_clear_noref(config, &buf->backend,
+ subbuf_index(offsets.end - 1, chan),
+ handle);
+
+ /*
+ * Switch old subbuffer if needed.
+ */
+ if (caa_unlikely(offsets.switch_old_end)) {
+ lib_ring_buffer_clear_noref(config, &buf->backend,
+ subbuf_index(offsets.old - 1, chan),
+ handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
+ }
+
+ /*
+ * Populate new subbuffer.
+ */
+ if (caa_unlikely(offsets.switch_new_start))
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
+
+ if (caa_unlikely(offsets.switch_new_end))
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
+
+ ctx_private->slot_size = offsets.size;
+ ctx_private->pre_offset = offsets.begin;
+ ctx_private->buf_offset = offsets.begin + offsets.pre_header_padding;
+ return 0;
+}
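+
+/*
+ * Illustrative sketch of how this slow path is reached: a client record
+ * write follows the reserve/write/commit pattern of the frontend API
+ * (error handling elided):
+ *
+ *	if (lib_ring_buffer_reserve(config, &ctx, client_ctx) < 0)
+ *		goto record_lost;
+ *	lib_ring_buffer_write(config, &ctx, payload, payload_len);
+ *	lib_ring_buffer_commit(config, &ctx);
+ *
+ * lib_ring_buffer_reserve() only falls back to
+ * lib_ring_buffer_reserve_slow() when the reservation cannot complete on
+ * the fast path (e.g. a sub-buffer switch is needed).
+ */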
+
+static
+void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long commit_count,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ struct commit_counters_hot *cc_hot;
+
+ if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
+ return;
+ cc_hot = shmp_index(handle, buf->commit_hot, idx);
+ if (!cc_hot)
+ return;
+ v_set(config, &cc_hot->seq, commit_count);
+}
+
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but this counting is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
+static
+void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle)
+{
+ v_add(config, subbuffer_get_records_count(config,
+ &buf->backend, idx, handle),
+ &buf->records_count);
+ v_add(config, subbuffer_count_records_overrun(config,
+ &buf->backend, idx, handle),
+ &buf->records_overrun);
+}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static
+void deliver_count_events(
+ const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_lib_ring_buffer *buf __attribute__((unused)),
+ unsigned long idx __attribute__((unused)),
+ struct lttng_ust_shm_handle *handle __attribute__((unused)))
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
+
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ unsigned long offset,
+ unsigned long commit_count,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle,
+ uint64_t tsc __attribute__((unused)))
+{
+ unsigned long old_commit_count = commit_count
+ - chan->backend.subbuf_size;
+ struct commit_counters_cold *cc_cold;
+
+ /*
+ * If we succeeded at updating cc_sb below, we are the subbuffer
+	 * writer delivering the subbuffer. This deals with concurrent
+	 * updates of the "cc" value without adding an add_return atomic
+	 * operation to the fast path.
+ *
+ * We are doing the delivery in two steps:
+ * - First, we cmpxchg() cc_sb to the new value
+ * old_commit_count + 1. This ensures that we are the only
+ * subbuffer user successfully filling the subbuffer, but we
+ * do _not_ set the cc_sb value to "commit_count" yet.
+ * Therefore, other writers that would wrap around the ring
+ * buffer and try to start writing to our subbuffer would
+ * have to drop records, because it would appear as
+ * non-filled.
+ * We therefore have exclusive access to the subbuffer control
+ * structures. This mutual exclusion with other writers is
+	 * crucially important for counting record overruns locklessly in
+	 * flight recorder mode.
+ * - When we are ready to release the subbuffer (either for
+ * reading or for overrun by other writers), we simply set the
+ * cc_sb value to "commit_count" and perform delivery.
+ *
+	 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
+ * This guarantees that old_commit_count + 1 != commit_count.
+ */
+
+ /*
+ * Order prior updates to reserve count prior to the
+ * commit_cold cc_sb update.
+ */
+ cmm_smp_wmb();
+ cc_cold = shmp_index(handle, buf->commit_cold, idx);
+ if (!cc_cold)
+ return;
+ if (caa_likely(v_cmpxchg(config, &cc_cold->cc_sb,
+ old_commit_count, old_commit_count + 1)
+ == old_commit_count)) {
+ uint64_t *ts_end;
+
+ /*
+ * Start of exclusive subbuffer access. We are
+ * guaranteed to be the last writer in this subbuffer
+ * and any other writer trying to access this subbuffer
+ * in this state is required to drop records.
+ *
+ * We can read the ts_end for the current sub-buffer
+ * which has been saved by the very last space
+ * reservation for the current sub-buffer.
+ *
+ * Order increment of commit counter before reading ts_end.
+ */
+ cmm_smp_mb();
+ ts_end = shmp_index(handle, buf->ts_end, idx);
+ if (!ts_end)
+ return;
+ deliver_count_events(config, buf, idx, handle);
+ config->cb.buffer_end(buf, *ts_end, idx,
+ lib_ring_buffer_get_data_size(config,
+ buf,
+ idx,
+ handle),
+ handle);
+
+ /*
+ * Increment the packet counter while we have exclusive
+ * access.
+ */
+ subbuffer_inc_packet_count(config, &buf->backend, idx, handle);
+
+ /*
+ * Set noref flag and offset for this subbuffer id.
+ * Contains a memory barrier that ensures counter stores
+ * are ordered before set noref and offset.
+ */
+ lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
+ buf_trunc_val(offset, chan), handle);
+
+ /*
+ * Order set_noref and record counter updates before the
+ * end of subbuffer exclusive access. Orders with
+ * respect to writers coming into the subbuffer after
+ * wrap around, and also order wrt concurrent readers.
+ */
+ cmm_smp_mb();
+ /* End of exclusive subbuffer access */
+ v_set(config, &cc_cold->cc_sb, commit_count);
+ /*
+ * Order later updates to reserve count after
+ * the commit cold cc_sb update.
+ */
+ cmm_smp_wmb();
+ lib_ring_buffer_vmcore_check_deliver(config, buf,
+ commit_count, idx, handle);
+
+ /*
+ * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
+ */
+ if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
+ && uatomic_read(&buf->active_readers)
+ && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
+ lib_ring_buffer_wakeup(buf, handle);
+ }
+ }
+}
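+
+/*
+ * Condensed sketch of the two-step delivery implemented above
+ * (finalize_packet() is a placeholder for the buffer_end callback and
+ * counter updates performed under exclusive access):
+ *
+ *	old = commit_count - chan->backend.subbuf_size;
+ *	if (v_cmpxchg(config, &cc_cold->cc_sb, old, old + 1) == old) {
+ *		// Step 1: exclusive access; concurrent writers wrapping
+ *		// around see a non-filled subbuffer and drop records.
+ *		finalize_packet();
+ *		// Step 2: publish the subbuffer for readers/overwrite.
+ *		v_set(config, &cc_cold->cc_sb, commit_count);
+ *	}
+ */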
+
+/*
+ * Force a read (implying a TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_ringbuffer_tls(void)
+{
+ asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
+}
+
+void lib_ringbuffer_signal_init(void)
+{
+ sigset_t mask;
+ int ret;
+
+ /*
+	 * Block the signal for the entire process, so that only our
+	 * thread processes it.
+ */
+ rb_setmask(&mask);
+ ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
+ if (ret) {
+ errno = ret;
+ PERROR("pthread_sigmask");
+ }
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2021 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * Ring buffer configuration header. Note: after declaring the standard inline
+ * functions, clients should also include linux/ringbuffer/api.h.
+ */
+
+#ifndef _LTTNG_RING_BUFFER_CONFIG_H
+#define _LTTNG_RING_BUFFER_CONFIG_H
+
+#include <errno.h>
+#include <stdint.h>
+#include <stddef.h>
+#include <urcu/arch.h>
+#include <string.h>
+
+#include <lttng/ust-utils.h>
+#include <lttng/ust-compiler.h>
+#include <lttng/ust-tracer.h>
+
+struct lttng_ust_lib_ring_buffer;
+struct lttng_ust_lib_ring_buffer_channel;
+struct lttng_ust_lib_ring_buffer_config;
+struct lttng_ust_lib_ring_buffer_ctx_private;
+struct lttng_ust_shm_handle;
+
+/*
+ * Ring buffer client callbacks. Only used by slow path, never on fast path.
+ * For the fast path, record_header_size() and ring_buffer_clock_read() should be
+ * provided as inline functions too. These may simply return 0 if not used by
+ * the client.
+ */
+struct lttng_ust_lib_ring_buffer_client_cb {
+ /* Mandatory callbacks */
+
+ /* A static inline version is also required for fast path */
+ uint64_t (*ring_buffer_clock_read) (struct lttng_ust_lib_ring_buffer_channel *chan);
+ size_t (*record_header_size) (const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ size_t offset,
+ size_t *pre_header_padding,
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
+
+ /* Slow path only, at subbuffer switch */
+ size_t (*subbuffer_header_size) (void);
+ void (*buffer_begin) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ unsigned int subbuf_idx,
+ struct lttng_ust_shm_handle *handle);
+ void (*buffer_end) (struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc,
+ unsigned int subbuf_idx, unsigned long data_size,
+ struct lttng_ust_shm_handle *handle);
+
+ /* Optional callbacks (can be set to NULL) */
+
+ /* Called at buffer creation/finalize */
+ int (*buffer_create) (struct lttng_ust_lib_ring_buffer *buf, void *priv,
+ int cpu, const char *name,
+ struct lttng_ust_shm_handle *handle);
+ /*
+ * Clients should guarantee that no new reader handle can be opened
+ * after finalize.
+ */
+ void (*buffer_finalize) (struct lttng_ust_lib_ring_buffer *buf,
+ void *priv, int cpu,
+ struct lttng_ust_shm_handle *handle);
+
+ /*
+ * Extract header length, payload length and timestamp from event
+ * record. Used by buffer iterators. Timestamp is only used by channel
+ * iterator.
+ */
+ void (*record_get) (const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer_channel *chan,
+ struct lttng_ust_lib_ring_buffer *buf,
+ size_t offset, size_t *header_len,
+ size_t *payload_len, uint64_t *timestamp,
+ struct lttng_ust_shm_handle *handle);
+ /*
+ * Offset and size of content size field in client.
+ */
+ void (*content_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
+ size_t *offset, size_t *length);
+ void (*packet_size_field) (const struct lttng_ust_lib_ring_buffer_config *config,
+ size_t *offset, size_t *length);
+};
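+
+/*
+ * Minimal sketch of a mandatory client callback, assuming a
+ * hypothetical client_clock_read64() time source:
+ *
+ *	static uint64_t client_ring_buffer_clock_read(
+ *			struct lttng_ust_lib_ring_buffer_channel *chan)
+ *	{
+ *		return client_clock_read64();
+ *	}
+ */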
+
+/*
+ * Ring buffer instance configuration.
+ *
+ * Declare as "static const" within the client object to ensure the inline fast
+ * paths can be optimized.
+ *
+ * alloc/sync pairs:
+ *
+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_PER_CPU :
+ * Per-cpu buffers with per-cpu synchronization.
+ *
+ * RING_BUFFER_ALLOC_PER_CPU and RING_BUFFER_SYNC_GLOBAL :
+ * Per-cpu buffer with global synchronization. Tracing can be performed with
+ * preemption enabled, statistically stays on the local buffers.
+ *
+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU :
+ * Should only be used for buffers belonging to a single thread or protected
+ * by mutual exclusion by the client. Note that the periodic sub-buffer switch
+ * should be disabled in this kind of configuration.
+ *
+ * RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_GLOBAL :
+ * Global shared buffer with global synchronization.
+ *
+ * wakeup:
+ *
+ * RING_BUFFER_WAKEUP_BY_TIMER uses per-cpu deferrable timers to poll the
+ * buffers and wake up readers if data is ready. Mainly useful for tracers which
+ * don't want to call into the wakeup code on the tracing path. Use in
+ * combination with "read_timer_interval" channel_create() argument.
+ *
+ * RING_BUFFER_WAKEUP_BY_WRITER directly wakes up readers when a subbuffer is
+ * ready to read. Lower latencies before the reader is woken up. Mainly suitable
+ * for drivers.
+ *
+ * RING_BUFFER_WAKEUP_NONE does not perform any wakeup whatsoever. The client
+ * has the responsibility to perform wakeups.
+ */
+#define LTTNG_UST_RING_BUFFER_CONFIG_PADDING 20
+
+enum lttng_ust_lib_ring_buffer_alloc_types {
+ RING_BUFFER_ALLOC_PER_CPU,
+ RING_BUFFER_ALLOC_GLOBAL,
+};
+
+enum lttng_ust_lib_ring_buffer_sync_types {
+ RING_BUFFER_SYNC_PER_CPU, /* Wait-free */
+ RING_BUFFER_SYNC_GLOBAL, /* Lock-free */
+};
+
+enum lttng_ust_lib_ring_buffer_mode_types {
+ RING_BUFFER_OVERWRITE = 0, /* Overwrite when buffer full */
+ RING_BUFFER_DISCARD = 1, /* Discard when buffer full */
+};
+
+enum lttng_ust_lib_ring_buffer_output_types {
+ RING_BUFFER_SPLICE,
+ RING_BUFFER_MMAP,
+ RING_BUFFER_READ, /* TODO */
+ RING_BUFFER_ITERATOR,
+ RING_BUFFER_NONE,
+};
+
+enum lttng_ust_lib_ring_buffer_backend_types {
+ RING_BUFFER_PAGE,
+ RING_BUFFER_VMAP, /* TODO */
+ RING_BUFFER_STATIC, /* TODO */
+};
+
+enum lttng_ust_lib_ring_buffer_oops_types {
+ RING_BUFFER_NO_OOPS_CONSISTENCY,
+ RING_BUFFER_OOPS_CONSISTENCY,
+};
+
+enum lttng_ust_lib_ring_buffer_ipi_types {
+ RING_BUFFER_IPI_BARRIER,
+ RING_BUFFER_NO_IPI_BARRIER,
+};
+
+enum lttng_ust_lib_ring_buffer_wakeup_types {
+ RING_BUFFER_WAKEUP_BY_TIMER, /* wake up performed by timer */
+ RING_BUFFER_WAKEUP_BY_WRITER, /*
+ * writer wakes up reader,
+ * not lock-free
+ * (takes spinlock).
+ */
+};
+
+struct lttng_ust_lib_ring_buffer_config {
+ enum lttng_ust_lib_ring_buffer_alloc_types alloc;
+ enum lttng_ust_lib_ring_buffer_sync_types sync;
+ enum lttng_ust_lib_ring_buffer_mode_types mode;
+ enum lttng_ust_lib_ring_buffer_output_types output;
+ enum lttng_ust_lib_ring_buffer_backend_types backend;
+ enum lttng_ust_lib_ring_buffer_oops_types oops;
+ enum lttng_ust_lib_ring_buffer_ipi_types ipi;
+ enum lttng_ust_lib_ring_buffer_wakeup_types wakeup;
+ /*
+ * tsc_bits: timestamp bits saved at each record.
+ * 0 and 64 disable the timestamp compression scheme.
+ */
+ unsigned int tsc_bits;
+ struct lttng_ust_lib_ring_buffer_client_cb cb;
+ /*
+ * client_type is used by the consumer process (which is in a
+	 * different address space) to look up the appropriate client
+ * callbacks and update the cb pointers.
+ */
+ int client_type;
+ int _unused1;
+ const struct lttng_ust_lib_ring_buffer_client_cb *cb_ptr;
+ char padding[LTTNG_UST_RING_BUFFER_CONFIG_PADDING];
+};
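+
+/*
+ * Sketch of a client configuration declaration; the callback names and
+ * field values shown are placeholders, actual values are client-specific:
+ *
+ *	static const struct lttng_ust_lib_ring_buffer_config client_config = {
+ *		.cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
+ *		.cb.record_header_size = client_record_header_size,
+ *		.cb.subbuffer_header_size = client_packet_header_size,
+ *		.cb.buffer_begin = client_buffer_begin,
+ *		.cb.buffer_end = client_buffer_end,
+ *		.tsc_bits = 27,
+ *		.alloc = RING_BUFFER_ALLOC_PER_CPU,
+ *		.sync = RING_BUFFER_SYNC_GLOBAL,
+ *		.mode = RING_BUFFER_DISCARD,
+ *		.backend = RING_BUFFER_PAGE,
+ *		.output = RING_BUFFER_MMAP,
+ *		.oops = RING_BUFFER_OOPS_CONSISTENCY,
+ *		.ipi = RING_BUFFER_NO_IPI_BARRIER,
+ *		.wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
+ *	};
+ *
+ * Declaring the configuration "static const" lets the compiler
+ * specialize the inline fast paths for this client.
+ */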
+
+/*
+ * Reservation flags.
+ *
+ * RING_BUFFER_RFLAG_FULL_TSC
+ *
+ * This flag is passed to record_header_size() and to the primitive used to
+ * write the record header. It indicates that the full 64-bit time value is
+ * needed in the record header. If this flag is not set, the record header needs
+ * only to contain "tsc_bits" bit of time value.
+ *
+ * Reservation flags can be added by the client, starting from
+ * "(RING_BUFFER_FLAGS_END << 0)". It can be used to pass information from
+ * record_header_size() to lib_ring_buffer_write_record_header().
+ */
+#define RING_BUFFER_RFLAG_FULL_TSC (1U << 0)
+#define RING_BUFFER_RFLAG_END (1U << 1)
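+
+/*
+ * For instance, a client wanting two private reservation flags could
+ * define (hypothetical names):
+ *
+ *	#define CLIENT_RFLAG_EXTENDED	(RING_BUFFER_RFLAG_END << 0)
+ *	#define CLIENT_RFLAG_COMPACT	(RING_BUFFER_RFLAG_END << 1)
+ */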
+
+/*
+ * lib_ring_buffer_check_config() returns 0 on success.
+ * Used internally to check for valid configurations at channel creation.
+ */
+static inline
+int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval)
+ lttng_ust_notrace;
+
+static inline
+int lib_ring_buffer_check_config(const struct lttng_ust_lib_ring_buffer_config *config,
+ unsigned int switch_timer_interval,
+ unsigned int read_timer_interval __attribute__((unused)))
+{
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL
+ && config->sync == RING_BUFFER_SYNC_PER_CPU
+ && switch_timer_interval)
+ return -EINVAL;
+ return 0;
+}
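+
+/*
+ * Rationale: with RING_BUFFER_ALLOC_GLOBAL and RING_BUFFER_SYNC_PER_CPU,
+ * the buffer is owned by a single thread (or protected by client-side
+ * mutual exclusion), so a periodic switch timer firing from another
+ * context could race with the owner; such configurations are rejected.
+ */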
+
+#endif /* _LTTNG_RING_BUFFER_CONFIG_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include "shm.h"
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/stat.h> /* For mode constants */
+#include <fcntl.h> /* For O_* constants */
+#include <assert.h>
+#include <stdio.h>
+#include <signal.h>
+#include <dirent.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#ifdef HAVE_LIBNUMA
+#include <numa.h>
+#include <numaif.h>
+#endif
+
+#include <lttng/ust-utils.h>
+
+#include <ust-helper.h>
+#include <ust-fd.h>
+#include "mmap.h"
+
+/*
+ * Ensure we have the required amount of space available by writing 0
+ * into the entire buffer. Not doing so can trigger SIGBUS when going
+ * beyond the available shm space.
+ */
+static
+int zero_file(int fd, size_t len)
+{
+ ssize_t retlen;
+ size_t written = 0;
+ char *zeropage;
+ long pagelen;
+ int ret;
+
+ pagelen = sysconf(_SC_PAGESIZE);
+ if (pagelen < 0)
+ return (int) pagelen;
+ zeropage = calloc(pagelen, 1);
+ if (!zeropage)
+ return -ENOMEM;
+
+ while (len > written) {
+ do {
+ retlen = write(fd, zeropage,
+ min_t(size_t, pagelen, len - written));
+ } while (retlen == -1UL && errno == EINTR);
+ if (retlen < 0) {
+ ret = (int) retlen;
+ goto error;
+ }
+ written += retlen;
+ }
+ ret = 0;
+error:
+ free(zeropage);
+ return ret;
+}
+
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+{
+ struct shm_object_table *table;
+
+ table = zmalloc(sizeof(struct shm_object_table) +
+ max_nb_obj * sizeof(table->objects[0]));
+ if (!table)
+ return NULL;
+ table->size = max_nb_obj;
+ return table;
+}
+
+static
+struct shm_object *_shm_object_table_alloc_shm(struct shm_object_table *table,
+ size_t memory_map_size,
+ int stream_fd)
+{
+ int shmfd, waitfd[2], ret, i;
+ struct shm_object *obj;
+ char *memory_map;
+
+ if (stream_fd < 0)
+ return NULL;
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ /* wait_fd: create pipe */
+ ret = pipe(waitfd);
+ if (ret < 0) {
+ PERROR("pipe");
+ goto error_pipe;
+ }
+ for (i = 0; i < 2; i++) {
+ ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
+ }
+ /* The write end of the pipe needs to be non-blocking */
+ ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
+ memcpy(obj->wait_fd, waitfd, sizeof(waitfd));
+
+ /*
+ * Set POSIX shared memory object size
+ *
+	 * First, use ftruncate() to set its size; some implementations won't
+ * allow writes past the size set by ftruncate.
+ * Then, use write() to fill it with zeros, this allows us to fully
+ * allocate it and detect a shortage of shm space without dealing with
+ * a SIGBUS.
+ */
+
+ shmfd = stream_fd;
+ ret = ftruncate(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("ftruncate");
+ goto error_ftruncate;
+ }
+ ret = zero_file(shmfd, memory_map_size);
+ if (ret) {
+ PERROR("zero_file");
+ goto error_zero_file;
+ }
+
+ /*
+ * Also ensure the file metadata is synced with the storage by using
+	 * fsync(2). Some platforms don't allow fsync on POSIX shm fds; ignore
+ * EINVAL accordingly.
+ */
+ ret = fsync(shmfd);
+ if (ret && errno != EINVAL) {
+ PERROR("fsync");
+ goto error_fsync;
+ }
+ obj->shm_fd_ownership = 0;
+ obj->shm_fd = shmfd;
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shmfd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_mmap:
+error_fsync:
+error_ftruncate:
+error_zero_file:
+error_fcntl:
+ for (i = 0; i < 2; i++) {
+ ret = close(waitfd[i]);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+error_pipe:
+ return NULL;
+}
+
+static
+struct shm_object *_shm_object_table_alloc_mem(struct shm_object_table *table,
+ size_t memory_map_size)
+{
+ struct shm_object *obj;
+ void *memory_map;
+ int waitfd[2], i, ret;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ memory_map = zmalloc(memory_map_size);
+ if (!memory_map)
+ goto alloc_error;
+
+ /* wait_fd: create pipe */
+ ret = pipe(waitfd);
+ if (ret < 0) {
+ PERROR("pipe");
+ goto error_pipe;
+ }
+ for (i = 0; i < 2; i++) {
+ ret = fcntl(waitfd[i], F_SETFD, FD_CLOEXEC);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
+ }
+ /* The write end of the pipe needs to be non-blocking */
+ ret = fcntl(waitfd[1], F_SETFL, O_NONBLOCK);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
+ memcpy(obj->wait_fd, waitfd, sizeof(waitfd));
+
+ /* no shm_fd */
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ obj->type = SHM_OBJECT_MEM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = 0;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_fcntl:
+ for (i = 0; i < 2; i++) {
+ ret = close(waitfd[i]);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+error_pipe:
+ free(memory_map);
+alloc_error:
+ return NULL;
+}
+
+/*
+ * libnuma prints errors on the console even for numa_available().
+ * Work-around this limitation by using get_mempolicy() directly to
+ * check whether the kernel supports mempolicy.
+ */
+#ifdef HAVE_LIBNUMA
+static bool lttng_is_numa_available(void)
+{
+ int ret;
+
+ ret = get_mempolicy(NULL, NULL, 0, NULL, 0);
+ if (ret && errno == ENOSYS) {
+ return false;
+ }
+ return numa_available() > 0;
+}
+#endif
+
+struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
+ size_t memory_map_size,
+ enum shm_object_type type,
+ int stream_fd,
+ int cpu)
+{
+ struct shm_object *shm_object;
+#ifdef HAVE_LIBNUMA
+ int oldnode = 0, node;
+ bool numa_avail;
+
+ numa_avail = lttng_is_numa_available();
+ if (numa_avail) {
+ oldnode = numa_preferred();
+ if (cpu >= 0) {
+ node = numa_node_of_cpu(cpu);
+ if (node >= 0)
+ numa_set_preferred(node);
+ }
+ if (cpu < 0 || node < 0)
+ numa_set_localalloc();
+ }
+#endif /* HAVE_LIBNUMA */
+ switch (type) {
+ case SHM_OBJECT_SHM:
+ shm_object = _shm_object_table_alloc_shm(table, memory_map_size,
+ stream_fd);
+ break;
+ case SHM_OBJECT_MEM:
+ shm_object = _shm_object_table_alloc_mem(table, memory_map_size);
+ break;
+ default:
+ assert(0);
+ }
+#ifdef HAVE_LIBNUMA
+ if (numa_avail)
+ numa_set_preferred(oldnode);
+#endif /* HAVE_LIBNUMA */
+ return shm_object;
+}
+
+struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
+ int shm_fd, int wakeup_fd, uint32_t stream_nr,
+ size_t memory_map_size)
+{
+ struct shm_object *obj;
+ char *memory_map;
+ int ret;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ /* streams _must_ be received in sequential order, else fail. */
+ if (stream_nr + 1 != table->allocated_len)
+ return NULL;
+
+ obj = &table->objects[table->allocated_len];
+
+ /* wait_fd: set write end of the pipe. */
+ obj->wait_fd[0] = -1; /* read end is unset */
+ obj->wait_fd[1] = wakeup_fd;
+ obj->shm_fd = shm_fd;
+ obj->shm_fd_ownership = 1;
+
+ /* The write end of the pipe needs to be non-blocking */
+ ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
+
+ /* memory_map: mmap */
+ memory_map = mmap(NULL, memory_map_size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | LTTNG_MAP_POPULATE, shm_fd, 0);
+ if (memory_map == MAP_FAILED) {
+ PERROR("mmap");
+ goto error_mmap;
+ }
+ obj->type = SHM_OBJECT_SHM;
+ obj->memory_map = memory_map;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_fcntl:
+error_mmap:
+ return NULL;
+}
+
+/*
+ * Passing ownership of mem to object.
+ */
+struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
+ void *mem, size_t memory_map_size, int wakeup_fd)
+{
+ struct shm_object *obj;
+ int ret;
+
+ if (table->allocated_len >= table->size)
+ return NULL;
+ obj = &table->objects[table->allocated_len];
+
+ obj->wait_fd[0] = -1; /* read end is unset */
+ obj->wait_fd[1] = wakeup_fd;
+ obj->shm_fd = -1;
+ obj->shm_fd_ownership = 0;
+
+ ret = fcntl(obj->wait_fd[1], F_SETFD, FD_CLOEXEC);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
+ /* The write end of the pipe needs to be non-blocking */
+ ret = fcntl(obj->wait_fd[1], F_SETFL, O_NONBLOCK);
+ if (ret < 0) {
+ PERROR("fcntl");
+ goto error_fcntl;
+ }
+
+ obj->type = SHM_OBJECT_MEM;
+ obj->memory_map = mem;
+ obj->memory_map_size = memory_map_size;
+ obj->allocated_len = memory_map_size;
+ obj->index = table->allocated_len++;
+
+ return obj;
+
+error_fcntl:
+ return NULL;
+}
+
+static
+void shmp_object_destroy(struct shm_object *obj, int consumer)
+{
+ switch (obj->type) {
+ case SHM_OBJECT_SHM:
+ {
+ int ret, i;
+
+ ret = munmap(obj->memory_map, obj->memory_map_size);
+ if (ret) {
+			PERROR("munmap");
+ assert(0);
+ }
+
+ if (obj->shm_fd_ownership) {
+ /* Delete FDs only if called from app (not consumer). */
+ if (!consumer) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(obj->shm_fd);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(obj->shm_fd);
+ } else {
+ PERROR("close");
+ assert(0);
+ }
+ lttng_ust_unlock_fd_tracker();
+ } else {
+ ret = close(obj->shm_fd);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+ }
+ for (i = 0; i < 2; i++) {
+ if (obj->wait_fd[i] < 0)
+ continue;
+ if (!consumer) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(obj->wait_fd[i]);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
+ } else {
+ PERROR("close");
+ assert(0);
+ }
+ lttng_ust_unlock_fd_tracker();
+ } else {
+ ret = close(obj->wait_fd[i]);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+ }
+ break;
+			DBG("%lu or more records lost in (%s:%d) record size"
+				" of %zu bytes is too large for buffer\n",
+ {
+ int ret, i;
+
+ for (i = 0; i < 2; i++) {
+ if (obj->wait_fd[i] < 0)
+ continue;
+ if (!consumer) {
+ lttng_ust_lock_fd_tracker();
+ ret = close(obj->wait_fd[i]);
+ if (!ret) {
+ lttng_ust_delete_fd_from_tracker(obj->wait_fd[i]);
+ } else {
+ PERROR("close");
+ assert(0);
+ }
+ lttng_ust_unlock_fd_tracker();
+ } else {
+ ret = close(obj->wait_fd[i]);
+ if (ret) {
+ PERROR("close");
+ assert(0);
+ }
+ }
+ }
+ free(obj->memory_map);
+ break;
+ }
+ default:
+ assert(0);
+ }
+}
+
+void shm_object_table_destroy(struct shm_object_table *table, int consumer)
+{
+ int i;
+
+ for (i = 0; i < table->allocated_len; i++)
+ shmp_object_destroy(&table->objects[i], consumer);
+ free(table);
+}
+
+/*
+ * zalloc_shm - allocate memory within a shm object.
+ *
+ * Shared memory is already zeroed by zero_file() or zmalloc().
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
+{
+ struct shm_ref ref;
+ struct shm_ref shm_ref_error = { -1, -1 };
+
+ if (obj->memory_map_size - obj->allocated_len < len)
+ return shm_ref_error;
+ ref.index = obj->index;
+ ref.offset = obj->allocated_len;
+ obj->allocated_len += len;
+ return ref;
+}
+
+void align_shm(struct shm_object *obj, size_t align)
+{
+ size_t offset_len = lttng_ust_offset_align(obj->allocated_len, align);
+ obj->allocated_len += offset_len;
+}
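+
+/*
+ * Usage sketch for the allocator above ("struct foo" and "struct bar"
+ * are hypothetical):
+ *
+ *	struct shm_ref foo_ref, bar_ref;
+ *
+ *	foo_ref = zalloc_shm(obj, sizeof(struct foo));
+ *	align_shm(obj, __alignof__(struct bar));
+ *	bar_ref = zalloc_shm(obj, sizeof(struct bar));
+ *	if (foo_ref.index < 0 || bar_ref.index < 0)
+ *		abort();	// out of shm space
+ */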
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBRINGBUFFER_SHM_H
+#define _LIBRINGBUFFER_SHM_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <usterr-signal-safe.h>
+#include <urcu/compiler.h>
+#include "shm_types.h"
+
+/* channel_handle_create - for UST. */
+extern
+struct lttng_ust_shm_handle *channel_handle_create(void *data,
+ uint64_t memory_map_size, int wakeup_fd)
+ __attribute__((visibility("hidden")));
+
+/* channel_handle_add_stream - for UST. */
+extern
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+ int shm_fd, int wakeup_fd, uint32_t stream_nr,
+ uint64_t memory_map_size)
+ __attribute__((visibility("hidden")));
+
+unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
+ __attribute__((visibility("hidden")));
+
+/*
+ * Pointer dereferencing. We don't trust the shm_ref, so we validate
+ * both the index and offset with known boundaries.
+ *
+ * "shmp" and "shmp_index" guarantee that it's safe to use the pointer
+ * target type, even in the event of shm_ref modification by an
+ * untrusted process having write access to the shm_ref. We return a
+ * NULL pointer if the ranges are invalid.
+ */
+static inline
+char *_shmp_offset(struct shm_object_table *table, struct shm_ref *ref,
+ size_t idx, size_t elem_size)
+{
+ struct shm_object *obj;
+ size_t objindex, ref_offset;
+
+ objindex = (size_t) ref->index;
+ if (caa_unlikely(objindex >= table->allocated_len))
+ return NULL;
+ obj = &table->objects[objindex];
+ ref_offset = (size_t) ref->offset;
+ ref_offset += idx * elem_size;
+ /* Check if part of the element returned would exceed the limits. */
+ if (caa_unlikely(ref_offset + elem_size > obj->memory_map_size))
+ return NULL;
+ return &obj->memory_map[ref_offset];
+}
+
+#define shmp_index(handle, ref, index) \
+ ((__typeof__((ref)._type)) _shmp_offset((handle)->table, &(ref)._ref, index, sizeof(*((ref)._type))))
+
+#define shmp(handle, ref) shmp_index(handle, ref, 0)
+
+static inline
+void _set_shmp(struct shm_ref *ref, struct shm_ref src)
+{
+ *ref = src;
+}
+
+#define set_shmp(ref, src) _set_shmp(&(ref)._ref, src)
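+
+/*
+ * Usage sketch (hypothetical types): a shared-memory pointer is
+ * declared with DECLARE_SHMP() and must be validated on every access:
+ *
+ *	struct foo {
+ *		DECLARE_SHMP(struct bar, bar_ptr);
+ *	};
+ *
+ *	struct bar *bar = shmp(handle, foo->bar_ptr);
+ *
+ *	if (!bar)
+ *		return;	// reference out of bounds: fail gracefully
+ */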
+
+struct shm_object_table *shm_object_table_create(size_t max_nb_obj)
+ __attribute__((visibility("hidden")));
+
+struct shm_object *shm_object_table_alloc(struct shm_object_table *table,
+ size_t memory_map_size,
+ enum shm_object_type type,
+ const int stream_fd,
+ int cpu)
+ __attribute__((visibility("hidden")));
+
+struct shm_object *shm_object_table_append_shm(struct shm_object_table *table,
+ int shm_fd, int wakeup_fd, uint32_t stream_nr,
+ size_t memory_map_size)
+ __attribute__((visibility("hidden")));
+
+/* mem ownership is passed to shm_object_table_append_mem(). */
+struct shm_object *shm_object_table_append_mem(struct shm_object_table *table,
+ void *mem, size_t memory_map_size, int wakeup_fd)
+ __attribute__((visibility("hidden")));
+
+void shm_object_table_destroy(struct shm_object_table *table, int consumer)
+ __attribute__((visibility("hidden")));
+
+/*
+ * zalloc_shm - allocate memory within a shm object.
+ *
+ * Shared memory is already zeroed by zero_file() or zmalloc().
+ * *NOT* multithread-safe (should be protected by mutex).
+ * Returns a -1, -1 tuple on error.
+ */
+struct shm_ref zalloc_shm(struct shm_object *obj, size_t len)
+ __attribute__((visibility("hidden")));
+
+void align_shm(struct shm_object *obj, size_t align)
+ __attribute__((visibility("hidden")));
+
+static inline
+int shm_get_wait_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->wait_fd[0];
+}
+
+static inline
+int shm_get_wakeup_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->wait_fd[1];
+}
+
+static inline
+int shm_close_wait_fd(struct lttng_ust_shm_handle *handle,
+ struct shm_ref *ref)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ int wait_fd;
+ size_t index;
+ int ret;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ wait_fd = obj->wait_fd[0];
+ if (wait_fd < 0)
+ return -ENOENT;
+ obj->wait_fd[0] = -1;
+ ret = close(wait_fd);
+ if (ret) {
+ ret = -errno;
+ return ret;
+ }
+ return 0;
+}
+
+static inline
+int shm_close_wakeup_fd(struct lttng_ust_shm_handle *handle,
+ struct shm_ref *ref)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ int wakeup_fd;
+ size_t index;
+ int ret;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ wakeup_fd = obj->wait_fd[1];
+ if (wakeup_fd < 0)
+ return -ENOENT;
+ obj->wait_fd[1] = -1;
+ ret = close(wakeup_fd);
+ if (ret) {
+ ret = -errno;
+ return ret;
+ }
+ return 0;
+}
+
+static inline
+int shm_get_shm_fd(struct lttng_ust_shm_handle *handle, struct shm_ref *ref)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ return obj->shm_fd;
+}
+
+
+static inline
+int shm_get_shm_size(struct lttng_ust_shm_handle *handle, struct shm_ref *ref,
+ uint64_t *size)
+{
+ struct shm_object_table *table = handle->table;
+ struct shm_object *obj;
+ size_t index;
+
+ index = (size_t) ref->index;
+ if (caa_unlikely(index >= table->allocated_len))
+ return -EPERM;
+ obj = &table->objects[index];
+ *size = obj->memory_map_size;
+ return 0;
+}
+
+#endif /* _LIBRINGBUFFER_SHM_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBRINGBUFFER_SHM_INTERNAL_H
+#define _LIBRINGBUFFER_SHM_INTERNAL_H
+
+struct shm_ref {
+ volatile ssize_t index; /* within the object table */
+ volatile ssize_t offset; /* within the object */
+};
+
+#define DECLARE_SHMP(type, name) \
+ union { \
+ struct shm_ref _ref; \
+ type *_type; \
+ } name
+
+#endif /* _LIBRINGBUFFER_SHM_INTERNAL_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBRINGBUFFER_SHM_TYPES_H
+#define _LIBRINGBUFFER_SHM_TYPES_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <limits.h>
+#include "shm_internal.h"
+
+struct lttng_ust_lib_ring_buffer_channel;
+
+enum shm_object_type {
+ SHM_OBJECT_SHM,
+ SHM_OBJECT_MEM,
+};
+
+struct shm_object {
+ enum shm_object_type type;
+ size_t index; /* within the object table */
+ int shm_fd; /* shm fd */
+ int wait_fd[2]; /* fd for wait/wakeup */
+ char *memory_map;
+ size_t memory_map_size;
+ uint64_t allocated_len;
+ int shm_fd_ownership;
+};
+
+struct shm_object_table {
+ size_t size;
+ size_t allocated_len;
+ struct shm_object objects[];
+};
+
+struct lttng_ust_shm_handle {
+ struct shm_object_table *table;
+ DECLARE_SHMP(struct lttng_ust_lib_ring_buffer_channel, chan);
+};
+
+#endif /* _LIBRINGBUFFER_SHM_TYPES_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2019 Michael Jeanson <mjeanson@efficios.com>
+ */
+
+#define _LGPL_SOURCE
+#include <unistd.h>
+#include <pthread.h>
+#include "smp.h"
+
+int __num_possible_cpus;
+
+#if (defined(__GLIBC__) || defined( __UCLIBC__))
+void _get_num_possible_cpus(void)
+{
+ int result;
+
+ /* On Linux, when some processors are offline
+ * _SC_NPROCESSORS_CONF counts the offline
+ * processors, whereas _SC_NPROCESSORS_ONLN
+ * does not. If we used _SC_NPROCESSORS_ONLN,
+ * getcpu() could return a value greater than
+ * this sysconf, in which case the arrays
+ * indexed by processor would overflow.
+ */
+ result = sysconf(_SC_NPROCESSORS_CONF);
+ if (result == -1)
+ return;
+ __num_possible_cpus = result;
+}
+
+#else
+
+/*
+ * The MUSL libc implementation of the _SC_NPROCESSORS_CONF sysconf does not
+ * return the number of configured CPUs in the system but relies on the cpu
+ * affinity mask of the current task.
+ *
+ * So instead we use a strategy similar to GLIBC's, counting the cpu
+ * directories in "/sys/devices/system/cpu" and fall back on the value from
+ * sysconf() if it fails.
+ */
+
+#include <dirent.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+
+#define __max(a,b) ((a)>(b)?(a):(b))
+
+void _get_num_possible_cpus(void)
+{
+ int result, count = 0;
+ DIR *cpudir;
+ struct dirent *entry;
+
+ cpudir = opendir("/sys/devices/system/cpu");
+ if (cpudir == NULL)
+ goto end;
+
+ /*
+	 * Count the number of directories named "cpu" followed by an
+	 * integer. This is the same strategy as glibc uses.
+ */
+ while ((entry = readdir(cpudir))) {
+ if (entry->d_type == DT_DIR &&
+ strncmp(entry->d_name, "cpu", 3) == 0) {
+
+ char *endptr;
+ unsigned long cpu_num;
+
+ cpu_num = strtoul(entry->d_name + 3, &endptr, 10);
+ if ((cpu_num < ULONG_MAX) && (endptr != entry->d_name + 3)
+ && (*endptr == '\0')) {
+ count++;
+ }
+ }
+ }
+
+end:
+ /*
+ * Get the sysconf value as a fallback. Keep the highest number.
+ */
+ result = __max(sysconf(_SC_NPROCESSORS_CONF), count);
+
+ /*
+ * If both methods failed, don't store the value.
+ */
+ if (result < 1)
+ return;
+ __num_possible_cpus = result;
+}
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LIBRINGBUFFER_SMP_H
+#define _LIBRINGBUFFER_SMP_H
+
+#include "getcpu.h"
+
+/*
+ * 4kB of per-cpu data available. Enough to hold the control structures,
+ * but not ring buffers.
+ */
+#define PER_CPU_MEM_SIZE 4096
+
+extern int __num_possible_cpus
+ __attribute__((visibility("hidden")));
+
+extern void _get_num_possible_cpus(void)
+ __attribute__((visibility("hidden")));
+
+static inline
+int num_possible_cpus(void)
+{
+ if (!__num_possible_cpus)
+ _get_num_possible_cpus();
+ return __num_possible_cpus;
+}
+
+#define for_each_possible_cpu(cpu) \
+ for ((cpu) = 0; (cpu) < num_possible_cpus(); (cpu)++)
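+
+/*
+ * Usage sketch (init_cpu_data() is a hypothetical helper):
+ *
+ *	int cpu;
+ *
+ *	for_each_possible_cpu(cpu)
+ *		init_cpu_data(cpu);
+ */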
+
+#endif /* _LIBRINGBUFFER_SMP_H */
--- /dev/null
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#ifndef _LTTNG_RING_BUFFER_VATOMIC_H
+#define _LTTNG_RING_BUFFER_VATOMIC_H
+
+#include <assert.h>
+#include <urcu/uatomic.h>
+
+/*
+ * Same data type (long) accessed differently depending on configuration.
+ * v field is for non-atomic access (protected by mutual exclusion).
+ * In the fast-path, the ring_buffer_config structure is constant, so the
+ * compiler can statically select the appropriate branch.
+ * local_t is used for per-cpu and per-thread buffers.
+ * atomic_long_t is used for globally shared buffers.
+ */
+union v_atomic {
+ long a; /* accessed through uatomic */
+ long v;
+};
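+
+/*
+ * Access sketch: lock-free paths go through the accessors below, while
+ * an owner holding the appropriate mutual exclusion may use the plain
+ * "v" field directly:
+ *
+ *	union v_atomic count;
+ *	long snapshot;
+ *
+ *	v_inc(config, &count);		// shared, atomic
+ *	snapshot = count.v;		// owner only, under mutual exclusion
+ */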
+
+static inline
+long v_read(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+ assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+ return uatomic_read(&v_a->a);
+}
+
+static inline
+void v_set(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
+ long v)
+{
+ assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+ uatomic_set(&v_a->a, v);
+}
+
+static inline
+void v_add(const struct lttng_ust_lib_ring_buffer_config *config, long v, union v_atomic *v_a)
+{
+ assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+ uatomic_add(&v_a->a, v);
+}
+
+static inline
+void v_inc(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a)
+{
+ assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+ uatomic_inc(&v_a->a);
+}
+
+/*
+ * Non-atomic decrement. Only used by reader, apply to reader-owned subbuffer.
+ */
+static inline
+void _v_dec(const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)), union v_atomic *v_a)
+{
+ --v_a->v;
+}
+
+static inline
+long v_cmpxchg(const struct lttng_ust_lib_ring_buffer_config *config, union v_atomic *v_a,
+ long old, long _new)
+{
+ assert(config->sync != RING_BUFFER_SYNC_PER_CPU);
+ return uatomic_cmpxchg(&v_a->a, old, _new);
+}
+
+#endif /* _LTTNG_RING_BUFFER_VATOMIC_H */
--- /dev/null
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: LTTng Userspace Tracer control
+Description: The LTTng Userspace Tracer (UST) is a library accompanied by a set of tools to trace userspace code.
+Version: @PACKAGE_VERSION@
+Requires:
+Libs: -L${libdir} -llttng-ust-ctl
+Cflags: -I${includedir}
+
--- /dev/null
+prefix=@prefix@
+exec_prefix=@exec_prefix@
+libdir=@libdir@
+includedir=@includedir@
+
+Name: LTTng Userspace Tracer
+Description: The LTTng Userspace Tracer (UST) is a library accompanied by a set of tools to trace userspace code.
+Version: @PACKAGE_VERSION@
+Requires:
+Libs: -L${libdir} -llttng-ust -llttng-ust-common -ldl
+Cflags: -I${includedir}
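+
+# Usage sketch: consumers typically query this file through pkg-config,
+# for example:
+#   cc $(pkg-config --cflags lttng-ust) -o app app.c $(pkg-config --libs lttng-ust)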
+
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+INSTALLED_FILES=$(builddir)/installed_files.txt
+
+STATIC_BINDINGS_DEPS = \
+ lttngust/__init__.py \
+ lttngust/agent.py \
+ lttngust/cmd.py \
+ lttngust/compat.py \
+ lttngust/debug.py \
+ lttngust/loghandler.py
+
+all-local: build-python-bindings.stamp
+
+copy-static-deps.stamp: $(addprefix $(srcdir)/, $(STATIC_BINDINGS_DEPS))
+ @if [ x"$(srcdir)" != x"$(builddir)" ]; then \
+ for file in $(STATIC_BINDINGS_DEPS); do \
+ cp -f $(srcdir)/$$file $(builddir)/$$file; \
+ done; \
+ fi
+ touch $@
+
+# Use setup.py for the installation instead of Autoconf.
+# This eases the installation process and assures a *pythonic*
+# installation.
+build-python-bindings.stamp: copy-static-deps.stamp
+ $(PYTHON) $(builddir)/setup.py build --force
+ touch $@
+
+install-exec-local: build-python-bindings.stamp
+ @opts="--prefix=$(prefix) --record $(INSTALLED_FILES) --verbose --no-compile $(DISTSETUPOPTS)"; \
+ if [ "$(DESTDIR)" != "" ]; then \
+ opts="$$opts --root=$(DESTDIR)"; \
+ fi; \
+ $(PYTHON) $(builddir)/setup.py install $$opts;
+
+clean-local:
+ rm -rf $(builddir)/build
+ @if [ x"$(srcdir)" != x"$(builddir)" ]; then \
+ for file in $(STATIC_BINDINGS_DEPS); do \
+ rm -f $(builddir)/$$file; \
+ done; \
+ fi
+
+# Distutils' setup.py does not include an uninstall target, so we need to do
+# it manually: we record the paths of the files installed during the install
+# target and delete them during uninstallation.
+uninstall-local:
+ if [ "$(DESTDIR)" != "" ]; then \
+ $(SED) -i "s|^|$(DESTDIR)/|g" $(INSTALLED_FILES); \
+ fi
+ cat $(INSTALLED_FILES) | xargs rm -rf || true
+ $(GREP) "__init__.py" $(INSTALLED_FILES) | xargs dirname | xargs rm -rf || true
+ rm -f $(INSTALLED_FILES)
+
+EXTRA_DIST = $(STATIC_BINDINGS_DEPS)
+
+CLEANFILES = \
+ build-python-bindings.stamp \
+ copy-static-deps.stamp
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
+
+from __future__ import unicode_literals
+
+from .version import __version__
+from .version import __soname_major__
+
+# this creates the daemon threads and registers the application
+import lttngust.agent
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
+# Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+import lttngust.debug as dbg
+import lttngust.loghandler
+import lttngust.compat
+import lttngust.cmd
+from io import open
+import threading
+import logging
+import socket
+import time
+import sys
+import os
+
+
+try:
+ # Python 2
+ import Queue as queue
+except ImportError:
+ # Python 3
+ import queue
+
+
+_PROTO_DOMAIN = 5
+_PROTO_MAJOR = 2
+_PROTO_MINOR = 0
+
+
+def _get_env_value_ms(key, default_s):
+ try:
+ val = int(os.getenv(key, default_s * 1000)) / 1000
+ except:
+ val = -1
+
+ if val < 0:
+ fmt = 'invalid ${} value; {} seconds will be used'
+ dbg._pwarning(fmt.format(key, default_s))
+ val = default_s
+
+ return val
+
+
+_REG_TIMEOUT = _get_env_value_ms('LTTNG_UST_PYTHON_REGISTER_TIMEOUT', 5)
+_RETRY_REG_DELAY = _get_env_value_ms('LTTNG_UST_PYTHON_REGISTER_RETRY_DELAY', 3)
+
+
+class _TcpClient(object):
+ def __init__(self, name, host, port, reg_queue):
+ super(self.__class__, self).__init__()
+ self._name = name
+ self._host = host
+ self._port = port
+
+ try:
+ self._log_handler = lttngust.loghandler._Handler()
+ except (OSError) as e:
+ dbg._pwarning('cannot load library: {}'.format(e))
+ raise e
+
+ self._root_logger = logging.getLogger()
+ self._root_logger.setLevel(logging.NOTSET)
+ self._ref_count = 0
+ self._sessiond_sock = None
+ self._reg_queue = reg_queue
+ self._server_cmd_handlers = {
+ lttngust.cmd._ServerCmdRegistrationDone: self._handle_server_cmd_reg_done,
+ lttngust.cmd._ServerCmdEnable: self._handle_server_cmd_enable,
+ lttngust.cmd._ServerCmdDisable: self._handle_server_cmd_disable,
+ lttngust.cmd._ServerCmdList: self._handle_server_cmd_list,
+ }
+
+ def _debug(self, msg):
+ return 'client "{}": {}'.format(self._name, msg)
+
+ def run(self):
+ while True:
+ try:
+ # connect to the session daemon
+ dbg._pdebug(self._debug('connecting to session daemon'))
+ self._connect_to_sessiond()
+
+ # register to the session daemon after a successful connection
+ dbg._pdebug(self._debug('registering to session daemon'))
+ self._register()
+
+ # wait for commands from the session daemon
+ self._wait_server_cmd()
+ except (Exception) as e:
+ # Whatever happens here, we have to close the socket and
+ # retry to connect to the session daemon since either
+                # the socket was closed, a network timeout occurred, or
+ # invalid data was received.
+ dbg._pdebug(self._debug('got exception: {}'.format(e)))
+ self._cleanup_socket()
+ dbg._pdebug(self._debug('sleeping for {} s'.format(_RETRY_REG_DELAY)))
+ time.sleep(_RETRY_REG_DELAY)
+
+ def _recv_server_cmd_header(self):
+ data = self._sessiond_sock.recv(lttngust.cmd._SERVER_CMD_HEADER_SIZE)
+
+ if not data:
+ dbg._pdebug(self._debug('received empty server command header'))
+ return None
+
+ assert(len(data) == lttngust.cmd._SERVER_CMD_HEADER_SIZE)
+ dbg._pdebug(self._debug('received server command header ({} bytes)'.format(len(data))))
+
+ return lttngust.cmd._server_cmd_header_from_data(data)
+
+ def _recv_server_cmd(self):
+ server_cmd_header = self._recv_server_cmd_header()
+
+ if server_cmd_header is None:
+ return None
+
+ dbg._pdebug(self._debug('server command header: data size: {} bytes'.format(server_cmd_header.data_size)))
+ dbg._pdebug(self._debug('server command header: command ID: {}'.format(server_cmd_header.cmd_id)))
+ dbg._pdebug(self._debug('server command header: command version: {}'.format(server_cmd_header.cmd_version)))
+ data = bytes()
+
+ if server_cmd_header.data_size > 0:
+ data = self._sessiond_sock.recv(server_cmd_header.data_size)
+ assert(len(data) == server_cmd_header.data_size)
+
+ return lttngust.cmd._server_cmd_from_data(server_cmd_header, data)
+
+ def _send_cmd_reply(self, cmd_reply):
+ data = cmd_reply.get_data()
+ dbg._pdebug(self._debug('sending command reply ({} bytes)'.format(len(data))))
+ self._sessiond_sock.sendall(data)
+
+ def _handle_server_cmd_reg_done(self, server_cmd):
+ dbg._pdebug(self._debug('got "registration done" server command'))
+
+ if self._reg_queue is not None:
+ dbg._pdebug(self._debug('notifying _init_threads()'))
+
+ try:
+ self._reg_queue.put(True)
+ except (Exception) as e:
+ # read side could be closed by now; ignore it
+ pass
+
+ self._reg_queue = None
+
+ def _handle_server_cmd_enable(self, server_cmd):
+ dbg._pdebug(self._debug('got "enable" server command'))
+ self._ref_count += 1
+
+ if self._ref_count == 1:
+ dbg._pdebug(self._debug('adding our handler to the root logger'))
+ self._root_logger.addHandler(self._log_handler)
+
+ dbg._pdebug(self._debug('ref count is {}'.format(self._ref_count)))
+
+ return lttngust.cmd._ClientCmdReplyEnable()
+
+ def _handle_server_cmd_disable(self, server_cmd):
+ dbg._pdebug(self._debug('got "disable" server command'))
+ self._ref_count -= 1
+
+ if self._ref_count < 0:
+ # disable command could be sent again when a session is destroyed
+ self._ref_count = 0
+
+ if self._ref_count == 0:
+ dbg._pdebug(self._debug('removing our handler from the root logger'))
+ self._root_logger.removeHandler(self._log_handler)
+
+ dbg._pdebug(self._debug('ref count is {}'.format(self._ref_count)))
+
+ return lttngust.cmd._ClientCmdReplyDisable()
+
+ def _handle_server_cmd_list(self, server_cmd):
+ dbg._pdebug(self._debug('got "list" server command'))
+ names = logging.Logger.manager.loggerDict.keys()
+ dbg._pdebug(self._debug('found {} loggers'.format(len(names))))
+ cmd_reply = lttngust.cmd._ClientCmdReplyList(names=names)
+
+ return cmd_reply
+
+ def _handle_server_cmd(self, server_cmd):
+ cmd_reply = None
+
+ if server_cmd is None:
+ dbg._pdebug(self._debug('bad server command'))
+ status = lttngust.cmd._CLIENT_CMD_REPLY_STATUS_INVALID_CMD
+ cmd_reply = lttngust.cmd._ClientCmdReply(status)
+ elif type(server_cmd) in self._server_cmd_handlers:
+ cmd_reply = self._server_cmd_handlers[type(server_cmd)](server_cmd)
+ else:
+ dbg._pdebug(self._debug('unknown server command'))
+ status = lttngust.cmd._CLIENT_CMD_REPLY_STATUS_INVALID_CMD
+ cmd_reply = lttngust.cmd._ClientCmdReply(status)
+
+ if cmd_reply is not None:
+ self._send_cmd_reply(cmd_reply)
+
+ def _wait_server_cmd(self):
+ while True:
+ try:
+ server_cmd = self._recv_server_cmd()
+ except socket.timeout:
+                # simply retry here; the protocol has no keep-alive and
+                # we could wait for hours
+ continue
+
+ self._handle_server_cmd(server_cmd)
+
+ def _cleanup_socket(self):
+ try:
+ self._sessiond_sock.shutdown(socket.SHUT_RDWR)
+ self._sessiond_sock.close()
+ except:
+ pass
+
+ self._sessiond_sock = None
+
+ def _connect_to_sessiond(self):
+ # create session daemon TCP socket
+ if self._sessiond_sock is None:
+ self._sessiond_sock = socket.socket(socket.AF_INET,
+ socket.SOCK_STREAM)
+
+ # Use str(self._host) here. Since this host could be a string
+ # literal, and since we're importing __future__.unicode_literals,
+ # we want to make sure the host is a native string in Python 2.
+ # This avoids an indirect module import (unicode module to
+ # decode the unicode string, eventually imported by the
+ # socket module if needed), which is not allowed in a thread
+ # directly created by a module in Python 2 (our case).
+ #
+ # tl;dr: Do NOT remove str() here, or this call in Python 2
+ # _will_ block on an interpreter's mutex until the waiting
+    # register queue times out.
+ self._sessiond_sock.connect((str(self._host), self._port))
+
+ def _register(self):
+ cmd = lttngust.cmd._ClientRegisterCmd(_PROTO_DOMAIN, os.getpid(),
+ _PROTO_MAJOR, _PROTO_MINOR)
+ data = cmd.get_data()
+ self._sessiond_sock.sendall(data)
+
+
+def _get_port_from_file(path):
+ port = None
+ dbg._pdebug('reading port from file "{}"'.format(path))
+
+ try:
+ f = open(path)
+ r_port = int(f.readline())
+ f.close()
+
+        if r_port > 0 and r_port <= 65535:
+ port = r_port
+ except:
+ pass
+
+ return port
+
+
+def _get_user_home_path():
+ # $LTTNG_HOME overrides $HOME if it exists
+ return os.getenv('LTTNG_HOME', os.path.expanduser('~'))
+
+
+_initialized = False
+_SESSIOND_HOST = '127.0.0.1'
+
+
+def _client_thread_target(name, port, reg_queue):
+ dbg._pdebug('creating client "{}" using TCP port {}'.format(name, port))
+ client = _TcpClient(name, _SESSIOND_HOST, port, reg_queue)
+ dbg._pdebug('starting client "{}"'.format(name))
+ client.run()
+
+
+def _init_threads():
+ global _initialized
+
+ dbg._pdebug('entering')
+
+ if _initialized:
+ dbg._pdebug('agent is already initialized')
+ return
+
+ # This makes sure that the appropriate modules for encoding and
+ # decoding strings/bytes are imported now, since no import should
+ # happen within a thread at import time (our case).
+ 'lttng'.encode().decode()
+
+ _initialized = True
+ sys_port = _get_port_from_file('/var/run/lttng/agent.port')
+ user_port_file = os.path.join(_get_user_home_path(), '.lttng', 'agent.port')
+ user_port = _get_port_from_file(user_port_file)
+ reg_queue = queue.Queue()
+ reg_expecting = 0
+
+ dbg._pdebug('system session daemon port: {}'.format(sys_port))
+ dbg._pdebug('user session daemon port: {}'.format(user_port))
+
+ if sys_port == user_port and sys_port is not None:
+ # The two session daemon ports are the same. This is not normal.
+ # Connect to only one.
+ dbg._pdebug('both user and system session daemon have the same port')
+ sys_port = None
+
+ try:
+ if sys_port is not None:
+ dbg._pdebug('creating system client thread')
+ t = threading.Thread(target=_client_thread_target,
+ args=('system', sys_port, reg_queue))
+ t.name = 'system'
+ t.daemon = True
+ t.start()
+ dbg._pdebug('created and started system client thread')
+ reg_expecting += 1
+
+ if user_port is not None:
+ dbg._pdebug('creating user client thread')
+ t = threading.Thread(target=_client_thread_target,
+ args=('user', user_port, reg_queue))
+ t.name = 'user'
+ t.daemon = True
+ t.start()
+ dbg._pdebug('created and started user client thread')
+ reg_expecting += 1
+ except:
+ # cannot create threads for some reason; stop this initialization
+ dbg._pwarning('cannot create client threads')
+ return
+
+ if reg_expecting == 0:
+ # early exit: looks like there's not even one valid port
+ dbg._pwarning('no valid LTTng session daemon port found (is the session daemon started?)')
+ return
+
+ cur_timeout = _REG_TIMEOUT
+
+ # We block here to make sure the agent is properly registered to
+ # the session daemon. If we timeout, the client threads will still
+ # continue to try to connect and register to the session daemon,
+ # but there is no guarantee that all following logging statements
+ # will make it to LTTng-UST.
+ #
+ # When a client thread receives a "registration done" confirmation
+ # from the session daemon it's connected to, it puts True in
+ # reg_queue.
+ while True:
+ try:
+ dbg._pdebug('waiting for registration done (expecting {}, timeout is {} s)'.format(reg_expecting,
+ cur_timeout))
+ t1 = lttngust.compat._clock()
+ reg_queue.get(timeout=cur_timeout)
+ t2 = lttngust.compat._clock()
+ reg_expecting -= 1
+ dbg._pdebug('unblocked')
+
+ if reg_expecting == 0:
+ # done!
+ dbg._pdebug('successfully registered to session daemon(s)')
+ break
+
+ cur_timeout -= (t2 - t1)
+
+ if cur_timeout <= 0:
+ # timeout
+ dbg._pdebug('ran out of time')
+ break
+ except queue.Empty:
+ dbg._pdebug('ran out of time')
+ break
+
+ dbg._pdebug('leaving')
+
+
+_init_threads()
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
+# Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
+# Copyright (C) 2015 Jérémie Galarneau <jeremie.galarneau@efficios.com>
+
+from __future__ import unicode_literals
+import lttngust.debug as dbg
+import struct
+
+
+# server command header
+_server_cmd_header_struct = struct.Struct('>QII')
+
+
+# server command header size
+_SERVER_CMD_HEADER_SIZE = _server_cmd_header_struct.size
+
+
+# agent protocol symbol size
+_LTTNG_SYMBOL_NAME_LEN = 256
+
+
+class _ServerCmdHeader(object):
+ def __init__(self, data_size, cmd_id, cmd_version):
+ self.data_size = data_size
+ self.cmd_id = cmd_id
+ self.cmd_version = cmd_version
+
+
+def _server_cmd_header_from_data(data):
+ try:
+ data_size, cmd_id, cmd_version = _server_cmd_header_struct.unpack(data)
+ except Exception as e:
+ dbg._pdebug('cannot decode command header: {}'.format(e))
+ return None
+
+ return _ServerCmdHeader(data_size, cmd_id, cmd_version)
+
+
+class _ServerCmd(object):
+ def __init__(self, header):
+ self.header = header
+
+ @classmethod
+ def from_data(cls, header, data):
+ raise NotImplementedError()
+
+
+class _ServerCmdList(_ServerCmd):
+ @classmethod
+ def from_data(cls, header, data):
+ return cls(header)
+
+
+class _ServerCmdEnable(_ServerCmd):
+ _NAME_OFFSET = 8
+ _loglevel_struct = struct.Struct('>II')
+ # filter expression size
+ _filter_exp_len_struct = struct.Struct('>I')
+
+ def __init__(self, header, loglevel, loglevel_type, name, filter_exp):
+ super(_ServerCmdEnable, self).__init__(header)
+ self.loglevel = loglevel
+ self.loglevel_type = loglevel_type
+ self.name = name
+ self.filter_expression = filter_exp
+ dbg._pdebug('server enable command {}'.format(self.__dict__))
+
+ @classmethod
+ def from_data(cls, header, data):
+ try:
+ loglevel, loglevel_type = cls._loglevel_struct.unpack_from(data)
+ name_start = cls._loglevel_struct.size
+ name_end = name_start + _LTTNG_SYMBOL_NAME_LEN
+ data_name = data[name_start:name_end]
+ name = data_name.rstrip(b'\0').decode()
+
+ filter_exp_start = name_end + cls._filter_exp_len_struct.size
+ filter_exp_len, = cls._filter_exp_len_struct.unpack_from(
+ data[name_end:filter_exp_start])
+ filter_exp_end = filter_exp_start + filter_exp_len
+
+ filter_exp = data[filter_exp_start:filter_exp_end].rstrip(
+ b'\0').decode()
+
+ return cls(header, loglevel, loglevel_type, name, filter_exp)
+ except Exception as e:
+ dbg._pdebug('cannot decode enable command: {}'.format(e))
+ return None
+
+
+class _ServerCmdDisable(_ServerCmd):
+ def __init__(self, header, name):
+ super(_ServerCmdDisable, self).__init__(header)
+ self.name = name
+
+ @classmethod
+ def from_data(cls, header, data):
+ try:
+ name = data.rstrip(b'\0').decode()
+
+ return cls(header, name)
+ except Exception as e:
+ dbg._pdebug('cannot decode disable command: {}'.format(e))
+ return None
+
+
+class _ServerCmdRegistrationDone(_ServerCmd):
+ @classmethod
+ def from_data(cls, header, data):
+ return cls(header)
+
+
+_SERVER_CMD_ID_TO_SERVER_CMD = {
+ 1: _ServerCmdList,
+ 2: _ServerCmdEnable,
+ 3: _ServerCmdDisable,
+ 4: _ServerCmdRegistrationDone,
+}
+
+
+def _server_cmd_from_data(header, data):
+ if header.cmd_id not in _SERVER_CMD_ID_TO_SERVER_CMD:
+ return None
+
+ return _SERVER_CMD_ID_TO_SERVER_CMD[header.cmd_id].from_data(header, data)
+
+
+_CLIENT_CMD_REPLY_STATUS_SUCCESS = 1
+_CLIENT_CMD_REPLY_STATUS_INVALID_CMD = 2
+
+
+class _ClientCmdReplyHeader(object):
+ _payload_struct = struct.Struct('>I')
+
+ def __init__(self, status_code=_CLIENT_CMD_REPLY_STATUS_SUCCESS):
+ self.status_code = status_code
+
+ def get_data(self):
+ return self._payload_struct.pack(self.status_code)
+
+
+class _ClientCmdReplyEnable(_ClientCmdReplyHeader):
+ pass
+
+
+class _ClientCmdReplyDisable(_ClientCmdReplyHeader):
+ pass
+
+
+class _ClientCmdReplyList(_ClientCmdReplyHeader):
+ _nb_events_struct = struct.Struct('>I')
+ _data_size_struct = struct.Struct('>I')
+
+ def __init__(self, names, status_code=_CLIENT_CMD_REPLY_STATUS_SUCCESS):
+ super(_ClientCmdReplyList, self).__init__(status_code)
+ self.names = names
+
+ def get_data(self):
+ upper_data = super(_ClientCmdReplyList, self).get_data()
+ nb_events_data = self._nb_events_struct.pack(len(self.names))
+ names_data = bytes()
+
+ for name in self.names:
+ names_data += name.encode() + b'\0'
+
+ data_size_data = self._data_size_struct.pack(len(names_data))
+
+ return upper_data + data_size_data + nb_events_data + names_data
+
+
+class _ClientRegisterCmd(object):
+ _payload_struct = struct.Struct('>IIII')
+
+ def __init__(self, domain, pid, major, minor):
+ self.domain = domain
+ self.pid = pid
+ self.major = major
+ self.minor = minor
+
+ def get_data(self):
+ return self._payload_struct.pack(self.domain, self.pid, self.major,
+ self.minor)
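+
+
+# Example (hypothetical values): serializing a registration command. The
+# domain value comes from the session daemon protocol and is only a
+# placeholder here.
+#
+#     cmd = _ClientRegisterCmd(domain=5, pid=1234, major=2, minor=0)
+#     payload = cmd.get_data()    # 16 bytes: four big-endian u32 fields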
--- /dev/null
+# -*- coding: utf-8 -*-
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2020 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
+
+import sys
+import time
+
+
+# Support for the deprecation of time.clock(), which was deprecated in
+# Python 3.3 and removed in Python 3.8. Its replacement,
+# time.perf_counter(), was added in Python 3.3, hence the version check
+# below. See PEP 418 for more details.
+def _clock():
+ if sys.version_info >= (3, 3):
+ clock = time.perf_counter()
+ else:
+ clock = time.clock()
+ return clock
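+
+
+# Example: measuring an elapsed interval with the portable clock, as
+# lttngust.agent does around its registration queue (do_work() is a
+# hypothetical placeholder):
+#
+#     t1 = _clock()
+#     do_work()
+#     elapsed = _clock() - t1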
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
+
+from __future__ import unicode_literals, print_function
+import lttngust.compat
+import time
+import sys
+import os
+
+
+_ENABLE_DEBUG = os.getenv('LTTNG_UST_PYTHON_DEBUG', '0') == '1'
+
+
+if _ENABLE_DEBUG:
+ import inspect
+
+ def _pwarning(msg):
+ fname = inspect.stack()[1][3]
+ fmt = '[{:.6f}] LTTng-UST warning: {}(): {}'
+ print(fmt.format(lttngust.compat._clock(), fname, msg), file=sys.stderr)
+
+ def _pdebug(msg):
+ fname = inspect.stack()[1][3]
+ fmt = '[{:.6f}] LTTng-UST debug: {}(): {}'
+ print(fmt.format(lttngust.compat._clock(), fname, msg), file=sys.stderr)
+
+ _pdebug('debug is enabled')
+else:
+ def _pwarning(msg):
+ pass
+
+ def _pdebug(msg):
+ pass
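+
+
+# Example: enabling the agent's debug output from the shell (illustrative
+# invocation):
+#
+#     $ LTTNG_UST_PYTHON_DEBUG=1 python my-app.py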
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2015 Philippe Proulx <pproulx@efficios.com>
+# Copyright (C) 2014 David Goulet <dgoulet@efficios.com>
+
+from __future__ import unicode_literals
+import logging
+import ctypes
+
+from .version import __soname_major__
+
+class _Handler(logging.Handler):
+ _LIB_NAME = 'liblttng-ust-python-agent.so.' + __soname_major__
+
+ def __init__(self):
+ super(_Handler, self).__init__(level=logging.NOTSET)
+ self.setFormatter(logging.Formatter('%(asctime)s'))
+
+ # will raise if library is not found: caller should catch
+ self.agent_lib = ctypes.cdll.LoadLibrary(_Handler._LIB_NAME)
+
+ def emit(self, record):
+ self.agent_lib.py_tracepoint(self.format(record).encode(),
+ record.getMessage().encode(),
+ record.name.encode(),
+ record.funcName.encode(),
+ record.lineno, record.levelno,
+ record.thread,
+ record.threadName.encode())
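+
+
+# A minimal sketch of attaching the handler manually; the agent normally
+# creates and attaches it itself, and the constructor raises if
+# liblttng-ust-python-agent cannot be loaded:
+#
+#     import logging
+#
+#     try:
+#         logging.getLogger().addHandler(_Handler())
+#     except OSError:
+#         pass    # agent library not available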
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2021 Michael Jeanson <mjeanson@efficios.com>
+
+__version__ = '@PACKAGE_VERSION@'
+__soname_major__ = '@LTTNG_UST_LIB_SONAME_MAJOR@'
--- /dev/null
+# -*- coding: utf-8 -*-
+#
+# SPDX-License-Identifier: LGPL-2.1-only
+#
+# Copyright (C) 2015 Jonathan Rajotte <jonathan.rajotte-julien@efficios.com>
+
+import os
+import sys
+
+from distutils.core import setup, Extension
+
+PY_PATH_WARN_MSG = """
+-------------------------------------WARNING------------------------------------
+The install directory used:\n ({0})\nis not included in your PYTHONPATH.
+
+To add this directory to your Python search path permanently, add the
+following line to your .bashrc/.zshrc:
+ export PYTHONPATH="${{PYTHONPATH}}:{0}"
+--------------------------------------------------------------------------------
+"""
+
+def main():
+ dist = setup(name='lttngust',
+ version='@PACKAGE_VERSION@',
+ description='LTTng-UST Python agent',
+ packages=['lttngust'],
+ package_dir={'lttngust': 'lttngust'},
+ options={'build': {'build_base': 'build'}},
+ url='http://lttng.org',
+ license='LGPL-2.1',
+ classifiers=[
+ 'Development Status :: 5 - Production/Stable',
+ 'Intended Audience :: Developers',
+ 'License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)',
+ 'Programming Language :: Python :: 2.7',
+ 'Programming Language :: Python :: 3',
+ 'Topic :: System :: Logging',
+ ])
+
+ # After the installation, we check that the install directory is
+ # included in the Python search path and we print a warning message
+ # when it's not. We need to do this because the Python search path
+ # differs depending on the distro, and some distros don't include
+ # `/usr/local/` (the default install prefix) in the search path. This
+ # is also useful for out-of-tree installs and tests. It's only
+ # relevant to make this check on the `install` command.
+ if 'install' in dist.command_obj:
+ install_dir = dist.command_obj['install'].install_libbase
+ if install_dir not in sys.path:
+ # We can't consider this an error because it affects every
+ # distro differently. We only warn the user that some extra
+ # configuration is needed to use the agent.
+ abs_install_dir = os.path.abspath(install_dir)
+ print(PY_PATH_WARN_MSG.format(abs_install_dir))
+
+if __name__ == '__main__':
+ main()
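+
+# Illustrative invocation (this file is a template; the prefix below is
+# an example only):
+#
+#     python setup.py build
+#     python setup.py install --prefix=/usr/local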
--- /dev/null
+# SPDX-License-Identifier: LGPL-2.1-only
+
+AM_CPPFLAGS += -I$(top_srcdir)/libustcomm
+AM_CFLAGS += -fno-strict-aliasing
+
+noinst_LTLIBRARIES = libustsnprintf.la
+libustsnprintf_la_SOURCES = \
+ fflush.c \
+ fileext.h \
+ floatio.h \
+ fvwrite.c \
+ fvwrite.h \
+ local.h \
+ mbrtowc_sb.c \
+ snprintf.c \
+ various.h \
+ vfprintf.c \
+ wcio.h \
+ wsetup.c \
+ core.c \
+ patient_write.c
+
+libustsnprintf_la_LDFLAGS = -no-undefined -static
+libustsnprintf_la_CFLAGS = -DUST_COMPONENT="lttng_ust_snprintf" -fPIC $(AM_CFLAGS)
+
+EXTRA_DIST = README
--- /dev/null
+This is a signal-safe version of snprintf/vsnprintf. The code is
+originally from the OpenBSD libc.
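+
+Example use from a signal handler (a sketch; ust_safe_snprintf() takes
+the same arguments as snprintf(3), and signum is a hypothetical variable):
+
+    char buf[64];
+    ust_safe_snprintf(buf, sizeof(buf), "got signal %d", signum);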
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <usterr-signal-safe.h>
+
+volatile enum ust_err_loglevel ust_err_loglevel;
+
+void ust_err_init(void)
+{
+ char *ust_debug;
+
+ if (ust_err_loglevel == UST_ERR_LOGLEVEL_UNKNOWN) {
+ /*
+ * This uses getenv() directly instead of lttng_ust_getenv()
+ * because this log level must be set early enough to print
+ * ERR() messages emitted during the initialization of
+ * lttng_ust_getenv() itself.
+ */
+ ust_debug = getenv("LTTNG_UST_DEBUG");
+ if (ust_debug)
+ ust_err_loglevel = UST_ERR_LOGLEVEL_DEBUG;
+ else
+ ust_err_loglevel = UST_ERR_LOGLEVEL_NORMAL;
+ }
+}
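+
+/*
+ * Illustrative use: setting LTTNG_UST_DEBUG (to any value) before
+ * starting an instrumented application selects the debug log level
+ * here:
+ *
+ *     $ LTTNG_UST_DEBUG=1 ./my-traced-app
+ */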
--- /dev/null
+/* $OpenBSD: fflush.c,v 1.7 2009/10/22 01:23:16 guenther Exp $ */
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include "local.h"
+
+/*
+ * Flush a single file. Unlike stock fflush(3), flushing all files when
+ * fp is NULL is not supported here; it is a no-op returning 0.
+ */
+int ust_safe_fflush(LTTNG_UST_LFILE *fp)
+{
+ if (fp == NULL)
+ return 0;
+// return (_fwalk(__sflush));
+ if ((fp->_flags & (__SWR | __SRW)) == 0) {
+ errno = EBADF;
+ return (EOF);
+ }
+ return (__sflush(fp));
+}
+
+int
+__sflush(LTTNG_UST_LFILE *fp)
+{
+ unsigned char *p;
+ int n, t;
+
+ t = fp->_flags;
+ if ((t & __SWR) == 0)
+ return (0);
+
+ if ((p = fp->_bf._base) == NULL)
+ return (0);
+
+ n = fp->_p - p; /* write this much */
+
+ /*
+ * Set these immediately to avoid problems with longjmp and to allow
+ * exchange buffering (via setvbuf) in user write function.
+ */
+ fp->_p = p;
+ fp->_w = t & (__SLBF|__SNBF) ? 0 : fp->_bf._size;
+
+ for (; n > 0; n -= t, p += t) {
+ t = (*fp->_write)(fp->_cookie, (char *)p, n);
+ if (t <= 0) {
+ fp->_flags |= __SERR;
+ return (EOF);
+ }
+ }
+ return (0);
+}
--- /dev/null
+/* $OpenBSD: fileext.h,v 1.2 2005/06/17 20:40:32 espie Exp $ */
+/* $NetBSD: fileext.h,v 1.5 2003/07/18 21:46:41 nathanw Exp $ */
+
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C)2001 Citrus Project,
+ * All rights reserved.
+ *
+ * $Citrus$
+ */
+
+/*
+ * file extension
+ */
+struct __lttng_ust_sfileext {
+ struct __lttng_ust_sbuf _ub; /* ungetc buffer */
+ struct wchar_io_data _wcio; /* wide char io status */
+};
+
+#define _EXT(fp) ((struct __lttng_ust_sfileext *)((fp)->_ext._base))
+#define _UB(fp) _EXT(fp)->_ub
+
+#define _FILEEXT_INIT(fp) \
+do { \
+ _UB(fp)._base = NULL; \
+ _UB(fp)._size = 0; \
+ WCIO_INIT(fp); \
+} while (0)
+
+#define _FILEEXT_SETUP(f, fext) \
+do { \
+ (f)->_ext._base = (unsigned char *)(fext); \
+ _FILEEXT_INIT(f); \
+} while (0)
--- /dev/null
+/* $OpenBSD: floatio.h,v 1.4 2008/09/07 20:36:08 martynas Exp $ */
+
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+/*
+ * Floating point scanf/printf (input/output) definitions.
+ */
+
+/* 11-bit exponent (VAX G floating point) is 308 decimal digits */
+#define MAXEXP 308
+/* 128 bit fraction takes up 39 decimal digits; max reasonable precision */
+#define MAXFRACT 39
+
+/*
+ * MAXEXPDIG is the maximum number of decimal digits needed to store a
+ * floating point exponent in the largest supported format. It should
+ * be ceil(log10(LDBL_MAX_10_EXP)) or, if hexadecimal floating point
+ * conversions are supported, ceil(log10(LDBL_MAX_EXP)). But since it
+ * is presently never greater than 5 in practice, we fudge it.
+ */
+#define MAXEXPDIG 6
+#if LDBL_MAX_EXP > 999999
+#error "floating point buffers too small"
+#endif
+
+char *__hdtoa(double, const char *, int, int *, int *, char **)
+ __attribute__((visibility("hidden")));
+
+char *__hldtoa(long double, const char *, int, int *, int *, char **)
+ __attribute__((visibility("hidden")));
+
+char *__ldtoa(long double *, int, int, int *, int *, char **)
+ __attribute__((visibility("hidden")));
--- /dev/null
+/* $OpenBSD: fvwrite.c,v 1.16 2009/10/22 01:23:16 guenther Exp $ */
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include "local.h"
+#include "fvwrite.h"
+#include "various.h"
+
+/*
+ * Write some memory regions. Return zero on success, EOF on error.
+ *
+ * This routine is large and unsightly, but most of the ugliness due
+ * to the three different kinds of output buffering is handled here.
+ */
+int
+__sfvwrite(LTTNG_UST_LFILE *fp, struct __lttng_ust_suio *uio)
+{
+ size_t len;
+ char *p;
+ struct __lttng_ust_siov *iov;
+ int w, s;
+ char *nl;
+ int nlknown, nldist;
+
+ if ((len = uio->uio_resid) == 0)
+ return (0);
+ /* make sure we can write */
+ if (cantwrite(fp)) {
+ errno = EBADF;
+ return (EOF);
+ }
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define COPY(n) (void)memcpy((void *)fp->_p, (void *)p, (size_t)(n))
+
+ iov = uio->uio_iov;
+ p = iov->iov_base;
+ len = iov->iov_len;
+ iov++;
+#define GETIOV(extra_work) \
+ while (len == 0) { \
+ extra_work; \
+ p = iov->iov_base; \
+ len = iov->iov_len; \
+ iov++; \
+ }
+ if (fp->_flags & __SNBF) {
+ /*
+ * Unbuffered: write up to BUFSIZ bytes at a time.
+ */
+ do {
+ GETIOV(;);
+ w = (*fp->_write)(fp->_cookie, p, MIN(len, BUFSIZ));
+ if (w <= 0)
+ goto err;
+ p += w;
+ len -= w;
+ } while ((uio->uio_resid -= w) != 0);
+ } else if ((fp->_flags & __SLBF) == 0) {
+ /*
+ * Fully buffered: fill partially full buffer, if any,
+ * and then flush. If there is no partial buffer, write
+ * one _bf._size byte chunk directly (without copying).
+ *
+ * String output is a special case: write as many bytes
+ * as fit, but pretend we wrote everything. This makes
+ * snprintf() return the number of bytes needed, rather
+ * than the number used, and avoids its write function
+ * (so that the write function can be invalid).
+ */
+ do {
+ GETIOV(;);
+ if ((fp->_flags & (__SALC | __SSTR)) ==
+ (__SALC | __SSTR) && fp->_w < len) {
+ size_t blen = fp->_p - fp->_bf._base;
+ unsigned char *_base;
+ int _size;
+
+ /* Allocate space exponentially. */
+ _size = fp->_bf._size;
+ do {
+ _size = (_size << 1) + 1;
+ } while (_size < blen + len);
+ _base = realloc(fp->_bf._base, _size + 1);
+ if (_base == NULL)
+ goto err;
+ fp->_w += _size - fp->_bf._size;
+ fp->_bf._base = _base;
+ fp->_bf._size = _size;
+ fp->_p = _base + blen;
+ }
+ w = fp->_w;
+ if (fp->_flags & __SSTR) {
+ if (len < w)
+ w = len;
+ COPY(w); /* copy MIN(fp->_w,len), */
+ fp->_w -= w;
+ fp->_p += w;
+ w = len; /* but pretend copied all */
+ } else if (fp->_p > fp->_bf._base && len > w) {
+ /* fill and flush */
+ COPY(w);
+ /* fp->_w -= w; */ /* unneeded */
+ fp->_p += w;
+ if (ust_safe_fflush(fp))
+ goto err;
+ } else if (len >= (w = fp->_bf._size)) {
+ /* write directly */
+ w = (*fp->_write)(fp->_cookie, p, w);
+ if (w <= 0)
+ goto err;
+ } else {
+ /* fill and done */
+ w = len;
+ COPY(w);
+ fp->_w -= w;
+ fp->_p += w;
+ }
+ p += w;
+ len -= w;
+ } while ((uio->uio_resid -= w) != 0);
+ } else {
+ /*
+ * Line buffered: like fully buffered, but we
+ * must check for newlines. Compute the distance
+ * to the first newline (including the newline),
+ * or `infinity' if there is none, then pretend
+ * that the amount to write is MIN(len,nldist).
+ */
+ nlknown = 0;
+ nldist = 0; /* XXX just to keep gcc happy */
+ do {
+ GETIOV(nlknown = 0);
+ if (!nlknown) {
+ nl = memchr((void *)p, '\n', len);
+ nldist = nl ? nl + 1 - p : len + 1;
+ nlknown = 1;
+ }
+ s = MIN(len, nldist);
+ w = fp->_w + fp->_bf._size;
+ if (fp->_p > fp->_bf._base && s > w) {
+ COPY(w);
+ /* fp->_w -= w; */
+ fp->_p += w;
+ if (ust_safe_fflush(fp))
+ goto err;
+ } else if (s >= (w = fp->_bf._size)) {
+ w = (*fp->_write)(fp->_cookie, p, w);
+ if (w <= 0)
+ goto err;
+ } else {
+ w = s;
+ COPY(w);
+ fp->_w -= w;
+ fp->_p += w;
+ }
+ if ((nldist -= w) == 0) {
+ /* copied the newline: flush and forget */
+ if (ust_safe_fflush(fp))
+ goto err;
+ nlknown = 0;
+ }
+ p += w;
+ len -= w;
+ } while ((uio->uio_resid -= w) != 0);
+ }
+ return (0);
+
+err:
+ fp->_flags |= __SERR;
+ return (EOF);
+}
--- /dev/null
+/* $OpenBSD: fvwrite.h,v 1.5 2003/06/02 20:18:37 millert Exp $ */
+
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+/*
+ * I/O descriptors for __sfvwrite().
+ */
+#include <stddef.h>
+
+struct __lttng_ust_siov {
+ void *iov_base;
+ size_t iov_len;
+};
+struct __lttng_ust_suio {
+ struct __lttng_ust_siov *uio_iov;
+ int uio_iovcnt;
+ int uio_resid;
+};
+
+extern int __sfvwrite(LTTNG_UST_LFILE *, struct __lttng_ust_suio *)
+ __attribute__((visibility("hidden")));
--- /dev/null
+/* $OpenBSD: local.h,v 1.14 2009/10/22 01:23:16 guenther Exp $ */
+
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+/*
+ * Information local to this implementation of stdio,
+ * in particular, macros and private variables.
+ */
+
+#include <stdio.h>
+#include <wchar.h>
+#include "various.h"
+#include "wcio.h"
+#include "fileext.h"
+
+int __sflush(LTTNG_UST_LFILE *)
+ __attribute__((visibility("hidden")));
+
+LTTNG_UST_LFILE *__sfp(void)
+ __attribute__((visibility("hidden")));
+
+int __srefill(LTTNG_UST_LFILE *)
+ __attribute__((visibility("hidden")));
+
+int __sread(void *, char *, int)
+ __attribute__((visibility("hidden")));
+
+int __swrite(void *, const char *, int)
+ __attribute__((visibility("hidden")));
+
+fpos_t __sseek(void *, fpos_t, int)
+ __attribute__((visibility("hidden")));
+
+int __sclose(void *)
+ __attribute__((visibility("hidden")));
+
+void __sinit(void)
+ __attribute__((visibility("hidden")));
+
+void _cleanup(void)
+ __attribute__((visibility("hidden")));
+
+void __smakebuf(LTTNG_UST_LFILE *)
+ __attribute__((visibility("hidden")));
+
+int __swhatbuf(LTTNG_UST_LFILE *, size_t *, int *)
+ __attribute__((visibility("hidden")));
+
+int _fwalk(int (*)(LTTNG_UST_LFILE *))
+ __attribute__((visibility("hidden")));
+
+int __swsetup(LTTNG_UST_LFILE *)
+ __attribute__((visibility("hidden")));
+
+int __sflags(const char *, int *)
+ __attribute__((visibility("hidden")));
+
+wint_t __fgetwc_unlock(LTTNG_UST_LFILE *)
+ __attribute__((visibility("hidden")));
+
+extern void __atexit_register_cleanup(void (*)(void))
+ __attribute__((visibility("hidden")));
+
+extern int __sdidinit
+ __attribute__((visibility("hidden")));
+
+/*
+ * Return true if the given LTTNG_UST_LFILE cannot be written now.
+ */
+#define cantwrite(fp) \
+ ((((fp)->_flags & __SWR) == 0 || (fp)->_bf._base == NULL) && \
+ __swsetup(fp))
+
+/*
+ * Test whether the given stdio file has an active ungetc buffer;
+ * release such a buffer, without restoring ordinary unread data.
+ */
+#define HASUB(fp) (_UB(fp)._base != NULL)
+#define FREEUB(fp) { \
+ if (_UB(fp)._base != (fp)->_ubuf) \
+ free(_UB(fp)._base); \
+ _UB(fp)._base = NULL; \
+}
+
+/*
+ * test for an fgetln() buffer.
+ */
+#define HASLB(fp) ((fp)->_lb._base != NULL)
+#define FREELB(fp) { \
+ free((char *)(fp)->_lb._base); \
+ (fp)->_lb._base = NULL; \
+}
--- /dev/null
+/* $OpenBSD: mbrtowc_sb.c,v 1.4 2005/11/27 20:03:06 cloder Exp $ */
+/* $NetBSD: multibyte_sb.c,v 1.4 2003/08/07 16:43:04 agc Exp $ */
+
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1991 The Regents of the University of California.
+ * All rights reserved.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <wchar.h>
+
+#include "various.h"
+
+/*ARGSUSED*/
+size_t
+ust_safe_mbrtowc(wchar_t *pwc, const char *s, size_t n,
+ mbstate_t *ps __attribute__((unused)))
+{
+
+ /* pwc may be NULL */
+ /* s may be NULL */
+ /* ps appears to be unused */
+
+ if (s == NULL)
+ return 0;
+ if (n == 0)
+ return (size_t)-1;
+ if (pwc)
+ *pwc = (wchar_t)(unsigned char)*s;
+ return (*s != '\0');
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * Copyright (C) 2009 Pierre-Marc Fournier
+ * Copyright (C) 2011 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#include <stddef.h>
+
+/* write() */
+#include <unistd.h>
+
+/* writev() */
+#include <sys/uio.h>
+
+/* send() */
+#include <sys/types.h>
+#include <sys/socket.h>
+
+#include <errno.h>
+
+#include <ust-share.h>
+
+/*
+ * This write is "patient": it restarts after EINTR and keeps writing
+ * until the whole buffer has been written or an error occurs.
+ */
+
+ssize_t ust_patient_write(int fd, const void *buf, size_t count)
+{
+ const char *bufc = (const char *) buf;
+ ssize_t result;
+
+ for(;;) {
+ result = write(fd, bufc, count);
+ if (result == -1 && errno == EINTR) {
+ continue;
+ }
+ if (result <= 0) {
+ return result;
+ }
+ count -= result;
+ bufc += result;
+
+ if (count == 0) {
+ break;
+ }
+ }
+
+ return bufc - (const char *) buf;
+}
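+
+/*
+ * Example (sketch): on success, ust_patient_write() returns `count';
+ * a smaller return value only happens on error. handle_error() is a
+ * hypothetical error handler:
+ *
+ *     if (ust_patient_write(fd, buf, len) != len)
+ *         handle_error();
+ */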
+
+/*
+ * The `struct iovec *iov` is not `const` because we modify it to support
+ * partial writes.
+ */
+ssize_t ust_patient_writev(int fd, struct iovec *iov, int iovcnt)
+{
+ ssize_t written, total_written = 0;
+ int curr_element_idx = 0;
+
+ for(;;) {
+ written = writev(fd, iov + curr_element_idx,
+ iovcnt - curr_element_idx);
+ if (written == -1 && errno == EINTR) {
+ continue;
+ }
+ if (written <= 0) {
+ return written;
+ }
+
+ total_written += written;
+
+ /*
+ * If it's not the last element in the vector and we have
+ * written more than the current element size, then increment
+ * the current element index until we reach the element that
+ * was partially written.
+ */
+ while (curr_element_idx < iovcnt &&
+ written >= iov[curr_element_idx].iov_len) {
+ written -= iov[curr_element_idx].iov_len;
+ curr_element_idx++;
+ }
+
+ /* Maybe we are done. */
+ if (curr_element_idx >= iovcnt) {
+ break;
+ }
+
+ /* Update the current element base and size. */
+ iov[curr_element_idx].iov_base += written;
+ iov[curr_element_idx].iov_len -= written;
+ }
+
+ return total_written;
+}
+
+ssize_t ust_patient_send(int fd, const void *buf, size_t count, int flags)
+{
+ const char *bufc = (const char *) buf;
+ ssize_t result;
+
+ for(;;) {
+ result = send(fd, bufc, count, flags);
+ if (result == -1 && errno == EINTR) {
+ continue;
+ }
+ if (result <= 0) {
+ return result;
+ }
+ count -= result;
+ bufc += result;
+
+ if (count == 0) {
+ break;
+ }
+ }
+
+ return bufc - (const char *) buf;
+}
--- /dev/null
+/* $OpenBSD: snprintf.c,v 1.16 2009/10/22 01:23:16 guenther Exp $ */
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdarg.h>
+#include "local.h"
+#include "ust-snprintf.h"
+
+#define DUMMY_LEN 1
+
+int ust_safe_vsnprintf(char *str, size_t n, const char *fmt, va_list ap)
+{
+ int ret;
+ char dummy[DUMMY_LEN];
+ LTTNG_UST_LFILE f;
+ struct __lttng_ust_sfileext fext;
+
+ /* While snprintf(3) specifies size_t, stdio uses an int internally */
+ if (n > INT_MAX)
+ n = INT_MAX;
+ /* Stdio internals do not deal correctly with a zero-length buffer */
+ if (n == 0) {
+ str = dummy;
+ n = DUMMY_LEN;
+ }
+ _FILEEXT_SETUP(&f, &fext);
+ f._file = -1;
+ f._flags = __SWR | __SSTR;
+ f._bf._base = f._p = (unsigned char *)str;
+ f._bf._size = f._w = n - 1;
+ ret = ust_safe_vfprintf(&f, fmt, ap);
+ *f._p = '\0';
+ return (ret);
+}
+
+int ust_safe_snprintf(char *str, size_t n, const char *fmt, ...)
+{
+ va_list ap;
+ int ret;
+
+ va_start(ap, fmt);
+ ret = ust_safe_vsnprintf(str, n, fmt, ap);
+ va_end(ap);
+
+ return ret;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+#ifndef UST_SNPRINTF_VARIOUS_H
+#define UST_SNPRINTF_VARIOUS_H
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <wchar.h>
+
+struct __lttng_ust_sbuf {
+ unsigned char *_base;
+ int _size;
+};
+
+/*
+ * stdio state variables.
+ *
+ * The following always hold:
+ *
+ * if (_flags&(__SLBF|__SWR)) == (__SLBF|__SWR),
+ * _lbfsize is -_bf._size, else _lbfsize is 0
+ * if _flags&__SRD, _w is 0
+ * if _flags&__SWR, _r is 0
+ *
+ * This ensures that the getc and putc macros (or inline functions) never
+ * try to write or read from a file that is in `read' or `write' mode.
+ * (Moreover, they can, and do, automatically switch from read mode to
+ * write mode, and back, on "r+" and "w+" files.)
+ *
+ * _lbfsize is used only to make the inline line-buffered output stream
+ * code as compact as possible.
+ *
+ * _ub, _up, and _ur are used when ungetc() pushes back more characters
+ * than fit in the current _bf, or when ungetc() pushes back a character
+ * that does not match the previous one in _bf. When this happens,
+ * _ub._base becomes non-nil (i.e., a stream has ungetc() data iff
+ * _ub._base!=NULL) and _up and _ur save the current values of _p and _r.
+ */
+typedef struct __lttng_ust_sFILE {
+ unsigned char *_p; /* current position in (some) buffer */
+ int _r; /* read space left for getc() */
+ int _w; /* write space left for putc() */
+ short _flags; /* flags, below; this FILE is free if 0 */
+ short _file; /* fileno, if Unix descriptor, else -1 */
+ struct __lttng_ust_sbuf _bf; /* the buffer (at least 1 byte, if !NULL) */
+ int _lbfsize; /* 0 or -_bf._size, for inline putc */
+
+ /* operations */
+ void *_cookie; /* cookie passed to io functions */
+ int (*_close)(void *);
+ int (*_read)(void *, char *, int);
+ fpos_t (*_seek)(void *, fpos_t, int);
+ int (*_write)(void *, const char *, int);
+
+ /* extension data, to avoid further ABI breakage */
+ struct __lttng_ust_sbuf _ext;
+ /* data for long sequences of ungetc() */
+ unsigned char *_up; /* saved _p when _p is doing ungetc data */
+ int _ur; /* saved _r when _r is counting ungetc data */
+
+ /* tricks to meet minimum requirements even when malloc() fails */
+ unsigned char _ubuf[3]; /* guarantee an ungetc() buffer */
+ unsigned char _nbuf[1]; /* guarantee a getc() buffer */
+
+ /* separate buffer for fgetln() when line crosses buffer boundary */
+ struct __lttng_ust_sbuf _lb; /* buffer for fgetln() */
+
+ /* Unix stdio files get aligned to block boundaries on fseek() */
+ int _blksize; /* stat.st_blksize (may be != _bf._size) */
+ fpos_t _offset; /* current lseek offset */
+} LTTNG_UST_LFILE;
+
+#define __SLBF 0x0001 /* line buffered */
+#define __SNBF 0x0002 /* unbuffered */
+#define __SRD 0x0004 /* OK to read */
+#define __SWR 0x0008 /* OK to write */
+ /* RD and WR are never simultaneously asserted */
+#define __SRW 0x0010 /* open for reading & writing */
+#define __SEOF 0x0020 /* found EOF */
+#define __SERR 0x0040 /* found error */
+#define __SMBF 0x0080 /* _buf is from malloc */
+#define __SAPP 0x0100 /* fdopen()ed in append mode */
+#define __SSTR 0x0200 /* this is an sprintf/snprintf string */
+#define __SOPT 0x0400 /* do fseek() optimisation */
+#define __SNPT 0x0800 /* do not do fseek() optimisation */
+#define __SOFF 0x1000 /* set iff _offset is in fact correct */
+#define __SMOD 0x2000 /* true => fgetln modified _p text */
+#define __SALC 0x4000 /* allocate string space dynamically */
+
+#define __sferror(p) (((p)->_flags & __SERR) != 0)
+
+extern int ust_safe_fflush(LTTNG_UST_LFILE *fp)
+ __attribute__((visibility("hidden")));
+
+extern int ust_safe_vfprintf(LTTNG_UST_LFILE *fp, const char *fmt0, va_list ap)
+ __attribute__((visibility("hidden")));
+
+extern size_t ust_safe_mbrtowc(wchar_t *pwc, const char *s, size_t n, mbstate_t *ps)
+ __attribute__((visibility("hidden")));
+
+#endif /* UST_SNPRINTF_VARIOUS_H */
--- /dev/null
+/* $OpenBSD: vfprintf.c,v 1.57 2009/10/28 21:15:02 naddy Exp $ */
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+/*
+ * Actual printf innards.
+ *
+ * This code is large and complicated...
+ */
+
+//#define FLOATING_POINT
+
+#include <sys/types.h>
+#include <sys/mman.h>
+
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <wchar.h>
+
+#include "local.h"
+#include "fvwrite.h"
+#include "various.h"
+
+static char null_str[] = "(null)";
+static char bad_base_str[] = "bug in ust_safe_vfprintf: bad base";
+
+union arg {
+ int intarg;
+ unsigned int uintarg;
+ long longarg;
+ unsigned long ulongarg;
+ long long longlongarg;
+ unsigned long long ulonglongarg;
+ ptrdiff_t ptrdiffarg;
+ size_t sizearg;
+ ssize_t ssizearg;
+ intmax_t intmaxarg;
+ uintmax_t uintmaxarg;
+ void *pvoidarg;
+ char *pchararg;
+ signed char *pschararg;
+ short *pshortarg;
+ int *pintarg;
+ long *plongarg;
+ long long *plonglongarg;
+ ptrdiff_t *pptrdiffarg;
+ ssize_t *pssizearg;
+ intmax_t *pintmaxarg;
+#ifdef FLOATING_POINT
+ double doublearg;
+ long double longdoublearg;
+#endif
+};
+
+static int __find_arguments(const char *fmt0, va_list ap, union arg **argtable,
+ size_t *argtablesiz);
+static int __grow_type_table(unsigned char **typetable, int *tablesize);
+
+/*
+ * Flush out all the vectors defined by the given uio,
+ * then reset it so that it can be reused.
+ */
+static int
+__sprint(LTTNG_UST_LFILE *fp, struct __lttng_ust_suio *uio)
+{
+ int err;
+
+ if (uio->uio_resid == 0) {
+ uio->uio_iovcnt = 0;
+ return (0);
+ }
+ err = __sfvwrite(fp, uio);
+ uio->uio_resid = 0;
+ uio->uio_iovcnt = 0;
+ return (err);
+}
+
+/*
+ * Helper function for `fprintf to unbuffered unix file': creates a
+ * temporary buffer. We only work on write-only files; this avoids
+ * worries about ungetc buffers and so forth.
+ */
+//static int
+//__sbprintf(LTTNG_UST_LFILE *fp, const char *fmt, va_list ap)
+//{
+// int ret;
+// LTTNG_UST_LFILE fake;
+// struct __sfileext fakeext;
+// unsigned char buf[BUFSIZ];
+//
+// _FILEEXT_SETUP(&fake, &fakeext);
+// /* copy the important variables */
+// fake._flags = fp->_flags & ~__SNBF;
+// fake._file = fp->_file;
+// fake._cookie = fp->_cookie;
+// fake._write = fp->_write;
+//
+// /* set up the buffer */
+// fake._bf._base = fake._p = buf;
+// fake._bf._size = fake._w = sizeof(buf);
+// fake._lbfsize = 0; /* not actually used, but Just In Case */
+//
+// /* do the work, then copy any error status */
+// ret = ust_safe_vfprintf(&fake, fmt, ap);
+// if (ret >= 0 && fflush(&fake))
+// ret = EOF;
+// if (fake._flags & __SERR)
+// fp->_flags |= __SERR;
+// return (ret);
+//}
+
+
+#ifdef FLOATING_POINT
+#include <float.h>
+#include <locale.h>
+#include <math.h>
+#include "floatio.h"
+
+#define DEFPREC 6
+
+extern char *__dtoa(double, int, int, int *, int *, char **);
+extern void __freedtoa(char *);
+static int exponent(char *, int, int);
+#endif /* FLOATING_POINT */
+
+/*
+ * The size of the buffer we use as scratch space for integer
+ * conversions, among other things. Technically, we would need the
+ * most space for base 10 conversions with thousands' grouping
+ * characters between each pair of digits. 100 bytes is a
+ * conservative overestimate even for a 128-bit uintmax_t.
+ */
+#define BUF 100
+
+#define STATIC_ARG_TBL_SIZE 8 /* Size of static argument table. */
+
+
+/*
+ * Macros for converting digits to letters and vice versa
+ */
+#define to_digit(c) ((c) - '0')
+#define is_digit(c) ((unsigned)to_digit(c) <= 9)
+#define to_char(n) ((n) + '0')
+
+/*
+ * Flags used during conversion.
+ */
+#define ALT 0x0001 /* alternate form */
+#define LADJUST 0x0004 /* left adjustment */
+#define LONGDBL 0x0008 /* long double; unimplemented */
+#define LONGINT 0x0010 /* long integer */
+#define LLONGINT 0x0020 /* long long integer */
+#define SHORTINT 0x0040 /* short integer */
+#define ZEROPAD 0x0080 /* zero (as opposed to blank) pad */
+#define FPT 0x0100 /* Floating point number */
+#define PTRINT 0x0200 /* (unsigned) ptrdiff_t */
+#define SIZEINT 0x0400 /* (signed) size_t */
+#define CHARINT 0x0800 /* 8 bit integer */
+#define MAXINT 0x1000 /* largest integer size (intmax_t) */
+
+int ust_safe_vfprintf(LTTNG_UST_LFILE *fp, const char *fmt0, va_list ap)
+{
+ char *fmt; /* format string */
+ int ch; /* character from fmt */
+ int n, n2; /* handy integers (short term usage) */
+ char *cp; /* handy char pointer (short term usage) */
+ struct __lttng_ust_siov *iovp; /* for PRINT macro */
+ int flags; /* flags as above */
+ int ret; /* return value accumulator */
+ int width; /* width from format (%8d), or 0 */
+ int prec; /* precision from format; <0 for N/A */
+ char sign; /* sign prefix (' ', '+', '-', or \0) */
+ wchar_t wc;
+ mbstate_t ps;
+#ifdef FLOATING_POINT
+ /*
+ * We can decompose the printed representation of floating
+ * point numbers into several parts, some of which may be empty:
+ *
+ * [+|-| ] [0x|0X] MMM . NNN [e|E|p|P] [+|-] ZZ
+ * A B ---C--- D E F
+ *
+ * A: 'sign' holds this value if present; '\0' otherwise
+ * B: ox[1] holds the 'x' or 'X'; '\0' if not hexadecimal
+ * C: cp points to the string MMMNNN. Leading and trailing
+ * zeros are not in the string and must be added.
+ * D: expchar holds this character; '\0' if no exponent, e.g. %f
+ * F: at least two digits for decimal, at least one digit for hex
+ */
+ char *decimal_point = localeconv()->decimal_point;
+ int signflag; /* true if float is negative */
+ union { /* floating point arguments %[aAeEfFgG] */
+ double dbl;
+ long double ldbl;
+ } fparg;
+ int expt; /* integer value of exponent */
+ char expchar; /* exponent character: [eEpP\0] */
+ char *dtoaend; /* pointer to end of converted digits */
+ int expsize; /* character count for expstr */
+ int lead; /* sig figs before decimal or group sep */
+ int ndig; /* actual number of digits returned by dtoa */
+ char expstr[MAXEXPDIG+2]; /* buffer for exponent string: e+ZZZ */
+ char *dtoaresult = NULL;
+#endif
+
+ uintmax_t _umax; /* integer arguments %[diouxX] */
+ enum { OCT, DEC, HEX } base; /* base for %[diouxX] conversion */
+ int dprec; /* a copy of prec if %[diouxX], 0 otherwise */
+ int realsz; /* field size expanded by dprec */
+ int size; /* size of converted field or string */
+ const char *xdigs = NULL; /* digits for %[xX] conversion */
+#define NIOV 8
+ struct __lttng_ust_suio uio; /* output information: summary */
+ struct __lttng_ust_siov iov[NIOV];/* ... and individual io vectors */
+ char buf[BUF]; /* buffer with space for digits of uintmax_t */
+ char ox[2]; /* space for 0x; ox[1] is either x, X, or \0 */
+ union arg *argtable; /* args, built due to positional arg */
+ union arg statargtable[STATIC_ARG_TBL_SIZE];
+ size_t argtablesiz;
+ int nextarg; /* 1-based argument index */
+ va_list orgap; /* original argument pointer */
+
+ /*
+ * Choose PADSIZE to trade efficiency vs. size. If larger printf
+ * fields occur frequently, increase PADSIZE and make the initialisers
+ * below longer.
+ */
+#define PADSIZE 16 /* pad chunk size */
+ static char blanks[PADSIZE] =
+ {' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' ',' '};
+ static char zeroes[PADSIZE] =
+ {'0','0','0','0','0','0','0','0','0','0','0','0','0','0','0','0'};
+
+ static const char xdigs_lower[16] = "0123456789abcdef";
+ static const char xdigs_upper[16] = "0123456789ABCDEF";
+
+ /*
+ * BEWARE, these `goto error' on error, and PAD uses `n'.
+ */
+#define PRINT(ptr, len) do { \
+ iovp->iov_base = (ptr); \
+ iovp->iov_len = (len); \
+ uio.uio_resid += (len); \
+ iovp++; \
+ if (++uio.uio_iovcnt >= NIOV) { \
+ if (__sprint(fp, &uio)) \
+ goto error; \
+ iovp = iov; \
+ } \
+} while (0)
+#define PAD(howmany, with) do { \
+ if ((n = (howmany)) > 0) { \
+ while (n > PADSIZE) { \
+ PRINT(with, PADSIZE); \
+ n -= PADSIZE; \
+ } \
+ PRINT(with, n); \
+ } \
+} while (0)
+#define PRINTANDPAD(p, ep, len, with) do { \
+ n2 = (ep) - (p); \
+ if (n2 > (len)) \
+ n2 = (len); \
+ if (n2 > 0) \
+ PRINT((p), n2); \
+ PAD((len) - (n2 > 0 ? n2 : 0), (with)); \
+} while(0)
+#define FLUSH() do { \
+ if (uio.uio_resid && __sprint(fp, &uio)) \
+ goto error; \
+ uio.uio_iovcnt = 0; \
+ iovp = iov; \
+} while (0)
+
+ /*
+ * To extend shorts properly, we need both signed and unsigned
+ * argument extraction methods.
+ */
+#define SARG() \
+ ((intmax_t)(flags&MAXINT ? GETARG(intmax_t) : \
+ flags&LLONGINT ? GETARG(long long) : \
+ flags&LONGINT ? GETARG(long) : \
+ flags&PTRINT ? GETARG(ptrdiff_t) : \
+ flags&SIZEINT ? GETARG(ssize_t) : \
+ flags&SHORTINT ? (short)GETARG(int) : \
+ flags&CHARINT ? (__signed char)GETARG(int) : \
+ GETARG(int)))
+#define UARG() \
+ ((uintmax_t)(flags&MAXINT ? GETARG(uintmax_t) : \
+ flags&LLONGINT ? GETARG(unsigned long long) : \
+ flags&LONGINT ? GETARG(unsigned long) : \
+ flags&PTRINT ? (uintptr_t)GETARG(ptrdiff_t) : /* XXX */ \
+ flags&SIZEINT ? GETARG(size_t) : \
+ flags&SHORTINT ? (unsigned short)GETARG(int) : \
+ flags&CHARINT ? (unsigned char)GETARG(int) : \
+ GETARG(unsigned int)))
+
+ /*
+ * Append a digit to a value and check for overflow.
+ */
+#define APPEND_DIGIT(val, dig) do { \
+ if ((val) > INT_MAX / 10) \
+ goto overflow; \
+ (val) *= 10; \
+ if ((val) > INT_MAX - to_digit((dig))) \
+ goto overflow; \
+ (val) += to_digit((dig)); \
+} while (0)
+
+ /*
+ * Get * arguments, including the form *nn$. Preserve the nextarg
+ * so that the argument can be gotten once the type is determined.
+ */
+#define GETASTER(val) \
+ n2 = 0; \
+ cp = fmt; \
+ while (is_digit(*cp)) { \
+ APPEND_DIGIT(n2, *cp); \
+ cp++; \
+ } \
+ if (*cp == '$') { \
+ int hold = nextarg; \
+ if (argtable == NULL) { \
+ argtable = statargtable; \
+ __find_arguments(fmt0, orgap, &argtable, &argtablesiz); \
+ } \
+ nextarg = n2; \
+ val = GETARG(int); \
+ nextarg = hold; \
+ fmt = ++cp; \
+ } else { \
+ val = GETARG(int); \
+ }
+
+/*
+ * Get the argument indexed by nextarg. If the argument table is
+ * built, use it to get the argument. If it's not, get the next
+ * argument (and arguments must be gotten sequentially).
+ */
+#define GETARG(type) \
+ ((argtable != NULL) ? *((type*)(&argtable[nextarg++])) : \
+ (nextarg++, va_arg(ap, type)))
+
+ _SET_ORIENTATION(fp, -1);
+ /* sorry, fprintf(read_only_file, "") returns EOF, not 0 */
+ if (cantwrite(fp)) {
+ errno = EBADF;
+ return (EOF);
+ }
+
+ /* optimise fprintf(stderr) (and other unbuffered Unix files) */
+// if ((fp->_flags & (__SNBF|__SWR|__SRW)) == (__SNBF|__SWR) &&
+// fp->_file >= 0)
+// return (__sbprintf(fp, fmt0, ap));
+
+ fmt = (char *)fmt0;
+ argtable = NULL;
+ nextarg = 1;
+ va_copy(orgap, ap);
+ uio.uio_iov = iovp = iov;
+ uio.uio_resid = 0;
+ uio.uio_iovcnt = 0;
+ ret = 0;
+
+ memset(&ps, 0, sizeof(ps));
+ /*
+ * Scan the format for conversions (`%' character).
+ */
+ for (;;) {
+ cp = fmt;
+ while ((n = ust_safe_mbrtowc(&wc, fmt, MB_CUR_MAX, &ps)) > 0) {
+ fmt += n;
+ if (wc == '%') {
+ fmt--;
+ break;
+ }
+ }
+ if (fmt != cp) {
+ ptrdiff_t m = fmt - cp;
+ if (m < 0 || m > INT_MAX - ret)
+ goto overflow;
+ PRINT(cp, m);
+ ret += m;
+ }
+ if (n <= 0)
+ goto done;
+ fmt++; /* skip over '%' */
+
+ flags = 0;
+ dprec = 0;
+ width = 0;
+ prec = -1;
+ sign = '\0';
+ ox[1] = '\0';
+
+rflag: ch = *fmt++;
+reswitch: switch (ch) {
+ case ' ':
+ /*
+ * ``If the space and + flags both appear, the space
+ * flag will be ignored.''
+ * -- ANSI X3J11
+ */
+ if (!sign)
+ sign = ' ';
+ goto rflag;
+ case '#':
+ flags |= ALT;
+ goto rflag;
+ case '\'':
+ /* grouping not implemented */
+ goto rflag;
+ case '*':
+ /*
+ * ``A negative field width argument is taken as a
+ * - flag followed by a positive field width.''
+ * -- ANSI X3J11
+ * They don't exclude field widths read from args.
+ */
+ GETASTER(width);
+ if (width >= 0)
+ goto rflag;
+ if (width == INT_MIN)
+ goto overflow;
+ width = -width;
+ /* FALLTHROUGH */
+ case '-':
+ flags |= LADJUST;
+ goto rflag;
+ case '+':
+ sign = '+';
+ goto rflag;
+ case '.':
+ if ((ch = *fmt++) == '*') {
+ GETASTER(n);
+ prec = n < 0 ? -1 : n;
+ goto rflag;
+ }
+ n = 0;
+ while (is_digit(ch)) {
+ APPEND_DIGIT(n, ch);
+ ch = *fmt++;
+ }
+ if (ch == '$') {
+ nextarg = n;
+ if (argtable == NULL) {
+ argtable = statargtable;
+ __find_arguments(fmt0, orgap,
+ &argtable, &argtablesiz);
+ }
+ goto rflag;
+ }
+ prec = n;
+ goto reswitch;
+ case '0':
+ /*
+ * ``Note that 0 is taken as a flag, not as the
+ * beginning of a field width.''
+ * -- ANSI X3J11
+ */
+ flags |= ZEROPAD;
+ goto rflag;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ n = 0;
+ do {
+ APPEND_DIGIT(n, ch);
+ ch = *fmt++;
+ } while (is_digit(ch));
+ if (ch == '$') {
+ nextarg = n;
+ if (argtable == NULL) {
+ argtable = statargtable;
+ __find_arguments(fmt0, orgap,
+ &argtable, &argtablesiz);
+ }
+ goto rflag;
+ }
+ width = n;
+ goto reswitch;
+#ifdef FLOATING_POINT
+ case 'L':
+ flags |= LONGDBL;
+ goto rflag;
+#endif
+ case 'h':
+ if (*fmt == 'h') {
+ fmt++;
+ flags |= CHARINT;
+ } else {
+ flags |= SHORTINT;
+ }
+ goto rflag;
+ case 'j':
+ flags |= MAXINT;
+ goto rflag;
+ case 'l':
+ if (*fmt == 'l') {
+ fmt++;
+ flags |= LLONGINT;
+ } else {
+ flags |= LONGINT;
+ }
+ goto rflag;
+ case 'q':
+ flags |= LLONGINT;
+ goto rflag;
+ case 't':
+ flags |= PTRINT;
+ goto rflag;
+ case 'z':
+ flags |= SIZEINT;
+ goto rflag;
+ case 'c':
+ *(cp = buf) = GETARG(int);
+ size = 1;
+ sign = '\0';
+ break;
+ case 'D':
+ flags |= LONGINT;
+ /*FALLTHROUGH*/
+ case 'd':
+ case 'i':
+ _umax = SARG();
+ if ((intmax_t)_umax < 0) {
+ _umax = -_umax;
+ sign = '-';
+ }
+ base = DEC;
+ goto number;
+#ifdef FLOATING_POINT
+ case 'a':
+ case 'A':
+ if (ch == 'a') {
+ ox[1] = 'x';
+ xdigs = xdigs_lower;
+ expchar = 'p';
+ } else {
+ ox[1] = 'X';
+ xdigs = xdigs_upper;
+ expchar = 'P';
+ }
+ if (prec >= 0)
+ prec++;
+ if (dtoaresult)
+ __freedtoa(dtoaresult);
+ if (flags & LONGDBL) {
+ fparg.ldbl = GETARG(long double);
+ dtoaresult = cp =
+ __hldtoa(fparg.ldbl, xdigs, prec,
+ &expt, &signflag, &dtoaend);
+ if (dtoaresult == NULL) {
+ errno = ENOMEM;
+ goto error;
+ }
+ } else {
+ fparg.dbl = GETARG(double);
+ dtoaresult = cp =
+ __hdtoa(fparg.dbl, xdigs, prec,
+ &expt, &signflag, &dtoaend);
+ if (dtoaresult == NULL) {
+ errno = ENOMEM;
+ goto error;
+ }
+ }
+ if (prec < 0)
+ prec = dtoaend - cp;
+ if (expt == INT_MAX)
+ ox[1] = '\0';
+ goto fp_common;
+ case 'e':
+ case 'E':
+ expchar = ch;
+ if (prec < 0) /* account for digit before decpt */
+ prec = DEFPREC + 1;
+ else
+ prec++;
+ goto fp_begin;
+ case 'f':
+ case 'F':
+ expchar = '\0';
+ goto fp_begin;
+ case 'g':
+ case 'G':
+ expchar = ch - ('g' - 'e');
+ if (prec == 0)
+ prec = 1;
+fp_begin:
+ if (prec < 0)
+ prec = DEFPREC;
+ if (dtoaresult)
+ __freedtoa(dtoaresult);
+ if (flags & LONGDBL) {
+ fparg.ldbl = GETARG(long double);
+ dtoaresult = cp =
+ __ldtoa(&fparg.ldbl, expchar ? 2 : 3, prec,
+ &expt, &signflag, &dtoaend);
+ if (dtoaresult == NULL) {
+ errno = ENOMEM;
+ goto error;
+ }
+ } else {
+ fparg.dbl = GETARG(double);
+ dtoaresult = cp =
+ __dtoa(fparg.dbl, expchar ? 2 : 3, prec,
+ &expt, &signflag, &dtoaend);
+ if (dtoaresult == NULL) {
+ errno = ENOMEM;
+ goto error;
+ }
+ if (expt == 9999)
+ expt = INT_MAX;
+ }
+fp_common:
+ if (signflag)
+ sign = '-';
+ if (expt == INT_MAX) { /* inf or nan */
+ if (*cp == 'N') {
+ cp = (ch >= 'a') ? "nan" : "NAN";
+ sign = '\0';
+ } else
+ cp = (ch >= 'a') ? "inf" : "INF";
+ size = 3;
+ flags &= ~ZEROPAD;
+ break;
+ }
+ flags |= FPT;
+ ndig = dtoaend - cp;
+ if (ch == 'g' || ch == 'G') {
+ if (expt > -4 && expt <= prec) {
+ /* Make %[gG] smell like %[fF] */
+ expchar = '\0';
+ if (flags & ALT)
+ prec -= expt;
+ else
+ prec = ndig - expt;
+ if (prec < 0)
+ prec = 0;
+ } else {
+ /*
+ * Make %[gG] smell like %[eE], but
+ * trim trailing zeroes if no # flag.
+ */
+ if (!(flags & ALT))
+ prec = ndig;
+ }
+ }
+ if (expchar) {
+ expsize = exponent(expstr, expt - 1, expchar);
+ size = expsize + prec;
+ if (prec > 1 || flags & ALT)
+ ++size;
+ } else {
+ /* space for digits before decimal point */
+ if (expt > 0)
+ size = expt;
+ else /* "0" */
+ size = 1;
+ /* space for decimal pt and following digits */
+ if (prec || flags & ALT)
+ size += prec + 1;
+ lead = expt;
+ }
+ break;
+#endif /* FLOATING_POINT */
+ case 'n':
+ if (flags & LLONGINT)
+ *GETARG(long long *) = ret;
+ else if (flags & LONGINT)
+ *GETARG(long *) = ret;
+ else if (flags & SHORTINT)
+ *GETARG(short *) = ret;
+ else if (flags & CHARINT)
+ *GETARG(__signed char *) = ret;
+ else if (flags & PTRINT)
+ *GETARG(ptrdiff_t *) = ret;
+ else if (flags & SIZEINT)
+ *GETARG(ssize_t *) = ret;
+ else if (flags & MAXINT)
+ *GETARG(intmax_t *) = ret;
+ else
+ *GETARG(int *) = ret;
+ continue; /* no output */
+ case 'O':
+ flags |= LONGINT;
+ /*FALLTHROUGH*/
+ case 'o':
+ _umax = UARG();
+ base = OCT;
+ goto nosign;
+ case 'p':
+ /*
+ * ``The argument shall be a pointer to void. The
+ * value of the pointer is converted to a sequence
+ * of printable characters, in an implementation-
+ * defined manner.''
+ * -- ANSI X3J11
+ */
+ /* NOSTRICT */
+ _umax = (u_long)GETARG(void *);
+ base = HEX;
+ xdigs = xdigs_lower;
+ ox[1] = 'x';
+ goto nosign;
+ case 's':
+ if ((cp = GETARG(char *)) == NULL)
+ cp = null_str;
+ if (prec >= 0) {
+ /*
+ * can't use strlen; can only look for the
+ * NUL in the first `prec' characters, and
+ * strlen() will go further.
+ */
+ char *p = memchr(cp, 0, prec);
+
+ size = p ? (p - cp) : prec;
+ } else {
+ size_t len;
+
+ if ((len = strlen(cp)) > INT_MAX)
+ goto overflow;
+ size = (int)len;
+ }
+ sign = '\0';
+ break;
+ case 'U':
+ flags |= LONGINT;
+ /*FALLTHROUGH*/
+ case 'u':
+ _umax = UARG();
+ base = DEC;
+ goto nosign;
+ case 'X':
+ xdigs = xdigs_upper;
+ goto hex;
+ case 'x':
+ xdigs = xdigs_lower;
+hex: _umax = UARG();
+ base = HEX;
+ /* leading 0x/X only if non-zero */
+ if (flags & ALT && _umax != 0)
+ ox[1] = ch;
+
+ /* unsigned conversions */
+nosign: sign = '\0';
+ /*
+ * ``... diouXx conversions ... if a precision is
+ * specified, the 0 flag will be ignored.''
+ * -- ANSI X3J11
+ */
+number: if ((dprec = prec) >= 0)
+ flags &= ~ZEROPAD;
+
+ /*
+ * ``The result of converting a zero value with an
+ * explicit precision of zero is no characters.''
+ * -- ANSI X3J11
+ */
+ cp = buf + BUF;
+ if (_umax != 0 || prec != 0) {
+ /*
+ * Unsigned mod is hard, and unsigned mod
+ * by a constant is easier than that by
+ * a variable; hence this switch.
+ */
+ switch (base) {
+ case OCT:
+ do {
+ *--cp = to_char(_umax & 7);
+ _umax >>= 3;
+ } while (_umax);
+ /* handle octal leading 0 */
+ if (flags & ALT && *cp != '0')
+ *--cp = '0';
+ break;
+
+ case DEC:
+ /* many numbers are 1 digit */
+ while (_umax >= 10) {
+ *--cp = to_char(_umax % 10);
+ _umax /= 10;
+ }
+ *--cp = to_char(_umax);
+ break;
+
+ case HEX:
+ do {
+ *--cp = xdigs[_umax & 15];
+ _umax >>= 4;
+ } while (_umax);
+ break;
+
+ default:
+ cp = bad_base_str;
+ size = strlen(cp);
+ goto skipsize;
+ }
+ }
+ size = buf + BUF - cp;
+ if (size > BUF) /* should never happen */
+ abort();
+ skipsize:
+ break;
+ default: /* "%?" prints ?, unless ? is NUL */
+ if (ch == '\0')
+ goto done;
+ /* pretend it was %c with argument ch */
+ cp = buf;
+ *cp = ch;
+ size = 1;
+ sign = '\0';
+ break;
+ }
+
+ /*
+ * All reasonable formats wind up here. At this point, `cp'
+ * points to a string which (if not flags&LADJUST) should be
+ * padded out to `width' places. If flags&ZEROPAD, it should
+ * first be prefixed by any sign or other prefix; otherwise,
+ * it should be blank padded before the prefix is emitted.
+ * After any left-hand padding and prefixing, emit zeroes
+ * required by a decimal %[diouxX] precision, then print the
+ * string proper, then emit zeroes required by any leftover
+ * floating precision; finally, if LADJUST, pad with blanks.
+ *
+ * Compute actual size, so we know how much to pad.
+ * size excludes decimal prec; realsz includes it.
+ */
+ realsz = dprec > size ? dprec : size;
+ if (sign)
+ realsz++;
+ if (ox[1])
+ realsz+= 2;
+
+ /* right-adjusting blank padding */
+ if ((flags & (LADJUST|ZEROPAD)) == 0)
+ PAD(width - realsz, blanks);
+
+ /* prefix */
+ if (sign)
+ PRINT(&sign, 1);
+ if (ox[1]) { /* ox[1] is either x, X, or \0 */
+ ox[0] = '0';
+ PRINT(ox, 2);
+ }
+
+ /* right-adjusting zero padding */
+ if ((flags & (LADJUST|ZEROPAD)) == ZEROPAD)
+ PAD(width - realsz, zeroes);
+
+ /* leading zeroes from decimal precision */
+ PAD(dprec - size, zeroes);
+
+ /* the string or number proper */
+#ifdef FLOATING_POINT
+ if ((flags & FPT) == 0) {
+ PRINT(cp, size);
+ } else { /* glue together f_p fragments */
+ if (!expchar) { /* %[fF] or sufficiently short %[gG] */
+ if (expt <= 0) {
+ PRINT(zeroes, 1);
+ if (prec || flags & ALT)
+ PRINT(decimal_point, 1);
+ PAD(-expt, zeroes);
+ /* already handled initial 0's */
+ prec += expt;
+ } else {
+ PRINTANDPAD(cp, dtoaend, lead, zeroes);
+ cp += lead;
+ if (prec || flags & ALT)
+ PRINT(decimal_point, 1);
+ }
+ PRINTANDPAD(cp, dtoaend, prec, zeroes);
+ } else { /* %[eE] or sufficiently long %[gG] */
+ if (prec > 1 || flags & ALT) {
+ buf[0] = *cp++;
+ buf[1] = *decimal_point;
+ PRINT(buf, 2);
+ PRINT(cp, ndig-1);
+ PAD(prec - ndig, zeroes);
+ } else { /* XeYYY */
+ PRINT(cp, 1);
+ }
+ PRINT(expstr, expsize);
+ }
+ }
+#else
+ PRINT(cp, size);
+#endif
+ /* left-adjusting padding (always blank) */
+ if (flags & LADJUST)
+ PAD(width - realsz, blanks);
+
+ /* finally, adjust ret */
+ if (width < realsz)
+ width = realsz;
+ if (width > INT_MAX - ret)
+ goto overflow;
+ ret += width;
+
+ FLUSH(); /* copy out the I/O vectors */
+ }
+done:
+ FLUSH();
+error:
+ if (__sferror(fp))
+ ret = -1;
+ goto finish;
+
+overflow:
+ errno = ENOMEM;
+ ret = -1;
+
+finish:
+ va_end(orgap);
+#ifdef FLOATING_POINT
+ if (dtoaresult)
+ __freedtoa(dtoaresult);
+#endif
+ if (argtable != NULL && argtable != statargtable) {
+ munmap(argtable, argtablesiz);
+ argtable = NULL;
+ }
+ return (ret);
+}
+
+/*
+ * Type ids for argument type table.
+ */
+#define T_UNUSED 0
+#define T_SHORT 1
+#define T_U_SHORT 2
+#define TP_SHORT 3
+#define T_INT 4
+#define T_U_INT 5
+#define TP_INT 6
+#define T_LONG 7
+#define T_U_LONG 8
+#define TP_LONG 9
+#define T_LLONG 10
+#define T_U_LLONG 11
+#define TP_LLONG 12
+#define T_DOUBLE 13
+#define T_LONG_DOUBLE 14
+#define TP_CHAR 15
+#define TP_VOID 16
+#define T_PTRINT 17
+#define TP_PTRINT 18
+#define T_SIZEINT 19
+#define T_SSIZEINT 20
+#define TP_SSIZEINT 21
+#define T_MAXINT 22
+#define T_MAXUINT 23
+#define TP_MAXINT 24
+#define T_CHAR 25
+#define T_U_CHAR 26
+
+/*
+ * Find all arguments when a positional parameter is encountered. Returns a
+ * table, indexed by argument number, of pointers to each argument. The
+ * initial argument table should be an array of STATIC_ARG_TBL_SIZE entries.
+ * It will be replaced with a mmap-ed one if it overflows (malloc cannot be
+ * used since we are attempting to make snprintf thread-safe, and alloca is
+ * problematic since we have nested functions...)
+ */
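+/*
+ * Illustrative example: for the format "%2$s %1$d", this pass records
+ * argument 1 as T_INT and argument 2 as TP_CHAR, so the va_list can
+ * still be consumed in order when the table is built below.
+ */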
+static int
+__find_arguments(const char *fmt0, va_list ap, union arg **argtable,
+ size_t *argtablesiz)
+{
+ char *fmt; /* format string */
+ int ch; /* character from fmt */
+ int n, n2; /* handy integer (short term usage) */
+ char *cp; /* handy char pointer (short term usage) */
+ int flags; /* flags as above */
+ unsigned char *typetable; /* table of types */
+ unsigned char stattypetable[STATIC_ARG_TBL_SIZE];
+ int tablesize; /* current size of type table */
+ int tablemax; /* largest used index in table */
+ int nextarg; /* 1-based argument index */
+ int ret = 0; /* return value */
+ wchar_t wc;
+ mbstate_t ps;
+
+ /*
+ * Add an argument type to the table, expanding if necessary.
+ */
+#define ADDTYPE(type) \
+ ((nextarg >= tablesize) ? \
+ __grow_type_table(&typetable, &tablesize) : 0, \
+ (nextarg > tablemax) ? tablemax = nextarg : 0, \
+ typetable[nextarg++] = type)
+
+#define ADDSARG() \
+ ((flags&MAXINT) ? ADDTYPE(T_MAXINT) : \
+ ((flags&PTRINT) ? ADDTYPE(T_PTRINT) : \
+ ((flags&SIZEINT) ? ADDTYPE(T_SSIZEINT) : \
+ ((flags&LLONGINT) ? ADDTYPE(T_LLONG) : \
+ ((flags&LONGINT) ? ADDTYPE(T_LONG) : \
+ ((flags&SHORTINT) ? ADDTYPE(T_SHORT) : \
+ ((flags&CHARINT) ? ADDTYPE(T_CHAR) : ADDTYPE(T_INT))))))))
+
+#define ADDUARG() \
+ ((flags&MAXINT) ? ADDTYPE(T_MAXUINT) : \
+ ((flags&PTRINT) ? ADDTYPE(T_PTRINT) : \
+ ((flags&SIZEINT) ? ADDTYPE(T_SIZEINT) : \
+ ((flags&LLONGINT) ? ADDTYPE(T_U_LLONG) : \
+ ((flags&LONGINT) ? ADDTYPE(T_U_LONG) : \
+ ((flags&SHORTINT) ? ADDTYPE(T_U_SHORT) : \
+ ((flags&CHARINT) ? ADDTYPE(T_U_CHAR) : ADDTYPE(T_U_INT))))))))
+
+ /*
+ * Add * arguments to the type array.
+ */
+#define ADDASTER() \
+ n2 = 0; \
+ cp = fmt; \
+ while (is_digit(*cp)) { \
+ APPEND_DIGIT(n2, *cp); \
+ cp++; \
+ } \
+ if (*cp == '$') { \
+ int hold = nextarg; \
+ nextarg = n2; \
+ ADDTYPE(T_INT); \
+ nextarg = hold; \
+ fmt = ++cp; \
+ } else { \
+ ADDTYPE(T_INT); \
+ }
+ fmt = (char *)fmt0;
+ typetable = stattypetable;
+ tablesize = STATIC_ARG_TBL_SIZE;
+ tablemax = 0;
+ nextarg = 1;
+ memset(typetable, T_UNUSED, STATIC_ARG_TBL_SIZE);
+ memset(&ps, 0, sizeof(ps));
+
+ /*
+ * Scan the format for conversions (`%' character).
+ */
+ for (;;) {
+ cp = fmt;
+ while ((n = ust_safe_mbrtowc(&wc, fmt, MB_CUR_MAX, &ps)) > 0) {
+ fmt += n;
+ if (wc == '%') {
+ fmt--;
+ break;
+ }
+ }
+ if (n <= 0)
+ goto done;
+ fmt++; /* skip over '%' */
+
+ flags = 0;
+
+rflag: ch = *fmt++;
+reswitch: switch (ch) {
+ case ' ':
+ case '#':
+ case '\'':
+ goto rflag;
+ case '*':
+ ADDASTER();
+ goto rflag;
+ case '-':
+ case '+':
+ goto rflag;
+ case '.':
+ if ((ch = *fmt++) == '*') {
+ ADDASTER();
+ goto rflag;
+ }
+ while (is_digit(ch)) {
+ ch = *fmt++;
+ }
+ goto reswitch;
+ case '0':
+ goto rflag;
+ case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ n = 0;
+ do {
+ APPEND_DIGIT(n, ch);
+ ch = *fmt++;
+ } while (is_digit(ch));
+ if (ch == '$') {
+ nextarg = n;
+ goto rflag;
+ }
+ goto reswitch;
+#ifdef FLOATING_POINT
+ case 'L':
+ flags |= LONGDBL;
+ goto rflag;
+#endif
+ case 'h':
+ if (*fmt == 'h') {
+ fmt++;
+ flags |= CHARINT;
+ } else {
+ flags |= SHORTINT;
+ }
+ goto rflag;
+ case 'l':
+ if (*fmt == 'l') {
+ fmt++;
+ flags |= LLONGINT;
+ } else {
+ flags |= LONGINT;
+ }
+ goto rflag;
+ case 'q':
+ flags |= LLONGINT;
+ goto rflag;
+ case 't':
+ flags |= PTRINT;
+ goto rflag;
+ case 'z':
+ flags |= SIZEINT;
+ goto rflag;
+ case 'c':
+ ADDTYPE(T_INT);
+ break;
+ case 'D':
+ flags |= LONGINT;
+ /*FALLTHROUGH*/
+ case 'd':
+ case 'i':
+ ADDSARG();
+ break;
+#ifdef FLOATING_POINT
+ case 'a':
+ case 'A':
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ if (flags & LONGDBL)
+ ADDTYPE(T_LONG_DOUBLE);
+ else
+ ADDTYPE(T_DOUBLE);
+ break;
+#endif /* FLOATING_POINT */
+ case 'n':
+ if (flags & LLONGINT)
+ ADDTYPE(TP_LLONG);
+ else if (flags & LONGINT)
+ ADDTYPE(TP_LONG);
+ else if (flags & SHORTINT)
+ ADDTYPE(TP_SHORT);
+ else if (flags & PTRINT)
+ ADDTYPE(TP_PTRINT);
+ else if (flags & SIZEINT)
+ ADDTYPE(TP_SSIZEINT);
+ else if (flags & MAXINT)
+ ADDTYPE(TP_MAXINT);
+ else
+ ADDTYPE(TP_INT);
+ continue; /* no output */
+ case 'O':
+ flags |= LONGINT;
+ /*FALLTHROUGH*/
+ case 'o':
+ ADDUARG();
+ break;
+ case 'p':
+ ADDTYPE(TP_VOID);
+ break;
+ case 's':
+ ADDTYPE(TP_CHAR);
+ break;
+ case 'U':
+ flags |= LONGINT;
+ /*FALLTHROUGH*/
+ case 'u':
+ case 'X':
+ case 'x':
+ ADDUARG();
+ break;
+ default: /* "%?" prints ?, unless ? is NUL */
+ if (ch == '\0')
+ goto done;
+ break;
+ }
+ }
+done:
+ /*
+ * Build the argument table.
+ */
+ if (tablemax >= STATIC_ARG_TBL_SIZE) {
+ *argtablesiz = sizeof(union arg) * (tablemax + 1);
+ *argtable = mmap(NULL, *argtablesiz,
+ PROT_WRITE|PROT_READ, MAP_ANON|MAP_PRIVATE, -1, 0);
+ if (*argtable == MAP_FAILED)
+ return (-1);
+ }
+
+#if 0
+ /* XXX is this required? */
+ (*argtable)[0].intarg = 0;
+#endif
+ for (n = 1; n <= tablemax; n++) {
+ switch (typetable[n]) {
+ case T_UNUSED:
+ case T_CHAR:
+ case T_U_CHAR:
+ case T_SHORT:
+ case T_U_SHORT:
+ case T_INT:
+ (*argtable)[n].intarg = va_arg(ap, int);
+ break;
+ case TP_SHORT:
+ (*argtable)[n].pshortarg = va_arg(ap, short *);
+ break;
+ case T_U_INT:
+ (*argtable)[n].uintarg = va_arg(ap, unsigned int);
+ break;
+ case TP_INT:
+ (*argtable)[n].pintarg = va_arg(ap, int *);
+ break;
+ case T_LONG:
+ (*argtable)[n].longarg = va_arg(ap, long);
+ break;
+ case T_U_LONG:
+ (*argtable)[n].ulongarg = va_arg(ap, unsigned long);
+ break;
+ case TP_LONG:
+ (*argtable)[n].plongarg = va_arg(ap, long *);
+ break;
+ case T_LLONG:
+ (*argtable)[n].longlongarg = va_arg(ap, long long);
+ break;
+ case T_U_LLONG:
+ (*argtable)[n].ulonglongarg = va_arg(ap, unsigned long long);
+ break;
+ case TP_LLONG:
+ (*argtable)[n].plonglongarg = va_arg(ap, long long *);
+ break;
+#ifdef FLOATING_POINT
+ case T_DOUBLE:
+ (*argtable)[n].doublearg = va_arg(ap, double);
+ break;
+ case T_LONG_DOUBLE:
+ (*argtable)[n].longdoublearg = va_arg(ap, long double);
+ break;
+#endif
+ case TP_CHAR:
+ (*argtable)[n].pchararg = va_arg(ap, char *);
+ break;
+ case TP_VOID:
+ (*argtable)[n].pvoidarg = va_arg(ap, void *);
+ break;
+ case T_PTRINT:
+ (*argtable)[n].ptrdiffarg = va_arg(ap, ptrdiff_t);
+ break;
+ case TP_PTRINT:
+ (*argtable)[n].pptrdiffarg = va_arg(ap, ptrdiff_t *);
+ break;
+ case T_SIZEINT:
+ (*argtable)[n].sizearg = va_arg(ap, size_t);
+ break;
+ case T_SSIZEINT:
+ (*argtable)[n].ssizearg = va_arg(ap, ssize_t);
+ break;
+ case TP_SSIZEINT:
+ (*argtable)[n].pssizearg = va_arg(ap, ssize_t *);
+ break;
+		case T_MAXINT:
+			(*argtable)[n].intmaxarg = va_arg(ap, intmax_t);
+			break;
+		case T_MAXUINT:
+			(*argtable)[n].uintmaxarg = va_arg(ap, uintmax_t);
+			break;
+		case TP_MAXINT:
+			(*argtable)[n].pintmaxarg = va_arg(ap, intmax_t *);
+			break;
+ }
+ }
+ goto finish;
+
+overflow:
+ errno = ENOMEM;
+ ret = -1;
+
+finish:
+	if (typetable != NULL && typetable != stattypetable) {
+		/* tablesize holds the mmap'd size set by __grow_type_table() */
+		munmap(typetable, tablesize);
+		typetable = NULL;
+	}
+ return (ret);
+}
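+
+/*
+ * For example, scanning the format "%2$s %1$d" records
+ * typetable[1] = T_INT and typetable[2] = TP_CHAR, and the loop after
+ * `done' then fetches argtable[1].intarg and argtable[2].pchararg
+ * with va_arg() in index order, regardless of where the conversions
+ * appear in the format string.
+ */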
+
+/*
+ * Increase the size of the type table.
+ */
+static int
+__grow_type_table(unsigned char **typetable, int *tablesize)
+{
+ unsigned char *oldtable = *typetable;
+ int newsize = *tablesize * 2;
+
+ if (newsize < getpagesize())
+ newsize = getpagesize();
+
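+	/*
+	 * The first growth copies out of the caller's static stack buffer,
+	 * which must not be unmapped; later growths replace one mmap'd
+	 * block with a larger one.
+	 */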
+ if (*tablesize == STATIC_ARG_TBL_SIZE) {
+ *typetable = mmap(NULL, newsize, PROT_WRITE|PROT_READ,
+ MAP_ANON|MAP_PRIVATE, -1, 0);
+ if (*typetable == MAP_FAILED)
+ return (-1);
+		memcpy(*typetable, oldtable, *tablesize);
+ } else {
+ unsigned char *new = mmap(NULL, newsize, PROT_WRITE|PROT_READ,
+ MAP_ANON|MAP_PRIVATE, -1, 0);
+ if (new == MAP_FAILED)
+ return (-1);
+ memmove(new, *typetable, *tablesize);
+ munmap(*typetable, *tablesize);
+ *typetable = new;
+ }
+ memset(*typetable + *tablesize, T_UNUSED, (newsize - *tablesize));
+
+ *tablesize = newsize;
+ return (0);
+}
+
+
+#ifdef FLOATING_POINT
+static int
+exponent(char *p0, int exp, int fmtch)
+{
+ char *p, *t;
+ char expbuf[MAXEXPDIG];
+
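+	/*
+	 * For example, exponent(buf, 5, 'e') stores "e+05" and returns 4,
+	 * while exponent(buf, -3, 'p') stores "p-3" and returns 3 (%a
+	 * exponents may be a single digit).
+	 */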
+ p = p0;
+ *p++ = fmtch;
+ if (exp < 0) {
+ exp = -exp;
+ *p++ = '-';
+ } else
+ *p++ = '+';
+ t = expbuf + MAXEXPDIG;
+ if (exp > 9) {
+ do {
+ *--t = to_char(exp % 10);
+ } while ((exp /= 10) > 9);
+ *--t = to_char(exp);
+ for (; t < expbuf + MAXEXPDIG; *p++ = *t++)
+ /* nothing */;
+ } else {
+ /*
+ * Exponents for decimal floating point conversions
+ * (%[eEgG]) must be at least two characters long,
+ * whereas exponents for hexadecimal conversions can
+ * be only one character long.
+ */
+ if (fmtch == 'e' || fmtch == 'E')
+ *p++ = '0';
+ *p++ = to_char(exp);
+ }
+ return (p - p0);
+}
+#endif /* FLOATING_POINT */
--- /dev/null
+/* $OpenBSD: wcio.h,v 1.1 2005/06/17 20:40:32 espie Exp $ */
+/* $NetBSD: wcio.h,v 1.3 2003/01/18 11:30:00 thorpej Exp $ */
+
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C)2001 Citrus Project,
+ * All rights reserved.
+ *
+ * $Citrus$
+ */
+
+#ifndef _WCIO_H_
+#define _WCIO_H_
+
+#include <stddef.h>
+#include <wchar.h>
+
+/* minimal requirement of SUSv2 */
+#define WCIO_UNGETWC_BUFSIZE 1
+
+struct wchar_io_data {
+ mbstate_t wcio_mbstate_in;
+ mbstate_t wcio_mbstate_out;
+
+ wchar_t wcio_ungetwc_buf[WCIO_UNGETWC_BUFSIZE];
+ size_t wcio_ungetwc_inbuf;
+
+ int wcio_mode; /* orientation */
+};
+
+#define WCIO_GET(fp) \
+ (_EXT(fp) ? &(_EXT(fp)->_wcio) : (struct wchar_io_data *)0)
+
+#define WCIO_GET_NONULL(fp) \
+ (&(_EXT(fp)->_wcio))
+
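+/*
+ * Record the stream orientation, but only if it is still unset: a
+ * wcio_mode of 0 means unoriented, while positive and negative values
+ * mark the stream wide- and byte-oriented respectively, as with C99
+ * fwide().
+ */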
+#define _SET_ORIENTATION(fp, mode) \
+do {\
+ struct wchar_io_data *_wcio = WCIO_GET(fp); \
+ if (_wcio && _wcio->wcio_mode == 0) \
+ _wcio->wcio_mode = (mode);\
+} while (0)
+
+/*
+ * WCIO_FREE should be called by fclose
+ */
+#define WCIO_FREE(fp) \
+do {\
+ struct wchar_io_data *_wcio = WCIO_GET(fp); \
+ if (_wcio) { \
+ _wcio->wcio_mode = 0;\
+ _wcio->wcio_ungetwc_inbuf = 0;\
+ } \
+} while (0)
+
+#define WCIO_FREEUB(fp) \
+do {\
+ struct wchar_io_data *_wcio = WCIO_GET(fp); \
+ if (_wcio) { \
+ _wcio->wcio_ungetwc_inbuf = 0;\
+ } \
+} while (0)
+
+#define WCIO_INIT(fp) \
+ memset(WCIO_GET_NONULL(fp), 0, sizeof(struct wchar_io_data))
+
+#endif /*_WCIO_H_*/
--- /dev/null
+/* $OpenBSD: wsetup.c,v 1.7 2005/08/08 08:05:36 espie Exp $ */
+/*
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (C) 1990, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Chris Torek.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include "local.h"
+
+/*
+ * Various output routines call wsetup to be sure it is safe to write,
+ * because either _flags does not include __SWR, or _buf is NULL.
+ * __swsetup() returns 0 if OK to write, nonzero otherwise.
+ */
+int
+__swsetup(LTTNG_UST_LFILE *fp)
+{
+ /* make sure stdio is set up */
+// if (!__sdidinit)
+// __sinit();
+
+ /*
+ * If we are not writing, we had better be reading and writing.
+ */
+ if ((fp->_flags & __SWR) == 0) {
+ if ((fp->_flags & __SRW) == 0)
+ return (EOF);
+ if (fp->_flags & __SRD) {
+ /* clobber any ungetc data */
+ if (HASUB(fp))
+ FREEUB(fp);
+ fp->_flags &= ~(__SRD|__SEOF);
+ fp->_r = 0;
+ fp->_p = fp->_bf._base;
+ }
+ fp->_flags |= __SWR;
+ }
+
+ /*
+ * Make a buffer if necessary, then set _w.
+ */
+ if (fp->_bf._base == NULL) {
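+		/*
+		 * The original stdio would allocate a buffer here; this
+		 * embedded copy evidently expects the caller to have
+		 * supplied one already, so reaching this point is a bug.
+		 */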
+// if ((fp->_flags & (__SSTR | __SALC)) == __SSTR)
+// return (EOF);
+// __smakebuf(fp);
+ assert(0);
+ }
+ if (fp->_flags & __SLBF) {
+ /*
+ * It is line buffered, so make _lbfsize be -_bufsize
+ * for the putc() macro. We will change _lbfsize back
+ * to 0 whenever we turn off __SWR.
+ */
+ fp->_w = 0;
+ fp->_lbfsize = -fp->_bf._size;
+ } else
+ fp->_w = fp->_flags & __SNBF ? 0 : fp->_bf._size;
+ return (0);
+}
noinst_PROGRAMS = bench1 bench2
bench1_SOURCES = bench.c tp.c ust_tests_benchmark.h
-bench1_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
+bench1_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
bench2_SOURCES = bench.c tp.c ust_tests_benchmark.h
-bench2_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
bench2_CFLAGS = -DTRACING $(AM_CFLAGS)
+bench2_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
dist_noinst_SCRIPTS = test_benchmark ptime
noinst_PROGRAMS = ctf-types
ctf_types_SOURCES = ctf-types.c tp.c ust_tests_ctf_types.h
-ctf_types_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
ctf_types_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
+ctf_types_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
EXTRA_DIST = README
noinst_PROGRAMS = hello-many
hello_many_SOURCES = hello-many.c tp.c ust_tests_hello_many.h
-hello_many_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
hello_many_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
+hello_many_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
EXTRA_DIST = README
noinst_PROGRAMS = hello
hello_SOURCES = hello.cpp tp-cpp.cpp ust_tests_hello.h
-hello_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
+hello_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
EXTRA_DIST = README
noinst_PROGRAMS = hello
hello_SOURCES = hello.c tp.c ust_tests_hello.h
-hello_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
hello_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
+hello_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
EXTRA_DIST = README
noinst_PROGRAMS = same_line_tracepoint
same_line_tracepoint_SOURCES = same_line_tracepoint.c ust_tests_sameline.h
-same_line_tracepoint_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
+same_line_tracepoint_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
EXTRA_DIST = README
noinst_PROGRAMS = hello
hello_SOURCES = hello.c tp.c ust_tests_hello.h
-hello_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la $(DL_LIBS)
hello_CFLAGS = -Werror=old-style-definition $(AM_CFLAGS)
+hello_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
+ $(DL_LIBS)
noinst_PROGRAMS = test_msgpack
test_msgpack_SOURCES = test_msgpack.c
test_msgpack_LDADD = \
- $(top_builddir)/libmsgpack/libmsgpack.la \
- $(top_builddir)/tests/utils/libtap.a
+ $(top_builddir)/src/libmsgpack/libmsgpack.la \
+ $(top_builddir)/tests/utils/libtap.a
test_msgpack_CFLAGS = $(AM_CFLAGS)
#include "tap.h"
-#include "../../libmsgpack/msgpack.h"
+#include "../../src/libmsgpack/msgpack.h"
#define BUFFER_SIZE 4096
#define NUM_TESTS 23
# SPDX-License-Identifier: LGPL-2.1-only
-AM_CPPFLAGS += -I$(top_srcdir)/include -I$(top_srcdir)/ -I$(top_srcdir)/tests/utils
+AM_CPPFLAGS += -I$(top_srcdir)/tests/utils
noinst_PROGRAMS = test_shm
test_shm_SOURCES = shm.c
test_shm_LDADD = \
- $(top_builddir)/libringbuffer/libringbuffer.la \
- $(top_builddir)/liblttng-ust-comm/liblttng-ust-comm.la \
- $(top_builddir)/snprintf/libustsnprintf.la \
+ $(top_builddir)/src/libringbuffer/libringbuffer.la \
+ $(top_builddir)/src/liblttng-ust-comm/liblttng-ust-comm.la \
+ $(top_builddir)/src/snprintf/libustsnprintf.la \
$(top_builddir)/tests/utils/libtap.a
#include <sys/mman.h>
#include <fcntl.h>
-#include "libringbuffer/shm.h"
+#include "../../../src/libringbuffer/shm.h"
#include "ust-compat.h"
#include "tap.h"
# SPDX-License-Identifier: LGPL-2.1-only
-AM_CPPFLAGS += -I$(top_srcdir)/include -I$(top_srcdir)/liblttng-ust -I$(top_srcdir)/tests/utils
+AM_CPPFLAGS += -I$(top_srcdir)/liblttng-ust -I$(top_srcdir)/tests/utils
noinst_PROGRAMS = test_pthread_name
test_pthread_name_SOURCES = pthread_name.c
#include <stdio.h>
#include <string.h>
-#include "compat.h"
+#include "../../../src/liblttng-ust/compat.h"
#include "tap.h"
noinst_PROGRAMS = test_snprintf
test_snprintf_SOURCES = snprintf.c
-test_snprintf_LDADD = $(top_builddir)/snprintf/libustsnprintf.la \
+test_snprintf_LDADD = \
+ $(top_builddir)/src/snprintf/libustsnprintf.la \
$(top_builddir)/tests/utils/libtap.a
EXTRA_DIST = README
noinst_PROGRAMS = ust-elf
ust_elf_SOURCES = ust-elf.c
-ust_elf_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la \
+ust_elf_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
dist_check_SCRIPTS = test_ust_elf
noinst_PROGRAMS = test_ust_error
test_ust_error_SOURCES = ust-error.c
-test_ust_error_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la \
+test_ust_error_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
noinst_PROGRAMS = test_ust_utils
test_ust_utils_SOURCES = ust-utils.c ust-utils-common.h
-test_ust_utils_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la \
+test_ust_utils_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
if HAVE_CXX
noinst_PROGRAMS += test_ust_utils_cxx
test_ust_utils_cxx_SOURCES = ust-utils-cxx.cpp ust-utils-common.h
-test_ust_utils_cxx_LDADD = $(top_builddir)/liblttng-ust/liblttng-ust.la \
+test_ust_utils_cxx_LDADD = \
+ $(top_builddir)/src/liblttng-ust/liblttng-ust.la \
$(top_builddir)/tests/utils/libtap.a
endif