-/*
+/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
+ *
* lttng-context-callstack.c
*
* LTTng callstack event context.
* Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
* Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
* The callstack context can be added to any kernel event. It records
* either the kernel or the userspace callstack, up to a max depth. The
* context is a CTF sequence, such that it uses only the space required
return 0;
}
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
struct lib_ring_buffer_ctx *ctx)
{
- int nesting;
+ int buffer_nesting, cs_user_nesting;
struct lttng_cs *cs;
struct field_data *fdata = field->priv;
+ /*
+ * Do not gather the userspace callstack context when the event was
+ * triggered by the userspace callstack context saving mechanism.
+ */
+ cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+ if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+ return NULL;
+
/*
* get_cpu() is not required, preemption is already
* disabled while event is written.
* Check it again as a safety net.
*/
cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
- nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
- if (nesting >= RING_BUFFER_MAX_NESTING) {
+ buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+ if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
return NULL;
- }
- return &cs->dispatch[nesting].stack_trace;
+
+ return &cs->dispatch[buffer_nesting].stack_trace;
}
/*
/* reset stack trace, no need to clear memory */
trace->nr_entries = 0;
+ if (fdata->mode == CALLSTACK_USER)
+ ++per_cpu(callstack_user_nesting, ctx->cpu);
+
/* do the real work and reserve space */
cs_types[fdata->mode].save_func(trace);
+
+ if (fdata->mode == CALLSTACK_USER)
+ per_cpu(callstack_user_nesting, ctx->cpu)--;
+
/*
* Remove final ULONG_MAX delimiter. If we cannot find it, add
* our own marker to show that the stack is incomplete. This is
switch (type) {
case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
+#ifdef CONFIG_X86
case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
+#endif
default:
return -EINVAL;
}
}
EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);
-
-MODULE_LICENSE("GPL and additional rights");
-MODULE_AUTHOR("Francis Giraldeau");
-MODULE_DESCRIPTION("Linux Trace Toolkit Callstack Support");