Introduce callstack stackwalk implementation header
author     Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Wed, 22 May 2019 01:16:15 +0000 (21:16 -0400)
committer  Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
           Wed, 22 May 2019 14:48:30 +0000 (10:48 -0400)
Introduce a new implementation header for the stackwalk-based API, added
in Linux 5.2 and gradually adopted by each architecture.
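
For context, the stackwalk-based API referred to here is the stack_trace_save()
family declared in <linux/stacktrace.h> since Linux 5.2. The following is a
minimal sketch (not part of this commit) of how that API is normally called
from kernel code; the helper name and buffer size are illustrative. The header
introduced below does not call these functions directly but resolves them at
run time through kallsyms, as shown in the diff.

    #include <linux/kernel.h>        /* ARRAY_SIZE() */
    #include <linux/stacktrace.h>    /* Linux >= 5.2 stackwalk API */

    static void example_dump_current_stack(void)
    {
        unsigned long entries[32];
        unsigned int nr_entries;

        /* Capture up to 32 kernel return addresses, skipping 0 frames. */
        nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

        /* stack_trace_save_user() is the equivalent for user stacks. */
        stack_trace_print(entries, nr_entries, 0);    /* dump via printk */
    }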

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
lttng-context-callstack-stackwalk-impl.h [new file with mode: 0644]
lttng-context-callstack.c

diff --git a/lttng-context-callstack-stackwalk-impl.h b/lttng-context-callstack-stackwalk-impl.h
new file mode 100644
index 0000000..dfb60c5
--- /dev/null
+++ b/lttng-context-callstack-stackwalk-impl.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
+ * lttng-context-callstack-stackwalk-impl.h
+ *
+ * LTTng callstack event context, stackwalk implementation. Targets
+ * kernels and architectures using the stacktrace common infrastructure
+ * introduced in the upstream Linux kernel by commit 214d8ca6ee
+ * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
+ * then gradually introduced within architectures).
+ *
+ * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
+ */
+
+#define MAX_ENTRIES 128
+
+enum lttng_cs_ctx_modes {
+       CALLSTACK_KERNEL = 0,
+       CALLSTACK_USER = 1,
+       NR_CALLSTACK_MODES,
+};
+
+struct lttng_stack_trace {
+       unsigned long entries[MAX_ENTRIES];
+       unsigned int nr_entries;
+};
+
+struct lttng_cs {
+       struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
+};
+
+struct field_data {
+       struct lttng_cs __percpu *cs_percpu;
+       enum lttng_cs_ctx_modes mode;
+};
+
+static
+unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
+                               unsigned int skipnr);
+static
+unsigned int (*save_func_user)(unsigned long *store, unsigned int size);
+
+static
+const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
+{
+       switch (mode) {
+       case CALLSTACK_KERNEL:
+               return "callstack_kernel";
+       case CALLSTACK_USER:
+               return "callstack_user";
+       default:
+               return NULL;
+       }
+}
+
+static
+int init_type_callstack_kernel(void)
+{
+       unsigned long func;
+       const char *func_name = "stack_trace_save";
+
+       if (save_func_kernel)
+               return 0;
+       func = kallsyms_lookup_funcptr(func_name);
+       if (!func) {
+               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
+                               func_name);
+               return -EINVAL;
+       }
+       save_func_kernel = (void *) func;
+       return 0;
+}
+
+static
+int init_type_callstack_user(void)
+{
+       unsigned long func;
+       const char *func_name = "stack_trace_save_user";
+
+       if (save_func_user)
+               return 0;
+       func = kallsyms_lookup_funcptr(func_name);
+       if (!func) {
+               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
+                               func_name);
+               return -EINVAL;
+       }
+       save_func_user = (void *) func;
+       return 0;
+}
+
+static
+int init_type(enum lttng_cs_ctx_modes mode)
+{
+       switch (mode) {
+       case CALLSTACK_KERNEL:
+               return init_type_callstack_kernel();
+       case CALLSTACK_USER:
+               return init_type_callstack_user();
+       default:
+               return -EINVAL;
+       }
+}
+
+static
+void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
+{
+}
+
+/* Keep track of nesting inside userspace callstack context code */
+DEFINE_PER_CPU(int, callstack_user_nesting);
+
+static
+struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
+                                       struct lib_ring_buffer_ctx *ctx)
+{
+       int buffer_nesting, cs_user_nesting;
+       struct lttng_cs *cs;
+       struct field_data *fdata = field->priv;
+
+       /*
+        * Do not gather the userspace callstack context when the event was
+        * triggered by the userspace callstack context saving mechanism.
+        */
+       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+
+       if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
+               return NULL;
+
+       /*
+        * get_cpu() is not required, preemption is already
+        * disabled while event is written.
+        *
+        * max nesting is checked in lib_ring_buffer_get_cpu().
+        * Check it again as a safety net.
+        */
+       cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+       if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
+               return NULL;
+
+       return &cs->stack_trace[buffer_nesting];
+}
+
+/*
+ * In order to reserve the correct size, the callstack is computed. The
+ * resulting callstack is saved to be accessed in the record step.
+ */
+static
+size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
+                               struct lib_ring_buffer_ctx *ctx,
+                               struct lttng_channel *chan)
+{
+       struct lttng_stack_trace *trace;
+       struct field_data *fdata = field->priv;
+       size_t orig_offset = offset;
+
+       /* do not write data if no space is available */
+       trace = stack_trace_context(field, ctx);
+       if (unlikely(!trace)) {
+               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+               offset += sizeof(unsigned int);
+               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+               return offset - orig_offset;
+       }
+
+       /* reset stack trace, no need to clear memory */
+       trace->nr_entries = 0;
+
+       switch (fdata->mode) {
+       case CALLSTACK_KERNEL:
+               /* do the real work and reserve space */
+               trace->nr_entries = save_func_kernel(trace->entries,
+                                               MAX_ENTRIES, 0);
+               break;
+       case CALLSTACK_USER:
+               ++per_cpu(callstack_user_nesting, ctx->cpu);
+               /* do the real work and reserve space */
+               trace->nr_entries = save_func_user(trace->entries,
+                                               MAX_ENTRIES);
+               per_cpu(callstack_user_nesting, ctx->cpu)--;
+               break;
+       default:
+               WARN_ON_ONCE(1);
+       }
+
+       /*
+        * If the array is filled, add our own marker to show that the
+        * stack is incomplete.
+        */
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       offset += sizeof(unsigned int);
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
+       offset += sizeof(unsigned long) * trace->nr_entries;
+       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+       if (trace->nr_entries == MAX_ENTRIES)
+               offset += sizeof(unsigned long);
+       return offset - orig_offset;
+}
+
+static
+void lttng_callstack_record(struct lttng_ctx_field *field,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lttng_channel *chan)
+{
+       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
+       unsigned int nr_seq_entries;
+
+       if (unlikely(!trace)) {
+               nr_seq_entries = 0;
+               lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
+               chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+               lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+               return;
+       }
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
+       nr_seq_entries = trace->nr_entries;
+       if (trace->nr_entries == MAX_ENTRIES)
+               nr_seq_entries++;
+       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+       chan->ops->event_write(ctx, trace->entries,
+                       sizeof(unsigned long) * trace->nr_entries);
+       /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+       if (trace->nr_entries == MAX_ENTRIES) {
+               unsigned long delim = ULONG_MAX;
+
+               chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
+       }
+}
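
Aside (illustrative, not part of the commit): the callstack_user_nesting
per-CPU counter above exists because walking a userspace stack can itself
trigger further instrumented events on the same CPU (for instance through
page faults), and those nested events must not attempt another user
callstack capture. A stripped-down sketch of that guard pattern follows;
the example_* names are hypothetical.

    #include <linux/percpu.h>
    #include <linux/stacktrace.h>

    static DEFINE_PER_CPU(int, example_user_cs_nesting);

    /*
     * Capture a userspace callstack unless this CPU is already inside such
     * a capture. Preemption is assumed to be disabled by the caller, as it
     * is on the ring buffer write path.
     */
    static unsigned int example_save_user_stack(unsigned long *buf,
            unsigned int size, int cpu)
    {
        unsigned int nr_entries;

        if (per_cpu(example_user_cs_nesting, cpu))
            return 0;    /* nested event: record an empty callstack */
        per_cpu(example_user_cs_nesting, cpu)++;
        nr_entries = stack_trace_save_user(buf, size);
        per_cpu(example_user_cs_nesting, cpu)--;
        return nr_entries;
    }
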
diff --git a/lttng-context-callstack.c b/lttng-context-callstack.c
index 4a4400408fcac6122382bbcb60abbe5a73a719a3..195990a6e3e1292bebce8277a7b3bdefbd0d5acb 100644
--- a/lttng-context-callstack.c
+++ b/lttng-context-callstack.c
 #include "wrapper/vmalloc.h"
 #include "lttng-tracer.h"
 
+#ifdef CONFIG_ARCH_STACKWALK
+#include "lttng-context-callstack-stackwalk-impl.h"
+#else
 #include "lttng-context-callstack-legacy-impl.h"
+#endif
 
 static
 void field_data_free(struct field_data *fdata)
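
Finally, regarding the payload produced by lttng_callstack_get_size() and
lttng_callstack_record() above: each callstack context field is an unsigned
int element count, alignment padding, then that many unsigned long
addresses; when the capture filled all MAX_ENTRIES slots, one extra
ULONG_MAX element is appended to flag a truncated stack. The sketch below
is a hypothetical host-side illustration of that layout (not how an actual
LTTng trace reader works), assuming the reader shares the traced kernel's
word size and endianness; example_print_callstack() is an invented name.

    #include <stdio.h>
    #include <limits.h>

    /*
     * Print one callstack context field already split into its element
     * count and address array. A trailing ULONG_MAX entry, present when
     * the kernel-side buffer was full, marks a truncated stack.
     */
    static void example_print_callstack(unsigned int count,
            const unsigned long *addrs)
    {
        unsigned int i, nr = count;
        int truncated = nr && addrs[nr - 1] == ULONG_MAX;

        if (truncated)
            nr--;    /* drop the delimiter */
        for (i = 0; i < nr; i++)
            printf("  [%u] 0x%lx\n", i, addrs[i]);
        if (truncated)
            printf("  [stack truncated]\n");
    }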