/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack.c
 *
 * LTTng callstack event context.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 *
 * The callstack context can be added to any kernel event. It records
 * either the kernel or the userspace callstack, up to a maximum depth.
 * The context is a CTF sequence, so it uses only the space required
 * for the number of callstack entries actually recorded.
 *
 * Callstack buffers are allocated per CPU, one per interrupt nesting
 * level, up to 4 levels (the same nesting limit as defined in the ring
 * buffer). The feature therefore uses a fixed amount of memory,
 * proportional to the number of CPUs:
 *
 *   size = cpus * nest * depth * sizeof(unsigned long)
 *
 * which is 4096 bytes per CPU on a 64-bit host with a depth of 128.
 * The allocation is done at initialization time to avoid memory
 * allocation overhead while tracing, using a shallow stack.
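 *
 * For instance, with nest = 4, depth = 128 and
 * sizeof(unsigned long) == 8 (64-bit host), the formula above gives:
 *
 *   per-CPU size = 4 * 128 * 8 = 4096 bytes
 *   total size   = cpus * 4096 bytes (e.g. 32768 bytes for 8 CPUs)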
 *
 * The kernel callstack is recovered using save_stack_trace(), and the
 * userspace callstack uses save_stack_trace_user(). Both rely on frame
 * pointers. These are usually available for the kernel, but the
 * compiler option -fomit-frame-pointer, frequently used in popular
 * Linux distributions, may make the userspace callstack unreliable;
 * this is a known limitation of the approach. If frame pointers are
 * not available, no error is produced, but the callstack will be
 * empty. We still provide the feature, because it works well for
 * runtime environments that have frame pointers. In the future, unwind
 * support and/or last branch record may provide a solution to this
 * problem.
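 *
 * For example, a userspace program (and its libraries) would typically
 * need to be built with frame pointers enabled, e.g. with gcc's
 * -fno-omit-frame-pointer option, for the userspace callstack to
 * contain useful entries.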
 *
 * The symbol name resolution is left to the trace reader.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include <ringbuffer/backend.h>
#include <ringbuffer/frontend.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <lttng/tracer.h>
#include <lttng/endian.h>
#include "wrapper/vmalloc.h"

/*
 * Select the stacktrace implementation matching the stacktrace API
 * provided by the kernel.
 */
#ifdef CONFIG_ARCH_STACKWALK
#include "lttng-context-callstack-stackwalk-impl.h"
#else
#include "lttng-context-callstack-legacy-impl.h"
#endif
#define NR_FIELDS	2

static
void field_data_free(struct field_data *fdata)
{
	if (!fdata)
		return;
	free_percpu(fdata->cs_percpu);
	kfree(fdata);
}

static
struct field_data *field_data_create(enum lttng_cs_ctx_modes mode)
{
	struct lttng_cs __percpu *cs_set;
	struct field_data *fdata;

	fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
	if (!fdata)
		return NULL;
	/* Per-CPU callstack state, covering all interrupt nesting levels. */
	cs_set = alloc_percpu(struct lttng_cs);
	if (!cs_set)
		goto error_alloc;
	lttng_cs_set_init(cs_set);
	fdata->cs_percpu = cs_set;
	fdata->mode = mode;
	return fdata;

error_alloc:
	field_data_free(fdata);
	return NULL;
}
static
void lttng_callstack_sequence_destroy(void *priv)
{
	struct field_data *fdata = priv;

	field_data_free(fdata);
}

/*
 * Each callstack context contributes two event fields: an unsigned
 * integer length field, followed by a sequence of addresses displayed
 * in hexadecimal (base 16).
 */
static const struct lttng_kernel_event_field *event_fields_kernel[NR_FIELDS] = {
	lttng_kernel_static_event_field("_callstack_kernel_length",
		lttng_kernel_static_type_integer_from_type(unsigned int, __BYTE_ORDER, 10),
		false, false),
	lttng_kernel_static_event_field("callstack_kernel",
		lttng_kernel_static_type_sequence(NULL,
			lttng_kernel_static_type_integer_from_type(unsigned long, __BYTE_ORDER, 16),
			0, none),
		false, false),
};

static const struct lttng_kernel_event_field *event_fields_user[NR_FIELDS] = {
	lttng_kernel_static_event_field("_callstack_user_length",
		lttng_kernel_static_type_integer_from_type(unsigned int, __BYTE_ORDER, 10),
		false, false),
	lttng_kernel_static_event_field("callstack_user",
		lttng_kernel_static_type_sequence(NULL,
			lttng_kernel_static_type_integer_from_type(unsigned long, __BYTE_ORDER, 16),
			0, none),
		false, false),
};

const struct lttng_kernel_event_field **lttng_cs_event_fields(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return event_fields_kernel;
	case CALLSTACK_USER:
		return event_fields_user;
	default:
		return NULL;
	}
}

static
int __lttng_add_callstack_generic(struct lttng_kernel_ctx **ctx,
		enum lttng_cs_ctx_modes mode)
{
	const struct lttng_kernel_event_field **event_fields;
	struct lttng_kernel_ctx_field ctx_field;
	struct field_data *fdata;
	int ret, i;

	ret = init_type(mode);
	if (ret)
		return ret;
	event_fields = lttng_cs_event_fields(mode);
	if (!event_fields) {
		return -EINVAL;
	}
	for (i = 0; i < NR_FIELDS; i++) {
		if (lttng_kernel_find_context(*ctx, event_fields[i]->name))
			return -EEXIST;
	}
	fdata = field_data_create(mode);
	if (!fdata) {
		ret = -ENOMEM;
		goto error_create;
	}

	/* Append the length field. */
	memset(&ctx_field, 0, sizeof(ctx_field));
	ctx_field.event_field = event_fields[0];
	ctx_field.get_size = lttng_callstack_length_get_size;
	ctx_field.record = lttng_callstack_length_record;
	ctx_field.priv = fdata;
	ret = lttng_kernel_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto error_append0;
	}

	/*
	 * Append the sequence field. Only this field carries the destroy
	 * callback, so fdata is freed exactly once.
	 */
	memset(&ctx_field, 0, sizeof(ctx_field));
	ctx_field.event_field = event_fields[1];
	ctx_field.get_size = lttng_callstack_sequence_get_size;
	ctx_field.record = lttng_callstack_sequence_record;
	ctx_field.destroy = lttng_callstack_sequence_destroy;
	ctx_field.priv = fdata;
	ret = lttng_kernel_context_append(ctx, &ctx_field);
	if (ret) {
		ret = -ENOMEM;
		goto error_append1;
	}
	return 0;

error_append1:
	lttng_kernel_context_remove_last(ctx);
error_append0:
	field_data_free(fdata);
error_create:
	return ret;
}

/**
 * lttng_add_callstack_to_ctx - add callstack event context
 *
 * @ctx: the lttng_ctx pointer to initialize
 * @type: the context type
 *
 * Supported callstack types:
 *	LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL
 *		Records the callstack of the kernel.
 *	LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER
 *		Records the callstack of the userspace program (from the
 *		kernel). Only available when CONFIG_X86 is set.
 *
 * Returns 0 on success, or a negative error code.
 */
int lttng_add_callstack_to_ctx(struct lttng_kernel_ctx **ctx, int type)
{
	switch (type) {
	case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
#ifdef CONFIG_X86
	case LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_USER:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
#endif
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);
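
/*
 * Usage sketch (illustrative, not part of this translation unit): a
 * caller holding a struct lttng_kernel_ctx ** (for instance the
 * context-add handling in lttng-abi.c) could add the kernel callstack
 * context with:
 *
 *	ret = lttng_add_callstack_to_ctx(ctx,
 *			LTTNG_KERNEL_ABI_CONTEXT_CALLSTACK_KERNEL);
 *	if (ret)
 *		return ret;
 */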