/*
 * lttng-context-callstack.c
 *
 * LTTng callstack event context.
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * The callstack context can be added to any kernel event. It records
 * either the kernel or the userspace callstack, up to a max depth. The
 * context is a CTF sequence, such that it uses only the space required
 * for the number of callstack entries.
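 *
 * As a rough illustration (this is not the literal CTF metadata the
 * tracer emits), each recorded context field is equivalent to:
 *
 *   unsigned int  length;              number of entries captured
 *   unsigned long entries[length];     return addresses (printed in hex)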
 *
 * It allocates callstack buffers per CPU, up to 4 levels of interrupt
 * nesting. This nesting limit is the same as defined in the ring
 * buffer. It therefore uses a fixed amount of memory, proportional to
 * the number of CPUs:
 *
 *   size = cpus * nest * depth * sizeof(unsigned long)
 *
 * which is about 800 bytes per CPU on a 64-bit host with a depth of 25.
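 * (Assuming the nesting limit of 4: 4 * 25 * 8 bytes = 800 bytes per
 * CPU.)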
 * The allocation is done at initialization to avoid memory allocation
 * overhead while tracing, using a shallow stack.
 *
 * The kernel callstack is recovered using save_stack_trace(), and the
 * userspace callstack uses save_stack_trace_user(). They rely on frame
 * pointers. These are usually available for the kernel, but the
 * compiler option -fomit-frame-pointer, frequently used in popular
 * Linux distributions, may cause the userspace callstack to be
 * unreliable; this is a known limitation of the approach. If frame
 * pointers are not available, no error is produced, but the callstack
 * will be empty. We still provide the feature, because it works well
 * for runtime environments that keep frame pointers. In the future,
 * unwind support and/or last branch record may provide a solution to
 * this problem.
 *
 * Symbol name resolution is left to the trace reader.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/utsname.h>
#include <linux/stacktrace.h>
#include <linux/spinlock.h>
#include "lttng-events.h"
#include "wrapper/ringbuffer/backend.h"
#include "wrapper/ringbuffer/frontend.h"
#include "wrapper/vmalloc.h"
#include "wrapper/kallsyms.h"
#include "lttng-tracer.h"

#define MAX_ENTRIES 25

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
};

struct lttng_cs_dispatch {
	struct stack_trace stack_trace;
	unsigned long entries[MAX_ENTRIES];
};

struct lttng_cs {
	struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
};

struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};

struct lttng_cs_type {
	const char *name;
	const char *save_func_name;
	void (*save_func)(struct stack_trace *trace);
};

static struct lttng_cs_type cs_types[] = {
	{
		.name		= "callstack_kernel",
		.save_func_name	= "save_stack_trace",
		.save_func	= NULL,
	},
	{
		.name		= "callstack_user",
		.save_func_name	= "save_stack_trace_user",
		.save_func	= NULL,
	},
};

/* Lazily resolve the stack-save function through kallsyms. */
static
int init_type(enum lttng_cs_ctx_modes mode)
{
	unsigned long func;

	if (cs_types[mode].save_func)
		return 0;
	func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
	if (!func) {
		printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
				cs_types[mode].save_func_name);
		return -EINVAL;
	}
	cs_types[mode].save_func = (void *) func;
	return 0;
}

static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx)
{
	int nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while the event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (nesting >= RING_BUFFER_MAX_NESTING) {
		return NULL;
	}
	return &cs->dispatch[nesting].stack_trace;
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	size_t size = 0;
	struct stack_trace *trace;
	struct field_data *fdata = field->priv;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
		size += sizeof(unsigned int);
		size += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return size;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	/* do the real work and reserve space */
	cs_types[fdata->mode].save_func(trace);
	/*
	 * Remove final ULONG_MAX delimiter. If we cannot find it, add
	 * our own marker to show that the stack is incomplete. This is
	 * more compact for a trace.
	 */
	if (trace->nr_entries > 0
			&& trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
		trace->nr_entries--;
	}
	size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	size += sizeof(unsigned int);
	size += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	size += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries)
		size += sizeof(unsigned long);
	return size;
}

static
void lttng_callstack_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	if (unlikely(!trace)) {
		/* Write an empty sequence to match the reserved layout. */
		nr_seq_entries = 0;
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
		chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
		lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
		return;
	}
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == trace->max_entries)
		nr_seq_entries++;
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}

static
void field_data_free(struct field_data *fdata)
{
	if (!fdata)
		return;
	free_percpu(fdata->cs_percpu);
	kfree(fdata);
}

static
struct field_data __percpu *field_data_create(enum lttng_cs_ctx_modes mode)
{
	int cpu, i;
	struct lttng_cs __percpu *cs_set;
	struct field_data *fdata;

	fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
	if (!fdata)
		return NULL;
	cs_set = alloc_percpu(struct lttng_cs);
	if (!cs_set)
		goto error_alloc;

	fdata->cs_percpu = cs_set;
	for_each_possible_cpu(cpu) {
		struct lttng_cs *cs;

		cs = per_cpu_ptr(cs_set, cpu);
		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			struct lttng_cs_dispatch *dispatch;

			dispatch = &cs->dispatch[i];
			/* Point each stack_trace at its per-nesting buffer. */
			dispatch->stack_trace.entries = dispatch->entries;
			dispatch->stack_trace.max_entries = MAX_ENTRIES;
		}
	}
	fdata->mode = mode;
	return fdata;

error_alloc:
	field_data_free(fdata);
	return NULL;
}

static
void lttng_callstack_destroy(struct lttng_ctx_field *field)
{
	struct field_data *fdata = field->priv;

	field_data_free(fdata);
}

static
int __lttng_add_callstack_generic(struct lttng_ctx **ctx,
		enum lttng_cs_ctx_modes mode)
{
	const char *ctx_name = cs_types[mode].name;
	struct lttng_ctx_field *field;
	struct field_data *fdata;
	int ret;

	ret = init_type(mode);
	if (ret)
		return ret;

	field = lttng_append_context(ctx);
	if (!field)
		return -ENOMEM;
	if (lttng_find_context(*ctx, ctx_name)) {
		ret = -EEXIST;
		goto error_find;
	}
	fdata = field_data_create(mode);
	if (!fdata) {
		ret = -ENOMEM;
		goto error_create;
	}

	field->event_field.name = ctx_name;
	field->event_field.type.atype = atype_sequence;
	field->event_field.type.u.sequence.elem_type.atype = atype_integer;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.alignment = lttng_alignof(long) * CHAR_BIT;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.signedness = lttng_is_signed_type(unsigned long);
	field->event_field.type.u.sequence.elem_type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.base = 16;
	field->event_field.type.u.sequence.elem_type.u.basic.integer.encoding = lttng_encode_none;

	field->event_field.type.u.sequence.length_type.atype = atype_integer;
	field->event_field.type.u.sequence.length_type.u.basic.integer.size = sizeof(unsigned int) * CHAR_BIT;
	field->event_field.type.u.sequence.length_type.u.basic.integer.alignment = lttng_alignof(unsigned int) * CHAR_BIT;
	field->event_field.type.u.sequence.length_type.u.basic.integer.signedness = lttng_is_signed_type(unsigned int);
	field->event_field.type.u.sequence.length_type.u.basic.integer.reverse_byte_order = 0;
	field->event_field.type.u.sequence.length_type.u.basic.integer.base = 10;
	field->event_field.type.u.sequence.length_type.u.basic.integer.encoding = lttng_encode_none;

	field->get_size_arg = lttng_callstack_get_size;
	field->record = lttng_callstack_record;
	field->priv = fdata;
	field->destroy = lttng_callstack_destroy;
	wrapper_vmalloc_sync_all();
	return 0;

error_create:
	field_data_free(fdata);
error_find:
	lttng_remove_context_field(ctx, field);
	return ret;
}

/**
 * lttng_add_callstack_to_ctx - add callstack event context
 *
 * @ctx: the lttng_ctx pointer to initialize
 * @type: the context type
 *
 * Supported callstack types:
 *	LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL
 *		Records the callstack of the kernel
 *	LTTNG_KERNEL_CONTEXT_CALLSTACK_USER
 *		Records the callstack of the userspace program (from the kernel)
 *
 * Return 0 for success, or error code.
 */
int lttng_add_callstack_to_ctx(struct lttng_ctx **ctx, int type)
{
	switch (type) {
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_KERNEL);
	case LTTNG_KERNEL_CONTEXT_CALLSTACK_USER:
		return __lttng_add_callstack_generic(ctx, CALLSTACK_USER);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(lttng_add_callstack_to_ctx);
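
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * holding a struct lttng_ctx pointer, assumed here to be a channel's
 * chan->ctx, would typically do:
 *
 *	ret = lttng_add_callstack_to_ctx(&chan->ctx,
 *			LTTNG_KERNEL_CONTEXT_CALLSTACK_KERNEL);
 *	if (ret)
 *		return ret;
 */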

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Francis Giraldeau");
MODULE_DESCRIPTION("Linux Trace Toolkit Callstack Support");