/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
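
/*
 * Note: this implementation relies on the stack_trace_save() and
 * stack_trace_save_user() helpers declared in <linux/stacktrace.h>
 * (available since Linux 5.2), which return the number of entries
 * actually stored:
 *
 *   unsigned int stack_trace_save(unsigned long *store, unsigned int size,
 *                                 unsigned int skipnr);
 *   unsigned int stack_trace_save_user(unsigned long *store,
 *                                      unsigned int size);
 */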

#define MAX_ENTRIES 128

enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL,
	CALLSTACK_USER,
};

struct lttng_stack_trace {
	unsigned long entries[MAX_ENTRIES];
	unsigned int nr_entries;
};
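
/* One saved stack trace per ring buffer nesting level, for each CPU. */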
struct lttng_cs {
	struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
};

struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};

static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return "callstack_kernel";
	case CALLSTACK_USER:
		return "callstack_user";
	default:
		return NULL;
	}
}

static
const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
{
	switch (mode) {
	case CALLSTACK_KERNEL:
		return "_callstack_kernel_length";
	case CALLSTACK_USER:
		return "_callstack_user_length";
	default:
		return NULL;
	}
}

static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);
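
/*
 * callstack_user_nesting is incremented around the user stack capture in
 * lttng_callstack_sequence_get_size() and checked in stack_trace_context(),
 * so that events generated while a userspace callstack is being captured do
 * not recursively gather their own userspace callstack.
 */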

static
struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->stack_trace[buffer_nesting];
}

static
size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	size_t orig_offset = offset;

	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	return offset - orig_offset;
}

/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
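/*
 * The get_size (reserve) callback below captures the callstack into the
 * per-CPU, per-nesting-level lttng_stack_trace slot returned by
 * stack_trace_context(); the length and sequence record callbacks further
 * down then read back that same slot to serialize the data into the event
 * payload.
 */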
static
size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
					struct lib_ring_buffer_ctx *ctx,
					struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	switch (fdata->mode) {
	case CALLSTACK_KERNEL:
		/* do the real work and reserve space */
		trace->nr_entries = stack_trace_save(trace->entries,
						MAX_ENTRIES, 0);
		break;
	case CALLSTACK_USER:
		++per_cpu(callstack_user_nesting, ctx->cpu);
		/* do the real work and reserve space */
		trace->nr_entries = stack_trace_save_user(trace->entries,
						MAX_ENTRIES);
		per_cpu(callstack_user_nesting, ctx->cpu)--;
		break;
	}

	/*
	 * If the array is filled, add our own marker to show that the
	 * stack is incomplete.
	 */
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}
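
/*
 * On-trace layout: a length field (unsigned int) followed by a sequence of
 * unsigned long entries. When the stack was truncated at MAX_ENTRIES, one
 * extra entry is accounted for and a ULONG_MAX delimiter is written to mark
 * the incomplete stack.
 */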

static
void lttng_callstack_length_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	if (unlikely(!trace)) {
		nr_seq_entries = 0;
	} else {
		nr_seq_entries = trace->nr_entries;
		if (trace->nr_entries == MAX_ENTRIES)
			nr_seq_entries++;
	}
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
}

static
void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	if (unlikely(!trace)) {
		return;
	}
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == MAX_ENTRIES)
		nr_seq_entries++;
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}