/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-stackwalk-impl.h
 *
 * LTTng callstack event context, stackwalk implementation. Targets
 * kernels and architectures using the stacktrace common infrastructure
 * introduced in the upstream Linux kernel by commit 214d8ca6ee
 * "stacktrace: Provide common infrastructure" (merged in Linux 5.2,
 * then gradually introduced within architectures).
 *
 * Copyright (C) 2014-2019 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */
/* Maximum number of stack entries recorded per callstack capture. */
#define MAX_ENTRIES 128

/* Which address space the callstack context captures. */
enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,	/* Kernel-space call stack. */
	CALLSTACK_USER = 1,	/* User-space call stack. */
	NR_CALLSTACK_MODES,	/* Number of modes; keep last. */
};
22 | ||
/* One saved call stack: raw return addresses plus their count. */
struct lttng_stack_trace {
	unsigned long entries[MAX_ENTRIES];	/* Return addresses, oldest frame last. */
	unsigned int nr_entries;		/* Number of valid slots in entries[]. */
};
27 | ||
/*
 * Per-cpu callstack storage, one slot per ring-buffer nesting level so a
 * trace saved at one nesting level is not clobbered by a nested event.
 */
struct lttng_cs {
	struct lttng_stack_trace stack_trace[RING_BUFFER_MAX_NESTING];
};
31 | ||
/* Private state attached to a callstack context field (field->priv). */
struct field_data {
	struct lttng_cs __percpu *cs_percpu;	/* Per-cpu callstack scratch storage. */
	enum lttng_cs_ctx_modes mode;		/* Kernel or user callstack. */
};
36 | ||
b29c6286 MD |
37 | static |
38 | const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode) | |
39 | { | |
40 | switch (mode) { | |
41 | case CALLSTACK_KERNEL: | |
42 | return "callstack_kernel"; | |
43 | case CALLSTACK_USER: | |
44 | return "callstack_user"; | |
45 | default: | |
46 | return NULL; | |
47 | } | |
48 | } | |
49 | ||
ceabb767 MD |
50 | static |
51 | const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode) | |
52 | { | |
53 | switch (mode) { | |
54 | case CALLSTACK_KERNEL: | |
55 | return "_callstack_kernel_length"; | |
56 | case CALLSTACK_USER: | |
57 | return "_callstack_user_length"; | |
58 | default: | |
59 | return NULL; | |
60 | } | |
61 | } | |
62 | ||
/*
 * Initialize a per-cpu callstack set. Intentionally a no-op for the
 * stackwalk implementation: the per-cpu storage needs no setup because
 * nr_entries is reset before each capture.
 */
static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
}

/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);
70 | ||
/*
 * Return the per-cpu stack trace slot for the current ring-buffer
 * nesting level, or NULL when no trace should be captured (userspace
 * callstack re-entry, or nesting beyond RING_BUFFER_MAX_NESTING).
 */
static
struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	/* lib_ring_buffer_nesting counts from 1; slot index counts from 0. */
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->stack_trace[buffer_nesting];
}
102 | ||
ceabb767 MD |
103 | static |
104 | size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field, | |
105 | struct lib_ring_buffer_ctx *ctx, | |
106 | struct lttng_channel *chan) | |
107 | { | |
108 | size_t orig_offset = offset; | |
109 | ||
110 | offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int)); | |
111 | offset += sizeof(unsigned int); | |
112 | return offset - orig_offset; | |
113 | } | |
114 | ||
/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		/* Reserve alignment only; the record step writes nothing. */
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	switch (fdata->mode) {
	case CALLSTACK_KERNEL:
		/* do the real work and reserve space */
		trace->nr_entries = stack_trace_save(trace->entries,
				MAX_ENTRIES, 0);
		break;
	case CALLSTACK_USER:
		/*
		 * Bump the per-cpu nesting counter so events triggered while
		 * walking the user stack skip their own callstack capture
		 * (see stack_trace_context).
		 */
		++per_cpu(callstack_user_nesting, ctx->cpu);
		/* do the real work and reserve space */
		trace->nr_entries = stack_trace_save_user(trace->entries,
				MAX_ENTRIES);
		per_cpu(callstack_user_nesting, ctx->cpu)--;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	/*
	 * If the array is filled, add our own marker to show that the
	 * stack is incomplete.
	 */
	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == MAX_ENTRIES)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}
166 | ||
/*
 * Write the callstack length field: the number of sequence elements the
 * record step will emit (0 when no trace was captured; nr_entries plus
 * one delimiter slot when the trace filled the array).
 */
static
void lttng_callstack_length_record(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	if (unlikely(!trace)) {
		nr_seq_entries = 0;
	} else {
		nr_seq_entries = trace->nr_entries;
		/* Account for the ULONG_MAX truncation delimiter. */
		if (trace->nr_entries == MAX_ENTRIES)
			nr_seq_entries++;
	}
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
}
185 | ||
186 | static | |
187 | void lttng_callstack_sequence_record(struct lttng_ctx_field *field, | |
188 | struct lib_ring_buffer_ctx *ctx, | |
189 | struct lttng_channel *chan) | |
190 | { | |
191 | struct lttng_stack_trace *trace = stack_trace_context(field, ctx); | |
192 | unsigned int nr_seq_entries; | |
193 | ||
194 | lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long)); | |
195 | if (unlikely(!trace)) { | |
b29c6286 MD |
196 | return; |
197 | } | |
b29c6286 MD |
198 | nr_seq_entries = trace->nr_entries; |
199 | if (trace->nr_entries == MAX_ENTRIES) | |
200 | nr_seq_entries++; | |
b29c6286 MD |
201 | chan->ops->event_write(ctx, trace->entries, |
202 | sizeof(unsigned long) * trace->nr_entries); | |
203 | /* Add our own ULONG_MAX delimiter to show incomplete stack. */ | |
204 | if (trace->nr_entries == MAX_ENTRIES) { | |
205 | unsigned long delim = ULONG_MAX; | |
206 | ||
207 | chan->ops->event_write(ctx, &delim, sizeof(unsigned long)); | |
208 | } | |
209 | } |