/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-callstack-legacy-impl.h
 *
 * LTTng callstack event context, legacy implementation. Targets
 * kernels and architectures not yet using the stacktrace common
 * infrastructure introduced in the upstream Linux kernel by commit
 * 214d8ca6ee "stacktrace: Provide common infrastructure" (merged in
 * Linux 5.2, then gradually introduced within architectures).
 *
 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (C) 2014 Francis Giraldeau <francis.giraldeau@gmail.com>
 */

#define MAX_ENTRIES 128
enum lttng_cs_ctx_modes {
	CALLSTACK_KERNEL = 0,
	CALLSTACK_USER = 1,
	NR_CALLSTACK_MODES,
};

struct lttng_cs_dispatch {
	struct stack_trace stack_trace;
	unsigned long entries[MAX_ENTRIES];
};
struct lttng_cs {
	struct lttng_cs_dispatch dispatch[RING_BUFFER_MAX_NESTING];
};

struct field_data {
	struct lttng_cs __percpu *cs_percpu;
	enum lttng_cs_ctx_modes mode;
};
struct lttng_cs_type {
	const char *name;
	const char *length_name;
};

static struct lttng_cs_type cs_types[] = {
	{
		.name		= "callstack_kernel",
		.length_name	= "_callstack_kernel_length",
	},
	{
		.name		= "callstack_user",
		.length_name	= "_callstack_user_length",
	},
};
static
const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
{
	return cs_types[mode].name;
}

static
const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
{
	return cs_types[mode].length_name;
}
static
void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
{
	int cpu, i;

	for_each_possible_cpu(cpu) {
		struct lttng_cs *cs;

		cs = per_cpu_ptr(cs_set, cpu);
		for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
			struct lttng_cs_dispatch *dispatch;

			dispatch = &cs->dispatch[i];
			dispatch->stack_trace.entries = dispatch->entries;
			dispatch->stack_trace.max_entries = MAX_ENTRIES;
		}
	}
}
/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);
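
/*
 * callstack_user_nesting is incremented around save_stack_trace_user()
 * in lttng_callstack_sequence_get_size() so that an event triggered
 * while the userspace stack is being walked does not recursively
 * attempt to gather the userspace callstack again (see the check in
 * stack_trace_context()).
 */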
static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx)
{
	int buffer_nesting, cs_user_nesting;
	struct lttng_cs *cs;
	struct field_data *fdata = field->priv;

	/*
	 * Do not gather the userspace callstack context when the event was
	 * triggered by the userspace callstack context saving mechanism.
	 */
	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);

	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
		return NULL;

	/*
	 * get_cpu() is not required, preemption is already
	 * disabled while event is written.
	 *
	 * max nesting is checked in lib_ring_buffer_get_cpu().
	 * Check it again as a safety net.
	 */
	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
		return NULL;

	return &cs->dispatch[buffer_nesting].stack_trace;
}
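
/*
 * The callstack context is emitted as two fields: an unsigned int
 * length followed by a sequence of unsigned long addresses. When
 * stack_trace_context() returns NULL, the length callback records 0
 * and the sequence callback records nothing.
 */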
static
size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	size_t orig_offset = offset;

	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
	offset += sizeof(unsigned int);
	return offset - orig_offset;
}
/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
				struct lib_ring_buffer_ctx *ctx,
				struct lttng_channel *chan)
{
	struct stack_trace *trace;
	struct field_data *fdata = field->priv;
	size_t orig_offset = offset;

	/* do not write data if no space is available */
	trace = stack_trace_context(field, ctx);
	if (unlikely(!trace)) {
		offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
		return offset - orig_offset;
	}

	/* reset stack trace, no need to clear memory */
	trace->nr_entries = 0;

	if (fdata->mode == CALLSTACK_USER) {
		++per_cpu(callstack_user_nesting, ctx->cpu);
		/* do the real work and reserve space */
		save_stack_trace_user(trace);
		per_cpu(callstack_user_nesting, ctx->cpu)--;
	} else {
		save_stack_trace(trace);
	}

	/*
	 * Remove final ULONG_MAX delimiter. If we cannot find it, add
	 * our own marker to show that the stack is incomplete. This is
	 * more compact for a trace.
	 */
	if (trace->nr_entries > 0
			&& trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
		trace->nr_entries--;
	}

	offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
	offset += sizeof(unsigned long) * trace->nr_entries;
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries)
		offset += sizeof(unsigned long);
	return offset - orig_offset;
}
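
/*
 * When the stack filled all available slots (nr_entries == max_entries),
 * one extra unsigned long is reserved above, an ULONG_MAX delimiter is
 * appended by the sequence record callback, and the recorded length is
 * bumped accordingly to flag the callstack as incomplete in the trace.
 */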
static
void lttng_callstack_length_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
	if (unlikely(!trace)) {
		nr_seq_entries = 0;
	} else {
		nr_seq_entries = trace->nr_entries;
		if (trace->nr_entries == trace->max_entries)
			nr_seq_entries++;
	}
	chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
}
static
void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
			struct lib_ring_buffer_ctx *ctx,
			struct lttng_channel *chan)
{
	struct stack_trace *trace = stack_trace_context(field, ctx);
	unsigned int nr_seq_entries;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
	if (unlikely(!trace)) {
		return;
	}
	nr_seq_entries = trace->nr_entries;
	if (trace->nr_entries == trace->max_entries)
		nr_seq_entries++;
	chan->ops->event_write(ctx, trace->entries,
			sizeof(unsigned long) * trace->nr_entries);
	/* Add our own ULONG_MAX delimiter to show incomplete stack. */
	if (trace->nr_entries == trace->max_entries) {
		unsigned long delim = ULONG_MAX;

		chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
	}
}