#include "wrapper/vmalloc.h"
#include "lttng-tracer.h"
-#define MAX_ENTRIES 25 /* BUG: saving more than 30 entries causes trace corruption */
+#define MAX_ENTRIES 25
+
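+/*
+ * Stack trace state for one ring-buffer nesting level. The entries
+ * array is embedded with a fixed size, so no separate allocation is
+ * needed per level.
+ */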
+struct lttng_cs_nesting {
+ struct stack_trace stack_trace;
+ unsigned long entries[MAX_ENTRIES];
+};
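+
+/* Per-cpu callstack state: one save area per ring-buffer nesting level. */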
struct lttng_cs {
- struct stack_trace items[RING_BUFFER_MAX_NESTING];
+ struct lttng_cs_nesting level[RING_BUFFER_MAX_NESTING];
};
struct field_data {
- int mode;
struct lttng_cs __percpu *cs_percpu;
+ int mode;
};
struct lttng_cs_type {
	const char *name;
	const char *save_func_name;
	void (*save_func)(struct stack_trace *trace);
};

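+/* save_func is resolved at runtime from save_func_name; NULL until then. */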
static struct lttng_cs_type cs_types[] = {
{
- .name = "callstack_kernel",
- .save_func_name = "save_stack_trace",
- .save_func = NULL,
+ .name = "callstack_kernel",
+ .save_func_name = "save_stack_trace",
+ .save_func = NULL,
},
{
- .name = "callstack_user",
- .save_func_name = "save_stack_trace_user",
- .save_func = NULL,
+ .name = "callstack_user",
+ .save_func_name = "save_stack_trace_user",
+ .save_func = NULL,
},
};
static
struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx)
{
int nesting;
struct lttng_cs *cs;
- struct field_data *fdata = field->private;
+ struct field_data *fdata = field->priv;
/*
 * get_cpu() is not required, preemption is already
 * disabled while the event is written.
 *
 * max nesting is checked in lib_ring_buffer_get_cpu().
 * Check it again as a safety net.
 */
cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
if (nesting >= RING_BUFFER_MAX_NESTING) {
return NULL;
}
- return &cs->items[nesting];
+ return &cs->level[nesting].stack_trace;
}
/*
 * In order to reserve the correct size, the callstack is computed. The
 * resulting callstack is saved to be accessed in the record step.
 */
static
size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
size_t size = 0;
struct stack_trace *trace;
- struct field_data *fdata = field->private;
+ struct field_data *fdata = field->priv;
/* do not write data if no space is available */
trace = stack_trace_context(field, ctx);
if (!trace)
	return 0;
/* do the real work and reserve space */
cs_types[fdata->mode].save_func(trace);
+ /*
+ * Remove final ULONG_MAX delimiter. If we cannot find it, add
+ * our own marker to show that the stack is incomplete. This is
+ * more compact for a trace.
+ */
+ if (trace->nr_entries > 0
+ && trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
+ trace->nr_entries--;
+ }
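+ /* Reserve aligned space for the entry count and the entries. */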
size += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
size += sizeof(unsigned int);
size += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
size += sizeof(unsigned long) * trace->nr_entries;
+ /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+ if (trace->nr_entries == trace->max_entries)
+ size += sizeof(unsigned long);
return size;
}
static
void lttng_callstack_record(struct lttng_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
- struct lttng_channel *chan)
+ struct lib_ring_buffer_ctx *ctx,
+ struct lttng_channel *chan)
{
struct stack_trace *trace = stack_trace_context(field, ctx);
+ unsigned int nr_seq_entries;
if (!trace)
return;
lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
- chan->ops->event_write(ctx, &trace->nr_entries, sizeof(unsigned int));
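+ /*
+ * The written sequence length includes one extra slot when the
+ * stack is truncated, for the ULONG_MAX marker appended below.
+ */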
+ nr_seq_entries = trace->nr_entries;
+ if (trace->nr_entries == trace->max_entries)
+ nr_seq_entries++;
+ chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
chan->ops->event_write(ctx, trace->entries,
sizeof(unsigned long) * trace->nr_entries);
+ /* Add our own ULONG_MAX delimiter to show incomplete stack. */
+ if (trace->nr_entries == trace->max_entries) {
+ unsigned long delim = ULONG_MAX;
+
+ chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
+ }
}
static
void field_data_free(struct field_data *fdata)
{
- int cpu, i;
- struct lttng_cs *cs;
-
if (!fdata)
return;
- for_each_possible_cpu(cpu) {
- cs = per_cpu_ptr(fdata->cs_percpu, cpu);
- for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
- kfree(cs->items[i].entries);
- }
- }
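+ /*
+ * The entries arrays are embedded in struct lttng_cs_nesting, so
+ * freeing the per-cpu structure releases them as well.
+ */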
free_percpu(fdata->cs_percpu);
kfree(fdata);
}
static
-struct field_data __percpu *field_data_create(unsigned int entries, int type)
+struct field_data __percpu *field_data_create(int type)
{
int cpu, i;
- struct stack_trace *item;
- struct lttng_cs *cs;
struct lttng_cs __percpu *cs_set;
- struct field_data* fdata;
+ struct field_data *fdata;
- fdata = kzalloc(sizeof(unsigned long) * entries, GFP_KERNEL);
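+ /* Allocate the control structure, sized by the structure itself. */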
+ fdata = kzalloc(sizeof(*fdata), GFP_KERNEL);
if (!fdata)
return NULL;
cs_set = alloc_percpu(struct lttng_cs);
fdata->cs_percpu = cs_set;
for_each_possible_cpu(cpu) {
+ struct lttng_cs *cs;
+
cs = per_cpu_ptr(cs_set, cpu);
for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
- item = &cs->items[i];
- item->entries = kzalloc(sizeof(unsigned long) * entries, GFP_KERNEL);
- if (!item->entries) {
- goto error_alloc;
- }
- item->max_entries = entries;
+ struct lttng_cs_nesting *level;
+
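+ /* Point this level's stack_trace at its embedded entries array. */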
+ level = &cs->level[i];
+ level->stack_trace.entries = level->entries;
+ level->stack_trace.max_entries = MAX_ENTRIES;
}
}
fdata->mode = type;
static
void lttng_callstack_destroy(struct lttng_ctx_field *field)
{
- struct field_data *fdata = field->private;
+ struct field_data *fdata = field->priv;
field_data_free(fdata);
}
if (!field)
return -ENOMEM;
if (lttng_find_context(*ctx, ctx_name)) {
- printk("%s lttng_find_context failed\n", ctx_name);
ret = -EEXIST;
goto error_find;
}
- fdata = field_data_create(MAX_ENTRIES, mode);
+ fdata = field_data_create(mode);
if (!fdata) {
ret = -ENOMEM;
goto error_create;
field->get_size_arg = lttng_callstack_get_size;
field->record = lttng_callstack_record;
- field->private = fdata;
+ field->priv = fdata;
field->destroy = lttng_callstack_destroy;
wrapper_vmalloc_sync_all();
- printk("lttng add-context %s\n", ctx_name);
return 0;
error_create: