instrumentation: module: remove compatibility code
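
Drop the kallsyms-based lookup of save_stack_trace() and
save_stack_trace_user() and call the save functions directly. Split the
callstack context field into separate length and sequence fields, each
with its own get_size and record callback. This legacy implementation
covers kernels using the struct stack_trace API, which predates
stack_trace_save() (introduced in Linux 5.2).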
diff --git a/lttng-context-callstack-legacy-impl.h b/lttng-context-callstack-legacy-impl.h
index a68e8a18954aff11787527d9c72d87ffe22210df..adcb0842b941ef5e35b00fe9fe8b9d1f7b3324a3 100644
--- a/lttng-context-callstack-legacy-impl.h
+++ b/lttng-context-callstack-legacy-impl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
  *
  * lttng-context-callstack-legacy-impl.h
  *
@@ -36,38 +36,49 @@ struct field_data {
 
 struct lttng_cs_type {
        const char *name;
-       const char *save_func_name;
-       void (*save_func)(struct stack_trace *trace);
+       const char *length_name;
 };
 
 static struct lttng_cs_type cs_types[] = {
        {
                .name           = "callstack_kernel",
-               .save_func_name = "save_stack_trace",
-               .save_func      = NULL,
+               .length_name    = "_callstack_kernel_length",
        },
        {
                .name           = "callstack_user",
-               .save_func_name = "save_stack_trace_user",
-               .save_func      = NULL,
+               .length_name    = "_callstack_user_length",
        },
 };
 
 static
-int init_type(enum lttng_cs_ctx_modes mode)
+const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
 {
-       unsigned long func;
-
-       if (cs_types[mode].save_func)
-               return 0;
-       func = kallsyms_lookup_funcptr(cs_types[mode].save_func_name);
-       if (!func) {
-               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
-                               cs_types[mode].save_func_name);
-               return -EINVAL;
+       return cs_types[mode].name;
+}
+
+static
+const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
+{
+       return cs_types[mode].length_name;
+}
+
+static
+void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
+{
+       int cpu, i;
+
+       for_each_possible_cpu(cpu) {
+               struct lttng_cs *cs;
+
+               cs = per_cpu_ptr(cs_set, cpu);
+               for (i = 0; i < RING_BUFFER_MAX_NESTING; i++) {
+                       struct lttng_cs_dispatch *dispatch;
+
+                       dispatch = &cs->dispatch[i];
+                       dispatch->stack_trace.entries = dispatch->entries;
+                       dispatch->stack_trace.max_entries = MAX_ENTRIES;
+               }
        }
-       cs_types[mode].save_func = (void *) func;
-       return 0;
 }
 
 /* Keep track of nesting inside userspace callstack context code */
@@ -105,14 +116,26 @@ struct stack_trace *stack_trace_context(struct lttng_ctx_field *field,
        return &cs->dispatch[buffer_nesting].stack_trace;
 }
 
+static
+size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
+                               struct lib_ring_buffer_ctx *ctx,
+                               struct lttng_channel *chan)
+{
+       size_t orig_offset = offset;
+
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       offset += sizeof(unsigned int);
+       return offset - orig_offset;
+}
+
 /*
  * In order to reserve the correct size, the callstack is computed. The
  * resulting callstack is saved to be accessed in the record step.
  */
 static
-size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
-                               struct lib_ring_buffer_ctx *ctx,
-                               struct lttng_channel *chan)
+size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
+                                       struct lib_ring_buffer_ctx *ctx,
+                                       struct lttng_channel *chan)
 {
        struct stack_trace *trace;
        struct field_data *fdata = field->priv;
@@ -121,8 +144,6 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
        /* do not write data if no space is available */
        trace = stack_trace_context(field, ctx);
        if (unlikely(!trace)) {
-               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-               offset += sizeof(unsigned int);
                offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
                return offset - orig_offset;
        }
@@ -130,14 +151,14 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
        /* reset stack trace, no need to clear memory */
        trace->nr_entries = 0;
 
-       if (fdata->mode == CALLSTACK_USER)
+       if (fdata->mode == CALLSTACK_USER) {
                ++per_cpu(callstack_user_nesting, ctx->cpu);
-
-       /* do the real work and reserve space */
-       cs_types[fdata->mode].save_func(trace);
-
-       if (fdata->mode == CALLSTACK_USER)
+               /* do the real work and reserve space */
+               save_stack_trace_user(trace);
                per_cpu(callstack_user_nesting, ctx->cpu)--;
+       } else {
+               save_stack_trace(trace);
+       }
 
        /*
         * Remove final ULONG_MAX delimiter. If we cannot find it, add
@@ -148,8 +169,6 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
                        && trace->entries[trace->nr_entries - 1] == ULONG_MAX) {
                trace->nr_entries--;
        }
-       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       offset += sizeof(unsigned int);
        offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
        offset += sizeof(unsigned long) * trace->nr_entries;
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
@@ -159,26 +178,38 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
 }
 
 static
-void lttng_callstack_record(struct lttng_ctx_field *field,
+void lttng_callstack_length_record(struct lttng_ctx_field *field,
                        struct lib_ring_buffer_ctx *ctx,
                        struct lttng_channel *chan)
 {
        struct stack_trace *trace = stack_trace_context(field, ctx);
        unsigned int nr_seq_entries;
 
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
        if (unlikely(!trace)) {
                nr_seq_entries = 0;
-               lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
-               chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
-               lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+       } else {
+               nr_seq_entries = trace->nr_entries;
+               if (trace->nr_entries == trace->max_entries)
+                       nr_seq_entries++;
+       }
+       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+}
+static
+void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lttng_channel *chan)
+{
+       struct stack_trace *trace = stack_trace_context(field, ctx);
+       unsigned int nr_seq_entries;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+       if (unlikely(!trace)) {
                return;
        }
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
        nr_seq_entries = trace->nr_entries;
        if (trace->nr_entries == trace->max_entries)
                nr_seq_entries++;
-       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
        chan->ops->event_write(ctx, trace->entries,
                        sizeof(unsigned long) * trace->nr_entries);
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
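
A note on the size computation above: both new get_size callbacks follow
the ring buffer's align-then-advance pattern, padding the offset up to the
natural alignment of the type and then advancing by its size. Below is a
minimal userland model of that arithmetic, not LTTng code; align_offset()
is a stand-in for lib_ring_buffer_align(), and the starting offset and
entry count are invented for illustration.

#include <stdio.h>
#include <stddef.h>

/*
 * Stand-in for lib_ring_buffer_align(): bytes of padding needed to
 * bring `offset` up to `alignment` (a power of two).
 */
static size_t align_offset(size_t offset, size_t alignment)
{
	return (alignment - (offset & (alignment - 1))) & (alignment - 1);
}

int main(void)
{
	size_t start = 5, offset = start;	/* arbitrary unaligned start */
	unsigned int nr_entries = 3;		/* hypothetical stack depth */

	/* Length field: one aligned unsigned int. */
	offset += align_offset(offset, __alignof__(unsigned int));
	offset += sizeof(unsigned int);

	/* Sequence field: nr_entries aligned unsigned longs. */
	offset += align_offset(offset, __alignof__(unsigned long));
	offset += sizeof(unsigned long) * nr_entries;

	printf("reserved %zu bytes\n", offset - start);
	return 0;
}

This also explains the error path in lttng_callstack_sequence_get_size():
even when no trace is available, the reserve step still accounts for the
unsigned long alignment, because lttng_callstack_sequence_record() aligns
the context unconditionally before bailing out, and reserve and record
must agree byte for byte.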
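The length convention deserves a remark: when the trace filled its buffer
(nr_entries == max_entries), the recorded length is nr_entries + 1 and an
extra ULONG_MAX delimiter follows the entries, marking the stack as
incomplete. A hypothetical consumer-side check, assuming exactly that
payload layout (an unsigned int length followed by `length` unsigned long
entries); callstack_is_truncated() is not part of LTTng:

#include <limits.h>
#include <stdbool.h>

/*
 * Hypothetical reader-side helper: a trailing ULONG_MAX entry means
 * the stack was truncated at capture time.
 */
static bool callstack_is_truncated(const unsigned long *entries,
		unsigned int length)
{
	return length > 0 && entries[length - 1] == ULONG_MAX;
}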