lttng-test probe: remove compatibility code
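The blobdiff below spans two changes to the stackwalk callstack context: the init-time kallsyms lookup that populated save_func_kernel/save_func_user is removed in favour of calling stack_trace_save() and stack_trace_save_user() directly, and the single callstack field is split into a length field and a sequence field, each with its own get_size and record callback. A minimal sketch of the two call paths, for illustration only: the stack_trace_save()/kallsyms_lookup_funcptr() signatures come from the diff itself, while the wrapper include path and the two helper names are assumptions.

/*
 * Illustration only, not a drop-in file: contrasts the init-time lookup
 * being removed with the direct call the patch switches to.  Assumes a
 * kernel providing <linux/stacktrace.h> (v5.2+); the wrapper include
 * path is an assumption about the lttng-modules tree layout.
 */
#include <linux/errno.h>
#include <linux/stacktrace.h>
#include <wrapper/kallsyms.h>           /* old path only */

/* Old path: resolve the symbol once, then call through a pointer. */
static unsigned int (*save_func_kernel)(unsigned long *store,
                unsigned int size, unsigned int skipnr);

static int resolve_save_func_kernel(void)       /* hypothetical helper */
{
        unsigned long func = kallsyms_lookup_funcptr("stack_trace_save");

        if (!func)
                return -EINVAL;
        save_func_kernel = (void *) func;
        return 0;
}

/* New path: no init step, call the stacktrace API directly. */
static unsigned int save_kernel_callstack(unsigned long *entries,
                unsigned int max)               /* hypothetical helper */
{
        return stack_trace_save(entries, max, 0);
}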
diff --git a/lttng-context-callstack-stackwalk-impl.h b/lttng-context-callstack-stackwalk-impl.h
index dfb60c554b42634e04aa62fa5c0441d043b7cfed..c227efb208386051e74f73341c9062827aed14e4 100644
--- a/lttng-context-callstack-stackwalk-impl.h
+++ b/lttng-context-callstack-stackwalk-impl.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
  *
  * lttng-context-callstack-stackwalk-impl.h
  *
@@ -34,12 +34,6 @@ struct field_data {
        enum lttng_cs_ctx_modes mode;
 };
 
-static
-unsigned int (*save_func_kernel)(unsigned long *store, unsigned int size,
-                               unsigned int skipnr);
-static
-unsigned int (*save_func_user)(unsigned long *store, unsigned int size);
-
 static
 const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
 {
@@ -54,51 +48,15 @@ const char *lttng_cs_ctx_mode_name(enum lttng_cs_ctx_modes mode)
 }
 
 static
-int init_type_callstack_kernel(void)
-{
-       unsigned long func;
-       const char *func_name = "stack_trace_save";
-
-       if (save_func_kernel)
-               return 0;
-       func = kallsyms_lookup_funcptr(func_name);
-       if (!func) {
-               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
-                               func_name);
-               return -EINVAL;
-       }
-       save_func_kernel = (void *) func;
-       return 0;
-}
-
-static
-int init_type_callstack_user(void)
-{
-       unsigned long func;
-       const char *func_name = "stack_trace_save_user";
-
-       if (save_func_user)
-               return 0;
-       func = kallsyms_lookup_funcptr(func_name);
-       if (!func) {
-               printk(KERN_WARNING "LTTng: symbol lookup failed: %s\n",
-                               func_name);
-               return -EINVAL;
-       }
-       save_func_user = (void *) func;
-       return 0;
-}
-
-static
-int init_type(enum lttng_cs_ctx_modes mode)
+const char *lttng_cs_ctx_mode_length_name(enum lttng_cs_ctx_modes mode)
 {
        switch (mode) {
        case CALLSTACK_KERNEL:
-               return init_type_callstack_kernel();
+               return "_callstack_kernel_length";
        case CALLSTACK_USER:
-               return init_type_callstack_user();
+               return "_callstack_user_length";
        default:
-               return -EINVAL;
+               return NULL;
        }
 }
 
@@ -142,14 +100,26 @@ struct lttng_stack_trace *stack_trace_context(struct lttng_ctx_field *field,
        return &cs->stack_trace[buffer_nesting];
 }
 
+static
+size_t lttng_callstack_length_get_size(size_t offset, struct lttng_ctx_field *field,
+                               struct lib_ring_buffer_ctx *ctx,
+                               struct lttng_channel *chan)
+{
+       size_t orig_offset = offset;
+
+       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
+       offset += sizeof(unsigned int);
+       return offset - orig_offset;
+}
+
 /*
  * In order to reserve the correct size, the callstack is computed. The
  * resulting callstack is saved to be accessed in the record step.
  */
 static
-size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
-                               struct lib_ring_buffer_ctx *ctx,
-                               struct lttng_channel *chan)
+size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_ctx_field *field,
+                                       struct lib_ring_buffer_ctx *ctx,
+                                       struct lttng_channel *chan)
 {
        struct lttng_stack_trace *trace;
        struct field_data *fdata = field->priv;
@@ -158,8 +128,6 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
        /* do not write data if no space is available */
        trace = stack_trace_context(field, ctx);
        if (unlikely(!trace)) {
-               offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-               offset += sizeof(unsigned int);
                offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
                return offset - orig_offset;
        }
@@ -170,13 +138,13 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
        switch (fdata->mode) {
        case CALLSTACK_KERNEL:
                /* do the real work and reserve space */
-               trace->nr_entries = save_func_kernel(trace->entries,
+               trace->nr_entries = stack_trace_save(trace->entries,
                                                MAX_ENTRIES, 0);
                break;
        case CALLSTACK_USER:
                ++per_cpu(callstack_user_nesting, ctx->cpu);
                /* do the real work and reserve space */
-               trace->nr_entries = save_func_user(trace->entries,
+               trace->nr_entries = stack_trace_save_user(trace->entries,
                                                MAX_ENTRIES);
                per_cpu(callstack_user_nesting, ctx->cpu)--;
                break;
@@ -188,8 +156,6 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
         * If the array is filled, add our own marker to show that the
         * stack is incomplete.
         */
-       offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned int));
-       offset += sizeof(unsigned int);
        offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
        offset += sizeof(unsigned long) * trace->nr_entries;
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
@@ -199,26 +165,39 @@ size_t lttng_callstack_get_size(size_t offset, struct lttng_ctx_field *field,
 }
 
 static
-void lttng_callstack_record(struct lttng_ctx_field *field,
+void lttng_callstack_length_record(struct lttng_ctx_field *field,
                        struct lib_ring_buffer_ctx *ctx,
                        struct lttng_channel *chan)
 {
        struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
        unsigned int nr_seq_entries;
 
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
        if (unlikely(!trace)) {
                nr_seq_entries = 0;
-               lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
-               chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
-               lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+       } else {
+               nr_seq_entries = trace->nr_entries;
+               if (trace->nr_entries == MAX_ENTRIES)
+                       nr_seq_entries++;
+       }
+       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+}
+
+static
+void lttng_callstack_sequence_record(struct lttng_ctx_field *field,
+                       struct lib_ring_buffer_ctx *ctx,
+                       struct lttng_channel *chan)
+{
+       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
+       unsigned int nr_seq_entries;
+
+       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
+       if (unlikely(!trace)) {
                return;
        }
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
        nr_seq_entries = trace->nr_entries;
        if (trace->nr_entries == MAX_ENTRIES)
                nr_seq_entries++;
-       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
        chan->ops->event_write(ctx, trace->entries,
                        sizeof(unsigned long) * trace->nr_entries);
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
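
As the record callbacks above show, the callstack context is emitted as an explicit unsigned int length followed by a sequence of unsigned long entries; when the fixed-size array was filled, an extra ULONG_MAX element is appended (and counted in the length) so a reader can tell a truncated stack from a complete one. A small user-space sketch of that convention; the struct, the helper name and the MAX_ENTRIES value are illustrative, only the delimiter rule is taken from the code above.

#include <limits.h>
#include <stdio.h>

#define MAX_ENTRIES 128U        /* illustrative cap, stands in for the module's fixed array */

struct callstack_sample {
        unsigned int len;                       /* "length" field */
        unsigned long entries[MAX_ENTRIES + 1]; /* "sequence" field, +1 for the marker */
};

/* Copy nr program counters; append ULONG_MAX when the array was filled. */
static void record_sample(struct callstack_sample *s,
                const unsigned long *pcs, unsigned int nr)
{
        unsigned int i;

        s->len = nr;
        for (i = 0; i < nr; i++)
                s->entries[i] = pcs[i];
        if (nr == MAX_ENTRIES)
                s->entries[s->len++] = ULONG_MAX;       /* incomplete-stack delimiter */
}

int main(void)
{
        unsigned long pcs[MAX_ENTRIES];
        struct callstack_sample s;
        unsigned int i;

        for (i = 0; i < MAX_ENTRIES; i++)
                pcs[i] = 0x1000 + i;
        record_sample(&s, pcs, MAX_ENTRIES);
        printf("len=%u last=%#lx truncated=%s\n", s.len,
                s.entries[s.len - 1],
                s.entries[s.len - 1] == ULONG_MAX ? "yes" : "no");
        return 0;
}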