Rename "tsc" to "timestamp"
diff --git a/src/lttng-context-callstack-stackwalk-impl.h b/src/lttng-context-callstack-stackwalk-impl.h
index a7c5a062fb10f521f8a3722f2f0c1a40bab88850..5075e9850d6742f320a89e2aa412807c256fdfd0 100644
--- a/src/lttng-context-callstack-stackwalk-impl.h
+++ b/src/lttng-context-callstack-stackwalk-impl.h
@@ -97,19 +97,21 @@ void lttng_cs_set_init(struct lttng_cs __percpu *cs_set)
 /* Keep track of nesting inside userspace callstack context code */
 DEFINE_PER_CPU(int, callstack_user_nesting);
 
+/*
+ * Note: these callbacks expect to be invoked with preemption disabled across
+ * get_size and record due to their use of a per-cpu stack.
+ */
 static
-struct lttng_stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *field,
-                                       struct lib_ring_buffer_ctx *ctx)
+struct lttng_stack_trace *stack_trace_context(struct field_data *fdata, int cpu)
 {
        int buffer_nesting, cs_user_nesting;
        struct lttng_cs *cs;
-       struct field_data *fdata = field->priv;
 
        /*
         * Do not gather the userspace callstack context when the event was
         * triggered by the userspace callstack context saving mechanism.
         */
-       cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
+       cs_user_nesting = per_cpu(callstack_user_nesting, cpu);
 
        if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
                return NULL;
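
The nesting check above is what keeps the user callstack context from capturing itself: as the comment says, the stack walk can itself trigger instrumented events, which would re-enter the same capture path. A userspace analogue of that guard, using hypothetical names (a plain array indexed by cpu stands in for the per-cpu variable, which only works because preemption is assumed disabled across the capture):

#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for DEFINE_PER_CPU(int, callstack_user_nesting). */
static int callstack_user_nesting[NR_CPUS];

static bool capture_user_callstack(int cpu)
{
	/* Mirror of the check in stack_trace_context(): skip the capture when
	 * the event was itself triggered by the callstack saving mechanism. */
	if (callstack_user_nesting[cpu] >= 1)
		return false;

	callstack_user_nesting[cpu]++;
	/* ... walk the user stack here; any event fired from the walk sees
	 * nesting >= 1 on this cpu and skips its own capture ... */
	callstack_user_nesting[cpu]--;
	return true;
}

int main(void)
{
	printf("captured: %d\n", capture_user_callstack(0));
	return 0;
}
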
@@ -121,8 +123,8 @@ struct lttng_stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *fie
         * max nesting is checked in lib_ring_buffer_get_cpu().
         * Check it again as a safety net.
         */
-       cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
-       buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
+       cs = per_cpu_ptr(fdata->cs_percpu, cpu);
+       buffer_nesting = per_cpu(lib_ring_buffer_nesting, cpu) - 1;
        if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
                return NULL;
 
@@ -130,9 +132,7 @@ struct lttng_stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *fie
 }
 
 static
-size_t lttng_callstack_length_get_size(size_t offset, struct lttng_kernel_ctx_field *field,
-                               struct lib_ring_buffer_ctx *ctx,
-                               struct lttng_channel *chan)
+size_t lttng_callstack_length_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
 {
        size_t orig_offset = offset;
 
@@ -146,16 +146,16 @@ size_t lttng_callstack_length_get_size(size_t offset, struct lttng_kernel_ctx_fi
  * resulting callstack is saved to be accessed in the record step.
  */
 static
-size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_field *field,
-                                       struct lib_ring_buffer_ctx *ctx,
-                                       struct lttng_channel *chan)
+size_t lttng_callstack_sequence_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
 {
        struct lttng_stack_trace *trace;
-       struct field_data *fdata = field->priv;
+       struct field_data *fdata = (struct field_data *) priv;
        size_t orig_offset = offset;
+       int cpu = smp_processor_id();
+       struct irq_ibt_state irq_ibt_state;
 
        /* do not write data if no space is available */
-       trace = stack_trace_context(field, ctx);
+       trace = stack_trace_context(fdata, cpu);
        if (unlikely(!trace)) {
                offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
                return offset - orig_offset;
@@ -167,15 +167,19 @@ size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_
        switch (fdata->mode) {
        case CALLSTACK_KERNEL:
                /* do the real work and reserve space */
+               irq_ibt_state = wrapper_irq_ibt_save();
                trace->nr_entries = save_func_kernel(trace->entries,
                                                MAX_ENTRIES, 0);
+               wrapper_irq_ibt_restore(irq_ibt_state);
                break;
        case CALLSTACK_USER:
-               ++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
+               ++per_cpu(callstack_user_nesting, cpu);
                /* do the real work and reserve space */
+               irq_ibt_state = wrapper_irq_ibt_save();
                trace->nr_entries = save_func_user(trace->entries,
                                                MAX_ENTRIES);
-               per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
+               wrapper_irq_ibt_restore(irq_ibt_state);
+               per_cpu(callstack_user_nesting, cpu)--;
                break;
        default:
                WARN_ON_ONCE(1);
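
The comment added in the first hunk spells out the contract these callbacks rely on: get_size() does the expensive stack walk into a per-cpu buffer and returns the space to reserve, and record() later copies out of that same buffer, so both must run on the same CPU with preemption disabled in between. A minimal userspace sketch of that two-phase pattern, with hypothetical names standing in for the LTTng types:

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 8
#define NR_CPUS 2

struct stack_trace_buf {
	unsigned long entries[MAX_ENTRIES];
	unsigned int nr_entries;
};

/* Stand-in for the per-cpu lttng_stack_trace storage. */
static struct stack_trace_buf percpu_trace[NR_CPUS];

/* Phase 1: capture the backtrace and return the payload size to reserve. */
static size_t callstack_get_size(int cpu)
{
	struct stack_trace_buf *trace = &percpu_trace[cpu];
	unsigned int i;

	/* Stand-in for save_func_kernel(): record a fake backtrace. */
	trace->nr_entries = 3;
	for (i = 0; i < trace->nr_entries; i++)
		trace->entries[i] = 0x1000 + i;
	return sizeof(unsigned long) * trace->nr_entries;
}

/* Phase 2: copy the previously captured entries into the reserved space. */
static void callstack_record(int cpu, void *out)
{
	struct stack_trace_buf *trace = &percpu_trace[cpu];

	memcpy(out, trace->entries, sizeof(unsigned long) * trace->nr_entries);
}

int main(void)
{
	unsigned long out[MAX_ENTRIES];
	int cpu = 0;	/* smp_processor_id() / ctx->priv.reserve_cpu in the real code */
	size_t sz = callstack_get_size(cpu);

	callstack_record(cpu, out);
	printf("reserved %zu bytes, first entry 0x%lx\n", sz, out[0]);
	return 0;
}
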
@@ -194,14 +198,15 @@ size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_
 }
 
 static
-void lttng_callstack_length_record(struct lttng_kernel_ctx_field *field,
-                       struct lib_ring_buffer_ctx *ctx,
-                       struct lttng_channel *chan)
+void lttng_callstack_length_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
+                       struct lttng_kernel_ring_buffer_ctx *ctx,
+                       struct lttng_kernel_channel_buffer *chan)
 {
-       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
+       int cpu = ctx->priv.reserve_cpu;
+       struct field_data *fdata = (struct field_data *) priv;
+       struct lttng_stack_trace *trace = stack_trace_context(fdata, cpu);
        unsigned int nr_seq_entries;
 
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
        if (unlikely(!trace)) {
                nr_seq_entries = 0;
        } else {
@@ -209,30 +214,33 @@ void lttng_callstack_length_record(struct lttng_kernel_ctx_field *field,
                if (trace->nr_entries == MAX_ENTRIES)
                        nr_seq_entries++;
        }
-       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+       chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int), lttng_alignof(unsigned int));
 }
 
 static
-void lttng_callstack_sequence_record(struct lttng_kernel_ctx_field *field,
-                       struct lib_ring_buffer_ctx *ctx,
-                       struct lttng_channel *chan)
+void lttng_callstack_sequence_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
+                       struct lttng_kernel_ring_buffer_ctx *ctx,
+                       struct lttng_kernel_channel_buffer *chan)
 {
-       struct lttng_stack_trace *trace = stack_trace_context(field, ctx);
+       int cpu = ctx->priv.reserve_cpu;
+       struct field_data *fdata = (struct field_data *) priv;
+       struct lttng_stack_trace *trace = stack_trace_context(fdata, cpu);
        unsigned int nr_seq_entries;
 
-       lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
        if (unlikely(!trace)) {
+               /* We need to align even if there are 0 elements. */
+               lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
                return;
        }
        nr_seq_entries = trace->nr_entries;
        if (trace->nr_entries == MAX_ENTRIES)
                nr_seq_entries++;
        chan->ops->event_write(ctx, trace->entries,
-                       sizeof(unsigned long) * trace->nr_entries);
+                       sizeof(unsigned long) * trace->nr_entries, lttng_alignof(unsigned long));
        /* Add our own ULONG_MAX delimiter to show incomplete stack. */
        if (trace->nr_entries == MAX_ENTRIES) {
                unsigned long delim = ULONG_MAX;
 
-               chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
+               chan->ops->event_write(ctx, &delim, sizeof(unsigned long), 1);
        }
 }
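
The other notable change in the record callbacks: the required alignment is now passed straight to event_write() rather than applied beforehand with lib_ring_buffer_align_ctx() (except on the empty-trace path, where an explicit align call remains because nothing gets written). A rough userspace sketch of what an align-then-write helper with that shape does, using hypothetical names and assuming power-of-two alignment; this is not the actual ring-buffer implementation:

#include <stdio.h>
#include <string.h>

struct sketch_ctx {
	char buf[256];
	size_t offset;
};

/* Round the write offset up to 'align' (a power of two), then copy, mirroring
 * the align-then-write contract of the new event_write() signature. */
static void sketch_event_write(struct sketch_ctx *ctx, const void *src,
			size_t len, size_t align)
{
	ctx->offset = (ctx->offset + align - 1) & ~(align - 1);
	memcpy(ctx->buf + ctx->offset, src, len);
	ctx->offset += len;
}

int main(void)
{
	struct sketch_ctx ctx = { .offset = 1 };	/* deliberately misaligned */
	unsigned int nr_seq_entries = 3;
	unsigned long entries[3] = { 0xa, 0xb, 0xc };

	/* Length field, then the sequence, aligned as in the record callbacks above. */
	sketch_event_write(&ctx, &nr_seq_entries, sizeof(nr_seq_entries),
			__alignof__(unsigned int));
	sketch_event_write(&ctx, entries, sizeof(entries), __alignof__(unsigned long));
	printf("final offset: %zu\n", ctx.offset);
	return 0;
}
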