X-Git-Url: https://git.liburcu.org/?p=ust.git;a=blobdiff_plain;f=libust%2Fserialize.c;h=81f8e4c610107e6080c1ea6eee8e3702bcd74cd6;hp=8f393ed074d08ad98836db24e39ae729cd0f0122;hb=d4d511d9f43d6d33c3a40b4aab69d976c4f93cec;hpb=bf0d695d692163edb23b8fbbbd976387dfef232d

diff --git a/libust/serialize.c b/libust/serialize.c
index 8f393ed..81f8e4c 100644
--- a/libust/serialize.c
+++ b/libust/serialize.c
@@ -25,6 +25,7 @@
  */
 
 #define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include 
 #include 
 #include 
@@ -32,17 +33,22 @@
 #include 
 #include 
 
-#include 
-#define _LGPL_SOURCE
 #include 
 #include 
+#include 
+#include 
 
 #include "buffers.h"
 #include "tracer.h"
-//#include "list.h"
-#include "usterr.h"
+#include "usterr_signal_safe.h"
 #include "ust_snprintf.h"
 
+/*
+ * Because UST core defines a non-const PAGE_SIZE, define PAGE_SIZE_STATIC here.
+ * It is just an approximation for the tracer stack.
+ */
+#define PAGE_SIZE_STATIC 4096
+
 enum ltt_type {
 	LTT_TYPE_SIGNED_INT,
 	LTT_TYPE_UNSIGNED_INT,
@@ -50,10 +56,15 @@ enum ltt_type {
 	LTT_TYPE_NONE,
 };
 
-static int ust_get_cpu(void)
-{
-	return sched_getcpu();
-}
+/*
+ * Special stack for the tracer. Keeps serialization offsets for each field.
+ * Per-thread. Deals with reentrancy from signals by simply ensuring that
+ * interrupting signals put the stack back to its original position.
+ */
+#define TRACER_STACK_LEN (PAGE_SIZE_STATIC / sizeof(unsigned long))
+static unsigned long __thread tracer_stack[TRACER_STACK_LEN];
+
+static unsigned int __thread tracer_stack_pos;
 
 #define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)
 
@@ -354,7 +365,9 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
 				size_t buf_offset,
 				char trace_size, enum ltt_type trace_type,
 				char c_size, enum ltt_type c_type,
-				int *largest_align, va_list *args)
+				unsigned int *stack_pos_ctx,
+				int *largest_align,
+				va_list *args)
 {
 	union {
 		unsigned long v_ulong;
@@ -410,10 +423,20 @@ static inline size_t serialize_trace_data(struct ust_buffer *buf,
 		tmp.v_string.s = va_arg(*args, const char *);
 		if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
 			tmp.v_string.s = "";
-		tmp.v_string.len = strlen(tmp.v_string.s)+1;
+		if (!buf) {
+			/*
+			 * Reserve tracer stack entry.
+			 */
+			tracer_stack_pos++;
+			assert(tracer_stack_pos <= TRACER_STACK_LEN);
+			cmm_barrier();
+			tracer_stack[*stack_pos_ctx] =
+				strlen(tmp.v_string.s) + 1;
+		}
+		tmp.v_string.len = tracer_stack[(*stack_pos_ctx)++];
 		if (buf)
-			ust_buffers_write(buf, buf_offset, tmp.v_string.s,
-				tmp.v_string.len);
+			ust_buffers_strncpy(buf, buf_offset, tmp.v_string.s,
+				tmp.v_string.len);
 		buf_offset += tmp.v_string.len;
 		goto copydone;
 	default:
@@ -513,7 +536,9 @@ copydone:
 
 notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
 			struct ltt_serialize_closure *closure,
-			void *serialize_private, int *largest_align,
+			void *serialize_private,
+			unsigned int stack_pos_ctx,
+			int *largest_align,
 			const char *fmt, va_list *args)
 {
 	char trace_size = 0, c_size = 0;	/*
@@ -553,7 +578,9 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
 			buf_offset = serialize_trace_data(buf,
 						buf_offset, trace_size,
 						trace_type, c_size, c_type,
-						largest_align, args);
+						&stack_pos_ctx,
+						largest_align,
+						args);
 			trace_size = 0;
 			c_size = 0;
 			trace_type = LTT_TYPE_NONE;
@@ -571,41 +598,44 @@ notrace size_t ltt_serialize_data(struct ust_buffer *buf, size_t buf_offset,
  * Assume that the padding for alignment starts at a sizeof(void *) address.
  */
 static notrace size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
-		void *serialize_private, int *largest_align,
+		void *serialize_private,
+		unsigned int stack_pos_ctx, int *largest_align,
 		const char *fmt, va_list *args)
 {
 	ltt_serialize_cb cb = closure->callbacks[0];
 	closure->cb_idx = 0;
 	return (size_t)cb(NULL, 0, closure, serialize_private,
-			largest_align, fmt, args);
+			stack_pos_ctx, largest_align, fmt, args);
 }
 
 static notrace void ltt_write_event_data(struct ust_buffer *buf,
 			size_t buf_offset,
 			struct ltt_serialize_closure *closure,
-			void *serialize_private, int largest_align,
+			void *serialize_private,
+			unsigned int stack_pos_ctx,
+			int largest_align,
 			const char *fmt, va_list *args)
 {
 	ltt_serialize_cb cb = closure->callbacks[0];
 	closure->cb_idx = 0;
 	buf_offset += ltt_align(buf_offset, largest_align);
-	cb(buf, buf_offset, closure, serialize_private, NULL, fmt, args);
+	cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
+	   fmt, args);
 }
 
-notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
-		struct registers *regs, void *call_data,
+notrace void ltt_vtrace(const struct ust_marker *mdata, void *probe_data,
+		void *call_data,
 		const char *fmt, va_list *args)
 {
 	int largest_align, ret;
-	struct ltt_active_marker *pdata;
+	struct ltt_active_ust_marker *pdata;
 	uint16_t eID;
 	size_t data_size, slot_size;
 	unsigned int chan_index;
 	struct ust_channel *channel;
 	struct ust_trace *trace, *dest_trace = NULL;
 	struct ust_buffer *buf;
-	void *transport_data;
 	u64 tsc;
 	long buf_offset;
 	va_list args_copy;
@@ -614,6 +644,7 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	void *serialize_private = NULL;
 	int cpu;
 	unsigned int rflags;
+	unsigned int stack_pos_ctx;
 
 	/*
 	 * This test is useful for quickly exiting static tracing when no trace
@@ -622,14 +653,15 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	if (unlikely(ltt_traces.num_active_traces == 0))
 		return;
 
-	rcu_read_lock(); //ust// rcu_read_lock_sched_notrace();
-//ust//	cpu = smp_processor_id();
+	rcu_read_lock();
 	cpu = ust_get_cpu();
 
-//ust//	__get_cpu_var(ltt_nesting)++;
-	/* FIXME: should nesting be per-cpu? */
-	ltt_nesting++;
-	pdata = (struct ltt_active_marker *)probe_data;
+	/* Force volatile access. */
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
+	stack_pos_ctx = tracer_stack_pos;
+	cmm_barrier();
+
+	pdata = (struct ltt_active_ust_marker *)probe_data;
 	eID = mdata->event_id;
 	chan_index = mdata->channel_id;
 	closure.callbacks = pdata->probe->callbacks;
@@ -647,12 +679,13 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 	 */
 	largest_align = 1;	/* must be non-zero for ltt_align */
 	data_size = ltt_get_data_size(&closure, serialize_private,
-				      &largest_align, fmt, &args_copy);
+				      stack_pos_ctx, &largest_align,
+				      fmt, &args_copy);
 	largest_align = min_t(int, largest_align, sizeof(void *));
 	va_end(args_copy);
 
 	/* Iterate on each trace */
-	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
 		/*
 		 * Expect the filter to filter out events. If we get here,
 		 * we went through tracepoint activation as a first step.
@@ -680,51 +713,53 @@ notrace void ltt_vtrace(const struct marker *mdata, void *probe_data,
 		if (!channel->active)
 			continue;
 
-		/* If a new cpu was plugged since the trace was started, we did
+		/*
+		 * If a new cpu was plugged since the trace was started, we did
 		 * not add it to the trace, and therefore we write the event to
 		 * cpu 0.
 		 */
-		if(cpu >= channel->n_cpus) {
+		if (cpu >= channel->n_cpus) {
 			cpu = 0;
 		}
 
 		/* reserve space : header and data */
-		ret = ltt_reserve_slot(trace, channel, &transport_data,
-				       data_size, &slot_size, &buf_offset,
-				       &tsc, &rflags,
-				       largest_align, cpu);
+		ret = ltt_reserve_slot(channel, trace, data_size, largest_align,
+				       cpu, &buf, &slot_size, &buf_offset,
+				       &tsc, &rflags);
 		if (unlikely(ret < 0))
 			continue;		/* buffer full */
 
 		va_copy(args_copy, *args);
 		/* FIXME : could probably encapsulate transport better. */
-//ust//	buf = ((struct rchan *)channel->trans_channel_data)->buf[cpu];
 		buf = channel->buf[cpu];
 		/* Out-of-order write : header and data */
-		buf_offset = ltt_write_event_header(trace,
-					channel, buf, buf_offset,
+		buf_offset = ltt_write_event_header(channel, buf, buf_offset,
 					eID, data_size, tsc, rflags);
 		ltt_write_event_data(buf, buf_offset, &closure,
-				     serialize_private,
-				     largest_align, fmt, &args_copy);
+				     serialize_private,
+				     stack_pos_ctx, largest_align,
+				     fmt, &args_copy);
 		va_end(args_copy);
 		/* Out-of-order commit */
 		ltt_commit_slot(channel, buf, buf_offset, data_size,
 				slot_size);
-		DBG("just commited event at offset %ld and size %zd", buf_offset, slot_size);
+		DBG("just committed event (%s/%s) at offset %ld and size %zd", mdata->channel, mdata->name, buf_offset, slot_size);
 	}
-//ust//	__get_cpu_var(ltt_nesting)--;
-	ltt_nesting--;
-	rcu_read_unlock(); //ust// rcu_read_unlock_sched_notrace();
+
+	cmm_barrier();
+	tracer_stack_pos = stack_pos_ctx;
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
+
+	rcu_read_unlock();
 }
 
-notrace void ltt_trace(const struct marker *mdata, void *probe_data,
-		struct registers *regs, void *call_data,
+notrace void ltt_trace(const struct ust_marker *mdata, void *probe_data,
+		void *call_data,
 		const char *fmt, ...)
 {
 	va_list args;
 
 	va_start(args, fmt);
-	ltt_vtrace(mdata, probe_data, regs, call_data, fmt, &args);
+	ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
 	va_end(args);
 }
 
@@ -736,7 +771,7 @@ static notrace void skip_space(const char **ps)
 
 static notrace void copy_token(char **out, const char **in)
 {
-	while(**in != ' ' && **in != '\0') {
+	while (**in != ' ' && **in != '\0') {
 		**out = **in;
 		(*out)++;
 		(*in)++;
@@ -773,23 +808,23 @@ int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
 	int result;
 	enum { none, cfmt, tracefmt, argname } prev_token = none;
 
-	while(*orig_fmt_p != '\0') {
-		if(*orig_fmt_p == '%') {
+	while (*orig_fmt_p != '\0') {
+		if (*orig_fmt_p == '%') {
 			prev_token = cfmt;
 			copy_token(&new_fmt_p, &orig_fmt_p);
 		}
-		else if(*orig_fmt_p == '#') {
+		else if (*orig_fmt_p == '#') {
 			prev_token = tracefmt;
 			do {
 				orig_fmt_p++;
-			} while(*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
+			} while (*orig_fmt_p != ' ' && *orig_fmt_p != '\0');
 		}
-		else if(*orig_fmt_p == ' ') {
-			if(prev_token == argname) {
+		else if (*orig_fmt_p == ' ') {
+			if (prev_token == argname) {
 				*new_fmt_p = '=';
 				new_fmt_p++;
 			}
-			else if(prev_token == cfmt) {
+			else if (prev_token == cfmt) {
 				*new_fmt_p = ' ';
 				new_fmt_p++;
 			}
@@ -804,7 +839,7 @@ int serialize_to_text(char *outbuf, int bufsize, const char *fmt, va_list ap)
 
 	*new_fmt_p = '\0';
 
-	if(outbuf == NULL) {
+	if (outbuf == NULL) {
 		/* use this false_buffer for compatibility with pre-C99 */
 		outbuf = &false_buf;
 		bufsize = 1;
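--
Note on the change above: the patch records each string's length on a
per-thread tracer stack during the sizing pass (buf == NULL) and replays the
recorded length during the writing pass via ust_buffers_strncpy(), which keeps
the two passes consistent even if a string argument changes concurrently. The
stack position is saved in ltt_vtrace() on entry and restored on exit, so a
signal handler that nests into the tracer leaves the stack as it found it.
Below is a minimal, self-contained sketch of that save/restore discipline;
all names here (demo_stack, demo_trace_event) are hypothetical illustrations,
not part of UST.

	#include <assert.h>
	#include <stdio.h>
	#include <string.h>

	#define DEMO_STACK_LEN 64

	/* Per-thread stack of recorded field sizes, as in the patch. */
	static __thread unsigned long demo_stack[DEMO_STACK_LEN];
	static __thread unsigned int demo_stack_pos;

	static void demo_trace_event(const char *s)
	{
		/* Save the stack position on entry, as ltt_vtrace() does. */
		unsigned int saved_pos = demo_stack_pos;

		/* Sizing pass: reserve an entry and record the length once. */
		demo_stack_pos++;
		assert(demo_stack_pos <= DEMO_STACK_LEN);
		demo_stack[saved_pos] = strlen(s) + 1;

		/* Writing pass: consume the recorded length instead of
		 * calling strlen() a second time. */
		printf("writing %lu bytes\n", demo_stack[saved_pos]);

		/*
		 * Restore the saved position on exit. A signal handler that
		 * nested into demo_trace_event() between the two passes
		 * would do the same, so the interrupted call still finds
		 * its own entries at the saved offsets.
		 */
		demo_stack_pos = saved_pos;
	}

	int main(void)
	{
		demo_trace_event("hello");
		return 0;
	}

Because the restore is an unconditional store of the saved position, the
scheme needs no locking: reentrancy from signals on the same thread is the
only concern, and each nesting level cleans up after itself.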