X-Git-Url: http://git.liburcu.org/?a=blobdiff_plain;f=libust%2Ftype-serializer.c;h=070542c65e2a938a158e00e88aad320f867a55b2;hb=HEAD;hp=5d20960e03a7c58a82a27a8fa6e352ffd568cd00;hpb=518d7abb8e3720433c611499f704c3bd9d554102;p=ust.git

diff --git a/libust/type-serializer.c b/libust/type-serializer.c
index 5d20960..070542c 100644
--- a/libust/type-serializer.c
+++ b/libust/type-serializer.c
@@ -7,14 +7,20 @@
  *
  * Dual LGPL v2.1/GPL v2 license.
  */
+
+/* This file contains functions for tracepoint custom probes support. */
+
+#define _GNU_SOURCE
+#define _LGPL_SOURCE
 #include
-#include
 #include
 #include
+#include
 #include "tracer.h"
+#include "type-serializer.h"
 
 notrace
-void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
+void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
 		void *serialize_private, unsigned int data_size,
 		unsigned int largest_align)
 {
@@ -40,7 +46,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	cpu = ust_get_cpu();
 
 	/* Force volatile access. */
-	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
 
 	/*
 	 * asm volatile and "memory" clobber prevent the compiler from moving
@@ -49,7 +55,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	 * traps, divisions by 0, ...) are triggered within the incremented
 	 * nesting count section.
 	 */
-	barrier();
+	cmm_barrier();
 	eID = mdata->event_id;
 	chan_index = mdata->channel_id;
 
@@ -57,7 +63,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	 * Iterate on each trace, typically small number of active traces,
 	 * list iteration with prefetch is usually slower.
 	 */
-	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
+	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
 		if (unlikely(!trace->active))
 			continue;
 //ust//	if (unlikely(!ltt_run_filter(trace, eID)))
@@ -79,6 +85,14 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 		if (!chan->active)
 			continue;
 
+		/* If a new cpu was plugged since the trace was started, we did
+		 * not add it to the trace, and therefore we write the event to
+		 * cpu 0.
+		 */
+		if(cpu >= chan->n_cpus) {
+			cpu = 0;
+		}
+
 		/* reserve space : header and data */
 		ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
 				       cpu, &buf, &slot_size, &buf_offset, &tsc,
@@ -106,7 +120,7 @@ void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
 	 * traps, divisions by 0, ...) are triggered within the incremented
 	 * nesting count section.
 	 */
-	barrier();
-	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
+	cmm_barrier();
+	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
 	rcu_read_unlock();
 }
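
For reference, below is a minimal standalone sketch of the liburcu idiom this patch migrates to: the tracer bumps a nesting counter with a forced volatile store (CMM_STORE_SHARED/CMM_LOAD_SHARED from urcu/system.h) and uses cmm_barrier() from urcu/compiler.h as a compiler barrier so the traced work cannot be reordered outside the incremented-nesting window. The names trace_nesting and do_traced_work() are hypothetical stand-ins for ltt_nesting and the reserve/write/commit sequence in _ltt_specialized_trace(); they are not part of the patch.

/* Illustrative sketch only, assuming liburcu is installed. */
#include <urcu/system.h>	/* CMM_LOAD_SHARED, CMM_STORE_SHARED */
#include <urcu/compiler.h>	/* cmm_barrier */

static int trace_nesting;	/* hypothetical nesting counter */

static void do_traced_work(void)
{
	/* stand-in for slot reservation, event write and commit */
}

static void traced_section(void)
{
	/* Force volatile access, as the patch does for ltt_nesting. */
	CMM_STORE_SHARED(trace_nesting, CMM_LOAD_SHARED(trace_nesting) + 1);
	/*
	 * Compiler barrier: keep the traced work (and any faults it may
	 * trigger) inside the incremented nesting count section.
	 */
	cmm_barrier();

	do_traced_work();

	cmm_barrier();
	CMM_STORE_SHARED(trace_nesting, CMM_LOAD_SHARED(trace_nesting) - 1);
}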