/*
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/* This file contains functions for tracepoint custom probes support. */
#include <urcu/rculist.h>
#include <ust/type-serializer.h>
#include <ust/core.h>
#include <ust/clock.h>
#include "tracer.h"
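/*
 * _ltt_specialized_trace - record one pre-serialized event in every
 * active trace.
 * @mdata: marker metadata; provides the event and channel IDs.
 * @probe_data: probe private data (unused here).
 * @serialize_private: the serialized event payload to copy.
 * @data_size: payload size in bytes.
 * @largest_align: largest alignment required by the payload fields.
 *
 * For each active trace: reserve buffer space, write the event header
 * and the payload, then commit the slot.
 */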
void _ltt_specialized_trace(const struct ust_marker *mdata, void *probe_data,
		void *serialize_private, unsigned int data_size,
		unsigned int largest_align)
{
	int ret;
	uint16_t eID;
	size_t slot_size;
	unsigned int chan_index;
	struct ust_buffer *buf;
	struct ust_channel *chan;
	struct ust_trace *trace;
	u64 tsc;
	long buf_offset;
	int cpu;
	unsigned int rflags;
	/*
	 * If we get here, it's probably because we have useful work to do.
	 */
	if (unlikely(ltt_traces.num_active_traces == 0))
		return;

	rcu_read_lock();
	cpu = ust_get_cpu();
	/* Force volatile access. */
	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) + 1);
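	/*
	 * The nesting count lets the buffer management code detect
	 * re-entrancy, e.g. a probe firing from within tracing code.
	 */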
	/*
	 * asm volatile and "memory" clobber prevent the compiler from moving
	 * instructions out of the ltt nesting count. This is required to ensure
	 * that probe side-effects which can cause recursion (e.g. unforeseen
	 * traps, divisions by 0, ...) are triggered within the incremented
	 * nesting count section.
	 */
	cmm_barrier();
	eID = mdata->event_id;
	chan_index = mdata->channel_id;
	/*
	 * Iterate on each trace, typically small number of active traces,
	 * list iteration with prefetch is usually slower.
	 */
	cds_list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
		if (unlikely(!trace->active))
			continue;
//ust//		if (unlikely(!ltt_run_filter(trace, eID)))
//ust//			continue;
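		/*
		 * Select the event header format: an event ID that does not
		 * fit in the compact range (eID >= LTT_FREE_EVENTS) must be
		 * written explicitly in an extended header, which the
		 * LTT_RFLAG_ID flag requests.
		 */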
#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
		rflags = LTT_RFLAG_ID_SIZE;
#else
		if (unlikely(eID >= LTT_FREE_EVENTS))
			rflags = LTT_RFLAG_ID;
		else
			rflags = 0;
#endif
		/*
		 * Skip channels added after trace creation.
		 */
		if (unlikely(chan_index >= trace->nr_channels))
			continue;
		chan = &trace->channels[chan_index];
		if (!chan->active)
			continue;
		/*
		 * If a new cpu was plugged since the trace was started, we did
		 * not add it to the trace, and therefore we write the event to
		 * cpu 0.
		 */
		if (cpu >= chan->n_cpus) {
			cpu = 0;
		}
		/* reserve space : header and data */
		ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
				       cpu, &buf, &slot_size, &buf_offset,
				       &tsc, &rflags);
		if (unlikely(ret < 0))
			continue; /* buffer full */
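		/*
		 * A failed reserve drops the event for this trace only:
		 * the remaining active traces still record it.
		 */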
		/* Out-of-order write : header and data */
		buf_offset = ltt_write_event_header(chan, buf,
						    buf_offset, eID, data_size,
						    tsc, rflags);
		buf_offset += ltt_align(buf_offset, largest_align);
		ust_buffers_write(buf, buf_offset,
				  serialize_private, data_size);
		buf_offset += data_size;
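		/*
		 * buf_offset now points just past the event: header, padding
		 * up to largest_align, then the payload.
		 */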
		/* Out-of-order commit */
		ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
	}
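	/*
	 * Reserve and commit are decoupled ("out-of-order"): nested or
	 * concurrent writers may commit in any order, since the buffer
	 * tracks the count of committed bytes rather than commit order.
	 */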
	/*
	 * asm volatile and "memory" clobber prevent the compiler from moving
	 * instructions out of the ltt nesting count. This is required to ensure
	 * that probe side-effects which can cause recursion (e.g. unforeseen
	 * traps, divisions by 0, ...) are triggered within the incremented
	 * nesting count section.
	 */
	cmm_barrier();
	CMM_STORE_SHARED(ltt_nesting, CMM_LOAD_SHARED(ltt_nesting) - 1);
	rcu_read_unlock();
}
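/*
 * Example usage (sketch): a specialized probe typically declares a packed
 * struct matching its marker's field layout, fills it on the stack, and
 * passes it to _ltt_specialized_trace() along with the payload size and
 * the largest member alignment. The struct and helper below are
 * hypothetical illustrations, not part of this file's API; they assume
 * <stddef.h> for offsetof().
 */
#if 0	/* illustration only */
struct serialize_long_int {
	unsigned long f1;
	unsigned int f2;
	unsigned char end_field[0];	/* marks the end of recorded bytes */
} __attribute__((packed));

static void trace_example_event(const struct ust_marker *mdata,
				unsigned long f1, unsigned int f2)
{
	struct serialize_long_int data;

	data.f1 = f1;
	data.f2 = f2;
	/* Record up to end_field so trailing padding is not written. */
	_ltt_specialized_trace(mdata, NULL, &data,
			       offsetof(struct serialize_long_int, end_field),
			       sizeof(long));
}
#endif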