/*
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <stdint.h>

#include <urcu/rculist.h>

#include <ust/type-serializer.h>
#include <ust/clock.h>
17 void _ltt_specialized_trace(const struct marker
*mdata
, void *probe_data
,
18 void *serialize_private
, unsigned int data_size
,
19 unsigned int largest_align
)
24 unsigned int chan_index
;
25 struct ust_buffer
*buf
;
26 struct ust_channel
*chan
;
27 struct ust_trace
*trace
;
34 * If we get here, it's probably because we have useful work to do.
36 if (unlikely(ltt_traces
.num_active_traces
== 0))
42 /* Force volatile access. */
43 STORE_SHARED(ltt_nesting
, LOAD_SHARED(ltt_nesting
) + 1);
46 * asm volatile and "memory" clobber prevent the compiler from moving
47 * instructions out of the ltt nesting count. This is required to ensure
48 * that probe side-effects which can cause recursion (e.g. unforeseen
49 * traps, divisions by 0, ...) are triggered within the incremented
50 * nesting count section.
53 eID
= mdata
->event_id
;
54 chan_index
= mdata
->channel_id
;
57 * Iterate on each trace, typically small number of active traces,
58 * list iteration with prefetch is usually slower.
60 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
61 if (unlikely(!trace
->active
))
63 //ust// if (unlikely(!ltt_run_filter(trace, eID)))
65 #ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
66 rflags
= LTT_RFLAG_ID_SIZE
;
68 if (unlikely(eID
>= LTT_FREE_EVENTS
))
69 rflags
= LTT_RFLAG_ID
;
74 * Skip channels added after trace creation.
76 if (unlikely(chan_index
>= trace
->nr_channels
))
78 chan
= &trace
->channels
[chan_index
];
82 /* reserve space : header and data */
83 ret
= ltt_reserve_slot(chan
, trace
, data_size
, largest_align
,
84 cpu
, &buf
, &slot_size
, &buf_offset
, &tsc
,
86 if (unlikely(ret
< 0))
87 continue; /* buffer full */
89 /* Out-of-order write : header and data */
90 buf_offset
= ltt_write_event_header(chan
, buf
,
91 buf_offset
, eID
, data_size
,
94 buf_offset
+= ltt_align(buf_offset
, largest_align
);
95 ust_buffers_write(buf
, buf_offset
,
96 serialize_private
, data_size
);
97 buf_offset
+= data_size
;
99 /* Out-of-order commit */
100 ltt_commit_slot(chan
, buf
, buf_offset
, data_size
, slot_size
);
103 * asm volatile and "memory" clobber prevent the compiler from moving
104 * instructions out of the ltt nesting count. This is required to ensure
105 * that probe side-effects which can cause recursion (e.g. unforeseen
106 * traps, divisions by 0, ...) are triggered within the incremented
107 * nesting count section.
110 STORE_SHARED(ltt_nesting
, LOAD_SHARED(ltt_nesting
) - 1);