/*
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/* This file contains functions for tracepoint custom probes support. */
#include <urcu/rculist.h>

#include <ust/type-serializer.h>
#include <ust/clock.h>
20 void _ltt_specialized_trace(const struct marker
*mdata
, void *probe_data
,
21 void *serialize_private
, unsigned int data_size
,
22 unsigned int largest_align
)
27 unsigned int chan_index
;
28 struct ust_buffer
*buf
;
29 struct ust_channel
*chan
;
30 struct ust_trace
*trace
;
37 * If we get here, it's probably because we have useful work to do.
39 if (unlikely(ltt_traces
.num_active_traces
== 0))
45 /* Force volatile access. */
46 STORE_SHARED(ltt_nesting
, LOAD_SHARED(ltt_nesting
) + 1);
49 * asm volatile and "memory" clobber prevent the compiler from moving
50 * instructions out of the ltt nesting count. This is required to ensure
51 * that probe side-effects which can cause recursion (e.g. unforeseen
52 * traps, divisions by 0, ...) are triggered within the incremented
53 * nesting count section.
56 eID
= mdata
->event_id
;
57 chan_index
= mdata
->channel_id
;
60 * Iterate on each trace, typically small number of active traces,
61 * list iteration with prefetch is usually slower.
63 list_for_each_entry_rcu(trace
, <t_traces
.head
, list
) {
64 if (unlikely(!trace
->active
))
66 //ust// if (unlikely(!ltt_run_filter(trace, eID)))
68 #ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
69 rflags
= LTT_RFLAG_ID_SIZE
;
71 if (unlikely(eID
>= LTT_FREE_EVENTS
))
72 rflags
= LTT_RFLAG_ID
;
77 * Skip channels added after trace creation.
79 if (unlikely(chan_index
>= trace
->nr_channels
))
81 chan
= &trace
->channels
[chan_index
];
85 /* reserve space : header and data */
86 ret
= ltt_reserve_slot(chan
, trace
, data_size
, largest_align
,
87 cpu
, &buf
, &slot_size
, &buf_offset
, &tsc
,
89 if (unlikely(ret
< 0))
90 continue; /* buffer full */
92 /* Out-of-order write : header and data */
93 buf_offset
= ltt_write_event_header(chan
, buf
,
94 buf_offset
, eID
, data_size
,
97 buf_offset
+= ltt_align(buf_offset
, largest_align
);
98 ust_buffers_write(buf
, buf_offset
,
99 serialize_private
, data_size
);
100 buf_offset
+= data_size
;
102 /* Out-of-order commit */
103 ltt_commit_slot(chan
, buf
, buf_offset
, data_size
, slot_size
);
106 * asm volatile and "memory" clobber prevent the compiler from moving
107 * instructions out of the ltt nesting count. This is required to ensure
108 * that probe side-effects which can cause recursion (e.g. unforeseen
109 * traps, divisions by 0, ...) are triggered within the incremented
110 * nesting count section.
113 STORE_SHARED(ltt_nesting
, LOAD_SHARED(ltt_nesting
) - 1);
/* (removed: stray gitweb footer captured during scrape; not part of the source) */