/**
 * ltt-type-serializer.c
 *
 * LTTng specialized type serializer.
 *
 * Copyright Mathieu Desnoyers, 2008.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
#include <urcu/rculist.h>
#include <ust/type-serializer.h>
#include <ust/core.h>
#include "tracer.h"

notrace
void _ltt_specialized_trace(const struct marker *mdata, void *probe_data,
		void *serialize_private, unsigned int data_size,
		unsigned int largest_align)
{
	int ret;
	uint16_t eID;
	size_t slot_size;
	unsigned int chan_index;
	struct ust_buffer *buf;
	struct ust_channel *chan;
	struct ust_trace *trace;
	u64 tsc;
	long buf_offset;
	int cpu;
	unsigned int rflags;

	/*
	 * Fast path: return immediately when no trace is active.
	 */
	if (unlikely(ltt_traces.num_active_traces == 0))
		return;

	rcu_read_lock();
	cpu = ust_get_cpu();

	/* Force volatile access. */
	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) + 1);
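	/*
	 * LOAD_SHARED()/STORE_SHARED() are liburcu helpers. A minimal
	 * sketch of their effect on a cache-coherent target (ACCESS_ONCE
	 * is an illustrative name here, not necessarily the liburcu
	 * spelling):
	 *
	 *	#define ACCESS_ONCE(x)     (*(volatile typeof(x) *)&(x))
	 *	#define LOAD_SHARED(x)     ACCESS_ONCE(x)
	 *	#define STORE_SHARED(x, v) do { ACCESS_ONCE(x) = (v); } while (0)
	 *
	 * i.e. volatile accesses the compiler may not cache, tear, or elide.
	 */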

	/*
	 * asm volatile and "memory" clobber prevent the compiler from moving
	 * instructions out of the ltt nesting count. This is required to ensure
	 * that probe side-effects which can cause recursion (e.g. unforeseen
	 * traps, divisions by 0, ...) are triggered within the incremented
	 * nesting count section.
	 */
	barrier();
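	/*
	 * For reference, barrier() is the classic GCC compiler barrier,
	 * typically defined as:
	 *
	 *	#define barrier() __asm__ __volatile__("" : : : "memory")
	 *
	 * It emits no instructions; the "memory" clobber only forbids the
	 * compiler from reordering memory accesses across it.
	 */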
	eID = mdata->event_id;
	chan_index = mdata->channel_id;

	/*
	 * Iterate over each trace. The number of active traces is typically
	 * small, so plain list iteration beats iteration with prefetch.
	 */
	list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
		if (unlikely(!trace->active))
			continue;
//ust//		if (unlikely(!ltt_run_filter(trace, eID)))
//ust//			continue;
#ifdef CONFIG_LTT_DEBUG_EVENT_SIZE
		rflags = LTT_RFLAG_ID_SIZE;
#else
		if (unlikely(eID >= LTT_FREE_EVENTS))
			rflags = LTT_RFLAG_ID;
		else
			rflags = 0;
#endif
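		/*
		 * Note on the reserve flags (stated as an assumption about
		 * the LTT event-header format): the compact header cannot
		 * encode IDs >= LTT_FREE_EVENTS, so LTT_RFLAG_ID asks
		 * ltt_write_event_header() to emit the full event ID, and
		 * LTT_RFLAG_ID_SIZE additionally records the event size
		 * for debugging.
		 */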
		/*
		 * Skip channels added after trace creation.
		 */
		if (unlikely(chan_index >= trace->nr_channels))
			continue;
		chan = &trace->channels[chan_index];
		if (!chan->active)
			continue;

		/* reserve space: header and data */
		ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
				cpu, &buf, &slot_size, &buf_offset, &tsc,
				&rflags);
		if (unlikely(ret < 0))
			continue; /* buffer full */

		/* Out-of-order write: header and data */
		buf_offset = ltt_write_event_header(chan, buf,
				buf_offset, eID, data_size,
				tsc, rflags);
		if (data_size) {
			/* Align the payload on its largest member. */
			buf_offset += ltt_align(buf_offset, largest_align);
			ust_buffers_write(buf, buf_offset,
					serialize_private, data_size);
			buf_offset += data_size;
		}
		/* Out-of-order commit */
		ltt_commit_slot(chan, buf, buf_offset, data_size, slot_size);
	}
	/*
	 * Same reasoning as the barrier() above: keep the nesting count
	 * decrement inside the section it protects.
	 */
	barrier();
	STORE_SHARED(ltt_nesting, LOAD_SHARED(ltt_nesting) - 1);
	rcu_read_unlock();
}
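
/*
 * Usage sketch (illustrative only, not part of this file): a specialized
 * probe serializes its fields into a C struct and hands it to
 * _ltt_specialized_trace() together with sizeof() and the alignment of
 * the largest member. The struct and probe names below are hypothetical;
 * only _ltt_specialized_trace() above is real.
 *
 *	struct serialize_l_l {
 *		unsigned long f1;
 *		unsigned long f2;
 *	};
 *
 *	static notrace void probe_sched_wakeup(const struct marker *mdata,
 *			void *probe_data, unsigned long pid,
 *			unsigned long state)
 *	{
 *		struct serialize_l_l data;
 *
 *		data.f1 = pid;
 *		data.f2 = state;
 *		_ltt_specialized_trace(mdata, probe_data,
 *				&data, sizeof(data), sizeof(long));
 *	}
 */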