Commit | Line | Data |
---|---|---|
1c8284eb MD |
/*
 * ltt/probes/block-trace.c
 *
 * Block layer tracepoint probes.
 *
 * (C) Copyright 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>

#include <trace/events/block.h>

/*
 * Add rq cmd as a sequence. Needs new type. (size + binary blob)
 */

/*
 * Probe for the block_rq_abort tracepoint: a request is being aborted.
 *
 * Emits one of two events depending on the request type:
 *  - PC (SCSI passthrough) requests log the data length via the
 *    rq_abort_pc marker (registered together with the tracepoint);
 *  - FS requests log the hard sector number via the plain rq_abort_fs
 *    marker (see FIXME below for why trace_mark is used there).
 */
void probe_block_rq_abort(void *data, struct request_queue *q, struct request *rq)
{
	/* Keep only the low two R/W direction bits of the command flags. */
	int rw = rq->cmd_flags & 0x03;

	/* Fold the discard flag into the reported rw value. */
	if (rq->cmd_flags & REQ_DISCARD)
		rw |= REQ_DISCARD;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		trace_mark_tp(block, rq_abort_pc, block_rq_abort,
			probe_block_rq_abort,
			"data_len %u rw %d errors %d",
			blk_rq_bytes(rq), rw, rq->errors);
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both rq_abort_pc and rq_abort_fs
		 * markers to have the rq_abort_fs marker enabled.
		 */
		trace_mark(block, rq_abort_fs,
			"hard_sector %llu "
			"rw %d errors %d", (unsigned long long)blk_rq_pos(rq),
			rw, rq->errors);
	}
}

/*
 * Probe for the block_rq_insert tracepoint: a request is inserted into
 * the request queue.  PC requests log data length (rq_insert_pc marker);
 * FS requests log the hard sector number (rq_insert_fs marker).
 */
void probe_block_rq_insert(void *data, struct request_queue *q, struct request *rq)
{
	/* Keep only the low two R/W direction bits of the command flags. */
	int rw = rq->cmd_flags & 0x03;

	/* Fold the discard flag into the reported rw value. */
	if (rq->cmd_flags & REQ_DISCARD)
		rw |= REQ_DISCARD;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		trace_mark_tp(block, rq_insert_pc, block_rq_insert,
			probe_block_rq_insert,
			"data_len %u rw %d errors %d",
			blk_rq_bytes(rq), rw, rq->errors);
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both rq_insert_pc and
		 * rq_insert_fs markers to have the rq_insert_fs marker enabled.
		 */
		trace_mark(block, rq_insert_fs,
			"hard_sector %llu "
			"rw %d errors %d", (unsigned long long)blk_rq_pos(rq),
			rw, rq->errors);
	}
}

/*
 * Probe for the block_rq_issue tracepoint: a request is issued to the
 * device driver.  PC requests log data length (rq_issue_pc marker);
 * FS requests log the hard sector number (rq_issue_fs marker).
 */
void probe_block_rq_issue(void *data, struct request_queue *q, struct request *rq)
{
	/* Keep only the low two R/W direction bits of the command flags. */
	int rw = rq->cmd_flags & 0x03;

	/* Fold the discard flag into the reported rw value. */
	if (rq->cmd_flags & REQ_DISCARD)
		rw |= REQ_DISCARD;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		trace_mark_tp(block, rq_issue_pc, block_rq_issue,
			probe_block_rq_issue,
			"data_len %u rw %d errors %d",
			blk_rq_bytes(rq), rw, rq->errors);
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both rq_issue_pc and rq_issue_fs
		 * markers to have the rq_issue_fs marker enabled.
		 */
		trace_mark(block, rq_issue_fs,
			"hard_sector %llu "
			"rw %d errors %d", (unsigned long long)blk_rq_pos(rq),
			rw, rq->errors);
	}
}

/*
 * Probe for the block_rq_requeue tracepoint: a request is put back on
 * the queue.  PC requests log data length (rq_requeue_pc marker);
 * FS requests log the hard sector number (rq_requeue_fs marker).
 */
void probe_block_rq_requeue(void *data, struct request_queue *q, struct request *rq)
{
	/* Keep only the low two R/W direction bits of the command flags. */
	int rw = rq->cmd_flags & 0x03;

	/* Fold the discard flag into the reported rw value. */
	if (rq->cmd_flags & REQ_DISCARD)
		rw |= REQ_DISCARD;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		trace_mark_tp(block, rq_requeue_pc, block_rq_requeue,
			probe_block_rq_requeue,
			"data_len %u rw %d errors %d",
			blk_rq_bytes(rq), rw, rq->errors);
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both rq_requeue_pc and
		 * rq_requeue_fs markers to have the rq_requeue_fs marker
		 * enabled.
		 */
		trace_mark(block, rq_requeue_fs,
			"hard_sector %llu "
			"rw %d errors %d", (unsigned long long)blk_rq_pos(rq),
			rw, rq->errors);
	}
}

/*
 * Probe for the block_rq_complete tracepoint: a request has completed.
 * PC requests log data length (rq_complete_pc marker); FS requests log
 * the hard sector number (rq_complete_fs marker).
 */
void probe_block_rq_complete(void *data, struct request_queue *q, struct request *rq)
{
	/* Keep only the low two R/W direction bits of the command flags. */
	int rw = rq->cmd_flags & 0x03;

	/* Fold the discard flag into the reported rw value. */
	if (rq->cmd_flags & REQ_DISCARD)
		rw |= REQ_DISCARD;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		trace_mark_tp(block, rq_complete_pc, block_rq_complete,
			probe_block_rq_complete,
			"data_len %u rw %d errors %d",
			blk_rq_bytes(rq), rw, rq->errors);
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both rq_complete_pc and
		 * rq_complete_fs markers to have the rq_complete_fs marker
		 * enabled.
		 */
		trace_mark(block, rq_complete_fs,
			"hard_sector %llu "
			"rw %d errors %d", (unsigned long long)blk_rq_pos(rq),
			rw, rq->errors);
	}
}

/*
 * Probe for the block_bio_bounce tracepoint.  Records the bio's sector,
 * size, raw rw flag word (bit names spelled out in the format string)
 * and whether the bio is not up to date.
 */
void probe_block_bio_bounce(void *data, struct request_queue *q, struct bio *bio)
{
	trace_mark_tp(block, bio_bounce, block_bio_bounce,
		probe_block_bio_bounce,
		"sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d",
		(unsigned long long)bio->bi_sector, bio->bi_size,
		bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE));
}

/*
 * Probe for the block_bio_complete tracepoint.  Same payload as the
 * other bio probes plus the completion error code passed by the
 * tracepoint.
 */
void probe_block_bio_complete(void *data, struct request_queue *q, struct bio *bio, int error)
{
	trace_mark_tp(block, bio_complete, block_bio_complete,
		probe_block_bio_complete,
		"sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d error %d",
		(unsigned long long)bio->bi_sector, bio->bi_size,
		bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE), error);
}

/*
 * Probe for the block_bio_backmerge tracepoint: a bio is merged at the
 * back of an existing request.  Records sector, size, rw flags and the
 * not-up-to-date flag.
 */
void probe_block_bio_backmerge(void *data, struct request_queue *q, struct bio *bio)
{
	trace_mark_tp(block, bio_backmerge, block_bio_backmerge,
		probe_block_bio_backmerge,
		"sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d",
		(unsigned long long)bio->bi_sector, bio->bi_size,
		bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE));
}

/*
 * Probe for the block_bio_frontmerge tracepoint: a bio is merged at the
 * front of an existing request.  Records sector, size, rw flags and the
 * not-up-to-date flag.
 */
void probe_block_bio_frontmerge(void *data, struct request_queue *q, struct bio *bio)
{
	trace_mark_tp(block, bio_frontmerge, block_bio_frontmerge,
		probe_block_bio_frontmerge,
		"sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d",
		(unsigned long long)bio->bi_sector, bio->bi_size,
		bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE));
}

/*
 * Probe for the block_bio_queue tracepoint: a bio enters the block
 * layer.  Records sector, size, rw flags and the not-up-to-date flag.
 */
void probe_block_bio_queue(void *data, struct request_queue *q, struct bio *bio)
{
	trace_mark_tp(block, bio_queue, block_bio_queue,
		probe_block_bio_queue,
		"sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d",
		(unsigned long long)bio->bi_sector, bio->bi_size,
		bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE));
}

/*
 * Probe for the block_getrq tracepoint: a free request was obtained.
 * When a bio is supplied, log its full details via the getrq_bio
 * marker; otherwise (NULL bio) only the rw direction is available and
 * the plain getrq marker is used (see FIXME below).
 */
void probe_block_getrq(void *data, struct request_queue *q, struct bio *bio, int rw)
{
	if (bio) {
		trace_mark_tp(block, getrq_bio, block_getrq,
			probe_block_getrq,
			"sector %llu size %u "
			"rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
			"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
			"not_uptodate #1u%d",
			(unsigned long long)bio->bi_sector, bio->bi_size,
			bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE));
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both getrq_bio and getrq markers
		 * to have the getrq marker enabled.
		 */
		trace_mark(block, getrq, "rw %d", rw);
	}
}

/*
 * Probe for the block_sleeprq tracepoint: the caller must sleep waiting
 * for a free request.  When a bio is supplied, log its full details via
 * the sleeprq_bio marker; otherwise (NULL bio) only the rw direction is
 * available and the plain sleeprq marker is used (see FIXME below).
 */
void probe_block_sleeprq(void *data, struct request_queue *q, struct bio *bio, int rw)
{
	if (bio) {
		trace_mark_tp(block, sleeprq_bio, block_sleeprq,
			probe_block_sleeprq,
			"sector %llu size %u "
			"rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
			"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
			"not_uptodate #1u%d",
			(unsigned long long)bio->bi_sector, bio->bi_size,
			bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE));
	} else {
		/*
		 * FIXME Using a simple trace_mark for the second event
		 * possibility because tracepoints do not support multiple
		 * connections to the same probe yet. They should have some
		 * refcounting. Need to enable both sleeprq_bio and sleeprq
		 * markers to have the sleeprq marker enabled.
		 */
		trace_mark(block, sleeprq, "rw %d", rw);
	}
}

/*
 * Probe for the block_plug tracepoint: the queue is plugged.
 * Event carries no payload (MARK_NOARGS).
 */
void probe_block_plug(void *data, struct request_queue *q)
{
	trace_mark_tp(block, plug, block_plug, probe_block_plug,
		MARK_NOARGS);
}

/*
 * Probe for the block_unplug_io tracepoint: the queue is unplugged by
 * I/O activity.  The pdu payload is the total number of queued READ
 * plus WRITE requests at unplug time.
 */
void probe_block_unplug_io(void *data, struct request_queue *q)
{
	unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];

	trace_mark_tp(block, unplug_io, block_unplug_io, probe_block_unplug_io,
		"pdu %u", pdu);
}

/*
 * Probe for the block_unplug_timer tracepoint: the queue is unplugged
 * by the plug timer expiring.  The pdu payload is the total number of
 * queued READ plus WRITE requests at unplug time.
 */
void probe_block_unplug_timer(void *data, struct request_queue *q)
{
	unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];

	trace_mark_tp(block, unplug_timer, block_unplug_timer,
		probe_block_unplug_timer,
		"pdu %u", pdu);
}

/*
 * Probe for the block_split tracepoint: a bio is split.  Records the
 * bio's sector, size, rw flags, not-up-to-date flag, and the pdu value
 * supplied by the tracepoint (NOTE(review): presumably the split
 * boundary sector — confirm against the block core caller).
 */
void probe_block_split(void *data, struct request_queue *q, struct bio *bio,
		unsigned int pdu)
{
	trace_mark_tp(block, split, block_split,
		probe_block_split,
		"sector %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d pdu %u",
		(unsigned long long)bio->bi_sector, bio->bi_size,
		bio->bi_rw, !bio_flagged(bio, BIO_UPTODATE), pdu);
}

/*
 * Probe for the block_bio_remap tracepoint: a bio is remapped (e.g. by
 * a stacking driver) from (dev, from) to its current target.  Records
 * both the source and destination device/sector, the sector count
 * (bi_size converted from bytes to 512-byte sectors via >> 9), size,
 * rw flags and the not-up-to-date flag.
 */
void probe_block_bio_remap(void *data, struct request_queue *q, struct bio *bio,
			   dev_t dev, sector_t from)
{
	trace_mark_tp(block, bio_remap, block_bio_remap,
		probe_block_bio_remap,
		"device_from %lu sector_from %llu nr_sector %llu device_to %lu "
		"sector_to %llu size %u rw(FAILFAST_DRIVER,FAILFAST_TRANSPORT,"
		"FAILFAST_DEV,DISCARD,META,SYNC,BARRIER,AHEAD,RW) %lX "
		"not_uptodate #1u%d",
		(unsigned long)dev,
		(unsigned long long)from,
		(unsigned long long)bio->bi_size >> 9,
		(unsigned long)bio->bi_bdev->bd_dev,
		(unsigned long long)bio->bi_sector,
		bio->bi_size, bio->bi_rw,
		!bio_flagged(bio, BIO_UPTODATE));
}

/*
 * Probe for the block_rq_remap tracepoint: a request is remapped from
 * (dev, from) to its current disk/position.  Records source and
 * destination device/sector, the sector count, and the request size in
 * bytes.
 */
void probe_block_rq_remap(void *data, struct request_queue *q,
			  struct request *rq,
			  dev_t dev, sector_t from)
{
	trace_mark_tp(block, rq_remap, block_rq_remap,
		probe_block_rq_remap,
		"device_from %lu sector_from %llu nr_sector %llu device_to %lu "
		"sector_to %llu size %u",
		(unsigned long)dev,
		(unsigned long long)from,
		(unsigned long long)blk_rq_sectors(rq),
		(unsigned long)disk_devt(rq->rq_disk),
		(unsigned long long)blk_rq_pos(rq),
		blk_rq_bytes(rq));
}

/* Module metadata for the LTTng block-layer tracepoint probe module. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Block Tracepoint Probes");