#undef TRACE_SYSTEM
#define TRACE_SYSTEM block

#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_BLOCK_H

#include <linux/blktrace_api.h>
#include <linux/blkdev.h>
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include <linux/version.h>

#define RWBS_LEN	8

#ifndef _TRACE_BLOCK_DEF_
#define _TRACE_BLOCK_DEF_

#define __blk_dump_cmd(cmd, len)	"<unknown>"

enum {
	RWBS_FLAG_WRITE		= (1 << 0),
	RWBS_FLAG_DISCARD	= (1 << 1),
	RWBS_FLAG_READ		= (1 << 2),
	RWBS_FLAG_RAHEAD	= (1 << 3),
	RWBS_FLAG_BARRIER	= (1 << 4),
	RWBS_FLAG_SYNC		= (1 << 5),
	RWBS_FLAG_META		= (1 << 6),
	RWBS_FLAG_SECURE	= (1 << 7),
	RWBS_FLAG_FLUSH		= (1 << 8),
	RWBS_FLAG_FUA		= (1 << 9),
};

#endif /* _TRACE_BLOCK_DEF_ */

#define __print_rwbs_flags(rwbs)		\
	__print_flags(rwbs, "",			\
		{ RWBS_FLAG_FLUSH, "F" },	\
		{ RWBS_FLAG_WRITE, "W" },	\
		{ RWBS_FLAG_DISCARD, "D" },	\
		{ RWBS_FLAG_READ, "R" },	\
		{ RWBS_FLAG_FUA, "F" },		\
		{ RWBS_FLAG_RAHEAD, "A" },	\
		{ RWBS_FLAG_BARRIER, "B" },	\
		{ RWBS_FLAG_SYNC, "S" },	\
		{ RWBS_FLAG_META, "M" },	\
		{ RWBS_FLAG_SECURE, "E" })
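
/*
 * The RWBS_FLAG_* bits above pack the request flags into a single integer at
 * record time; __print_rwbs_flags() expands them back into the familiar
 * blktrace-style string when the trace is read (the "" delimiter means the
 * letters are simply concatenated).  For example, a synchronous write
 * (RWBS_FLAG_WRITE | RWBS_FLAG_SYNC) is rendered as "WS" and a read-ahead
 * read (RWBS_FLAG_READ | RWBS_FLAG_RAHEAD) as "RA".
 */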

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))

#define blk_fill_rwbs(rwbs, rw, bytes)					\
		tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
			( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :	\
			( (bytes) ? RWBS_FLAG_READ :			\
			( 0 ))))					\
			| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)	\
			| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)	\
			| ((rw) & REQ_META ? RWBS_FLAG_META : 0)	\
			| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0)	\
			| ((rw) & REQ_FLUSH ? RWBS_FLAG_FLUSH : 0)	\
			| ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))

#define blk_fill_rwbs(rwbs, rw, bytes)					\
		tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
			( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :	\
			( (bytes) ? RWBS_FLAG_READ :			\
			( 0 ))))					\
			| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)	\
			| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)	\
			| ((rw) & REQ_META ? RWBS_FLAG_META : 0)	\
			| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))

#define blk_fill_rwbs(rwbs, rw, bytes)					\
		tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
			( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD :	\
			( (bytes) ? RWBS_FLAG_READ :			\
			( 0 ))))					\
			| ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0)	\
			| ((rw) & REQ_HARDBARRIER ? RWBS_FLAG_BARRIER : 0) \
			| ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0)	\
			| ((rw) & REQ_META ? RWBS_FLAG_META : 0)	\
			| ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))

#else

#define blk_fill_rwbs(rwbs, rw, bytes)					\
		tp_assign(rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE :	\
			( (rw) & (1 << BIO_RW_DISCARD) ? RWBS_FLAG_DISCARD : \
			( (bytes) ? RWBS_FLAG_READ :			\
			( 0 ))))					\
			| ((rw) & (1 << BIO_RW_AHEAD) ? RWBS_FLAG_RAHEAD : 0) \
			| ((rw) & (1 << BIO_RW_SYNCIO) ? RWBS_FLAG_SYNC : 0) \
			| ((rw) & (1 << BIO_RW_META) ? RWBS_FLAG_META : 0) \
			| ((rw) & (1 << BIO_RW_BARRIER) ? RWBS_FLAG_BARRIER : 0))

#endif
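
/*
 * blk_fill_rwbs() records the request/bio flags into the rwbs field.  The
 * version ladder above tracks flag renames across kernels: pre-2.6.36
 * kernels expose BIO_RW_* bits, 2.6.36 still has REQ_HARDBARRIER, 2.6.37+
 * drops the barrier flag, and 3.1+ additionally reports REQ_FLUSH and
 * REQ_FUA.
 */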

DECLARE_EVENT_CLASS(block_rq_with_error,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( int, errors )
		__field( unsigned int, rwbs )
		__dynamic_array_hex( unsigned char, cmd,
			(rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				rq->cmd_len : 0)
	),

	TP_fast_assign(
		tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq))
		tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq))
		tp_assign(errors, rq->errors)
		blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
		tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					rq->cmd : NULL);
	),

	TP_printk("%d,%d %s (%s) %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  __blk_dump_cmd(__get_dynamic_array(cmd),
				 __get_dynamic_array_len(cmd)),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->errors)
)
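
/*
 * block_rq_with_error is shared by the block_rq_abort, block_rq_requeue and
 * block_rq_complete events below.  For SCSI pass-through requests
 * (REQ_TYPE_BLOCK_PC) the sector and nr_sector fields are recorded as 0 and
 * the raw command buffer is captured instead.
 */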

/**
 * block_rq_abort - abort block operation request
 * @q: queue containing the block operation request
 * @rq: block IO operation request
 *
 * Called immediately after pending block IO operation request @rq in
 * queue @q is aborted. The fields in the operation request @rq
 * can be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_abort,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_requeue - place block IO request back on a queue
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * The block operation request @rq is being placed back into queue
 * @q. For some reason the request was not completed and needs to be
 * put back in the queue.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_requeue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_complete - block IO operation completed by device driver
 * @q: queue containing the block operation request
 * @rq: block operation request
 *
 * The block_rq_complete tracepoint event indicates that some portion
 * of the operation request has been completed by the device driver. If
 * @rq->bio is %NULL, then there is absolutely no additional work to
 * do for the request. If @rq->bio is non-NULL then there is
 * additional work required to complete the request.
 */
DEFINE_EVENT(block_rq_with_error, block_rq_complete,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

DECLARE_EVENT_CLASS(block_rq,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, bytes )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
		__dynamic_array_hex( unsigned char, cmd,
			(rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
				rq->cmd_len : 0)
	),

	TP_fast_assign(
		tp_assign(dev, rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
		tp_assign(sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_pos(rq))
		tp_assign(nr_sector, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					0 : blk_rq_sectors(rq))
		tp_assign(bytes, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					blk_rq_bytes(rq) : 0)
		blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
		tp_memcpy_dyn(cmd, (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
					rq->cmd : NULL);
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  __entry->bytes,
		  __blk_dump_cmd(__get_dynamic_array(cmd),
				 __get_dynamic_array_len(cmd)),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
)
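
/*
 * block_rq differs from block_rq_with_error above in what it records: the
 * pass-through payload size in bytes and the issuing task's comm are
 * captured instead of the error count.  It backs the block_rq_insert and
 * block_rq_issue events below.
 */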

/**
 * block_rq_insert - insert block operation request into queue
 * @q: target queue
 * @rq: block IO operation request
 *
 * Called immediately before block operation request @rq is inserted
 * into queue @q. The fields in the operation request @rq struct can
 * be examined to determine which device and sectors the pending
 * operation would access.
 */
DEFINE_EVENT(block_rq, block_rq_insert,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_rq_issue - issue pending block IO request operation to device driver
 * @q: queue holding operation
 * @rq: block IO operation request
 *
 * Called when block operation request @rq from queue @q is sent to a
 * device driver for processing.
 */
DEFINE_EVENT(block_rq, block_rq_issue,

	TP_PROTO(struct request_queue *q, struct request *rq),

	TP_ARGS(q, rq)
)

/**
 * block_bio_bounce - used bounce buffer when processing block operation
 * @q: queue holding the block operation
 * @bio: block operation
 *
 * A bounce buffer was used to handle the block operation @bio in @q.
 * This occurs when hardware limitations prevent a direct transfer of
 * data between the @bio data memory area and the IO device. Use of a
 * bounce buffer requires extra copying of data and decreases
 * performance.
 */
TRACE_EVENT(block_bio_bounce,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		tp_assign(dev, bio->bi_bdev ?
					bio->bi_bdev->bd_dev : 0)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
)

/**
 * block_bio_complete - completed all work on the block operation
 * @q: queue holding the block operation
 * @bio: block operation completed
 * @error: io error value
 *
 * This tracepoint indicates there is no further work to do on this
 * block IO operation @bio.
 */
TRACE_EVENT(block_bio_complete,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
	TP_PROTO(struct request_queue *q, struct bio *bio, int error),

	TP_ARGS(q, bio, error),
#else
	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),
#endif

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned, nr_sector )
		__field( int, error )
		__field( unsigned int, rwbs )
	),

	TP_fast_assign(
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
		tp_assign(error, error)
#else
		tp_assign(error, 0)
#endif
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
	),

	TP_printk("%d,%d %s %llu + %u [%d]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->error)
)

DECLARE_EVENT_CLASS(block_bio,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
)

/**
 * block_bio_backmerge - merging block operation to the end of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block request @bio to the end of an existing block request
 * in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_backmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
 * @q: queue holding operation
 * @bio: new block operation to merge
 *
 * Merging block IO operation @bio to the beginning of an existing block
 * operation in queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_frontmerge,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

/**
 * block_bio_queue - putting new block IO operation in queue
 * @q: queue holding operation
 * @bio: new block operation
 *
 * About to place the block IO operation @bio into queue @q.
 */
DEFINE_EVENT(block_bio, block_bio_queue,

	TP_PROTO(struct request_queue *q, struct bio *bio),

	TP_ARGS(q, bio)
)

DECLARE_EVENT_CLASS(block_get_rq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		tp_assign(dev, bio ? bio->bi_bdev->bd_dev : 0)
		tp_assign(sector, bio ? bio->bi_sector : 0)
		tp_assign(nr_sector, bio ? bio->bi_size >> 9 : 0)
		blk_fill_rwbs(rwbs, bio ? bio->bi_rw : 0,
			      bio ? bio->bi_size >> 9 : 0)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("%d,%d %s %llu + %u [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector, __entry->comm)
)
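
/*
 * The @bio argument of the block_get_rq events may be NULL (e.g. when a
 * request is allocated without an attached bio), so every bio dereference
 * above is guarded and falls back to 0.
 */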

/**
 * block_getrq - get a free request entry in queue for block IO operations
 * @q: queue for operations
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * A request struct for queue @q has been allocated to handle the
 * block IO operation @bio.
 */
DEFINE_EVENT(block_get_rq, block_getrq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
 * @q: queue for operation
 * @bio: pending block IO operation
 * @rw: low bit indicates a read (%0) or a write (%1)
 *
 * In the case where a request struct cannot be provided for queue @q
 * the process needs to wait for a request struct to become
 * available. This tracepoint event is generated each time the
 * process goes to sleep waiting for a request struct to become available.
 */
DEFINE_EVENT(block_get_rq, block_sleeprq,

	TP_PROTO(struct request_queue *q, struct bio *bio, int rw),

	TP_ARGS(q, bio, rw)
)

/**
 * block_plug - keep operation requests in request queue
 * @q: request queue to plug
 *
 * Plug the request queue @q. Do not allow block operation requests
 * to be sent to the device driver. Instead, accumulate requests in
 * the queue to improve throughput performance of the block device.
 */
TRACE_EVENT(block_plug,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("[%s]", __entry->comm)
)

DECLARE_EVENT_CLASS(block_unplug,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit),
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q),
#endif

	TP_STRUCT__entry(
		__field( int, nr_rq )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
		tp_assign(nr_rq, depth)
#else
		tp_assign(nr_rq, q->rq.count[READ] + q->rq.count[WRITE])
#endif
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
)
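
/*
 * On kernels >= 2.6.39 the plugging rework passes the number of queued
 * requests (@depth) and whether the unplug was explicit directly to the
 * tracepoint; on older kernels the count is derived from the queue's
 * read/write request counters instead.
 */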

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39))
/**
 * block_unplug_timer - timed release of operation requests in queue to device driver
 * @q: request queue to unplug
 *
 * Unplug the request queue @q because a timer expired and allow block
 * operation requests to be sent to the device driver.
 */
DEFINE_EVENT(block_unplug, block_unplug_timer,

	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
)
#endif

/**
 * block_unplug - release of operation requests in request queue
 * @q: request queue to unplug
 * @depth: number of requests just added to the queue
 * @explicit: whether this was an explicit unplug, or one from schedule()
 *
 * Unplug request queue @q because device driver is scheduled to work
 * on elements in the request queue.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
DEFINE_EVENT(block_unplug, block_unplug,
#else
DEFINE_EVENT(block_unplug, block_unplug_io,
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,39))
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),

	TP_ARGS(q, depth, explicit)
#else
	TP_PROTO(struct request_queue *q),

	TP_ARGS(q)
#endif
)

/**
 * block_split - split a single bio struct into two bio structs
 * @q: queue containing the bio
 * @bio: block operation being split
 * @new_sector: The starting sector for the new bio
 *
 * The bio request @bio in request queue @q needs to be split into two
 * bio requests. The newly created @bio request starts at
 * @new_sector. This split may be required due to hardware limitations
 * such as an operation crossing device boundaries in a RAID system.
 */
TRACE_EVENT(block_split,

	TP_PROTO(struct request_queue *q, struct bio *bio,
		 unsigned int new_sector),

	TP_ARGS(q, bio, new_sector),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( sector_t, new_sector )
		__field( unsigned int, rwbs )
		__array_text( char, comm, TASK_COMM_LEN )
	),

	TP_fast_assign(
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(new_sector, new_sector)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
	),

	TP_printk("%d,%d %s %llu / %llu [%s]",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  (unsigned long long)__entry->sector,
		  (unsigned long long)__entry->new_sector,
		  __entry->comm)
)

/**
 * block_bio_remap - map request for a logical device to the raw device
 * @q: queue holding the operation
 * @bio: revised operation
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * An operation for a logical device has been mapped to the
 * raw block device.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,38))
TRACE_EVENT(block_bio_remap,
#else
TRACE_EVENT(block_remap,
#endif

	TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
		 sector_t from),

	TP_ARGS(q, bio, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__field( unsigned int, rwbs )
	),

	TP_fast_assign(
		tp_assign(dev, bio->bi_bdev->bd_dev)
		tp_assign(sector, bio->bi_sector)
		tp_assign(nr_sector, bio->bi_size >> 9)
		tp_assign(old_dev, dev)
		tp_assign(old_sector, from)
		blk_fill_rwbs(rwbs, bio->bi_rw, bio->bi_size)
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
)
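
/*
 * The remap event was renamed from block_remap to block_bio_remap in kernel
 * 2.6.38; the conditional above keeps the event name in line with the
 * tracepoint provided by the kernel being built against.
 */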

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/**
 * block_rq_remap - map request for a block operation request
 * @q: queue holding the operation
 * @rq: block IO operation request
 * @dev: device for the operation
 * @from: original sector for the operation
 *
 * The block operation request @rq in @q has been remapped. The block
 * operation request @rq holds the current information and @from holds
 * the original sector.
 */
TRACE_EVENT(block_rq_remap,

	TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
		 sector_t from),

	TP_ARGS(q, rq, dev, from),

	TP_STRUCT__entry(
		__field( dev_t, dev )
		__field( sector_t, sector )
		__field( unsigned int, nr_sector )
		__field( dev_t, old_dev )
		__field( sector_t, old_sector )
		__field( unsigned int, rwbs )
	),

	TP_fast_assign(
		tp_assign(dev, disk_devt(rq->rq_disk))
		tp_assign(sector, blk_rq_pos(rq))
		tp_assign(nr_sector, blk_rq_sectors(rq))
		tp_assign(old_dev, dev)
		tp_assign(old_sector, from)
		blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq))
	),

	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __print_rwbs_flags(__entry->rwbs),
		  (unsigned long long)__entry->sector,
		  __entry->nr_sector,
		  MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
		  (unsigned long long)__entry->old_sector)
)
#endif

#undef __print_rwbs_flags
#undef blk_fill_rwbs

#endif /* _TRACE_BLOCK_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"