Cleanup: Move lttng-modules instrumentation headers
[lttng-modules.git] include/instrumentation/events/block.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM block
4
5 #if !defined(LTTNG_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define LTTNG_TRACE_BLOCK_H
7
8 #include <lttng/tracepoint-event.h>
9 #include <linux/blktrace_api.h>
10 #include <linux/blkdev.h>
11 #include <linux/trace_seq.h>
12 #include <linux/version.h>
13
14 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
15 #include <scsi/scsi_request.h>
16 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
17
18 #ifndef _TRACE_BLOCK_DEF_
19 #define _TRACE_BLOCK_DEF_
20
21 enum {
22 RWBS_FLAG_WRITE = (1 << 0),
23 RWBS_FLAG_DISCARD = (1 << 1),
24 RWBS_FLAG_READ = (1 << 2),
25 RWBS_FLAG_RAHEAD = (1 << 3),
26 RWBS_FLAG_BARRIER = (1 << 4),
27 RWBS_FLAG_SYNC = (1 << 5),
28 RWBS_FLAG_META = (1 << 6),
29 RWBS_FLAG_SECURE = (1 << 7),
30 RWBS_FLAG_FLUSH = (1 << 8),
31 RWBS_FLAG_FUA = (1 << 9),
32 RWBS_FLAG_PREFLUSH = (1 << 10),
33 };
34
35 #endif /* _TRACE_BLOCK_DEF_ */
36
37 LTTNG_TRACEPOINT_ENUM(block_rq_type,
38 TP_ENUM_VALUES(
39 ctf_enum_value("RWBS_FLAG_WRITE", RWBS_FLAG_WRITE)
40 ctf_enum_value("RWBS_FLAG_DISCARD", RWBS_FLAG_DISCARD)
41 ctf_enum_value("RWBS_FLAG_READ", RWBS_FLAG_READ)
42 ctf_enum_value("RWBS_FLAG_RAHEAD", RWBS_FLAG_RAHEAD)
43 ctf_enum_value("RWBS_FLAG_BARRIER", RWBS_FLAG_BARRIER)
44 ctf_enum_value("RWBS_FLAG_SYNC", RWBS_FLAG_SYNC)
45 ctf_enum_value("RWBS_FLAG_META", RWBS_FLAG_META)
46 ctf_enum_value("RWBS_FLAG_SECURE", RWBS_FLAG_SECURE)
47 ctf_enum_value("RWBS_FLAG_FLUSH", RWBS_FLAG_FLUSH)
48 ctf_enum_value("RWBS_FLAG_FUA", RWBS_FLAG_FUA)
49 ctf_enum_value("RWBS_FLAG_PREFLUSH", RWBS_FLAG_PREFLUSH)
50 )
51 )
52
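/*
 * The RWBS_FLAG_* values above form a bitmask that is recorded as the
 * "rwbs" enum field of the block events below.  They mirror the
 * single-letter codes of the kernel's blktrace rwbs string (e.g. 'W'
 * for write, 'D' for discard, 'S' for sync), but LTTng records them
 * numerically so trace consumers can decode them without string
 * parsing.  As an illustrative example, a synchronous FUA write is
 * recorded as RWBS_FLAG_WRITE | RWBS_FLAG_SYNC | RWBS_FLAG_FUA = 0x221.
 */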
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0) || \
54 LTTNG_SLE_KERNEL_RANGE(4,4,73,5,0,0, 4,4,73,6,0,0) || \
55 LTTNG_SLE_KERNEL_RANGE(4,4,82,6,0,0, 4,4,82,7,0,0) || \
56 LTTNG_SLE_KERNEL_RANGE(4,4,92,6,0,0, 4,4,92,7,0,0) || \
57 LTTNG_SLE_KERNEL_RANGE(4,4,103,6,0,0, 4,4,103,7,0,0) || \
58 LTTNG_SLE_KERNEL_RANGE(4,4,114,94,0,0, 4,4,114,95,0,0) || \
59 LTTNG_SLE_KERNEL_RANGE(4,4,120,94,0,0, 4,4,120,95,0,0) || \
60 LTTNG_SLE_KERNEL_RANGE(4,4,126,94,0,0, 4,5,0,0,0,0))
61
62 #define lttng_req_op(rq) req_op(rq)
63 #define lttng_req_rw(rq) ((rq)->cmd_flags)
64 #define lttng_bio_op(bio) bio_op(bio)
65 #define lttng_bio_rw(bio) ((bio)->bi_opf)
66
67 #define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
68 ctf_enum(block_rq_type, type, rwbs, \
69 (((op) == REQ_OP_WRITE || (op) == REQ_OP_WRITE_SAME) ? RWBS_FLAG_WRITE : \
70 ( (op) == REQ_OP_DISCARD ? RWBS_FLAG_DISCARD : \
71 ( (op) == REQ_OP_SECURE_ERASE ? (RWBS_FLAG_DISCARD | RWBS_FLAG_SECURE) : \
72 ( (op) == REQ_OP_FLUSH ? RWBS_FLAG_FLUSH : \
73 ( (op) == REQ_OP_READ ? RWBS_FLAG_READ : \
74 ( 0 )))))) \
75 | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
76 | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
77 | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
78 | ((rw) & REQ_PREFLUSH ? RWBS_FLAG_PREFLUSH : 0) \
79 | ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))
80
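/*
 * Illustrative sketch only, not used by this header: how a trace
 * consumer could turn the recorded bitmask back into a blktrace-style
 * string.  The helper name and the buffer size are assumptions made
 * for the example, and the letter ordering is approximate.
 *
 *	static void example_rwbs_to_string(unsigned int rwbs, char buf[9])
 *	{
 *		int i = 0;
 *
 *		if (rwbs & (RWBS_FLAG_FLUSH | RWBS_FLAG_PREFLUSH))
 *			buf[i++] = 'F';
 *		if (rwbs & RWBS_FLAG_WRITE)
 *			buf[i++] = 'W';
 *		else if (rwbs & RWBS_FLAG_DISCARD)
 *			buf[i++] = 'D';
 *		else if (rwbs & RWBS_FLAG_READ)
 *			buf[i++] = 'R';
 *		if (rwbs & RWBS_FLAG_SECURE)
 *			buf[i++] = 'E';
 *		if (rwbs & RWBS_FLAG_FUA)
 *			buf[i++] = 'F';
 *		if (rwbs & RWBS_FLAG_RAHEAD)
 *			buf[i++] = 'A';
 *		if (rwbs & RWBS_FLAG_SYNC)
 *			buf[i++] = 'S';
 *		if (rwbs & RWBS_FLAG_META)
 *			buf[i++] = 'M';
 *		buf[i] = '\0';
 *	}
 */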
81 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,1,0))
82
83 #define lttng_req_op(rq)
84 #define lttng_req_rw(rq) ((rq)->cmd_flags)
85 #define lttng_bio_op(bio)
86 #define lttng_bio_rw(bio) ((bio)->bi_rw)
87
88 #define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
89 ctf_enum(block_rq_type, type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
90 ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
91 ( (bytes) ? RWBS_FLAG_READ : \
92 ( 0 )))) \
93 | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
94 | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
95 | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
96 | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0) \
97 | ((rw) & REQ_FLUSH ? RWBS_FLAG_FLUSH : 0) \
98 | ((rw) & REQ_FUA ? RWBS_FLAG_FUA : 0))
99
100 #else
101
102 #define lttng_req_op(rq)
103 #define lttng_req_rw(rq) ((rq)->cmd_flags)
104 #define lttng_bio_op(bio)
105 #define lttng_bio_rw(bio) ((bio)->bi_rw)
106
107 #define blk_rwbs_ctf_integer(type, rwbs, op, rw, bytes) \
108 ctf_enum(block_rq_type, type, rwbs, ((rw) & WRITE ? RWBS_FLAG_WRITE : \
109 ( (rw) & REQ_DISCARD ? RWBS_FLAG_DISCARD : \
110 ( (bytes) ? RWBS_FLAG_READ : \
111 ( 0 )))) \
112 | ((rw) & REQ_RAHEAD ? RWBS_FLAG_RAHEAD : 0) \
113 | ((rw) & REQ_SYNC ? RWBS_FLAG_SYNC : 0) \
114 | ((rw) & REQ_META ? RWBS_FLAG_META : 0) \
115 | ((rw) & REQ_SECURE ? RWBS_FLAG_SECURE : 0))
116
117 #endif
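/*
 * All three variants above emit the same "rwbs" field.  On kernels
 * before 4.8 there is no separate request operation: everything is
 * encoded in cmd_flags/bi_rw, which is why lttng_req_op() and
 * lttng_bio_op() expand to nothing there and the "op" argument of
 * blk_rwbs_ctf_integer() is simply ignored by those variants.
 */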
118
119 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
120 LTTNG_TRACEPOINT_EVENT_CLASS(block_buffer,
121
122 TP_PROTO(struct buffer_head *bh),
123
124 TP_ARGS(bh),
125
126 TP_FIELDS(
127 ctf_integer(dev_t, dev, bh->b_bdev->bd_dev)
128 ctf_integer(sector_t, sector, bh->b_blocknr)
129 ctf_integer(size_t, size, bh->b_size)
130 )
131 )
132
133 /**
134 * block_touch_buffer - mark a buffer accessed
135 * @bh: buffer_head being touched
136 *
137 * Called from touch_buffer().
138 */
139 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_buffer, block_touch_buffer,
140
141 TP_PROTO(struct buffer_head *bh),
142
143 TP_ARGS(bh)
144 )
145
146 /**
147 * block_dirty_buffer - mark a buffer dirty
148 * @bh: buffer_head being dirtied
149 *
150 * Called from mark_buffer_dirty().
151 */
152 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_buffer, block_dirty_buffer,
153
154 TP_PROTO(struct buffer_head *bh),
155
156 TP_ARGS(bh)
157 )
158 #endif
159
160 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
161 /* block_rq_with_error event class removed in kernel 4.12 */
162 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
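/*
 * The _CODE variants below pair a TP_locvar() scratch structure with a
 * TP_code_pre() block: the scratch values are computed at trace time,
 * before the TP_FIELDS() are serialized, so that SCSI passthrough
 * requests (which carry a CDB instead of a sector range) and regular
 * requests can share a single event layout.
 */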
163 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq_with_error,
164
165 TP_PROTO(struct request_queue *q, struct request *rq),
166
167 TP_ARGS(q, rq),
168
169 TP_locvar(
170 sector_t sector;
171 unsigned int nr_sector;
172 unsigned char *cmd;
173 size_t cmd_len;
174 ),
175
176 TP_code_pre(
177 if (blk_rq_is_scsi(rq)) {
178 struct scsi_request *scsi_rq = scsi_req(rq);
179 tp_locvar->sector = 0;
180 tp_locvar->nr_sector = 0;
181 tp_locvar->cmd = scsi_rq->cmd;
182 tp_locvar->cmd_len = scsi_rq->cmd_len;
183 } else {
184 tp_locvar->sector = blk_rq_pos(rq);
185 tp_locvar->nr_sector = blk_rq_sectors(rq);
186 tp_locvar->cmd = NULL;
187 tp_locvar->cmd_len = 0;
188 }
189 ),
190
191 TP_FIELDS(
192 ctf_integer(dev_t, dev,
193 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
194 ctf_integer(sector_t, sector, tp_locvar->sector)
195 ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
196 ctf_integer(int, errors, rq->errors)
197 blk_rwbs_ctf_integer(unsigned int, rwbs,
198 lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
199 ctf_sequence_hex(unsigned char, cmd,
200 tp_locvar->cmd, size_t, tp_locvar->cmd_len)
201 ),
202
203 TP_code_post()
204 )
205 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
206 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq_with_error,
207
208 TP_PROTO(struct request_queue *q, struct request *rq),
209
210 TP_ARGS(q, rq),
211
212 TP_locvar(
213 sector_t sector;
214 unsigned int nr_sector;
215 unsigned char *cmd;
216 size_t cmd_len;
217 ),
218
219 TP_code_pre(
220
221 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
222 tp_locvar->sector = 0;
223 tp_locvar->nr_sector = 0;
224 tp_locvar->cmd = rq->cmd;
225 tp_locvar->cmd_len = rq->cmd_len;
226 } else {
227 tp_locvar->sector = blk_rq_pos(rq);
228 tp_locvar->nr_sector = blk_rq_sectors(rq);
229 tp_locvar->cmd = NULL;
230 tp_locvar->cmd_len = 0;
231 }
232 ),
233
234 TP_FIELDS(
235 ctf_integer(dev_t, dev,
236 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
237 ctf_integer(sector_t, sector, tp_locvar->sector)
238 ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
239 ctf_integer(int, errors, rq->errors)
240 blk_rwbs_ctf_integer(unsigned int, rwbs,
241 lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
242 ctf_sequence_hex(unsigned char, cmd,
243 tp_locvar->cmd, size_t, tp_locvar->cmd_len)
244 ),
245
246 TP_code_post()
247 )
248 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
249
250 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
251 /**
252 * block_rq_abort - abort block operation request
253 * @q: queue containing the block operation request
254 * @rq: block IO operation request
255 *
256 * Called immediately after pending block IO operation request @rq in
257 * queue @q is aborted. The fields in the operation request @rq
258 * can be examined to determine which device and sectors the pending
259 * operation would access.
260 */
261 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_abort,
262
263 TP_PROTO(struct request_queue *q, struct request *rq),
264
265 TP_ARGS(q, rq)
266 )
267 #endif
268
269 /**
270 * block_rq_requeue - place block IO request back on a queue
271 * @q: queue holding operation
272 * @rq: block IO operation request
273 *
274 * The block operation request @rq is being placed back into queue
275 * @q. For some reason the request was not completed and needs to be
276 * put back in the queue.
277 */
278 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
279 LTTNG_TRACEPOINT_EVENT(block_rq_requeue,
280
281 TP_PROTO(struct request_queue *q, struct request *rq),
282
283 TP_ARGS(q, rq),
284
285 TP_FIELDS(
286 ctf_integer(dev_t, dev,
287 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
288 ctf_integer(sector_t, sector, blk_rq_trace_sector(rq))
289 ctf_integer(unsigned int, nr_sector, blk_rq_trace_nr_sectors(rq))
290 blk_rwbs_ctf_integer(unsigned int, rwbs,
291 lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
292 )
293 )
294 #else
295 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_requeue,
296
297 TP_PROTO(struct request_queue *q, struct request *rq),
298
299 TP_ARGS(q, rq)
300 )
301 #endif
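/*
 * On 4.12+ the blk_rq_trace_sector() and blk_rq_trace_nr_sectors()
 * helpers already return 0 for passthrough requests, so the requeue
 * event above no longer needs a TP_code_pre() block to special-case
 * them.
 */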
302
303 /**
304 * block_rq_complete - block IO operation completed by device driver
305 * @q: queue containing the block operation request
306 * @rq: block operation request
307 * @nr_bytes: number of completed bytes
308 *
309 * The block_rq_complete tracepoint event indicates that some portion
310 * of the operation request has been completed by the device driver. If
311 * @rq->bio is %NULL, then there is no additional work to
312 * do for the request. If @rq->bio is non-NULL then there is
313 * additional work required to complete the request.
314 */
315 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
316 LTTNG_TRACEPOINT_EVENT(block_rq_complete,
317
318 TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
319
320 TP_ARGS(rq, error, nr_bytes),
321
322 TP_FIELDS(
323 ctf_integer(dev_t, dev,
324 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
325 ctf_integer(sector_t, sector, blk_rq_pos(rq))
326 ctf_integer(unsigned int, nr_sector, nr_bytes >> 9)
327 ctf_integer(int, error, error)
328 blk_rwbs_ctf_integer(unsigned int, rwbs,
329 lttng_req_op(rq), lttng_req_rw(rq), nr_bytes)
330 )
331 )
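/*
 * nr_sector is derived as nr_bytes >> 9 because the block layer works
 * in 512-byte sectors, keeping the unit consistent with blk_rq_pos()
 * and blk_rq_sectors() used elsewhere in this file.
 */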
332 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
333 LTTNG_TRACEPOINT_EVENT_CODE(block_rq_complete,
334
335 TP_PROTO(struct request_queue *q, struct request *rq,
336 unsigned int nr_bytes),
337
338 TP_ARGS(q, rq, nr_bytes),
339
340 TP_locvar(
341 unsigned char *cmd;
342 size_t cmd_len;
343 ),
344
345 TP_code_pre(
346 if (blk_rq_is_scsi(rq)) {
347 struct scsi_request *scsi_rq = scsi_req(rq);
348 tp_locvar->cmd = scsi_rq->cmd;
349 tp_locvar->cmd_len = scsi_rq->cmd_len;
350 } else {
351 tp_locvar->cmd = NULL;
352 tp_locvar->cmd_len = 0;
353 }
354 ),
355
356 TP_FIELDS(
357 ctf_integer(dev_t, dev,
358 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
359 ctf_integer(sector_t, sector, blk_rq_pos(rq))
360 ctf_integer(unsigned int, nr_sector, nr_bytes >> 9)
361 ctf_integer(int, errors, rq->errors)
362 blk_rwbs_ctf_integer(unsigned int, rwbs,
363 lttng_req_op(rq), lttng_req_rw(rq), nr_bytes)
364 ctf_sequence_hex(unsigned char, cmd,
365 tp_locvar->cmd, size_t, tp_locvar->cmd_len)
366 ),
367
368 TP_code_post()
369 )
370 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5) \
371 || LTTNG_KERNEL_RANGE(3,12,21, 3,13,0) \
372 || LTTNG_KERNEL_RANGE(3,10,41, 3,11,0) \
373 || LTTNG_KERNEL_RANGE(3,4,91, 3,5,0) \
374 || LTTNG_KERNEL_RANGE(3,2,58, 3,3,0) \
375 || LTTNG_UBUNTU_KERNEL_RANGE(3,13,11,28, 3,14,0,0) \
376 || LTTNG_RHEL_KERNEL_RANGE(3,10,0,229,0,0, 3,11,0,0,0,0))
377
378 LTTNG_TRACEPOINT_EVENT_CODE(block_rq_complete,
379
380 TP_PROTO(struct request_queue *q, struct request *rq,
381 unsigned int nr_bytes),
382
383 TP_ARGS(q, rq, nr_bytes),
384
385 TP_locvar(
386 unsigned char *cmd;
387 size_t cmd_len;
388 ),
389
390 TP_code_pre(
391 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
392 tp_locvar->cmd = rq->cmd;
393 tp_locvar->cmd_len = rq->cmd_len;
394 } else {
395 tp_locvar->cmd = NULL;
396 tp_locvar->cmd_len = 0;
397 }
398 ),
399
400 TP_FIELDS(
401 ctf_integer(dev_t, dev,
402 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
403 ctf_integer(sector_t, sector, blk_rq_pos(rq))
404 ctf_integer(unsigned int, nr_sector, nr_bytes >> 9)
405 ctf_integer(int, errors, rq->errors)
406 blk_rwbs_ctf_integer(unsigned int, rwbs,
407 lttng_req_op(rq), lttng_req_rw(rq), nr_bytes)
408 ctf_sequence_hex(unsigned char, cmd,
409 tp_locvar->cmd, size_t, tp_locvar->cmd_len)
410 ),
411
412 TP_code_post()
413 )
414
415 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5)) */
416
417 /**
418 * block_rq_complete - block IO operation completed by device driver
419 * @q: queue containing the block operation request
420 * @rq: block operation request
421 *
422 * The block_rq_complete tracepoint event indicates that some portion
423 * of the operation request has been completed by the device driver. If
424 * @rq->bio is %NULL, then there is no additional work to
425 * do for the request. If @rq->bio is non-NULL then there is
426 * additional work required to complete the request.
427 */
428 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq_with_error, block_rq_complete,
429
430 TP_PROTO(struct request_queue *q, struct request *rq),
431
432 TP_ARGS(q, rq)
433 )
434
435 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,5)) */
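/*
 * Example of recording these events from user space (illustrative;
 * which events exist depends on the kernel version branches above):
 *
 *	lttng create block-session
 *	lttng enable-event --kernel block_rq_issue,block_rq_complete
 *	lttng start
 *	... run the workload of interest ...
 *	lttng stop
 *	lttng view
 */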
436
437 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
438 LTTNG_TRACEPOINT_EVENT_CLASS(block_rq,
439
440 TP_PROTO(struct request_queue *q, struct request *rq),
441
442 TP_ARGS(q, rq),
443
444 TP_FIELDS(
445 ctf_integer(dev_t, dev,
446 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
447 ctf_integer(sector_t, sector, blk_rq_trace_sector(rq))
448 ctf_integer(unsigned int, nr_sector, blk_rq_trace_nr_sectors(rq))
449 ctf_integer(unsigned int, bytes, blk_rq_bytes(rq))
450 ctf_integer(pid_t, tid, current->pid)
451 blk_rwbs_ctf_integer(unsigned int, rwbs,
452 lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
453 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
454 )
455 )
456 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0))
457 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq,
458
459 TP_PROTO(struct request_queue *q, struct request *rq),
460
461 TP_ARGS(q, rq),
462
463 TP_locvar(
464 sector_t sector;
465 unsigned int nr_sector;
466 unsigned int bytes;
467 unsigned char *cmd;
468 size_t cmd_len;
469 ),
470
471 TP_code_pre(
472 if (blk_rq_is_scsi(rq)) {
473 struct scsi_request *scsi_rq = scsi_req(rq);
474 tp_locvar->sector = 0;
475 tp_locvar->nr_sector = 0;
476 tp_locvar->bytes = scsi_rq->resid_len;
477 tp_locvar->cmd = scsi_rq->cmd;
478 tp_locvar->cmd_len = scsi_rq->cmd_len;
479 } else {
480 tp_locvar->sector = blk_rq_pos(rq);
481 tp_locvar->nr_sector = blk_rq_sectors(rq);
482 tp_locvar->bytes = 0;
483 tp_locvar->cmd = NULL;
484 tp_locvar->cmd_len = 0;
485 }
486 ),
487
488 TP_FIELDS(
489 ctf_integer(dev_t, dev,
490 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
491 ctf_integer(sector_t, sector, tp_locvar->sector)
492 ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
493 ctf_integer(unsigned int, bytes, tp_locvar->bytes)
494 ctf_integer(pid_t, tid, current->pid)
495 blk_rwbs_ctf_integer(unsigned int, rwbs,
496 lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
497 ctf_sequence_hex(unsigned char, cmd,
498 tp_locvar->cmd, size_t, tp_locvar->cmd_len)
499 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
500 ),
501
502 TP_code_post()
503 )
504 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
505 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(block_rq,
506
507 TP_PROTO(struct request_queue *q, struct request *rq),
508
509 TP_ARGS(q, rq),
510
511 TP_locvar(
512 sector_t sector;
513 unsigned int nr_sector;
514 unsigned int bytes;
515 unsigned char *cmd;
516 size_t cmd_len;
517 ),
518
519 TP_code_pre(
520 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
521 tp_locvar->sector = 0;
522 tp_locvar->nr_sector = 0;
523 tp_locvar->bytes = blk_rq_bytes(rq);
524 tp_locvar->cmd = rq->cmd;
525 tp_locvar->cmd_len = rq->cmd_len;
526 } else {
527 tp_locvar->sector = blk_rq_pos(rq);
528 tp_locvar->nr_sector = blk_rq_sectors(rq);
529 tp_locvar->bytes = 0;
530 tp_locvar->cmd = NULL;
531 tp_locvar->cmd_len = 0;
532 }
533 ),
534
535 TP_FIELDS(
536 ctf_integer(dev_t, dev,
537 rq->rq_disk ? disk_devt(rq->rq_disk) : 0)
538 ctf_integer(sector_t, sector, tp_locvar->sector)
539 ctf_integer(unsigned int, nr_sector, tp_locvar->nr_sector)
540 ctf_integer(unsigned int, bytes, tp_locvar->bytes)
541 ctf_integer(pid_t, tid, current->pid)
542 blk_rwbs_ctf_integer(unsigned int, rwbs,
543 lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
544 ctf_sequence_hex(unsigned char, cmd,
545 tp_locvar->cmd, size_t, tp_locvar->cmd_len)
546 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
547 ),
548
549 TP_code_post()
550 )
551 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,0)) */
552
553 /**
554 * block_rq_insert - insert block operation request into queue
555 * @q: target queue
556 * @rq: block IO operation request
557 *
558 * Called immediately before block operation request @rq is inserted
559 * into queue @q. The fields in the operation request @rq struct can
560 * be examined to determine which device and sectors the pending
561 * operation would access.
562 */
563 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq, block_rq_insert,
564
565 TP_PROTO(struct request_queue *q, struct request *rq),
566
567 TP_ARGS(q, rq)
568 )
569
570 /**
571 * block_rq_issue - issue pending block IO request operation to device driver
572 * @q: queue holding operation
573 * @rq: block IO operation request
574 *
575 * Called when block operation request @rq from queue @q is sent to a
576 * device driver for processing.
577 */
578 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_rq, block_rq_issue,
579
580 TP_PROTO(struct request_queue *q, struct request *rq),
581
582 TP_ARGS(q, rq)
583 )
584
585 /**
586 * block_bio_bounce - used bounce buffer when processing block operation
587 * @q: queue holding the block operation
588 * @bio: block operation
589 *
590 * A bounce buffer was used to handle the block operation @bio in @q.
591 * This occurs when hardware limitations prevent a direct transfer of
592 * data between the @bio data memory area and the IO device. Use of a
593 * bounce buffer requires extra copying of data and decreases
594 * performance.
595 */
596 LTTNG_TRACEPOINT_EVENT(block_bio_bounce,
597
598 TP_PROTO(struct request_queue *q, struct bio *bio),
599
600 TP_ARGS(q, bio),
601
602 TP_FIELDS(
603 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
604 ctf_integer(dev_t, dev, bio_dev(bio))
605 #else
606 ctf_integer(dev_t, dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
607 #endif
608 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
609 ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
610 ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
611 blk_rwbs_ctf_integer(unsigned int, rwbs,
612 lttng_bio_op(bio), lttng_bio_rw(bio),
613 bio->bi_iter.bi_size)
614 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
615 ctf_integer(sector_t, sector, bio->bi_sector)
616 ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
617 blk_rwbs_ctf_integer(unsigned int, rwbs,
618 lttng_bio_op(bio), lttng_bio_rw(bio),
619 bio->bi_size)
620 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
621 ctf_integer(pid_t, tid, current->pid)
622 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
623 )
624 )
625
626 /**
627 * block_bio_complete - completed all work on the block operation
628 * @q: queue holding the block operation
629 * @bio: block operation completed
630 * @error: io error value
631 *
632 * This tracepoint indicates there is no further work to do on this
633 * block IO operation @bio.
634 */
635 LTTNG_TRACEPOINT_EVENT(block_bio_complete,
636
637 TP_PROTO(struct request_queue *q, struct bio *bio, int error),
638
639 TP_ARGS(q, bio, error),
640
641 TP_FIELDS(
642 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
643 ctf_integer(dev_t, dev, bio_dev(bio))
644 #else
645 ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
646 #endif
647 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
648 ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
649 ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
650 ctf_integer(int, error, error)
651 blk_rwbs_ctf_integer(unsigned int, rwbs,
652 lttng_bio_op(bio), lttng_bio_rw(bio),
653 bio->bi_iter.bi_size)
654 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
655 ctf_integer(sector_t, sector, bio->bi_sector)
656 ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
657 ctf_integer(int, error, error)
658 blk_rwbs_ctf_integer(unsigned int, rwbs,
659 lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
660 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
661 )
662 )
663
664 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
665 LTTNG_TRACEPOINT_EVENT_CLASS(block_bio_merge,
666
667 TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
668
669 TP_ARGS(q, rq, bio),
670
671 TP_FIELDS(
672 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
673 ctf_integer(dev_t, dev, bio_dev(bio))
674 #else
675 ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
676 #endif
677 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
678 ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
679 ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
680 blk_rwbs_ctf_integer(unsigned int, rwbs,
681 lttng_bio_op(bio), lttng_bio_rw(bio),
682 bio->bi_iter.bi_size)
683 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
684 ctf_integer(sector_t, sector, bio->bi_sector)
685 ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
686 blk_rwbs_ctf_integer(unsigned int, rwbs,
687 lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
688 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
689 ctf_integer(pid_t, tid, current->pid)
690 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
691 )
692 )
693
694 /**
695 * block_bio_backmerge - merging block operation to the end of an existing operation
696 * @q: queue holding operation
697 * @bio: new block operation to merge
698 *
699 * Merging block request @bio to the end of an existing block request
700 * in queue @q.
701 */
702 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio_merge, block_bio_backmerge,
703
704 TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
705
706 TP_ARGS(q, rq, bio)
707 )
708
709 /**
710 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
711 * @q: queue holding operation
712 * @bio: new block operation to merge
713 *
714 * Merging block IO operation @bio to the beginning of an existing block
715 * operation in queue @q.
716 */
717 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio_merge, block_bio_frontmerge,
718
719 TP_PROTO(struct request_queue *q, struct request *rq, struct bio *bio),
720
721 TP_ARGS(q, rq, bio)
722 )
723
724 /**
725 * block_bio_queue - putting new block IO operation in queue
726 * @q: queue holding operation
727 * @bio: new block operation
728 *
729 * About to place the block IO operation @bio into queue @q.
730 */
731 LTTNG_TRACEPOINT_EVENT(block_bio_queue,
732
733 TP_PROTO(struct request_queue *q, struct bio *bio),
734
735 TP_ARGS(q, bio),
736
737 TP_FIELDS(
738 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
739 ctf_integer(dev_t, dev, bio_dev(bio))
740 #else
741 ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
742 #endif
743 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
744 ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
745 ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
746 blk_rwbs_ctf_integer(unsigned int, rwbs,
747 lttng_bio_op(bio), lttng_bio_rw(bio),
748 bio->bi_iter.bi_size)
749 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
750 ctf_integer(sector_t, sector, bio->bi_sector)
751 ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
752 blk_rwbs_ctf_integer(unsigned int, rwbs,
753 lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
754 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
755 ctf_integer(pid_t, tid, current->pid)
756 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
757 )
758 )
759 #else /* if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */
760 LTTNG_TRACEPOINT_EVENT_CLASS(block_bio,
761
762 TP_PROTO(struct request_queue *q, struct bio *bio),
763
764 TP_ARGS(q, bio),
765
766 TP_FIELDS(
767 ctf_integer(dev_t, dev, bio->bi_bdev ? bio->bi_bdev->bd_dev : 0)
768 ctf_integer(sector_t, sector, bio->bi_sector)
769 ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
770 blk_rwbs_ctf_integer(unsigned int, rwbs,
771 lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
772 ctf_integer(pid_t, tid, current->pid)
773 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
774 )
775 )
776
777 /**
778 * block_bio_backmerge - merging block operation to the end of an existing operation
779 * @q: queue holding operation
780 * @bio: new block operation to merge
781 *
782 * Merging block request @bio to the end of an existing block request
783 * in queue @q.
784 */
785 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_backmerge,
786
787 TP_PROTO(struct request_queue *q, struct bio *bio),
788
789 TP_ARGS(q, bio)
790 )
791
792 /**
793 * block_bio_frontmerge - merging block operation to the beginning of an existing operation
794 * @q: queue holding operation
795 * @bio: new block operation to merge
796 *
797 * Merging block IO operation @bio to the beginning of an existing block
798 * operation in queue @q.
799 */
800 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_frontmerge,
801
802 TP_PROTO(struct request_queue *q, struct bio *bio),
803
804 TP_ARGS(q, bio)
805 )
806
807 /**
808 * block_bio_queue - putting new block IO operation in queue
809 * @q: queue holding operation
810 * @bio: new block operation
811 *
812 * About to place the block IO operation @bio into queue @q.
813 */
814 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_bio, block_bio_queue,
815
816 TP_PROTO(struct request_queue *q, struct bio *bio),
817
818 TP_ARGS(q, bio)
819 )
820 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)) */
821
822 LTTNG_TRACEPOINT_EVENT_CLASS(block_get_rq,
823
824 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
825
826 TP_ARGS(q, bio, rw),
827
828 TP_FIELDS(
829 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
830 ctf_integer(dev_t, dev, bio ? bio_dev(bio) : 0)
831 #else
832 ctf_integer(dev_t, dev, bio ? bio->bi_bdev->bd_dev : 0)
833 #endif
834 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
835 ctf_integer(sector_t, sector, bio ? bio->bi_iter.bi_sector : 0)
836 ctf_integer(unsigned int, nr_sector,
837 bio ? bio_sectors(bio) : 0)
838 blk_rwbs_ctf_integer(unsigned int, rwbs,
839 bio ? lttng_bio_op(bio) : 0,
840 bio ? lttng_bio_rw(bio) : 0,
841 bio ? bio->bi_iter.bi_size : 0)
842 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
843 ctf_integer(sector_t, sector, bio ? bio->bi_sector : 0)
844 ctf_integer(unsigned int, nr_sector,
845 bio ? bio->bi_size >> 9 : 0)
846 blk_rwbs_ctf_integer(unsigned int, rwbs,
847 bio ? lttng_bio_op(bio) : 0,
848 bio ? lttng_bio_rw(bio) : 0,
849 bio ? bio->bi_size : 0)
850 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
851 ctf_integer(pid_t, tid, current->pid)
852 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
853 )
854 )
855
856 /**
857 * block_getrq - get a free request entry in queue for block IO operations
858 * @q: queue for operations
859 * @bio: pending block IO operation (can be %NULL)
860 * @rw: low bit indicates a read (%0) or a write (%1)
861 *
862 * A request struct for queue @q has been allocated to handle the
863 * block IO operation @bio.
864 */
865 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_get_rq, block_getrq,
866
867 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
868
869 TP_ARGS(q, bio, rw)
870 )
871
872 /**
873 * block_sleeprq - waiting to get a free request entry in queue for block IO operation
874 * @q: queue for operation
875 * @bio: pending block IO operation (can be %NULL)
876 * @rw: low bit indicates a read (%0) or a write (%1)
877 *
878 * In the case where a request struct cannot be provided for queue @q,
879 * the process needs to wait for a request struct to become available.
880 * This tracepoint event is generated each time the process goes to
881 * sleep waiting for a request struct to become available.
882 */
883 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_get_rq, block_sleeprq,
884
885 TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
886
887 TP_ARGS(q, bio, rw)
888 )
889
890 /**
891 * block_plug - keep operation requests in the request queue
892 * @q: request queue to plug
893 *
894 * Plug the request queue @q. Do not allow block operation requests
895 * to be sent to the device driver. Instead, accumulate requests in
896 * the queue to improve throughput performance of the block device.
897 */
898 LTTNG_TRACEPOINT_EVENT(block_plug,
899
900 TP_PROTO(struct request_queue *q),
901
902 TP_ARGS(q),
903
904 TP_FIELDS(
905 ctf_integer(pid_t, tid, current->pid)
906 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
907 )
908 )
909
910 LTTNG_TRACEPOINT_EVENT_CLASS(block_unplug,
911
912 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
913
914 TP_ARGS(q, depth, explicit),
915
916 TP_FIELDS(
917 ctf_integer(int, nr_rq, depth)
918 ctf_integer(pid_t, tid, current->pid)
919 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
920 )
921 )
922
923 /**
924 * block_unplug - release of operation requests in the request queue
925 * @q: request queue to unplug
926 * @depth: number of requests just added to the queue
927 * @explicit: whether this was an explicit unplug, or one from schedule()
928 *
929 * Unplug request queue @q because the device driver is scheduled to work
930 * on elements in the request queue.
931 */
932 LTTNG_TRACEPOINT_EVENT_INSTANCE(block_unplug, block_unplug,
933
934 TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
935
936 TP_ARGS(q, depth, explicit)
937 )
938
939 /**
940 * block_split - split a single bio struct into two bio structs
941 * @q: queue containing the bio
942 * @bio: block operation being split
943 * @new_sector: The starting sector for the new bio
944 *
945 * The bio request @bio in request queue @q needs to be split into two
946 * bio requests. The newly created @bio request starts at
947 * @new_sector. This split may be required due to hardware limitations
948 * such as the operation crossing device boundaries in a RAID system.
949 */
950 LTTNG_TRACEPOINT_EVENT(block_split,
951
952 TP_PROTO(struct request_queue *q, struct bio *bio,
953 unsigned int new_sector),
954
955 TP_ARGS(q, bio, new_sector),
956
957 TP_FIELDS(
958 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
959 ctf_integer(dev_t, dev, bio_dev(bio))
960 #else
961 ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
962 #endif
963 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
964 ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
965 blk_rwbs_ctf_integer(unsigned int, rwbs,
966 lttng_bio_op(bio), lttng_bio_rw(bio),
967 bio->bi_iter.bi_size)
968 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
969 ctf_integer(sector_t, sector, bio->bi_sector)
970 blk_rwbs_ctf_integer(unsigned int, rwbs,
971 lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
972 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
973 ctf_integer(sector_t, new_sector, new_sector)
974 ctf_integer(pid_t, tid, current->pid)
975 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
976 )
977 )
978
979 /**
980 * block_bio_remap - map request for a logical device to the raw device
981 * @q: queue holding the operation
982 * @bio: revised operation
983 * @dev: device for the operation
984 * @from: original sector for the operation
985 *
986 * An operation for a logical device has been mapped to the
987 * raw block device.
988 */
989 LTTNG_TRACEPOINT_EVENT(block_bio_remap,
990
991 TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
992 sector_t from),
993
994 TP_ARGS(q, bio, dev, from),
995
996 TP_FIELDS(
997 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
998 ctf_integer(dev_t, dev, bio_dev(bio))
999 #else
1000 ctf_integer(dev_t, dev, bio->bi_bdev->bd_dev)
1001 #endif
1002 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0))
1003 ctf_integer(sector_t, sector, bio->bi_iter.bi_sector)
1004 ctf_integer(unsigned int, nr_sector, bio_sectors(bio))
1005 blk_rwbs_ctf_integer(unsigned int, rwbs,
1006 lttng_bio_op(bio), lttng_bio_rw(bio),
1007 bio->bi_iter.bi_size)
1008 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
1009 ctf_integer(sector_t, sector, bio->bi_sector)
1010 ctf_integer(unsigned int, nr_sector, bio->bi_size >> 9)
1011 blk_rwbs_ctf_integer(unsigned int, rwbs,
1012 lttng_bio_op(bio), lttng_bio_rw(bio), bio->bi_size)
1013 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)) */
1014 ctf_integer(dev_t, old_dev, dev)
1015 ctf_integer(sector_t, old_sector, from)
1016 )
1017 )
1018
1019 /**
1020 * block_rq_remap - map request for a block operation request
1021 * @q: queue holding the operation
1022 * @rq: block IO operation request
1023 * @dev: device for the operation
1024 * @from: original sector for the operation
1025 *
1026 * The block operation request @rq in @q has been remapped. The block
1027 * operation request @rq holds the current information and @from holds
1028 * the original sector.
1029 */
1030 LTTNG_TRACEPOINT_EVENT(block_rq_remap,
1031
1032 TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev,
1033 sector_t from),
1034
1035 TP_ARGS(q, rq, dev, from),
1036
1037 TP_FIELDS(
1038 ctf_integer(dev_t, dev, disk_devt(rq->rq_disk))
1039 ctf_integer(sector_t, sector, blk_rq_pos(rq))
1040 ctf_integer(unsigned int, nr_sector, blk_rq_sectors(rq))
1041 ctf_integer(dev_t, old_dev, dev)
1042 ctf_integer(sector_t, old_sector, from)
1043 blk_rwbs_ctf_integer(unsigned int, rwbs,
1044 lttng_req_op(rq), lttng_req_rw(rq), blk_rq_bytes(rq))
1045 )
1046 )
1047
1048 #undef __print_rwbs_flags
1049 #undef blk_fill_rwbs
1050
1051 #endif /* LTTNG_TRACE_BLOCK_H */
1052
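/*
 * <lttng/define_trace.h> is deliberately included outside the
 * LTTNG_TRACE_BLOCK_H guard: together with the TRACE_HEADER_MULTI_READ
 * check at the top of this file, it lets the tracepoint machinery
 * re-read the header and expand the TP_FIELDS() descriptions again when
 * generating the probe code, mirroring the kernel's define_trace.h
 * pattern.
 */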
1053 /* This part must be outside protection */
1054 #include <lttng/define_trace.h>