tracing: make tracing_reset safe for external use
[deliverable/linux.git] kernel/trace/blktrace.c
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/smp_lock.h>
#include <linux/time.h>
#include <linux/uaccess.h>

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

/* Select an alternative, minimalistic output, rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1

static struct tracer_opt blk_tracer_opts[] = {
	/* Default: disable the minimalistic output */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static atomic_t blk_probes_ref = ATOMIC_INIT(0);

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

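/*
 * The block tracepoints below are attached only while at least one trace
 * is active: the setup paths bump blk_probes_ref and register the probes
 * on the 0 -> 1 transition, and the teardown paths unregister them again
 * on the final 1 -> 0 transition, so an idle queue pays no per-I/O
 * tracing overhead.
 *
 * Completed records go to one of two sinks: the ftrace ring buffer when
 * the "blk" tracer is enabled (blk_tracer_enabled), or the per-device
 * relay channel read by the blktrace utility. trace_note() and
 * __blk_add_trace() both follow the same pattern: reserve space in
 * whichever buffer is active, fill in a struct blk_io_trace, then commit.
 */
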
/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;

	if (blk_tracer) {
		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	local_irq_save(flags);
	buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}
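
/*
 * act_log_check() returns nonzero when an event should be *dropped*: the
 * event's action class must be present in the user's act_mask (compared
 * after shifting the mask up into the BLK_TC_* bits of "what"), the
 * sector must fall inside the optional [start_lba, end_lba] window, and,
 * when a pid filter is set, the event must come from that process.
 */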

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \
	  (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name))
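
/*
 * Worked example: MASK_TC_BIT(rw, META) isolates the BIO_RW_META bit in
 * "rw" and shifts it left by (ilog2(BLK_TC_META) + BLK_TC_SHIFT -
 * BIO_RW_META), i.e. exactly far enough to land on BLK_TC_META inside the
 * BLK_TC_* action-class field of the trace "what" word. The result is the
 * class bit when BIO_RW_META is set and 0 otherwise, with no branching.
 */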

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int rw, u32 what, int error, int pdu_len,
			    void *pdu_data)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[rw & WRITE];
	what |= MASK_TC_BIT(rw, BARRIER);
	what |= MASK_TC_BIT(rw, SYNCIO);
	what |= MASK_TC_BIT(rw, AHEAD);
	what |= MASK_TC_BIT(rw, META);
	what |= MASK_TC_BIT(rw, DISCARD);

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		pc = preempt_count();
		event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK,
						  sizeof(*t) + pdu_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	debugfs_remove(bt->dir);
	relay_close(bt->rchan);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();
}

int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};
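
/*
 * The "dropped" file lives in the per-device blktrace debugfs directory
 * created in do_blk_trace_setup(). Assuming debugfs is mounted at the
 * usual /sys/kernel/debug, a reader can poll it roughly like:
 *
 *	cat /sys/kernel/debug/block/sda/dropped
 *
 * ("sda" is just an example device name.) A non-zero count means relay
 * subbuffers filled up and that many events were discarded.
 */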

static int blk_msg_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = kmalloc(count + 1, GFP_KERNEL);
	if (msg == NULL)
		return -ENOMEM;

	if (copy_from_user(msg, buffer, count)) {
		kfree(msg);
		return -EFAULT;
	}

	msg[count] = '\0';
	bt = filp->private_data;
	__trace_note_message(bt, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_msg_open,
	.write =	blk_msg_write,
};
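
/*
 * The write-only "msg" file lets user space inject markers into the event
 * stream as BLK_TN_MESSAGE notes, e.g. (device name is illustrative):
 *
 *	echo "fio run starting" > /sys/kernel/debug/block/sda/msg
 *
 * which later shows up inline with the I/O events when the trace is
 * decoded.
 */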

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	debugfs_remove(dentry);

	/*
	 * this will fail for all but the last file, but that is ok. what we
	 * care about is the top level buts->name directory going away, when
	 * the last trace file is gone. Then we don't have to rmdir() that
	 * manually on trace stop, so it nicely solves the issue with
	 * force killing of running traces.
	 */

	debugfs_remove(parent);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		       struct block_device *bdev,
		       struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	mutex_lock(&blk_tree_mutex);
	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root) {
			mutex_unlock(&blk_tree_mutex);
			goto err;
		}
	}
	mutex_unlock(&blk_tree_mutex);

	dir = debugfs_create_dir(buts->name, blk_tree_root);

	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

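	/*
	 * Lock-free publish: xchg() atomically installs the new blk_trace
	 * on the queue. If another trace was already installed, the second
	 * xchg() puts it back and we bail out with -EBUSY, so the hot path
	 * never needs a lock just to look at q->blk_trace.
	 */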
	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();

	return 0;
err:
	blk_trace_free(bt);
	return ret;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);
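
/*
 * From user space this is normally reached through the BLKTRACESETUP
 * ioctl handled below; a minimal sketch (error handling omitted, the
 * field values are only an illustration):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *		.act_mask = 0,		// 0 means "trace everything"
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 *
 * On return, buts.name holds the debugfs directory name the relay files
 * were created under.
 */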

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);
2056a782
JA
590
591/**
592 * blk_trace_ioctl: - handle the ioctls associated with tracing
593 * @bdev: the block device
ef18012b 594 * @cmd: the ioctl cmd
2056a782
JA
595 * @arg: the argument data, if any
596 *
597 **/
598int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
599{
165125e1 600 struct request_queue *q;
2056a782 601 int ret, start = 0;
6da127ad 602 char b[BDEVNAME_SIZE];
2056a782
JA
603
604 q = bdev_get_queue(bdev);
605 if (!q)
606 return -ENXIO;
607
608 mutex_lock(&bdev->bd_mutex);
609
610 switch (cmd) {
611 case BLKTRACESETUP:
f36f21ec 612 bdevname(bdev, b);
d0deef5b 613 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
2056a782
JA
614 break;
615 case BLKTRACESTART:
616 start = 1;
617 case BLKTRACESTOP:
618 ret = blk_trace_startstop(q, start);
619 break;
620 case BLKTRACETEARDOWN:
621 ret = blk_trace_remove(q);
622 break;
623 default:
624 ret = -ENOTTY;
625 break;
626 }
627
628 mutex_unlock(&bdev->bd_mutex);
629 return ret;
630}
631
632/**
633 * blk_trace_shutdown: - stop and cleanup trace structures
634 * @q: the request queue associated with the device
635 *
636 **/
165125e1 637void blk_trace_shutdown(struct request_queue *q)
2056a782 638{
6c5c9341
AD
639 if (q->blk_trace) {
640 blk_trace_startstop(q, 0);
641 blk_trace_remove(q);
642 }
2056a782 643}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @q:		queue the io is for
 * @rq:		the source request
 * @what:	the action
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
			     u32 what)
{
	struct blk_trace *bt = q->blk_trace;
	int rw = rq->cmd_flags & 0x03;

	if (likely(!bt))
		return;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	if (blk_pc_request(rq)) {
		what |= BLK_TC_ACT(BLK_TC_PC);
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
				what, rq->errors, rq->cmd_len, rq->cmd);
	} else {
		what |= BLK_TC_ACT(BLK_TC_FS);
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
				what, rq->errors, 0, NULL);
	}
}

static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
}

static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
}

static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
}

static void blk_add_trace_rq_requeue(struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
}

static void blk_add_trace_rq_complete(struct request_queue *q,
				      struct request *rq)
{
	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
			!bio_flagged(bio, BIO_UPTODATE), 0, NULL);
}

static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
}

static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
}

static void blk_add_trace_bio_backmerge(struct request_queue *q,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
}

static void blk_add_trace_bio_frontmerge(struct request_queue *q,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
}

static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
}

static void blk_add_trace_getrq(struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL);
	}
}

static void blk_add_trace_sleeprq(struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ,
					0, 0, NULL);
	}
}

static void blk_add_trace_plug(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
}

static void blk_add_trace_unplug_io(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_unplug_timer(struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
				sizeof(rpdu), &rpdu);
	}
}

static void blk_add_trace_split(struct request_queue *q, struct bio *bio,
				unsigned int pdu)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);

		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
}

/**
 * blk_add_trace_remap - Add a trace for a remap operation
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
				dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
			sizeof(r), &r);
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_pc_request(rq))
		__blk_add_trace(bt, 0, blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
	else
		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0,
				BLK_TA_DRV_DATA, rq->errors, len, data);
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
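
/*
 * A driver could attach, say, a completion status word to a request while
 * a trace is running; a rough sketch (the status variable is purely
 * illustrative):
 *
 *	u32 status = my_hw_status;
 *	blk_add_driver_data(q, rq, &status, sizeof(status));
 *
 * The bytes are stored verbatim as the event's pdu.
 */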

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_abort(blk_add_trace_rq_abort);
	WARN_ON(ret);
	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug);
	WARN_ON(ret);
	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	WARN_ON(ret);
	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split);
	WARN_ON(ret);
	ret = register_trace_block_remap(blk_add_trace_remap);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_remap(blk_add_trace_remap);
	unregister_trace_block_split(blk_add_trace_split);
	unregister_trace_block_unplug_io(blk_add_trace_unplug_io);
	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer);
	unregister_trace_block_plug(blk_add_trace_plug);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq);
	unregister_trace_block_getrq(blk_add_trace_getrq);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert);
	unregister_trace_block_rq_abort(blk_add_trace_rq_abort);

	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if (t->action == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_BARRIER)
		rwbs[i++] = 'B';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}
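
/*
 * The rwbs string is the short flag column seen in blktrace-style output:
 * the first character is the direction (R/W/D, or N for none), followed
 * by any of A (readahead), B (barrier), S (sync) and M (meta). A
 * synchronous write, for example, renders as "WS".
 */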

static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent) + 1;
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent)
{
	const __u64 *val = pdu_start(ent);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to   = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}

typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);

static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	unsigned long long ts  = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs	       = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	return trace_seq_printf(&iter->seq,
				"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
				MAJOR(t->device), MINOR(t->device), iter->cpu,
				secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static int blk_log_action(struct trace_iterator *iter, const char *act)
{
	char rwbs[6];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end, ret;

	pdu_buf = pdu_start(ent);
	pdu_len = te_blk_io_trace(ent)->pdu_len;

	if (!pdu_len)
		return 1;

	/* find the last zero that needs to be printed */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	if (!trace_seq_putc(s, '('))
		return 0;

	for (i = 0; i < pdu_len; i++) {

		ret = trace_seq_printf(s, "%s%02x",
				       i == 0 ? "" : " ", pdu_buf[i]);
		if (!ret)
			return ret;

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1)
			return trace_seq_puts(s, " ..) ");
	}

	return trace_seq_puts(s, ") ");
}

static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = trace_seq_printf(s, "%u ", t_bytes(ent));
		if (!ret)
			return 0;
		ret = blk_log_dump_pdu(s, ent);
		if (!ret)
			return 0;
		return trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%s]\n",
						t_sector(ent), t_sec(ent), cmd);
		return trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static int blk_log_with_error(struct trace_seq *s,
			      const struct trace_entry *ent)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		int ret;

		ret = blk_log_dump_pdu(s, ent);
		if (ret)
			return trace_seq_printf(s, "[%d]\n", t_error(ent));
		return 0;
	} else {
		if (t_sec(ent))
			return trace_seq_printf(s, "%llu + %u [%d]\n",
						t_sector(ent),
						t_sec(ent), t_error(ent));
		return trace_seq_printf(s, "%llu [%d]\n",
					t_sector(ent), t_error(ent));
	}
}

static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r);
	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
				t_sector(ent), t_sec(ent),
				MAJOR(r.device_from), MINOR(r.device_from),
				(unsigned long long)r.sector_from);
}

static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s]\n", cmd);
}

static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
}

static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
				get_pdu_int(ent), cmd);
}

static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{
	int ret;
	const struct blk_io_trace *t = te_blk_io_trace(ent);

	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
	if (ret)
		return trace_seq_putc(s, '\n');
	return ret;
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = {
	[__BLK_TA_QUEUE]	= {{  "Q", "queue" },	   blk_log_generic },
	[__BLK_TA_BACKMERGE]	= {{  "M", "backmerge" },  blk_log_generic },
	[__BLK_TA_FRONTMERGE]	= {{  "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ]	= {{  "G", "getrq" },	   blk_log_generic },
	[__BLK_TA_SLEEPRQ]	= {{  "S", "sleeprq" },	   blk_log_generic },
	[__BLK_TA_REQUEUE]	= {{  "R", "requeue" },	   blk_log_with_error },
	[__BLK_TA_ISSUE]	= {{  "D", "issue" },	   blk_log_generic },
	[__BLK_TA_COMPLETE]	= {{  "C", "complete" },   blk_log_with_error },
	[__BLK_TA_PLUG]		= {{  "P", "plug" },	   blk_log_plug },
	[__BLK_TA_UNPLUG_IO]	= {{  "U", "unplug_io" },  blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER]	= {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT]	= {{  "I", "insert" },	   blk_log_generic },
	[__BLK_TA_SPLIT]	= {{  "X", "split" },	   blk_log_split },
	[__BLK_TA_BOUNCE]	= {{  "B", "bounce" },	   blk_log_generic },
	[__BLK_TA_REMAP]	= {{  "A", "remap" },	   blk_log_remap },
};
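
/*
 * act[0] is the terse one-letter form and act[1] the long form; which one
 * print_one_line() feeds to the log_action callback is selected by the
 * TRACE_ITER_VERBOSE flag (the "verbose" trace option).
 */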

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	int ret;
	bool long_act;
	blk_log_action_t *log_action;

	t	   = te_blk_io_trace(iter->ent);
	what	   = t->action & ((1 << BLK_TC_SHIFT) - 1);
	long_act   = !!(trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;

	if (t->action == BLK_TN_MESSAGE) {
		ret = log_action(iter, long_act ? "message" : "m");
		if (ret)
			ret = blk_log_msg(s, iter->ent);
		goto out;
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		ret = trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		ret = log_action(iter, what2act[what].act[long_act]);
		if (ret)
			ret = what2act[what].print(s, iter->ent);
	}
out:
	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags)
{
	return print_one_line(iter, false);
}

static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic	  = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time     = iter->ts,
	};

	if (!trace_seq_putmem(s, &old, offset))
		return 0;
	return trace_seq_putmem(s, &t->sector,
				sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags)
{
	return blk_trace_synthesize_old_trace(iter) ?
			TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int blk_tracer_set_flag(u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name		= "blk",
	.init		= blk_tracer_init,
	.reset		= blk_tracer_reset,
	.start		= blk_tracer_start,
	.stop		= blk_tracer_stop,
	.print_header	= blk_tracer_print_header,
	.print_line	= blk_tracer_print_line,
	.flags		= &blk_tracer_flags,
	.set_flag	= blk_tracer_set_flag,
};

static struct trace_event trace_blk_event = {
	.type		= TRACE_BLK,
	.trace		= blk_trace_event_print,
	.binary		= blk_trace_event_print_binary,
};

static int __init init_blk_tracer(void)
{
	if (!register_ftrace_event(&trace_blk_event)) {
		pr_warning("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warning("Warning: could not register the block tracer\n");
		unregister_ftrace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	if (atomic_dec_and_test(&blk_probes_ref))
		blk_unregister_tracepoints();

	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *old_bt, *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt != NULL) {
		(void)xchg(&q->blk_trace, old_bt);
		ret = -EBUSY;
		goto free_bt;
	}

	if (atomic_inc_return(&blk_probes_ref) == 1)
		blk_register_tracepoints();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};
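
/*
 * These attributes appear as a "trace/" directory under the partition's
 * sysfs node. A plausible session (exact paths depend on the device):
 *
 *	echo read,write > /sys/block/sda/sda1/trace/act_mask
 *	echo 1 > /sys/block/sda/sda1/trace/enable
 *
 * i.e. the sysfs path enables tracing without the BLKTRACESETUP ioctl.
 */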

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ,		"read"		},
	{ BLK_TC_WRITE,		"write"		},
	{ BLK_TC_BARRIER,	"barrier"	},
	{ BLK_TC_SYNC,		"sync"		},
	{ BLK_TC_QUEUE,		"queue"		},
	{ BLK_TC_REQUEUE,	"requeue"	},
	{ BLK_TC_ISSUE,		"issue"		},
	{ BLK_TC_COMPLETE,	"complete"	},
	{ BLK_TC_FS,		"fs"		},
	{ BLK_TC_PC,		"pc"		},
	{ BLK_TC_AHEAD,		"ahead"		},
	{ BLK_TC_META,		"meta"		},
	{ BLK_TC_DISCARD,	"discard"	},
	{ BLK_TC_DRV_DATA,	"drv_data"	},
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}
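
/*
 * For example, blk_trace_str2mask("read,write,sync") evaluates to
 * BLK_TC_READ | BLK_TC_WRITE | BLK_TC_SYNC (matching is case-insensitive),
 * while any unrecognized token makes the whole string parse to -EINVAL.
 */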

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				    (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	lock_kernel();
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (sscanf(buf, "%llx", &value) != 1) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (sscanf(buf, "%llu", &value) != 1)
		goto out;

	ret = -ENXIO;

	lock_kernel();
	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out_unlock_kernel;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&bdev->bd_mutex);

	if (attr == &dev_attr_enable) {
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&bdev->bd_mutex);
out_bdput:
	bdput(bdev);
out_unlock_kernel:
	unlock_kernel();
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

void blk_dump_cmd(char *buf, struct request *rq)
{
	int i, end;
	int len = rq->cmd_len;
	unsigned char *cmd = rq->cmd;

	if (!blk_pc_request(rq)) {
		buf[0] = '\0';
		return;
	}

	for (end = len - 1; end >= 0; end--)
		if (cmd[end])
			break;
	end++;

	for (i = 0; i < len; i++) {
		buf += sprintf(buf, "%s%02x", i == 0 ? "" : " ", cmd[i]);
		if (i == end && end != len - 1) {
			sprintf(buf, " ..");
			break;
		}
	}
}

void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
{
	int i = 0;

	if (rw & WRITE)
		rwbs[i++] = 'W';
	else if (rw & 1 << BIO_RW_DISCARD)
		rwbs[i++] = 'D';
	else if (bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (rw & 1 << BIO_RW_AHEAD)
		rwbs[i++] = 'A';
	if (rw & 1 << BIO_RW_BARRIER)
		rwbs[i++] = 'B';
	if (rw & 1 << BIO_RW_SYNCIO)
		rwbs[i++] = 'S';
	if (rw & 1 << BIO_RW_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
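
/*
 * E.g. blk_fill_rwbs(rwbs, WRITE | (1 << BIO_RW_SYNCIO), 4096) yields the
 * string "WS": a synchronous write. This mirrors fill_rwbs() above but
 * works from request flags rather than from a recorded blk_io_trace.
 */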

void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
{
	int rw = rq->cmd_flags & 0x03;
	int bytes;

	if (blk_discard_rq(rq))
		rw |= (1 << BIO_RW_DISCARD);

	bytes = blk_rq_bytes(rq);

	blk_fill_rwbs(rwbs, rw, bytes);
}

#endif /* CONFIG_EVENT_TRACING */