// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/list.h>
#include <linux/blk-cgroup.h>

#include "../../block/blk.h"

#include <trace/events/block.h>

#include "trace_output.h"

#ifdef CONFIG_BLK_DEV_IO_TRACE

static unsigned int blktrace_seq __read_mostly = 1;

static struct trace_array *blk_tr;
static bool blk_tracer_enabled __read_mostly;

static LIST_HEAD(running_trace_list);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(running_trace_lock);

/* Select an alternative, minimalistic output rather than the original one */
#define TRACE_BLK_OPT_CLASSIC	0x1
#define TRACE_BLK_OPT_CGROUP	0x2
#define TRACE_BLK_OPT_CGNAME	0x4

static struct tracer_opt blk_tracer_opts[] = {
	/* Disable the minimalistic output by default */
	{ TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) },
#ifdef CONFIG_BLK_CGROUP
	{ TRACER_OPT(blk_cgroup, TRACE_BLK_OPT_CGROUP) },
	{ TRACER_OPT(blk_cgname, TRACE_BLK_OPT_CGNAME) },
#endif
	{ }
};

static struct tracer_flags blk_tracer_flags = {
	.val  = 0,
	.opts = blk_tracer_opts,
};

/* Global reference count of probes */
static DEFINE_MUTEX(blk_probe_mutex);
static int blk_probes_ref;

static void blk_register_tracepoints(void);
static void blk_unregister_tracepoints(void);

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len,
		       union kernfs_node_id *cgid)
{
	struct blk_io_trace *t;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	int pc = 0;
	int cpu = smp_processor_id();
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;

	if (blk_tracer) {
		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

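	/*
	 * Not going through the ftrace ring buffer: fall back to the
	 * relay channel.  Both paths share the tail at record_it below,
	 * which fills in the fields common to either sink.
	 */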
	if (!bt->rchan)
		return;

	t = relay_reserve(bt->rchan, sizeof(*t) + len + cgid_len);
	if (t) {
		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
record_it:
		t->device = bt->dev;
		t->action = action | (cgid ? __BLK_TN_CGROUP : 0);
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len + cgid_len;
		if (cgid)
			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
		memcpy((void *)t + sizeof(*t) + cgid_len, data, len);

		if (blk_tracer)
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct task_struct *tsk)
{
	unsigned long flags;
	struct blk_trace *bt;

	tsk->btrace_seq = blktrace_seq;
	spin_lock_irqsave(&running_trace_lock, flags);
	list_for_each_entry(bt, &running_trace_list, running_list) {
		trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
			   sizeof(tsk->comm), NULL);
	}
	spin_unlock_irqrestore(&running_trace_lock, flags);
}

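/*
 * Emit a BLK_TN_TIMESTAMP note pairing the trace clock with wall-clock
 * time, so user space can line trace timestamps up with real time.
 */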
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec64 now;
	unsigned long flags;
	u32 words[2];

	/* need to check user space to see if this breaks in y2038 or y2106 */
	ktime_get_real_ts64(&now);
	words[0] = (u32)now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
	local_irq_restore(flags);
}

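/*
 * Inject a free-form text message (a BLK_TN_MESSAGE note) into the
 * trace stream; this backs the blk_add_trace_msg() helper and the
 * debugfs "msg" file below.
 */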
void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
			  const char *fmt, ...)
{
	int n;
	va_list args;
	unsigned long flags;
	char *buf;

	if (unlikely(bt->trace_state != Blktrace_running &&
		     !blk_tracer_enabled))
		return;

	/*
	 * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
	 * message to the trace.
	 */
	if (!(bt->act_mask & BLK_TC_NOTIFY))
		return;

	local_irq_save(flags);
	buf = this_cpu_ptr(bt->msg_data);
	va_start(args, fmt);
	n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
		   blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
#else
	trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
#endif
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

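/*
 * Return 1 if this event should be dropped: the action mask, the sector
 * range and the pid filter each get a chance to veto it.  0 means
 * "log it".
 */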
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector && (sector < bt->start_lba || sector > bt->end_lba))
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
				 BLK_TC_ACT(BLK_TC_WRITE) };

#define BLK_TC_RAHEAD		BLK_TC_AHEAD
#define BLK_TC_PREFLUSH		BLK_TC_FLUSH

/* The ilog2() calls fall out because they're constant */
#define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \
	(ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name))
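/*
 * For example, MASK_TC_BIT(op_flags, SYNC) shifts the REQ_SYNC bit from
 * its position in the request flags straight into the BLK_TC_SYNC
 * position of the trace action word, with no conditional needed.
 */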

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
			    int op, int op_flags, u32 what, int error,
			    int pdu_len, void *pdu_data,
			    union kernfs_node_id *cgid)
{
	struct task_struct *tsk = current;
	struct ring_buffer_event *event = NULL;
	struct ring_buffer *buffer = NULL;
	struct blk_io_trace *t;
	unsigned long flags = 0;
	unsigned long *sequence;
	pid_t pid;
	int cpu, pc = 0;
	bool blk_tracer = blk_tracer_enabled;
	ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;

	if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
		return;

	what |= ddir_act[op_is_write(op) ? WRITE : READ];
	what |= MASK_TC_BIT(op_flags, SYNC);
	what |= MASK_TC_BIT(op_flags, RAHEAD);
	what |= MASK_TC_BIT(op_flags, META);
	what |= MASK_TC_BIT(op_flags, PREFLUSH);
	what |= MASK_TC_BIT(op_flags, FUA);
	if (op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE)
		what |= BLK_TC_ACT(BLK_TC_DISCARD);
	if (op == REQ_OP_FLUSH)
		what |= BLK_TC_ACT(BLK_TC_FLUSH);
	if (cgid)
		what |= __BLK_TA_CGROUP;

	pid = tsk->pid;
	if (act_log_check(bt, what, sector, pid))
		return;
	cpu = raw_smp_processor_id();

	if (blk_tracer) {
		tracing_record_cmdline(current);

		buffer = blk_tr->trace_buffer.buffer;
		pc = preempt_count();
		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
						  sizeof(*t) + pdu_len + cgid_len,
						  0, pc);
		if (!event)
			return;
		t = ring_buffer_event_data(event);
		goto record_it;
	}

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(tsk);

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes.
	 */
	local_irq_save(flags);
	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len + cgid_len);
	if (t) {
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
record_it:
		/*
		 * These two are not needed in ftrace as they are in the
		 * generic trace_entry, filled by tracing_generic_entry_update,
		 * but for the trace_event->bin() synthesizer benefit we do it
		 * here too.
		 */
		t->cpu = cpu;
		t->pid = pid;

		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->device = bt->dev;
		t->error = error;
		t->pdu_len = pdu_len + cgid_len;

		if (cgid_len)
			memcpy((void *)t + sizeof(*t), cgid, cgid_len);
		if (pdu_len)
			memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);

		if (blk_tracer) {
			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
	}

	local_irq_restore(flags);
}

static void blk_trace_free(struct blk_trace *bt)
{
	debugfs_remove(bt->msg_file);
	debugfs_remove(bt->dropped_file);
	relay_close(bt->rchan);
	debugfs_remove(bt->dir);
	free_percpu(bt->sequence);
	free_percpu(bt->msg_data);
	kfree(bt);
}

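/*
 * The tracepoint probes below are registered once for the whole system,
 * the first time any queue starts tracing, and unregistered again when
 * the last user goes away; blk_probes_ref counts those users.
 */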
static void get_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (++blk_probes_ref == 1)
		blk_register_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void put_probe_ref(void)
{
	mutex_lock(&blk_probe_mutex);
	if (!--blk_probes_ref)
		blk_unregister_tracepoints();
	mutex_unlock(&blk_probe_mutex);
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	blk_trace_free(bt);
	put_probe_ref();
}

static int __blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state != Blktrace_running)
		blk_trace_cleanup(bt);

	return 0;
}

int blk_trace_remove(struct request_queue *q)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_remove(q);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = blk_dropped_read,
	.llseek = default_llseek,
};

static ssize_t blk_msg_write(struct file *filp, const char __user *buffer,
			     size_t count, loff_t *ppos)
{
	char *msg;
	struct blk_trace *bt;

	if (count >= BLK_TN_MAX_MSG)
		return -EINVAL;

	msg = memdup_user_nul(buffer, count);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	bt = filp->private_data;
	__trace_note_message(bt, NULL, "%s", msg);
	kfree(msg);

	return count;
}

static const struct file_operations blk_msg_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.write = blk_msg_write,
	.llseek = noop_llseek,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);

	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   umode_t mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
				   &relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start = blk_subbuf_start_callback,
	.create_buf_file = blk_create_buf_file_callback,
	.remove_buf_file = blk_remove_buf_file_callback,
};

static void blk_trace_setup_lba(struct blk_trace *bt,
				struct block_device *bdev)
{
	struct hd_struct *part = NULL;

	if (bdev)
		part = bdev->bd_part;

	if (part) {
		bt->start_lba = part->start_sect;
		bt->end_lba = part->start_sect + part->nr_sects;
	} else {
		bt->start_lba = 0;
		bt->end_lba = -1ULL;
	}
}

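/*
 * From user space this is normally driven by the BLKTRACESETUP ioctl
 * handled in blk_trace_ioctl() below.  A minimal sketch of the caller
 * side (roughly what blktrace(8) does; buffer sizes are arbitrary
 * example values, error handling omitted):
 *
 *	struct blk_user_trace_setup buts = {
 *		.buf_size = 512 * 1024,
 *		.buf_nr   = 4,
 *	};
 *	int fd = open("/dev/sda", O_RDONLY);
 *	ioctl(fd, BLKTRACESETUP, &buts);
 *	ioctl(fd, BLKTRACESTART);
 */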
/*
 * Setup everything required to start tracing
 */
static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			      struct block_device *bdev,
			      struct blk_user_trace_setup *buts)
{
	struct blk_trace *bt = NULL;
	struct dentry *dir = NULL;
	int ret;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	if (!blk_debugfs_root)
		return -ENOENT;

	strncpy(buts->name, name, BLKTRACE_BDEV_SIZE);
	buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0';

	/*
	 * some device names have larger paths - convert the slashes
	 * to underscores for this to work as expected
	 */
	strreplace(buts->name, '/', '_');

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	ret = -ENOMEM;
	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto err;

	ret = -ENOENT;

	dir = debugfs_lookup(buts->name, blk_debugfs_root);
	if (!dir)
		bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
	if (!dir)
		goto err;

	bt->dev = dev;
	atomic_set(&bt->dropped, 0);
	INIT_LIST_HEAD(&bt->running_list);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
					       &blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops);
	if (!bt->msg_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
			       buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	blk_trace_setup_lba(bt, bdev);

	/* overwrite with user settings */
	if (buts->start_lba)
		bt->start_lba = buts->start_lba;
	if (buts->end_lba)
		bt->end_lba = buts->end_lba;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto err;

	get_probe_ref();

	ret = 0;
err:
	if (dir && !bt->dir)
		dput(dir);
	if (ret)
		blk_trace_free(bt);
	return ret;
}

static int __blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			     struct block_device *bdev, char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}
	return 0;
}

int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    struct block_device *bdev,
		    char __user *arg)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_setup(q, name, dev, bdev, arg);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
static int compat_blk_trace_setup(struct request_queue *q, char *name,
				  dev_t dev, struct block_device *bdev,
				  char __user *arg)
{
	struct blk_user_trace_setup buts;
	struct compat_blk_user_trace_setup cbuts;
	int ret;

	if (copy_from_user(&cbuts, arg, sizeof(cbuts)))
		return -EFAULT;

	buts = (struct blk_user_trace_setup) {
		.act_mask = cbuts.act_mask,
		.buf_size = cbuts.buf_size,
		.buf_nr = cbuts.buf_nr,
		.start_lba = cbuts.start_lba,
		.end_lba = cbuts.end_lba,
		.pid = cbuts.pid,
	};

	ret = do_blk_trace_setup(q, name, dev, bdev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
		__blk_trace_remove(q);
		return -EFAULT;
	}

	return 0;
}
#endif

static int __blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;
	struct blk_trace *bt = q->blk_trace;

	if (bt == NULL)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;
			spin_lock_irq(&running_trace_lock);
			list_add(&bt->running_list, &running_trace_list);
			spin_unlock_irq(&running_trace_lock);

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			spin_lock_irq(&running_trace_lock);
			list_del_init(&bt->running_list);
			spin_unlock_irq(&running_trace_lock);
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}

int blk_trace_startstop(struct request_queue *q, int start)
{
	int ret;

	mutex_lock(&q->blk_trace_mutex);
	ret = __blk_trace_startstop(q, start);
	mutex_unlock(&q->blk_trace_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/*
 * When reading or writing the blktrace sysfs files, the references to the
 * opened sysfs or device files should prevent the underlying block device
 * from being removed. So no further delete protection is really needed.
 */

/**
 * blk_trace_ioctl: - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&q->blk_trace_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = __blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64)
	case BLKTRACESETUP32:
		bdevname(bdev, b);
		ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg);
		break;
#endif
	case BLKTRACESTART:
		start = 1;
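		/* fall through: BLKTRACESTART and BLKTRACESTOP share a handler */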
	case BLKTRACESTOP:
		ret = __blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = __blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&q->blk_trace_mutex);
	return ret;
}

/**
 * blk_trace_shutdown: - stop and cleanup trace structures
 * @q:	the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	mutex_lock(&q->blk_trace_mutex);

	if (q->blk_trace) {
		__blk_trace_startstop(q, 0);
		__blk_trace_remove(q);
	}

	mutex_unlock(&q->blk_trace_mutex);
}

#ifdef CONFIG_BLK_CGROUP
static union kernfs_node_id *
blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	struct blk_trace *bt = q->blk_trace;

	if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
		return NULL;

	if (!bio->bi_css)
		return NULL;
	return cgroup_get_kernfs_id(bio->bi_css->cgroup);
}
#else
static union kernfs_node_id *
blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
	return NULL;
}
#endif

static union kernfs_node_id *
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
	if (!rq->bio)
		return NULL;
	/* Use the first bio */
	return blk_trace_bio_get_cgid(q, rq->bio);
}

/*
 * blktrace probes
 */

/**
 * blk_add_trace_rq - Add a trace for a request oriented action
 * @rq:		the source request
 * @error:	return status to log
 * @nr_bytes:	number of completed bytes
 * @what:	the action
 * @cgid:	the cgroup info
 *
 * Description:
 *     Records an action against a request. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_rq(struct request *rq, int error,
			     unsigned int nr_bytes, u32 what,
			     union kernfs_node_id *cgid)
{
	struct blk_trace *bt = rq->q->blk_trace;

	if (likely(!bt))
		return;

	if (blk_rq_is_passthrough(rq))
		what |= BLK_TC_ACT(BLK_TC_PC);
	else
		what |= BLK_TC_ACT(BLK_TC_FS);

	__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
			rq->cmd_flags, what, error, 0, NULL, cgid);
}

static void blk_add_trace_rq_insert(void *ignore,
				    struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_INSERT,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_issue(void *ignore,
				   struct request_queue *q, struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_ISSUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_requeue(void *ignore,
				     struct request_queue *q,
				     struct request *rq)
{
	blk_add_trace_rq(rq, 0, blk_rq_bytes(rq), BLK_TA_REQUEUE,
			 blk_trace_request_get_cgid(q, rq));
}

static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
				      int error, unsigned int nr_bytes)
{
	blk_add_trace_rq(rq, error, nr_bytes, BLK_TA_COMPLETE,
			 blk_trace_request_get_cgid(rq->q, rq));
}

/**
 * blk_add_trace_bio - Add a trace for a bio oriented action
 * @q:		queue the io is for
 * @bio:	the source bio
 * @what:	the action
 * @error:	error, if any
 *
 * Description:
 *     Records an action against a bio. Will log the bio offset + size.
 *
 **/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
			      u32 what, int error)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, what, error, 0, NULL,
			blk_trace_bio_get_cgid(q, bio));
}

static void blk_add_trace_bio_bounce(void *ignore,
				     struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}

static void blk_add_trace_bio_complete(void *ignore,
				       struct request_queue *q, struct bio *bio,
				       int error)
{
	blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}

static void blk_add_trace_bio_backmerge(void *ignore,
					struct request_queue *q,
					struct request *rq,
					struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}

static void blk_add_trace_bio_frontmerge(void *ignore,
					 struct request_queue *q,
					 struct request *rq,
					 struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}

static void blk_add_trace_bio_queue(void *ignore,
				    struct request_queue *q, struct bio *bio)
{
	blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}

static void blk_add_trace_getrq(void *ignore,
				struct request_queue *q,
				struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
					NULL, NULL);
	}
}

static void blk_add_trace_sleeprq(void *ignore,
				  struct request_queue *q,
				  struct bio *bio, int rw)
{
	if (bio)
		blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
	else {
		struct blk_trace *bt = q->blk_trace;

		if (bt)
			__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
					0, 0, NULL, NULL);
	}
}

static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt)
		__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
}

static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
				 unsigned int depth, bool explicit)
{
	struct blk_trace *bt = q->blk_trace;

	if (bt) {
		__be64 rpdu = cpu_to_be64(depth);
		u32 what;

		if (explicit)
			what = BLK_TA_UNPLUG_IO;
		else
			what = BLK_TA_UNPLUG_TIMER;

		__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
	}
}

966
blk_add_trace_split(void * ignore,struct request_queue * q,struct bio * bio,unsigned int pdu)967 static void blk_add_trace_split(void *ignore,
968 struct request_queue *q, struct bio *bio,
969 unsigned int pdu)
970 {
971 struct blk_trace *bt = q->blk_trace;
972
973 if (bt) {
974 __be64 rpdu = cpu_to_be64(pdu);
975
976 __blk_add_trace(bt, bio->bi_iter.bi_sector,
977 bio->bi_iter.bi_size, bio_op(bio), bio->bi_opf,
978 BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
979 &rpdu, blk_trace_bio_get_cgid(q, bio));
980 }
981 }
982
/**
 * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @bio:	the source bio
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper or raid target sometimes need to split a bio because
 *     it spans a stripe (or similar). Add a trace for that action.
 *
 **/
static void blk_add_trace_bio_remap(void *ignore,
				    struct request_queue *q, struct bio *bio,
				    dev_t dev, sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to = cpu_to_be32(bio_dev(bio));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
			bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
			sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
}

/**
 * blk_add_trace_rq_remap - Add a trace for a request-remap operation
 * @ignore:	trace callback data parameter (not used)
 * @q:		queue the io is for
 * @rq:		the source request
 * @dev:	target device
 * @from:	source sector
 *
 * Description:
 *     Device mapper remaps request to other devices.
 *     Add a trace for that action.
 *
 **/
static void blk_add_trace_rq_remap(void *ignore,
				   struct request_queue *q,
				   struct request *rq, dev_t dev,
				   sector_t from)
{
	struct blk_trace *bt = q->blk_trace;
	struct blk_io_trace_remap r;

	if (likely(!bt))
		return;

	r.device_from = cpu_to_be32(dev);
	r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
	r.sector_from = cpu_to_be64(from);

	__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
			rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
			sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
}

/**
 * blk_add_driver_data - Add binary message with driver-specific data
 * @q:		queue the io is for
 * @rq:		io request
 * @data:	driver-specific data
 * @len:	length of driver-specific data
 *
 * Description:
 *     Some drivers might want to write driver-specific data per request.
 *
 **/
void blk_add_driver_data(struct request_queue *q,
			 struct request *rq,
			 void *data, size_t len)
{
	struct blk_trace *bt = q->blk_trace;

	if (likely(!bt))
		return;

	__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
			BLK_TA_DRV_DATA, 0, len, data,
			blk_trace_request_get_cgid(q, rq));
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);

static void blk_register_tracepoints(void)
{
	int ret;

	ret = register_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	WARN_ON(ret);
	ret = register_trace_block_getrq(blk_add_trace_getrq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	WARN_ON(ret);
	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
	WARN_ON(ret);
	ret = register_trace_block_split(blk_add_trace_split, NULL);
	WARN_ON(ret);
	ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	WARN_ON(ret);
	ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	WARN_ON(ret);
}

static void blk_unregister_tracepoints(void)
{
	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
	unregister_trace_block_split(blk_add_trace_split, NULL);
	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
	unregister_trace_block_plug(blk_add_trace_plug, NULL);
	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
	unregister_trace_block_bio_queue(blk_add_trace_bio_queue, NULL);
	unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge, NULL);
	unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge, NULL);
	unregister_trace_block_bio_complete(blk_add_trace_bio_complete, NULL);
	unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce, NULL);
	unregister_trace_block_rq_complete(blk_add_trace_rq_complete, NULL);
	unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue, NULL);
	unregister_trace_block_rq_issue(blk_add_trace_rq_issue, NULL);
	unregister_trace_block_rq_insert(blk_add_trace_rq_insert, NULL);

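	/*
	 * Wait for any probe still executing on another CPU to finish
	 * before the probe functions can be considered unused.
	 */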
	tracepoint_synchronize_unregister();
}

/*
 * struct blk_io_tracer formatting routines
 */

static void fill_rwbs(char *rwbs, const struct blk_io_trace *t)
{
	int i = 0;
	int tc = t->action >> BLK_TC_SHIFT;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		rwbs[i++] = 'N';
		goto out;
	}

	if (tc & BLK_TC_FLUSH)
		rwbs[i++] = 'F';

	if (tc & BLK_TC_DISCARD)
		rwbs[i++] = 'D';
	else if (tc & BLK_TC_WRITE)
		rwbs[i++] = 'W';
	else if (t->bytes)
		rwbs[i++] = 'R';
	else
		rwbs[i++] = 'N';

	if (tc & BLK_TC_FUA)
		rwbs[i++] = 'F';
	if (tc & BLK_TC_AHEAD)
		rwbs[i++] = 'A';
	if (tc & BLK_TC_SYNC)
		rwbs[i++] = 'S';
	if (tc & BLK_TC_META)
		rwbs[i++] = 'M';
out:
	rwbs[i] = '\0';
}

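/*
 * An entry in the buffer is laid out as a struct blk_io_trace, followed
 * by an optional union kernfs_node_id (when the event carries cgroup
 * info), followed by the payload (pdu).  The helpers below decode that
 * layout.
 */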
static inline
const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
{
	return (const struct blk_io_trace *)ent;
}

static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
	return (void *)(te_blk_io_trace(ent) + 1) +
		(has_cg ? sizeof(union kernfs_node_id) : 0);
}

static inline const void *cgid_start(const struct trace_entry *ent)
{
	return (void *)(te_blk_io_trace(ent) + 1);
}

static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
	return te_blk_io_trace(ent)->pdu_len -
		(has_cg ? sizeof(union kernfs_node_id) : 0);
}

static inline u32 t_action(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->action;
}

static inline u32 t_bytes(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes;
}

static inline u32 t_sec(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->bytes >> 9;
}

static inline unsigned long long t_sector(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->sector;
}

static inline __u16 t_error(const struct trace_entry *ent)
{
	return te_blk_io_trace(ent)->error;
}

static __u64 get_pdu_int(const struct trace_entry *ent, bool has_cg)
{
	const __u64 *val = pdu_start(ent, has_cg);
	return be64_to_cpu(*val);
}

static void get_pdu_remap(const struct trace_entry *ent,
			  struct blk_io_trace_remap *r, bool has_cg)
{
	const struct blk_io_trace_remap *__r = pdu_start(ent, has_cg);
	__u64 sector_from = __r->sector_from;

	r->device_from = be32_to_cpu(__r->device_from);
	r->device_to = be32_to_cpu(__r->device_to);
	r->sector_from = be64_to_cpu(sector_from);
}

typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act,
				 bool has_cg);

static void blk_log_action_classic(struct trace_iterator *iter, const char *act,
				   bool has_cg)
{
	char rwbs[RWBS_LEN];
	unsigned long long ts = iter->ts;
	unsigned long nsec_rem = do_div(ts, NSEC_PER_SEC);
	unsigned secs = (unsigned long)ts;
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);

	trace_seq_printf(&iter->seq,
			 "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
			 MAJOR(t->device), MINOR(t->device), iter->cpu,
			 secs, nsec_rem, iter->ent->pid, act, rwbs);
}

static void blk_log_action(struct trace_iterator *iter, const char *act,
			   bool has_cg)
{
	char rwbs[RWBS_LEN];
	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);

	fill_rwbs(rwbs, t);
	if (has_cg) {
		const union kernfs_node_id *id = cgid_start(iter->ent);

		if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
			char blkcg_name_buf[NAME_MAX + 1] = "<...>";

			cgroup_path_from_kernfs_id(id, blkcg_name_buf,
						   sizeof(blkcg_name_buf));
			trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 blkcg_name_buf, act, rwbs);
		} else
			trace_seq_printf(&iter->seq,
					 "%3d,%-3d %x,%-x %2s %3s ",
					 MAJOR(t->device), MINOR(t->device),
					 id->ino, id->generation, act, rwbs);
	} else
		trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
				 MAJOR(t->device), MINOR(t->device), act, rwbs);
}

static void blk_log_dump_pdu(struct trace_seq *s,
			     const struct trace_entry *ent, bool has_cg)
{
	const unsigned char *pdu_buf;
	int pdu_len;
	int i, end;

	pdu_buf = pdu_start(ent, has_cg);
	pdu_len = pdu_real_len(ent, has_cg);

	if (!pdu_len)
		return;

	/*
	 * Find the last zero that still gets printed: 'end' points one
	 * past the final non-zero byte, and everything beyond it is
	 * elided.
	 */
	for (end = pdu_len - 1; end >= 0; end--)
		if (pdu_buf[end])
			break;
	end++;

	trace_seq_putc(s, '(');

	for (i = 0; i < pdu_len; i++) {

		trace_seq_printf(s, "%s%02x",
				 i == 0 ? "" : " ", pdu_buf[i]);

		/*
		 * stop when the rest is just zeroes and indicate so
		 * with a ".." appended
		 */
		if (i == end && end != pdu_len - 1) {
			trace_seq_puts(s, " ..) ");
			return;
		}
	}

	trace_seq_puts(s, ") ");
}

static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent,
			    bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		trace_seq_printf(s, "%u ", t_bytes(ent));
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%s]\n", cmd);
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%s]\n",
					 t_sector(ent), t_sec(ent), cmd);
		else
			trace_seq_printf(s, "[%s]\n", cmd);
	}
}

static void blk_log_with_error(struct trace_seq *s,
			       const struct trace_entry *ent, bool has_cg)
{
	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
		blk_log_dump_pdu(s, ent, has_cg);
		trace_seq_printf(s, "[%d]\n", t_error(ent));
	} else {
		if (t_sec(ent))
			trace_seq_printf(s, "%llu + %u [%d]\n",
					 t_sector(ent),
					 t_sec(ent), t_error(ent));
		else
			trace_seq_printf(s, "%llu [%d]\n",
					 t_sector(ent), t_error(ent));
	}
}

static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent,
			  bool has_cg)
{
	struct blk_io_trace_remap r = { .device_from = 0, };

	get_pdu_remap(ent, &r, has_cg);
	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
			 t_sector(ent), t_sec(ent),
			 MAJOR(r.device_from), MINOR(r.device_from),
			 (unsigned long long)r.sector_from);
}

static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent,
			 bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s]\n", cmd);
}

static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent,
			   bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent, has_cg));
}

static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent,
			  bool has_cg)
{
	char cmd[TASK_COMM_LEN];

	trace_find_cmdline(ent->pid, cmd);

	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
			 get_pdu_int(ent, has_cg), cmd);
}

static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent,
			bool has_cg)
{
	trace_seq_putmem(s, pdu_start(ent, has_cg),
			 pdu_real_len(ent, has_cg));
	trace_seq_putc(s, '\n');
}

/*
 * struct tracer operations
 */

static void blk_tracer_print_header(struct seq_file *m)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return;
	seq_puts(m, "# DEV   CPU TIMESTAMP     PID ACT FLG\n"
		    "#  |     |     |           |   |   |\n");
}

static void blk_tracer_start(struct trace_array *tr)
{
	blk_tracer_enabled = true;
}

static int blk_tracer_init(struct trace_array *tr)
{
	blk_tr = tr;
	blk_tracer_start(tr);
	return 0;
}

static void blk_tracer_stop(struct trace_array *tr)
{
	blk_tracer_enabled = false;
}

static void blk_tracer_reset(struct trace_array *tr)
{
	blk_tracer_stop(tr);
}

static const struct {
	const char *act[2];
	void (*print)(struct trace_seq *s, const struct trace_entry *ent,
		      bool has_cg);
} what2act[] = {
	[__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
	[__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
	[__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic },
	[__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic },
	[__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic },
	[__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error },
	[__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic },
	[__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error },
	[__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug },
	[__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug },
	[__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug },
	[__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic },
	[__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split },
	[__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic },
	[__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap },
};

static enum print_line_t print_one_line(struct trace_iterator *iter,
					bool classic)
{
	struct trace_array *tr = iter->tr;
	struct trace_seq *s = &iter->seq;
	const struct blk_io_trace *t;
	u16 what;
	bool long_act;
	blk_log_action_t *log_action;
	bool has_cg;

	t = te_blk_io_trace(iter->ent);
	what = (t->action & ((1 << BLK_TC_SHIFT) - 1)) & ~__BLK_TA_CGROUP;
	long_act = !!(tr->trace_flags & TRACE_ITER_VERBOSE);
	log_action = classic ? &blk_log_action_classic : &blk_log_action;
	has_cg = t->action & __BLK_TA_CGROUP;

	if ((t->action & ~__BLK_TN_CGROUP) == BLK_TN_MESSAGE) {
		log_action(iter, long_act ? "message" : "m", has_cg);
		blk_log_msg(s, iter->ent, has_cg);
		return trace_handle_return(s);
	}

	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
		trace_seq_printf(s, "Unknown action %x\n", what);
	else {
		log_action(iter, what2act[what].act[long_act], has_cg);
		what2act[what].print(s, iter->ent, has_cg);
	}

	return trace_handle_return(s);
}

static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
					       int flags, struct trace_event *event)
{
	return print_one_line(iter, false);
}

static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{
	struct trace_seq *s = &iter->seq;
	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
	const int offset = offsetof(struct blk_io_trace, sector);
	struct blk_io_trace old = {
		.magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION,
		.time = iter->ts,
	};

	trace_seq_putmem(s, &old, offset);
	trace_seq_putmem(s, &t->sector,
			 sizeof(old) - offset + t->pdu_len);
}

static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
			     struct trace_event *event)
{
	blk_trace_synthesize_old_trace(iter);

	return trace_handle_return(&iter->seq);
}

static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
{
	if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC))
		return TRACE_TYPE_UNHANDLED;

	return print_one_line(iter, true);
}

static int
blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	/* don't output context-info for blk_classic output */
	if (bit == TRACE_BLK_OPT_CLASSIC) {
		if (set)
			tr->trace_flags &= ~TRACE_ITER_CONTEXT_INFO;
		else
			tr->trace_flags |= TRACE_ITER_CONTEXT_INFO;
	}
	return 0;
}

static struct tracer blk_tracer __read_mostly = {
	.name = "blk",
	.init = blk_tracer_init,
	.reset = blk_tracer_reset,
	.start = blk_tracer_start,
	.stop = blk_tracer_stop,
	.print_header = blk_tracer_print_header,
	.print_line = blk_tracer_print_line,
	.flags = &blk_tracer_flags,
	.set_flag = blk_tracer_set_flag,
};

static struct trace_event_functions trace_blk_event_funcs = {
	.trace = blk_trace_event_print,
	.binary = blk_trace_event_print_binary,
};

static struct trace_event trace_blk_event = {
	.type = TRACE_BLK,
	.funcs = &trace_blk_event_funcs,
};

static int __init init_blk_tracer(void)
{
	if (!register_trace_event(&trace_blk_event)) {
		pr_warn("Warning: could not register block events\n");
		return 1;
	}

	if (register_tracer(&blk_tracer) != 0) {
		pr_warn("Warning: could not register the block tracer\n");
		unregister_trace_event(&trace_blk_event);
		return 1;
	}

	return 0;
}

device_initcall(init_blk_tracer);

static int blk_trace_remove_queue(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (bt == NULL)
		return -EINVAL;

	put_probe_ref();
	blk_trace_free(bt);
	return 0;
}

/*
 * Setup everything required to start tracing
 */
static int blk_trace_setup_queue(struct request_queue *q,
				 struct block_device *bdev)
{
	struct blk_trace *bt = NULL;
	int ret = -ENOMEM;

	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		return -ENOMEM;

	bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char));
	if (!bt->msg_data)
		goto free_bt;

	bt->dev = bdev->bd_dev;
	bt->act_mask = (u16)-1;

	blk_trace_setup_lba(bt, bdev);

	ret = -EBUSY;
	if (cmpxchg(&q->blk_trace, NULL, bt))
		goto free_bt;

	get_probe_ref();
	return 0;

free_bt:
	blk_trace_free(bt);
	return ret;
}

/*
 * sysfs interface to enable and configure tracing
 */

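/*
 * A quick sketch of the view this implements, assuming a whole-disk
 * device named sda (the attribute group below is named "trace", so the
 * files land under the device's sysfs directory):
 *
 *	# echo 1 > /sys/block/sda/trace/enable
 *	# echo read,write > /sys/block/sda/trace/act_mask
 *	# cat /sys/block/sda/trace/act_mask
 *	# echo 0 > /sys/block/sda/trace/enable
 */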
static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf);
static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count);
#define BLK_TRACE_DEVICE_ATTR(_name) \
	DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \
		    sysfs_blk_trace_attr_show, \
		    sysfs_blk_trace_attr_store)

static BLK_TRACE_DEVICE_ATTR(enable);
static BLK_TRACE_DEVICE_ATTR(act_mask);
static BLK_TRACE_DEVICE_ATTR(pid);
static BLK_TRACE_DEVICE_ATTR(start_lba);
static BLK_TRACE_DEVICE_ATTR(end_lba);

static struct attribute *blk_trace_attrs[] = {
	&dev_attr_enable.attr,
	&dev_attr_act_mask.attr,
	&dev_attr_pid.attr,
	&dev_attr_start_lba.attr,
	&dev_attr_end_lba.attr,
	NULL
};

struct attribute_group blk_trace_attr_group = {
	.name  = "trace",
	.attrs = blk_trace_attrs,
};

static const struct {
	int mask;
	const char *str;
} mask_maps[] = {
	{ BLK_TC_READ, "read" },
	{ BLK_TC_WRITE, "write" },
	{ BLK_TC_FLUSH, "flush" },
	{ BLK_TC_SYNC, "sync" },
	{ BLK_TC_QUEUE, "queue" },
	{ BLK_TC_REQUEUE, "requeue" },
	{ BLK_TC_ISSUE, "issue" },
	{ BLK_TC_COMPLETE, "complete" },
	{ BLK_TC_FS, "fs" },
	{ BLK_TC_PC, "pc" },
	{ BLK_TC_NOTIFY, "notify" },
	{ BLK_TC_AHEAD, "ahead" },
	{ BLK_TC_META, "meta" },
	{ BLK_TC_DISCARD, "discard" },
	{ BLK_TC_DRV_DATA, "drv_data" },
	{ BLK_TC_FUA, "fua" },
};

static int blk_trace_str2mask(const char *str)
{
	int i;
	int mask = 0;
	char *buf, *s, *token;

	buf = kstrdup(str, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;
	s = strstrip(buf);

	while (1) {
		token = strsep(&s, ",");
		if (token == NULL)
			break;

		if (*token == '\0')
			continue;

		for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
			if (strcasecmp(token, mask_maps[i].str) == 0) {
				mask |= mask_maps[i].mask;
				break;
			}
		}
		if (i == ARRAY_SIZE(mask_maps)) {
			mask = -EINVAL;
			break;
		}
	}
	kfree(buf);

	return mask;
}

static ssize_t blk_trace_mask2str(char *buf, int mask)
{
	int i;
	char *p = buf;

	for (i = 0; i < ARRAY_SIZE(mask_maps); i++) {
		if (mask & mask_maps[i].mask) {
			p += sprintf(p, "%s%s",
				     (p == buf) ? "" : ",", mask_maps[i].str);
		}
	}
	*p++ = '\n';

	return p - buf;
}

static struct request_queue *blk_trace_get_queue(struct block_device *bdev)
{
	if (bdev->bd_disk == NULL)
		return NULL;

	return bdev_get_queue(bdev);
}

static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct request_queue *q;
	struct block_device *bdev;
	ssize_t ret = -ENXIO;

	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	if (attr == &dev_attr_enable) {
		ret = sprintf(buf, "%u\n", !!q->blk_trace);
		goto out_unlock_bdev;
	}

	if (q->blk_trace == NULL)
		ret = sprintf(buf, "disabled\n");
	else if (attr == &dev_attr_act_mask)
		ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
	else if (attr == &dev_attr_pid)
		ret = sprintf(buf, "%u\n", q->blk_trace->pid);
	else if (attr == &dev_attr_start_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
	else if (attr == &dev_attr_end_lba)
		ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret;
}

static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	struct block_device *bdev;
	struct request_queue *q;
	struct hd_struct *p;
	u64 value;
	ssize_t ret = -EINVAL;

	if (count == 0)
		goto out;

	if (attr == &dev_attr_act_mask) {
		if (kstrtoull(buf, 0, &value)) {
			/* Assume it is a list of trace category names */
			ret = blk_trace_str2mask(buf);
			if (ret < 0)
				goto out;
			value = ret;
		}
	} else if (kstrtoull(buf, 0, &value))
		goto out;

	ret = -ENXIO;

	p = dev_to_part(dev);
	bdev = bdget(part_devt(p));
	if (bdev == NULL)
		goto out;

	q = blk_trace_get_queue(bdev);
	if (q == NULL)
		goto out_bdput;

	mutex_lock(&q->blk_trace_mutex);

	if (attr == &dev_attr_enable) {
		if (!!value == !!q->blk_trace) {
			ret = 0;
			goto out_unlock_bdev;
		}
		if (value)
			ret = blk_trace_setup_queue(q, bdev);
		else
			ret = blk_trace_remove_queue(q);
		goto out_unlock_bdev;
	}

	ret = 0;
	if (q->blk_trace == NULL)
		ret = blk_trace_setup_queue(q, bdev);

	if (ret == 0) {
		if (attr == &dev_attr_act_mask)
			q->blk_trace->act_mask = value;
		else if (attr == &dev_attr_pid)
			q->blk_trace->pid = value;
		else if (attr == &dev_attr_start_lba)
			q->blk_trace->start_lba = value;
		else if (attr == &dev_attr_end_lba)
			q->blk_trace->end_lba = value;
	}

out_unlock_bdev:
	mutex_unlock(&q->blk_trace_mutex);
out_bdput:
	bdput(bdev);
out:
	return ret ? ret : count;
}

int blk_trace_init_sysfs(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &blk_trace_attr_group);
}

void blk_trace_remove_sysfs(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &blk_trace_attr_group);
}

#endif /* CONFIG_BLK_DEV_IO_TRACE */

#ifdef CONFIG_EVENT_TRACING

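/*
 * Build the "rwbs" string used by the block trace events: one letter
 * for the operation (R/W/D/F/N, plus 'E' for secure erase) followed by
 * modifier flags (F = FUA, A = readahead, S = sync, M = metadata).
 */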
void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes)
{
	int i = 0;

	if (op & REQ_PREFLUSH)
		rwbs[i++] = 'F';

	switch (op & REQ_OP_MASK) {
	case REQ_OP_WRITE:
	case REQ_OP_WRITE_SAME:
		rwbs[i++] = 'W';
		break;
	case REQ_OP_DISCARD:
		rwbs[i++] = 'D';
		break;
	case REQ_OP_SECURE_ERASE:
		rwbs[i++] = 'D';
		rwbs[i++] = 'E';
		break;
	case REQ_OP_FLUSH:
		rwbs[i++] = 'F';
		break;
	case REQ_OP_READ:
		rwbs[i++] = 'R';
		break;
	default:
		rwbs[i++] = 'N';
	}

	if (op & REQ_FUA)
		rwbs[i++] = 'F';
	if (op & REQ_RAHEAD)
		rwbs[i++] = 'A';
	if (op & REQ_SYNC)
		rwbs[i++] = 'S';
	if (op & REQ_META)
		rwbs[i++] = 'M';

	rwbs[i] = '\0';
}
EXPORT_SYMBOL_GPL(blk_fill_rwbs);

#endif /* CONFIG_EVENT_TRACING */