/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2. See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
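/*
 * Userspace talks to this driver through character nodes under /dev/bsg/
 * using struct sg_io_v4 and the SG_IO ioctl handled below.  A minimal
 * userspace sketch of a 6-byte INQUIRY follows; the device node name is
 * only an example and error handling is omitted for brevity:
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <scsi/sg.h>
 *	#include <linux/bsg.h>
 *
 *	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };	// INQUIRY
 *	unsigned char buf[96], sense[32];
 *	struct sg_io_v4 hdr;
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);
 *
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (uintptr_t)cdb;
 *	hdr.request_len = sizeof(cdb);
 *	hdr.din_xferp = (uintptr_t)buf;
 *	hdr.din_xfer_len = sizeof(buf);
 *	hdr.response = (uintptr_t)sense;
 *	hdr.max_response_len = sizeof(sense);
 *	hdr.timeout = 30000;	// milliseconds
 *
 *	ioctl(fd, SG_IO, &hdr);
 */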
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/cdev.h>
#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/slab.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

#define bsg_dbg(bd, fmt, ...) \
	pr_debug("%s: " fmt, (bd)->name, ##__VA_ARGS__)

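/*
 * One bsg_device is created per registered request queue the first time
 * its /dev/bsg node is opened.  It is refcounted across opens and hashed
 * by minor number in bsg_device_list below.
 */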
struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct hlist_node dev_list;
	refcount_t ref_count;
	char name[20];
	int max_queue;
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

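/* Map a minor number to its bucket in the open-device hash. */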
static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

#define uptr64(val) ((void __user *)(uintptr_t)(val))

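/*
 * The sg v4 header carries its own protocol tag; this driver only
 * accepts plain SCSI commands.
 */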
static int bsg_scsi_check_proto(struct sg_io_v4 *hdr)
{
	if (hdr->protocol != BSG_PROTOCOL_SCSI ||
	    hdr->subprotocol != BSG_SUB_PROTOCOL_SCSI_CMD)
		return -EINVAL;
	return 0;
}

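/*
 * Copy the CDB in from userspace, allocating a buffer when it is longer
 * than the BLK_MAX_CDB bytes embedded in the request, and verify that
 * the caller is allowed to issue it.  On failure the partially set up
 * command is released by the free_rq path in bsg_map_hdr().
 */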
static int bsg_scsi_fill_hdr(struct request *rq, struct sg_io_v4 *hdr,
		fmode_t mode)
{
	struct scsi_request *sreq = scsi_req(rq);

	sreq->cmd_len = hdr->request_len;
	if (sreq->cmd_len > BLK_MAX_CDB) {
		sreq->cmd = kzalloc(sreq->cmd_len, GFP_KERNEL);
		if (!sreq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(sreq->cmd, uptr64(hdr->request), sreq->cmd_len))
		return -EFAULT;
	if (blk_verify_command(sreq->cmd, mode))
		return -EPERM;
	return 0;
}

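/*
 * Translate the completed request into the output members of the sg v4
 * header: status bytes, optional sense data, and the residual counts for
 * both transfer directions.
 */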
static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
{
	struct scsi_request *sreq = scsi_req(rq);
	int ret = 0;

	/*
	 * fill in all the output members
	 */
	hdr->device_status = sreq->result & 0xff;
	hdr->transport_status = host_byte(sreq->result);
	hdr->driver_status = driver_byte(sreq->result);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (sreq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					sreq->sense_len);

		if (copy_to_user(uptr64(hdr->response), sreq->sense, len))
			ret = -EFAULT;
		else
			hdr->response_len = len;
	}

	if (rq->next_rq) {
		hdr->dout_resid = sreq->resid_len;
		hdr->din_resid = scsi_req(rq->next_rq)->resid_len;
	} else if (rq_data_dir(rq) == READ) {
		hdr->din_resid = sreq->resid_len;
	} else {
		hdr->dout_resid = sreq->resid_len;
	}

	return ret;
}

static void bsg_scsi_free_rq(struct request *rq)
{
	scsi_req_free_cmd(scsi_req(rq));
}

static const struct bsg_ops bsg_scsi_ops = {
	.check_proto	= bsg_scsi_check_proto,
	.fill_hdr	= bsg_scsi_fill_hdr,
	.complete_rq	= bsg_scsi_complete_rq,
	.free_rq	= bsg_scsi_free_rq,
};

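/*
 * Turn an sg v4 header into an executable request: validate the guard
 * and protocol, let the transport ops fill in the command, apply the
 * timeout, and map the user buffers (allocating a second request for
 * bidirectional transfers).  On error all partially acquired resources
 * are unwound.
 */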
static struct request *
bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
{
	struct request *rq, *next_rq = NULL;
	int ret;

	if (!q->bsg_dev.class_dev)
		return ERR_PTR(-ENXIO);

	if (hdr->guard != 'Q')
		return ERR_PTR(-EINVAL);

	ret = q->bsg_dev.ops->check_proto(hdr);
	if (ret)
		return ERR_PTR(ret);

	rq = blk_get_request(q, hdr->dout_xfer_len ?
			REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, 0);
	if (IS_ERR(rq))
		return rq;

	ret = q->bsg_dev.ops->fill_hdr(rq, hdr, mode);
	if (ret)
		goto out;

	rq->timeout = msecs_to_jiffies(hdr->timeout);
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	if (hdr->dout_xfer_len && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, REQ_OP_SCSI_IN, 0);
		if (IS_ERR(next_rq)) {
			ret = PTR_ERR(next_rq);
			goto out;
		}

		rq->next_rq = next_rq;
		ret = blk_rq_map_user(q, next_rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out_free_nextrq;
	}

	if (hdr->dout_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->dout_xferp),
				hdr->dout_xfer_len, GFP_KERNEL);
	} else if (hdr->din_xfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
				hdr->din_xfer_len, GFP_KERNEL);
	}

	if (ret)
		goto out_unmap_nextrq;
	return rq;

out_unmap_nextrq:
	if (rq->next_rq)
		blk_rq_unmap_user(rq->next_rq->bio);
out_free_nextrq:
	if (rq->next_rq)
		blk_put_request(rq->next_rq);
out:
	q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ERR_PTR(ret);
}

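/*
 * Complete an SG_IO request: copy status and sense back into the user
 * header, then unmap the user buffers and release the request(s).
 */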
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret;

	ret = rq->q->bsg_dev.ops->complete_rq(rq, hdr);

	if (rq->next_rq) {
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	}

	blk_rq_unmap_user(bio);
	rq->q->bsg_dev.ops->free_rq(rq);
	blk_put_request(rq);
	return ret;
}

static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);
	bd->max_queue = BSG_DEFAULT_CMDS;
	INIT_HLIST_NODE(&bd->dev_list);
	return bd;
}

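/*
 * Drop a reference on a bsg_device; the last put unhashes it and
 * releases its queue reference.
 */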
static int bsg_put_device(struct bsg_device *bd)
{
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	if (!refcount_dec_and_test(&bd->ref_count)) {
		mutex_unlock(&bsg_mutex);
		return 0;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	bsg_dbg(bd, "tearing down\n");

	/*
	 * close can always block
	 */
	kfree(bd);
	blk_put_queue(q);
	return 0;
}

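/*
 * Create the bsg_device for a freshly opened minor and hash it so that
 * later opens of the same queue can share it.  Called with bsg_mutex
 * held.
 */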
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	unsigned char buf[32];

	lockdep_assert_held(&bsg_mutex);

	if (!blk_get_queue(rq))
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	refcount_set(&bd->ref_count, 1);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	bsg_dbg(bd, "bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	return bd;
}

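/*
 * Look up an already open bsg_device for this queue, taking a reference
 * if one is found.  Called with bsg_mutex held.
 */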
static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;

	lockdep_assert_held(&bsg_mutex);

	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			refcount_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));

	if (!bcd) {
		bd = ERR_PTR(-ENODEV);
		goto out_unlock;
	}

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (!bd)
		bd = bsg_add_device(inode, bcd->queue, file);

out_unlock:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	bd = bsg_get_device(inode, file);

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

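/*
 * ioctl entry point: the queue depth ioctls are handled here, the
 * generic SCSI ioctls are forwarded to scsi_cmd_ioctl(), and SG_IO
 * maps, executes and completes an sg v4 request synchronously.
 */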
static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
	/*
	 * our own ioctls
	 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd->queue, &hdr, file->f_mode);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations bsg_fops = {
	.open		= bsg_open,
	.release	= bsg_release,
	.unlocked_ioctl	= bsg_ioctl,
	.owner		= THIS_MODULE,
	.llseek		= default_llseek,
};

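/**
 * bsg_unregister_queue - remove the bsg device for a request queue
 * @q: the request queue that was passed to bsg_register_queue()
 */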
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	if (q->kobj.sd)
		sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

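/**
 * bsg_register_queue - allocate a minor and create the bsg device node
 * @q: the request queue to expose
 * @parent: parent device for the class device
 * @name: name of the node under /dev/bsg/
 * @ops: transport callbacks used to build and complete requests
 */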
int bsg_register_queue(struct request_queue *q, struct device *parent,
		const char *name, const struct bsg_ops *ops)
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret;
	struct device *class_dev = NULL;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!queue_is_rq_based(q))
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
	if (ret < 0) {
		if (ret == -ENOSPC) {
			printk(KERN_ERR "bsg: too many bsg devices\n");
			ret = -EINVAL;
		}
		goto unlock;
	}

	bcd->minor = ret;
	bcd->queue = q;
	bcd->ops = ops;
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", name);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto idr_remove;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
idr_remove:
	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}

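/*
 * Convenience wrapper for SCSI queues: rejects queues that do not accept
 * SCSI passthrough commands and registers with the default SCSI ops.
 */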
int bsg_scsi_register_queue(struct request_queue *q, struct device *parent)
{
	if (!blk_queue_scsi_passthrough(q)) {
		WARN_ONCE(true, "Attempt to register a non-SCSI queue\n");
		return -EINVAL;
	}

	return bsg_register_queue(q, parent, dev_name(parent), &bsg_scsi_ops);
}
EXPORT_SYMBOL_GPL(bsg_scsi_register_queue);

static struct cdev bsg_cdev;

static char *bsg_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

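/*
 * Module init: set up the open-device hash, the "bsg" class (providing
 * the /dev/bsg/ devnode prefix), a char major spanning BSG_MAX_DEVS
 * minors, and the shared cdev.
 */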
static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class))
		return PTR_ERR(bsg_class);
	bsg_class->devnode = bsg_devnode;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);