// SPDX-License-Identifier: GPL-2.0
#include <linux/capability.h>
#include <linux/compat.h>
#include <linux/blkdev.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/blkpg.h>
#include <linux/hdreg.h>
#include <linux/backing-dev.h>
#include <linux/fs.h>
#include <linux/blktrace_api.h>
#include <linux/pr.h>
#include <linux/uaccess.h>
#include "blk.h"

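/*
 * Common handler for the BLKPG partition ioctl: add, resize or delete a
 * partition on a whole-disk device.  Requires CAP_SYS_ADMIN; the start and
 * length arrive in bytes and are converted to sectors before being passed
 * to the partition code.
 */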
static int blkpg_do_ioctl(struct block_device *bdev,
			  struct blkpg_partition __user *upart, int op)
{
	struct gendisk *disk = bdev->bd_disk;
	struct blkpg_partition p;
	long long start, length;

	if (disk->flags & GENHD_FL_NO_PART)
		return -EINVAL;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
		return -EFAULT;
	if (bdev_is_partition(bdev))
		return -EINVAL;

	if (p.pno <= 0)
		return -EINVAL;

	if (op == BLKPG_DEL_PARTITION)
		return bdev_del_partition(disk, p.pno);

	start = p.start >> SECTOR_SHIFT;
	length = p.length >> SECTOR_SHIFT;

	switch (op) {
	case BLKPG_ADD_PARTITION:
		/* check if partition is aligned to blocksize */
		if (p.start & (bdev_logical_block_size(bdev) - 1))
			return -EINVAL;
		return bdev_add_partition(disk, p.pno, start, length);
	case BLKPG_RESIZE_PARTITION:
		return bdev_resize_partition(disk, p.pno, start, length);
	default:
		return -EINVAL;
	}
}

static int blkpg_ioctl(struct block_device *bdev,
		       struct blkpg_ioctl_arg __user *arg)
{
	struct blkpg_partition __user *udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, udata, op);
}

#ifdef CONFIG_COMPAT
struct compat_blkpg_ioctl_arg {
	compat_int_t op;
	compat_int_t flags;
	compat_int_t datalen;
	compat_caddr_t data;
};

static int compat_blkpg_ioctl(struct block_device *bdev,
			      struct compat_blkpg_ioctl_arg __user *arg)
{
	compat_caddr_t udata;
	int op;

	if (get_user(op, &arg->op) || get_user(udata, &arg->data))
		return -EFAULT;

	return blkpg_do_ioctl(bdev, compat_ptr(udata), op);
}
#endif

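/*
 * BLKDISCARD: discard a byte range described by a {start, len} pair copied
 * from user space.  The range must be 512-byte aligned and within the
 * device; the page cache over the range is invalidated before the discard
 * is issued.
 */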
static int blk_ioctl_discard(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2];
	uint64_t start, len;
	struct inode *inode = bdev->bd_inode;
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];

	if (start & 511)
		return -EINVAL;
	if (len & 511)
		return -EINVAL;

	if (start + len > bdev_nr_bytes(bdev))
		return -EINVAL;

	filemap_invalidate_lock(inode->i_mapping);
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (err)
		goto fail;
	err = blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return err;
}

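/*
 * BLKSECDISCARD: securely erase a 512-byte aligned byte range, invalidating
 * the page cache over the range first.  Only supported if the device
 * advertises secure erase capability.
 */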
static int blk_ioctl_secure_erase(struct block_device *bdev, blk_mode_t mode,
		void __user *argp)
{
	uint64_t start, len;
	uint64_t range[2];
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;
	if (!bdev_max_secure_erase_sectors(bdev))
		return -EOPNOTSUPP;
	if (copy_from_user(range, argp, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	if ((start & 511) || (len & 511))
		return -EINVAL;
	if (start + len > bdev_nr_bytes(bdev))
		return -EINVAL;

	filemap_invalidate_lock(bdev->bd_inode->i_mapping);
	err = truncate_bdev_range(bdev, mode, start, start + len - 1);
	if (!err)
		err = blkdev_issue_secure_erase(bdev, start >> 9, len >> 9,
						GFP_KERNEL);
	filemap_invalidate_unlock(bdev->bd_inode->i_mapping);
	return err;
}

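/*
 * BLKZEROOUT: zero a 512-byte aligned byte range without unmapping it
 * (BLKDEV_ZERO_NOUNMAP), invalidating the page cache over the range first.
 */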
static int blk_ioctl_zeroout(struct block_device *bdev, blk_mode_t mode,
		unsigned long arg)
{
	uint64_t range[2];
	uint64_t start, end, len;
	struct inode *inode = bdev->bd_inode;
	int err;

	if (!(mode & BLK_OPEN_WRITE))
		return -EBADF;

	if (copy_from_user(range, (void __user *)arg, sizeof(range)))
		return -EFAULT;

	start = range[0];
	len = range[1];
	end = start + len - 1;

	if (start & 511)
		return -EINVAL;
	if (len & 511)
		return -EINVAL;
	if (end >= (uint64_t)bdev_nr_bytes(bdev))
		return -EINVAL;
	if (end < start)
		return -EINVAL;

	/* Invalidate the page cache, including dirty pages */
	filemap_invalidate_lock(inode->i_mapping);
	err = truncate_bdev_range(bdev, mode, start, end);
	if (err)
		goto fail;

	err = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
				   BLKDEV_ZERO_NOUNMAP);

fail:
	filemap_invalidate_unlock(inode->i_mapping);
	return err;
}

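/*
 * Thin put_user() wrappers used to return scalar ioctl results with the
 * exact width that each ioctl command promises to user space.
 */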
static int put_ushort(unsigned short __user *argp, unsigned short val)
{
	return put_user(val, argp);
}

static int put_int(int __user *argp, int val)
{
	return put_user(val, argp);
}

static int put_uint(unsigned int __user *argp, unsigned int val)
{
	return put_user(val, argp);
}

static int put_long(long __user *argp, long val)
{
	return put_user(val, argp);
}

static int put_ulong(unsigned long __user *argp, unsigned long val)
{
	return put_user(val, argp);
}

static int put_u64(u64 __user *argp, u64 val)
{
	return put_user(val, argp);
}

#ifdef CONFIG_COMPAT
static int compat_put_long(compat_long_t __user *argp, long val)
{
	return put_user(val, argp);
}

static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
{
	return put_user(val, argp);
}
#endif

#ifdef CONFIG_COMPAT
/*
 * This is the equivalent of compat_ptr_ioctl(), to be used by block
 * drivers that implement only commands that are completely compatible
 * between 32-bit and 64-bit user space
 */
int blkdev_compat_ptr_ioctl(struct block_device *bdev, blk_mode_t mode,
			unsigned cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;

	if (disk->fops->ioctl)
		return disk->fops->ioctl(bdev, mode, cmd,
					 (unsigned long)compat_ptr(arg));

	return -ENOIOCTLCMD;
}
EXPORT_SYMBOL(blkdev_compat_ptr_ioctl);
#endif

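/*
 * Persistent reservation (IOC_PR_*) ioctls.  These copy the request from
 * user space, validate the flags and forward the operation to the driver's
 * pr_ops.
 */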
static bool blkdev_pr_allowed(struct block_device *bdev, blk_mode_t mode)
{
	/* no sense to make reservations for partitions */
	if (bdev_is_partition(bdev))
		return false;

	if (capable(CAP_SYS_ADMIN))
		return true;
	/*
	 * Only allow unprivileged reservations if the file descriptor is open
	 * for writing.
	 */
	return mode & BLK_OPEN_WRITE;
}

static int blkdev_pr_register(struct block_device *bdev, blk_mode_t mode,
		struct pr_registration __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_registration reg;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_register)
		return -EOPNOTSUPP;
	if (copy_from_user(&reg, arg, sizeof(reg)))
		return -EFAULT;

	if (reg.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_register(bdev, reg.old_key, reg.new_key, reg.flags);
}

static int blkdev_pr_reserve(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_reserve)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags & ~PR_FL_IGNORE_KEY)
		return -EOPNOTSUPP;
	return ops->pr_reserve(bdev, rsv.key, rsv.type, rsv.flags);
}

static int blkdev_pr_release(struct block_device *bdev, blk_mode_t mode,
		struct pr_reservation __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_reservation rsv;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_release)
		return -EOPNOTSUPP;
	if (copy_from_user(&rsv, arg, sizeof(rsv)))
		return -EFAULT;

	if (rsv.flags)
		return -EOPNOTSUPP;
	return ops->pr_release(bdev, rsv.key, rsv.type);
}

static int blkdev_pr_preempt(struct block_device *bdev, blk_mode_t mode,
		struct pr_preempt __user *arg, bool abort)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_preempt p;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_preempt)
		return -EOPNOTSUPP;
	if (copy_from_user(&p, arg, sizeof(p)))
		return -EFAULT;

	if (p.flags)
		return -EOPNOTSUPP;
	return ops->pr_preempt(bdev, p.old_key, p.new_key, p.type, abort);
}

static int blkdev_pr_clear(struct block_device *bdev, blk_mode_t mode,
		struct pr_clear __user *arg)
{
	const struct pr_ops *ops = bdev->bd_disk->fops->pr_ops;
	struct pr_clear c;

	if (!blkdev_pr_allowed(bdev, mode))
		return -EPERM;
	if (!ops || !ops->pr_clear)
		return -EOPNOTSUPP;
	if (copy_from_user(&c, arg, sizeof(c)))
		return -EFAULT;

	if (c.flags)
		return -EOPNOTSUPP;
	return ops->pr_clear(bdev, c.key);
}

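/*
 * BLKFLSBUF: sync the device, through the holder's ->sync method if one is
 * registered, and invalidate its page cache.
 */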
static int blkdev_flushbuf(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	mutex_lock(&bdev->bd_holder_lock);
	if (bdev->bd_holder_ops && bdev->bd_holder_ops->sync)
		bdev->bd_holder_ops->sync(bdev);
	else
		sync_blockdev(bdev);
	mutex_unlock(&bdev->bd_holder_lock);

	invalidate_bdev(bdev);
	return 0;
}

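/*
 * BLKROSET: set or clear the read-only flag of the device.  The driver may
 * intercept the change through its ->set_read_only method.
 */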
static int blkdev_roset(struct block_device *bdev, unsigned cmd,
		unsigned long arg)
{
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (get_user(n, (int __user *)arg))
		return -EFAULT;
	if (bdev->bd_disk->fops->set_read_only) {
		ret = bdev->bd_disk->fops->set_read_only(bdev, n);
		if (ret)
			return ret;
	}
	bdev->bd_read_only = n;
	return 0;
}

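/*
 * HDIO_GETGEO: report the disk geometry supplied by the driver's ->getgeo
 * method.
 */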
static int blkdev_getgeo(struct block_device *bdev,
		struct hd_geometry __user *argp)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!argp)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	memset(&geo, 0, sizeof(geo));
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;
	if (copy_to_user(argp, &geo, sizeof(geo)))
		return -EFAULT;
	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_hd_geometry {
	unsigned char heads;
	unsigned char sectors;
	unsigned short cylinders;
	u32 start;
};

static int compat_hdio_getgeo(struct block_device *bdev,
			      struct compat_hd_geometry __user *ugeo)
{
	struct gendisk *disk = bdev->bd_disk;
	struct hd_geometry geo;
	int ret;

	if (!ugeo)
		return -EINVAL;
	if (!disk->fops->getgeo)
		return -ENOTTY;

	memset(&geo, 0, sizeof(geo));
	/*
	 * We need to set the startsect first, the driver may
	 * want to override it.
	 */
	geo.start = get_start_sect(bdev);
	ret = disk->fops->getgeo(bdev, &geo);
	if (ret)
		return ret;

	ret = copy_to_user(ugeo, &geo, 4);
	ret |= put_user(geo.start, &ugeo->start);
	if (ret)
		ret = -EFAULT;

	return ret;
}
#endif

/* set the logical block size */
static int blkdev_bszset(struct block_device *bdev, blk_mode_t mode,
		int __user *argp)
{
	int ret, n;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (!argp)
		return -EINVAL;
	if (get_user(n, argp))
		return -EFAULT;

	if (mode & BLK_OPEN_EXCL)
		return set_blocksize(bdev, n);

	if (IS_ERR(blkdev_get_by_dev(bdev->bd_dev, mode, &bdev, NULL)))
		return -EBUSY;
	ret = set_blocksize(bdev, n);
	blkdev_put(bdev, &bdev);

	return ret;
}

/*
 * Common commands that are handled the same way on native and compat
 * user space. Note the separate arg/argp parameters that are needed
 * to deal with the compat_ptr() conversion.
 */
static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
			       unsigned int cmd, unsigned long arg,
			       void __user *argp)
{
	unsigned int max_sectors;

	switch (cmd) {
	case BLKFLSBUF:
		return blkdev_flushbuf(bdev, cmd, arg);
	case BLKROSET:
		return blkdev_roset(bdev, cmd, arg);
	case BLKDISCARD:
		return blk_ioctl_discard(bdev, mode, arg);
	case BLKSECDISCARD:
		return blk_ioctl_secure_erase(bdev, mode, argp);
	case BLKZEROOUT:
		return blk_ioctl_zeroout(bdev, mode, arg);
	case BLKGETDISKSEQ:
		return put_u64(argp, bdev->bd_disk->diskseq);
	case BLKREPORTZONE:
		return blkdev_report_zones_ioctl(bdev, cmd, arg);
	case BLKRESETZONE:
	case BLKOPENZONE:
	case BLKCLOSEZONE:
	case BLKFINISHZONE:
		return blkdev_zone_mgmt_ioctl(bdev, mode, cmd, arg);
	case BLKGETZONESZ:
		return put_uint(argp, bdev_zone_sectors(bdev));
	case BLKGETNRZONES:
		return put_uint(argp, bdev_nr_zones(bdev));
	case BLKROGET:
		return put_int(argp, bdev_read_only(bdev) != 0);
	case BLKSSZGET: /* get block device logical block size */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKPBSZGET: /* get block device physical block size */
		return put_uint(argp, bdev_physical_block_size(bdev));
	case BLKIOMIN:
		return put_uint(argp, bdev_io_min(bdev));
	case BLKIOOPT:
		return put_uint(argp, bdev_io_opt(bdev));
	case BLKALIGNOFF:
		return put_int(argp, bdev_alignment_offset(bdev));
	case BLKDISCARDZEROES:
		return put_uint(argp, 0);
	case BLKSECTGET:
		max_sectors = min_t(unsigned int, USHRT_MAX,
				    queue_max_sectors(bdev_get_queue(bdev)));
		return put_ushort(argp, max_sectors);
	case BLKROTATIONAL:
		return put_ushort(argp, !bdev_nonrot(bdev));
	case BLKRASET:
	case BLKFRASET:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		bdev->bd_disk->bdi->ra_pages = (arg * 512) / PAGE_SIZE;
		return 0;
	case BLKRRPART:
		if (!capable(CAP_SYS_ADMIN))
			return -EACCES;
		if (bdev_is_partition(bdev))
			return -EINVAL;
		return disk_scan_partitions(bdev->bd_disk, mode);
	case BLKTRACESTART:
	case BLKTRACESTOP:
	case BLKTRACETEARDOWN:
		return blk_trace_ioctl(bdev, cmd, argp);
	case IOC_PR_REGISTER:
		return blkdev_pr_register(bdev, mode, argp);
	case IOC_PR_RESERVE:
		return blkdev_pr_reserve(bdev, mode, argp);
	case IOC_PR_RELEASE:
		return blkdev_pr_release(bdev, mode, argp);
	case IOC_PR_PREEMPT:
		return blkdev_pr_preempt(bdev, mode, argp, false);
	case IOC_PR_PREEMPT_ABORT:
		return blkdev_pr_preempt(bdev, mode, argp, true);
	case IOC_PR_CLEAR:
		return blkdev_pr_clear(bdev, mode, argp);
	default:
		return -ENOIOCTLCMD;
	}
}

/*
 * Always keep this in sync with compat_blkdev_ioctl()
 * to handle all incompatible commands in both functions.
 *
 * New commands must be compatible and go into blkdev_common_ioctl
 */
long blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	void __user *argp = (void __user *)arg;
	blk_mode_t mode = file_to_blk_mode(file);
	int ret;

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return blkdev_getgeo(bdev, argp);
	case BLKPG:
		return blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		return put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		if (bdev_nr_sectors(bdev) > ~0UL)
			return -EFBIG;
		return put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */
		return put_int(argp, block_size(bdev));
	case BLKBSZSET:
		return blkdev_bszset(bdev, mode, argp);
	case BLKGETSIZE64:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret != -ENOIOCTLCMD)
		return ret;

	if (!bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}

#ifdef CONFIG_COMPAT

#define BLKBSZGET_32		_IOR(0x12, 112, int)
#define BLKBSZSET_32		_IOW(0x12, 113, int)
#define BLKGETSIZE64_32		_IOR(0x12, 114, int)

/* Most of the generic ioctls are handled in the normal fallback path.
   This assumes the blkdev's low level compat_ioctl always returns
   ENOIOCTLCMD for unknown ioctls. */
long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	void __user *argp = compat_ptr(arg);
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct gendisk *disk = bdev->bd_disk;
	blk_mode_t mode = file_to_blk_mode(file);

	switch (cmd) {
	/* These need separate implementations for the data structure */
	case HDIO_GETGEO:
		return compat_hdio_getgeo(bdev, argp);
	case BLKPG:
		return compat_blkpg_ioctl(bdev, argp);

	/* Compat mode returns 32-bit data instead of 'long' */
	case BLKRAGET:
	case BLKFRAGET:
		if (!argp)
			return -EINVAL;
		return compat_put_long(argp,
			(bdev->bd_disk->bdi->ra_pages * PAGE_SIZE) / 512);
	case BLKGETSIZE:
		if (bdev_nr_sectors(bdev) > ~(compat_ulong_t)0)
			return -EFBIG;
		return compat_put_ulong(argp, bdev_nr_sectors(bdev));

	/* The data is compatible, but the command number is different */
	case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */
		return put_int(argp, bdev_logical_block_size(bdev));
	case BLKBSZSET_32:
		return blkdev_bszset(bdev, mode, argp);
	case BLKGETSIZE64_32:
		return put_u64(argp, bdev_nr_bytes(bdev));

	/* Incompatible alignment on i386 */
	case BLKTRACESETUP32:
		return blk_trace_ioctl(bdev, cmd, argp);
	default:
		break;
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret == -ENOIOCTLCMD && disk->fops->compat_ioctl)
		ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);

	return ret;
}
#endif