// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 1991, 1992  Linus Torvalds
 * Copyright (C) 2001  Andrea Arcangeli <andrea@suse.de> SuSE
 * Copyright (C) 2016 - 2020 Christoph Hellwig
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/device_cgroup.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
#include <linux/blkpg.h>
#include <linux/magic.h>
#include <linux/buffer_head.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/cleancache.h>
#include <linux/part_stat.h>
#include <linux/uaccess.h>
#include "../fs/internal.h"
#include "blk.h"

struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

static inline struct bdev_inode *BDEV_I(struct inode *inode)
{
	return container_of(inode, struct bdev_inode, vfs_inode);
}

struct block_device *I_BDEV(struct inode *inode)
{
	return &BDEV_I(inode)->bdev;
}
EXPORT_SYMBOL(I_BDEV);

static void bdev_write_inode(struct block_device *bdev)
{
	struct inode *inode = bdev->bd_inode;
	int ret;

	spin_lock(&inode->i_lock);
	while (inode->i_state & I_DIRTY) {
		spin_unlock(&inode->i_lock);
		ret = write_inode_now(inode, true);
		if (ret) {
			char name[BDEVNAME_SIZE];
			pr_warn_ratelimited("VFS: Dirty inode writeback failed "
					"for block device %s (err=%d).\n",
					bdevname(bdev, name), ret);
		}
		spin_lock(&inode->i_lock);
	}
	spin_unlock(&inode->i_lock);
}

/* Kill _all_ buffers and pagecache, dirty or not. */
static void kill_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping_empty(mapping))
		return;

	invalidate_bh_lrus();
	truncate_inode_pages(mapping, 0);
}

/* Invalidate clean unused buffers and pagecache. */
void invalidate_bdev(struct block_device *bdev)
{
	struct address_space *mapping = bdev->bd_inode->i_mapping;

	if (mapping->nrpages) {
		invalidate_bh_lrus();
		lru_add_drain_all();	/* make sure all lru add caches are flushed */
		invalidate_mapping_pages(mapping, 0, -1);
	}
	/* 99% of the time, we don't need to flush the cleancache on the bdev.
	 * But, for the strange corners, let's be cautious.
	 */
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(invalidate_bdev);

/*
 * Drop all buffers & page cache for a given bdev range. This function bails
 * out with an error if the bdev has some other exclusive owner (such as a
 * filesystem).
 */
int truncate_bdev_range(struct block_device *bdev, fmode_t mode,
		loff_t lstart, loff_t lend)
{
	/*
	 * If we don't hold an exclusive handle for the device, upgrade to it
	 * while we discard the buffer cache to avoid discarding buffers
	 * under a live filesystem.
	 */
	if (!(mode & FMODE_EXCL)) {
		int err = bd_prepare_to_claim(bdev, truncate_bdev_range);
		if (err)
			goto invalidate;
	}

	truncate_inode_pages_range(bdev->bd_inode->i_mapping, lstart, lend);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, truncate_bdev_range);
	return 0;

invalidate:
	/*
	 * Someone else has the handle exclusively open. Try invalidating
	 * instead. The 'end' argument is inclusive so the rounding is safe.
	 */
	return invalidate_inode_pages2_range(bdev->bd_inode->i_mapping,
					     lstart >> PAGE_SHIFT,
					     lend >> PAGE_SHIFT);
}
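
/*
 * Usage sketch (illustrative only, not part of this file): a discard-style
 * ioctl is expected to flush and drop the page cache over the byte range
 * before issuing the discard to the device.  The helper name
 * blk_example_discard() and its arguments are hypothetical.
 *
 *	static int blk_example_discard(struct block_device *bdev, fmode_t mode,
 *				       loff_t start, loff_t len)
 *	{
 *		int err;
 *
 *		err = truncate_bdev_range(bdev, mode, start, start + len - 1);
 *		if (err)
 *			return err;
 *		return blkdev_issue_discard(bdev, start >> SECTOR_SHIFT,
 *					    len >> SECTOR_SHIFT, GFP_KERNEL, 0);
 *	}
 */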

static void set_init_blocksize(struct block_device *bdev)
{
	unsigned int bsize = bdev_logical_block_size(bdev);
	loff_t size = i_size_read(bdev->bd_inode);

	while (bsize < PAGE_SIZE) {
		if (size & bsize)
			break;
		bsize <<= 1;
	}
	bdev->bd_inode->i_blkbits = blksize_bits(bsize);
}

int set_blocksize(struct block_device *bdev, int size)
{
	/* Size must be a power of two, and between 512 and PAGE_SIZE */
	if (size > PAGE_SIZE || size < 512 || !is_power_of_2(size))
		return -EINVAL;

	/* Size cannot be smaller than the size supported by the device */
	if (size < bdev_logical_block_size(bdev))
		return -EINVAL;

	/* Don't change the size if it is the same as the current one */
	if (bdev->bd_inode->i_blkbits != blksize_bits(size)) {
		sync_blockdev(bdev);
		bdev->bd_inode->i_blkbits = blksize_bits(size);
		kill_bdev(bdev);
	}
	return 0;
}

EXPORT_SYMBOL(set_blocksize);

int sb_set_blocksize(struct super_block *sb, int size)
{
	if (set_blocksize(sb->s_bdev, size))
		return 0;
	/* If we get here, we know size is a power of two
	 * and its value is between 512 and PAGE_SIZE */
	sb->s_blocksize = size;
	sb->s_blocksize_bits = blksize_bits(size);
	return sb->s_blocksize;
}

EXPORT_SYMBOL(sb_set_blocksize);

int sb_min_blocksize(struct super_block *sb, int size)
{
	int minsize = bdev_logical_block_size(sb->s_bdev);
	if (size < minsize)
		size = minsize;
	return sb_set_blocksize(sb, size);
}

EXPORT_SYMBOL(sb_min_blocksize);
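
/*
 * Usage sketch (illustrative only): a filesystem normally picks its block
 * size early in its fill_super() callback, before reading on-disk metadata.
 * example_fill_super() is a hypothetical name; note that sb_min_blocksize()
 * returns 0 on failure and the chosen block size on success.
 *
 *	static int example_fill_super(struct super_block *sb, void *data,
 *				      int silent)
 *	{
 *		// Use 1k blocks, or the device's minimum if that is larger.
 *		if (!sb_min_blocksize(sb, 1024))
 *			return -EINVAL;
 *		// ... read the on-disk superblock with sb_bread() etc. ...
 *		return 0;
 *	}
 */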

int __sync_blockdev(struct block_device *bdev, int wait)
{
	if (!bdev)
		return 0;
	if (!wait)
		return filemap_flush(bdev->bd_inode->i_mapping);
	return filemap_write_and_wait(bdev->bd_inode->i_mapping);
}

/*
 * Write out and wait upon all the dirty data associated with a block
 * device via its mapping.  Does not take the superblock lock.
 */
int sync_blockdev(struct block_device *bdev)
{
	return __sync_blockdev(bdev, 1);
}
EXPORT_SYMBOL(sync_blockdev);

/*
 * Write out and wait upon all dirty data associated with this
 * device.  Filesystem data as well as the underlying block
 * device.  Takes the superblock lock.
 */
int fsync_bdev(struct block_device *bdev)
{
	struct super_block *sb = get_super(bdev);
	if (sb) {
		int res = sync_filesystem(sb);
		drop_super(sb);
		return res;
	}
	return sync_blockdev(bdev);
}
EXPORT_SYMBOL(fsync_bdev);

/**
 * freeze_bdev - lock a filesystem and force it into a consistent state
 * @bdev: blockdevice to lock
 *
 * If a superblock is found on this device, we take the s_umount semaphore
 * on it to make sure nobody unmounts until the snapshot creation is done.
 * The reference counter (bd_fsfreeze_count) guarantees that only the last
 * unfreeze process can actually unfreeze the frozen filesystem when multiple
 * freeze requests arrive simultaneously. It counts up in freeze_bdev() and
 * down in thaw_bdev(); when it reaches 0, thaw_bdev() actually unfreezes
 * the filesystem.
 */
int freeze_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = 0;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (++bdev->bd_fsfreeze_count > 1)
		goto done;

	sb = get_active_super(bdev);
	if (!sb)
		goto sync;
	if (sb->s_op->freeze_super)
		error = sb->s_op->freeze_super(sb);
	else
		error = freeze_super(sb);
	deactivate_super(sb);

	if (error) {
		bdev->bd_fsfreeze_count--;
		goto done;
	}
	bdev->bd_fsfreeze_sb = sb;

sync:
	sync_blockdev(bdev);
done:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(freeze_bdev);

/**
 * thaw_bdev - unlock filesystem
 * @bdev: blockdevice to unlock
 *
 * Unlocks the filesystem and marks it writeable again after freeze_bdev().
 */
int thaw_bdev(struct block_device *bdev)
{
	struct super_block *sb;
	int error = -EINVAL;

	mutex_lock(&bdev->bd_fsfreeze_mutex);
	if (!bdev->bd_fsfreeze_count)
		goto out;

	error = 0;
	if (--bdev->bd_fsfreeze_count > 0)
		goto out;

	sb = bdev->bd_fsfreeze_sb;
	if (!sb)
		goto out;

	if (sb->s_op->thaw_super)
		error = sb->s_op->thaw_super(sb);
	else
		error = thaw_super(sb);
	if (error)
		bdev->bd_fsfreeze_count++;
	else
		bdev->bd_fsfreeze_sb = NULL;
out:
	mutex_unlock(&bdev->bd_fsfreeze_mutex);
	return error;
}
EXPORT_SYMBOL(thaw_bdev);
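
/*
 * Usage sketch (illustrative only): a snapshot implementation brackets its
 * copy of the device with freeze_bdev()/thaw_bdev() so any filesystem on it
 * stays consistent while the blocks are read.  example_take_snapshot() and
 * example_copy_blocks() are hypothetical helpers.
 *
 *	static int example_take_snapshot(struct block_device *bdev)
 *	{
 *		int err = freeze_bdev(bdev);
 *
 *		if (err)
 *			return err;
 *		err = example_copy_blocks(bdev);	// hypothetical
 *		thaw_bdev(bdev);
 *		return err;
 *	}
 */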

/**
 * bdev_read_page() - Start reading a page from a block device
 * @bdev: The device to read the page from
 * @sector: The offset on the device to read the page from (need not be aligned)
 * @page: The page to read
 *
 * On entry, the page should be locked.  It will be unlocked when the page
 * has been read.  If the block driver implements rw_page synchronously,
 * that will be true on exit from this function, but it need not be.
 *
 * Errors returned by this function are usually "soft", e.g. out of memory,
 * or queue full; callers should try a different route to read this page
 * rather than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_read_page(struct block_device *bdev, sector_t sector,
		struct page *page)
{
	const struct block_device_operations *ops = bdev->bd_disk->fops;
	int result = -EOPNOTSUPP;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return result;

	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_READ);
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}

/**
 * bdev_write_page() - Start writing a page to a block device
 * @bdev: The device to write the page to
 * @sector: The offset on the device to write the page to (need not be aligned)
 * @page: The page to write
 * @wbc: The writeback_control for the write
 *
 * On entry, the page should be locked and not currently under writeback.
 * On exit, if the write started successfully, the page will be unlocked and
 * under writeback.  If the write failed already (e.g. the driver failed to
 * queue the page to the device), the page will still be locked.  If the
 * caller is a ->writepage implementation, it will need to unlock the page.
 *
 * Errors returned by this function are usually "soft", e.g. out of memory,
 * or queue full; callers should try a different route to write this page
 * rather than propagate an error back up the stack.
 *
 * Return: negative errno if an error occurs, 0 if submission was successful.
 */
int bdev_write_page(struct block_device *bdev, sector_t sector,
		struct page *page, struct writeback_control *wbc)
{
	int result;
	const struct block_device_operations *ops = bdev->bd_disk->fops;

	if (!ops->rw_page || bdev_get_integrity(bdev))
		return -EOPNOTSUPP;
	result = blk_queue_enter(bdev->bd_disk->queue, 0);
	if (result)
		return result;

	set_page_writeback(page);
	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page,
			      REQ_OP_WRITE);
	if (result) {
		end_page_writeback(page);
	} else {
		clean_page_buffers(page);
		unlock_page(page);
	}
	blk_queue_exit(bdev->bd_disk->queue);
	return result;
}
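
/*
 * Usage sketch (illustrative only): a ->writepage implementation may try the
 * driver's rw_page fast path first and fall back to a normal bio on any
 * "soft" error, similar in spirit to fs/mpage.c.  example_writepage() and
 * example_submit_bio() are hypothetical.
 *
 *	static int example_writepage(struct page *page,
 *				     struct writeback_control *wbc)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		struct block_device *bdev = inode->i_sb->s_bdev;
 *		sector_t sector = (sector_t)page->index <<
 *					(PAGE_SHIFT - SECTOR_SHIFT);
 *
 *		if (!bdev_write_page(bdev, sector, page, wbc))
 *			return 0;	// submitted; page is under writeback
 *		// Soft failure: the page is still locked, take the slow path.
 *		return example_submit_bio(page, wbc);	// hypothetical
 *	}
 */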

/*
 * pseudo-fs
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock);
static struct kmem_cache *bdev_cachep __read_mostly;

static struct inode *bdev_alloc_inode(struct super_block *sb)
{
	struct bdev_inode *ei = kmem_cache_alloc(bdev_cachep, GFP_KERNEL);

	if (!ei)
		return NULL;
	memset(&ei->bdev, 0, sizeof(ei->bdev));
	return &ei->vfs_inode;
}

static void bdev_free_inode(struct inode *inode)
{
	struct block_device *bdev = I_BDEV(inode);

	free_percpu(bdev->bd_stats);
	kfree(bdev->bd_meta_info);

	if (!bdev_is_partition(bdev)) {
		if (bdev->bd_disk && bdev->bd_disk->bdi)
			bdi_put(bdev->bd_disk->bdi);
		kfree(bdev->bd_disk);
	}

	if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
		blk_free_ext_minor(MINOR(bdev->bd_dev));

	kmem_cache_free(bdev_cachep, BDEV_I(inode));
}

static void init_once(void *data)
{
	struct bdev_inode *ei = data;

	inode_init_once(&ei->vfs_inode);
}

static void bdev_evict_inode(struct inode *inode)
{
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode); /* is it needed here? */
	clear_inode(inode);
}

static const struct super_operations bdev_sops = {
	.statfs = simple_statfs,
	.alloc_inode = bdev_alloc_inode,
	.free_inode = bdev_free_inode,
	.drop_inode = generic_delete_inode,
	.evict_inode = bdev_evict_inode,
};

static int bd_init_fs_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx = init_pseudo(fc, BDEVFS_MAGIC);
	if (!ctx)
		return -ENOMEM;
	fc->s_iflags |= SB_I_CGROUPWB;
	ctx->ops = &bdev_sops;
	return 0;
}

static struct file_system_type bd_type = {
	.name = "bdev",
	.init_fs_context = bd_init_fs_context,
	.kill_sb = kill_anon_super,
};

struct super_block *blockdev_superblock __read_mostly;
EXPORT_SYMBOL_GPL(blockdev_superblock);

void __init bdev_cache_init(void)
{
	int err;
	static struct vfsmount *bd_mnt;

	bdev_cachep = kmem_cache_create("bdev_cache", sizeof(struct bdev_inode),
			0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
				SLAB_MEM_SPREAD|SLAB_ACCOUNT|SLAB_PANIC),
			init_once);
	err = register_filesystem(&bd_type);
	if (err)
		panic("Cannot register bdev pseudo-fs");
	bd_mnt = kern_mount(&bd_type);
	if (IS_ERR(bd_mnt))
		panic("Cannot create bdev pseudo-fs");
	blockdev_superblock = bd_mnt->mnt_sb;	/* For writeback */
}

struct block_device *bdev_alloc(struct gendisk *disk, u8 partno)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = new_inode(blockdev_superblock);
	if (!inode)
		return NULL;
	inode->i_mode = S_IFBLK;
	inode->i_rdev = 0;
	inode->i_data.a_ops = &def_blk_aops;
	mapping_set_gfp_mask(&inode->i_data, GFP_USER);

	bdev = I_BDEV(inode);
	mutex_init(&bdev->bd_fsfreeze_mutex);
	spin_lock_init(&bdev->bd_size_lock);
	bdev->bd_partno = partno;
	bdev->bd_inode = inode;
	bdev->bd_stats = alloc_percpu(struct disk_stats);
	if (!bdev->bd_stats) {
		iput(inode);
		return NULL;
	}
	bdev->bd_disk = disk;
	return bdev;
}

void bdev_add(struct block_device *bdev, dev_t dev)
{
	bdev->bd_dev = dev;
	bdev->bd_inode->i_rdev = dev;
	bdev->bd_inode->i_ino = dev;
	insert_inode_hash(bdev->bd_inode);
}

long nr_blockdev_pages(void)
{
	struct inode *inode;
	long ret = 0;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list)
		ret += inode->i_mapping->nrpages;
	spin_unlock(&blockdev_superblock->s_inode_list_lock);

	return ret;
}

/**
 * bd_may_claim - test whether a block device can be claimed
 * @bdev: block device of interest
 * @whole: whole block device containing @bdev, may equal @bdev
 * @holder: holder trying to claim @bdev
 *
 * Test whether @bdev can be claimed by @holder.
 *
 * CONTEXT:
 * spin_lock(&bdev_lock).
 *
 * RETURNS:
 * %true if @bdev can be claimed, %false otherwise.
 */
static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
			 void *holder)
{
	if (bdev->bd_holder == holder)
		return true;	 /* already a holder */
	else if (bdev->bd_holder != NULL)
		return false;	 /* held by someone else */
	else if (whole == bdev)
		return true;	 /* is a whole device which isn't held */

	else if (whole->bd_holder == bd_may_claim)
		return true;	 /* is a partition of a device that is being partitioned */
	else if (whole->bd_holder != NULL)
		return false;	 /* is a partition of a held device */
	else
		return true;	 /* is a partition of an un-held device */
}

/**
 * bd_prepare_to_claim - claim a block device
 * @bdev: block device of interest
 * @holder: holder trying to claim @bdev
 *
 * Claim @bdev.  This function fails if @bdev is already claimed by another
 * holder and waits if another claiming is in progress.  On successful
 * return, the caller has ownership of bd_claiming and bd_holder[s].
 *
 * RETURNS:
 * 0 if @bdev can be claimed, -EBUSY otherwise.
 */
int bd_prepare_to_claim(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	if (WARN_ON_ONCE(!holder))
		return -EINVAL;
retry:
	spin_lock(&bdev_lock);
	/* if someone else claimed, fail */
	if (!bd_may_claim(bdev, whole, holder)) {
		spin_unlock(&bdev_lock);
		return -EBUSY;
	}

	/* if claiming is already in progress, wait for it to finish */
	if (whole->bd_claiming) {
		wait_queue_head_t *wq = bit_waitqueue(&whole->bd_claiming, 0);
		DEFINE_WAIT(wait);

		prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&bdev_lock);
		schedule();
		finish_wait(wq, &wait);
		goto retry;
	}

	/* yay, all mine */
	whole->bd_claiming = holder;
	spin_unlock(&bdev_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(bd_prepare_to_claim); /* only for the loop driver */

static void bd_clear_claiming(struct block_device *whole, void *holder)
{
	lockdep_assert_held(&bdev_lock);
	/* tell others that we're done */
	BUG_ON(whole->bd_claiming != holder);
	whole->bd_claiming = NULL;
	wake_up_bit(&whole->bd_claiming, 0);
}

/**
 * bd_finish_claiming - finish claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Finish exclusive open of a block device. Mark the device as exclusively
 * open by the holder and wake up all waiters for exclusive open to finish.
 */
static void bd_finish_claiming(struct block_device *bdev, void *holder)
{
	struct block_device *whole = bdev_whole(bdev);

	spin_lock(&bdev_lock);
	BUG_ON(!bd_may_claim(bdev, whole, holder));
	/*
	 * Note that for a whole device bd_holders will be incremented twice,
	 * and bd_holder will be set to bd_may_claim before being set to holder
	 */
	whole->bd_holders++;
	whole->bd_holder = bd_may_claim;
	bdev->bd_holders++;
	bdev->bd_holder = holder;
	bd_clear_claiming(whole, holder);
	spin_unlock(&bdev_lock);
}

/**
 * bd_abort_claiming - abort claiming of a block device
 * @bdev: block device of interest
 * @holder: holder that has claimed @bdev
 *
 * Abort claiming of a block device when the exclusive open failed. This can be
 * also used when exclusive open is not actually desired and we just needed
 * to block other exclusive openers for a while.
 */
void bd_abort_claiming(struct block_device *bdev, void *holder)
{
	spin_lock(&bdev_lock);
	bd_clear_claiming(bdev_whole(bdev), holder);
	spin_unlock(&bdev_lock);
}
EXPORT_SYMBOL(bd_abort_claiming);
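
/*
 * Usage sketch (illustrative only): bd_prepare_to_claim() followed by
 * bd_abort_claiming() can serve as a temporary "keep exclusive openers out"
 * barrier without actually opening the device, which is exactly how
 * truncate_bdev_range() above uses the pair.  example_barrier() is a
 * hypothetical caller.
 *
 *	static int example_barrier(struct block_device *bdev, void *holder)
 *	{
 *		int err = bd_prepare_to_claim(bdev, holder);
 *
 *		if (err)
 *			return err;	// someone else holds the device
 *		// ... do work that must not race with exclusive opens ...
 *		bd_abort_claiming(bdev, holder);
 *		return 0;
 *	}
 */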

static void blkdev_flush_mapping(struct block_device *bdev)
{
	WARN_ON_ONCE(bdev->bd_holders);
	sync_blockdev(bdev);
	kill_bdev(bdev);
	bdev_write_inode(bdev);
}

static int blkdev_get_whole(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;
	int ret = 0;

	if (disk->fops->open) {
		ret = disk->fops->open(bdev, mode);
		if (ret) {
			/* avoid ghost partitions on a removed medium */
			if (ret == -ENOMEDIUM &&
			    test_bit(GD_NEED_PART_SCAN, &disk->state))
				bdev_disk_changed(disk, true);
			return ret;
		}
	}

	if (!bdev->bd_openers)
		set_init_blocksize(bdev);
	if (test_bit(GD_NEED_PART_SCAN, &disk->state))
		bdev_disk_changed(disk, false);
	bdev->bd_openers++;
	return 0;
}

static void blkdev_put_whole(struct block_device *bdev, fmode_t mode)
{
	if (!--bdev->bd_openers)
		blkdev_flush_mapping(bdev);
	if (bdev->bd_disk->fops->release)
		bdev->bd_disk->fops->release(bdev->bd_disk, mode);
}

static int blkdev_get_part(struct block_device *part, fmode_t mode)
{
	struct gendisk *disk = part->bd_disk;
	int ret;

	if (part->bd_openers)
		goto done;

	ret = blkdev_get_whole(bdev_whole(part), mode);
	if (ret)
		return ret;

	ret = -ENXIO;
	if (!bdev_nr_sectors(part))
		goto out_blkdev_put;

	disk->open_partitions++;
	set_init_blocksize(part);
done:
	part->bd_openers++;
	return 0;

out_blkdev_put:
	blkdev_put_whole(bdev_whole(part), mode);
	return ret;
}

static void blkdev_put_part(struct block_device *part, fmode_t mode)
{
	struct block_device *whole = bdev_whole(part);

	if (--part->bd_openers)
		return;
	blkdev_flush_mapping(part);
	whole->bd_disk->open_partitions--;
	blkdev_put_whole(whole, mode);
}

struct block_device *blkdev_get_no_open(dev_t dev)
{
	struct block_device *bdev;
	struct inode *inode;

	inode = ilookup(blockdev_superblock, dev);
	if (!inode) {
		blk_request_module(dev);
		inode = ilookup(blockdev_superblock, dev);
		if (!inode)
			return NULL;
	}

	/* switch from the inode reference to a device model one: */
	bdev = &BDEV_I(inode)->bdev;
	if (!kobject_get_unless_zero(&bdev->bd_device.kobj))
		bdev = NULL;
	iput(inode);

	if (!bdev)
		return NULL;
	if ((bdev->bd_disk->flags & GENHD_FL_HIDDEN) ||
	    !try_module_get(bdev->bd_disk->fops->owner)) {
		put_device(&bdev->bd_device);
		return NULL;
	}

	return bdev;
}

void blkdev_put_no_open(struct block_device *bdev)
{
	module_put(bdev->bd_disk->fops->owner);
	put_device(&bdev->bd_device);
}

/**
 * blkdev_get_by_dev - open a block device by device number
 * @dev: device number of block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by device number @dev. If @mode includes
 * %FMODE_EXCL, the block device is opened with exclusive access.  Specifying
 * %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may nest for
 * the same @holder.
 *
 * Use this interface ONLY if you really do not have anything better - i.e. when
 * you are behind a truly sucky interface and all you are given is a device
 * number.  Everything else should use blkdev_get_by_path().
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
{
	bool unblock_events = true;
	struct block_device *bdev;
	struct gendisk *disk;
	int ret;

	ret = devcgroup_check_permission(DEVCG_DEV_BLOCK,
			MAJOR(dev), MINOR(dev),
			((mode & FMODE_READ) ? DEVCG_ACC_READ : 0) |
			((mode & FMODE_WRITE) ? DEVCG_ACC_WRITE : 0));
	if (ret)
		return ERR_PTR(ret);

	bdev = blkdev_get_no_open(dev);
	if (!bdev)
		return ERR_PTR(-ENXIO);
	disk = bdev->bd_disk;

	if (mode & FMODE_EXCL) {
		ret = bd_prepare_to_claim(bdev, holder);
		if (ret)
			goto put_blkdev;
	}

	disk_block_events(disk);

	mutex_lock(&disk->open_mutex);
	ret = -ENXIO;
	if (!disk_live(disk))
		goto abort_claiming;
	if (bdev_is_partition(bdev))
		ret = blkdev_get_part(bdev, mode);
	else
		ret = blkdev_get_whole(bdev, mode);
	if (ret)
		goto abort_claiming;
	if (mode & FMODE_EXCL) {
		bd_finish_claiming(bdev, holder);

		/*
		 * Block event polling for write claims if requested.  Any write
		 * holder makes the write_holder state stick until all are
		 * released.  This is good enough and tracking individual
		 * writeable references is too fragile given the way @mode is
		 * used in blkdev_get/put().
		 */
		if ((mode & FMODE_WRITE) && !bdev->bd_write_holder &&
		    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
			bdev->bd_write_holder = true;
			unblock_events = false;
		}
	}
	mutex_unlock(&disk->open_mutex);

	if (unblock_events)
		disk_unblock_events(disk);
	return bdev;

abort_claiming:
	if (mode & FMODE_EXCL)
		bd_abort_claiming(bdev, holder);
	mutex_unlock(&disk->open_mutex);
	disk_unblock_events(disk);
put_blkdev:
	blkdev_put_no_open(bdev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(blkdev_get_by_dev);
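
/*
 * Usage sketch (illustrative only): an exclusive open by device number is
 * paired with blkdev_put() using the same @mode and a stable @holder
 * pointer.  example_use_disk() is a hypothetical caller.
 *
 *	static int example_use_disk(dev_t devt, void *holder)
 *	{
 *		struct block_device *bdev;
 *
 *		bdev = blkdev_get_by_dev(devt, FMODE_READ | FMODE_WRITE |
 *					 FMODE_EXCL, holder);
 *		if (IS_ERR(bdev))
 *			return PTR_ERR(bdev);
 *		// ... use the device ...
 *		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
 *		return 0;
 *	}
 */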

/**
 * blkdev_get_by_path - open a block device by name
 * @path: path to the block device to open
 * @mode: FMODE_* mask
 * @holder: exclusive holder identifier
 *
 * Open the block device described by the device file at @path.  If @mode
 * includes %FMODE_EXCL, the block device is opened with exclusive access.
 * Specifying %FMODE_EXCL with a %NULL @holder is invalid.  Exclusive opens may
 * nest for the same @holder.
 *
 * CONTEXT:
 * Might sleep.
 *
 * RETURNS:
 * Reference to the block_device on success, ERR_PTR(-errno) on failure.
 */
struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
		void *holder)
{
	struct block_device *bdev;
	dev_t dev;
	int error;

	error = lookup_bdev(path, &dev);
	if (error)
		return ERR_PTR(error);

	bdev = blkdev_get_by_dev(dev, mode, holder);
	if (!IS_ERR(bdev) && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
		blkdev_put(bdev, mode);
		return ERR_PTR(-EACCES);
	}

	return bdev;
}
EXPORT_SYMBOL(blkdev_get_by_path);

void blkdev_put(struct block_device *bdev, fmode_t mode)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * Sync early if it looks like we're the last one.  If someone else
	 * opens the block device between now and the decrement of bd_openers
	 * then we did a sync that we didn't need to, but that's not the end
	 * of the world and we want to avoid long syncs (could be several
	 * minutes) while holding the mutex.
	 */
	if (bdev->bd_openers == 1)
		sync_blockdev(bdev);

	mutex_lock(&disk->open_mutex);
	if (mode & FMODE_EXCL) {
		struct block_device *whole = bdev_whole(bdev);
		bool bdev_free;

		/*
		 * Release a claim on the device.  The holder fields
		 * are protected with bdev_lock.  open_mutex is to
		 * synchronize disk_holder unlinking.
		 */
		spin_lock(&bdev_lock);

		WARN_ON_ONCE(--bdev->bd_holders < 0);
		WARN_ON_ONCE(--whole->bd_holders < 0);

		if ((bdev_free = !bdev->bd_holders))
			bdev->bd_holder = NULL;
		if (!whole->bd_holders)
			whole->bd_holder = NULL;

		spin_unlock(&bdev_lock);

		/*
		 * If this was the last claim, remove the holder link and
		 * unblock event polling if it was a write holder.
		 */
		if (bdev_free && bdev->bd_write_holder) {
			disk_unblock_events(disk);
			bdev->bd_write_holder = false;
		}
	}

	/*
	 * Trigger event checking and tell drivers to flush MEDIA_CHANGE
	 * event. This is to ensure detection of media removal commanded
	 * from userland - e.g. eject(1).
	 */
	disk_flush_events(disk, DISK_EVENT_MEDIA_CHANGE);

	if (bdev_is_partition(bdev))
		blkdev_put_part(bdev, mode);
	else
		blkdev_put_whole(bdev, mode);
	mutex_unlock(&disk->open_mutex);

	blkdev_put_no_open(bdev);
}
EXPORT_SYMBOL(blkdev_put);

/**
 * lookup_bdev - lookup a block device's dev_t by name
 * @pathname: special file representing the block device
 * @dev: return value of the block device's dev_t
 *
 * Look up the dev_t of the block device at @pathname in the current
 * namespace if possible and store it in @dev.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int lookup_bdev(const char *pathname, dev_t *dev)
{
	struct inode *inode;
	struct path path;
	int error;

	if (!pathname || !*pathname)
		return -EINVAL;

	error = kern_path(pathname, LOOKUP_FOLLOW, &path);
	if (error)
		return error;

	inode = d_backing_inode(path.dentry);
	error = -ENOTBLK;
	if (!S_ISBLK(inode->i_mode))
		goto out_path_put;
	error = -EACCES;
	if (!may_open_dev(&path))
		goto out_path_put;

	*dev = inode->i_rdev;
	error = 0;
out_path_put:
	path_put(&path);
	return error;
}
EXPORT_SYMBOL(lookup_bdev);
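
/*
 * Usage sketch (illustrative only): mount-type code resolves a user-supplied
 * path to a dev_t first and only then opens the device.
 *
 *	dev_t devt;
 *	int err = lookup_bdev("/dev/sda1", &devt);
 *
 *	if (err)
 *		return err;
 *	// devt now holds the device number; see blkdev_get_by_dev().
 */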

int __invalidate_device(struct block_device *bdev, bool kill_dirty)
{
	struct super_block *sb = get_super(bdev);
	int res = 0;

	if (sb) {
		/*
		 * no need to lock the super, get_super holds the
		 * read mutex so the filesystem cannot go away
		 * under us (->put_super runs with the write lock
		 * held).
		 */
		shrink_dcache_sb(sb);
		res = invalidate_inodes(sb, kill_dirty);
		drop_super(sb);
	}
	invalidate_bdev(bdev);
	return res;
}
EXPORT_SYMBOL(__invalidate_device);

void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
{
	struct inode *inode, *old_inode = NULL;

	spin_lock(&blockdev_superblock->s_inode_list_lock);
	list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;
		struct block_device *bdev;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
		    mapping->nrpages == 0) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&blockdev_superblock->s_inode_list_lock);
		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from the s_inodes list while we dropped the
		 * s_inode_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * s_inode_list_lock.  So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;
		bdev = I_BDEV(inode);

		mutex_lock(&bdev->bd_disk->open_mutex);
		if (bdev->bd_openers)
			func(bdev, arg);
		mutex_unlock(&bdev->bd_disk->open_mutex);

		spin_lock(&blockdev_superblock->s_inode_list_lock);
	}
	spin_unlock(&blockdev_superblock->s_inode_list_lock);
	iput(old_inode);
}
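
/*
 * Usage sketch (illustrative only): sync-style code passes a small callback
 * that is invoked for every open block device, similar to how fs/sync.c
 * drives writeback.  example_write_one_bdev() is a hypothetical callback.
 *
 *	static void example_write_one_bdev(struct block_device *bdev, void *arg)
 *	{
 *		filemap_fdatawrite(bdev->bd_inode->i_mapping);
 *	}
 *
 *	iterate_bdevs(example_write_one_bdev, NULL);
 */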