Lines matching refs: md — every matched line in the device-mapper core (drivers/md/dm.c) that references the mapped_device pointer md, shown with its source line number and enclosing function. Lines that do not contain "md" (locks, barriers, labels) are absent from the listing.
302 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
304 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
309 struct mapped_device *md; in dm_blk_open() local
313 md = bdev->bd_disk->private_data; in dm_blk_open()
314 if (!md) in dm_blk_open()
317 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
318 dm_deleting_md(md)) { in dm_blk_open()
319 md = NULL; in dm_blk_open()
323 dm_get(md); in dm_blk_open()
324 atomic_inc(&md->open_count); in dm_blk_open()
328 return md ? 0 : -ENXIO; in dm_blk_open()
333 struct mapped_device *md; in dm_blk_close() local
337 md = disk->private_data; in dm_blk_close()
338 if (WARN_ON(!md)) in dm_blk_close()
341 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
342 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
345 dm_put(md); in dm_blk_close()
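The two functions above, dm_blk_open() and dm_blk_close(), arrive here with their locking stripped out, since only lines containing md are matched. A plausible reconstruction of the open side, assuming the module-wide spinlock guarding the minor table is named _minor_lock (that name never appears in the matched lines):

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);        /* assumption: serializes against teardown */

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        /* refuse to open a device that is being freed or deleted */
        if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);
out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}

The close side mirrors it: the last closer of a device marked DMF_DEFERRED_REMOVE triggers the removal that was deferred while the device was open.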
350 int dm_open_count(struct mapped_device *md) in dm_open_count() argument
352 return atomic_read(&md->open_count); in dm_open_count()
358 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion() argument
364 if (dm_open_count(md)) { in dm_lock_for_deletion()
367 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
368 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
371 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
378 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove() argument
384 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
387 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
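These functions implement deferred removal: while the device is still open a remove request can at most mark DMF_DEFERRED_REMOVE; once the open count drops to zero, DMF_DELETING is set and the device may actually go away. A plausible reconstruction of dm_lock_for_deletion(); the lock name and the exact error codes are assumptions:

int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
        int r = 0;

        spin_lock(&_minor_lock);        /* assumption: same lock as the open path */

        if (dm_open_count(md)) {
                r = -EBUSY;             /* assumption: busy while still open */
                if (mark_deferred)
                        set_bit(DMF_DEFERRED_REMOVE, &md->flags);
        } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
                r = -EEXIST;            /* assumption: no deferred remove to finish */
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}

dm_cancel_deferred_remove() is the undo: it clears DMF_DEFERRED_REMOVE, but only if DMF_DELETING has not already been set.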
401 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo() local
403 return dm_get_geometry(md, geo); in dm_blk_getgeo()
406 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl() argument
415 map = dm_get_live_table(md, srcu_idx); in dm_prepare_ioctl()
427 if (dm_suspended_md(md)) in dm_prepare_ioctl()
432 dm_put_live_table(md, *srcu_idx); in dm_prepare_ioctl()
440 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl() argument
442 dm_put_live_table(md, srcu_idx); in dm_unprepare_ioctl()
448 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl() local
451 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_blk_ioctl()
474 dm_unprepare_ioctl(md, srcu_idx); in dm_blk_ioctl()
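dm_prepare_ioctl()/dm_unprepare_ioctl() bracket every passthrough ioctl: prepare takes the live table under SRCU, asks the (single) target where to send the ioctl, possibly retargeting *bdev to an underlying device, and bails out while the device is suspended; unprepare drops the SRCU read lock. A minimal caller sketch, with the capability check for retargeted ioctls omitted:

static int example_fwd_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);     /* may retarget bdev */
        if (r < 0)
                goto out;

        r = blkdev_ioctl(bdev, mode, cmd, arg);         /* forward to the real device */
out:
        dm_unprepare_ioctl(md, srcu_idx);               /* releases the live table */
        return r;
}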
489 struct mapped_device *md = io->md; in start_io_acct() local
493 if (unlikely(dm_stats_used(&md->stats))) in start_io_acct()
494 dm_stats_account_io(&md->stats, bio_data_dir(bio), in start_io_acct()
499 static void end_io_acct(struct mapped_device *md, struct bio *bio, in end_io_acct() argument
506 if (unlikely(dm_stats_used(&md->stats))) in end_io_acct()
507 dm_stats_account_io(&md->stats, bio_data_dir(bio), in end_io_acct()
512 if (unlikely(wq_has_sleeper(&md->wait))) in end_io_acct()
513 wake_up(&md->wait); in end_io_acct()
516 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io() argument
522 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); in alloc_io()
535 io->md = md; in alloc_io()
543 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io() argument
557 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); in alloc_tio()
583 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
587 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
588 bio_list_add(&md->deferred, bio); in queue_io()
589 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
590 queue_work(md->wq, &md->work); in queue_io()
598 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table() argument
600 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
602 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
605 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table() argument
607 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
610 void dm_sync_table(struct mapped_device *md) in dm_sync_table() argument
612 synchronize_srcu(&md->io_barrier); in dm_sync_table()
620 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast() argument
623 return rcu_dereference(md->map); in dm_get_live_table_fast()
626 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast() argument
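These accessors are the read-side protocol for the live table. The SRCU variant allows the reader to sleep (mapping a bio can block); the _fast RCU variant is for short non-blocking peeks, such as copying queue limits in dm_swap_table() below. The body of dm_put_live_table_fast() is presumably just rcu_read_unlock(), which contains no "md" and so is missing from the listing. The canonical bracket used by every I/O path in this file:

        int srcu_idx;
        struct dm_table *map;

        map = dm_get_live_table(md, &srcu_idx);
        if (map) {
                /* ... inspect the table or map a bio; this may sleep ... */
        }
        dm_put_live_table(md, srcu_idx);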
637 struct mapped_device *md) in open_table_device() argument
649 r = bd_link_disk_holder(bdev, dm_disk(md)); in open_table_device()
663 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device() argument
668 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); in close_table_device()
687 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device() argument
693 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
694 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
696 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); in dm_get_table_device()
698 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
705 if ((r = open_table_device(td, dev, md))) { in dm_get_table_device()
706 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
714 list_add(&td->list, &md->table_devices); in dm_get_table_device()
718 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
724 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device() argument
728 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
730 close_table_device(td, md); in dm_put_table_device()
734 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
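dm_get_table_device() maintains a per-device cache of open underlying block devices (md->table_devices), so several targets referencing the same dev_t share one open. A plausible reconstruction of the lookup-or-create path; the refcount member name (td->count) and the error unwinding are assumptions:

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
                        struct dm_dev **result)
{
        struct table_device *td;
        int r;

        mutex_lock(&md->table_devices_lock);
        td = find_table_device(&md->table_devices, dev, mode);
        if (!td) {
                td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
                if (!td) {
                        mutex_unlock(&md->table_devices_lock);
                        return -ENOMEM;
                }
                td->dm_dev.mode = mode;

                if ((r = open_table_device(td, dev, md))) {
                        mutex_unlock(&md->table_devices_lock);
                        kfree(td);
                        return r;
                }

                refcount_set(&td->count, 1);    /* assumption: refcount_t member */
                list_add(&td->list, &md->table_devices);
        } else {
                refcount_inc(&td->count);
        }
        mutex_unlock(&md->table_devices_lock);

        *result = &td->dm_dev;
        return 0;
}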
753 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry() argument
755 *geo = md->geometry; in dm_get_geometry()
763 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry() argument
772 md->geometry = *geo; in dm_set_geometry()
777 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending() argument
779 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
791 struct mapped_device *md = io->md; in dm_io_dec_pending() local
798 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) in dm_io_dec_pending()
809 spin_lock_irqsave(&md->deferred_lock, flags); in dm_io_dec_pending()
810 if (__noflush_suspending(md) && in dm_io_dec_pending()
811 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) { in dm_io_dec_pending()
813 bio_list_add_head(&md->deferred, bio); in dm_io_dec_pending()
821 spin_unlock_irqrestore(&md->deferred_lock, flags); in dm_io_dec_pending()
827 free_io(md, io); in dm_io_dec_pending()
828 end_io_acct(md, bio, start_time, &stats_aux); in dm_io_dec_pending()
839 queue_io(md, bio); in dm_io_dec_pending()
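The cluster of lines above is the requeue branch of dm_io_dec_pending(): when a target returns BLK_STS_DM_REQUEUE during a noflush suspend, the bio is parked at the head of md->deferred instead of completing, to be resubmitted on resume via queue_io(). A sketch of just that branch; the else arm failing the bio is an assumption consistent with the WARN on zoned writes:

        if (io->status == BLK_STS_DM_REQUEUE) {
                unsigned long flags;

                spin_lock_irqsave(&md->deferred_lock, flags);
                if (__noflush_suspending(md) &&
                    !WARN_ON_ONCE(dm_is_zone_write(md, bio))) {
                        /* park it; resubmitted in order on resume */
                        bio_list_add_head(&md->deferred, bio);
                } else {
                        /* noflush suspend aborted, or a zoned write: fail it */
                        io->status = BLK_STS_IOERR;
                }
                spin_unlock_irqrestore(&md->deferred_lock, flags);
        }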
849 void disable_discard(struct mapped_device *md) in disable_discard() argument
851 struct queue_limits *limits = dm_get_queue_limits(md); in disable_discard()
855 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); in disable_discard()
858 void disable_write_same(struct mapped_device *md) in disable_write_same() argument
860 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_same()
866 void disable_write_zeroes(struct mapped_device *md) in disable_write_zeroes() argument
868 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_zeroes()
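The three disable_* helpers permanently switch off an operation the underlying device turned out not to support; clone_endio() below calls them on a not-supported completion. A plausible reconstruction of disable_discard(); its siblings zero max_write_same_sectors and max_write_zeroes_sectors the same way:

void disable_discard(struct mapped_device *md)
{
        struct queue_limits *limits = dm_get_queue_limits(md);

        /* the device does not really support DISCARD: disable it */
        limits->max_discard_sectors = 0;
        blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue);
}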
884 struct mapped_device *md = tio->io->md; in clone_endio() local
891 disable_discard(md); in clone_endio()
894 disable_write_same(md); in clone_endio()
897 disable_write_zeroes(md); in clone_endio()
912 if (WARN_ON_ONCE(dm_is_zone_write(md, bio))) in clone_endio()
929 struct mapped_device *md = io->md; in clone_endio() local
930 up(&md->swap_bios_semaphore); in clone_endio()
961 max_len = blk_max_size_offset(ti->table->md->queue, in max_io_len()
985 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target() argument
987 __acquires(md->io_barrier) in dm_dax_get_live_target()
992 map = dm_get_live_table(md, srcu_idx); in dm_dax_get_live_target()
1006 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access() local
1012 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_direct_access()
1025 dm_put_live_table(md, srcu_idx); in dm_dax_direct_access()
1033 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_supported() local
1038 map = dm_get_live_table(md, &srcu_idx); in dm_dax_supported()
1045 dm_put_live_table(md, srcu_idx); in dm_dax_supported()
1053 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_from_iter() local
1059 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_from_iter()
1069 dm_put_live_table(md, srcu_idx); in dm_dax_copy_from_iter()
1077 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_to_iter() local
1083 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_to_iter()
1093 dm_put_live_table(md, srcu_idx); in dm_dax_copy_to_iter()
1101 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_zero_page_range() local
1107 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_zero_page_range()
1120 dm_put_live_table(md, srcu_idx); in dm_dax_zero_page_range()
1170 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch) in __set_swap_bios_limit() argument
1172 mutex_lock(&md->swap_bios_lock); in __set_swap_bios_limit()
1173 while (latch < md->swap_bios) { in __set_swap_bios_limit()
1175 down(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1176 md->swap_bios--; in __set_swap_bios_limit()
1178 while (latch > md->swap_bios) { in __set_swap_bios_limit()
1180 up(&md->swap_bios_semaphore); in __set_swap_bios_limit()
1181 md->swap_bios++; in __set_swap_bios_limit()
1183 mutex_unlock(&md->swap_bios_lock); in __set_swap_bios_limit()
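__set_swap_bios_limit() reconciles the counting semaphore that throttles swap bios with a freshly read limit: shrinking absorbs slots with down(), growing releases them with up(). The unmatched lines inside the loops (1174 and 1179 in the source numbering) are plausibly cond_resched() calls; reconstruction:

static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
{
        mutex_lock(&md->swap_bios_lock);

        while (latch < md->swap_bios) {
                cond_resched();                 /* assumption: the elided line */
                down(&md->swap_bios_semaphore); /* shrink: absorb one slot */
                md->swap_bios--;
        }

        while (latch > md->swap_bios) {
                cond_resched();                 /* assumption: the elided line */
                up(&md->swap_bios_semaphore);   /* grow: release one slot */
                md->swap_bios++;
        }

        mutex_unlock(&md->swap_bios_lock);
}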
1206 struct mapped_device *md = io->md; in __map_bio() local
1208 if (unlikely(latch != md->swap_bios)) in __map_bio()
1209 __set_swap_bios_limit(md, latch); in __map_bio()
1210 down(&md->swap_bios_semaphore); in __map_bio()
1218 if (dm_emulate_zone_append(io->md)) in __map_bio()
1233 struct mapped_device *md = io->md; in __map_bio() local
1234 up(&md->swap_bios_semaphore); in __map_bio()
1241 struct mapped_device *md = io->md; in __map_bio() local
1242 up(&md->swap_bios_semaphore); in __map_bio()
1280 dm_device_name(tio->io->md), in clone_bio()
1319 mutex_lock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1328 mutex_unlock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1381 bio_set_dev(&flush_bio, ci->io->md->disk->part0); in __send_empty_flush()
1509 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, in init_clone_info() argument
1513 ci->io = alloc_io(md, bio); in init_clone_info()
1523 static blk_qc_t __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio() argument
1530 init_clone_info(&ci, md, map, bio); in __split_and_process_bio()
1553 GFP_NOIO, &md->queue->bio_split); in __split_and_process_bio()
1564 __dm_part_stat_sub(dm_disk(md)->part0, in __split_and_process_bio()
1581 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; in dm_submit_bio() local
1586 map = dm_get_live_table(md, &srcu_idx); in dm_submit_bio()
1589 dm_device_name(md)); in dm_submit_bio()
1595 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { in dm_submit_bio()
1601 queue_io(md, bio); in dm_submit_bio()
1612 ret = __split_and_process_bio(md, map, bio); in dm_submit_bio()
1614 dm_put_live_table(md, srcu_idx); in dm_submit_bio()
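dm_submit_bio() is the entry point for all bio-based I/O: take the live table, defer the bio if the device is suspending, otherwise split and map it. An abbreviated skeleton, with the NOWAIT/readahead special cases reduced to a comment:

static blk_qc_t dm_submit_bio(struct bio *bio)
{
        struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
        blk_qc_t ret = BLK_QC_T_NONE;
        int srcu_idx;
        struct dm_table *map;

        map = dm_get_live_table(md, &srcu_idx);
        if (unlikely(!map)) {
                DMERR_LIMIT("%s: mapping table unavailable, erroring io",
                            dm_device_name(md));
                bio_io_error(bio);
                goto out;
        }

        /* device is suspending: defer the bio for dm_wq_work() */
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
                /* (REQ_NOWAIT and REQ_RAHEAD bios are failed instead) */
                queue_io(md, bio);
                goto out;
        }

        ret = __split_and_process_bio(md, map, bio);
out:
        dm_put_live_table(md, srcu_idx);
        return ret;
}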
1686 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device() argument
1688 if (md->wq) in cleanup_mapped_device()
1689 destroy_workqueue(md->wq); in cleanup_mapped_device()
1690 bioset_exit(&md->bs); in cleanup_mapped_device()
1691 bioset_exit(&md->io_bs); in cleanup_mapped_device()
1693 if (md->dax_dev) { in cleanup_mapped_device()
1694 kill_dax(md->dax_dev); in cleanup_mapped_device()
1695 put_dax(md->dax_dev); in cleanup_mapped_device()
1696 md->dax_dev = NULL; in cleanup_mapped_device()
1699 if (md->disk) { in cleanup_mapped_device()
1701 md->disk->private_data = NULL; in cleanup_mapped_device()
1703 if (dm_get_md_type(md) != DM_TYPE_NONE) { in cleanup_mapped_device()
1704 dm_sysfs_exit(md); in cleanup_mapped_device()
1705 del_gendisk(md->disk); in cleanup_mapped_device()
1707 dm_queue_destroy_keyslot_manager(md->queue); in cleanup_mapped_device()
1708 blk_cleanup_disk(md->disk); in cleanup_mapped_device()
1711 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
1713 mutex_destroy(&md->suspend_lock); in cleanup_mapped_device()
1714 mutex_destroy(&md->type_lock); in cleanup_mapped_device()
1715 mutex_destroy(&md->table_devices_lock); in cleanup_mapped_device()
1716 mutex_destroy(&md->swap_bios_lock); in cleanup_mapped_device()
1718 dm_mq_cleanup_mapped_device(md); in cleanup_mapped_device()
1719 dm_cleanup_zoned_dev(md); in cleanup_mapped_device()
1728 struct mapped_device *md; in alloc_dev() local
1731 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); in alloc_dev()
1732 if (!md) { in alloc_dev()
1748 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
1752 md->numa_node_id = numa_node_id; in alloc_dev()
1753 md->init_tio_pdu = false; in alloc_dev()
1754 md->type = DM_TYPE_NONE; in alloc_dev()
1755 mutex_init(&md->suspend_lock); in alloc_dev()
1756 mutex_init(&md->type_lock); in alloc_dev()
1757 mutex_init(&md->table_devices_lock); in alloc_dev()
1758 spin_lock_init(&md->deferred_lock); in alloc_dev()
1759 atomic_set(&md->holders, 1); in alloc_dev()
1760 atomic_set(&md->open_count, 0); in alloc_dev()
1761 atomic_set(&md->event_nr, 0); in alloc_dev()
1762 atomic_set(&md->uevent_seq, 0); in alloc_dev()
1763 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
1764 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
1765 spin_lock_init(&md->uevent_lock); in alloc_dev()
1772 md->disk = blk_alloc_disk(md->numa_node_id); in alloc_dev()
1773 if (!md->disk) in alloc_dev()
1775 md->queue = md->disk->queue; in alloc_dev()
1777 init_waitqueue_head(&md->wait); in alloc_dev()
1778 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
1779 init_waitqueue_head(&md->eventq); in alloc_dev()
1780 init_completion(&md->kobj_holder.completion); in alloc_dev()
1782 md->swap_bios = get_swap_bios(); in alloc_dev()
1783 sema_init(&md->swap_bios_semaphore, md->swap_bios); in alloc_dev()
1784 mutex_init(&md->swap_bios_lock); in alloc_dev()
1786 md->disk->major = _major; in alloc_dev()
1787 md->disk->first_minor = minor; in alloc_dev()
1788 md->disk->minors = 1; in alloc_dev()
1789 md->disk->fops = &dm_blk_dops; in alloc_dev()
1790 md->disk->queue = md->queue; in alloc_dev()
1791 md->disk->private_data = md; in alloc_dev()
1792 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
1795 md->dax_dev = alloc_dax(md, md->disk->disk_name, in alloc_dev()
1797 if (IS_ERR(md->dax_dev)) in alloc_dev()
1801 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
1803 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); in alloc_dev()
1804 if (!md->wq) in alloc_dev()
1807 dm_stats_init(&md->stats); in alloc_dev()
1811 old_md = idr_replace(&_minor_idr, md, minor); in alloc_dev()
1816 return md; in alloc_dev()
1819 cleanup_mapped_device(md); in alloc_dev()
1825 kvfree(md); in alloc_dev()
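alloc_dev() follows the usual goto-unwind idiom: any failure after the kvzalloc jumps to a cleanup label whose path ultimately lands in kvfree(md). Schematic only; the label names are assumptions:

        md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
        if (!md->wq)
                goto bad;
        /* ... remaining setup ... */
        return md;

bad:
        cleanup_mapped_device(md);      /* tears down whatever was built */
        /* ... release the minor, drop the module reference ... */
        kvfree(md);
        return NULL;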
1829 static void unlock_fs(struct mapped_device *md);
1831 static void free_dev(struct mapped_device *md) in free_dev() argument
1833 int minor = MINOR(disk_devt(md->disk)); in free_dev()
1835 unlock_fs(md); in free_dev()
1837 cleanup_mapped_device(md); in free_dev()
1839 free_table_devices(&md->table_devices); in free_dev()
1840 dm_stats_cleanup(&md->stats); in free_dev()
1844 kvfree(md); in free_dev()
1847 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools() argument
1858 bioset_exit(&md->bs); in __bind_mempools()
1859 bioset_exit(&md->io_bs); in __bind_mempools()
1861 } else if (bioset_initialized(&md->bs)) { in __bind_mempools()
1874 bioset_initialized(&md->bs) || in __bind_mempools()
1875 bioset_initialized(&md->io_bs)); in __bind_mempools()
1877 ret = bioset_init_from_src(&md->bs, &p->bs); in __bind_mempools()
1880 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); in __bind_mempools()
1882 bioset_exit(&md->bs); in __bind_mempools()
1896 struct mapped_device *md = (struct mapped_device *) context; in event_callback() local
1898 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
1899 list_splice_init(&md->uevent_list, &uevents); in event_callback()
1900 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
1902 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
1904 atomic_inc(&md->event_nr); in event_callback()
1905 wake_up(&md->eventq); in event_callback()
1912 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind() argument
1916 struct request_queue *q = md->queue; in __bind()
1921 lockdep_assert_held(&md->suspend_lock); in __bind()
1928 if (size != dm_get_size(md)) in __bind()
1929 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
1931 if (!get_capacity(md->disk)) in __bind()
1932 set_capacity(md->disk, size); in __bind()
1934 set_capacity_and_notify(md->disk, size); in __bind()
1936 dm_table_event_callback(t, event_callback, md); in __bind()
1953 md->immutable_target = dm_table_get_immutable_target(t); in __bind()
1956 ret = __bind_mempools(md, t); in __bind()
1968 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
1969 rcu_assign_pointer(md->map, (void *)t); in __bind()
1970 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
1973 dm_sync_table(md); in __bind()
1982 static struct dm_table *__unbind(struct mapped_device *md) in __unbind() argument
1984 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
1990 RCU_INIT_POINTER(md->map, NULL); in __unbind()
1991 dm_sync_table(md); in __unbind()
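__bind()/__unbind() implement the publish/retire protocol for md->map: the pointer is replaced under md->suspend_lock with rcu_assign_pointer(), then dm_sync_table() waits out any SRCU readers still using the old table. Writer-side sketch:

        old_map = rcu_dereference_protected(md->map,
                                            lockdep_is_held(&md->suspend_lock));
        rcu_assign_pointer(md->map, (void *)t); /* publish the new table */
        if (old_map)
                dm_sync_table(md);              /* wait for readers of old_map */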
2001 struct mapped_device *md; in dm_create() local
2003 md = alloc_dev(minor); in dm_create()
2004 if (!md) in dm_create()
2007 dm_ima_reset_data(md); in dm_create()
2009 *result = md; in dm_create()
2017 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type() argument
2019 mutex_lock(&md->type_lock); in dm_lock_md_type()
2022 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type() argument
2024 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2027 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) in dm_set_md_type() argument
2029 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_set_md_type()
2030 md->type = type; in dm_set_md_type()
2033 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type() argument
2035 return md->type; in dm_get_md_type()
2038 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type() argument
2040 return md->immutable_target_type; in dm_get_immutable_target_type()
2047 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits() argument
2049 BUG_ON(!atomic_read(&md->holders)); in dm_get_queue_limits()
2050 return &md->queue->limits; in dm_get_queue_limits()
2057 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue() argument
2065 md->disk->fops = &dm_rq_blk_dops; in dm_setup_md_queue()
2066 r = dm_mq_init_request_queue(md, t); in dm_setup_md_queue()
2085 r = dm_table_set_restrictions(t, md->queue, &limits); in dm_setup_md_queue()
2089 add_disk(md->disk); in dm_setup_md_queue()
2091 r = dm_sysfs_init(md); in dm_setup_md_queue()
2093 del_gendisk(md->disk); in dm_setup_md_queue()
2096 md->type = type; in dm_setup_md_queue()
2102 struct mapped_device *md; in dm_get_md() local
2110 md = idr_find(&_minor_idr, minor); in dm_get_md()
2111 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || in dm_get_md()
2112 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_md()
2113 md = NULL; in dm_get_md()
2116 dm_get(md); in dm_get_md()
2120 return md; in dm_get_md()
2124 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr() argument
2126 return md->interface_ptr; in dm_get_mdptr()
2129 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr() argument
2131 md->interface_ptr = ptr; in dm_set_mdptr()
2134 void dm_get(struct mapped_device *md) in dm_get() argument
2136 atomic_inc(&md->holders); in dm_get()
2137 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2140 int dm_hold(struct mapped_device *md) in dm_hold() argument
2143 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2147 dm_get(md); in dm_hold()
2153 const char *dm_device_name(struct mapped_device *md) in dm_device_name() argument
2155 return md->name; in dm_device_name()
2159 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy() argument
2167 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); in __dm_destroy()
2168 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2171 blk_set_queue_dying(md->queue); in __dm_destroy()
2177 mutex_lock(&md->suspend_lock); in __dm_destroy()
2178 map = dm_get_live_table(md, &srcu_idx); in __dm_destroy()
2179 if (!dm_suspended_md(md)) { in __dm_destroy()
2181 set_bit(DMF_SUSPENDED, &md->flags); in __dm_destroy()
2182 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_destroy()
2186 dm_put_live_table(md, srcu_idx); in __dm_destroy()
2187 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2196 while (atomic_read(&md->holders)) in __dm_destroy()
2198 else if (atomic_read(&md->holders)) in __dm_destroy()
2200 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2202 dm_table_destroy(__unbind(md)); in __dm_destroy()
2203 free_dev(md); in __dm_destroy()
2206 void dm_destroy(struct mapped_device *md) in dm_destroy() argument
2208 __dm_destroy(md, true); in dm_destroy()
2211 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate() argument
2213 __dm_destroy(md, false); in dm_destroy_immediate()
2216 void dm_put(struct mapped_device *md) in dm_put() argument
2218 atomic_dec(&md->holders); in dm_put()
2222 static bool md_in_flight_bios(struct mapped_device *md) in md_in_flight_bios() argument
2225 struct block_device *part = dm_disk(md)->part0; in md_in_flight_bios()
2236 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state) in dm_wait_for_bios_completion() argument
2242 prepare_to_wait(&md->wait, &wait, task_state); in dm_wait_for_bios_completion()
2244 if (!md_in_flight_bios(md)) in dm_wait_for_bios_completion()
2254 finish_wait(&md->wait, &wait); in dm_wait_for_bios_completion()
2259 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state) in dm_wait_for_completion() argument
2263 if (!queue_is_mq(md->queue)) in dm_wait_for_completion()
2264 return dm_wait_for_bios_completion(md, task_state); in dm_wait_for_completion()
2267 if (!blk_mq_queue_inflight(md->queue)) in dm_wait_for_completion()
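dm_wait_for_bios_completion() is the waiter side of the wake_up(&md->wait) in end_io_acct() above. A plausible reconstruction; the signal handling and the io_schedule() call are assumptions consistent with the task_state parameter:

static int dm_wait_for_bios_completion(struct mapped_device *md,
                                       unsigned int task_state)
{
        int r = 0;
        DEFINE_WAIT(wait);

        while (true) {
                prepare_to_wait(&md->wait, &wait, task_state);

                if (!md_in_flight_bios(md))
                        break;

                if (signal_pending_state(task_state, current)) {
                        r = -EINTR;     /* assumption: interruptible waits bail out */
                        break;
                }

                io_schedule();
        }
        finish_wait(&md->wait, &wait);

        return r;
}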
2286 struct mapped_device *md = container_of(work, struct mapped_device, work); in dm_wq_work() local
2289 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2290 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2291 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2292 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
2301 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush() argument
2303 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2305 queue_work(md->wq, &md->work); in dm_queue_flush()
2311 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table() argument
2317 mutex_lock(&md->suspend_lock); in dm_swap_table()
2320 if (!dm_suspended_md(md)) in dm_swap_table()
2330 live_map = dm_get_live_table_fast(md); in dm_swap_table()
2332 limits = md->queue->limits; in dm_swap_table()
2333 dm_put_live_table_fast(md); in dm_swap_table()
2344 map = __bind(md, table, &limits); in dm_swap_table()
2348 mutex_unlock(&md->suspend_lock); in dm_swap_table()
2356 static int lock_fs(struct mapped_device *md) in lock_fs() argument
2360 WARN_ON(test_bit(DMF_FROZEN, &md->flags)); in lock_fs()
2362 r = freeze_bdev(md->disk->part0); in lock_fs()
2364 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
2368 static void unlock_fs(struct mapped_device *md) in unlock_fs() argument
2370 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
2372 thaw_bdev(md->disk->part0); in unlock_fs()
2373 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
2385 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend() argument
2393 lockdep_assert_held(&md->suspend_lock); in __dm_suspend()
2400 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2402 DMDEBUG("%s: suspending with flush", dm_device_name(md)); in __dm_suspend()
2417 r = lock_fs(md); in __dm_suspend()
2435 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
2437 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2443 if (dm_request_based(md)) in __dm_suspend()
2444 dm_stop_queue(md->queue); in __dm_suspend()
2446 flush_workqueue(md->wq); in __dm_suspend()
2453 r = dm_wait_for_completion(md, task_state); in __dm_suspend()
2455 set_bit(dmf_suspended_flag, &md->flags); in __dm_suspend()
2458 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2460 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2464 dm_queue_flush(md); in __dm_suspend()
2466 if (dm_request_based(md)) in __dm_suspend()
2467 dm_start_queue(md->queue); in __dm_suspend()
2469 unlock_fs(md); in __dm_suspend()
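__dm_suspend() is easiest to read as an ordered sequence; a commented outline of what the matched lines do, with the failure path inferred from the cleanup lines at the end:

/*
 * 1. Noflush suspend: set DMF_NOFLUSH_SUSPENDING so targets can push
 *    back I/O with BLK_STS_DM_REQUEUE (see dm_io_dec_pending()).
 * 2. Flush suspend: lock_fs() freezes the filesystem on top of us.
 * 3. Set DMF_BLOCK_IO_FOR_SUSPEND and synchronize_srcu(&md->io_barrier)
 *    so dm_submit_bio() starts deferring new bios.
 * 4. Request-based: dm_stop_queue() quiesces the blk-mq queue.
 * 5. flush_workqueue(md->wq), then dm_wait_for_completion() drains
 *    everything already in flight.
 * 6. Success: set the suspended flag. Failure: clear the noflush bit,
 *    dm_queue_flush() to replay deferred bios, restart the queue,
 *    unlock_fs().
 */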
2493 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend() argument
2499 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
2501 if (dm_suspended_md(md)) { in dm_suspend()
2506 if (dm_suspended_internally_md(md)) { in dm_suspend()
2508 mutex_unlock(&md->suspend_lock); in dm_suspend()
2509 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
2515 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
2517 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); in dm_suspend()
2521 set_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2523 clear_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2526 mutex_unlock(&md->suspend_lock); in dm_suspend()
2530 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume() argument
2538 dm_queue_flush(md); in __dm_resume()
2545 if (dm_request_based(md)) in __dm_resume()
2546 dm_start_queue(md->queue); in __dm_resume()
2548 unlock_fs(md); in __dm_resume()
2553 int dm_resume(struct mapped_device *md) in dm_resume() argument
2560 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
2562 if (!dm_suspended_md(md)) in dm_resume()
2565 if (dm_suspended_internally_md(md)) { in dm_resume()
2567 mutex_unlock(&md->suspend_lock); in dm_resume()
2568 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
2574 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
2578 r = __dm_resume(md, map); in dm_resume()
2582 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
2584 mutex_unlock(&md->suspend_lock); in dm_resume()
2595 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend() argument
2599 lockdep_assert_held(&md->suspend_lock); in __dm_internal_suspend()
2601 if (md->internal_suspend_count++) in __dm_internal_suspend()
2604 if (dm_suspended_md(md)) { in __dm_internal_suspend()
2605 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
2609 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
2617 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, in __dm_internal_suspend()
2620 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2622 clear_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2625 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume() argument
2627 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
2629 if (--md->internal_suspend_count) in __dm_internal_resume()
2632 if (dm_suspended_md(md)) in __dm_internal_resume()
2639 (void) __dm_resume(md, NULL); in __dm_internal_resume()
2642 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
2644 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
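This resume tail pairs with the wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, ...) calls in dm_suspend() and dm_resume() above. The unmatched line between clear_bit() and wake_up_bit() (2643 in the source numbering) is plausibly a memory barrier:

        clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
        smp_mb__after_atomic(); /* assumption: order the clear before the wakeup */
        wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);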
2647 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush() argument
2649 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
2650 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); in dm_internal_suspend_noflush()
2651 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
2655 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume() argument
2657 mutex_lock(&md->suspend_lock); in dm_internal_resume()
2658 __dm_internal_resume(md); in dm_internal_resume()
2659 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
2668 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast() argument
2670 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
2671 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_suspend_fast()
2674 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
2675 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
2676 flush_workqueue(md->wq); in dm_internal_suspend_fast()
2677 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); in dm_internal_suspend_fast()
2681 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast() argument
2683 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_resume_fast()
2686 dm_queue_flush(md); in dm_internal_resume_fast()
2689 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
2696 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent() argument
2707 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); in dm_kobject_uevent()
2711 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, in dm_kobject_uevent()
2720 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq() argument
2722 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
2725 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr() argument
2727 return atomic_read(&md->event_nr); in dm_get_event_nr()
2730 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event() argument
2732 return wait_event_interruptible(md->eventq, in dm_wait_event()
2733 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
2736 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add() argument
2740 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
2741 list_add(elist, &md->uevent_list); in dm_uevent_add()
2742 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
2749 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk() argument
2751 return md->disk; in dm_disk()
2755 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject() argument
2757 return &md->kobj_holder.kobj; in dm_kobject()
2762 struct mapped_device *md; in dm_get_from_kobject() local
2764 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
2767 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_from_kobject()
2768 md = NULL; in dm_get_from_kobject()
2771 dm_get(md); in dm_get_from_kobject()
2775 return md; in dm_get_from_kobject()
2778 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md() argument
2780 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
2783 static int dm_post_suspending_md(struct mapped_device *md) in dm_post_suspending_md() argument
2785 return test_bit(DMF_POST_SUSPENDING, &md->flags); in dm_post_suspending_md()
2788 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md() argument
2790 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
2793 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag() argument
2795 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
2800 return dm_suspended_md(ti->table->md); in dm_suspended()
2806 return dm_post_suspending_md(ti->table->md); in dm_post_suspending()
2812 return __noflush_suspending(ti->table->md); in dm_noflush_suspending()
2816 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, in dm_alloc_md_mempools() argument
2820 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_alloc_md_mempools()
2885 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr() local
2890 table = dm_get_live_table(md, &srcu_idx); in dm_call_pr()
2905 dm_put_live_table(md, srcu_idx); in dm_call_pr()
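dm_call_pr() routes persistent-reservation commands to the one underlying device: PR ops are only honoured when the live table has exactly one target implementing ->iterate_devices. A plausible reconstruction:

static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
                      void *data)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *table;
        struct dm_target *ti;
        int ret = -ENOTTY, srcu_idx;

        table = dm_get_live_table(md, &srcu_idx);
        if (!table || !dm_table_get_size(table))
                goto out;

        /* we only support devices that have a single target */
        if (dm_table_get_num_targets(table) != 1)
                goto out;
        ti = dm_table_get_target(table, 0);

        ret = -EINVAL;
        if (!ti->type->iterate_devices)
                goto out;

        ret = ti->type->iterate_devices(ti, fn, data);
out:
        dm_put_live_table(md, srcu_idx);
        return ret;
}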
2950 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve() local
2954 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_reserve()
2964 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_reserve()
2970 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release() local
2974 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_release()
2984 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_release()
2991 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt() local
2995 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_preempt()
3005 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_preempt()
3011 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear() local
3015 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_clear()
3025 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_clear()