Lines Matching refs:md

97 	struct mapped_device *md;  member
328 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
330 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
335 struct mapped_device *md; in dm_blk_open() local
339 md = bdev->bd_disk->private_data; in dm_blk_open()
340 if (!md) in dm_blk_open()
343 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
344 dm_deleting_md(md)) { in dm_blk_open()
345 md = NULL; in dm_blk_open()
349 dm_get(md); in dm_blk_open()
350 atomic_inc(&md->open_count); in dm_blk_open()
354 return md ? 0 : -ENXIO; in dm_blk_open()
359 struct mapped_device *md; in dm_blk_close() local
363 md = disk->private_data; in dm_blk_close()
364 if (WARN_ON(!md)) in dm_blk_close()
367 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
368 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
371 dm_put(md); in dm_blk_close()
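
The open path above must not race with device teardown: the DMF_FREEING/DMF_DELETING check, the holder reference and the open counter all have to happen atomically with respect to freeing. A minimal reconstruction of dm_blk_open() from the fragments above; the serializing lock is elided by this listing (dm.c is assumed to use a global _minor_lock spinlock for it):

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);                /* assumed: serializes open vs. free */

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        /* Refuse a device that is being freed or deleted. */
        if (test_bit(DMF_FREEING, &md->flags) ||
            dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }

        dm_get(md);                             /* holder ref, dropped in dm_blk_close() */
        atomic_inc(&md->open_count);            /* a nonzero open_count defers removal */
out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}

On the close side, the last closer (atomic_dec_and_test() above) with DMF_DEFERRED_REMOVE set is what finally triggers the removal that dm_lock_for_deletion() deferred.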
376 int dm_open_count(struct mapped_device *md) in dm_open_count() argument
378 return atomic_read(&md->open_count); in dm_open_count()
384 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion() argument
390 if (dm_open_count(md)) { in dm_lock_for_deletion()
393 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
394 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
397 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
404 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove() argument
410 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
413 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
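
A plausible reconstruction of the deletion handshake in dm_lock_for_deletion(); the spinlock and the exact error codes (-EBUSY, -EEXIST) are assumptions not shown in this listing:

int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md)) {
                /* Still open: either defer the remove or report busy. */
                if (mark_deferred)
                        set_bit(DMF_DEFERRED_REMOVE, &md->flags);
                else
                        r = -EBUSY;
        } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) {
                r = -EEXIST;
        } else {
                set_bit(DMF_DELETING, &md->flags);
        }

        spin_unlock(&_minor_lock);

        return r;
}

Once DMF_DELETING is set, dm_blk_open() and dm_get_md() refuse new references, so teardown can proceed safely; dm_cancel_deferred_remove() clears the deferred flag unless deletion is already underway.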
427 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo() local
429 return dm_get_geometry(md, geo); in dm_blk_getgeo()
466 struct mapped_device *md = disk->private_data; in dm_blk_report_zones() local
475 if (dm_suspended_md(md)) in dm_blk_report_zones()
478 map = dm_get_live_table(md, &srcu_idx); in dm_blk_report_zones()
503 dm_put_live_table(md, srcu_idx); in dm_blk_report_zones()
510 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl() argument
519 map = dm_get_live_table(md, srcu_idx); in dm_prepare_ioctl()
531 if (dm_suspended_md(md)) in dm_prepare_ioctl()
536 dm_put_live_table(md, *srcu_idx); in dm_prepare_ioctl()
544 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl() argument
546 dm_put_live_table(md, srcu_idx); in dm_unprepare_ioctl()
552 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl() local
555 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_blk_ioctl()
575 dm_unprepare_ioctl(md, srcu_idx); in dm_blk_ioctl()
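
dm_prepare_ioctl() and dm_unprepare_ioctl() bracket every command forwarded to the single underlying device: prepare pins the live table via SRCU and redirects *bdev to the target's device, unprepare drops the SRCU reference. The shared caller shape, sketched (some_passthrough() is a hypothetical placeholder; the persistent-reservation handlers near the end of this listing use exactly this pattern):

        struct mapped_device *md = bdev->bd_disk->private_data;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        r = some_passthrough(bdev /* , ... */);  /* hypothetical forwarded command */
out:
        dm_unprepare_ioctl(md, srcu_idx);       /* always drops the SRCU read side */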
590 struct mapped_device *md = io->md; in start_io_acct() local
594 if (unlikely(dm_stats_used(&md->stats))) in start_io_acct()
595 dm_stats_account_io(&md->stats, bio_data_dir(bio), in start_io_acct()
602 struct mapped_device *md = io->md; in end_io_acct() local
608 if (unlikely(dm_stats_used(&md->stats))) in end_io_acct()
609 dm_stats_account_io(&md->stats, bio_data_dir(bio), in end_io_acct()
614 if (unlikely(wq_has_sleeper(&md->wait))) in end_io_acct()
615 wake_up(&md->wait); in end_io_acct()
618 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io() argument
624 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); in alloc_io()
637 io->md = md; in alloc_io()
645 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io() argument
659 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); in alloc_tio()
685 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
689 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
690 bio_list_add(&md->deferred, bio); in queue_io()
691 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
692 queue_work(md->wq, &md->work); in queue_io()
700 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table() argument
702 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
704 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
707 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table() argument
709 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
712 void dm_sync_table(struct mapped_device *md) in dm_sync_table() argument
714 synchronize_srcu(&md->io_barrier); in dm_sync_table()
722 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast() argument
725 return rcu_dereference(md->map); in dm_get_live_table_fast()
728 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast() argument
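
These four helpers implement the table-access protocol: a reader pins the live table between dm_get_live_table() and dm_put_live_table() (SRCU, may sleep), while dm_get_live_table_fast()/dm_put_live_table_fast() use plain RCU for short non-blocking peeks; a writer replacing the table calls dm_sync_table() before freeing the old one. Reader-side sketch:

static int dm_example_reader(struct mapped_device *md)
{
        struct dm_table *map;
        int srcu_idx;
        int r = -EIO;

        map = dm_get_live_table(md, &srcu_idx);
        if (map) {
                /* map cannot be freed until the matching put below */
                r = 0;
        }
        dm_put_live_table(md, srcu_idx);
        return r;
}

dm_blk_report_zones(), the DAX ops and dm_submit_bio() in this listing all follow this shape.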
739 struct mapped_device *md) in open_table_device() argument
751 r = bd_link_disk_holder(bdev, dm_disk(md)); in open_table_device()
765 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device() argument
770 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); in close_table_device()
789 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device() argument
795 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
796 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
798 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); in dm_get_table_device()
800 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
807 if ((r = open_table_device(td, dev, md))) { in dm_get_table_device()
808 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
816 list_add(&td->list, &md->table_devices); in dm_get_table_device()
820 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
827 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device() argument
831 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
833 close_table_device(td, md); in dm_put_table_device()
837 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
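
The table_devices list keeps one refcounted open per underlying device per mapped_device, linking the dm disk as a holder (bd_link_disk_holder() above). A hedged usage sketch, assuming the truncated fourth parameter of dm_get_table_device() is struct dm_dev ** (targets normally reach this through dm_get_device()/dm_put_device()):

static int dm_example_pin_dev(struct mapped_device *md, dev_t dev)
{
        struct dm_dev *dd;
        int r;

        r = dm_get_table_device(md, dev, FMODE_READ | FMODE_WRITE, &dd);
        if (r)
                return r;

        /* ... issue I/O against dd->bdev for the table's lifetime ... */

        dm_put_table_device(md, dd);
        return 0;
}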
857 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry() argument
859 *geo = md->geometry; in dm_get_geometry()
867 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry() argument
876 md->geometry = *geo; in dm_set_geometry()
881 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending() argument
883 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
895 struct mapped_device *md = io->md; in dec_pending() local
900 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) in dec_pending()
910 spin_lock_irqsave(&md->deferred_lock, flags); in dec_pending()
911 if (__noflush_suspending(md)) in dec_pending()
913 bio_list_add_head(&md->deferred, io->orig_bio); in dec_pending()
917 spin_unlock_irqrestore(&md->deferred_lock, flags); in dec_pending()
923 free_io(md, io); in dec_pending()
934 queue_io(md, bio); in dec_pending()
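
The requeue logic in dec_pending() deserves spelling out: BLK_STS_DM_REQUEUE during a noflush suspend means "push this bio back", so it is re-queued at the head of md->deferred and replayed on resume; without a noflush suspend in progress the requeue degrades to an I/O error. A condensed sketch of that branch (the io_count refcounting and the flush special case are elided; the BLK_STS_IOERR fallback is an assumption):

        /* last reference on the dm_io dropped: decide the bio's fate */
        if (io->status == BLK_STS_DM_REQUEUE) {
                spin_lock_irqsave(&md->deferred_lock, flags);
                if (__noflush_suspending(md))
                        /* replay after resume, ahead of newly deferred bios */
                        bio_list_add_head(&md->deferred, io->orig_bio);
                else
                        /* noflush suspend was interrupted: fail the I/O */
                        io->status = BLK_STS_IOERR;
                spin_unlock_irqrestore(&md->deferred_lock, flags);
        }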
944 void disable_discard(struct mapped_device *md) in disable_discard() argument
946 struct queue_limits *limits = dm_get_queue_limits(md); in disable_discard()
950 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); in disable_discard()
953 void disable_write_same(struct mapped_device *md) in disable_write_same() argument
955 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_same()
961 void disable_write_zeroes(struct mapped_device *md) in disable_write_zeroes() argument
963 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_zeroes()
974 struct mapped_device *md = tio->io->md; in clone_endio() local
981 disable_discard(md); in clone_endio()
984 disable_write_same(md); in clone_endio()
987 disable_write_zeroes(md); in clone_endio()
1047 max_len = blk_max_size_offset(ti->table->md->queue, in max_io_len()
1071 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target() argument
1073 __acquires(md->io_barrier) in dm_dax_get_live_target()
1078 map = dm_get_live_table(md, srcu_idx); in dm_dax_get_live_target()
1092 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access() local
1098 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_direct_access()
1111 dm_put_live_table(md, srcu_idx); in dm_dax_direct_access()
1119 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_supported() local
1124 map = dm_get_live_table(md, &srcu_idx); in dm_dax_supported()
1131 dm_put_live_table(md, srcu_idx); in dm_dax_supported()
1139 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_from_iter() local
1145 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_from_iter()
1155 dm_put_live_table(md, srcu_idx); in dm_dax_copy_from_iter()
1163 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_to_iter() local
1169 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_to_iter()
1179 dm_put_live_table(md, srcu_idx); in dm_dax_copy_to_iter()
1187 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_zero_page_range() local
1193 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_zero_page_range()
1206 dm_put_live_table(md, srcu_idx); in dm_dax_zero_page_range()
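
Every DAX entry point above follows one shape: translate the page offset to a sector, look up the live target covering that sector, call the target's hook, drop the table. A reconstruction of dm_dax_direct_access() in that shape (the real code also clamps nr_pages against max_io_len()):

static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                                 long nr_pages, void **kaddr, pfn_t *pfn)
{
        struct mapped_device *md = dax_get_private(dax_dev);
        sector_t sector = pgoff * (PAGE_SIZE >> SECTOR_SHIFT);
        struct dm_target *ti;
        long ret = -EIO;
        int srcu_idx;

        ti = dm_dax_get_live_target(md, sector, &srcu_idx);
        if (!ti || !ti->type->direct_access)
                goto out;

        ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
out:
        dm_put_live_table(md, srcu_idx);
        return ret;
}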
1322 dm_device_name(tio->io->md), in clone_bio()
1361 mutex_lock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1370 mutex_unlock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1433 bio_set_dev(ci->bio, ci->io->md->bdev); in __send_empty_flush()
1558 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, in init_clone_info() argument
1562 ci->io = alloc_io(md, bio); in init_clone_info()
1572 static blk_qc_t __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio() argument
1579 init_clone_info(&ci, md, map, bio); in __split_and_process_bio()
1603 GFP_NOIO, &md->queue->bio_split); in __split_and_process_bio()
1614 __dm_part_stat_sub(&dm_disk(md)->part0, in __split_and_process_bio()
1619 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); in __split_and_process_bio()
1633 struct mapped_device *md = bio->bi_disk->private_data; in dm_submit_bio() local
1638 map = dm_get_live_table(md, &srcu_idx); in dm_submit_bio()
1641 dm_device_name(md)); in dm_submit_bio()
1647 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { in dm_submit_bio()
1653 queue_io(md, bio); in dm_submit_bio()
1664 ret = __split_and_process_bio(md, map, bio); in dm_submit_bio()
1666 dm_put_live_table(md, srcu_idx); in dm_submit_bio()
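
dm_submit_bio() ties the pieces together: pin the table, error the bio if there is none, defer it via queue_io() while DMF_BLOCK_IO_FOR_SUSPEND is set, otherwise split and map it. A condensed reconstruction (the REQ_NOWAIT/REQ_RAHEAD special cases and abnormal-I/O splitting are elided):

static blk_qc_t dm_submit_bio(struct bio *bio)
{
        struct mapped_device *md = bio->bi_disk->private_data;
        blk_qc_t ret = BLK_QC_T_NONE;
        struct dm_table *map;
        int srcu_idx;

        map = dm_get_live_table(md, &srcu_idx);
        if (unlikely(!map)) {
                DMERR_LIMIT("%s: mapping table unavailable, erroring io",
                            dm_device_name(md));
                bio_io_error(bio);
                goto out;
        }

        /* Suspended: park the bio on md->deferred for dm_wq_work() to replay. */
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
                queue_io(md, bio);
                goto out;
        }

        ret = __split_and_process_bio(md, map, bio);
out:
        dm_put_live_table(md, srcu_idx);
        return ret;
}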
1725 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device() argument
1727 if (md->wq) in cleanup_mapped_device()
1728 destroy_workqueue(md->wq); in cleanup_mapped_device()
1729 bioset_exit(&md->bs); in cleanup_mapped_device()
1730 bioset_exit(&md->io_bs); in cleanup_mapped_device()
1732 if (md->dax_dev) { in cleanup_mapped_device()
1733 kill_dax(md->dax_dev); in cleanup_mapped_device()
1734 put_dax(md->dax_dev); in cleanup_mapped_device()
1735 md->dax_dev = NULL; in cleanup_mapped_device()
1738 if (md->disk) { in cleanup_mapped_device()
1740 md->disk->private_data = NULL; in cleanup_mapped_device()
1742 del_gendisk(md->disk); in cleanup_mapped_device()
1743 put_disk(md->disk); in cleanup_mapped_device()
1746 if (md->queue) in cleanup_mapped_device()
1747 blk_cleanup_queue(md->queue); in cleanup_mapped_device()
1749 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
1751 if (md->bdev) { in cleanup_mapped_device()
1752 bdput(md->bdev); in cleanup_mapped_device()
1753 md->bdev = NULL; in cleanup_mapped_device()
1756 mutex_destroy(&md->suspend_lock); in cleanup_mapped_device()
1757 mutex_destroy(&md->type_lock); in cleanup_mapped_device()
1758 mutex_destroy(&md->table_devices_lock); in cleanup_mapped_device()
1760 dm_mq_cleanup_mapped_device(md); in cleanup_mapped_device()
1769 struct mapped_device *md; in alloc_dev() local
1772 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); in alloc_dev()
1773 if (!md) { in alloc_dev()
1789 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
1793 md->numa_node_id = numa_node_id; in alloc_dev()
1794 md->init_tio_pdu = false; in alloc_dev()
1795 md->type = DM_TYPE_NONE; in alloc_dev()
1796 mutex_init(&md->suspend_lock); in alloc_dev()
1797 mutex_init(&md->type_lock); in alloc_dev()
1798 mutex_init(&md->table_devices_lock); in alloc_dev()
1799 spin_lock_init(&md->deferred_lock); in alloc_dev()
1800 atomic_set(&md->holders, 1); in alloc_dev()
1801 atomic_set(&md->open_count, 0); in alloc_dev()
1802 atomic_set(&md->event_nr, 0); in alloc_dev()
1803 atomic_set(&md->uevent_seq, 0); in alloc_dev()
1804 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
1805 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
1806 spin_lock_init(&md->uevent_lock); in alloc_dev()
1813 md->queue = blk_alloc_queue(numa_node_id); in alloc_dev()
1814 if (!md->queue) in alloc_dev()
1817 md->disk = alloc_disk_node(1, md->numa_node_id); in alloc_dev()
1818 if (!md->disk) in alloc_dev()
1821 init_waitqueue_head(&md->wait); in alloc_dev()
1822 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
1823 init_waitqueue_head(&md->eventq); in alloc_dev()
1824 init_completion(&md->kobj_holder.completion); in alloc_dev()
1826 md->disk->major = _major; in alloc_dev()
1827 md->disk->first_minor = minor; in alloc_dev()
1828 md->disk->fops = &dm_blk_dops; in alloc_dev()
1829 md->disk->queue = md->queue; in alloc_dev()
1830 md->disk->private_data = md; in alloc_dev()
1831 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
1834 md->dax_dev = alloc_dax(md, md->disk->disk_name, in alloc_dev()
1836 if (IS_ERR(md->dax_dev)) in alloc_dev()
1840 add_disk_no_queue_reg(md->disk); in alloc_dev()
1841 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
1843 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); in alloc_dev()
1844 if (!md->wq) in alloc_dev()
1847 md->bdev = bdget_disk(md->disk, 0); in alloc_dev()
1848 if (!md->bdev) in alloc_dev()
1851 dm_stats_init(&md->stats); in alloc_dev()
1855 old_md = idr_replace(&_minor_idr, md, minor); in alloc_dev()
1860 return md; in alloc_dev()
1863 cleanup_mapped_device(md); in alloc_dev()
1869 kvfree(md); in alloc_dev()
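
alloc_dev() unwinds failures with the usual goto ladder; each label undoes only what had already succeeded. The labels themselves are elided by this listing, so the following tail is an assumed shape (free_minor() is the presumed counterpart of the minor allocation):

bad:
        cleanup_mapped_device(md);
bad_io_barrier:
        free_minor(minor);
bad_minor:
        module_put(THIS_MODULE);
bad_module_get:
        kvfree(md);
        return NULL;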
1873 static void unlock_fs(struct mapped_device *md);
1875 static void free_dev(struct mapped_device *md) in free_dev() argument
1877 int minor = MINOR(disk_devt(md->disk)); in free_dev()
1879 unlock_fs(md); in free_dev()
1881 cleanup_mapped_device(md); in free_dev()
1883 free_table_devices(&md->table_devices); in free_dev()
1884 dm_stats_cleanup(&md->stats); in free_dev()
1888 kvfree(md); in free_dev()
1891 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools() argument
1902 bioset_exit(&md->bs); in __bind_mempools()
1903 bioset_exit(&md->io_bs); in __bind_mempools()
1905 } else if (bioset_initialized(&md->bs)) { in __bind_mempools()
1918 bioset_initialized(&md->bs) || in __bind_mempools()
1919 bioset_initialized(&md->io_bs)); in __bind_mempools()
1921 ret = bioset_init_from_src(&md->bs, &p->bs); in __bind_mempools()
1924 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); in __bind_mempools()
1926 bioset_exit(&md->bs); in __bind_mempools()
1940 struct mapped_device *md = (struct mapped_device *) context; in event_callback() local
1942 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
1943 list_splice_init(&md->uevent_list, &uevents); in event_callback()
1944 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
1946 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
1948 atomic_inc(&md->event_nr); in event_callback()
1949 wake_up(&md->eventq); in event_callback()
1956 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind() argument
1960 struct request_queue *q = md->queue; in __bind()
1965 lockdep_assert_held(&md->suspend_lock); in __bind()
1972 if (size != dm_get_size(md)) in __bind()
1973 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
1975 set_capacity(md->disk, size); in __bind()
1976 bd_set_nr_sectors(md->bdev, size); in __bind()
1978 dm_table_event_callback(t, event_callback, md); in __bind()
1995 md->immutable_target = dm_table_get_immutable_target(t); in __bind()
1998 ret = __bind_mempools(md, t); in __bind()
2004 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
2005 rcu_assign_pointer(md->map, (void *)t); in __bind()
2006 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
2010 dm_sync_table(md); in __bind()
2019 static struct dm_table *__unbind(struct mapped_device *md) in __unbind() argument
2021 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
2027 RCU_INIT_POINTER(md->map, NULL); in __unbind()
2028 dm_sync_table(md); in __unbind()
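
__bind()/__unbind() are the writer side of the SRCU protocol sketched earlier: the live map is replaced under md->suspend_lock with rcu_assign_pointer(), and dm_sync_table() waits out every reader before the old table may be destroyed (see dm_table_destroy(__unbind(md)) in __dm_destroy() below):

        struct dm_table *old_map;

        old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
        rcu_assign_pointer(md->map, (void *)t);

        if (old_map)
                dm_sync_table(md);      /* no dm_get_live_table() reader can still see old_map */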
2039 struct mapped_device *md; in dm_create() local
2041 md = alloc_dev(minor); in dm_create()
2042 if (!md) in dm_create()
2045 r = dm_sysfs_init(md); in dm_create()
2047 free_dev(md); in dm_create()
2051 *result = md; in dm_create()
2059 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type() argument
2061 mutex_lock(&md->type_lock); in dm_lock_md_type()
2064 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type() argument
2066 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2069 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) in dm_set_md_type() argument
2071 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_set_md_type()
2072 md->type = type; in dm_set_md_type()
2075 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type() argument
2077 return md->type; in dm_get_md_type()
2080 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type() argument
2082 return md->immutable_target_type; in dm_get_immutable_target_type()
2089 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits() argument
2091 BUG_ON(!atomic_read(&md->holders)); in dm_get_queue_limits()
2092 return &md->queue->limits; in dm_get_queue_limits()
2099 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue() argument
2103 enum dm_queue_mode type = dm_get_md_type(md); in dm_setup_md_queue()
2107 md->disk->fops = &dm_rq_blk_dops; in dm_setup_md_queue()
2108 r = dm_mq_init_request_queue(md, t); in dm_setup_md_queue()
2127 dm_table_set_restrictions(t, md->queue, &limits); in dm_setup_md_queue()
2128 blk_register_queue(md->disk); in dm_setup_md_queue()
2135 struct mapped_device *md; in dm_get_md() local
2143 md = idr_find(&_minor_idr, minor); in dm_get_md()
2144 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || in dm_get_md()
2145 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_md()
2146 md = NULL; in dm_get_md()
2149 dm_get(md); in dm_get_md()
2153 return md; in dm_get_md()
2157 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr() argument
2159 return md->interface_ptr; in dm_get_mdptr()
2162 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr() argument
2164 md->interface_ptr = ptr; in dm_set_mdptr()
2167 void dm_get(struct mapped_device *md) in dm_get() argument
2169 atomic_inc(&md->holders); in dm_get()
2170 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2173 int dm_hold(struct mapped_device *md) in dm_hold() argument
2176 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2180 dm_get(md); in dm_hold()
2186 const char *dm_device_name(struct mapped_device *md) in dm_device_name() argument
2188 return md->name; in dm_device_name()
2192 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy() argument
2200 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); in __dm_destroy()
2201 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2204 blk_set_queue_dying(md->queue); in __dm_destroy()
2210 mutex_lock(&md->suspend_lock); in __dm_destroy()
2211 map = dm_get_live_table(md, &srcu_idx); in __dm_destroy()
2212 if (!dm_suspended_md(md)) { in __dm_destroy()
2214 set_bit(DMF_SUSPENDED, &md->flags); in __dm_destroy()
2215 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_destroy()
2219 dm_put_live_table(md, srcu_idx); in __dm_destroy()
2220 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2229 while (atomic_read(&md->holders)) in __dm_destroy()
2231 else if (atomic_read(&md->holders)) in __dm_destroy()
2233 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2235 dm_sysfs_exit(md); in __dm_destroy()
2236 dm_table_destroy(__unbind(md)); in __dm_destroy()
2237 free_dev(md); in __dm_destroy()
2240 void dm_destroy(struct mapped_device *md) in dm_destroy() argument
2242 __dm_destroy(md, true); in dm_destroy()
2245 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate() argument
2247 __dm_destroy(md, false); in dm_destroy_immediate()
2250 void dm_put(struct mapped_device *md) in dm_put() argument
2252 atomic_dec(&md->holders); in dm_put()
2256 static bool md_in_flight_bios(struct mapped_device *md) in md_in_flight_bios() argument
2259 struct hd_struct *part = &dm_disk(md)->part0; in md_in_flight_bios()
2270 static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state) in dm_wait_for_bios_completion() argument
2276 prepare_to_wait(&md->wait, &wait, task_state); in dm_wait_for_bios_completion()
2278 if (!md_in_flight_bios(md)) in dm_wait_for_bios_completion()
2288 finish_wait(&md->wait, &wait); in dm_wait_for_bios_completion()
2293 static int dm_wait_for_completion(struct mapped_device *md, long task_state) in dm_wait_for_completion() argument
2297 if (!queue_is_mq(md->queue)) in dm_wait_for_completion()
2298 return dm_wait_for_bios_completion(md, task_state); in dm_wait_for_completion()
2301 if (!blk_mq_queue_inflight(md->queue)) in dm_wait_for_completion()
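
Draining bio-based I/O is a classic waitqueue loop against the in-flight counter; end_io_acct() above performs the matching wake_up(&md->wait). A reconstruction (the signal check is assumed, matching the -EINTR an interruptible suspend can return):

static int dm_wait_for_bios_completion(struct mapped_device *md, long task_state)
{
        int r = 0;
        DEFINE_WAIT(wait);

        while (true) {
                prepare_to_wait(&md->wait, &wait, task_state);

                if (!md_in_flight_bios(md))
                        break;

                if (signal_pending_state(task_state, current)) {
                        r = -EINTR;
                        break;
                }

                io_schedule();
        }
        finish_wait(&md->wait, &wait);

        return r;
}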
2320 struct mapped_device *md = container_of(work, struct mapped_device, work); in dm_wq_work() local
2323 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2324 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2325 bio = bio_list_pop(&md->deferred); in dm_wq_work()
2326 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
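
dm_wq_work() is the consumer of md->deferred: it pops bios until the list drains or a new suspend raises DMF_BLOCK_IO_FOR_SUSPEND again. The loop tail is elided above; it is assumed to resubmit each bio from the top of the stack (submit_bio_noacct() in kernels of this vintage):

static void dm_wq_work(struct work_struct *work)
{
        struct mapped_device *md = container_of(work, struct mapped_device, work);
        struct bio *bio;

        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                spin_lock_irq(&md->deferred_lock);
                bio = bio_list_pop(&md->deferred);
                spin_unlock_irq(&md->deferred_lock);

                if (!bio)
                        break;                  /* deferred list drained */

                submit_bio_noacct(bio);         /* re-enter the block layer (assumed) */
        }
}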
2335 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush() argument
2337 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2339 queue_work(md->wq, &md->work); in dm_queue_flush()
2345 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table() argument
2351 mutex_lock(&md->suspend_lock); in dm_swap_table()
2354 if (!dm_suspended_md(md)) in dm_swap_table()
2364 live_map = dm_get_live_table_fast(md); in dm_swap_table()
2366 limits = md->queue->limits; in dm_swap_table()
2367 dm_put_live_table_fast(md); in dm_swap_table()
2378 map = __bind(md, table, &limits); in dm_swap_table()
2382 mutex_unlock(&md->suspend_lock); in dm_swap_table()
2390 static int lock_fs(struct mapped_device *md) in lock_fs() argument
2394 WARN_ON(md->frozen_sb); in lock_fs()
2396 md->frozen_sb = freeze_bdev(md->bdev); in lock_fs()
2397 if (IS_ERR(md->frozen_sb)) { in lock_fs()
2398 r = PTR_ERR(md->frozen_sb); in lock_fs()
2399 md->frozen_sb = NULL; in lock_fs()
2403 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
2408 static void unlock_fs(struct mapped_device *md) in unlock_fs() argument
2410 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
2413 thaw_bdev(md->bdev, md->frozen_sb); in unlock_fs()
2414 md->frozen_sb = NULL; in unlock_fs()
2415 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
2427 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend() argument
2435 lockdep_assert_held(&md->suspend_lock); in __dm_suspend()
2442 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2444 DMDEBUG("%s: suspending with flush", dm_device_name(md)); in __dm_suspend()
2459 r = lock_fs(md); in __dm_suspend()
2477 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
2479 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2485 if (dm_request_based(md)) in __dm_suspend()
2486 dm_stop_queue(md->queue); in __dm_suspend()
2488 flush_workqueue(md->wq); in __dm_suspend()
2495 r = dm_wait_for_completion(md, task_state); in __dm_suspend()
2497 set_bit(dmf_suspended_flag, &md->flags); in __dm_suspend()
2500 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2502 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2506 dm_queue_flush(md); in __dm_suspend()
2508 if (dm_request_based(md)) in __dm_suspend()
2509 dm_start_queue(md->queue); in __dm_suspend()
2511 unlock_fs(md); in __dm_suspend()
2535 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend() argument
2541 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
2543 if (dm_suspended_md(md)) { in dm_suspend()
2548 if (dm_suspended_internally_md(md)) { in dm_suspend()
2550 mutex_unlock(&md->suspend_lock); in dm_suspend()
2551 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
2557 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
2559 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); in dm_suspend()
2563 set_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2565 clear_bit(DMF_POST_SUSPENDING, &md->flags); in dm_suspend()
2568 mutex_unlock(&md->suspend_lock); in dm_suspend()
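
The suspend sequence above has a strict order: flag (or freeze), block new I/O, flush in-flight submitters, stop the queue, drain, mark suspended. A condensed sketch of __dm_suspend() preserving that order (presuspend target hooks and some error handling are elided):

static int __dm_suspend_sketch(struct mapped_device *md, struct dm_table *map,
                               unsigned suspend_flags, long task_state,
                               int dmf_suspended_flag)
{
        bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
        int r = 0;

        if (noflush) {
                set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);    /* lets targets requeue */
        } else {
                r = lock_fs(md);        /* freeze_bdev() flushes dirty fs data first */
                if (r)
                        return r;       /* real code also undoes presuspend hooks */
        }

        set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);  /* new bios -> md->deferred */
        synchronize_srcu(&md->io_barrier);      /* flush dm_submit_bio() calls in flight */

        if (dm_request_based(md))
                dm_stop_queue(md->queue);
        flush_workqueue(md->wq);

        r = dm_wait_for_completion(md, task_state);     /* drain all in-flight I/O */
        if (!r)
                set_bit(dmf_suspended_flag, &md->flags);

        if (noflush)
                clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
        synchronize_srcu(&md->io_barrier);

        if (r < 0) {            /* interrupted: undo and restart I/O */
                dm_queue_flush(md);
                if (dm_request_based(md))
                        dm_start_queue(md->queue);
                unlock_fs(md);
        }
        return r;
}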
2572 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume() argument
2580 dm_queue_flush(md); in __dm_resume()
2587 if (dm_request_based(md)) in __dm_resume()
2588 dm_start_queue(md->queue); in __dm_resume()
2590 unlock_fs(md); in __dm_resume()
2595 int dm_resume(struct mapped_device *md) in dm_resume() argument
2602 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
2604 if (!dm_suspended_md(md)) in dm_resume()
2607 if (dm_suspended_internally_md(md)) { in dm_resume()
2609 mutex_unlock(&md->suspend_lock); in dm_resume()
2610 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
2616 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
2620 r = __dm_resume(md, map); in dm_resume()
2624 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
2626 mutex_unlock(&md->suspend_lock); in dm_resume()
2637 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend() argument
2641 lockdep_assert_held(&md->suspend_lock); in __dm_internal_suspend()
2643 if (md->internal_suspend_count++) in __dm_internal_suspend()
2646 if (dm_suspended_md(md)) { in __dm_internal_suspend()
2647 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
2651 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
2659 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, in __dm_internal_suspend()
2662 set_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2664 clear_bit(DMF_POST_SUSPENDING, &md->flags); in __dm_internal_suspend()
2667 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume() argument
2669 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
2671 if (--md->internal_suspend_count) in __dm_internal_resume()
2674 if (dm_suspended_md(md)) in __dm_internal_resume()
2681 (void) __dm_resume(md, NULL); in __dm_internal_resume()
2684 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
2686 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
2689 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush() argument
2691 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
2692 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); in dm_internal_suspend_noflush()
2693 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
2697 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume() argument
2699 mutex_lock(&md->suspend_lock); in dm_internal_resume()
2700 __dm_internal_resume(md); in dm_internal_resume()
2701 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
2710 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast() argument
2712 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
2713 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_suspend_fast()
2716 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
2717 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
2718 flush_workqueue(md->wq); in dm_internal_suspend_fast()
2719 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); in dm_internal_suspend_fast()
2723 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast() argument
2725 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_resume_fast()
2728 dm_queue_flush(md); in dm_internal_resume_fast()
2731 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
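
Internal suspend nests: only the first __dm_internal_suspend() actually suspends and only the last __dm_internal_resume() resumes, while a device the user already suspended merely gains and loses DMF_SUSPENDED_INTERNALLY. A reconstruction of the resume side (the memory barrier before the wakeup is an assumption):

static void __dm_internal_resume(struct mapped_device *md)
{
        BUG_ON(!md->internal_suspend_count);

        if (--md->internal_suspend_count)
                return;         /* others still hold the internal suspend */

        if (dm_suspended_md(md))
                goto done;      /* user suspend stays in effect */

        (void) __dm_resume(md, NULL);   /* resume errors are ignored here */

done:
        clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
        smp_mb__after_atomic();
        wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}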
2738 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent() argument
2749 r = kobject_uevent(&disk_to_dev(md->disk)->kobj, action); in dm_kobject_uevent()
2753 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, in dm_kobject_uevent()
2762 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq() argument
2764 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
2767 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr() argument
2769 return atomic_read(&md->event_nr); in dm_get_event_nr()
2772 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event() argument
2774 return wait_event_interruptible(md->eventq, in dm_wait_event()
2775 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
2778 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add() argument
2782 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
2783 list_add(elist, &md->uevent_list); in dm_uevent_add()
2784 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
2791 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk() argument
2793 return md->disk; in dm_disk()
2797 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject() argument
2799 return &md->kobj_holder.kobj; in dm_kobject()
2804 struct mapped_device *md; in dm_get_from_kobject() local
2806 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
2809 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_from_kobject()
2810 md = NULL; in dm_get_from_kobject()
2813 dm_get(md); in dm_get_from_kobject()
2817 return md; in dm_get_from_kobject()
2820 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md() argument
2822 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
2825 static int dm_post_suspending_md(struct mapped_device *md) in dm_post_suspending_md() argument
2827 return test_bit(DMF_POST_SUSPENDING, &md->flags); in dm_post_suspending_md()
2830 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md() argument
2832 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
2835 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag() argument
2837 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
2842 return dm_suspended_md(ti->table->md); in dm_suspended()
2848 return dm_post_suspending_md(ti->table->md); in dm_post_suspending()
2854 return __noflush_suspending(ti->table->md); in dm_noflush_suspending()
2858 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, in dm_alloc_md_mempools() argument
2862 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_alloc_md_mempools()
2927 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr() local
2932 table = dm_get_live_table(md, &srcu_idx); in dm_call_pr()
2947 dm_put_live_table(md, srcu_idx); in dm_call_pr()
2992 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve() local
2996 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_reserve()
3006 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_reserve()
3012 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release() local
3016 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_release()
3026 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_release()
3033 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt() local
3037 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_preempt()
3047 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_preempt()
3053 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear() local
3057 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_clear()
3067 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_clear()
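
All four persistent-reservation handlers share the prepare/forward/unprepare shape noted earlier. A reconstruction of dm_pr_reserve(), assuming the usual -EOPNOTSUPP fallback when the underlying disk has no pr_ops:

static int dm_pr_reserve(struct block_device *bdev, u64 key,
                         enum pr_type type, u32 flags)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        const struct pr_ops *ops;
        int r, srcu_idx;

        r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
        if (r < 0)
                goto out;

        /* bdev now points at the single underlying device */
        ops = bdev->bd_disk->fops->pr_ops;
        if (ops && ops->pr_reserve)
                r = ops->pr_reserve(bdev, key, type, flags);
        else
                r = -EOPNOTSUPP;
out:
        dm_unprepare_ioctl(md, srcu_idx);
        return r;
}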