Lines matching refs: md (all references to the struct mapped_device pointer md in this file; the leading number on each line is the source line number)
94 struct mapped_device *md; member
324 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md() argument
326 return test_bit(DMF_DELETING, &md->flags); in dm_deleting_md()
331 struct mapped_device *md; in dm_blk_open() local
335 md = bdev->bd_disk->private_data; in dm_blk_open()
336 if (!md) in dm_blk_open()
339 if (test_bit(DMF_FREEING, &md->flags) || in dm_blk_open()
340 dm_deleting_md(md)) { in dm_blk_open()
341 md = NULL; in dm_blk_open()
345 dm_get(md); in dm_blk_open()
346 atomic_inc(&md->open_count); in dm_blk_open()
350 return md ? 0 : -ENXIO; in dm_blk_open()
355 struct mapped_device *md; in dm_blk_close() local
359 md = disk->private_data; in dm_blk_close()
360 if (WARN_ON(!md)) in dm_blk_close()
363 if (atomic_dec_and_test(&md->open_count) && in dm_blk_close()
364 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) in dm_blk_close()
367 dm_put(md); in dm_blk_close()
372 int dm_open_count(struct mapped_device *md) in dm_open_count() argument
374 return atomic_read(&md->open_count); in dm_open_count()
380 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion() argument
386 if (dm_open_count(md)) { in dm_lock_for_deletion()
389 set_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_lock_for_deletion()
390 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) in dm_lock_for_deletion()
393 set_bit(DMF_DELETING, &md->flags); in dm_lock_for_deletion()
400 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove() argument
406 if (test_bit(DMF_DELETING, &md->flags)) in dm_cancel_deferred_remove()
409 clear_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_cancel_deferred_remove()
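Together these fragments implement removal gating: opens fail while DMF_FREEING or DMF_DELETING is set, and a remove that races with an open can be deferred to the last close. A sketch of the full dm_lock_for_deletion() logic as reconstructed from the lines above; the _minor_lock spinlock and the -EBUSY/-EEXIST return codes are assumptions filled in around the visible fragments:

int lock_for_deletion_sketch(struct mapped_device *md,
                             bool mark_deferred, bool only_deferred)
{
    int r = 0;

    spin_lock(&_minor_lock);    /* dm.c's static minor-table lock (assumed) */

    if (dm_open_count(md)) {
        r = -EBUSY;             /* still open: optionally mark for later */
        if (mark_deferred)
            set_bit(DMF_DEFERRED_REMOVE, &md->flags);
    } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags)) {
        r = -EEXIST;            /* caller only wanted deferred removals */
    } else {
        set_bit(DMF_DELETING, &md->flags);
    }

    spin_unlock(&_minor_lock);
    return r;
}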
421 sector_t dm_get_size(struct mapped_device *md) in dm_get_size() argument
423 return get_capacity(md->disk); in dm_get_size()
426 struct request_queue *dm_get_md_queue(struct mapped_device *md) in dm_get_md_queue() argument
428 return md->queue; in dm_get_md_queue()
431 struct dm_stats *dm_get_stats(struct mapped_device *md) in dm_get_stats() argument
433 return &md->stats; in dm_get_stats()
438 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo() local
440 return dm_get_geometry(md, geo); in dm_blk_getgeo()
447 struct mapped_device *md = disk->private_data; in dm_blk_report_zones() local
452 if (dm_suspended_md(md)) in dm_blk_report_zones()
455 map = dm_get_live_table(md, &srcu_idx); in dm_blk_report_zones()
485 dm_put_live_table(md, srcu_idx); in dm_blk_report_zones()
492 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl() argument
494 __acquires(md->io_barrier) in dm_prepare_ioctl()
502 map = dm_get_live_table(md, srcu_idx); in dm_prepare_ioctl()
514 if (dm_suspended_md(md)) in dm_prepare_ioctl()
519 dm_put_live_table(md, *srcu_idx); in dm_prepare_ioctl()
527 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl() argument
528 __releases(md->io_barrier) in dm_unprepare_ioctl()
530 dm_put_live_table(md, srcu_idx); in dm_unprepare_ioctl()
536 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl() local
539 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_blk_ioctl()
559 dm_unprepare_ioctl(md, srcu_idx); in dm_blk_ioctl()
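dm_prepare_ioctl()/dm_unprepare_ioctl() bracket every passthrough operation in this file: dm_blk_ioctl() here and the dm_pr_* helpers near the end. A hedged sketch of the caller pattern; the __blkdev_driver_ioctl() forwarding call and the omitted permission check are simplifications of what dm_blk_ioctl() does in this kernel generation:

static int blk_ioctl_sketch(struct block_device *bdev, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
{
    struct mapped_device *md = bdev->bd_disk->private_data;
    int r, srcu_idx;

    /* Resolves the single underlying device; fails while suspended. */
    r = dm_prepare_ioctl(md, &srcu_idx, &bdev);
    if (r < 0)
        goto out;

    r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
    /* Always drop the SRCU read lock taken by dm_prepare_ioctl(). */
    dm_unprepare_ioctl(md, srcu_idx);
    return r;
}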
565 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io() argument
571 clone = bio_alloc_bioset(GFP_NOIO, 0, &md->io_bs); in alloc_io()
584 io->md = md; in alloc_io()
592 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io() argument
606 struct bio *clone = bio_alloc_bioset(gfp_mask, 0, &ci->io->md->bs); in alloc_tio()
629 static bool md_in_flight_bios(struct mapped_device *md) in md_in_flight_bios() argument
632 struct hd_struct *part = &dm_disk(md)->part0; in md_in_flight_bios()
643 static bool md_in_flight(struct mapped_device *md) in md_in_flight() argument
645 if (queue_is_mq(md->queue)) in md_in_flight()
646 return blk_mq_queue_inflight(md->queue); in md_in_flight()
648 return md_in_flight_bios(md); in md_in_flight()
653 struct mapped_device *md = io->md; in start_io_acct() local
658 generic_start_io_acct(md->queue, bio_op(bio), bio_sectors(bio), in start_io_acct()
659 &dm_disk(md)->part0); in start_io_acct()
661 if (unlikely(dm_stats_used(&md->stats))) in start_io_acct()
662 dm_stats_account_io(&md->stats, bio_data_dir(bio), in start_io_acct()
669 struct mapped_device *md = io->md; in end_io_acct() local
673 generic_end_io_acct(md->queue, bio_op(bio), &dm_disk(md)->part0, in end_io_acct()
676 if (unlikely(dm_stats_used(&md->stats))) in end_io_acct()
677 dm_stats_account_io(&md->stats, bio_data_dir(bio), in end_io_acct()
682 if (unlikely(wq_has_sleeper(&md->wait))) in end_io_acct()
683 wake_up(&md->wait); in end_io_acct()
689 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io() argument
693 spin_lock_irqsave(&md->deferred_lock, flags); in queue_io()
694 bio_list_add(&md->deferred, bio); in queue_io()
695 spin_unlock_irqrestore(&md->deferred_lock, flags); in queue_io()
696 queue_work(md->wq, &md->work); in queue_io()
704 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table() argument
706 *srcu_idx = srcu_read_lock(&md->io_barrier); in dm_get_live_table()
708 return srcu_dereference(md->map, &md->io_barrier); in dm_get_live_table()
711 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table() argument
713 srcu_read_unlock(&md->io_barrier, srcu_idx); in dm_put_live_table()
716 void dm_sync_table(struct mapped_device *md) in dm_sync_table() argument
718 synchronize_srcu(&md->io_barrier); in dm_sync_table()
726 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast() argument
729 return rcu_dereference(md->map); in dm_get_live_table_fast()
732 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast() argument
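The four accessors above are the read side of the table-swap scheme: dm_get_live_table() enters an SRCU read-side critical section that pins the current table, and dm_sync_table() is what writers call to wait readers out. A minimal reader sketch, where process_table() is a placeholder rather than a dm.c function:

static void live_table_reader_sketch(struct mapped_device *md)
{
    struct dm_table *map;
    int srcu_idx;

    map = dm_get_live_table(md, &srcu_idx);
    if (map)
        process_table(map);             /* placeholder work */
    dm_put_live_table(md, srcu_idx);    /* must pair with the same srcu_idx */
}

The _fast variants use plain RCU instead of SRCU, so they are cheaper but the caller must not sleep between them.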
743 struct mapped_device *md) in open_table_device() argument
755 r = bd_link_disk_holder(bdev, dm_disk(md)); in open_table_device()
769 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device() argument
774 bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md)); in close_table_device()
793 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device() argument
799 mutex_lock(&md->table_devices_lock); in dm_get_table_device()
800 td = find_table_device(&md->table_devices, dev, mode); in dm_get_table_device()
802 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id); in dm_get_table_device()
804 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
811 if ((r = open_table_device(td, dev, md))) { in dm_get_table_device()
812 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
820 list_add(&td->list, &md->table_devices); in dm_get_table_device()
824 mutex_unlock(&md->table_devices_lock); in dm_get_table_device()
831 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device() argument
835 mutex_lock(&md->table_devices_lock); in dm_put_table_device()
837 close_table_device(td, md); in dm_put_table_device()
841 mutex_unlock(&md->table_devices_lock); in dm_put_table_device()
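dm_get_table_device() keeps one refcounted struct table_device per (dev, mode) pair on md->table_devices, all under table_devices_lock. A sketch of the matching put path; the refcount field name td->count is an assumption about dm.c's private struct table_device:

void put_table_device_sketch(struct mapped_device *md, struct dm_dev *d)
{
    struct table_device *td = container_of(d, struct table_device, dm_dev);

    mutex_lock(&md->table_devices_lock);
    if (refcount_dec_and_test(&td->count)) {    /* field name assumed */
        close_table_device(td, md);             /* blkdev_put + unlink holder */
        list_del(&td->list);
        kfree(td);
    }
    mutex_unlock(&md->table_devices_lock);
}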
861 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry() argument
863 *geo = md->geometry; in dm_get_geometry()
871 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry() argument
880 md->geometry = *geo; in dm_set_geometry()
885 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending() argument
887 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __noflush_suspending()
899 struct mapped_device *md = io->md; in dec_pending() local
904 if (!(io->status == BLK_STS_DM_REQUEUE && __noflush_suspending(md))) in dec_pending()
914 spin_lock_irqsave(&md->deferred_lock, flags); in dec_pending()
915 if (__noflush_suspending(md)) in dec_pending()
917 bio_list_add_head(&md->deferred, io->orig_bio); in dec_pending()
921 spin_unlock_irqrestore(&md->deferred_lock, flags); in dec_pending()
927 free_io(md, io); in dec_pending()
938 queue_io(md, bio); in dec_pending()
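The dec_pending() fragments show the noflush-suspend requeue rule: a clone failing with BLK_STS_DM_REQUEUE while the device is noflush-suspending parks the original bio on md->deferred for re-issue after resume; if the noflush suspend was already interrupted, the I/O fails instead. A condensed sketch, with the io_count accounting and final completion omitted:

if (io->status == BLK_STS_DM_REQUEUE) {
    spin_lock_irqsave(&md->deferred_lock, flags);
    if (__noflush_suspending(md))
        /* requeue at the head so it is re-issued first after resume */
        bio_list_add_head(&md->deferred, io->orig_bio);
    else
        /* noflush suspend was interrupted: surface the error */
        io->status = BLK_STS_IOERR;
    spin_unlock_irqrestore(&md->deferred_lock, flags);
}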
948 void disable_discard(struct mapped_device *md) in disable_discard() argument
950 struct queue_limits *limits = dm_get_queue_limits(md); in disable_discard()
954 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, md->queue); in disable_discard()
957 void disable_write_same(struct mapped_device *md) in disable_write_same() argument
959 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_same()
965 void disable_write_zeroes(struct mapped_device *md) in disable_write_zeroes() argument
967 struct queue_limits *limits = dm_get_queue_limits(md); in disable_write_zeroes()
978 struct mapped_device *md = tio->io->md; in clone_endio() local
981 if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) { in clone_endio()
984 disable_discard(md); in clone_endio()
987 disable_write_same(md); in clone_endio()
990 disable_write_zeroes(md); in clone_endio()
1063 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target() argument
1065 __acquires(md->io_barrier) in dm_dax_get_live_target()
1070 map = dm_get_live_table(md, srcu_idx); in dm_dax_get_live_target()
1084 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access() local
1090 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_direct_access()
1103 dm_put_live_table(md, srcu_idx); in dm_dax_direct_access()
1111 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_supported() local
1116 map = dm_get_live_table(md, &srcu_idx); in dm_dax_supported()
1122 dm_put_live_table(md, srcu_idx); in dm_dax_supported()
1130 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_from_iter() local
1136 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_from_iter()
1146 dm_put_live_table(md, srcu_idx); in dm_dax_copy_from_iter()
1154 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_to_iter() local
1160 ti = dm_dax_get_live_target(md, sector, &srcu_idx); in dm_dax_copy_to_iter()
1170 dm_put_live_table(md, srcu_idx); in dm_dax_copy_to_iter()
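All four DAX entry points share one lookup: convert the DAX page offset to a sector, find the live target covering it, call that target's dax op, then drop the SRCU lock. A hedged reconstruction of dm_dax_get_live_target(), which deliberately leaves the SRCU lock held for its caller, matching the __acquires annotation shown above:

static struct dm_target *dax_get_live_target_sketch(struct mapped_device *md,
        sector_t sector, int *srcu_idx)
{
    struct dm_table *map = dm_get_live_table(md, srcu_idx);

    if (!map)
        return NULL;
    /* NULL if no target covers this sector; the caller still unlocks */
    return dm_table_find_target(map, sector);
}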
1269 struct mapped_device *md = io->md; in __map_bio() local
1291 if (md->type == DM_TYPE_NVME_BIO_BASED) in __map_bio()
1334 dm_device_name(tio->io->md), in clone_bio()
1373 mutex_lock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1382 mutex_unlock(&ci->io->md->table_devices_lock); in alloc_multiple_bios()
1434 bio_set_dev(ci->bio, ci->io->md->bdev); in __send_empty_flush()
1593 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, in init_clone_info() argument
1597 ci->io = alloc_io(md, bio); in init_clone_info()
1607 static blk_qc_t __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio() argument
1614 init_clone_info(&ci, md, map, bio); in __split_and_process_bio()
1649 GFP_NOIO, &md->queue->bio_split); in __split_and_process_bio()
1660 __dm_part_stat_sub(&dm_disk(md)->part0, in __split_and_process_bio()
1665 trace_block_split(md->queue, b, bio->bi_iter.bi_sector); in __split_and_process_bio()
1681 static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, in __process_bio() argument
1688 init_clone_info(&ci, md, map, bio); in __process_bio()
1721 static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) in dm_queue_split() argument
1729 struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); in dm_queue_split()
1732 trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); in dm_queue_split()
1738 static blk_qc_t dm_process_bio(struct mapped_device *md, in dm_process_bio() argument
1742 struct dm_target *ti = md->immutable_target; in dm_process_bio()
1763 blk_queue_split(md->queue, &bio); in dm_process_bio()
1765 dm_queue_split(md, ti, &bio); in dm_process_bio()
1768 if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) in dm_process_bio()
1769 return __process_bio(md, map, bio, ti); in dm_process_bio()
1771 return __split_and_process_bio(md, map, bio); in dm_process_bio()
1776 struct mapped_device *md = q->queuedata; in dm_make_request() local
1781 map = dm_get_live_table(md, &srcu_idx); in dm_make_request()
1784 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) { in dm_make_request()
1785 dm_put_live_table(md, srcu_idx); in dm_make_request()
1788 queue_io(md, bio); in dm_make_request()
1794 ret = dm_process_bio(md, map, bio); in dm_make_request()
1796 dm_put_live_table(md, srcu_idx); in dm_make_request()
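dm_make_request() ties the pieces together: bios arriving while DMF_BLOCK_IO_FOR_SUSPEND is set are parked on md->deferred via queue_io(), to be drained later by dm_wq_work(); everything else is mapped immediately. Reconstructed flow, with the REQ_NOWAIT error branch of the real function reduced to a comment:

static blk_qc_t make_request_sketch(struct request_queue *q, struct bio *bio)
{
    struct mapped_device *md = q->queuedata;
    blk_qc_t ret = BLK_QC_T_NONE;
    struct dm_table *map;
    int srcu_idx;

    map = dm_get_live_table(md, &srcu_idx);

    if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
        dm_put_live_table(md, srcu_idx);
        /* (the real code fails REQ_NOWAIT bios here instead of deferring) */
        queue_io(md, bio);
        return ret;
    }

    ret = dm_process_bio(md, map, bio);

    dm_put_live_table(md, srcu_idx);
    return ret;
}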
1803 struct mapped_device *md = congested_data; in dm_any_congested() local
1806 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_any_congested()
1807 if (dm_request_based(md)) { in dm_any_congested()
1812 r = md->queue->backing_dev_info->wb.state & bdi_bits; in dm_any_congested()
1814 map = dm_get_live_table_fast(md); in dm_any_congested()
1817 dm_put_live_table_fast(md); in dm_any_congested()
1878 static void dm_init_normal_md_queue(struct mapped_device *md) in dm_init_normal_md_queue() argument
1883 md->queue->backing_dev_info->congested_fn = dm_any_congested; in dm_init_normal_md_queue()
1886 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device() argument
1888 if (md->wq) in cleanup_mapped_device()
1889 destroy_workqueue(md->wq); in cleanup_mapped_device()
1890 bioset_exit(&md->bs); in cleanup_mapped_device()
1891 bioset_exit(&md->io_bs); in cleanup_mapped_device()
1893 if (md->dax_dev) { in cleanup_mapped_device()
1894 kill_dax(md->dax_dev); in cleanup_mapped_device()
1895 put_dax(md->dax_dev); in cleanup_mapped_device()
1896 md->dax_dev = NULL; in cleanup_mapped_device()
1899 if (md->disk) { in cleanup_mapped_device()
1901 md->disk->private_data = NULL; in cleanup_mapped_device()
1903 del_gendisk(md->disk); in cleanup_mapped_device()
1904 put_disk(md->disk); in cleanup_mapped_device()
1907 if (md->queue) in cleanup_mapped_device()
1908 blk_cleanup_queue(md->queue); in cleanup_mapped_device()
1910 cleanup_srcu_struct(&md->io_barrier); in cleanup_mapped_device()
1912 if (md->bdev) { in cleanup_mapped_device()
1913 bdput(md->bdev); in cleanup_mapped_device()
1914 md->bdev = NULL; in cleanup_mapped_device()
1917 mutex_destroy(&md->suspend_lock); in cleanup_mapped_device()
1918 mutex_destroy(&md->type_lock); in cleanup_mapped_device()
1919 mutex_destroy(&md->table_devices_lock); in cleanup_mapped_device()
1921 dm_mq_cleanup_mapped_device(md); in cleanup_mapped_device()
1930 struct mapped_device *md; in alloc_dev() local
1933 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); in alloc_dev()
1934 if (!md) { in alloc_dev()
1950 r = init_srcu_struct(&md->io_barrier); in alloc_dev()
1954 md->numa_node_id = numa_node_id; in alloc_dev()
1955 md->init_tio_pdu = false; in alloc_dev()
1956 md->type = DM_TYPE_NONE; in alloc_dev()
1957 mutex_init(&md->suspend_lock); in alloc_dev()
1958 mutex_init(&md->type_lock); in alloc_dev()
1959 mutex_init(&md->table_devices_lock); in alloc_dev()
1960 spin_lock_init(&md->deferred_lock); in alloc_dev()
1961 atomic_set(&md->holders, 1); in alloc_dev()
1962 atomic_set(&md->open_count, 0); in alloc_dev()
1963 atomic_set(&md->event_nr, 0); in alloc_dev()
1964 atomic_set(&md->uevent_seq, 0); in alloc_dev()
1965 INIT_LIST_HEAD(&md->uevent_list); in alloc_dev()
1966 INIT_LIST_HEAD(&md->table_devices); in alloc_dev()
1967 spin_lock_init(&md->uevent_lock); in alloc_dev()
1969 md->queue = blk_alloc_queue_node(GFP_KERNEL, numa_node_id); in alloc_dev()
1970 if (!md->queue) in alloc_dev()
1972 md->queue->queuedata = md; in alloc_dev()
1973 md->queue->backing_dev_info->congested_data = md; in alloc_dev()
1975 md->disk = alloc_disk_node(1, md->numa_node_id); in alloc_dev()
1976 if (!md->disk) in alloc_dev()
1979 init_waitqueue_head(&md->wait); in alloc_dev()
1980 INIT_WORK(&md->work, dm_wq_work); in alloc_dev()
1981 init_waitqueue_head(&md->eventq); in alloc_dev()
1982 init_completion(&md->kobj_holder.completion); in alloc_dev()
1984 md->disk->major = _major; in alloc_dev()
1985 md->disk->first_minor = minor; in alloc_dev()
1986 md->disk->fops = &dm_blk_dops; in alloc_dev()
1987 md->disk->queue = md->queue; in alloc_dev()
1988 md->disk->private_data = md; in alloc_dev()
1989 sprintf(md->disk->disk_name, "dm-%d", minor); in alloc_dev()
1992 md->dax_dev = alloc_dax(md, md->disk->disk_name, in alloc_dev()
1994 if (!md->dax_dev) in alloc_dev()
1998 add_disk_no_queue_reg(md->disk); in alloc_dev()
1999 format_dev_t(md->name, MKDEV(_major, minor)); in alloc_dev()
2001 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0); in alloc_dev()
2002 if (!md->wq) in alloc_dev()
2005 md->bdev = bdget_disk(md->disk, 0); in alloc_dev()
2006 if (!md->bdev) in alloc_dev()
2009 dm_stats_init(&md->stats); in alloc_dev()
2013 old_md = idr_replace(&_minor_idr, md, minor); in alloc_dev()
2018 return md; in alloc_dev()
2021 cleanup_mapped_device(md); in alloc_dev()
2027 kvfree(md); in alloc_dev()
2031 static void unlock_fs(struct mapped_device *md);
2033 static void free_dev(struct mapped_device *md) in free_dev() argument
2035 int minor = MINOR(disk_devt(md->disk)); in free_dev()
2037 unlock_fs(md); in free_dev()
2039 cleanup_mapped_device(md); in free_dev()
2041 free_table_devices(&md->table_devices); in free_dev()
2042 dm_stats_cleanup(&md->stats); in free_dev()
2046 kvfree(md); in free_dev()
2049 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools() argument
2060 bioset_exit(&md->bs); in __bind_mempools()
2061 bioset_exit(&md->io_bs); in __bind_mempools()
2063 } else if (bioset_initialized(&md->bs)) { in __bind_mempools()
2076 bioset_initialized(&md->bs) || in __bind_mempools()
2077 bioset_initialized(&md->io_bs)); in __bind_mempools()
2079 ret = bioset_init_from_src(&md->bs, &p->bs); in __bind_mempools()
2082 ret = bioset_init_from_src(&md->io_bs, &p->io_bs); in __bind_mempools()
2084 bioset_exit(&md->bs); in __bind_mempools()
2098 struct mapped_device *md = (struct mapped_device *) context; in event_callback() local
2100 spin_lock_irqsave(&md->uevent_lock, flags); in event_callback()
2101 list_splice_init(&md->uevent_list, &uevents); in event_callback()
2102 spin_unlock_irqrestore(&md->uevent_lock, flags); in event_callback()
2104 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); in event_callback()
2106 atomic_inc(&md->event_nr); in event_callback()
2107 wake_up(&md->eventq); in event_callback()
2114 static void __set_size(struct mapped_device *md, sector_t size) in __set_size() argument
2116 lockdep_assert_held(&md->suspend_lock); in __set_size()
2118 set_capacity(md->disk, size); in __set_size()
2120 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT); in __set_size()
2126 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind() argument
2130 struct request_queue *q = md->queue; in __bind()
2135 lockdep_assert_held(&md->suspend_lock); in __bind()
2142 if (size != dm_get_size(md)) in __bind()
2143 memset(&md->geometry, 0, sizeof(md->geometry)); in __bind()
2145 __set_size(md, size); in __bind()
2147 dm_table_event_callback(t, event_callback, md); in __bind()
2159 if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) { in __bind()
2166 md->immutable_target = dm_table_get_immutable_target(t); in __bind()
2169 ret = __bind_mempools(md, t); in __bind()
2175 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __bind()
2176 rcu_assign_pointer(md->map, (void *)t); in __bind()
2177 md->immutable_target_type = dm_table_get_immutable_target_type(t); in __bind()
2181 dm_sync_table(md); in __bind()
2190 static struct dm_table *__unbind(struct mapped_device *md) in __unbind() argument
2192 struct dm_table *map = rcu_dereference_protected(md->map, 1); in __unbind()
2198 RCU_INIT_POINTER(md->map, NULL); in __unbind()
2199 dm_sync_table(md); in __unbind()
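__bind()/__unbind() are the write side of the SRCU scheme: publish the new table with rcu_assign_pointer(), then dm_sync_table() guarantees no reader still holds the old one before it is handed back for destruction. The core of the swap, stitched from the fragments above:

/* Inside __bind(), with md->suspend_lock held: */
old_map = rcu_dereference_protected(md->map,
                                    lockdep_is_held(&md->suspend_lock));
rcu_assign_pointer(md->map, (void *)t);   /* publish the new table */
md->immutable_target_type = dm_table_get_immutable_target_type(t);
dm_sync_table(md);   /* wait for SRCU readers of old_map to finish */
/* old_map is returned; the caller frees it with dm_table_destroy() */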
2210 struct mapped_device *md; in dm_create() local
2212 md = alloc_dev(minor); in dm_create()
2213 if (!md) in dm_create()
2216 r = dm_sysfs_init(md); in dm_create()
2218 free_dev(md); in dm_create()
2222 *result = md; in dm_create()
2230 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type() argument
2232 mutex_lock(&md->type_lock); in dm_lock_md_type()
2235 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type() argument
2237 mutex_unlock(&md->type_lock); in dm_unlock_md_type()
2240 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) in dm_set_md_type() argument
2242 BUG_ON(!mutex_is_locked(&md->type_lock)); in dm_set_md_type()
2243 md->type = type; in dm_set_md_type()
2246 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type() argument
2248 return md->type; in dm_get_md_type()
2251 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type() argument
2253 return md->immutable_target_type; in dm_get_immutable_target_type()
2260 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits() argument
2262 BUG_ON(!atomic_read(&md->holders)); in dm_get_queue_limits()
2263 return &md->queue->limits; in dm_get_queue_limits()
2270 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue() argument
2274 enum dm_queue_mode type = dm_get_md_type(md); in dm_setup_md_queue()
2278 r = dm_mq_init_request_queue(md, t); in dm_setup_md_queue()
2287 dm_init_normal_md_queue(md); in dm_setup_md_queue()
2288 blk_queue_make_request(md->queue, dm_make_request); in dm_setup_md_queue()
2300 dm_table_set_restrictions(t, md->queue, &limits); in dm_setup_md_queue()
2301 blk_register_queue(md->disk); in dm_setup_md_queue()
2308 struct mapped_device *md; in dm_get_md() local
2316 md = idr_find(&_minor_idr, minor); in dm_get_md()
2317 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) || in dm_get_md()
2318 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_md()
2319 md = NULL; in dm_get_md()
2322 dm_get(md); in dm_get_md()
2326 return md; in dm_get_md()
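dm_get_md() shows why dm_blk_open() and dm_get_from_kobject() repeat the same DMF_FREEING/deleting test: any path that hands out a reference must validate the device under _minor_lock before calling dm_get(). A reconstruction; the spin_lock(&_minor_lock) lines are absent from the listing above only because they contain no md reference:

struct mapped_device *get_md_sketch(dev_t dev)
{
    struct mapped_device *md;
    unsigned minor = MINOR(dev);

    if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
        return NULL;

    spin_lock(&_minor_lock);
    md = idr_find(&_minor_idr, minor);
    if (!md || md == MINOR_ALLOCED ||
        MINOR(disk_devt(dm_disk(md))) != minor ||
        test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
        md = NULL;
        goto out;
    }
    dm_get(md);   /* safe: DMF_FREEING cannot be set while we hold the lock */
out:
    spin_unlock(&_minor_lock);
    return md;
}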
2330 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr() argument
2332 return md->interface_ptr; in dm_get_mdptr()
2335 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr() argument
2337 md->interface_ptr = ptr; in dm_set_mdptr()
2340 void dm_get(struct mapped_device *md) in dm_get() argument
2342 atomic_inc(&md->holders); in dm_get()
2343 BUG_ON(test_bit(DMF_FREEING, &md->flags)); in dm_get()
2346 int dm_hold(struct mapped_device *md) in dm_hold() argument
2349 if (test_bit(DMF_FREEING, &md->flags)) { in dm_hold()
2353 dm_get(md); in dm_hold()
2359 const char *dm_device_name(struct mapped_device *md) in dm_device_name() argument
2361 return md->name; in dm_device_name()
2365 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy() argument
2373 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md)))); in __dm_destroy()
2374 set_bit(DMF_FREEING, &md->flags); in __dm_destroy()
2377 blk_set_queue_dying(md->queue); in __dm_destroy()
2383 mutex_lock(&md->suspend_lock); in __dm_destroy()
2384 map = dm_get_live_table(md, &srcu_idx); in __dm_destroy()
2385 if (!dm_suspended_md(md)) { in __dm_destroy()
2390 dm_put_live_table(md, srcu_idx); in __dm_destroy()
2391 mutex_unlock(&md->suspend_lock); in __dm_destroy()
2400 while (atomic_read(&md->holders)) in __dm_destroy()
2402 else if (atomic_read(&md->holders)) in __dm_destroy()
2404 dm_device_name(md), atomic_read(&md->holders)); in __dm_destroy()
2406 dm_sysfs_exit(md); in __dm_destroy()
2407 dm_table_destroy(__unbind(md)); in __dm_destroy()
2408 free_dev(md); in __dm_destroy()
2411 void dm_destroy(struct mapped_device *md) in dm_destroy() argument
2413 __dm_destroy(md, true); in dm_destroy()
2416 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate() argument
2418 __dm_destroy(md, false); in dm_destroy_immediate()
2421 void dm_put(struct mapped_device *md) in dm_put() argument
2423 atomic_dec(&md->holders); in dm_put()
2427 static int dm_wait_for_completion(struct mapped_device *md, long task_state) in dm_wait_for_completion() argument
2433 prepare_to_wait(&md->wait, &wait, task_state); in dm_wait_for_completion()
2435 if (!md_in_flight(md)) in dm_wait_for_completion()
2445 finish_wait(&md->wait, &wait); in dm_wait_for_completion()
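dm_wait_for_completion() is the drain step of suspend: a classic prepare_to_wait() loop that sleeps until md_in_flight() reports no outstanding I/O. A hedged reconstruction; the io_schedule() call and signal handling are the usual shape of this loop, filled in around the fragments above:

static int wait_for_completion_sketch(struct mapped_device *md, long task_state)
{
    DEFINE_WAIT(wait);
    int r = 0;

    while (true) {
        prepare_to_wait(&md->wait, &wait, task_state);

        if (!md_in_flight(md))
            break;

        /* TASK_INTERRUPTIBLE suspends can be aborted by a signal */
        if (signal_pending_state(task_state, current)) {
            r = -EINTR;
            break;
        }

        io_schedule();
    }
    finish_wait(&md->wait, &wait);

    return r;
}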
2455 struct mapped_device *md = container_of(work, struct mapped_device, in dm_wq_work() local
2461 map = dm_get_live_table(md, &srcu_idx); in dm_wq_work()
2463 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) { in dm_wq_work()
2464 spin_lock_irq(&md->deferred_lock); in dm_wq_work()
2465 c = bio_list_pop(&md->deferred); in dm_wq_work()
2466 spin_unlock_irq(&md->deferred_lock); in dm_wq_work()
2471 if (dm_request_based(md)) in dm_wq_work()
2474 (void) dm_process_bio(md, map, c); in dm_wq_work()
2477 dm_put_live_table(md, srcu_idx); in dm_wq_work()
2480 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush() argument
2482 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_queue_flush()
2484 queue_work(md->wq, &md->work); in dm_queue_flush()
2490 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table() argument
2496 mutex_lock(&md->suspend_lock); in dm_swap_table()
2499 if (!dm_suspended_md(md)) in dm_swap_table()
2509 live_map = dm_get_live_table_fast(md); in dm_swap_table()
2511 limits = md->queue->limits; in dm_swap_table()
2512 dm_put_live_table_fast(md); in dm_swap_table()
2523 map = __bind(md, table, &limits); in dm_swap_table()
2527 mutex_unlock(&md->suspend_lock); in dm_swap_table()
2535 static int lock_fs(struct mapped_device *md) in lock_fs() argument
2539 WARN_ON(md->frozen_sb); in lock_fs()
2541 md->frozen_sb = freeze_bdev(md->bdev); in lock_fs()
2542 if (IS_ERR(md->frozen_sb)) { in lock_fs()
2543 r = PTR_ERR(md->frozen_sb); in lock_fs()
2544 md->frozen_sb = NULL; in lock_fs()
2548 set_bit(DMF_FROZEN, &md->flags); in lock_fs()
2553 static void unlock_fs(struct mapped_device *md) in unlock_fs() argument
2555 if (!test_bit(DMF_FROZEN, &md->flags)) in unlock_fs()
2558 thaw_bdev(md->bdev, md->frozen_sb); in unlock_fs()
2559 md->frozen_sb = NULL; in unlock_fs()
2560 clear_bit(DMF_FROZEN, &md->flags); in unlock_fs()
2572 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend() argument
2580 lockdep_assert_held(&md->suspend_lock); in __dm_suspend()
2587 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2589 pr_debug("%s: suspending with flush\n", dm_device_name(md)); in __dm_suspend()
2604 r = lock_fs(md); in __dm_suspend()
2623 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in __dm_suspend()
2625 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2631 if (dm_request_based(md)) in __dm_suspend()
2632 dm_stop_queue(md->queue); in __dm_suspend()
2634 flush_workqueue(md->wq); in __dm_suspend()
2641 r = dm_wait_for_completion(md, task_state); in __dm_suspend()
2643 set_bit(dmf_suspended_flag, &md->flags); in __dm_suspend()
2646 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); in __dm_suspend()
2648 synchronize_srcu(&md->io_barrier); in __dm_suspend()
2652 dm_queue_flush(md); in __dm_suspend()
2654 if (dm_request_based(md)) in __dm_suspend()
2655 dm_start_queue(md->queue); in __dm_suspend()
2657 unlock_fs(md); in __dm_suspend()
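Read in order, the __dm_suspend() fragments give the whole quiescing sequence; a comment-form summary, with step boundaries inferred from the fragments above:

/*
 * __dm_suspend(), reconstructed ordering:
 *  1. noflush only: set DMF_NOFLUSH_SUSPENDING so dec_pending()
 *     requeues BLK_STS_DM_REQUEUE bios instead of failing them.
 *  2. flush suspend: lock_fs() freezes the bdev to flush dirty pages.
 *  3. set DMF_BLOCK_IO_FOR_SUSPEND, then synchronize_srcu(): from
 *     here dm_make_request() parks new bios on md->deferred.
 *  4. request-based: dm_stop_queue(); then flush_workqueue(md->wq).
 *  5. dm_wait_for_completion() drains everything still in flight.
 *  6. success: set the caller's suspended flag; failure: undo with
 *     dm_queue_flush(), dm_start_queue() and unlock_fs().
 */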
2681 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend() argument
2687 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_suspend()
2689 if (dm_suspended_md(md)) { in dm_suspend()
2694 if (dm_suspended_internally_md(md)) { in dm_suspend()
2696 mutex_unlock(&md->suspend_lock); in dm_suspend()
2697 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_suspend()
2703 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_suspend()
2705 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED); in dm_suspend()
2712 mutex_unlock(&md->suspend_lock); in dm_suspend()
2716 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume() argument
2724 dm_queue_flush(md); in __dm_resume()
2731 if (dm_request_based(md)) in __dm_resume()
2732 dm_start_queue(md->queue); in __dm_resume()
2734 unlock_fs(md); in __dm_resume()
2739 int dm_resume(struct mapped_device *md) in dm_resume() argument
2746 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING); in dm_resume()
2748 if (!dm_suspended_md(md)) in dm_resume()
2751 if (dm_suspended_internally_md(md)) { in dm_resume()
2753 mutex_unlock(&md->suspend_lock); in dm_resume()
2754 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE); in dm_resume()
2760 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in dm_resume()
2764 r = __dm_resume(md, map); in dm_resume()
2768 clear_bit(DMF_SUSPENDED, &md->flags); in dm_resume()
2770 mutex_unlock(&md->suspend_lock); in dm_resume()
2781 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend() argument
2785 lockdep_assert_held(&md->suspend_lock); in __dm_internal_suspend()
2787 if (md->internal_suspend_count++) in __dm_internal_suspend()
2790 if (dm_suspended_md(md)) { in __dm_internal_suspend()
2791 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_suspend()
2795 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock)); in __dm_internal_suspend()
2803 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE, in __dm_internal_suspend()
2809 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume() argument
2811 BUG_ON(!md->internal_suspend_count); in __dm_internal_resume()
2813 if (--md->internal_suspend_count) in __dm_internal_resume()
2816 if (dm_suspended_md(md)) in __dm_internal_resume()
2823 (void) __dm_resume(md, NULL); in __dm_internal_resume()
2826 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in __dm_internal_resume()
2828 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY); in __dm_internal_resume()
2831 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush() argument
2833 mutex_lock(&md->suspend_lock); in dm_internal_suspend_noflush()
2834 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG); in dm_internal_suspend_noflush()
2835 mutex_unlock(&md->suspend_lock); in dm_internal_suspend_noflush()
2839 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume() argument
2841 mutex_lock(&md->suspend_lock); in dm_internal_resume()
2842 __dm_internal_resume(md); in dm_internal_resume()
2843 mutex_unlock(&md->suspend_lock); in dm_internal_resume()
2852 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast() argument
2854 mutex_lock(&md->suspend_lock); in dm_internal_suspend_fast()
2855 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_suspend_fast()
2858 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); in dm_internal_suspend_fast()
2859 synchronize_srcu(&md->io_barrier); in dm_internal_suspend_fast()
2860 flush_workqueue(md->wq); in dm_internal_suspend_fast()
2861 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE); in dm_internal_suspend_fast()
2865 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast() argument
2867 if (dm_suspended_md(md) || dm_suspended_internally_md(md)) in dm_internal_resume_fast()
2870 dm_queue_flush(md); in dm_internal_resume_fast()
2873 mutex_unlock(&md->suspend_lock); in dm_internal_resume_fast()
2880 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent() argument
2887 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action); in dm_kobject_uevent()
2891 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj, in dm_kobject_uevent()
2896 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq() argument
2898 return atomic_add_return(1, &md->uevent_seq); in dm_next_uevent_seq()
2901 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr() argument
2903 return atomic_read(&md->event_nr); in dm_get_event_nr()
2906 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event() argument
2908 return wait_event_interruptible(md->eventq, in dm_wait_event()
2909 (event_nr != atomic_read(&md->event_nr))); in dm_wait_event()
2912 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add() argument
2916 spin_lock_irqsave(&md->uevent_lock, flags); in dm_uevent_add()
2917 list_add(elist, &md->uevent_list); in dm_uevent_add()
2918 spin_unlock_irqrestore(&md->uevent_lock, flags); in dm_uevent_add()
2925 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk() argument
2927 return md->disk; in dm_disk()
2931 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject() argument
2933 return &md->kobj_holder.kobj; in dm_kobject()
2938 struct mapped_device *md; in dm_get_from_kobject() local
2940 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
2943 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) { in dm_get_from_kobject()
2944 md = NULL; in dm_get_from_kobject()
2947 dm_get(md); in dm_get_from_kobject()
2951 return md; in dm_get_from_kobject()
2954 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md() argument
2956 return test_bit(DMF_SUSPENDED, &md->flags); in dm_suspended_md()
2959 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md() argument
2961 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags); in dm_suspended_internally_md()
2964 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag() argument
2966 return test_bit(DMF_DEFERRED_REMOVE, &md->flags); in dm_test_deferred_remove_flag()
2981 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, in dm_alloc_md_mempools() argument
2985 struct dm_md_mempools *pools = kzalloc_node(sizeof(*pools), GFP_KERNEL, md->numa_node_id); in dm_alloc_md_mempools()
3051 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr() local
3056 table = dm_get_live_table(md, &srcu_idx); in dm_call_pr()
3071 dm_put_live_table(md, srcu_idx); in dm_call_pr()
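dm_call_pr() is the common helper behind the dm_pr_* functions that follow: persistent reservations are only forwarded when the live table has exactly one target exposing iterate_devices. A hedged reconstruction:

static int call_pr_sketch(struct block_device *bdev,
                          iterate_devices_callout_fn fn, void *data)
{
    struct mapped_device *md = bdev->bd_disk->private_data;
    struct dm_table *table;
    struct dm_target *ti;
    int ret = -ENOTTY, srcu_idx;

    table = dm_get_live_table(md, &srcu_idx);
    if (!table || !dm_table_get_size(table))
        goto out;

    /* PR ops only make sense with a single underlying target */
    if (dm_table_get_num_targets(table) != 1)
        goto out;
    ti = dm_table_get_target(table, 0);

    ret = -EINVAL;
    if (!ti->type->iterate_devices)
        goto out;

    ret = ti->type->iterate_devices(ti, fn, data);
out:
    dm_put_live_table(md, srcu_idx);
    return ret;
}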
3116 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve() local
3120 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_reserve()
3130 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_reserve()
3136 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release() local
3140 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_release()
3150 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_release()
3157 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt() local
3161 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_preempt()
3171 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_preempt()
3177 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear() local
3181 r = dm_prepare_ioctl(md, &srcu_idx, &bdev); in dm_pr_clear()
3191 dm_unprepare_ioctl(md, srcu_idx); in dm_pr_clear()