Lines Matching refs:mapped_device

94 	struct mapped_device *md;
324 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md()
331 struct mapped_device *md; in dm_blk_open()
355 struct mapped_device *md; in dm_blk_close()
372 int dm_open_count(struct mapped_device *md) in dm_open_count()
380 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion()
400 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove()
421 sector_t dm_get_size(struct mapped_device *md) in dm_get_size()
426 struct request_queue *dm_get_md_queue(struct mapped_device *md) in dm_get_md_queue()
431 struct dm_stats *dm_get_stats(struct mapped_device *md) in dm_get_stats()
438 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo()
447 struct mapped_device *md = disk->private_data; in dm_blk_report_zones()
492 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl()
527 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl()
536 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl()
565 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io()
592 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io()
629 static bool md_in_flight_bios(struct mapped_device *md) in md_in_flight_bios()
643 static bool md_in_flight(struct mapped_device *md) in md_in_flight()
653 struct mapped_device *md = io->md; in start_io_acct()
669 struct mapped_device *md = io->md; in end_io_acct()
689 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io()
704 	struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
711 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
716 void dm_sync_table(struct mapped_device *md) in dm_sync_table()
726 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast()
732 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast()
743 struct mapped_device *md) in open_table_device()
769 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device()
793 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device()
831 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device()
861 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry()
871 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry()
885 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending()
899 struct mapped_device *md = io->md; in dec_pending()
948 void disable_discard(struct mapped_device *md) in disable_discard()
957 void disable_write_same(struct mapped_device *md) in disable_write_same()
965 void disable_write_zeroes(struct mapped_device *md) in disable_write_zeroes()
978 struct mapped_device *md = tio->io->md; in clone_endio()
1063 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target()
1084 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access()
1111 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_supported()
1130 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_from_iter()
1154 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_to_iter()
1269 struct mapped_device *md = io->md; in __map_bio()
1593 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, in init_clone_info()
1607 static blk_qc_t __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio()
1681 static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map, in __process_bio()
1721 static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) in dm_queue_split()
1738 static blk_qc_t dm_process_bio(struct mapped_device *md, in dm_process_bio()
1776 struct mapped_device *md = q->queuedata; in dm_make_request()
1803 struct mapped_device *md = congested_data; in dm_any_congested()
1878 static void dm_init_normal_md_queue(struct mapped_device *md) in dm_init_normal_md_queue()
1886 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device()
1927 static struct mapped_device *alloc_dev(int minor) in alloc_dev()
1930 struct mapped_device *md; in alloc_dev()
2031 static void unlock_fs(struct mapped_device *md);
2033 static void free_dev(struct mapped_device *md) in free_dev()
2049 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools()
2098 struct mapped_device *md = (struct mapped_device *) context; in event_callback()
2114 static void __set_size(struct mapped_device *md, sector_t size) in __set_size()
2126 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind()
2190 static struct dm_table *__unbind(struct mapped_device *md) in __unbind()
2207 int dm_create(int minor, struct mapped_device **result) in dm_create()
2210 struct mapped_device *md; in dm_create()
2230 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type()
2235 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type()
2240 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) in dm_set_md_type()
2246 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type()
2251 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type()
2260 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits()
2270 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue()
2306 struct mapped_device *dm_get_md(dev_t dev) in dm_get_md()
2308 struct mapped_device *md; in dm_get_md()
2330 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr()
2335 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr()
2340 void dm_get(struct mapped_device *md) in dm_get()
2346 int dm_hold(struct mapped_device *md) in dm_hold()
2359 const char *dm_device_name(struct mapped_device *md) in dm_device_name()
2365 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy()
2411 void dm_destroy(struct mapped_device *md) in dm_destroy()
2416 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate()
2421 void dm_put(struct mapped_device *md) in dm_put()
2427 static int dm_wait_for_completion(struct mapped_device *md, long task_state) in dm_wait_for_completion()
2455 struct mapped_device *md = container_of(work, struct mapped_device, in dm_wq_work()
2480 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush()
2490 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table()
2535 static int lock_fs(struct mapped_device *md) in lock_fs()
2553 static void unlock_fs(struct mapped_device *md) in unlock_fs()
2572 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend()
2681 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend()
2716 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume()
2739 int dm_resume(struct mapped_device *md) in dm_resume()
2781 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend()
2809 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume()
2831 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush()
2839 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume()
2852 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast()
2865 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast()
2880 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent()
2896 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq()
2901 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr()
2906 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event()
2912 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add()
2925 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk()
2931 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject()
2936 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) in dm_get_from_kobject()
2938 struct mapped_device *md; in dm_get_from_kobject()
2940 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
2954 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md()
2959 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md()
2964 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag()
2981 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, in dm_alloc_md_mempools()
3051 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr()
3116 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve()
3136 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release()
3157 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt()
3177 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear()