Lines Matching refs:mapped_device

94 	struct mapped_device *md;
342 int dm_deleting_md(struct mapped_device *md) in dm_deleting_md()
349 struct mapped_device *md; in dm_blk_open()
373 struct mapped_device *md; in dm_blk_close()
390 int dm_open_count(struct mapped_device *md) in dm_open_count()
398 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred) in dm_lock_for_deletion()
418 int dm_cancel_deferred_remove(struct mapped_device *md) in dm_cancel_deferred_remove()
439 sector_t dm_get_size(struct mapped_device *md) in dm_get_size()
444 struct request_queue *dm_get_md_queue(struct mapped_device *md) in dm_get_md_queue()
449 struct dm_stats *dm_get_stats(struct mapped_device *md) in dm_get_stats()
456 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_getgeo()
461 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx, in dm_prepare_ioctl()
496 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx) in dm_unprepare_ioctl()
505 struct mapped_device *md = bdev->bd_disk->private_data; in dm_blk_ioctl()
534 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) in alloc_io()
561 static void free_io(struct mapped_device *md, struct dm_io *io) in free_io()
598 int md_in_flight(struct mapped_device *md) in md_in_flight()
606 struct mapped_device *md = io->md; in start_io_acct()
626 struct mapped_device *md = io->md; in end_io_acct()
656 static void queue_io(struct mapped_device *md, struct bio *bio) in queue_io()
671 struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier) in dm_get_live_table()
678 void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier) in dm_put_live_table()
683 void dm_sync_table(struct mapped_device *md) in dm_sync_table()
693 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU) in dm_get_live_table_fast()
699 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) in dm_put_live_table_fast()
710 struct mapped_device *md) in open_table_device()
736 static void close_table_device(struct table_device *td, struct mapped_device *md) in close_table_device()
759 int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode, in dm_get_table_device()
796 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d) in dm_put_table_device()
826 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_get_geometry()
836 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) in dm_set_geometry()
850 static int __noflush_suspending(struct mapped_device *md) in __noflush_suspending()
864 struct mapped_device *md = io->md; in dec_pending()
913 void disable_write_same(struct mapped_device *md) in disable_write_same()
921 void disable_write_zeroes(struct mapped_device *md) in disable_write_zeroes()
934 struct mapped_device *md = tio->io->md; in clone_endio()
1024 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md, in dm_dax_get_live_target()
1045 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_direct_access()
1072 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_from_iter()
1096 struct mapped_device *md = dax_get_private(dax_dev); in dm_dax_copy_to_iter()
1255 struct mapped_device *md = io->md; in __map_bio()
1572 static void init_clone_info(struct clone_info *ci, struct mapped_device *md, in init_clone_info()
1583 static blk_qc_t __split_and_process_bio(struct mapped_device *md, in __split_and_process_bio()
1642 static blk_qc_t __process_bio(struct mapped_device *md, in __process_bio()
1688 typedef blk_qc_t (process_bio_fn)(struct mapped_device *, struct dm_table *, struct bio *);
1693 struct mapped_device *md = q->queuedata; in __dm_make_request()
1734 struct mapped_device *md = congested_data; in dm_any_congested()
1809 static void dm_init_normal_md_queue(struct mapped_device *md) in dm_init_normal_md_queue()
1819 static void cleanup_mapped_device(struct mapped_device *md) in cleanup_mapped_device()
1862 static struct mapped_device *alloc_dev(int minor) in alloc_dev()
1866 struct mapped_device *md; in alloc_dev()
1975 static void unlock_fs(struct mapped_device *md);
1977 static void free_dev(struct mapped_device *md) in free_dev()
1993 static int __bind_mempools(struct mapped_device *md, struct dm_table *t) in __bind_mempools()
2042 struct mapped_device *md = (struct mapped_device *) context; in event_callback()
2058 static void __set_size(struct mapped_device *md, sector_t size) in __set_size()
2070 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, in __bind()
2134 static struct dm_table *__unbind(struct mapped_device *md) in __unbind()
2151 int dm_create(int minor, struct mapped_device **result) in dm_create()
2154 struct mapped_device *md; in dm_create()
2174 void dm_lock_md_type(struct mapped_device *md) in dm_lock_md_type()
2179 void dm_unlock_md_type(struct mapped_device *md) in dm_unlock_md_type()
2184 void dm_set_md_type(struct mapped_device *md, enum dm_queue_mode type) in dm_set_md_type()
2190 enum dm_queue_mode dm_get_md_type(struct mapped_device *md) in dm_get_md_type()
2195 struct target_type *dm_get_immutable_target_type(struct mapped_device *md) in dm_get_immutable_target_type()
2204 struct queue_limits *dm_get_queue_limits(struct mapped_device *md) in dm_get_queue_limits()
2214 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t) in dm_setup_md_queue()
2261 struct mapped_device *dm_get_md(dev_t dev) in dm_get_md()
2263 struct mapped_device *md; in dm_get_md()
2285 void *dm_get_mdptr(struct mapped_device *md) in dm_get_mdptr()
2290 void dm_set_mdptr(struct mapped_device *md, void *ptr) in dm_set_mdptr()
2295 void dm_get(struct mapped_device *md) in dm_get()
2301 int dm_hold(struct mapped_device *md) in dm_hold()
2314 const char *dm_device_name(struct mapped_device *md) in dm_device_name()
2320 static void __dm_destroy(struct mapped_device *md, bool wait) in __dm_destroy()
2369 void dm_destroy(struct mapped_device *md) in dm_destroy()
2374 void dm_destroy_immediate(struct mapped_device *md) in dm_destroy_immediate()
2379 void dm_put(struct mapped_device *md) in dm_put()
2385 static int dm_wait_for_completion(struct mapped_device *md, long task_state) in dm_wait_for_completion()
2413 struct mapped_device *md = container_of(work, struct mapped_device, in dm_wq_work()
2438 static void dm_queue_flush(struct mapped_device *md) in dm_queue_flush()
2448 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) in dm_swap_table()
2493 static int lock_fs(struct mapped_device *md) in lock_fs()
2511 static void unlock_fs(struct mapped_device *md) in unlock_fs()
2530 static int __dm_suspend(struct mapped_device *md, struct dm_table *map, in __dm_suspend()
2642 int dm_suspend(struct mapped_device *md, unsigned suspend_flags) in dm_suspend()
2677 static int __dm_resume(struct mapped_device *md, struct dm_table *map) in __dm_resume()
2700 int dm_resume(struct mapped_device *md) in dm_resume()
2742 static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags) in __dm_internal_suspend()
2770 static void __dm_internal_resume(struct mapped_device *md) in __dm_internal_resume()
2792 void dm_internal_suspend_noflush(struct mapped_device *md) in dm_internal_suspend_noflush()
2800 void dm_internal_resume(struct mapped_device *md) in dm_internal_resume()
2813 void dm_internal_suspend_fast(struct mapped_device *md) in dm_internal_suspend_fast()
2826 void dm_internal_resume_fast(struct mapped_device *md) in dm_internal_resume_fast()
2841 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, in dm_kobject_uevent()
2857 uint32_t dm_next_uevent_seq(struct mapped_device *md) in dm_next_uevent_seq()
2862 uint32_t dm_get_event_nr(struct mapped_device *md) in dm_get_event_nr()
2867 int dm_wait_event(struct mapped_device *md, int event_nr) in dm_wait_event()
2873 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) in dm_uevent_add()
2886 struct gendisk *dm_disk(struct mapped_device *md) in dm_disk()
2892 struct kobject *dm_kobject(struct mapped_device *md) in dm_kobject()
2897 struct mapped_device *dm_get_from_kobject(struct kobject *kobj) in dm_get_from_kobject()
2899 struct mapped_device *md; in dm_get_from_kobject()
2901 md = container_of(kobj, struct mapped_device, kobj_holder.kobj); in dm_get_from_kobject()
2915 int dm_suspended_md(struct mapped_device *md) in dm_suspended_md()
2920 int dm_suspended_internally_md(struct mapped_device *md) in dm_suspended_internally_md()
2925 int dm_test_deferred_remove_flag(struct mapped_device *md) in dm_test_deferred_remove_flag()
2942 struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_queue_mode type, in dm_alloc_md_mempools()
3013 struct mapped_device *md = bdev->bd_disk->private_data; in dm_call_pr()
3078 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_reserve()
3098 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_release()
3119 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_preempt()
3139 struct mapped_device *md = bdev->bd_disk->private_data; in dm_pr_clear()
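
Most of the entry points listed above take a struct mapped_device * that the caller either pulled out of bdev->bd_disk->private_data or obtained from a dev_t lookup. Below is a minimal sketch, not a definitive implementation, of how an in-kernel caller might combine a few of these functions: dm_get_md() for the lookup (which takes a reference), dm_device_name() for logging, dm_get_live_table()/dm_put_live_table() to pin the live table under SRCU, and dm_put() to drop the reference. The module boilerplate and the MKDEV(253, 0) device number are illustrative assumptions, not part of the listing.

/*
 * Illustrative sketch only: exercises a few of the mapped_device entry
 * points listed above. The dev_t value is an assumed example.
 */
#include <linux/module.h>
#include <linux/kdev_t.h>
#include <linux/device-mapper.h>

static int __init mapped_device_demo_init(void)
{
	struct mapped_device *md;
	struct dm_table *map;
	int srcu_idx;

	/* Look up the mapped_device by dev_t; takes a reference on success. */
	md = dm_get_md(MKDEV(253, 0));	/* 253:0 is an assumed dm device */
	if (!md)
		return -ENODEV;

	pr_info("found device-mapper device %s\n", dm_device_name(md));

	/* Pin the live table under SRCU while inspecting it. */
	map = dm_get_live_table(md, &srcu_idx);
	if (map)
		pr_info("%s has a live table\n", dm_device_name(md));
	/* Always release the SRCU index taken by dm_get_live_table(). */
	dm_put_live_table(md, srcu_idx);

	/* Drop the reference taken by dm_get_md(). */
	dm_put(md);
	return 0;
}

static void __exit mapped_device_demo_exit(void)
{
}

module_init(mapped_device_demo_init);
module_exit(mapped_device_demo_exit);
MODULE_LICENSE("GPL");

Note the pairing in the sketch: dm_put_live_table() is called even when dm_get_live_table() returns NULL, because the SRCU read-side lock is taken regardless of whether a table is currently bound, mirroring the __acquires/__releases annotations on those functions in the listing.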