Lines matching full:lo (whole-word occurrences of the identifier "lo" in the loop block driver, drivers/block/loop.c; each entry shows the source line number, the matched line, and the enclosing function)
102 * @lo: struct loop_device
103 * @global: true if @lo is about to bind another "struct loop_device", false otherwise
111 static int loop_global_lock_killable(struct loop_device *lo, bool global) in loop_global_lock_killable() argument
120 err = mutex_lock_killable(&lo->lo_mutex); in loop_global_lock_killable()
129 * @lo: struct loop_device
130 * @global: true if @lo was about to bind another "struct loop_device", false otherwise
132 static void loop_global_unlock(struct loop_device *lo, bool global) in loop_global_unlock() argument
134 mutex_unlock(&lo->lo_mutex); in loop_global_unlock()
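The two helpers above bracket every configuration path in this listing: loop_global_lock_killable() optionally takes a driver-wide lock before the per-device lo_mutex, and loop_global_unlock() drops them in reverse order. A condensed sketch of that pairing, assuming the module-wide mutex is the loop_validate_mutex used by mainline loop.c:

static DEFINE_MUTEX(loop_validate_mutex);	/* assumption: serializes cross-device binds */

static int loop_global_lock_killable(struct loop_device *lo, bool global)
{
	int err;

	if (global) {
		err = mutex_lock_killable(&loop_validate_mutex);
		if (err)
			return err;
	}
	err = mutex_lock_killable(&lo->lo_mutex);
	if (err && global)
		mutex_unlock(&loop_validate_mutex);
	return err;
}

static void loop_global_unlock(struct loop_device *lo, bool global)
{
	mutex_unlock(&lo->lo_mutex);
	if (global)
		mutex_unlock(&loop_validate_mutex);
}

As the kernel-doc above says, global is true only when one loop device is being bound on top of another, which is why loop_change_fd() and loop_configure() compute an is_loop value before calling these helpers.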
163 static loff_t get_loop_size(struct loop_device *lo, struct file *file) in get_loop_size() argument
165 return get_size(lo->lo_offset, lo->lo_sizelimit, file); in get_loop_size()
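get_loop_size() at line 163 is a thin wrapper; the arithmetic lives in get_size(), which takes no lo argument and therefore does not appear among the matches. A sketch of that helper, under the usual loop semantics (backing-file size minus lo_offset, clamped by lo_sizelimit, returned in 512-byte sectors):

/* Sketch; assumption: mirrors the get_size() helper wrapped by get_loop_size() above. */
static loff_t get_size(loff_t offset, loff_t sizelimit, struct file *file)
{
	loff_t loopsize = i_size_read(file->f_mapping->host);

	if (offset > 0)
		loopsize -= offset;
	if (loopsize < 0)			/* offset beyond i_size: treat as empty */
		return 0;
	if (sizelimit > 0 && sizelimit < loopsize)
		loopsize = sizelimit;
	return loopsize >> 9;			/* capacity in 512-byte sectors */
}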
168 static void __loop_update_dio(struct loop_device *lo, bool dio) in __loop_update_dio() argument
170 struct file *file = lo->lo_backing_file; in __loop_update_dio()
192 if (queue_logical_block_size(lo->lo_queue) >= sb_bsize && in __loop_update_dio()
193 !(lo->lo_offset & dio_align) && in __loop_update_dio()
202 if (lo->use_dio == use_dio) in __loop_update_dio()
213 if (lo->lo_state == Lo_bound) in __loop_update_dio()
214 blk_mq_freeze_queue(lo->lo_queue); in __loop_update_dio()
215 lo->use_dio = use_dio; in __loop_update_dio()
217 blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue); in __loop_update_dio()
218 lo->lo_flags |= LO_FLAGS_DIRECT_IO; in __loop_update_dio()
220 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); in __loop_update_dio()
221 lo->lo_flags &= ~LO_FLAGS_DIRECT_IO; in __loop_update_dio()
223 if (lo->lo_state == Lo_bound) in __loop_update_dio()
224 blk_mq_unfreeze_queue(lo->lo_queue); in __loop_update_dio()
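Read together, the __loop_update_dio() matches spell out when direct I/O is allowed: the loop queue's logical block size must be at least the backing filesystem's, lo_offset must be aligned to it, and the backing file must be able to do O_DIRECT; the switch itself happens with the queue frozen whenever the device is already bound. A condensed sketch (the FMODE_CAN_ODIRECT test stands in for the elided third condition and is an assumption):

static void __loop_update_dio(struct loop_device *lo, bool dio)
{
	struct file *file = lo->lo_backing_file;
	struct inode *inode = file->f_mapping->host;
	unsigned short sb_bsize = 0;
	unsigned int dio_align = 0;
	bool use_dio = false;

	if (inode->i_sb->s_bdev) {
		sb_bsize = bdev_logical_block_size(inode->i_sb->s_bdev);
		dio_align = sb_bsize - 1;
	}

	if (dio &&
	    queue_logical_block_size(lo->lo_queue) >= sb_bsize &&
	    !(lo->lo_offset & dio_align) &&
	    (file->f_mode & FMODE_CAN_ODIRECT))
		use_dio = true;

	if (lo->use_dio == use_dio)
		return;

	/* Flip the mode with no requests in flight. */
	if (lo->lo_state == Lo_bound)
		blk_mq_freeze_queue(lo->lo_queue);
	lo->use_dio = use_dio;
	if (use_dio) {
		blk_queue_flag_clear(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags |= LO_FLAGS_DIRECT_IO;
	} else {
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue);
		lo->lo_flags &= ~LO_FLAGS_DIRECT_IO;
	}
	if (lo->lo_state == Lo_bound)
		blk_mq_unfreeze_queue(lo->lo_queue);
}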
229 * @lo: struct loop_device to set the size for
235 static void loop_set_size(struct loop_device *lo, loff_t size) in loop_set_size() argument
237 if (!set_capacity_and_notify(lo->lo_disk, size)) in loop_set_size()
238 kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); in loop_set_size()
263 static int lo_write_simple(struct loop_device *lo, struct request *rq, in lo_write_simple() argument
271 ret = lo_write_bvec(lo->lo_backing_file, &bvec, &pos); in lo_write_simple()
280 static int lo_read_simple(struct loop_device *lo, struct request *rq, in lo_read_simple() argument
290 len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0); in lo_read_simple()
309 static int lo_fallocate(struct loop_device *lo, struct request *rq, loff_t pos, in lo_fallocate() argument
316 struct file *file = lo->lo_backing_file; in lo_fallocate()
321 if (!bdev_max_discard_sectors(lo->lo_device)) in lo_fallocate()
330 static int lo_req_flush(struct loop_device *lo, struct request *rq) in lo_req_flush() argument
332 int ret = vfs_fsync(lo->lo_backing_file, 0); in lo_req_flush()
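lo_read_simple() and lo_write_simple() are the buffered (non-AIO) paths: they walk the request's bio_vecs and issue one vfs_iter_read()/vfs_iter_write() per segment against the backing file, while lo_fallocate() and lo_req_flush() map discard/write-zeroes and flush onto fallocate() and vfs_fsync(). A sketch of the read side, assuming the mainline shape of lo_read_simple() (older kernels pass READ where newer ones pass ITER_DEST to iov_iter_bvec()):

static int lo_read_simple(struct loop_device *lo, struct request *rq, loff_t pos)
{
	struct bio_vec bvec;
	struct req_iterator iter;
	struct iov_iter i;
	ssize_t len;

	rq_for_each_segment(bvec, rq, iter) {
		iov_iter_bvec(&i, ITER_DEST, &bvec, 1, bvec.bv_len);
		len = vfs_iter_read(lo->lo_backing_file, &i, &pos, 0);
		if (len < 0)
			return len;
		flush_dcache_page(bvec.bv_page);
		if (len != bvec.bv_len) {
			struct bio *bio;

			/* Short read: zero the remainder so the caller sees stable data. */
			__rq_for_each_bio(bio, rq)
				zero_fill_bio(bio);
			break;
		}
	}
	return 0;
}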
394 static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, in lo_rw_aio() argument
402 struct file *file = lo->lo_backing_file; in lo_rw_aio()
463 static int do_req_filebacked(struct loop_device *lo, struct request *rq) in do_req_filebacked() argument
466 loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset; in do_req_filebacked()
479 return lo_req_flush(lo, rq); in do_req_filebacked()
485 return lo_fallocate(lo, rq, pos, in do_req_filebacked()
490 return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE); in do_req_filebacked()
493 return lo_rw_aio(lo, cmd, pos, WRITE); in do_req_filebacked()
495 return lo_write_simple(lo, rq, pos); in do_req_filebacked()
498 return lo_rw_aio(lo, cmd, pos, READ); in do_req_filebacked()
500 return lo_read_simple(lo, rq, pos); in do_req_filebacked()
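The do_req_filebacked() matches are the arms of one switch on req_op(rq), with pos computed from the request sector plus lo_offset at line 466. A condensed sketch of that dispatch (the REQ_NOUNMAP handling fills in the fallocate mode truncated at line 485 and is an assumption):

static int do_req_filebacked(struct loop_device *lo, struct request *rq)
{
	struct loop_cmd *cmd = blk_mq_rq_to_pdu(rq);
	loff_t pos = ((loff_t) blk_rq_pos(rq) << 9) + lo->lo_offset;

	switch (req_op(rq)) {
	case REQ_OP_FLUSH:
		return lo_req_flush(lo, rq);
	case REQ_OP_WRITE_ZEROES:
		/* Assumption: keep blocks allocated when REQ_NOUNMAP is set. */
		return lo_fallocate(lo, rq, pos,
			(rq->cmd_flags & REQ_NOUNMAP) ?
				FALLOC_FL_ZERO_RANGE : FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_DISCARD:
		return lo_fallocate(lo, rq, pos, FALLOC_FL_PUNCH_HOLE);
	case REQ_OP_WRITE:
		if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, WRITE);
		return lo_write_simple(lo, rq, pos);
	case REQ_OP_READ:
		if (cmd->use_aio)
			return lo_rw_aio(lo, cmd, pos, READ);
		return lo_read_simple(lo, rq, pos);
	default:
		WARN_ON_ONCE(1);
		return -EIO;
	}
}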
507 static inline void loop_update_dio(struct loop_device *lo) in loop_update_dio() argument
509 __loop_update_dio(lo, (lo->lo_backing_file->f_flags & O_DIRECT) | in loop_update_dio()
510 lo->use_dio); in loop_update_dio()
513 static void loop_reread_partitions(struct loop_device *lo) in loop_reread_partitions() argument
517 mutex_lock(&lo->lo_disk->open_mutex); in loop_reread_partitions()
518 rc = bdev_disk_changed(lo->lo_disk, false); in loop_reread_partitions()
519 mutex_unlock(&lo->lo_disk->open_mutex); in loop_reread_partitions()
522 __func__, lo->lo_number, lo->lo_file_name, rc); in loop_reread_partitions()
548 /* Order wrt setting lo->lo_backing_file in loop_configure(). */ in loop_validate_file()
565 static int loop_change_fd(struct loop_device *lo, struct block_device *bdev, in loop_change_fd() argument
578 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); in loop_change_fd()
581 error = loop_global_lock_killable(lo, is_loop); in loop_change_fd()
585 if (lo->lo_state != Lo_bound) in loop_change_fd()
590 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY)) in loop_change_fd()
597 old_file = lo->lo_backing_file; in loop_change_fd()
602 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) in loop_change_fd()
606 disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); in loop_change_fd()
607 blk_mq_freeze_queue(lo->lo_queue); in loop_change_fd()
608 mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask); in loop_change_fd()
609 lo->lo_backing_file = file; in loop_change_fd()
610 lo->old_gfp_mask = mapping_gfp_mask(file->f_mapping); in loop_change_fd()
612 lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_change_fd()
613 loop_update_dio(lo); in loop_change_fd()
614 blk_mq_unfreeze_queue(lo->lo_queue); in loop_change_fd()
615 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; in loop_change_fd()
616 loop_global_unlock(lo, is_loop); in loop_change_fd()
633 loop_reread_partitions(lo); in loop_change_fd()
638 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); in loop_change_fd()
642 loop_global_unlock(lo, is_loop); in loop_change_fd()
654 struct loop_device *lo = disk->private_data; in loop_attr_show() local
656 return callback(lo, page); in loop_attr_show()
669 static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) in loop_attr_backing_file_show() argument
674 spin_lock_irq(&lo->lo_lock); in loop_attr_backing_file_show()
675 if (lo->lo_backing_file) in loop_attr_backing_file_show()
676 p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); in loop_attr_backing_file_show()
677 spin_unlock_irq(&lo->lo_lock); in loop_attr_backing_file_show()
691 static ssize_t loop_attr_offset_show(struct loop_device *lo, char *buf) in loop_attr_offset_show() argument
693 return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_offset); in loop_attr_offset_show()
696 static ssize_t loop_attr_sizelimit_show(struct loop_device *lo, char *buf) in loop_attr_sizelimit_show() argument
698 return sysfs_emit(buf, "%llu\n", (unsigned long long)lo->lo_sizelimit); in loop_attr_sizelimit_show()
701 static ssize_t loop_attr_autoclear_show(struct loop_device *lo, char *buf) in loop_attr_autoclear_show() argument
703 int autoclear = (lo->lo_flags & LO_FLAGS_AUTOCLEAR); in loop_attr_autoclear_show()
708 static ssize_t loop_attr_partscan_show(struct loop_device *lo, char *buf) in loop_attr_partscan_show() argument
710 int partscan = (lo->lo_flags & LO_FLAGS_PARTSCAN); in loop_attr_partscan_show()
715 static ssize_t loop_attr_dio_show(struct loop_device *lo, char *buf) in loop_attr_dio_show() argument
717 int dio = (lo->lo_flags & LO_FLAGS_DIRECT_IO); in loop_attr_dio_show()
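Every *_show() helper above is reached through a single dispatcher that recovers the loop_device from the gendisk behind the sysfs struct device; lines 654-656 show its body. A sketch of that plumbing, with the per-attribute wrapper (normally generated by a macro in loop.c) written out for one attribute as an assumed example:

static ssize_t loop_attr_show(struct device *dev, char *page,
			      ssize_t (*callback)(struct loop_device *, char *))
{
	struct gendisk *disk = dev_to_disk(dev);
	struct loop_device *lo = disk->private_data;

	return callback(lo, page);
}

/* Assumption: each attribute is wired up roughly like this, here for "offset". */
static ssize_t loop_attr_do_show_offset(struct device *d,
					struct device_attribute *attr, char *b)
{
	return loop_attr_show(d, b, loop_attr_offset_show);
}
static struct device_attribute loop_attr_offset =
	__ATTR(offset, 0444, loop_attr_do_show_offset, NULL);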
744 static void loop_sysfs_init(struct loop_device *lo) in loop_sysfs_init() argument
746 lo->sysfs_inited = !sysfs_create_group(&disk_to_dev(lo->lo_disk)->kobj, in loop_sysfs_init()
750 static void loop_sysfs_exit(struct loop_device *lo) in loop_sysfs_exit() argument
752 if (lo->sysfs_inited) in loop_sysfs_exit()
753 sysfs_remove_group(&disk_to_dev(lo->lo_disk)->kobj, in loop_sysfs_exit()
757 static void loop_config_discard(struct loop_device *lo) in loop_config_discard() argument
759 struct file *file = lo->lo_backing_file; in loop_config_discard()
761 struct request_queue *q = lo->lo_queue; in loop_config_discard()
812 struct loop_device *lo; member
831 static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd) in loop_queue_work() argument
838 spin_lock_irq(&lo->lo_work_lock); in loop_queue_work()
843 node = &lo->worker_tree.rb_node; in loop_queue_work()
878 worker->lo = lo; in loop_queue_work()
880 rb_insert_color(&worker->rb_node, &lo->worker_tree); in loop_queue_work()
893 work = &lo->rootcg_work; in loop_queue_work()
894 cmd_list = &lo->rootcg_cmd_list; in loop_queue_work()
897 queue_work(lo->workqueue, work); in loop_queue_work()
898 spin_unlock_irq(&lo->lo_work_lock); in loop_queue_work()
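loop_queue_work() routes each command to a per-cgroup worker kept in lo->worker_tree, so I/O charged to different blkcg groups does not serialize behind one kthread; commands with no cgroup association, or for which worker allocation fails, fall back to the shared rootcg work item. A condensed sketch of that routing; struct fields not visible in the matches (blkcg_css, cmd_list, idle_list, list_entry) are assumptions about the mainline layout:

struct loop_worker {
	struct rb_node rb_node;			/* node in lo->worker_tree, keyed by blkcg css */
	struct work_struct work;
	struct list_head cmd_list;		/* commands queued for this worker */
	struct list_head idle_list;		/* linkage on lo->idle_worker_list */
	struct loop_device *lo;			/* the member match at line 812 */
	struct cgroup_subsys_state *blkcg_css;
	unsigned long last_ran_at;
};

static void loop_queue_work(struct loop_device *lo, struct loop_cmd *cmd)
{
	struct rb_node **node, *parent = NULL;
	struct loop_worker *cur, *worker = NULL;
	struct work_struct *work;
	struct list_head *cmd_list;

	spin_lock_irq(&lo->lo_work_lock);

	if (cmd->blkcg_css) {
		/* Find (or create) the worker dedicated to this cgroup. */
		node = &lo->worker_tree.rb_node;
		while (*node) {
			parent = *node;
			cur = container_of(*node, struct loop_worker, rb_node);
			if (cur->blkcg_css == cmd->blkcg_css) {
				worker = cur;
				break;
			}
			node = ((long)cur->blkcg_css < (long)cmd->blkcg_css) ?
				&(*node)->rb_left : &(*node)->rb_right;
		}
		if (!worker) {
			worker = kzalloc(sizeof(*worker), GFP_NOWAIT | __GFP_NOWARN);
			if (worker) {
				worker->blkcg_css = cmd->blkcg_css;
				INIT_WORK(&worker->work, loop_workfn);
				INIT_LIST_HEAD(&worker->cmd_list);
				INIT_LIST_HEAD(&worker->idle_list);
				worker->lo = lo;
				rb_link_node(&worker->rb_node, parent, node);
				rb_insert_color(&worker->rb_node, &lo->worker_tree);
			}
		}
	}

	if (worker) {
		work = &worker->work;
		cmd_list = &worker->cmd_list;
	} else {
		/* No cgroup (or allocation failed): use the root-cgroup worker. */
		work = &lo->rootcg_work;
		cmd_list = &lo->rootcg_cmd_list;
	}

	list_add_tail(&cmd->list_entry, cmd_list);
	queue_work(lo->workqueue, work);
	spin_unlock_irq(&lo->lo_work_lock);
}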
901 static void loop_set_timer(struct loop_device *lo) in loop_set_timer() argument
903 timer_reduce(&lo->timer, jiffies + LOOP_IDLE_WORKER_TIMEOUT); in loop_set_timer()
906 static void loop_free_idle_workers(struct loop_device *lo, bool delete_all) in loop_free_idle_workers() argument
910 spin_lock_irq(&lo->lo_work_lock); in loop_free_idle_workers()
911 list_for_each_entry_safe(worker, pos, &lo->idle_worker_list, in loop_free_idle_workers()
918 rb_erase(&worker->rb_node, &lo->worker_tree); in loop_free_idle_workers()
922 if (!list_empty(&lo->idle_worker_list)) in loop_free_idle_workers()
923 loop_set_timer(lo); in loop_free_idle_workers()
924 spin_unlock_irq(&lo->lo_work_lock); in loop_free_idle_workers()
929 struct loop_device *lo = container_of(timer, struct loop_device, timer); in loop_free_idle_workers_timer() local
931 return loop_free_idle_workers(lo, false); in loop_free_idle_workers_timer()
934 static void loop_update_rotational(struct loop_device *lo) in loop_update_rotational() argument
936 struct file *file = lo->lo_backing_file; in loop_update_rotational()
939 struct request_queue *q = lo->lo_queue; in loop_update_rotational()
954 * @lo: struct loop_device to configure
961 loop_set_status_from_info(struct loop_device *lo, in loop_set_status_from_info() argument
980 lo->lo_offset = info->lo_offset; in loop_set_status_from_info()
981 lo->lo_sizelimit = info->lo_sizelimit; in loop_set_status_from_info()
984 if (lo->lo_offset < 0 || lo->lo_sizelimit < 0) in loop_set_status_from_info()
987 memcpy(lo->lo_file_name, info->lo_file_name, LO_NAME_SIZE); in loop_set_status_from_info()
988 lo->lo_file_name[LO_NAME_SIZE-1] = 0; in loop_set_status_from_info()
989 lo->lo_flags = info->lo_flags; in loop_set_status_from_info()
993 static int loop_configure(struct loop_device *lo, fmode_t mode, in loop_configure() argument
1014 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 1); in loop_configure()
1026 error = loop_global_lock_killable(lo, is_loop); in loop_configure()
1031 if (lo->lo_state != Lo_unbound) in loop_configure()
1052 error = loop_set_status_from_info(lo, &config->info); in loop_configure()
1058 lo->lo_flags |= LO_FLAGS_READ_ONLY; in loop_configure()
1060 if (!lo->workqueue) { in loop_configure()
1061 lo->workqueue = alloc_workqueue("loop%d", in loop_configure()
1063 0, lo->lo_number); in loop_configure()
1064 if (!lo->workqueue) { in loop_configure()
1070 disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); in loop_configure()
1071 set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0); in loop_configure()
1073 lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO; in loop_configure()
1074 lo->lo_device = bdev; in loop_configure()
1075 lo->lo_backing_file = file; in loop_configure()
1076 lo->old_gfp_mask = mapping_gfp_mask(mapping); in loop_configure()
1077 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); in loop_configure()
1079 if (!(lo->lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) in loop_configure()
1080 blk_queue_write_cache(lo->lo_queue, true, false); in loop_configure()
1084 else if ((lo->lo_backing_file->f_flags & O_DIRECT) && inode->i_sb->s_bdev) in loop_configure()
1090 blk_queue_logical_block_size(lo->lo_queue, bsize); in loop_configure()
1091 blk_queue_physical_block_size(lo->lo_queue, bsize); in loop_configure()
1092 blk_queue_io_min(lo->lo_queue, bsize); in loop_configure()
1094 loop_config_discard(lo); in loop_configure()
1095 loop_update_rotational(lo); in loop_configure()
1096 loop_update_dio(lo); in loop_configure()
1097 loop_sysfs_init(lo); in loop_configure()
1099 size = get_loop_size(lo, file); in loop_configure()
1100 loop_set_size(lo, size); in loop_configure()
1105 lo->lo_state = Lo_bound; in loop_configure()
1107 lo->lo_flags |= LO_FLAGS_PARTSCAN; in loop_configure()
1108 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN; in loop_configure()
1110 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); in loop_configure()
1112 loop_global_unlock(lo, is_loop); in loop_configure()
1114 loop_reread_partitions(lo); in loop_configure()
1121 dev_set_uevent_suppress(disk_to_dev(lo->lo_disk), 0); in loop_configure()
1125 loop_global_unlock(lo, is_loop); in loop_configure()
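loop_configure() is the kernel half of the LOOP_CONFIGURE ioctl: one call binds the backing file, applies loop_info64, and sets block size and flags atomically instead of the older LOOP_SET_FD + LOOP_SET_STATUS64 two-step. A minimal userspace sketch (paths are placeholders, error handling trimmed):

#include <fcntl.h>
#include <linux/loop.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Attach a backing image to the given loop device with partition scanning on. */
int attach_loop(const char *loopdev, const char *backing)
{
	int lfd = open(loopdev, O_RDWR);
	int bfd = open(backing, O_RDWR);
	struct loop_config cfg;

	if (lfd < 0 || bfd < 0)
		return -1;

	memset(&cfg, 0, sizeof(cfg));
	cfg.fd = bfd;
	cfg.block_size = 4096;			/* optional; 0 keeps the default */
	cfg.info.lo_flags = LO_FLAGS_PARTSCAN;
	strncpy((char *)cfg.info.lo_file_name, backing, LO_NAME_SIZE - 1);

	if (ioctl(lfd, LOOP_CONFIGURE, &cfg) < 0) {
		close(bfd);
		close(lfd);
		return -1;
	}
	close(bfd);	/* the kernel now holds its own reference to the file */
	return lfd;
}

When LO_FLAGS_PARTSCAN is requested, the rescan runs after loop_global_unlock(), which is the loop_reread_partitions() call at line 1114.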
1136 static void __loop_clr_fd(struct loop_device *lo, bool release) in __loop_clr_fd() argument
1139 gfp_t gfp = lo->old_gfp_mask; in __loop_clr_fd()
1141 if (test_bit(QUEUE_FLAG_WC, &lo->lo_queue->queue_flags)) in __loop_clr_fd()
1142 blk_queue_write_cache(lo->lo_queue, false, false); in __loop_clr_fd()
1150 blk_mq_freeze_queue(lo->lo_queue); in __loop_clr_fd()
1152 spin_lock_irq(&lo->lo_lock); in __loop_clr_fd()
1153 filp = lo->lo_backing_file; in __loop_clr_fd()
1154 lo->lo_backing_file = NULL; in __loop_clr_fd()
1155 spin_unlock_irq(&lo->lo_lock); in __loop_clr_fd()
1157 lo->lo_device = NULL; in __loop_clr_fd()
1158 lo->lo_offset = 0; in __loop_clr_fd()
1159 lo->lo_sizelimit = 0; in __loop_clr_fd()
1160 memset(lo->lo_file_name, 0, LO_NAME_SIZE); in __loop_clr_fd()
1161 blk_queue_logical_block_size(lo->lo_queue, 512); in __loop_clr_fd()
1162 blk_queue_physical_block_size(lo->lo_queue, 512); in __loop_clr_fd()
1163 blk_queue_io_min(lo->lo_queue, 512); in __loop_clr_fd()
1164 invalidate_disk(lo->lo_disk); in __loop_clr_fd()
1165 loop_sysfs_exit(lo); in __loop_clr_fd()
1167 kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE); in __loop_clr_fd()
1172 blk_mq_unfreeze_queue(lo->lo_queue); in __loop_clr_fd()
1174 disk_force_media_change(lo->lo_disk, DISK_EVENT_MEDIA_CHANGE); in __loop_clr_fd()
1176 if (lo->lo_flags & LO_FLAGS_PARTSCAN) { in __loop_clr_fd()
1188 mutex_lock(&lo->lo_disk->open_mutex); in __loop_clr_fd()
1189 err = bdev_disk_changed(lo->lo_disk, false); in __loop_clr_fd()
1191 mutex_unlock(&lo->lo_disk->open_mutex); in __loop_clr_fd()
1194 __func__, lo->lo_number, err); in __loop_clr_fd()
1199 * lo->lo_state is set to Lo_unbound here after above partscan has in __loop_clr_fd()
1202 * change the 'lo' device. in __loop_clr_fd()
1204 lo->lo_flags = 0; in __loop_clr_fd()
1206 set_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); in __loop_clr_fd()
1207 mutex_lock(&lo->lo_mutex); in __loop_clr_fd()
1208 lo->lo_state = Lo_unbound; in __loop_clr_fd()
1209 mutex_unlock(&lo->lo_mutex); in __loop_clr_fd()
1219 static int loop_clr_fd(struct loop_device *lo) in loop_clr_fd() argument
1232 err = loop_global_lock_killable(lo, true); in loop_clr_fd()
1235 if (lo->lo_state != Lo_bound) { in loop_clr_fd()
1236 loop_global_unlock(lo, true); in loop_clr_fd()
1249 if (disk_openers(lo->lo_disk) > 1) { in loop_clr_fd()
1250 lo->lo_flags |= LO_FLAGS_AUTOCLEAR; in loop_clr_fd()
1251 loop_global_unlock(lo, true); in loop_clr_fd()
1254 lo->lo_state = Lo_rundown; in loop_clr_fd()
1255 loop_global_unlock(lo, true); in loop_clr_fd()
1257 __loop_clr_fd(lo, false); in loop_clr_fd()
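loop_clr_fd() is the matching teardown: if anyone else still has the disk open (line 1249) it only arms LO_FLAGS_AUTOCLEAR and lets the last lo_release() run __loop_clr_fd(); otherwise it moves the device to Lo_rundown and clears it immediately. From userspace the request is just LOOP_CLR_FD (sketch):

#include <fcntl.h>
#include <linux/loop.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Ask the kernel to detach the backing file; while other openers remain,
 * this only arms autoclear, matching the behaviour described above. */
int detach_loop(const char *loopdev)
{
	int fd = open(loopdev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	ret = ioctl(fd, LOOP_CLR_FD, 0);
	close(fd);
	return ret;
}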
1262 loop_set_status(struct loop_device *lo, const struct loop_info64 *info) in loop_set_status() argument
1269 err = mutex_lock_killable(&lo->lo_mutex); in loop_set_status()
1272 if (lo->lo_state != Lo_bound) { in loop_set_status()
1277 if (lo->lo_offset != info->lo_offset || in loop_set_status()
1278 lo->lo_sizelimit != info->lo_sizelimit) { in loop_set_status()
1280 sync_blockdev(lo->lo_device); in loop_set_status()
1281 invalidate_bdev(lo->lo_device); in loop_set_status()
1285 blk_mq_freeze_queue(lo->lo_queue); in loop_set_status()
1287 prev_lo_flags = lo->lo_flags; in loop_set_status()
1289 err = loop_set_status_from_info(lo, info); in loop_set_status()
1294 lo->lo_flags &= LOOP_SET_STATUS_SETTABLE_FLAGS; in loop_set_status()
1296 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_SETTABLE_FLAGS; in loop_set_status()
1298 lo->lo_flags |= prev_lo_flags & ~LOOP_SET_STATUS_CLEARABLE_FLAGS; in loop_set_status()
1301 loff_t new_size = get_size(lo->lo_offset, lo->lo_sizelimit, in loop_set_status()
1302 lo->lo_backing_file); in loop_set_status()
1303 loop_set_size(lo, new_size); in loop_set_status()
1306 loop_config_discard(lo); in loop_set_status()
1309 __loop_update_dio(lo, lo->use_dio); in loop_set_status()
1312 blk_mq_unfreeze_queue(lo->lo_queue); in loop_set_status()
1314 if (!err && (lo->lo_flags & LO_FLAGS_PARTSCAN) && in loop_set_status()
1316 clear_bit(GD_SUPPRESS_PART_SCAN, &lo->lo_disk->state); in loop_set_status()
1320 mutex_unlock(&lo->lo_mutex); in loop_set_status()
1322 loop_reread_partitions(lo); in loop_set_status()
1328 loop_get_status(struct loop_device *lo, struct loop_info64 *info) in loop_get_status() argument
1334 ret = mutex_lock_killable(&lo->lo_mutex); in loop_get_status()
1337 if (lo->lo_state != Lo_bound) { in loop_get_status()
1338 mutex_unlock(&lo->lo_mutex); in loop_get_status()
1343 info->lo_number = lo->lo_number; in loop_get_status()
1344 info->lo_offset = lo->lo_offset; in loop_get_status()
1345 info->lo_sizelimit = lo->lo_sizelimit; in loop_get_status()
1346 info->lo_flags = lo->lo_flags; in loop_get_status()
1347 memcpy(info->lo_file_name, lo->lo_file_name, LO_NAME_SIZE); in loop_get_status()
1350 path = lo->lo_backing_file->f_path; in loop_get_status()
1352 mutex_unlock(&lo->lo_mutex); in loop_get_status()
1400 loop_set_status_old(struct loop_device *lo, const struct loop_info __user *arg) in loop_set_status_old() argument
1408 return loop_set_status(lo, &info64); in loop_set_status_old()
1412 loop_set_status64(struct loop_device *lo, const struct loop_info64 __user *arg) in loop_set_status64() argument
1418 return loop_set_status(lo, &info64); in loop_set_status64()
1422 loop_get_status_old(struct loop_device *lo, struct loop_info __user *arg) { in loop_get_status_old() argument
1429 err = loop_get_status(lo, &info64); in loop_get_status_old()
1439 loop_get_status64(struct loop_device *lo, struct loop_info64 __user *arg) { in loop_get_status64() argument
1445 err = loop_get_status(lo, &info64); in loop_get_status64()
1452 static int loop_set_capacity(struct loop_device *lo) in loop_set_capacity() argument
1456 if (unlikely(lo->lo_state != Lo_bound)) in loop_set_capacity()
1459 size = get_loop_size(lo, lo->lo_backing_file); in loop_set_capacity()
1460 loop_set_size(lo, size); in loop_set_capacity()
1465 static int loop_set_dio(struct loop_device *lo, unsigned long arg) in loop_set_dio() argument
1468 if (lo->lo_state != Lo_bound) in loop_set_dio()
1471 __loop_update_dio(lo, !!arg); in loop_set_dio()
1472 if (lo->use_dio == !!arg) in loop_set_dio()
1479 static int loop_set_block_size(struct loop_device *lo, unsigned long arg) in loop_set_block_size() argument
1483 if (lo->lo_state != Lo_bound) in loop_set_block_size()
1490 if (lo->lo_queue->limits.logical_block_size == arg) in loop_set_block_size()
1493 sync_blockdev(lo->lo_device); in loop_set_block_size()
1494 invalidate_bdev(lo->lo_device); in loop_set_block_size()
1496 blk_mq_freeze_queue(lo->lo_queue); in loop_set_block_size()
1497 blk_queue_logical_block_size(lo->lo_queue, arg); in loop_set_block_size()
1498 blk_queue_physical_block_size(lo->lo_queue, arg); in loop_set_block_size()
1499 blk_queue_io_min(lo->lo_queue, arg); in loop_set_block_size()
1500 loop_update_dio(lo); in loop_set_block_size()
1501 blk_mq_unfreeze_queue(lo->lo_queue); in loop_set_block_size()
1506 static int lo_simple_ioctl(struct loop_device *lo, unsigned int cmd, in lo_simple_ioctl() argument
1511 err = mutex_lock_killable(&lo->lo_mutex); in lo_simple_ioctl()
1516 err = loop_set_capacity(lo); in lo_simple_ioctl()
1519 err = loop_set_dio(lo, arg); in lo_simple_ioctl()
1522 err = loop_set_block_size(lo, arg); in lo_simple_ioctl()
1527 mutex_unlock(&lo->lo_mutex); in lo_simple_ioctl()
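lo_simple_ioctl() serializes the three small ioctls above under lo_mutex: LOOP_SET_CAPACITY re-reads the backing-file size, LOOP_SET_DIRECT_IO toggles dio, and LOOP_SET_BLOCK_SIZE reprograms the queue limits under a frozen queue. A userspace sketch of the latter two (LOOP_SET_DIRECT_IO fails with EINVAL when __loop_update_dio() cannot actually enable direct I/O for the backing file and offset):

#include <fcntl.h>
#include <linux/loop.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Switch a bound loop device to 4096-byte logical blocks and request dio. */
int tune_loop(const char *loopdev)
{
	int fd = open(loopdev, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL) < 0 ||
	    ioctl(fd, LOOP_SET_DIRECT_IO, 1UL) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}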
1534 struct loop_device *lo = bdev->bd_disk->private_data; in lo_ioctl() local
1550 return loop_configure(lo, mode, bdev, &config); in lo_ioctl()
1558 return loop_configure(lo, mode, bdev, &config); in lo_ioctl()
1561 return loop_change_fd(lo, bdev, arg); in lo_ioctl()
1563 return loop_clr_fd(lo); in lo_ioctl()
1567 err = loop_set_status_old(lo, argp); in lo_ioctl()
1571 return loop_get_status_old(lo, argp); in lo_ioctl()
1575 err = loop_set_status64(lo, argp); in lo_ioctl()
1579 return loop_get_status64(lo, argp); in lo_ioctl()
1587 err = lo_simple_ioctl(lo, cmd, arg); in lo_ioctl()
1667 loop_set_status_compat(struct loop_device *lo, in loop_set_status_compat() argument
1676 return loop_set_status(lo, &info64); in loop_set_status_compat()
1680 loop_get_status_compat(struct loop_device *lo, in loop_get_status_compat() argument
1688 err = loop_get_status(lo, &info64); in loop_get_status_compat()
1697 struct loop_device *lo = bdev->bd_disk->private_data; in lo_compat_ioctl() local
1702 err = loop_set_status_compat(lo, in lo_compat_ioctl()
1706 err = loop_get_status_compat(lo, in lo_compat_ioctl()
1732 struct loop_device *lo = disk->private_data; in lo_release() local
1737 mutex_lock(&lo->lo_mutex); in lo_release()
1738 if (lo->lo_state == Lo_bound && (lo->lo_flags & LO_FLAGS_AUTOCLEAR)) { in lo_release()
1739 lo->lo_state = Lo_rundown; in lo_release()
1740 mutex_unlock(&lo->lo_mutex); in lo_release()
1745 __loop_clr_fd(lo, true); in lo_release()
1748 mutex_unlock(&lo->lo_mutex); in lo_release()
1753 struct loop_device *lo = disk->private_data; in lo_free_disk() local
1755 if (lo->workqueue) in lo_free_disk()
1756 destroy_workqueue(lo->workqueue); in lo_free_disk()
1757 loop_free_idle_workers(lo, true); in lo_free_disk()
1758 del_timer_sync(&lo->timer); in lo_free_disk()
1759 mutex_destroy(&lo->lo_mutex); in lo_free_disk()
1760 kfree(lo); in lo_free_disk()
1807 struct loop_device *lo = rq->q->queuedata; in loop_queue_rq() local
1811 if (lo->lo_state != Lo_bound) in loop_queue_rq()
1821 cmd->use_aio = lo->use_dio; in loop_queue_rq()
1840 loop_queue_work(lo, cmd); in loop_queue_rq()
1849 struct loop_device *lo = rq->q->queuedata; in loop_handle_cmd() local
1853 if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { in loop_handle_cmd()
1864 ret = do_req_filebacked(lo, rq); in loop_handle_cmd()
1886 struct list_head *cmd_list, struct loop_device *lo) in loop_process_work() argument
1892 spin_lock_irq(&lo->lo_work_lock); in loop_process_work()
1897 spin_unlock_irq(&lo->lo_work_lock); in loop_process_work()
1902 spin_lock_irq(&lo->lo_work_lock); in loop_process_work()
1912 list_add_tail(&worker->idle_list, &lo->idle_worker_list); in loop_process_work()
1913 loop_set_timer(lo); in loop_process_work()
1915 spin_unlock_irq(&lo->lo_work_lock); in loop_process_work()
1923 loop_process_work(worker, &worker->cmd_list, worker->lo); in loop_workfn()
1928 struct loop_device *lo = in loop_rootcg_workfn() local
1930 loop_process_work(NULL, &lo->rootcg_cmd_list, lo); in loop_rootcg_workfn()
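Taken together, the matches from loop_queue_rq() down to loop_rootcg_workfn() describe the whole I/O path: queue_rq checks Lo_bound, copies use_dio into the command, and hands it to loop_queue_work(); a worker thread later drains its cmd_list through loop_process_work(), which calls loop_handle_cmd() and do_req_filebacked(), then parks the worker on the idle list with the reaper timer armed. The hook into blk-mq itself is a small ops table; a sketch (the completion callback name is an assumption):

static const struct blk_mq_ops loop_mq_ops = {
	.queue_rq	= loop_queue_rq,
	.complete	= lo_complete_rq,	/* assumption: finishes the request once worker/AIO I/O is done */
};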
1940 struct loop_device *lo; in loop_add() local
1945 lo = kzalloc(sizeof(*lo), GFP_KERNEL); in loop_add()
1946 if (!lo) in loop_add()
1948 lo->worker_tree = RB_ROOT; in loop_add()
1949 INIT_LIST_HEAD(&lo->idle_worker_list); in loop_add()
1950 timer_setup(&lo->timer, loop_free_idle_workers_timer, TIMER_DEFERRABLE); in loop_add()
1951 lo->lo_state = Lo_unbound; in loop_add()
1959 err = idr_alloc(&loop_index_idr, lo, i, i + 1, GFP_KERNEL); in loop_add()
1963 err = idr_alloc(&loop_index_idr, lo, 0, 0, GFP_KERNEL); in loop_add()
1970 lo->tag_set.ops = &loop_mq_ops; in loop_add()
1971 lo->tag_set.nr_hw_queues = 1; in loop_add()
1972 lo->tag_set.queue_depth = hw_queue_depth; in loop_add()
1973 lo->tag_set.numa_node = NUMA_NO_NODE; in loop_add()
1974 lo->tag_set.cmd_size = sizeof(struct loop_cmd); in loop_add()
1975 lo->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_STACKING | in loop_add()
1977 lo->tag_set.driver_data = lo; in loop_add()
1979 err = blk_mq_alloc_tag_set(&lo->tag_set); in loop_add()
1983 disk = lo->lo_disk = blk_mq_alloc_disk(&lo->tag_set, lo); in loop_add()
1988 lo->lo_queue = lo->lo_disk->queue; in loop_add()
1990 blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS); in loop_add()
1998 blk_queue_flag_set(QUEUE_FLAG_NOMERGES, lo->lo_queue); in loop_add()
2020 mutex_init(&lo->lo_mutex); in loop_add()
2021 lo->lo_number = i; in loop_add()
2022 spin_lock_init(&lo->lo_lock); in loop_add()
2023 spin_lock_init(&lo->lo_work_lock); in loop_add()
2024 INIT_WORK(&lo->rootcg_work, loop_rootcg_workfn); in loop_add()
2025 INIT_LIST_HEAD(&lo->rootcg_cmd_list); in loop_add()
2030 disk->private_data = lo; in loop_add()
2031 disk->queue = lo->lo_queue; in loop_add()
2042 lo->idr_visible = true; in loop_add()
2050 blk_mq_free_tag_set(&lo->tag_set); in loop_add()
2056 kfree(lo); in loop_add()
2061 static void loop_remove(struct loop_device *lo) in loop_remove() argument
2064 del_gendisk(lo->lo_disk); in loop_remove()
2065 blk_mq_free_tag_set(&lo->tag_set); in loop_remove()
2068 idr_remove(&loop_index_idr, lo->lo_number); in loop_remove()
2071 put_disk(lo->lo_disk); in loop_remove()
2085 struct loop_device *lo; in loop_control_remove() local
2097 lo = idr_find(&loop_index_idr, idx); in loop_control_remove()
2098 if (!lo || !lo->idr_visible) in loop_control_remove()
2101 lo->idr_visible = false; in loop_control_remove()
2107 ret = mutex_lock_killable(&lo->lo_mutex); in loop_control_remove()
2110 if (lo->lo_state != Lo_unbound || disk_openers(lo->lo_disk) > 0) { in loop_control_remove()
2111 mutex_unlock(&lo->lo_mutex); in loop_control_remove()
2116 lo->lo_state = Lo_deleting; in loop_control_remove()
2117 mutex_unlock(&lo->lo_mutex); in loop_control_remove()
2119 loop_remove(lo); in loop_control_remove()
2125 lo->idr_visible = true; in loop_control_remove()
2132 struct loop_device *lo; in loop_control_get_free() local
2138 idr_for_each_entry(&loop_index_idr, lo, id) { in loop_control_get_free()
2140 if (lo->idr_visible && data_race(lo->lo_state) == Lo_unbound) in loop_control_get_free()
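loop_control_get_free() backs the LOOP_CTL_GET_FREE ioctl on /dev/loop-control: it scans the idr for a visible, unbound device and falls back to loop_add() when none is free. Userspace sketch:

#include <fcntl.h>
#include <linux/loop.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

/* Returns the number of a free loop device (e.g. 3 for /dev/loop3), or -1. */
int get_free_loop(void)
{
	int ctl = open("/dev/loop-control", O_RDWR);
	int nr;

	if (ctl < 0)
		return -1;
	nr = ioctl(ctl, LOOP_CTL_GET_FREE);
	close(ctl);
	return nr;
}

int main(void)
{
	int nr = get_free_loop();

	if (nr >= 0)
		printf("/dev/loop%d\n", nr);
	return nr < 0;
}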
2250 struct loop_device *lo; in loop_exit() local
2262 idr_for_each_entry(&loop_index_idr, lo, id) in loop_exit()
2263 loop_remove(lo); in loop_exit()