Lines matching refs: ubd_dev

238 struct ubd *ubd_dev; in ubd_setup_common() local
269 ubd_dev = &ubd_devs[n]; in ubd_setup_common()
270 if(ubd_dev->file != NULL){ in ubd_setup_common()
288 ubd_dev->no_cow = 1; in ubd_setup_common()
291 ubd_dev->shared = 1; in ubd_setup_common()
294 ubd_dev->no_trim = 1; in ubd_setup_common()
326 if (backing_file && ubd_dev->no_cow) { in ubd_setup_common()
332 ubd_dev->file = file; in ubd_setup_common()
333 ubd_dev->cow.file = backing_file; in ubd_setup_common()
334 ubd_dev->serial = serial; in ubd_setup_common()
335 ubd_dev->boot_openflags = flags; in ubd_setup_common()
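Taken together, the references above (and the later ones in this listing) imply the per-device state that ubd_setup_common() fills in. The following is a minimal sketch of those struct ubd fields reconstructed from usage only; the field types, ordering, and the name of the nested cow struct are assumptions, not the driver's actual declaration.

/* Sketch of struct ubd as implied by the references in this listing.
 * Types and layout are inferred from usage, not copied from the driver. */
struct ubd_cow_sketch {
	char *file;			/* backing (COW) file path */
	int fd;				/* open fd for the backing file */
	unsigned long *bitmap;		/* vmalloc'ed COW bitmap */
	unsigned long bitmap_len;
	int bitmap_offset;
	int data_offset;
};

struct ubd_sketch {
	char *file;			/* image file from the ubd= option */
	char *serial;			/* serial string shown by serial_show() */
	struct openflags boot_openflags;/* flags parsed at boot */
	struct openflags openflags;	/* flags used for the current open */
	int no_cow, shared, no_trim;	/* option flags set in ubd_setup_common() */
	int fd;				/* open fd for the image file */
	int count;			/* open reference count */
	__u64 size;			/* device size, rounded by ROUND_BLOCK() */
	struct ubd_cow_sketch cow;
	struct request_queue *queue;
	struct blk_mq_tag_set tag_set;
	spinlock_t lock;
	struct platform_device pdev;
};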
510 static inline int ubd_file_size(struct ubd *ubd_dev, __u64 *size_out) in ubd_file_size() argument
524 if (ubd_dev->file && ubd_dev->cow.file) { in ubd_file_size()
525 file = ubd_dev->cow.file; in ubd_file_size()
530 fd = os_open_file(ubd_dev->file, of_read(OPENFLAGS()), 0); in ubd_file_size()
539 file = ubd_dev->file; in ubd_file_size()
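The ubd_file_size() references show the reported size coming from the COW backing file when one is configured and from the image file otherwise. A hedged sketch of that selection, assuming an os_file_size() helper of the usual UML form (not shown in this listing):

/* Sketch only: choose which file determines the reported device size.
 * os_file_size() is assumed, not taken from the listing above. */
static int ubd_file_size_sketch(struct ubd *ubd_dev, __u64 *size_out)
{
	char *file = ubd_dev->file;

	/* With a COW layer, the backing file defines the virtual size. */
	if (ubd_dev->file && ubd_dev->cow.file)
		file = ubd_dev->cow.file;

	return os_file_size(file, size_out);
}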
729 static void ubd_close_dev(struct ubd *ubd_dev) in ubd_close_dev() argument
731 os_close_file(ubd_dev->fd); in ubd_close_dev()
732 if(ubd_dev->cow.file == NULL) in ubd_close_dev()
735 os_close_file(ubd_dev->cow.fd); in ubd_close_dev()
736 vfree(ubd_dev->cow.bitmap); in ubd_close_dev()
737 ubd_dev->cow.bitmap = NULL; in ubd_close_dev()
740 static int ubd_open_dev(struct ubd *ubd_dev) in ubd_open_dev() argument
747 ubd_dev->openflags = ubd_dev->boot_openflags; in ubd_open_dev()
749 create_ptr = (ubd_dev->cow.file != NULL) ? &create_cow : NULL; in ubd_open_dev()
750 back_ptr = ubd_dev->no_cow ? NULL : &ubd_dev->cow.file; in ubd_open_dev()
752 fd = open_ubd_file(ubd_dev->file, &ubd_dev->openflags, ubd_dev->shared, in ubd_open_dev()
753 back_ptr, &ubd_dev->cow.bitmap_offset, in ubd_open_dev()
754 &ubd_dev->cow.bitmap_len, &ubd_dev->cow.data_offset, in ubd_open_dev()
758 fd = create_cow_file(ubd_dev->file, ubd_dev->cow.file, in ubd_open_dev()
759 ubd_dev->openflags, SECTOR_SIZE, PAGE_SIZE, in ubd_open_dev()
760 &ubd_dev->cow.bitmap_offset, in ubd_open_dev()
761 &ubd_dev->cow.bitmap_len, in ubd_open_dev()
762 &ubd_dev->cow.data_offset); in ubd_open_dev()
765 "\"%s\"\n", ubd_dev->file, ubd_dev->cow.file); in ubd_open_dev()
770 printk("Failed to open '%s', errno = %d\n", ubd_dev->file, in ubd_open_dev()
774 ubd_dev->fd = fd; in ubd_open_dev()
776 if(ubd_dev->cow.file != NULL){ in ubd_open_dev()
777 blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long)); in ubd_open_dev()
780 ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len); in ubd_open_dev()
781 if(ubd_dev->cow.bitmap == NULL){ in ubd_open_dev()
787 err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap, in ubd_open_dev()
788 ubd_dev->cow.bitmap_offset, in ubd_open_dev()
789 ubd_dev->cow.bitmap_len); in ubd_open_dev()
793 flags = ubd_dev->openflags; in ubd_open_dev()
795 err = open_ubd_file(ubd_dev->cow.file, &flags, ubd_dev->shared, NULL, in ubd_open_dev()
798 ubd_dev->cow.fd = err; in ubd_open_dev()
800 if (ubd_dev->no_trim == 0) { in ubd_open_dev()
801 ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE; in ubd_open_dev()
802 ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE; in ubd_open_dev()
803 blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST); in ubd_open_dev()
804 blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST); in ubd_open_dev()
805 blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue); in ubd_open_dev()
807 blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue); in ubd_open_dev()
810 os_close_file(ubd_dev->fd); in ubd_open_dev()
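The tail of ubd_open_dev() above allocates and reads the COW bitmap only when a backing file is present, and enables discard and write-zeroes support unless no_trim was set. A condensed sketch of that sequence, reusing only calls that appear in the listing; error handling and the open_ubd_file()/create_cow_file() plumbing are omitted:

/* Condensed sketch of the COW-bitmap and queue-limit setup seen above. */
static int ubd_open_limits_sketch(struct ubd *ubd_dev)
{
	int err;

	if (ubd_dev->cow.file != NULL) {
		/* COW requests are clamped to one bitmap word's worth of sectors. */
		blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));

		ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
		if (ubd_dev->cow.bitmap == NULL)
			return -ENOMEM;

		err = read_cow_bitmap(ubd_dev->fd, ubd_dev->cow.bitmap,
				      ubd_dev->cow.bitmap_offset,
				      ubd_dev->cow.bitmap_len);
		if (err < 0)
			return err;
	}

	if (ubd_dev->no_trim == 0) {
		ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE;
		ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE;
		blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
		blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue);
	}
	blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue);
	return 0;
}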
816 struct ubd *ubd_dev = dev_get_drvdata(dev); in ubd_device_release() local
818 blk_mq_free_tag_set(&ubd_dev->tag_set); in ubd_device_release()
819 *ubd_dev = ((struct ubd) DEFAULT_UBD); in ubd_device_release()
826 struct ubd *ubd_dev = disk->private_data; in serial_show() local
828 if (!ubd_dev) in serial_show()
831 return sprintf(buf, "%s", ubd_dev->serial); in serial_show()
886 struct ubd *ubd_dev = &ubd_devs[n]; in ubd_add() local
890 if(ubd_dev->file == NULL) in ubd_add()
893 err = ubd_file_size(ubd_dev, &ubd_dev->size); in ubd_add()
899 ubd_dev->size = ROUND_BLOCK(ubd_dev->size); in ubd_add()
901 ubd_dev->tag_set.ops = &ubd_mq_ops; in ubd_add()
902 ubd_dev->tag_set.queue_depth = 64; in ubd_add()
903 ubd_dev->tag_set.numa_node = NUMA_NO_NODE; in ubd_add()
904 ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in ubd_add()
905 ubd_dev->tag_set.driver_data = ubd_dev; in ubd_add()
906 ubd_dev->tag_set.nr_hw_queues = 1; in ubd_add()
908 err = blk_mq_alloc_tag_set(&ubd_dev->tag_set); in ubd_add()
912 disk = blk_mq_alloc_disk(&ubd_dev->tag_set, ubd_dev); in ubd_add()
917 ubd_dev->queue = disk->queue; in ubd_add()
919 blk_queue_write_cache(ubd_dev->queue, true, false); in ubd_add()
920 blk_queue_max_segments(ubd_dev->queue, MAX_SG); in ubd_add()
921 blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1); in ubd_add()
922 ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk); in ubd_add()
927 blk_mq_free_tag_set(&ubd_dev->tag_set); in ubd_add()
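ubd_add() wires each device into blk-mq with a single hardware queue and a depth-64 tag set before registering the disk. A sketch of that sequence assembled from the calls listed above; error handling is reduced to the blk_mq_free_tag_set() path seen in the listing:

/* Sketch of the blk-mq setup in ubd_add(), built only from the listed calls. */
static int ubd_add_sketch(int n, struct ubd *ubd_dev)
{
	struct gendisk *disk;
	int err;

	err = ubd_file_size(ubd_dev, &ubd_dev->size);
	if (err < 0)
		return err;
	ubd_dev->size = ROUND_BLOCK(ubd_dev->size);

	ubd_dev->tag_set.ops = &ubd_mq_ops;
	ubd_dev->tag_set.queue_depth = 64;
	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ubd_dev->tag_set.driver_data = ubd_dev;
	ubd_dev->tag_set.nr_hw_queues = 1;

	err = blk_mq_alloc_tag_set(&ubd_dev->tag_set);
	if (err)
		return err;

	disk = blk_mq_alloc_disk(&ubd_dev->tag_set, ubd_dev);
	if (IS_ERR(disk)) {
		blk_mq_free_tag_set(&ubd_dev->tag_set);
		return PTR_ERR(disk);
	}
	ubd_dev->queue = disk->queue;

	blk_queue_write_cache(ubd_dev->queue, true, false);
	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
	blk_queue_segment_boundary(ubd_dev->queue, PAGE_SIZE - 1);
	ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, disk);
	return 0;
}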
971 struct ubd *ubd_dev; in ubd_get_config() local
980 ubd_dev = &ubd_devs[n]; in ubd_get_config()
983 if(ubd_dev->file == NULL){ in ubd_get_config()
988 CONFIG_CHUNK(str, size, len, ubd_dev->file, 0); in ubd_get_config()
990 if(ubd_dev->cow.file != NULL){ in ubd_get_config()
992 CONFIG_CHUNK(str, size, len, ubd_dev->cow.file, 1); in ubd_get_config()
1014 struct ubd *ubd_dev; in ubd_remove() local
1019 ubd_dev = &ubd_devs[n]; in ubd_remove()
1021 if(ubd_dev->file == NULL) in ubd_remove()
1026 if(ubd_dev->count > 0) in ubd_remove()
1036 platform_device_unregister(&ubd_dev->pdev); in ubd_remove()
1064 struct ubd *ubd_dev = &ubd_devs[0]; in ubd0_init() local
1067 if(ubd_dev->file == NULL) in ubd0_init()
1068 ubd_dev->file = "root_fs"; in ubd0_init()
1157 struct ubd *ubd_dev = disk->private_data; in ubd_open() local
1161 if(ubd_dev->count == 0){ in ubd_open()
1162 err = ubd_open_dev(ubd_dev); in ubd_open()
1165 disk->disk_name, ubd_dev->file, -err); in ubd_open()
1169 ubd_dev->count++; in ubd_open()
1170 set_disk_ro(disk, !ubd_dev->openflags.w); in ubd_open()
1185 struct ubd *ubd_dev = disk->private_data; in ubd_release() local
1188 if(--ubd_dev->count == 0) in ubd_release()
1189 ubd_close_dev(ubd_dev); in ubd_release()
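ubd_open() and ubd_release() bracket the host file descriptors with a plain use count: the first opener calls ubd_open_dev(), the last closer calls ubd_close_dev(). A minimal sketch of that counting; the exact block_device_operations signatures vary by kernel version and are not taken from this listing:

/* Sketch of the open/release reference counting seen above. */
static int ubd_open_sketch(struct gendisk *disk)
{
	struct ubd *ubd_dev = disk->private_data;
	int err;

	if (ubd_dev->count == 0) {
		err = ubd_open_dev(ubd_dev);	/* first opener sets up the fds */
		if (err)
			return err;
	}
	ubd_dev->count++;
	set_disk_ro(disk, !ubd_dev->openflags.w);
	return 0;
}

static void ubd_release_sketch(struct gendisk *disk)
{
	struct ubd *ubd_dev = disk->private_data;

	if (--ubd_dev->count == 0)		/* last closer tears everything down */
		ubd_close_dev(ubd_dev);
}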
1354 struct ubd *ubd_dev = hctx->queue->queuedata; in ubd_queue_rq() local
1360 spin_lock_irq(&ubd_dev->lock); in ubd_queue_rq()
1368 ret = ubd_submit_request(ubd_dev, req); in ubd_queue_rq()
1375 spin_unlock_irq(&ubd_dev->lock); in ubd_queue_rq()
1389 struct ubd *ubd_dev = bdev->bd_disk->private_data; in ubd_getgeo() local
1393 geo->cylinders = ubd_dev->size / (128 * 32 * 512); in ubd_getgeo()
1400 struct ubd *ubd_dev = bdev->bd_disk->private_data; in ubd_ioctl() local
1407 ubd_id[ATA_ID_CYLS] = ubd_dev->size / (128 * 32 * 512); in ubd_ioctl()
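Both ubd_getgeo() and the ATA identify data in ubd_ioctl() derive the cylinder count from the same fixed divisor, 128 * 32 * 512, i.e. a fake geometry of 128 heads and 32 sectors per track with 512-byte sectors. A short sketch of that arithmetic; the heads/sectors constants are inferred from the divisor rather than quoted from the driver:

/* Sketch: the fake CHS geometry implied by the shared divisor above.
 * 128 heads and 32 sectors/track are inferred, not quoted. */
static void ubd_fake_geometry_sketch(__u64 size, struct hd_geometry *geo)
{
	geo->heads = 128;
	geo->sectors = 32;
	geo->cylinders = size / (128 * 32 * 512);	/* bytes per cylinder */
}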