Lines matching refs: vblk (drivers/block/virtio_blk.c)
Each entry gives the source line number, the matching line of code, and the enclosing function; 'local' marks a local variable definition, 'argument' a function parameter.
134 struct virtio_blk *vblk = hctx->queue->queuedata; in get_virtio_blk_vq() local
135 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in get_virtio_blk_vq()
339 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_request_done() local
345 req->__sector = virtio64_to_cpu(vblk->vdev, in virtblk_request_done()
353 struct virtio_blk *vblk = vq->vdev->priv; in virtblk_done() local
360 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtblk_done()
363 while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) { in virtblk_done()
376 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_done()
377 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtblk_done()
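The references at lines 353-377 above belong to virtblk_done(), the virtqueue callback that reaps completed requests. A minimal sketch of that completion pattern, assuming the virtblk_req/virtio_blk_vq layout implied by the listing (not verbatim from the driver):

static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                /* Reap every buffer the device has marked as used. */
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        if (likely(!blk_should_fake_timeout(req->q)))
                                blk_mq_complete_request(req);
                        req_done = true;
                }
        } while (!virtqueue_enable_cb(vq));

        /* Queues stopped on a full ring in queue_rq() can make progress again. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}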
382 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_commit_rqs() local
383 struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num]; in virtio_commit_rqs()
408 struct virtio_blk *vblk, in virtblk_prep_rq() argument
415 status = virtblk_setup_cmd(vblk->vdev, req, vbr); in virtblk_prep_rq()
432 struct virtio_blk *vblk = hctx->queue->queuedata; in virtio_queue_rq() local
441 status = virtblk_prep_rq(hctx, vblk, req, vbr); in virtio_queue_rq()
445 spin_lock_irqsave(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
446 err = virtblk_add_req(vblk->vqs[qid].vq, vbr); in virtio_queue_rq()
448 virtqueue_kick(vblk->vqs[qid].vq); in virtio_queue_rq()
454 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
459 if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq)) in virtio_queue_rq()
461 spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags); in virtio_queue_rq()
464 virtqueue_notify(vblk->vqs[qid].vq); in virtio_queue_rq()
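Lines 408-464 cover the submission side: virtblk_prep_rq() builds the command header and virtio_queue_rq() queues the request under the per-vq lock. A sketch of the .queue_rq flow those references imply; error-path unmapping and cleanup of the request data is omitted, and the helper signatures are taken from the lines above:

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                                    const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        int qid = hctx->queue_num;
        unsigned long flags;
        bool notify = false;
        blk_status_t status;
        int err;

        status = virtblk_prep_rq(hctx, vblk, req, vbr);
        if (unlikely(status))
                return status;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        err = virtblk_add_req(vblk->vqs[qid].vq, vbr);
        if (err) {
                /* Ring full: kick what is queued, stop the hw queue, retry later. */
                virtqueue_kick(vblk->vqs[qid].vq);
                if (err == -ENOSPC)
                        blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                return err == -ENOSPC ? BLK_STS_DEV_RESOURCE : BLK_STS_IOERR;
        }

        /* Defer the doorbell until the last request of the batch when possible. */
        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}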
470 struct virtio_blk *vblk = req->mq_hctx->queue->queuedata; in virtblk_prep_rq_batch() local
475 return virtblk_prep_rq(req->mq_hctx, vblk, req, vbr) == BLK_STS_OK; in virtblk_prep_rq_batch()
537 static void *virtblk_alloc_report_buffer(struct virtio_blk *vblk, in virtblk_alloc_report_buffer() argument
541 struct request_queue *q = vblk->disk->queue; in virtblk_alloc_report_buffer()
546 get_capacity(vblk->disk) >> ilog2(vblk->zone_sectors)); in virtblk_alloc_report_buffer()
566 static int virtblk_submit_zone_report(struct virtio_blk *vblk, in virtblk_submit_zone_report() argument
570 struct request_queue *q = vblk->disk->queue; in virtblk_submit_zone_report()
581 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_ZONE_REPORT); in virtblk_submit_zone_report()
582 vbr->out_hdr.sector = cpu_to_virtio64(vblk->vdev, sector); in virtblk_submit_zone_report()
595 static int virtblk_parse_zone(struct virtio_blk *vblk, in virtblk_parse_zone() argument
601 zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start); in virtblk_parse_zone()
602 if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk)) in virtblk_parse_zone()
603 zone.len = vblk->zone_sectors; in virtblk_parse_zone()
605 zone.len = get_capacity(vblk->disk) - zone.start; in virtblk_parse_zone()
606 zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap); in virtblk_parse_zone()
607 zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp); in virtblk_parse_zone()
620 dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n", in virtblk_parse_zone()
654 dev_err(&vblk->vdev->dev, "zone %llu: invalid condition %#x\n", in virtblk_parse_zone()
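Lines 595-654 reference virtblk_parse_zone(), which converts one virtio zone descriptor into a struct blk_zone and hands it to the blk-mq report callback. A hedged sketch; z_type and the VIRTIO_BLK_ZT_* constants are assumptions from the virtio-blk zoned interface, and the zone-condition translation (the "invalid condition" error at line 654) is elided:

static int virtblk_parse_zone(struct virtio_blk *vblk,
                              struct virtio_blk_zone_descriptor *entry,
                              unsigned int idx, report_zones_cb cb, void *data)
{
        struct blk_zone zone = { };

        zone.start = virtio64_to_cpu(vblk->vdev, entry->z_start);
        if (zone.start + vblk->zone_sectors <= get_capacity(vblk->disk))
                zone.len = vblk->zone_sectors;
        else
                zone.len = get_capacity(vblk->disk) - zone.start;
        zone.capacity = virtio64_to_cpu(vblk->vdev, entry->z_cap);
        zone.wp = virtio64_to_cpu(vblk->vdev, entry->z_wp);

        switch (entry->z_type) {
        case VIRTIO_BLK_ZT_SWR:
                zone.type = BLK_ZONE_TYPE_SEQWRITE_REQ;
                break;
        case VIRTIO_BLK_ZT_CONV:
                zone.type = BLK_ZONE_TYPE_CONVENTIONAL;
                break;
        default:
                dev_err(&vblk->vdev->dev, "zone %llu: invalid type %#x\n",
                        zone.start, entry->z_type);
                return -EIO;
        }

        /* entry->z_state is mapped to BLK_ZONE_COND_* the same way; omitted here. */

        return cb(&zone, idx, data);
}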
670 struct virtio_blk *vblk = disk->private_data; in virtblk_report_zones() local
677 if (WARN_ON_ONCE(!vblk->zone_sectors)) in virtblk_report_zones()
680 report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen); in virtblk_report_zones()
684 mutex_lock(&vblk->vdev_mutex); in virtblk_report_zones()
686 if (!vblk->vdev) { in virtblk_report_zones()
691 while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) { in virtblk_report_zones()
694 ret = virtblk_submit_zone_report(vblk, (char *)report, in virtblk_report_zones()
699 nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones), in virtblk_report_zones()
705 ret = virtblk_parse_zone(vblk, &report->zones[i], in virtblk_report_zones()
710 sector = virtio64_to_cpu(vblk->vdev, in virtblk_report_zones()
712 vblk->zone_sectors; in virtblk_report_zones()
722 mutex_unlock(&vblk->vdev_mutex); in virtblk_report_zones()
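Lines 670-722 reference virtblk_report_zones(), the .report_zones disk operation. A sketch of the loop structure these references suggest, assuming the alloc/submit/parse helpers above; buffer sizing and some error details differ in the real driver:

static int virtblk_report_zones(struct gendisk *disk, sector_t sector,
                                unsigned int nr_zones, report_zones_cb cb,
                                void *data)
{
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_blk_zone_report *report;
        unsigned int zone_idx = 0, i;
        size_t buflen;
        u64 nz;
        int ret;

        if (WARN_ON_ONCE(!vblk->zone_sectors))
                return -EOPNOTSUPP;

        report = virtblk_alloc_report_buffer(vblk, nr_zones, &buflen);
        if (!report)
                return -ENOMEM;

        mutex_lock(&vblk->vdev_mutex);
        if (!vblk->vdev) {              /* device already removed */
                ret = -ENXIO;
                goto out;
        }

        while (zone_idx < nr_zones && sector < get_capacity(vblk->disk)) {
                memset(report, 0, buflen);

                ret = virtblk_submit_zone_report(vblk, (char *)report,
                                                 buflen, sector);
                if (ret)
                        goto out;

                nz = min_t(u64, virtio64_to_cpu(vblk->vdev, report->nr_zones),
                           nr_zones);
                if (!nz)
                        break;

                for (i = 0; i < nz && zone_idx < nr_zones; i++) {
                        ret = virtblk_parse_zone(vblk, &report->zones[i],
                                                 zone_idx, cb, data);
                        if (ret)
                                goto out;

                        sector = virtio64_to_cpu(vblk->vdev,
                                                 report->zones[i].z_start) +
                                 vblk->zone_sectors;
                        zone_idx++;
                }
        }

        ret = zone_idx > 0 ? (int)zone_idx : -EINVAL;
out:
        mutex_unlock(&vblk->vdev_mutex);
        kvfree(report);
        return ret;
}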
727 static void virtblk_revalidate_zones(struct virtio_blk *vblk) in virtblk_revalidate_zones() argument
731 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_revalidate_zones()
735 dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model); in virtblk_revalidate_zones()
739 disk_set_zoned(vblk->disk, BLK_ZONED_NONE); in virtblk_revalidate_zones()
742 WARN_ON_ONCE(!vblk->zone_sectors); in virtblk_revalidate_zones()
743 if (!blk_revalidate_disk_zones(vblk->disk, NULL)) in virtblk_revalidate_zones()
744 set_capacity_and_notify(vblk->disk, 0); in virtblk_revalidate_zones()
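Lines 727-744 are virtblk_revalidate_zones(), run from the config-change worker. A short sketch, assuming the standard VIRTIO_BLK_Z_* model constants:

static void virtblk_revalidate_zones(struct virtio_blk *vblk)
{
        u8 model;

        virtio_cread(vblk->vdev, struct virtio_blk_config,
                     zoned.model, &model);
        switch (model) {
        default:
                dev_err(&vblk->vdev->dev, "unknown zone model %d\n", model);
                fallthrough;
        case VIRTIO_BLK_Z_NONE:
        case VIRTIO_BLK_Z_HA:
                /* Not host-managed: expose the disk as a regular block device. */
                disk_set_zoned(vblk->disk, BLK_ZONED_NONE);
                return;
        case VIRTIO_BLK_Z_HM:
                WARN_ON_ONCE(!vblk->zone_sectors);
                if (!blk_revalidate_disk_zones(vblk->disk, NULL))
                        set_capacity_and_notify(vblk->disk, 0);
        }
}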
749 struct virtio_blk *vblk, in virtblk_probe_zoned_device() argument
772 disk_set_zoned(vblk->disk, BLK_ZONED_HM); in virtblk_probe_zoned_device()
777 disk_set_max_open_zones(vblk->disk, v); in virtblk_probe_zoned_device()
782 disk_set_max_active_zones(vblk->disk, v); in virtblk_probe_zoned_device()
801 &vblk->zone_sectors); in virtblk_probe_zoned_device()
802 if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) { in virtblk_probe_zoned_device()
805 vblk->zone_sectors); in virtblk_probe_zoned_device()
808 blk_queue_chunk_sectors(q, vblk->zone_sectors); in virtblk_probe_zoned_device()
809 dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors); in virtblk_probe_zoned_device()
812 dev_warn(&vblk->vdev->dev, in virtblk_probe_zoned_device()
832 return blk_revalidate_disk_zones(vblk->disk, NULL); in virtblk_probe_zoned_device()
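Lines 749-832 reference virtblk_probe_zoned_device(), compiled only with CONFIG_BLK_DEV_ZONED (see the stubs at 844/849). A condensed sketch of the config reads it performs; the zoned.* field names and the full set of limits (zone append size, write granularity) are assumptions and partly omitted:

static int virtblk_probe_zoned_device(struct virtio_device *vdev,
                                      struct virtio_blk *vblk,
                                      struct request_queue *q)
{
        u32 v;

        disk_set_zoned(vblk->disk, BLK_ZONED_HM);

        virtio_cread(vdev, struct virtio_blk_config, zoned.max_open_zones, &v);
        disk_set_max_open_zones(vblk->disk, v);

        virtio_cread(vdev, struct virtio_blk_config, zoned.max_active_zones, &v);
        disk_set_max_active_zones(vblk->disk, v);

        virtio_cread(vdev, struct virtio_blk_config, zoned.zone_sectors,
                     &vblk->zone_sectors);
        if (vblk->zone_sectors == 0 || !is_power_of_2(vblk->zone_sectors)) {
                dev_err(&vdev->dev,
                        "zoned device with non power of two zone size %u\n",
                        vblk->zone_sectors);
                return -ENODEV;
        }
        blk_queue_chunk_sectors(q, vblk->zone_sectors);
        dev_dbg(&vdev->dev, "zone sectors = %u\n", vblk->zone_sectors);

        /* Zone-append limits and write granularity would also be set up here. */

        return blk_revalidate_disk_zones(vblk->disk, NULL);
}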
844 static inline void virtblk_revalidate_zones(struct virtio_blk *vblk) in virtblk_revalidate_zones() argument
849 struct virtio_blk *vblk, struct request_queue *q) in virtblk_probe_zoned_device() argument
868 struct virtio_blk *vblk = disk->private_data; in virtblk_get_id() local
869 struct request_queue *q = vblk->disk->queue; in virtblk_get_id()
880 vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); in virtblk_get_id()
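Lines 868-880 reference virtblk_get_id(), which issues a VIRTIO_BLK_T_GET_ID request through the block layer. A hedged sketch; the real driver decodes the virtio status byte from the request in-header, whereas this version simply maps the blk-mq status for brevity:

static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct virtblk_req *vbr;
        struct request *req;
        blk_status_t status;
        int err;

        req = blk_mq_alloc_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* Map the caller's ID buffer so the device can fill it in. */
        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        vbr = blk_mq_rq_to_pdu(req);
        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID);
        vbr->out_hdr.sector = 0;

        status = blk_execute_rq(req, false);
        err = blk_status_to_errno(status);
out:
        blk_mq_free_request(req);
        return err;
}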
897 struct virtio_blk *vblk = bd->bd_disk->private_data; in virtblk_getgeo() local
900 mutex_lock(&vblk->vdev_mutex); in virtblk_getgeo()
902 if (!vblk->vdev) { in virtblk_getgeo()
908 if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) { in virtblk_getgeo()
909 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
911 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
913 virtio_cread(vblk->vdev, struct virtio_blk_config, in virtblk_getgeo()
922 mutex_unlock(&vblk->vdev_mutex); in virtblk_getgeo()
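Lines 897-922 reference virtblk_getgeo(). A sketch showing the vdev_mutex check against a removed device and the geometry config reads; the fallback values mirror the usual sd-style defaults and should be treated as assumptions:

static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;
        int ret = 0;

        mutex_lock(&vblk->vdev_mutex);

        if (!vblk->vdev) {
                /* virtblk_remove() already ran; the device is gone. */
                ret = -ENXIO;
                goto out;
        }

        /* See if the host passed in geometry config. */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* Fall back to conventional values, similar to sd. */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
out:
        mutex_unlock(&vblk->vdev_mutex);
        return ret;
}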
928 struct virtio_blk *vblk = disk->private_data; in virtblk_free_disk() local
930 ida_free(&vd_index_ida, vblk->index); in virtblk_free_disk()
931 mutex_destroy(&vblk->vdev_mutex); in virtblk_free_disk()
932 kfree(vblk); in virtblk_free_disk()
975 static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize) in virtblk_update_capacity() argument
977 struct virtio_device *vdev = vblk->vdev; in virtblk_update_capacity()
978 struct request_queue *q = vblk->disk->queue; in virtblk_update_capacity()
995 vblk->disk->disk_name, in virtblk_update_capacity()
1002 set_capacity_and_notify(vblk->disk, capacity); in virtblk_update_capacity()
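Lines 975-1002 reference virtblk_update_capacity(). A condensed sketch; the human-readable size logging of the real function is reduced to a single dev_notice:

static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        u64 capacity;

        /* Host capacity is always reported in 512-byte sectors. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* Clamp to what sector_t can address (relevant on 32-bit builds). */
        if ((sector_t)capacity != capacity)
                capacity = (sector_t)-1;

        dev_notice(&vdev->dev, "[%s] %s%llu 512-byte sectors\n",
                   vblk->disk->disk_name, resize ? "new size: " : "",
                   (unsigned long long)capacity);

        set_capacity_and_notify(vblk->disk, capacity);
}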
1007 struct virtio_blk *vblk = in virtblk_config_changed_work() local
1010 virtblk_revalidate_zones(vblk); in virtblk_config_changed_work()
1011 virtblk_update_capacity(vblk, true); in virtblk_config_changed_work()
1016 struct virtio_blk *vblk = vdev->priv; in virtblk_config_changed() local
1018 queue_work(virtblk_wq, &vblk->config_work); in virtblk_config_changed()
1021 static int init_vq(struct virtio_blk *vblk) in init_vq() argument
1030 struct virtio_device *vdev = vblk->vdev; in init_vq()
1050 vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs; in init_vq()
1051 vblk->io_queues[HCTX_TYPE_READ] = 0; in init_vq()
1052 vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs; in init_vq()
1055 vblk->io_queues[HCTX_TYPE_DEFAULT], in init_vq()
1056 vblk->io_queues[HCTX_TYPE_READ], in init_vq()
1057 vblk->io_queues[HCTX_TYPE_POLL]); in init_vq()
1059 vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL); in init_vq()
1060 if (!vblk->vqs) in init_vq()
1073 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i); in init_vq()
1074 names[i] = vblk->vqs[i].name; in init_vq()
1079 snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i); in init_vq()
1080 names[i] = vblk->vqs[i].name; in init_vq()
1089 spin_lock_init(&vblk->vqs[i].lock); in init_vq()
1090 vblk->vqs[i].vq = vqs[i]; in init_vq()
1092 vblk->num_vqs = num_vqs; in init_vq()
1099 kfree(vblk->vqs); in init_vq()
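Lines 1021-1099 reference init_vq(), which names and allocates the request virtqueues. A sketch of the allocation pattern; the derivation of num_vqs/num_poll_vqs from the device config and module parameters is stubbed out, and the virtio_find_vqs() signature is the one used by kernels of this vintage:

static int init_vq(struct virtio_blk *vblk)
{
        struct virtio_device *vdev = vblk->vdev;
        struct virtqueue **vqs;
        vq_callback_t **callbacks;
        const char **names;
        unsigned short num_vqs = 1, num_poll_vqs = 0;
        int i, err;

        /* num_vqs/num_poll_vqs normally come from the num_queues config field
         * and the poll_queues module parameter. */

        vblk->io_queues[HCTX_TYPE_DEFAULT] = num_vqs - num_poll_vqs;
        vblk->io_queues[HCTX_TYPE_READ] = 0;
        vblk->io_queues[HCTX_TYPE_POLL] = num_poll_vqs;

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs - num_poll_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }
        for (; i < num_vqs; i++) {
                callbacks[i] = NULL;    /* poll queues take no interrupt */
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req_poll.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, NULL);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}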
1153 struct virtio_blk *vblk = vdev->priv; in virtblk_update_cache_mode() local
1155 blk_queue_write_cache(vblk->disk->queue, writeback, false); in virtblk_update_cache_mode()
1167 struct virtio_blk *vblk = disk->private_data; in cache_type_store() local
1168 struct virtio_device *vdev = vblk->vdev; in cache_type_store()
1171 BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); in cache_type_store()
1185 struct virtio_blk *vblk = disk->private_data; in cache_type_show() local
1186 u8 writeback = virtblk_get_cache_mode(vblk->vdev); in cache_type_show()
1205 struct virtio_blk *vblk = disk->private_data; in virtblk_attrs_are_visible() local
1206 struct virtio_device *vdev = vblk->vdev; in virtblk_attrs_are_visible()
1227 struct virtio_blk *vblk = set->driver_data; in virtblk_map_queues() local
1233 map->nr_queues = vblk->io_queues[i]; in virtblk_map_queues()
1248 blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0); in virtblk_map_queues()
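Lines 1227-1248 reference virtblk_map_queues(), which splits the hardware queues into default/read/poll maps. A sketch:

static void virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;
        int i, qoff;

        for (i = 0, qoff = 0; i < set->nr_maps; i++) {
                struct blk_mq_queue_map *map = &set->map[i];

                map->nr_queues = vblk->io_queues[i];
                map->queue_offset = qoff;
                qoff += map->nr_queues;

                if (map->nr_queues == 0)
                        continue;

                /* Poll queues have no interrupt, so use a plain CPU spread;
                 * the other maps follow the device's vector affinity. */
                if (i == HCTX_TYPE_POLL)
                        blk_mq_map_queues(&set->map[i]);
                else
                        blk_mq_virtio_map_queues(&set->map[i], vblk->vdev, 0);
        }
}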
1265 struct virtio_blk *vblk = hctx->queue->queuedata; in virtblk_poll() local
1285 blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); in virtblk_poll()
1306 struct virtio_blk *vblk; in virtblk_probe() local
1341 vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL); in virtblk_probe()
1342 if (!vblk) { in virtblk_probe()
1347 mutex_init(&vblk->vdev_mutex); in virtblk_probe()
1349 vblk->vdev = vdev; in virtblk_probe()
1351 INIT_WORK(&vblk->config_work, virtblk_config_changed_work); in virtblk_probe()
1353 err = init_vq(vblk); in virtblk_probe()
1359 queue_depth = vblk->vqs[0].vq->num_free; in virtblk_probe()
1367 memset(&vblk->tag_set, 0, sizeof(vblk->tag_set)); in virtblk_probe()
1368 vblk->tag_set.ops = &virtio_mq_ops; in virtblk_probe()
1369 vblk->tag_set.queue_depth = queue_depth; in virtblk_probe()
1370 vblk->tag_set.numa_node = NUMA_NO_NODE; in virtblk_probe()
1371 vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in virtblk_probe()
1372 vblk->tag_set.cmd_size = in virtblk_probe()
1375 vblk->tag_set.driver_data = vblk; in virtblk_probe()
1376 vblk->tag_set.nr_hw_queues = vblk->num_vqs; in virtblk_probe()
1377 vblk->tag_set.nr_maps = 1; in virtblk_probe()
1378 if (vblk->io_queues[HCTX_TYPE_POLL]) in virtblk_probe()
1379 vblk->tag_set.nr_maps = 3; in virtblk_probe()
1381 err = blk_mq_alloc_tag_set(&vblk->tag_set); in virtblk_probe()
1385 vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk); in virtblk_probe()
1386 if (IS_ERR(vblk->disk)) { in virtblk_probe()
1387 err = PTR_ERR(vblk->disk); in virtblk_probe()
1390 q = vblk->disk->queue; in virtblk_probe()
1392 virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); in virtblk_probe()
1394 vblk->disk->major = major; in virtblk_probe()
1395 vblk->disk->first_minor = index_to_minor(index); in virtblk_probe()
1396 vblk->disk->minors = 1 << PART_BITS; in virtblk_probe()
1397 vblk->disk->private_data = vblk; in virtblk_probe()
1398 vblk->disk->fops = &virtblk_fops; in virtblk_probe()
1399 vblk->index = index; in virtblk_probe()
1406 set_disk_ro(vblk->disk, 1); in virtblk_probe()
1565 virtblk_update_capacity(vblk, false); in virtblk_probe()
1573 err = virtblk_probe_zoned_device(vdev, vblk, q); in virtblk_probe()
1578 err = device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups); in virtblk_probe()
1585 put_disk(vblk->disk); in virtblk_probe()
1587 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_probe()
1590 kfree(vblk->vqs); in virtblk_probe()
1592 kfree(vblk); in virtblk_probe()
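Lines 1306-1592 reference virtblk_probe(). The fragment below sketches only the tag-set and disk setup slice visible at lines 1367-1399; surrounding declarations, feature negotiation, queue limits, and the real error labels are elided, and the cmd_size comment marks where the driver adds inline scatterlist space:

        /* Inside virtblk_probe(), after init_vq() has set vblk->num_vqs. */
        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = queue_depth;        /* from vqs[0].vq->num_free */
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size = sizeof(struct virtblk_req);   /* plus inline sg space */
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;
        vblk->tag_set.nr_maps = 1;
        if (vblk->io_queues[HCTX_TYPE_POLL])
                vblk->tag_set.nr_maps = 3;      /* default + read + poll */

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_free_vq;               /* label name assumed */

        vblk->disk = blk_mq_alloc_disk(&vblk->tag_set, vblk);
        if (IS_ERR(vblk->disk)) {
                err = PTR_ERR(vblk->disk);
                goto out_free_tags;             /* label name assumed */
        }
        q = vblk->disk->queue;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);
        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->minors = 1 << PART_BITS;
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->index = index;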
1601 struct virtio_blk *vblk = vdev->priv; in virtblk_remove() local
1604 flush_work(&vblk->config_work); in virtblk_remove()
1606 del_gendisk(vblk->disk); in virtblk_remove()
1607 blk_mq_free_tag_set(&vblk->tag_set); in virtblk_remove()
1609 mutex_lock(&vblk->vdev_mutex); in virtblk_remove()
1615 vblk->vdev = NULL; in virtblk_remove()
1618 kfree(vblk->vqs); in virtblk_remove()
1620 mutex_unlock(&vblk->vdev_mutex); in virtblk_remove()
1622 put_disk(vblk->disk); in virtblk_remove()
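Lines 1601-1622 reference virtblk_remove(). A sketch highlighting the vdev_mutex hand-off: vblk->vdev is cleared under the mutex so that open handles (getgeo, report_zones) can detect the removed device:

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Make sure no work handler is still accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_mq_free_tag_set(&vblk->tag_set);

        mutex_lock(&vblk->vdev_mutex);

        /* Stop all the virtqueues. */
        virtio_reset_device(vdev);

        /* Virtqueues are stopped; nothing may use vblk->vdev anymore. */
        vblk->vdev = NULL;

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        mutex_unlock(&vblk->vdev_mutex);

        put_disk(vblk->disk);
}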
1628 struct virtio_blk *vblk = vdev->priv; in virtblk_freeze() local
1634 flush_work(&vblk->config_work); in virtblk_freeze()
1636 blk_mq_quiesce_queue(vblk->disk->queue); in virtblk_freeze()
1639 kfree(vblk->vqs); in virtblk_freeze()
1646 struct virtio_blk *vblk = vdev->priv; in virtblk_restore() local
1655 blk_mq_unquiesce_queue(vblk->disk->queue); in virtblk_restore()
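Lines 1628-1655 reference the suspend/resume pair. A sketch of both callbacks, assuming the usual CONFIG_PM_SLEEP guard; virtblk_restore() is assumed to rebuild the virtqueues with init_vq() since virtblk_freeze() frees vblk->vqs (line 1639):

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure the host stops touching the rings before we tear them down. */
        virtio_reset_device(vdev);

        /* Make sure no work handler is still accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);

        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif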