virtio_mmio.c — selected lines (excerpt)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, eg.:
 *
 *	static struct platform_device ... = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		...
 *	};
 *
 * 2. Device Tree node, eg.:
 *
 *	... {
 *		compatible = "virtio,mmio";
 *		...
 *	};
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *	[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *	...
 *	<id> := (optional) platform device id
 *    eg.:
 *	virtio_mmio.device=0x100@0x100b0000:48 \
 *		virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 */

#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/dma-mapping.h>
#include <linux/virtio.h>
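/*
 * Added example (not part of the original source): a minimal sketch of
 * method 1 above - registering a virtio-mmio device statically from board
 * code. The base address, region size and IRQ number are made-up values,
 * and <linux/platform_device.h> is assumed to be included; struct resource,
 * struct platform_device and platform_device_register() are the standard
 * platform bus API.
 */
#if 0	/* illustration only */
static struct resource example_virtio_resources[] = {
	{
		.start	= 0x1001e000,			/* example MMIO base */
		.end	= 0x1001e000 + 0x200 - 1,	/* example 512-byte window */
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 42,				/* example interrupt line */
		.end	= 42,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_virtio_device = {
	.name		= "virtio-mmio",	/* must match this driver's name */
	.id		= -1,
	.resource	= example_virtio_resources,
	.num_resources	= ARRAY_SIZE(example_virtio_resources),
};

/* ...then, from the board's init code: */
/* platform_device_register(&example_virtio_device); */
#endif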
/* in vm_get_features() */
	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
/* in vm_finalize_features() */
	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);
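/*
 * Added note (not part of the original source): the two fragments above show
 * how the 64-bit virtio feature set crosses the 32-bit MMIO register
 * interface. *_FEATURES_SEL selects which 32-bit half the *_FEATURES register
 * refers to (1 = bits 63:32, 0 = bits 31:0); the device's offered bits are
 * read that way in vm_get_features(), and the driver's accepted bits are
 * written back the same way in vm_finalize_features().
 */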
/* in vm_get() */
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	...
	if (vm_dev->version == 1) {
		...

/* in vm_set() */
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	...
	if (vm_dev->version == 1) {
		...

/* in vm_generation() */
	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);

/* in vm_get_status() */
	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;

/* in vm_set_status() */
	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);

/* in vm_reset() */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);

/* in vm_notify() */
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
/* in vm_interrupt() */
	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		...
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}
/* in vm_del_vq() */
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

/* in vm_del_vqs() */
	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);

/* in vm_synchronize_cbs() */
	synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
/* in vm_setup_vq() */
	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		...
	}
	...
		err = -ENOMEM;	/* vq info allocation failed */
	...
	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		...
	}
	...
		err = -ENOMEM;	/* vring creation failed */
	...
	vq->num_max = num;

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
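	/*
	 * Added note (not part of the original source): QUEUE_PFN takes a page
	 * frame number, i.e. the ring's physical address shifted right by
	 * PAGE_SHIFT. With the common 4 KiB pages (PAGE_SHIFT == 12) a 32-bit
	 * PFN covers 2^(32 + 12) bytes = 16 TiB, which is where the
	 * 0x1ULL << (32 + PAGE_SHIFT - 30) GB figure in the error message above
	 * comes from: 2^(44 - 30) GB = 16384 GB.
	 */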
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* error unwind: deactivate whatever was set up */
	...
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
/* in vm_find_vqs() */
	int irq = platform_get_irq(vm_dev->pdev, 0);
	...
	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	...
	if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
		enable_irq_wake(irq);

/* in vm_bus_name() */
	return vm_dev->pdev->name;
/* in vm_get_shm_region() */
	/* Select the region we're interested in */
	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);

	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;

	region->len = len;

	/* Check if region length is -1. If that's the case, the shared memory
	 * region does not exist and there is no need to proceed further.
	 */
	if (len == ~(u64)0)
		return false;

	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;

	region->addr = addr;
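	/*
	 * Added note (not part of the original source): VIRTIO_MMIO_SHM_SEL
	 * selects which shared memory region the SHM_LEN_* and SHM_BASE_*
	 * registers describe; the 64-bit length and base are assembled from
	 * their LOW/HIGH 32-bit halves. A length of all-ones (-1) marks a
	 * region that does not exist, hence the early "return false" above.
	 */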
static int virtio_mmio_freeze(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	return virtio_device_freeze(&vm_dev->vdev);
}

static int virtio_mmio_restore(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	return virtio_device_restore(&vm_dev->vdev);
}

static void virtio_mmio_release_dev(struct device *_d)
{
	...
	struct platform_device *pdev = vm_dev->pdev;

	devm_kfree(&pdev->dev, vm_dev);
}

/* Platform device */
/* in virtio_mmio_probe() */
	vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vm_dev->base))
		return PTR_ERR(vm_dev->base);

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		return -ENODEV;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		return -ENXIO;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * virtio-mmio device with an ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		return -ENODEV;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressable as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);
/* in virtio_mmio_remove() */
	unregister_virtio_device(&vm_dev->vdev);

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};
static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	...
	/* Consume the "size" part of the parameter, then parse "@<base>:<irq>[:<id>]" */
	size = memparse(device, &str);
	...
		return -EINVAL;
	...
	resources[0].end = base + size - 1;
	...
		pr_err("Failed to register parent device!\n");
	...
	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
			...);
	...
	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);
	...
}

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	...
	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	...
}

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);
static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	...
}

/* Platform driver */
static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};

static struct platform_driver virtio_mmio_driver = {
	...
		.name = "virtio-mmio",
	...
};

MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");