Lines matching full:iommu (search hits from the sun50i-iommu driver source)
10 #include <linux/dma-iommu.h>
15 #include <linux/iommu.h>
97 struct iommu_device iommu; member
99 /* Lock to modify the IOMMU registers */
122 struct sun50i_iommu *iommu; member
135 static u32 iommu_read(struct sun50i_iommu *iommu, u32 offset) in iommu_read() argument
137 return readl(iommu->base + offset); in iommu_read()
140 static void iommu_write(struct sun50i_iommu *iommu, u32 offset, u32 value) in iommu_write() argument
142 writel(value, iommu->base + offset); in iommu_write()
146 * The Allwinner H6 IOMMU uses a 2-level page table.
155 * The IOMMU supports a single DT, pointed by the IOMMU_TTB_REG
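The two comment fragments above describe the translation scheme: a single first-level Directory Table (DT), whose physical address is programmed into IOMMU_TTB_REG, points to second-level Page Tables. A minimal, self-contained sketch of how a 32-bit IOVA could be split under one plausible layout (4096 directory entries, 256 page-table entries per table, 4 KiB pages); these constants and macro names are assumptions for illustration, not taken from this excerpt:

#include <stdint.h>
#include <stdio.h>

/* Assumed split: 12-bit DT index, 8-bit PT index, 12-bit page offset. */
#define DT_INDEX(iova)		(((iova) >> 20) & 0xfff)	/* one of 4096 DTEs */
#define PT_INDEX(iova)		(((iova) >> 12) & 0xff)		/* one of 256 PTEs */
#define PAGE_OFFSET(iova)	((iova) & 0xfff)		/* offset in a 4 KiB page */

int main(void)
{
	uint32_t iova = 0x12345678;

	printf("iova %#x -> dte %u, pte %u, offset %#x\n",
	       iova, DT_INDEX(iova), PT_INDEX(iova), PAGE_OFFSET(iova));
	return 0;
}

Under such a layout a translation is two dependent lookups: the DTE selected by the top bits yields a Page Table's physical address, and the PTE selected by the middle bits yields the physical page frame.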
222 * The way permissions work is that the IOMMU has 16 "domains" that
233 * In order to make it work with the IOMMU framework, we will be using
236 * have each master setup in the same way, since the IOMMU framework
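The fragments above refer to the hardware's 16 "domains" (access-control indices) that gate whether a given page may be read or written; the enable path further down programs separate DM_AUT_CTRL registers for the NONE, RD and WR indices. A minimal sketch of how generic protection flags might be folded into such an index; the enum values, flag names and mapping are assumptions for illustration only:

#include <stdio.h>

/* Illustrative access-control indices, one per permission set. */
enum aci {
	ACI_NONE,	/* no access */
	ACI_RD,		/* read-only */
	ACI_WR,		/* read and write */
};

#define IOVA_PROT_READ	(1 << 0)
#define IOVA_PROT_WRITE	(1 << 1)

/* Pick the index stored alongside a PTE from generic prot flags. */
static enum aci prot_to_aci(int prot)
{
	if (prot & IOVA_PROT_WRITE)
		return ACI_WR;
	if (prot & IOVA_PROT_READ)
		return ACI_RD;
	return ACI_NONE;
}

int main(void)
{
	printf("read-only  -> aci %d\n", prot_to_aci(IOVA_PROT_READ));
	printf("read/write -> aci %d\n",
	       prot_to_aci(IOVA_PROT_READ | IOVA_PROT_WRITE));
	return 0;
}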
291 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_table_flush() local
295 dma_sync_single_for_device(iommu->dev, dma, size, DMA_TO_DEVICE); in sun50i_table_flush()
298 static int sun50i_iommu_flush_all_tlb(struct sun50i_iommu *iommu) in sun50i_iommu_flush_all_tlb() argument
303 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_flush_all_tlb()
305 iommu_write(iommu, in sun50i_iommu_flush_all_tlb()
316 ret = readl_poll_timeout_atomic(iommu->base + IOMMU_TLB_FLUSH_REG, in sun50i_iommu_flush_all_tlb()
320 dev_warn(iommu->dev, "TLB Flush timed out!\n"); in sun50i_iommu_flush_all_tlb()
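The flush helper above runs with iommu_lock held (hence assert_spin_locked) and busy-waits for the hardware to complete the flush via readl_poll_timeout_atomic(), warning on timeout. A hedged sketch of that poll-for-completion pattern from <linux/iopoll.h>; the register offset, busy bit and timeout values below are placeholders, not the driver's:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define FLUSH_REG	0x90		/* placeholder offset */
#define FLUSH_BUSY	BIT(0)		/* placeholder busy bit */

static int flush_and_wait(void __iomem *base)
{
	u32 reg;

	/* Kick the flush, then spin in atomic context until the busy bit
	 * clears or 50 us have elapsed (1 us between reads). */
	writel(FLUSH_BUSY, base + FLUSH_REG);
	return readl_poll_timeout_atomic(base + FLUSH_REG, reg,
					 !(reg & FLUSH_BUSY), 1, 50);
}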
328 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_iommu_flush_iotlb_all() local
333 * .probe_device, and since we link our (single) domain to our iommu in in sun50i_iommu_flush_iotlb_all()
339 if (!iommu) in sun50i_iommu_flush_iotlb_all()
342 spin_lock_irqsave(&iommu->iommu_lock, flags); in sun50i_iommu_flush_iotlb_all()
343 sun50i_iommu_flush_all_tlb(iommu); in sun50i_iommu_flush_iotlb_all()
344 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_flush_iotlb_all()
353 static int sun50i_iommu_enable(struct sun50i_iommu *iommu) in sun50i_iommu_enable() argument
359 if (!iommu->domain) in sun50i_iommu_enable()
362 sun50i_domain = to_sun50i_domain(iommu->domain); in sun50i_iommu_enable()
364 ret = reset_control_deassert(iommu->reset); in sun50i_iommu_enable()
368 ret = clk_prepare_enable(iommu->clk); in sun50i_iommu_enable()
372 spin_lock_irqsave(&iommu->iommu_lock, flags); in sun50i_iommu_enable()
374 iommu_write(iommu, IOMMU_TTB_REG, sun50i_domain->dt_dma); in sun50i_iommu_enable()
375 iommu_write(iommu, IOMMU_TLB_PREFETCH_REG, in sun50i_iommu_enable()
382 iommu_write(iommu, IOMMU_INT_ENABLE_REG, IOMMU_INT_MASK); in sun50i_iommu_enable()
383 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_NONE), in sun50i_iommu_enable()
397 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_RD), in sun50i_iommu_enable()
405 iommu_write(iommu, IOMMU_DM_AUT_CTRL_REG(SUN50I_IOMMU_ACI_WR), in sun50i_iommu_enable()
413 ret = sun50i_iommu_flush_all_tlb(iommu); in sun50i_iommu_enable()
415 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_enable()
419 iommu_write(iommu, IOMMU_AUTO_GATING_REG, IOMMU_AUTO_GATING_ENABLE); in sun50i_iommu_enable()
420 iommu_write(iommu, IOMMU_ENABLE_REG, IOMMU_ENABLE_ENABLE); in sun50i_iommu_enable()
422 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_enable()
427 clk_disable_unprepare(iommu->clk); in sun50i_iommu_enable()
430 reset_control_assert(iommu->reset); in sun50i_iommu_enable()
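sun50i_iommu_enable() above follows the usual bring-up order (deassert the reset, enable the clock, then program the registers under the lock) and unwinds in reverse on failure. A minimal sketch of that goto-unwind shape against the generic clk and reset_control APIs; the function and the program() callback are illustrative, not part of the driver:

#include <linux/clk.h>
#include <linux/reset.h>

static int bring_up(struct clk *clk, struct reset_control *reset,
		    int (*program)(void *priv), void *priv)
{
	int ret;

	ret = reset_control_deassert(reset);
	if (ret)
		return ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		goto err_reset;

	ret = program(priv);	/* e.g. write TTB, unmask IRQs, flush the TLB */
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(clk);
err_reset:
	reset_control_assert(reset);
	return ret;
}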
435 static void sun50i_iommu_disable(struct sun50i_iommu *iommu) in sun50i_iommu_disable() argument
439 spin_lock_irqsave(&iommu->iommu_lock, flags); in sun50i_iommu_disable()
441 iommu_write(iommu, IOMMU_ENABLE_REG, 0); in sun50i_iommu_disable()
442 iommu_write(iommu, IOMMU_TTB_REG, 0); in sun50i_iommu_disable()
444 spin_unlock_irqrestore(&iommu->iommu_lock, flags); in sun50i_iommu_disable()
446 clk_disable_unprepare(iommu->clk); in sun50i_iommu_disable()
447 reset_control_assert(iommu->reset); in sun50i_iommu_disable()
450 static void *sun50i_iommu_alloc_page_table(struct sun50i_iommu *iommu, in sun50i_iommu_alloc_page_table() argument
456 page_table = kmem_cache_zalloc(iommu->pt_pool, gfp); in sun50i_iommu_alloc_page_table()
460 pt_dma = dma_map_single(iommu->dev, page_table, PT_SIZE, DMA_TO_DEVICE); in sun50i_iommu_alloc_page_table()
461 if (dma_mapping_error(iommu->dev, pt_dma)) { in sun50i_iommu_alloc_page_table()
462 dev_err(iommu->dev, "Couldn't map L2 Page Table\n"); in sun50i_iommu_alloc_page_table()
463 kmem_cache_free(iommu->pt_pool, page_table); in sun50i_iommu_alloc_page_table()
473 static void sun50i_iommu_free_page_table(struct sun50i_iommu *iommu, in sun50i_iommu_free_page_table() argument
478 dma_unmap_single(iommu->dev, pt_phys, PT_SIZE, DMA_TO_DEVICE); in sun50i_iommu_free_page_table()
479 kmem_cache_free(iommu->pt_pool, page_table); in sun50i_iommu_free_page_table()
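The two helpers above pair a kmem_cache allocation with a streaming DMA mapping: the table is written by the CPU, mapped DMA_TO_DEVICE so the IOMMU can walk it, and freed again if the mapping fails. A condensed sketch of that pairing; PT_SIZE and the function name are stand-ins for illustration:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define PT_SIZE	1024	/* placeholder table size */

static void *alloc_table(struct device *dev, struct kmem_cache *pool,
			 dma_addr_t *dma, gfp_t gfp)
{
	void *table = kmem_cache_zalloc(pool, gfp);

	if (!table)
		return NULL;

	/* The CPU fills the table, the device only reads it: DMA_TO_DEVICE. */
	*dma = dma_map_single(dev, table, PT_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kmem_cache_free(pool, table);
		return NULL;
	}

	return table;
}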
485 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_dte_get_page_table() local
498 page_table = sun50i_iommu_alloc_page_table(iommu, gfp); in sun50i_dte_get_page_table()
512 sun50i_iommu_free_page_table(iommu, drop_pt); in sun50i_dte_get_page_table()
525 struct sun50i_iommu *iommu = sun50i_domain->iommu; in sun50i_iommu_map() local
540 dev_err(iommu->dev, in sun50i_iommu_map()
652 static int sun50i_iommu_attach_domain(struct sun50i_iommu *iommu, in sun50i_iommu_attach_domain() argument
655 iommu->domain = &sun50i_domain->domain; in sun50i_iommu_attach_domain()
656 sun50i_domain->iommu = iommu; in sun50i_iommu_attach_domain()
658 sun50i_domain->dt_dma = dma_map_single(iommu->dev, sun50i_domain->dt, in sun50i_iommu_attach_domain()
660 if (dma_mapping_error(iommu->dev, sun50i_domain->dt_dma)) { in sun50i_iommu_attach_domain()
661 dev_err(iommu->dev, "Couldn't map L1 Page Table\n"); in sun50i_iommu_attach_domain()
665 return sun50i_iommu_enable(iommu); in sun50i_iommu_attach_domain()
668 static void sun50i_iommu_detach_domain(struct sun50i_iommu *iommu, in sun50i_iommu_detach_domain() argument
689 sun50i_iommu_free_page_table(iommu, page_table); in sun50i_iommu_detach_domain()
693 sun50i_iommu_disable(iommu); in sun50i_iommu_detach_domain()
695 dma_unmap_single(iommu->dev, virt_to_phys(sun50i_domain->dt), in sun50i_iommu_detach_domain()
698 iommu->domain = NULL; in sun50i_iommu_detach_domain()
705 struct sun50i_iommu *iommu = dev_iommu_priv_get(dev); in sun50i_iommu_detach_device() local
707 dev_dbg(dev, "Detaching from IOMMU domain\n"); in sun50i_iommu_detach_device()
709 if (iommu->domain != domain) in sun50i_iommu_detach_device()
713 sun50i_iommu_detach_domain(iommu, sun50i_domain); in sun50i_iommu_detach_device()
720 struct sun50i_iommu *iommu; in sun50i_iommu_attach_device() local
722 iommu = sun50i_iommu_from_dev(dev); in sun50i_iommu_attach_device()
723 if (!iommu) in sun50i_iommu_attach_device()
726 dev_dbg(dev, "Attaching to IOMMU domain\n"); in sun50i_iommu_attach_device()
730 if (iommu->domain == domain) in sun50i_iommu_attach_device()
733 if (iommu->domain) in sun50i_iommu_attach_device()
734 sun50i_iommu_detach_device(iommu->domain, dev); in sun50i_iommu_attach_device()
736 sun50i_iommu_attach_domain(iommu, sun50i_domain); in sun50i_iommu_attach_device()
743 struct sun50i_iommu *iommu; in sun50i_iommu_probe_device() local
745 iommu = sun50i_iommu_from_dev(dev); in sun50i_iommu_probe_device()
746 if (!iommu) in sun50i_iommu_probe_device()
749 return &iommu->iommu; in sun50i_iommu_probe_device()
756 struct sun50i_iommu *iommu = sun50i_iommu_from_dev(dev); in sun50i_iommu_device_group() local
758 return iommu_group_ref_get(iommu->group); in sun50i_iommu_device_group()
789 static void sun50i_iommu_report_fault(struct sun50i_iommu *iommu, in sun50i_iommu_report_fault() argument
793 dev_err(iommu->dev, "Page fault for %pad (master %d, dir %s)\n", in sun50i_iommu_report_fault()
796 if (iommu->domain) in sun50i_iommu_report_fault()
797 report_iommu_fault(iommu->domain, iommu->dev, iova, prot); in sun50i_iommu_report_fault()
799 dev_err(iommu->dev, "Page fault while iommu not attached to any domain?\n"); in sun50i_iommu_report_fault()
802 static phys_addr_t sun50i_iommu_handle_pt_irq(struct sun50i_iommu *iommu, in sun50i_iommu_handle_pt_irq() argument
810 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_handle_pt_irq()
812 iova = iommu_read(iommu, addr_reg); in sun50i_iommu_handle_pt_irq()
813 blame = iommu_read(iommu, blame_reg); in sun50i_iommu_handle_pt_irq()
821 sun50i_iommu_report_fault(iommu, master, iova, IOMMU_FAULT_READ); in sun50i_iommu_handle_pt_irq()
826 static phys_addr_t sun50i_iommu_handle_perm_irq(struct sun50i_iommu *iommu) in sun50i_iommu_handle_perm_irq() argument
834 assert_spin_locked(&iommu->iommu_lock); in sun50i_iommu_handle_perm_irq()
836 blame = iommu_read(iommu, IOMMU_INT_STA_REG); in sun50i_iommu_handle_perm_irq()
838 iova = iommu_read(iommu, IOMMU_INT_ERR_ADDR_REG(master)); in sun50i_iommu_handle_perm_irq()
839 aci = sun50i_get_pte_aci(iommu_read(iommu, in sun50i_iommu_handle_perm_irq()
876 sun50i_iommu_report_fault(iommu, master, iova, dir); in sun50i_iommu_handle_perm_irq()
883 struct sun50i_iommu *iommu = dev_id; in sun50i_iommu_irq() local
886 spin_lock(&iommu->iommu_lock); in sun50i_iommu_irq()
888 status = iommu_read(iommu, IOMMU_INT_STA_REG); in sun50i_iommu_irq()
890 spin_unlock(&iommu->iommu_lock); in sun50i_iommu_irq()
895 sun50i_iommu_handle_pt_irq(iommu, in sun50i_iommu_irq()
899 sun50i_iommu_handle_pt_irq(iommu, in sun50i_iommu_irq()
903 sun50i_iommu_handle_perm_irq(iommu); in sun50i_iommu_irq()
905 iommu_write(iommu, IOMMU_INT_CLR_REG, status); in sun50i_iommu_irq()
907 iommu_write(iommu, IOMMU_RESET_REG, ~status); in sun50i_iommu_irq()
908 iommu_write(iommu, IOMMU_RESET_REG, status); in sun50i_iommu_irq()
910 spin_unlock(&iommu->iommu_lock); in sun50i_iommu_irq()
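The interrupt handler above reads the status register under the lock, returns IRQ_NONE when nothing is pending, dispatches page-table and permission faults, and only then clears the pending bits (and pulses IOMMU_RESET_REG). A bare-bones sketch of that read, dispatch, acknowledge shape; the struct, register offsets and names are placeholders:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/spinlock.h>

#define INT_STA_REG	0x00	/* placeholder offsets */
#define INT_CLR_REG	0x04

struct my_iommu {
	void __iomem	*base;
	spinlock_t	lock;
};

static irqreturn_t my_iommu_irq(int irq, void *dev_id)
{
	struct my_iommu *iommu = dev_id;
	u32 status;

	spin_lock(&iommu->lock);

	status = readl(iommu->base + INT_STA_REG);
	if (!status) {
		/* Nothing pending: not our interrupt. */
		spin_unlock(&iommu->lock);
		return IRQ_NONE;
	}

	/* ... decode the faulting address/master and report it here ... */

	/* Acknowledge what was handled so the line can deassert. */
	writel(status, iommu->base + INT_CLR_REG);

	spin_unlock(&iommu->lock);
	return IRQ_HANDLED;
}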
917 struct sun50i_iommu *iommu; in sun50i_iommu_probe() local
920 iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL); in sun50i_iommu_probe()
921 if (!iommu) in sun50i_iommu_probe()
923 spin_lock_init(&iommu->iommu_lock); in sun50i_iommu_probe()
924 platform_set_drvdata(pdev, iommu); in sun50i_iommu_probe()
925 iommu->dev = &pdev->dev; in sun50i_iommu_probe()
927 iommu->pt_pool = kmem_cache_create(dev_name(&pdev->dev), in sun50i_iommu_probe()
931 if (!iommu->pt_pool) in sun50i_iommu_probe()
934 iommu->group = iommu_group_alloc(); in sun50i_iommu_probe()
935 if (IS_ERR(iommu->group)) { in sun50i_iommu_probe()
936 ret = PTR_ERR(iommu->group); in sun50i_iommu_probe()
940 iommu->base = devm_platform_ioremap_resource(pdev, 0); in sun50i_iommu_probe()
941 if (IS_ERR(iommu->base)) { in sun50i_iommu_probe()
942 ret = PTR_ERR(iommu->base); in sun50i_iommu_probe()
952 iommu->clk = devm_clk_get(&pdev->dev, NULL); in sun50i_iommu_probe()
953 if (IS_ERR(iommu->clk)) { in sun50i_iommu_probe()
955 ret = PTR_ERR(iommu->clk); in sun50i_iommu_probe()
959 iommu->reset = devm_reset_control_get(&pdev->dev, NULL); in sun50i_iommu_probe()
960 if (IS_ERR(iommu->reset)) { in sun50i_iommu_probe()
962 ret = PTR_ERR(iommu->reset); in sun50i_iommu_probe()
966 ret = iommu_device_sysfs_add(&iommu->iommu, &pdev->dev, in sun50i_iommu_probe()
971 iommu_device_set_ops(&iommu->iommu, &sun50i_iommu_ops); in sun50i_iommu_probe()
972 iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode); in sun50i_iommu_probe()
974 ret = iommu_device_register(&iommu->iommu); in sun50i_iommu_probe()
979 dev_name(&pdev->dev), iommu); in sun50i_iommu_probe()
988 iommu_device_unregister(&iommu->iommu); in sun50i_iommu_probe()
991 iommu_device_sysfs_remove(&iommu->iommu); in sun50i_iommu_probe()
994 iommu_group_put(iommu->group); in sun50i_iommu_probe()
997 kmem_cache_destroy(iommu->pt_pool); in sun50i_iommu_probe()
1003 { .compatible = "allwinner,sun50i-h6-iommu", },
1010 .name = "sun50i-iommu",
1017 MODULE_DESCRIPTION("Allwinner H6 IOMMU driver");
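The tail of the listing shows the device-tree compatible ("allwinner,sun50i-h6-iommu"), the platform driver name and the module description. A skeletal sketch of how such an of_device_id table is wired to a platform_driver; the probe stub and the identifiers prefixed example_ are illustrative, not the driver's:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* ioremap, clock/reset lookup, IOMMU registration, IRQ request, ... */
	return 0;
}

static const struct of_device_id example_dt_ids[] = {
	{ .compatible = "allwinner,sun50i-h6-iommu" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, example_dt_ids);

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name		= "sun50i-iommu",
		.of_match_table	= example_dt_ids,
	},
};
module_platform_driver(example_driver);

MODULE_LICENSE("GPL");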