Lines matching "iommu" in the Rockchip IOMMU driver (drivers/iommu/rockchip-iommu.c)

/* IOMMU API for Rockchip */

#include <linux/dma-iommu.h>
#include <linux/iommu.h>
/* list of clocks required by IOMMU */

In struct rk_iommu:
	struct iommu_device iommu;
	struct iommu_domain *domain; /* domain to which iommu is attached */

In struct rk_iommudata:
	struct device_link *link; /* runtime PM link from IOMMU to master */
	struct rk_iommu *iommu;
/*
 * The Rockchip rk3288 iommu uses a 2-level page table: a first-level
 * "Directory Table" (DT) of 1024 4-byte entries (DTEs), each pointing to a
 * second-level "Page Table" (PT) of 1024 4-byte entries (PTEs), each of
 * which maps one 4 KiB page.
 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
 * address of the first level page table.
 *
 * rk3288 iova (IOMMU Virtual Address) format:
 *  31       22 21       12 11          0
 * +-----------+-----------+-------------+
 * | DTE index | PTE index | Page offset |
 * +-----------+-----------+-------------+
 */
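The split above can be exercised with a small standalone program. This is a
minimal sketch: the RK_IOVA_* masks and shifts mirror the driver's constants,
but the helper names and the main() harness are illustrative, not the
driver's API.

	#include <stdint.h>
	#include <stdio.h>

	#define RK_IOVA_DTE_MASK   0xffc00000u	/* bits 31:22 -> DTE index */
	#define RK_IOVA_DTE_SHIFT  22
	#define RK_IOVA_PTE_MASK   0x003ff000u	/* bits 21:12 -> PTE index */
	#define RK_IOVA_PTE_SHIFT  12
	#define RK_IOVA_PAGE_MASK  0x00000fffu	/* bits 11:0  -> page offset */

	static uint32_t iova_dte_index(uint32_t iova)
	{
		return (iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
	}

	static uint32_t iova_pte_index(uint32_t iova)
	{
		return (iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
	}

	static uint32_t iova_page_offset(uint32_t iova)
	{
		return iova & RK_IOVA_PAGE_MASK;
	}

	int main(void)
	{
		uint32_t iova = 0x12345678;

		/* Prints dte=0x48 pte=0x345 off=0x678 for this iova. */
		printf("dte=%#x pte=%#x off=%#x\n", iova_dte_index(iova),
		       iova_pte_index(iova), iova_page_offset(iova));
		return 0;
	}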
static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
	for (i = 0; i < iommu->num_mmu; i++)
		writel(command, iommu->bases[i] + RK_MMU_COMMAND);
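One rk_iommu can front several identical MMU instances (iommu->num_mmu), so
register writes are broadcast to every instance, as above. The listing also
uses rk_iommu_read()/rk_iommu_write() throughout; in the driver these are
thin MMIO wrappers, roughly (a sketch, not verbatim):

	static u32 rk_iommu_read(void __iomem *base, u32 offset)
	{
		return readl(base + offset);
	}

	static void rk_iommu_write(void __iomem *base, u32 offset, u32 value)
	{
		writel(value, base + offset);
	}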
static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova_start,
			       size_t size)
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);
static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
	for (i = 0; i < iommu->num_mmu; i++)
		active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &

static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
	for (i = 0; i < iommu->num_mmu; i++)
		enable &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &

static bool rk_iommu_is_reset_done(struct rk_iommu *iommu)
	for (i = 0; i < iommu->num_mmu; i++)
		done &= rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR) == 0;
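Each predicate ANDs the relevant status bit across every MMU instance, so it
only reports true once all instances agree. A plausible completion of the
first one (the status bit follows the driver's RK_MMU_STATUS_* naming):

	static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
	{
		bool active = true;
		int i;

		for (i = 0; i < iommu->num_mmu; i++)
			active &= !!(rk_iommu_read(iommu->bases[i], RK_MMU_STATUS) &
				     RK_MMU_STATUS_STALL_ACTIVE);

		return active;
	}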
static int rk_iommu_enable_stall(struct rk_iommu *iommu)
	if (rk_iommu_is_stall_active(iommu))
	if (!rk_iommu_is_paging_enabled(iommu))
	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
	for (i = 0; i < iommu->num_mmu; i++)
		dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
			rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
static int rk_iommu_disable_stall(struct rk_iommu *iommu)
	if (!rk_iommu_is_stall_active(iommu))
	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
	ret = readx_poll_timeout(rk_iommu_is_stall_active, iommu, val,
	for (i = 0; i < iommu->num_mmu; i++)
		dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
			rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
static int rk_iommu_enable_paging(struct rk_iommu *iommu)
	if (rk_iommu_is_paging_enabled(iommu))
	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
	for (i = 0; i < iommu->num_mmu; i++)
		dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
			rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
static int rk_iommu_disable_paging(struct rk_iommu *iommu)
	if (!rk_iommu_is_paging_enabled(iommu))
	rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
	for (i = 0; i < iommu->num_mmu; i++)
		dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
			rk_iommu_read(iommu->bases[i], RK_MMU_STATUS));
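All four enable/disable helpers follow the same handshake: return early if
the hardware is already in the target state, broadcast a command, then poll
the matching predicate until it flips or a timeout expires. A sketch of the
pattern using readx_poll_timeout() from <linux/iopoll.h>; the RK_MMU_POLL_*
names stand in for the driver's poll period/timeout constants:

	rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);

	/* Re-evaluate rk_iommu_is_paging_enabled(iommu) into val until true. */
	ret = readx_poll_timeout(rk_iommu_is_paging_enabled, iommu, val,
				 val, RK_MMU_POLL_PERIOD_US,
				 RK_MMU_POLL_TIMEOUT_US);
	if (ret)
		dev_err(iommu->dev, "Enable paging request timed out\n");

The disable variants pass !val as the condition, i.e. they poll until the
predicate goes false.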
static int rk_iommu_force_reset(struct rk_iommu *iommu)
	if (iommu->reset_disabled)
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
		dte_addr = rk_iommu_read(iommu->bases[i], RK_MMU_DTE_ADDR);
		dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
	rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
	ret = readx_poll_timeout(rk_iommu_is_reset_done, iommu, val,
	dev_err(iommu->dev, "FORCE_RESET command timed out\n");
static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)
	void __iomem *base = iommu->bases[index];
	dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
	dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa …
In rk_iommu_irq():
	struct rk_iommu *iommu = dev_id;
	err = pm_runtime_get_if_in_use(iommu->dev);
	if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
	for (i = 0; i < iommu->num_mmu; i++) {
		int_status = rk_iommu_read(iommu->bases[i], RK_MMU_INT_STATUS);
		iova = rk_iommu_read(iommu->bases[i], RK_MMU_PAGE_FAULT_ADDR);
		status = rk_iommu_read(iommu->bases[i], RK_MMU_STATUS);
		dev_err(iommu->dev, "Page fault at %pad of type %s\n",
		log_iova(iommu, i, iova);
		if (iommu->domain)
			report_iommu_fault(iommu->domain, iommu->dev, iova,
		dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_PAGE_FAULT_DONE);
		dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
		dev_err(iommu->dev, "unexpected int_status: %#08x\n",
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR, int_status);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	pm_runtime_put(iommu->dev);
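Pieced together, the handler first guards against a powered-down IOMMU
(pm_runtime_get_if_in_use() returns 0 when the device is suspended, in which
case the interrupt cannot be ours), then walks every instance and dispatches
on its interrupt status. A condensed sketch of that control flow, with the
fault reporting elided since the lines above show it:

	static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
	{
		struct rk_iommu *iommu = dev_id;
		irqreturn_t ret = IRQ_NONE;
		int i, err;

		err = pm_runtime_get_if_in_use(iommu->dev);
		if (!err || WARN_ON_ONCE(err < 0))
			return ret;

		if (WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks)))
			goto out;

		for (i = 0; i < iommu->num_mmu; i++) {
			u32 int_status = rk_iommu_read(iommu->bases[i],
						       RK_MMU_INT_STATUS);
			if (!int_status)
				continue;

			ret = IRQ_HANDLED;
			/* page-fault / bus-error handling elided */
			rk_iommu_write(iommu->bases[i], RK_MMU_INT_CLEAR,
				       int_status);
		}

		clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	out:
		pm_runtime_put(iommu->dev);
		return ret;
	}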
In rk_iommu_zap_iova():
	struct rk_iommu *iommu;
	iommu = list_entry(pos, struct rk_iommu, node);
	ret = pm_runtime_get_if_in_use(iommu->dev);
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_zap_lines(iommu, iova, size);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
	pm_runtime_put(iommu->dev);
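The zap path only touches IOMMUs that are currently powered
(pm_runtime_get_if_in_use() > 0); a suspended IOMMU is reprogrammed from
scratch by rk_iommu_enable() on resume, so its TLB needs no zapping now. A
plausible reconstruction of the loop around the fragments above:

	list_for_each(pos, &rk_domain->iommus) {
		struct rk_iommu *iommu = list_entry(pos, struct rk_iommu, node);

		/* Only zap TLBs of IOMMUs that are powered on. */
		ret = pm_runtime_get_if_in_use(iommu->dev);
		if (WARN_ON_ONCE(ret < 0))
			continue;
		if (ret) {
			WARN_ON(clk_bulk_enable(iommu->num_clocks,
						iommu->clocks));
			rk_iommu_zap_lines(iommu, iova, size);
			clk_bulk_disable(iommu->num_clocks, iommu->clocks);
			pm_runtime_put(iommu->dev);
		}
	}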
In rk_iommu_from_dev():
	return data ? data->iommu : NULL;
/* Must be called with iommu powered on and attached */
static void rk_iommu_disable(struct rk_iommu *iommu)
	WARN_ON(clk_bulk_enable(iommu->num_clocks, iommu->clocks));
	rk_iommu_enable_stall(iommu);
	rk_iommu_disable_paging(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, 0);
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, 0);
	}
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
/* Must be called with iommu powered on and attached */
static int rk_iommu_enable(struct rk_iommu *iommu)
	struct iommu_domain *domain = iommu->domain;
	ret = clk_bulk_enable(iommu->num_clocks, iommu->clocks);
	ret = rk_iommu_enable_stall(iommu);
	ret = rk_iommu_force_reset(iommu);
	for (i = 0; i < iommu->num_mmu; i++) {
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}
	ret = rk_iommu_enable_paging(iommu);
	rk_iommu_disable_stall(iommu);
	clk_bulk_disable(iommu->num_clocks, iommu->clocks);
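The truncated rk_iommu_write(..., RK_MMU_DTE_ADDR, ...) presumably programs
the physical address of the domain's directory table (rk_domain->dt_dma in
the driver), completing the enable sequence: stall, force reset, point each
instance at the DT, zap stale TLB entries, unmask interrupts, then enable
paging and release the stall. A sketch of the per-instance step:

	for (i = 0; i < iommu->num_mmu; i++) {
		/* Point this MMU instance at the domain's directory table. */
		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR,
			       rk_domain->dt_dma);
		/* Drop TLB entries left over from a previous domain. */
		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
	}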
In rk_iommu_detach_device():
	struct rk_iommu *iommu;
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
	dev_dbg(dev, "Detaching from iommu domain\n");
	/* iommu already detached */
	if (iommu->domain != domain)
	iommu->domain = NULL;
	list_del_init(&iommu->node);
	ret = pm_runtime_get_if_in_use(iommu->dev);
	rk_iommu_disable(iommu);
	pm_runtime_put(iommu->dev);
In rk_iommu_attach_device():
	struct rk_iommu *iommu;
	/*
	 * Allow 'virtual devices' (e.g., drm) to attach to a domain.
	 * Such a device does not belong to an iommu group.
	 */
	iommu = rk_iommu_from_dev(dev);
	if (!iommu)
	dev_dbg(dev, "Attaching to iommu domain\n");
	/* iommu already attached */
	if (iommu->domain == domain)
	if (iommu->domain)
		rk_iommu_detach_device(iommu->domain, dev);
	iommu->domain = domain;
	list_add_tail(&iommu->node, &rk_domain->iommus);
	ret = pm_runtime_get_if_in_use(iommu->dev);
	ret = rk_iommu_enable(iommu);
	if (ret)
		rk_iommu_detach_device(iommu->domain, dev);
	pm_runtime_put(iommu->dev);
In rk_iommu_probe_device():
	struct rk_iommu *iommu;
	iommu = rk_iommu_from_dev(dev);
	data->link = device_link_add(dev, iommu->dev,
	return &iommu->iommu;
In rk_iommu_device_group():
	struct rk_iommu *iommu;
	iommu = rk_iommu_from_dev(dev);
	return iommu_group_ref_get(iommu->group);

In rk_iommu_of_xlate():
	data->iommu = platform_get_drvdata(iommu_dev);
In rk_iommu_probe():
	struct rk_iommu *iommu;
	iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
	platform_set_drvdata(pdev, iommu);
	iommu->dev = dev;
	iommu->num_mmu = 0;
	iommu->bases = devm_kcalloc(dev, num_res, sizeof(*iommu->bases),
	if (!iommu->bases)
	iommu->bases[i] = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(iommu->bases[i]))
	iommu->num_mmu++;
	if (iommu->num_mmu == 0)
		return PTR_ERR(iommu->bases[0]);
	iommu->num_irq = platform_irq_count(pdev);
	if (iommu->num_irq < 0)
		return iommu->num_irq;
	iommu->reset_disabled = device_property_read_bool(dev,
	iommu->num_clocks = ARRAY_SIZE(rk_iommu_clocks);
	iommu->clocks = devm_kcalloc(iommu->dev, iommu->num_clocks,
				     sizeof(*iommu->clocks), GFP_KERNEL);
	if (!iommu->clocks)
	for (i = 0; i < iommu->num_clocks; ++i)
		iommu->clocks[i].id = rk_iommu_clocks[i];
	/*
	 * iommu clocks should be present for all new devices and devicetrees,
	 * but there are older devicetrees without clocks out in the wild,
	 * so the clocks are treated as optional.
	 */
	err = devm_clk_bulk_get(iommu->dev, iommu->num_clocks, iommu->clocks);
		iommu->num_clocks = 0;
	err = clk_bulk_prepare(iommu->num_clocks, iommu->clocks);
	iommu->group = iommu_group_alloc();
	if (IS_ERR(iommu->group)) {
		err = PTR_ERR(iommu->group);
	err = iommu_device_sysfs_add(&iommu->iommu, dev, NULL, dev_name(dev));
	iommu_device_set_ops(&iommu->iommu, &rk_iommu_ops);
	iommu_device_set_fwnode(&iommu->iommu, &dev->of_node->fwnode);
	err = iommu_device_register(&iommu->iommu);
	/*
	 * Use the first registered IOMMU device for domain to use with DMA
	 * API, since a domain might not physically correspond to a single
	 * IOMMU device..
	 */
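The comment above guards a one-time assignment: the first IOMMU to register
becomes the device used for DMA API allocations of page-table memory. In the
driver this is roughly (assuming the file-scope 'static struct device
*dma_dev;' it keeps for this purpose):

	if (!dma_dev)
		dma_dev = &pdev->dev;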
	for (i = 0; i < iommu->num_irq; i++) {
		err = devm_request_irq(iommu->dev, irq, rk_iommu_irq,
				       IRQF_SHARED, dev_name(dev), iommu);

	iommu_device_sysfs_remove(&iommu->iommu);
	iommu_group_put(iommu->group);
	clk_bulk_unprepare(iommu->num_clocks, iommu->clocks);
In rk_iommu_shutdown():
	struct rk_iommu *iommu = platform_get_drvdata(pdev);
	for (i = 0; i < iommu->num_irq; i++) {
		devm_free_irq(iommu->dev, irq, iommu);
In rk_iommu_suspend():
	struct rk_iommu *iommu = dev_get_drvdata(dev);
	if (!iommu->domain)
	rk_iommu_disable(iommu);

In rk_iommu_resume():
	struct rk_iommu *iommu = dev_get_drvdata(dev);
	if (!iommu->domain)
	return rk_iommu_enable(iommu);
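Both callbacks bail out early when no domain is attached, since an unattached
IOMMU has no state to save or restore. They are wired up as runtime PM
callbacks; a sketch of the dev_pm_ops, assuming the driver's usual pattern of
reusing them for system sleep via the force helpers:

	static const struct dev_pm_ops rk_iommu_pm_ops = {
		SET_RUNTIME_PM_OPS(rk_iommu_suspend, rk_iommu_resume, NULL)
		SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
					pm_runtime_force_resume)
	};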
1284 { .compatible = "rockchip,iommu" },