Lines matching +full:iommu +full:-map in drivers/iommu/msm_iommu.c (Linux kernel)

// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. */

#include <linux/io-pgtable.h>
#include <linux/iommu.h>

#include "msm_iommu_hw-8xxx.h"
static int __enable_clocks(struct msm_iommu_dev *iommu)
{
        int ret;

        ret = clk_enable(iommu->pclk);
        if (ret)
                goto fail;

        if (iommu->clk) {
                ret = clk_enable(iommu->clk);
                if (ret)
                        clk_disable(iommu->pclk);
        }
fail:
        return ret;
}

static void __disable_clocks(struct msm_iommu_dev *iommu)
{
        if (iommu->clk)
                clk_disable(iommu->clk);
        clk_disable(iommu->pclk);
}
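The split between these two helpers follows the Linux clk API: clk_prepare()/clk_unprepare() may sleep, so this driver performs them once at probe/remove time, while clk_enable()/clk_disable() are safe in atomic context and bracket register access on hot paths. A minimal sketch of that pairing, assuming kernel context; the device name "core" and the demo_* functions are illustrative, not from this file:

/* Sketch: sleepable half once at probe, atomic half on the hot path. */
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>

static int demo_clocks_init(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, "core");       /* "core" is an assumed name */
        if (IS_ERR(*clk))
                return PTR_ERR(*clk);
        return clk_prepare(*clk);               /* may sleep: do it up front */
}

static void demo_reg_write(struct clk *clk, void __iomem *base, u32 val)
{
        if (clk_enable(clk))                    /* atomic-safe */
                return;
        writel(val, base);                      /* ... register access ... */
        clk_disable(clk);
}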
in __flush_iotlb():
        struct msm_iommu_dev *iommu = NULL;
        ...
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list)
                        SET_CTX_TLBIALL(iommu->base, master->num, 0);

                __disable_clocks(iommu);
        }
in __flush_iotlb_range():
        struct msm_iommu_dev *iommu = NULL;
        ...
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        ...
                        do {
                                ...
                                iova |= GET_CONTEXTIDR_ASID(iommu->base,
                                                            master->num);
                                SET_TLBIVA(iommu->base, master->num, iova);
                                ...
                        } while (temp_size -= granule);
                }

                __disable_clocks(iommu);
        }
static int msm_iommu_alloc_ctx(unsigned long *map, int start, int end)
{
        int idx;

        do {
                idx = find_next_zero_bit(map, end, start);
                if (idx == end)
                        return -ENOSPC;
        } while (test_and_set_bit(idx, map));

        return idx;
}

static void msm_iommu_free_ctx(unsigned long *map, int idx)
{
        clear_bit(idx, map);
}
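The allocator claims a free context-bank number from a bitmap without a lock: find_next_zero_bit() proposes a free index and test_and_set_bit() atomically claims it, retrying if another CPU won the race. A runnable user-space analogue of the same pattern; the names and the GCC __atomic builtins are my own, not the driver's:

#include <stdio.h>

#define NCTX 16

/* Claim the lowest free slot in a bitmap, atomically; -1 if full. */
static int alloc_slot(unsigned long *map)
{
        for (int idx = 0; idx < NCTX; idx++) {
                unsigned long bit = 1UL << idx;

                if (*map & bit)         /* slot already looks taken */
                        continue;
                /* Atomically set the bit; keep scanning if we lost a race. */
                if (!(__atomic_fetch_or(map, bit, __ATOMIC_SEQ_CST) & bit))
                        return idx;
        }
        return -1;
}

static void free_slot(unsigned long *map, int idx)
{
        __atomic_fetch_and(map, ~(1UL << idx), __ATOMIC_SEQ_CST);
}

int main(void)
{
        unsigned long map = 0;
        int a = alloc_slot(&map), b = alloc_slot(&map);

        printf("allocated %d and %d\n", a, b);          /* 0 and 1 */
        free_slot(&map, a);
        printf("reallocated %d\n", alloc_slot(&map));   /* 0 again */
        return 0;
}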
static void config_mids(struct msm_iommu_dev *iommu,
                        struct msm_iommu_ctx_dev *master)
{
        int mid, ctx, i;

        for (i = 0; i < master->num_mids; i++) {
                mid = master->mids[i];
                ctx = master->num;

                SET_M2VCBR_N(iommu->base, mid, 0);
                SET_CBACR_N(iommu->base, ctx, 0);
                ...
                /* Set VMID = 0 */
                SET_VMID(iommu->base, mid, 0);

                /* Point this MID at our context bank */
                SET_CBNDX(iommu->base, mid, ctx);
                ...
                SET_CBVMID(iommu->base, ctx, 0);

                /* Use the context number as the ASID for TLB tagging */
                SET_CONTEXTIDR_ASID(iommu->base, ctx, ctx);

                /* Set security bit override to be Non-secure */
                SET_NSCFG(iommu->base, mid, 3);
        }
}
in __program_context():
        /* Program the ARM short-descriptor translation registers
         * from the io-pgtable configuration. */
        SET_TTBCR(base, ctx, priv->cfg.arm_v7s_cfg.tcr);
        SET_TTBR0(base, ctx, priv->cfg.arm_v7s_cfg.ttbr);
        ...
        SET_PRRR(base, ctx, priv->cfg.arm_v7s_cfg.prrr);
        SET_NMRR(base, ctx, priv->cfg.arm_v7s_cfg.nmrr);
in msm_iommu_domain_alloc():
        INIT_LIST_HEAD(&priv->list_attached);

        /* The v7s tables cover the full 32-bit IOVA space. */
        priv->domain.geometry.aperture_start = 0;
        priv->domain.geometry.aperture_end   = (1ULL << 32) - 1;
        priv->domain.geometry.force_aperture = true;

        return &priv->domain;
in msm_iommu_domain_config():
        spin_lock_init(&priv->pgtlock);

        priv->cfg = (struct io_pgtable_cfg) {
                ...
                .iommu_dev = priv->dev,
        };

        priv->iop = alloc_io_pgtable_ops(ARM_V7S, &priv->cfg, priv);
        if (!priv->iop) {
                dev_err(priv->dev, "Failed to allocate pgtable\n");
                return -EINVAL;
        }

        msm_iommu_ops.pgsize_bitmap = priv->cfg.pgsize_bitmap;
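For context, here is a plausible filling of the elided io_pgtable_cfg initializer, based on the fields struct io_pgtable_cfg exposes in include/linux/io-pgtable.h. The specific values (32-bit input/output address sizes, a driver-owned msm_iommu_flush_ops) are assumptions about this driver, not a quote of the elided lines:

/* Hedged sketch: a typical v7s io_pgtable_cfg for a 32-bit IOMMU.
 * The field names are the real io-pgtable API; msm_iommu_flush_ops
 * is assumed to be this driver's struct iommu_flush_ops. */
priv->cfg = (struct io_pgtable_cfg) {
        .pgsize_bitmap = msm_iommu_ops.pgsize_bitmap, /* supported page sizes */
        .ias = 32,                      /* input (IOVA) address bits */
        .oas = 32,                      /* output (PA) address bits */
        .tlb = &msm_iommu_flush_ops,    /* TLB maintenance callbacks */
        .iommu_dev = priv->dev,
};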
in find_iommu_for_dev():
        struct msm_iommu_dev *iommu, *ret = NULL;
        ...
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev, list);
                if (master->of_node == dev->of_node) {
                        ret = iommu;
                        break;
                }
        }
in msm_iommu_probe_device():
        struct msm_iommu_dev *iommu;
        ...
        iommu = find_iommu_for_dev(dev);
        ...
        if (!iommu)
                return ERR_PTR(-ENODEV);

        return &iommu->iommu;
in msm_iommu_attach_dev():
        struct msm_iommu_dev *iommu;
        ...
        priv->dev = dev;
        ...
        list_for_each_entry(iommu, &qcom_iommu_devices, dev_node) {
                master = list_first_entry(&iommu->ctx_list,
                                          struct msm_iommu_ctx_dev, list);
                if (master->of_node == dev->of_node) {
                        ret = __enable_clocks(iommu);
                        if (ret)
                                goto fail;

                        list_for_each_entry(master, &iommu->ctx_list, list) {
                                if (master->num) {
                                        ...
                                        ret = -EEXIST;
                                        goto fail;
                                }
                                master->num =
                                        msm_iommu_alloc_ctx(iommu->context_map,
                                                            0, iommu->ncb);
                                if (IS_ERR_VALUE(master->num)) {
                                        ret = -ENODEV;
                                        goto fail;
                                }
                                config_mids(iommu, master);
                                __program_context(iommu->base, master->num,
                                                  priv);
                        }
                        __disable_clocks(iommu);
                        list_add(&iommu->dom_node, &priv->list_attached);
                }
        }
in msm_iommu_detach_dev():
        struct msm_iommu_dev *iommu;
        ...
        free_io_pgtable_ops(priv->iop);
        ...
        list_for_each_entry(iommu, &priv->list_attached, dom_node) {
                ret = __enable_clocks(iommu);
                if (ret)
                        goto fail;

                list_for_each_entry(master, &iommu->ctx_list, list) {
                        msm_iommu_free_ctx(iommu->context_map, master->num);
                        __reset_context(iommu->base, master->num);
                }
                __disable_clocks(iommu);
        }
in msm_iommu_map():
        spin_lock_irqsave(&priv->pgtlock, flags);
        ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
        spin_unlock_irqrestore(&priv->pgtlock, flags);

in msm_iommu_unmap():
        spin_lock_irqsave(&priv->pgtlock, flags);
        len = priv->iop->unmap(priv->iop, iova, len, gather);
        spin_unlock_irqrestore(&priv->pgtlock, flags);
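Both callbacks simply delegate to the io-pgtable ops under the page-table spinlock. From a client's point of view they sit behind the generic IOMMU API; a hedged sketch of that caller side, where the domain, iova, and phys values are illustrative but the iommu_map()/iommu_unmap() calls are the real kernel API of this era:

#include <linux/iommu.h>
#include <linux/sizes.h>

static int map_one_page(struct iommu_domain *domain,
                        unsigned long iova, phys_addr_t phys)
{
        int ret;

        /* Ends up in msm_iommu_map() via domain->ops. */
        ret = iommu_map(domain, iova, phys, SZ_4K,
                        IOMMU_READ | IOMMU_WRITE);
        if (ret)
                return ret;

        /* And this unwinds through msm_iommu_unmap(). */
        iommu_unmap(domain, iova, SZ_4K);
        return 0;
}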
in msm_iommu_iova_to_phys():
        struct msm_iommu_dev *iommu;
        ...
        iommu = list_first_entry(&priv->list_attached,
                                 struct msm_iommu_dev, dom_node);

        if (list_empty(&iommu->ctx_list))
                goto fail;

        master = list_first_entry(&iommu->ctx_list,
                                  struct msm_iommu_ctx_dev, list);
        ...
        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        /* Invalidate the context TLB, then ask the hardware to translate */
        SET_CTX_TLBIALL(iommu->base, master->num, 0);
        SET_V2PPR(iommu->base, master->num, va & V2Pxx_VA);

        par = GET_PAR(iommu->base, master->num);
        ...
        if (GET_NOFAULT_SS(iommu->base, master->num))   /* supersection? */
                ...
        if (GET_FAULT(iommu->base, master->num))        /* translation fault? */
                ...
        __disable_clocks(iommu);
static void insert_iommu_master(struct device *dev,
                                struct msm_iommu_dev **iommu,
                                struct of_phandle_args *spec)
{
        ...
        if (list_empty(&(*iommu)->ctx_list)) {
                ...
                master->of_node = dev->of_node;
                list_add(&master->list, &(*iommu)->ctx_list);
        }

        for (sid = 0; sid < master->num_mids; sid++)
                if (master->mids[sid] == spec->args[0]) {
                        /* this master ID is already recorded */
                        return;
                }

        master->mids[master->num_mids++] = spec->args[0];
}
in qcom_iommu_of_xlate():
        struct msm_iommu_dev *iommu = NULL, *iter;
        ...
        /* Track the match explicitly: the list iterator itself is never
         * NULL after the loop, even when nothing matched. */
        list_for_each_entry(iter, &qcom_iommu_devices, dev_node) {
                if (iter->dev->of_node == spec->np) {
                        iommu = iter;
                        break;
                }
        }

        if (!iommu) {
                ret = -ENODEV;
                goto fail;
        }

        insert_iommu_master(dev, &iommu, spec);
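The of_xlate callback receives the parsed phandle specifier from a consumer's "iommus" property. A hedged sketch of the data it is handed, assuming a hypothetical consumer node with "iommus = <&mmu 3>;" and "#iommu-cells = <1>" on the IOMMU node; mmu_node is illustrative, the struct of_phandle_args fields are the real OF API:

#include <linux/of.h>

extern struct device_node *mmu_node;    /* hypothetical IOMMU node */

struct of_phandle_args spec = {
        .np         = mmu_node, /* the IOMMU's own device_node */
        .args_count = 1,        /* one cell after the phandle */
        .args       = { 3 },    /* the master/stream ID, spec->args[0] */
};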
in msm_iommu_fault_handler():
        struct msm_iommu_dev *iommu = dev_id;
        ...
        if (!iommu) {
                ...
                goto fail;
        }

        pr_err("Unexpected IOMMU page fault!\n");
        pr_err("base = %08x\n", (unsigned int)iommu->base);

        ret = __enable_clocks(iommu);
        if (ret)
                goto fail;

        for (i = 0; i < iommu->ncb; i++) {
                fsr = GET_FSR(iommu->base, i);
                if (fsr) {
                        ...
                        print_ctx_regs(iommu->base, i);
                        /* clear the recorded fault status bits */
                        SET_FSR(iommu->base, i, 0x4000000F);
                }
        }
        __disable_clocks(iommu);
in msm_iommu_ops (file-scope struct iommu_ops):
        .map = msm_iommu_map,
        ...
        /*
         * Nothing is needed for iotlb sync here: the barrier that
         * guarantees completion of the TLB sync operation is implicitly
         * taken care of when the IOMMU client does a writel() before
         * kick-starting the other master.
         */
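The two matched lines above come from the driver's ops table. A hedged sketch of how such a table is typically wired up in kernels of this vintage; the callback set is an assumption pieced together from the functions in this file, not a quote of the elided lines:

static struct iommu_ops msm_iommu_ops = {
        .domain_alloc   = msm_iommu_domain_alloc,
        .domain_free    = msm_iommu_domain_free,        /* assumed name */
        .attach_dev     = msm_iommu_attach_dev,
        .detach_dev     = msm_iommu_detach_dev,
        .map            = msm_iommu_map,
        .unmap          = msm_iommu_unmap,
        .iova_to_phys   = msm_iommu_iova_to_phys,
        .probe_device   = msm_iommu_probe_device,
        .of_xlate       = qcom_iommu_of_xlate,
        /* pgsize_bitmap is later overwritten from the io-pgtable cfg,
         * see msm_iommu_domain_config() above. */
};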
static int msm_iommu_probe(struct platform_device *pdev)
{
        struct resource *r;
        resource_size_t ioaddr;
        struct msm_iommu_dev *iommu;
        int ret, par, val;

        iommu = devm_kzalloc(&pdev->dev, sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->dev = &pdev->dev;
        INIT_LIST_HEAD(&iommu->ctx_list);

        iommu->pclk = devm_clk_get(iommu->dev, "smmu_pclk");
        if (IS_ERR(iommu->pclk)) {
                dev_err(iommu->dev, "could not get smmu_pclk\n");
                return PTR_ERR(iommu->pclk);
        }

        ret = clk_prepare(iommu->pclk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare smmu_pclk\n");
                return ret;
        }

        iommu->clk = devm_clk_get(iommu->dev, "iommu_clk");
        if (IS_ERR(iommu->clk)) {
                dev_err(iommu->dev, "could not get iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return PTR_ERR(iommu->clk);
        }

        ret = clk_prepare(iommu->clk);
        if (ret) {
                dev_err(iommu->dev, "could not prepare iommu_clk\n");
                clk_unprepare(iommu->pclk);
                return ret;
        }

        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        iommu->base = devm_ioremap_resource(iommu->dev, r);
        if (IS_ERR(iommu->base)) {
                dev_err(iommu->dev, "could not get iommu base\n");
                ret = PTR_ERR(iommu->base);
                goto fail;
        }
        ioaddr = r->start;

        iommu->irq = platform_get_irq(pdev, 0);
        if (iommu->irq < 0) {
                ret = -ENODEV;
                goto fail;
        }

        ret = of_property_read_u32(iommu->dev->of_node, "qcom,ncb", &val);
        if (ret) {
                dev_err(iommu->dev, "could not get ncb\n");
                goto fail;
        }
        iommu->ncb = val;

        /* Sanity-check the hardware with a dummy V2P (VA-to-PA) lookup:
         * a PAR of zero here means translation is not working. */
        msm_iommu_reset(iommu->base, iommu->ncb);
        SET_M(iommu->base, 0, 1);
        SET_PAR(iommu->base, 0, 0);
        SET_V2PCFG(iommu->base, 0, 1);
        SET_V2PPR(iommu->base, 0, 0);
        par = GET_PAR(iommu->base, 0);
        SET_V2PCFG(iommu->base, 0, 0);
        SET_M(iommu->base, 0, 0);

        if (!par) {
                ...
                ret = -ENODEV;
                goto fail;
        }

        ret = devm_request_threaded_irq(iommu->dev, iommu->irq, NULL,
                                        msm_iommu_fault_handler,
                                        IRQF_ONESHOT | IRQF_SHARED,
                                        "msm_iommu_secure_irpt_handler",
                                        iommu);
        if (ret) {
                pr_err("Request IRQ %d failed with ret=%d\n", iommu->irq, ret);
                goto fail;
        }

        list_add(&iommu->dev_node, &qcom_iommu_devices);

        ret = iommu_device_sysfs_add(&iommu->iommu, iommu->dev, NULL,
                                     "msm-smmu.%pa", &ioaddr);
        if (ret) {
                pr_err("Could not add msm-smmu at %pa to sysfs\n", &ioaddr);
                goto fail;
        }

        iommu_device_set_ops(&iommu->iommu, &msm_iommu_ops);
        iommu_device_set_fwnode(&iommu->iommu, &pdev->dev.of_node->fwnode);

        ret = iommu_device_register(&iommu->iommu);
        if (ret) {
                pr_err("Could not register msm-smmu at %pa\n", &ioaddr);
                goto fail;
        }
        ...
        pr_info("device mapped at %p, irq %d with %d ctx banks\n",
                iommu->base, iommu->irq, iommu->ncb);

        return ret;

fail:
        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
        return ret;
}
static const struct of_device_id msm_iommu_dt_match[] = {
        { .compatible = "qcom,apq8064-iommu" },
        {}
};
in msm_iommu_remove():
        struct msm_iommu_dev *iommu = platform_get_drvdata(pdev);

        clk_unprepare(iommu->clk);
        clk_unprepare(iommu->pclk);
in msm_iommu_driver_init():
        ...
        pr_err("Failed to register IOMMU driver\n");