Lines Matching +full:protection +full:- +full:domain
1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/dma-mapping.h>
37 #define SECT_MASK (~(SECT_SIZE - 1))
38 #define LPAGE_MASK (~(LPAGE_SIZE - 1))
39 #define SPAGE_MASK (~(SPAGE_SIZE - 1))
54 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces
61 static short PG_ENT_SHIFT = -1;
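As far as I understand the shift, page-table entries store the physical address right-shifted by PG_ENT_SHIFT: 0 on v1.x - v3.x, where both address spaces are 32-bit, and 4 from v5.0 on, so that a 36-bit physical address still fits in a 32-bit entry. For example, a 4 KiB-aligned physical page at 0xA_1234_5000 would be stored as 0xA1234500. The -1 here is just an "uninitialised" sentinel until the hardware version is probed.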
97 #define section_offs(iova) (iova & (SECT_SIZE - 1))
99 #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
101 #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))
113 return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); in lv2ent_offset()
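A quick illustration of how these macros carve an I/O virtual address into table indices may help when reading the map/unmap paths below. The sketch is standalone user-space C with the constants spelled out to what I believe are the driver's usual values (1 MiB sections, 4 KiB small pages, 256 second-level entries); the sample address is arbitrary.

#include <stdio.h>

#define SECT_ORDER      20                      /* 1 MiB first-level section */
#define SPAGE_ORDER     12                      /* 4 KiB small page */
#define SECT_SIZE       (1U << SECT_ORDER)
#define SPAGE_SIZE      (1U << SPAGE_ORDER)
#define NUM_LV2ENTRIES  (SECT_SIZE / SPAGE_SIZE)        /* 256 */

int main(void)
{
        unsigned int iova = 0x12345678;         /* arbitrary sample IOVA */

        unsigned int lv1 = iova >> SECT_ORDER;                  /* 0x123 */
        unsigned int lv2 = (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1); /* 0x45 */
        unsigned int off = iova & (SPAGE_SIZE - 1);             /* 0x678 */

        printf("lv1=%#x lv2=%#x off=%#x\n", lv1, lv2, off);
        return 0;
}

lv1 selects one of the 4096 first-level entries, lv2 one of the 256 entries in the second-level table that the first-level entry points to, and off is the byte offset inside the 4 KiB page.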
151 #define MMU_RAW_VER(reg) (((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */
155 /* v1.x - v3.x registers */
200 { 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
201 { 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
203 { 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
204 { 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
205 { 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
206 { 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
212 { 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
213 { 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
214 { 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
217 { 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
218 { 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
219 { 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
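The last column of these tables is the IOMMU_FAULT_READ/IOMMU_FAULT_WRITE flag that report_iommu_fault() (called further down in exynos_sysmmu_irq()) forwards to any per-domain fault handler. Below is a minimal, hypothetical sketch of a master-device driver installing such a handler; the my_* names are made up, while iommu_get_domain_for_dev() and iommu_set_fault_handler() are the generic IOMMU API.

#include <linux/device.h>
#include <linux/iommu.h>

/*
 * Hypothetical handler: just log the fault. Returning 0 reports it as
 * handled; as far as I recall exynos_sysmmu_irq() treats a non-zero
 * return from the handler as fatal.
 */
static int my_iommu_fault(struct iommu_domain *dom, struct device *dev,
                          unsigned long iova, int flags, void *token)
{
        dev_err(dev, "SysMMU %s fault at IOVA %#lx\n",
                (flags & IOMMU_FAULT_WRITE) ? "write" : "read", iova);
        return 0;
}

static void my_install_fault_handler(struct device *dev)
{
        struct iommu_domain *dom = iommu_get_domain_for_dev(dev);

        if (dom)
                iommu_set_fault_handler(dom, my_iommu_fault, NULL);
}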
223 * This structure is attached to dev->iommu->priv of the master device
230 struct iommu_domain *domain; /* domain this device is attached to */ member
237 * been attached to this domain and page tables of IO address space defined by
238 * it. It is usually referenced by 'domain' pointer.
246 struct iommu_domain domain; /* generic domain data structure */ member
281 struct exynos_iommu_domain *domain; /* domain we belong to */ member
282 struct list_head domain_node; /* node for domain clients list */
294 #define SYSMMU_REG(data, reg) ((data)->sfrbase + (data)->variant->reg)
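SYSMMU_REG() resolves a register name to an MMIO address through a per-variant offset table, so one code path can program the v1, v5 and v7 register layouts. A standalone sketch of the same macro trick (substituting a struct member name) follows; the field names and offsets here are illustrative, not the driver's actual values.

#include <stdint.h>
#include <stdio.h>

struct variant {                /* per-version register offsets (made-up values) */
        uint32_t pt_base;
        uint32_t flush_all;
};

struct sysmmu {
        uintptr_t sfrbase;      /* start of the MMIO region */
        const struct variant *variant;
};

#define SYSMMU_REG(data, reg)   ((data)->sfrbase + (data)->variant->reg)

int main(void)
{
        static const struct variant v5 = { .pt_base = 0x0c, .flush_all = 0x10 };
        struct sysmmu mmu = { .sfrbase = 0x10000000UL, .variant = &v5 };

        /* The driver would writel() to this address; here it is only printed. */
        printf("page-table base register at %#lx\n",
               (unsigned long)SYSMMU_REG(&mmu, pt_base));
        return 0;
}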
305 /* SysMMU v5 and v7 (non-VM capable) */
331 return container_of(dom, struct exynos_iommu_domain, domain); in to_exynos_domain()
336 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in sysmmu_unblock()
343 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in sysmmu_block()
344 while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1)) in sysmmu_block()
345 --i; in sysmmu_block()
347 if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) { in sysmmu_block()
365 if (MMU_MAJ_VER(data->version) < 5 || num_inv == 1) { in __sysmmu_tlb_invalidate_entry()
373 writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE, in __sysmmu_tlb_invalidate_entry()
383 if (MMU_MAJ_VER(data->version) < 5) in __sysmmu_set_ptbase()
394 BUG_ON(clk_prepare_enable(data->clk_master)); in __sysmmu_enable_clocks()
395 BUG_ON(clk_prepare_enable(data->clk)); in __sysmmu_enable_clocks()
396 BUG_ON(clk_prepare_enable(data->pclk)); in __sysmmu_enable_clocks()
397 BUG_ON(clk_prepare_enable(data->aclk)); in __sysmmu_enable_clocks()
402 clk_disable_unprepare(data->aclk); in __sysmmu_disable_clocks()
403 clk_disable_unprepare(data->pclk); in __sysmmu_disable_clocks()
404 clk_disable_unprepare(data->clk); in __sysmmu_disable_clocks()
405 clk_disable_unprepare(data->clk_master); in __sysmmu_disable_clocks()
410 u32 capa0 = readl(data->sfrbase + REG_V7_CAPA0); in __sysmmu_has_capa1()
417 u32 capa1 = readl(data->sfrbase + REG_V7_CAPA1); in __sysmmu_get_vcr()
419 data->has_vcr = capa1 & CAPA1_VCR_ENABLED; in __sysmmu_get_vcr()
428 ver = readl(data->sfrbase + REG_MMU_VERSION); in __sysmmu_get_version()
432 data->version = MAKE_MMU_VER(1, 0); in __sysmmu_get_version()
434 data->version = MMU_RAW_VER(ver); in __sysmmu_get_version()
436 dev_dbg(data->sysmmu, "hardware version: %d.%d\n", in __sysmmu_get_version()
437 MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version)); in __sysmmu_get_version()
439 if (MMU_MAJ_VER(data->version) < 5) { in __sysmmu_get_version()
440 data->variant = &sysmmu_v1_variant; in __sysmmu_get_version()
441 } else if (MMU_MAJ_VER(data->version) < 7) { in __sysmmu_get_version()
442 data->variant = &sysmmu_v5_variant; in __sysmmu_get_version()
446 if (data->has_vcr) in __sysmmu_get_version()
447 data->variant = &sysmmu_v7_vm_variant; in __sysmmu_get_version()
449 data->variant = &sysmmu_v5_variant; in __sysmmu_get_version()
461 dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n", in show_fault_information()
462 dev_name(data->master), finfo->name, fault_addr); in show_fault_information()
463 dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable); in show_fault_information()
464 ent = section_entry(phys_to_virt(data->pgtable), fault_addr); in show_fault_information()
465 dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent); in show_fault_information()
468 dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent); in show_fault_information()
479 int ret = -ENOSYS; in exynos_sysmmu_irq()
481 WARN_ON(!data->active); in exynos_sysmmu_irq()
483 if (MMU_MAJ_VER(data->version) < 5) { in exynos_sysmmu_irq()
491 spin_lock(&data->lock); in exynos_sysmmu_irq()
493 clk_enable(data->clk_master); in exynos_sysmmu_irq()
497 if (finfo->bit == itype) in exynos_sysmmu_irq()
503 fault_addr = readl(data->sfrbase + finfo->addr_reg); in exynos_sysmmu_irq()
506 if (data->domain) in exynos_sysmmu_irq()
507 ret = report_iommu_fault(&data->domain->domain, in exynos_sysmmu_irq()
508 data->master, fault_addr, finfo->type); in exynos_sysmmu_irq()
516 clk_disable(data->clk_master); in exynos_sysmmu_irq()
518 spin_unlock(&data->lock); in exynos_sysmmu_irq()
527 clk_enable(data->clk_master); in __sysmmu_disable()
529 spin_lock_irqsave(&data->lock, flags); in __sysmmu_disable()
530 writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_disable()
531 writel(0, data->sfrbase + REG_MMU_CFG); in __sysmmu_disable()
532 data->active = false; in __sysmmu_disable()
533 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_disable()
542 if (data->version <= MAKE_MMU_VER(3, 1)) in __sysmmu_init_config()
544 else if (data->version <= MAKE_MMU_VER(3, 2)) in __sysmmu_init_config()
549 cfg |= CFG_EAP; /* enable access protection bits check */ in __sysmmu_init_config()
551 writel(cfg, data->sfrbase + REG_MMU_CFG); in __sysmmu_init_config()
558 if (MMU_MAJ_VER(data->version) < 7 || !data->has_vcr) in __sysmmu_enable_vid()
561 ctrl = readl(data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
563 writel(ctrl, data->sfrbase + REG_V7_CTRL_VM); in __sysmmu_enable_vid()
572 spin_lock_irqsave(&data->lock, flags); in __sysmmu_enable()
573 writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
575 __sysmmu_set_ptbase(data, data->pgtable); in __sysmmu_enable()
577 writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL); in __sysmmu_enable()
578 data->active = true; in __sysmmu_enable()
579 spin_unlock_irqrestore(&data->lock, flags); in __sysmmu_enable()
587 clk_disable(data->clk_master); in __sysmmu_enable()
595 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
596 if (data->active && data->version >= MAKE_MMU_VER(3, 3)) { in sysmmu_tlb_invalidate_flpdcache()
597 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
599 if (data->version >= MAKE_MMU_VER(5, 0)) in sysmmu_tlb_invalidate_flpdcache()
605 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_flpdcache()
607 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_flpdcache()
615 spin_lock_irqsave(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
616 if (data->active) { in sysmmu_tlb_invalidate_entry()
619 clk_enable(data->clk_master); in sysmmu_tlb_invalidate_entry()
626 * because it is a set-associative TLB in sysmmu_tlb_invalidate_entry()
627 * with 8-way and 64 sets. in sysmmu_tlb_invalidate_entry()
631 if (MMU_MAJ_VER(data->version) == 2) in sysmmu_tlb_invalidate_entry()
638 clk_disable(data->clk_master); in sysmmu_tlb_invalidate_entry()
640 spin_unlock_irqrestore(&data->lock, flags); in sysmmu_tlb_invalidate_entry()
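The arithmetic behind the set-associative comment: the number of per-entry invalidations is the unmapped size divided by the 4 KiB small-page size, so a 4 KiB page costs one write, a 64 KiB large page sixteen, and a 1 MiB section would cost 256 but is capped at 64 because the TLB has only 64 sets and a given page can live in only one of them. As far as I can tell, the v2 path expresses this as a clamp along the lines of num_inv = min_t(unsigned int, size / SPAGE_SIZE, 64).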
648 struct device *dev = &pdev->dev; in exynos_sysmmu_probe()
654 return -ENOMEM; in exynos_sysmmu_probe()
657 data->sfrbase = devm_ioremap_resource(dev, res); in exynos_sysmmu_probe()
658 if (IS_ERR(data->sfrbase)) in exynos_sysmmu_probe()
659 return PTR_ERR(data->sfrbase); in exynos_sysmmu_probe()
672 data->clk = devm_clk_get(dev, "sysmmu"); in exynos_sysmmu_probe()
673 if (PTR_ERR(data->clk) == -ENOENT) in exynos_sysmmu_probe()
674 data->clk = NULL; in exynos_sysmmu_probe()
675 else if (IS_ERR(data->clk)) in exynos_sysmmu_probe()
676 return PTR_ERR(data->clk); in exynos_sysmmu_probe()
678 data->aclk = devm_clk_get(dev, "aclk"); in exynos_sysmmu_probe()
679 if (PTR_ERR(data->aclk) == -ENOENT) in exynos_sysmmu_probe()
680 data->aclk = NULL; in exynos_sysmmu_probe()
681 else if (IS_ERR(data->aclk)) in exynos_sysmmu_probe()
682 return PTR_ERR(data->aclk); in exynos_sysmmu_probe()
684 data->pclk = devm_clk_get(dev, "pclk"); in exynos_sysmmu_probe()
685 if (PTR_ERR(data->pclk) == -ENOENT) in exynos_sysmmu_probe()
686 data->pclk = NULL; in exynos_sysmmu_probe()
687 else if (IS_ERR(data->pclk)) in exynos_sysmmu_probe()
688 return PTR_ERR(data->pclk); in exynos_sysmmu_probe()
690 if (!data->clk && (!data->aclk || !data->pclk)) { in exynos_sysmmu_probe()
692 return -ENOSYS; in exynos_sysmmu_probe()
695 data->clk_master = devm_clk_get(dev, "master"); in exynos_sysmmu_probe()
696 if (PTR_ERR(data->clk_master) == -ENOENT) in exynos_sysmmu_probe()
697 data->clk_master = NULL; in exynos_sysmmu_probe()
698 else if (IS_ERR(data->clk_master)) in exynos_sysmmu_probe()
699 return PTR_ERR(data->clk_master); in exynos_sysmmu_probe()
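The repeated "treat -ENOENT as an absent, optional clock" pattern above predates devm_clk_get_optional(). On current kernels the same behaviour can be written more compactly; this is only a sketch of the modern idiom, not how this driver is structured.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper: fetch one optional SysMMU clock by name. */
static struct clk *my_get_optional_clk(struct device *dev, const char *name)
{
        struct clk *clk = devm_clk_get_optional(dev, name);

        if (IS_ERR(clk))
                dev_err(dev, "failed to get %s clock: %ld\n", name, PTR_ERR(clk));
        /* NULL means the DT simply omits this clock; clk_prepare_enable(NULL)
         * and clk_disable_unprepare(NULL) are harmless no-ops. */
        return clk;
}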
701 data->sysmmu = dev; in exynos_sysmmu_probe()
702 spin_lock_init(&data->lock); in exynos_sysmmu_probe()
706 ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL, in exynos_sysmmu_probe()
707 dev_name(data->sysmmu)); in exynos_sysmmu_probe()
711 ret = iommu_device_register(&data->iommu, &exynos_iommu_ops, dev); in exynos_sysmmu_probe()
718 if (MMU_MAJ_VER(data->version) < 5) { in exynos_sysmmu_probe()
729 if (MMU_MAJ_VER(data->version) >= 5) { in exynos_sysmmu_probe()
742 dma_dev = &pdev->dev; in exynos_sysmmu_probe()
749 iommu_device_unregister(&data->iommu); in exynos_sysmmu_probe()
751 iommu_device_sysfs_remove(&data->iommu); in exynos_sysmmu_probe()
758 struct device *master = data->master; in exynos_sysmmu_suspend()
763 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_suspend()
764 if (data->domain) { in exynos_sysmmu_suspend()
765 dev_dbg(data->sysmmu, "saving state\n"); in exynos_sysmmu_suspend()
768 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_suspend()
776 struct device *master = data->master; in exynos_sysmmu_resume()
781 mutex_lock(&owner->rpm_lock); in exynos_sysmmu_resume()
782 if (data->domain) { in exynos_sysmmu_resume()
783 dev_dbg(data->sysmmu, "restoring state\n"); in exynos_sysmmu_resume()
786 mutex_unlock(&owner->rpm_lock); in exynos_sysmmu_resume()
798 { .compatible = "samsung,exynos-sysmmu", },
805 .name = "exynos-sysmmu",
823 struct exynos_iommu_domain *domain; in exynos_iommu_domain_alloc() local
833 domain = kzalloc(sizeof(*domain), GFP_KERNEL); in exynos_iommu_domain_alloc()
834 if (!domain) in exynos_iommu_domain_alloc()
837 domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2); in exynos_iommu_domain_alloc()
838 if (!domain->pgtable) in exynos_iommu_domain_alloc()
841 domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1); in exynos_iommu_domain_alloc()
842 if (!domain->lv2entcnt) in exynos_iommu_domain_alloc()
847 domain->pgtable[i] = ZERO_LV2LINK; in exynos_iommu_domain_alloc()
849 handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE, in exynos_iommu_domain_alloc()
852 BUG_ON(handle != virt_to_phys(domain->pgtable)); in exynos_iommu_domain_alloc()
856 spin_lock_init(&domain->lock); in exynos_iommu_domain_alloc()
857 spin_lock_init(&domain->pgtablelock); in exynos_iommu_domain_alloc()
858 INIT_LIST_HEAD(&domain->clients); in exynos_iommu_domain_alloc()
860 domain->domain.geometry.aperture_start = 0; in exynos_iommu_domain_alloc()
861 domain->domain.geometry.aperture_end = ~0UL; in exynos_iommu_domain_alloc()
862 domain->domain.geometry.force_aperture = true; in exynos_iommu_domain_alloc()
864 return &domain->domain; in exynos_iommu_domain_alloc()
867 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_alloc()
869 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_alloc()
871 kfree(domain); in exynos_iommu_domain_alloc()
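For reference, the allocation orders in this function follow directly from the table geometry, assuming the usual layout of 4096 first-level entries covering the 32-bit IOVA space in 1 MiB sections: 4096 entries of 4 bytes give a 16 KiB first-level table, hence __get_free_pages(GFP_KERNEL, 2) (four pages), and the lv2entcnt[] array of 4096 shorts is 8 KiB, hence order 1 (two pages).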
877 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_domain_free() local
882 WARN_ON(!list_empty(&domain->clients)); in exynos_iommu_domain_free()
884 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_domain_free()
886 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_domain_free()
887 spin_lock(&data->lock); in exynos_iommu_domain_free()
889 data->pgtable = 0; in exynos_iommu_domain_free()
890 data->domain = NULL; in exynos_iommu_domain_free()
891 list_del_init(&data->domain_node); in exynos_iommu_domain_free()
892 spin_unlock(&data->lock); in exynos_iommu_domain_free()
895 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_domain_free()
897 dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE, in exynos_iommu_domain_free()
901 if (lv1ent_page(domain->pgtable + i)) { in exynos_iommu_domain_free()
902 phys_addr_t base = lv2table_base(domain->pgtable + i); in exynos_iommu_domain_free()
910 free_pages((unsigned long)domain->pgtable, 2); in exynos_iommu_domain_free()
911 free_pages((unsigned long)domain->lv2entcnt, 1); in exynos_iommu_domain_free()
912 kfree(domain); in exynos_iommu_domain_free()
918 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_detach_device() local
920 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_detach_device()
924 if (!has_sysmmu(dev) || owner->domain != iommu_domain) in exynos_iommu_detach_device()
927 mutex_lock(&owner->rpm_lock); in exynos_iommu_detach_device()
929 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_detach_device()
930 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_detach_device()
931 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_detach_device()
933 pm_runtime_put(data->sysmmu); in exynos_iommu_detach_device()
936 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_detach_device()
937 list_for_each_entry_safe(data, next, &domain->clients, domain_node) { in exynos_iommu_detach_device()
938 spin_lock(&data->lock); in exynos_iommu_detach_device()
939 data->pgtable = 0; in exynos_iommu_detach_device()
940 data->domain = NULL; in exynos_iommu_detach_device()
941 list_del_init(&data->domain_node); in exynos_iommu_detach_device()
942 spin_unlock(&data->lock); in exynos_iommu_detach_device()
944 owner->domain = NULL; in exynos_iommu_detach_device()
945 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_detach_device()
947 mutex_unlock(&owner->rpm_lock); in exynos_iommu_detach_device()
956 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_attach_device() local
959 phys_addr_t pagetable = virt_to_phys(domain->pgtable); in exynos_iommu_attach_device()
963 return -ENODEV; in exynos_iommu_attach_device()
965 if (owner->domain) in exynos_iommu_attach_device()
966 exynos_iommu_detach_device(owner->domain, dev); in exynos_iommu_attach_device()
968 mutex_lock(&owner->rpm_lock); in exynos_iommu_attach_device()
970 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_attach_device()
971 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
972 spin_lock(&data->lock); in exynos_iommu_attach_device()
973 data->pgtable = pagetable; in exynos_iommu_attach_device()
974 data->domain = domain; in exynos_iommu_attach_device()
975 list_add_tail(&data->domain_node, &domain->clients); in exynos_iommu_attach_device()
976 spin_unlock(&data->lock); in exynos_iommu_attach_device()
978 owner->domain = iommu_domain; in exynos_iommu_attach_device()
979 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_attach_device()
981 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_attach_device()
982 pm_runtime_get_noresume(data->sysmmu); in exynos_iommu_attach_device()
983 if (pm_runtime_active(data->sysmmu)) in exynos_iommu_attach_device()
985 pm_runtime_put(data->sysmmu); in exynos_iommu_attach_device()
988 mutex_unlock(&owner->rpm_lock); in exynos_iommu_attach_device()
996 static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain, in alloc_lv2entry() argument
1001 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1010 BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1)); in alloc_lv2entry()
1012 return ERR_PTR(-ENOMEM); in alloc_lv2entry()
1021 return ERR_PTR(-EADDRINUSE); in alloc_lv2entry()
1025 * If pre-fetched SLPD is a faulty SLPD in zero_l2_table, in alloc_lv2entry()
1044 spin_lock(&domain->lock); in alloc_lv2entry()
1045 list_for_each_entry(data, &domain->clients, domain_node) in alloc_lv2entry()
1047 spin_unlock(&domain->lock); in alloc_lv2entry()
1054 static int lv1set_section(struct exynos_iommu_domain *domain, in lv1set_section() argument
1061 return -EADDRINUSE; in lv1set_section()
1068 return -EADDRINUSE; in lv1set_section()
1077 spin_lock(&domain->lock); in lv1set_section()
1084 list_for_each_entry(data, &domain->clients, domain_node) in lv1set_section()
1087 spin_unlock(&domain->lock); in lv1set_section()
1097 return -EADDRINUSE; in lv2set_page()
1100 *pgcnt -= 1; in lv2set_page()
1111 memset(pent - i, 0, sizeof(*pent) * i); in lv2set_page()
1112 return -EADDRINUSE; in lv2set_page()
1120 *pgcnt -= SPAGES_PER_LPAGE; in lv2set_page()
1127 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
1148 * - Any two consecutive I/O virtual regions must have a hole of size larger
1150 * - Start address of an I/O virtual region must be aligned by 128KiB.
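A caller-side illustration of the second rule: whatever allocates I/O virtual regions for exynos-iommu masters has to round region starts up to a 128 KiB boundary (and, per the first rule, keep at least a 128 KiB hole between consecutive regions). A minimal, hypothetical helper follows; ALIGN() and SZ_128K are the standard kernel macros, the function name is made up.

#include <linux/align.h>
#include <linux/sizes.h>

/* Hypothetical wrapper: round a candidate IOVA base up to 128 KiB. */
static unsigned long my_iova_region_base(unsigned long hint)
{
        return ALIGN(hint, SZ_128K);
}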
1156 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_map() local
1160 int ret = -ENOMEM; in exynos_iommu_map()
1162 BUG_ON(domain->pgtable == NULL); in exynos_iommu_map()
1165 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_map()
1167 entry = section_entry(domain->pgtable, iova); in exynos_iommu_map()
1170 ret = lv1set_section(domain, entry, iova, paddr, prot, in exynos_iommu_map()
1171 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1175 pent = alloc_lv2entry(domain, entry, iova, in exynos_iommu_map()
1176 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1182 &domain->lv2entcnt[lv1ent_offset(iova)]); in exynos_iommu_map()
1189 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_map()
1194 static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain, in exynos_iommu_tlb_invalidate_entry() argument
1200 spin_lock_irqsave(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1202 list_for_each_entry(data, &domain->clients, domain_node) in exynos_iommu_tlb_invalidate_entry()
1205 spin_unlock_irqrestore(&domain->lock, flags); in exynos_iommu_tlb_invalidate_entry()
1212 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_unmap() local
1218 BUG_ON(domain->pgtable == NULL); in exynos_iommu_unmap()
1220 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1222 ent = section_entry(domain->pgtable, iova); in exynos_iommu_unmap()
1254 domain->lv2entcnt[lv1ent_offset(iova)] += 1; in exynos_iommu_unmap()
1272 domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE; in exynos_iommu_unmap()
1274 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1276 exynos_iommu_tlb_invalidate_entry(domain, iova, size); in exynos_iommu_unmap()
1280 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_unmap()
1291 struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain); in exynos_iommu_iova_to_phys() local
1296 spin_lock_irqsave(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1298 entry = section_entry(domain->pgtable, iova); in exynos_iommu_iova_to_phys()
1311 spin_unlock_irqrestore(&domain->pgtablelock, flags); in exynos_iommu_iova_to_phys()
1322 return ERR_PTR(-ENODEV); in exynos_iommu_probe_device()
1324 list_for_each_entry(data, &owner->controllers, owner_node) { in exynos_iommu_probe_device()
1330 data->link = device_link_add(dev, data->sysmmu, in exynos_iommu_probe_device()
1336 data = list_first_entry(&owner->controllers, in exynos_iommu_probe_device()
1339 return &data->iommu; in exynos_iommu_probe_device()
1347 if (owner->domain) { in exynos_iommu_release_device()
1351 WARN_ON(owner->domain != in exynos_iommu_release_device()
1353 exynos_iommu_detach_device(owner->domain, dev); in exynos_iommu_release_device()
1358 list_for_each_entry(data, &owner->controllers, owner_node) in exynos_iommu_release_device()
1359 device_link_del(data->link); in exynos_iommu_release_device()
1365 struct platform_device *sysmmu = of_find_device_by_node(spec->np); in exynos_iommu_of_xlate()
1370 return -ENODEV; in exynos_iommu_of_xlate()
1374 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1375 return -ENODEV; in exynos_iommu_of_xlate()
1381 put_device(&sysmmu->dev); in exynos_iommu_of_xlate()
1382 return -ENOMEM; in exynos_iommu_of_xlate()
1385 INIT_LIST_HEAD(&owner->controllers); in exynos_iommu_of_xlate()
1386 mutex_init(&owner->rpm_lock); in exynos_iommu_of_xlate()
1390 list_for_each_entry(entry, &owner->controllers, owner_node) in exynos_iommu_of_xlate()
1394 list_add_tail(&data->owner_node, &owner->controllers); in exynos_iommu_of_xlate()
1395 data->master = dev; in exynos_iommu_of_xlate()
1428 lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", in exynos_iommu_init()
1432 return -ENOMEM; in exynos_iommu_init()
1445 ret = -ENOMEM; in exynos_iommu_init()