Lines matching full:mmu
74 struct ipmmu_vmsa_device *mmu; member
102 /* MMU "context" registers */
152 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) in ipmmu_is_root() argument
154 return mmu->root == mmu; in ipmmu_is_root()
159 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); in __ipmmu_check_device() local
162 if (ipmmu_is_root(mmu)) in __ipmmu_check_device()
163 *rootp = mmu; in __ipmmu_check_device()
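The matches above come from the root-lookup helpers: on SoCs that split root and cache IPMMU instances, each leaf points at the root, and the root points at itself. Below is a minimal user-space sketch of that self-referential test; struct vmsa_dev is a stand-in carrying only the field the test needs.

#include <stdbool.h>

/* Minimal stand-in for struct ipmmu_vmsa_device: only the field the
 * root test needs. Leaf (cache) instances point at the root; the root
 * points at itself. */
struct vmsa_dev {
    struct vmsa_dev *root;
};

static bool is_root(const struct vmsa_dev *mmu)
{
    return mmu->root == mmu;
}

int main(void)
{
    struct vmsa_dev root_mmu = { .root = &root_mmu };
    struct vmsa_dev leaf_mmu = { .root = &root_mmu };

    return (is_root(&root_mmu) && !is_root(&leaf_mmu)) ? 0 : 1;
}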
180 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) in ipmmu_read() argument
182 return ioread32(mmu->base + offset); in ipmmu_read()
185 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, in ipmmu_write() argument
188 iowrite32(data, mmu->base + offset); in ipmmu_write()
191 static unsigned int ipmmu_ctx_reg(struct ipmmu_vmsa_device *mmu, in ipmmu_ctx_reg() argument
194 return mmu->features->ctx_offset_base + in ipmmu_ctx_reg()
195 context_id * mmu->features->ctx_offset_stride + reg; in ipmmu_ctx_reg()
198 static u32 ipmmu_ctx_read(struct ipmmu_vmsa_device *mmu, in ipmmu_ctx_read() argument
201 return ipmmu_read(mmu, ipmmu_ctx_reg(mmu, context_id, reg)); in ipmmu_ctx_read()
204 static void ipmmu_ctx_write(struct ipmmu_vmsa_device *mmu, in ipmmu_ctx_write() argument
207 ipmmu_write(mmu, ipmmu_ctx_reg(mmu, context_id, reg), data); in ipmmu_ctx_write()
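The context-register helpers reduce to one address computation: a context bank's register lives at ctx_offset_base + context_id * ctx_offset_stride + reg, with base and stride supplied per SoC through mmu->features. A runnable sketch with made-up base and stride values:

#include <stdio.h>

/* Hypothetical feature values -- the real numbers come from the
 * per-SoC ipmmu_features table in the driver. */
struct features {
    unsigned int ctx_offset_base;   /* offset of context bank 0 */
    unsigned int ctx_offset_stride; /* distance between banks */
};

/* Mirrors ipmmu_ctx_reg(): a context register lives at
 * base + context_id * stride + reg. */
static unsigned int ctx_reg(const struct features *f,
                            unsigned int context_id, unsigned int reg)
{
    return f->ctx_offset_base + context_id * f->ctx_offset_stride + reg;
}

int main(void)
{
    struct features f = { .ctx_offset_base = 0x0,
                          .ctx_offset_stride = 0x40 };

    /* context 2, register 0x10 -> 0x0 + 2 * 0x40 + 0x10 = 0x90 */
    printf("0x%x\n", ctx_reg(&f, 2, 0x10));
    return 0;
}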
213 return ipmmu_ctx_read(domain->mmu->root, domain->context_id, reg); in ipmmu_ctx_read_root()
219 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); in ipmmu_ctx_write_root()
225 if (domain->mmu != domain->mmu->root) in ipmmu_ctx_write_all()
226 ipmmu_ctx_write(domain->mmu, domain->context_id, reg, data); in ipmmu_ctx_write_all()
228 ipmmu_ctx_write(domain->mmu->root, domain->context_id, reg, data); in ipmmu_ctx_write_all()
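ipmmu_ctx_write_all() shows the root/cache split at work: a write lands on the cache instance when the domain sits behind one, and always on the root, while ipmmu_ctx_read_root() only ever reads from the root. A sketch of that broadcast, with printf standing in for the iowrite32() MMIO access:

#include <stdio.h>

/* Stand-in for the device; field names mirror the driver, the write
 * is faked with printf. */
struct vmsa_dev {
    const char *name;
    struct vmsa_dev *root;
};

static void ctx_write(struct vmsa_dev *mmu, unsigned int context_id,
                      unsigned int reg, unsigned int data)
{
    printf("%s: ctx %u reg 0x%x <- 0x%x\n", mmu->name, context_id, reg, data);
}

/* Mirrors ipmmu_ctx_write_all(): hit the cache instance first when the
 * domain sits behind one, then always hit the root. */
static void ctx_write_all(struct vmsa_dev *mmu, unsigned int context_id,
                          unsigned int reg, unsigned int data)
{
    if (mmu != mmu->root)
        ctx_write(mmu, context_id, reg, data);
    ctx_write(mmu->root, context_id, reg, data);
}

int main(void)
{
    struct vmsa_dev root = { "root", &root };
    struct vmsa_dev leaf = { "leaf", &root };

    ctx_write_all(&leaf, 0, 0x0, 1);  /* two writes: leaf then root */
    ctx_write_all(&root, 0, 0x0, 1);  /* one write: root only */
    return 0;
}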
231 static u32 ipmmu_utlb_reg(struct ipmmu_vmsa_device *mmu, unsigned int reg) in ipmmu_utlb_reg() argument
233 return mmu->features->utlb_offset_base + reg; in ipmmu_utlb_reg()
236 static void ipmmu_imuasid_write(struct ipmmu_vmsa_device *mmu, in ipmmu_imuasid_write() argument
239 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUASID(utlb)), data); in ipmmu_imuasid_write()
242 static void ipmmu_imuctr_write(struct ipmmu_vmsa_device *mmu, in ipmmu_imuctr_write() argument
245 ipmmu_write(mmu, ipmmu_utlb_reg(mmu, IMUCTR(utlb)), data); in ipmmu_imuctr_write()
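The uTLB helpers follow the same addressing pattern: IMUASID(n) and IMUCTR(n) expand to per-uTLB offsets, which ipmmu_utlb_reg() shifts by a per-SoC utlb_offset_base. The macro values below are invented for illustration; the real layout differs across IPMMU generations.

#include <stdio.h>

/* Illustrative per-uTLB register layout; the real IMUASID()/IMUCTR()
 * macros and utlb_offset_base values are generation-specific. */
#define UTLB_STRIDE    0x10
#define IMUASID_OFF(n) (0x0 + (n) * UTLB_STRIDE)
#define IMUCTR_OFF(n)  (0x8 + (n) * UTLB_STRIDE)

/* Mirrors ipmmu_utlb_reg(): base + per-uTLB register offset. */
static unsigned int utlb_reg(unsigned int utlb_offset_base, unsigned int reg)
{
    return utlb_offset_base + reg;
}

int main(void)
{
    unsigned int base = 0x100;  /* hypothetical utlb_offset_base */

    printf("IMUASID(3) @ 0x%x\n", utlb_reg(base, IMUASID_OFF(3)));
    printf("IMUCTR(3)  @ 0x%x\n", utlb_reg(base, IMUCTR_OFF(3)));
    return 0;
}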
260 dev_err_ratelimited(domain->mmu->dev, in ipmmu_tlb_sync()
261 "TLB sync timed out -- MMU may be deadlocked\n"); in ipmmu_tlb_sync()
280 * Enable MMU translation for the microTLB.
285 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_enable() local
293 ipmmu_imuasid_write(mmu, utlb, 0); in ipmmu_utlb_enable()
295 ipmmu_imuctr_write(mmu, utlb, IMUCTR_TTSEL_MMU(domain->context_id) | in ipmmu_utlb_enable()
297 mmu->utlb_ctx[utlb] = domain->context_id; in ipmmu_utlb_enable()
301 * Disable MMU translation for the microTLB.
306 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_utlb_disable() local
308 ipmmu_imuctr_write(mmu, utlb, 0); in ipmmu_utlb_disable()
309 mmu->utlb_ctx[utlb] = IPMMU_CTX_INVALID; in ipmmu_utlb_disable()
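Enable and disable are symmetric: enable programs IMUASID and IMUCTR to route the uTLB to a context and records the binding in mmu->utlb_ctx[]; disable clears IMUCTR and marks the slot IPMMU_CTX_INVALID. That array is what resume replays later. A sketch with an invented bit layout:

#include <stdio.h>

#define NUM_UTLBS   8      /* illustrative; per-SoC in the driver */
#define CTX_INVALID 0xff   /* stands in for IPMMU_CTX_INVALID */

static unsigned char utlb_ctx[NUM_UTLBS];

/* Stand-in for the IMUCTR write; the real enable value is
 * IMUCTR_TTSEL_MMU(ctx) | IMUCTR_FLUSH | IMUCTR_MMUEN, and the driver
 * also zeroes IMUASID first. */
static void imuctr_write(unsigned int utlb, unsigned int val)
{
    printf("IMUCTR[%u] <- 0x%x\n", utlb, val);
}

static void utlb_enable(unsigned int utlb, unsigned int ctx)
{
    imuctr_write(utlb, (ctx << 4) | 1);  /* invented bit layout */
    utlb_ctx[utlb] = ctx;                /* remembered for resume */
}

static void utlb_disable(unsigned int utlb)
{
    imuctr_write(utlb, 0);
    utlb_ctx[utlb] = CTX_INVALID;
}

int main(void)
{
    for (unsigned int i = 0; i < NUM_UTLBS; i++)
        utlb_ctx[i] = CTX_INVALID;
    utlb_enable(2, 0);
    utlb_disable(2);
    return 0;
}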
335 static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, in ipmmu_domain_allocate_context() argument
341 spin_lock_irqsave(&mmu->lock, flags); in ipmmu_domain_allocate_context()
343 ret = find_first_zero_bit(mmu->ctx, mmu->num_ctx); in ipmmu_domain_allocate_context()
344 if (ret != mmu->num_ctx) { in ipmmu_domain_allocate_context()
345 mmu->domains[ret] = domain; in ipmmu_domain_allocate_context()
346 set_bit(ret, mmu->ctx); in ipmmu_domain_allocate_context()
350 spin_unlock_irqrestore(&mmu->lock, flags); in ipmmu_domain_allocate_context()
355 static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, in ipmmu_domain_free_context() argument
360 spin_lock_irqsave(&mmu->lock, flags); in ipmmu_domain_free_context()
362 clear_bit(context_id, mmu->ctx); in ipmmu_domain_free_context()
363 mmu->domains[context_id] = NULL; in ipmmu_domain_free_context()
365 spin_unlock_irqrestore(&mmu->lock, flags); in ipmmu_domain_free_context()
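Context allocation is a bitmap search under mmu->lock: find the first clear bit among num_ctx contexts, set it, and record the owning domain; freeing reverses both steps. A single-threaded sketch of the same bookkeeping (the real code uses find_first_zero_bit()/set_bit() under a spinlock):

#include <stdio.h>

#define NUM_CTX 8  /* the driver caps this at IPMMU_CTX_MAX */

static unsigned long ctx_bitmap;  /* bit n set => context n in use */
static void *domains[NUM_CTX];    /* owning domain per context */

/* Mirrors ipmmu_domain_allocate_context(): find the first clear bit,
 * claim it, record the owner. */
static int allocate_context(void *domain)
{
    for (int i = 0; i < NUM_CTX; i++) {
        if (!(ctx_bitmap & (1UL << i))) {
            ctx_bitmap |= 1UL << i;
            domains[i] = domain;
            return i;
        }
    }
    return -1;  /* all contexts busy; the driver returns -EBUSY */
}

/* Mirrors ipmmu_domain_free_context(). */
static void free_context(int id)
{
    ctx_bitmap &= ~(1UL << id);
    domains[id] = NULL;
}

int main(void)
{
    int dummy;
    int id = allocate_context(&dummy);

    printf("allocated context %d\n", id);
    free_context(id);
    return 0;
}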
383 if (domain->mmu->features->twobit_imttbcr_sl0) in ipmmu_domain_setup_context()
388 if (domain->mmu->features->cache_snoop) in ipmmu_domain_setup_context()
399 if (domain->mmu->features->setup_imbuscr) in ipmmu_domain_setup_context()
412 * Enable the MMU and interrupt generation. The long-descriptor translation table format doesn't use TEX remapping. in ipmmu_domain_setup_context()
448 domain->cfg.iommu_dev = domain->mmu->root->dev; in ipmmu_domain_init_context()
453 ret = ipmmu_domain_allocate_context(domain->mmu->root, domain); in ipmmu_domain_init_context()
462 ipmmu_domain_free_context(domain->mmu->root, in ipmmu_domain_init_context()
473 if (!domain->mmu) in ipmmu_domain_destroy_context()
484 ipmmu_domain_free_context(domain->mmu->root, domain->context_id); in ipmmu_domain_destroy_context()
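The context lifecycle always targets the root instance (domain->mmu->root), where the page tables and context registers live, and destroy must tolerate a domain freed before any device attached, i.e. domain->mmu == NULL. A sketch with the io-pgtable setup elided:

#include <stddef.h>

struct vmsa_dev { struct vmsa_dev *root; unsigned long ctx_bitmap; };
struct vmsa_domain { struct vmsa_dev *mmu; int context_id; };

static int alloc_ctx(struct vmsa_dev *root)
{
    for (int i = 0; i < 8; i++) {
        if (!(root->ctx_bitmap & (1UL << i))) {
            root->ctx_bitmap |= 1UL << i;
            return i;
        }
    }
    return -1;
}

static void free_ctx(struct vmsa_dev *root, int id)
{
    root->ctx_bitmap &= ~(1UL << id);
}

/* Mirrors ipmmu_domain_init_context(): claim a context on the root.
 * The real code then allocates io-pgtable ops and programs the context
 * registers, freeing the context again if that fails. */
static int init_context(struct vmsa_domain *domain)
{
    domain->context_id = alloc_ctx(domain->mmu->root);
    return domain->context_id < 0 ? -1 : 0;
}

/* Mirrors ipmmu_domain_destroy_context(). */
static void destroy_context(struct vmsa_domain *domain)
{
    if (!domain->mmu)
        return;  /* never attached: nothing to tear down */
    free_ctx(domain->mmu->root, domain->context_id);
}

int main(void)
{
    struct vmsa_dev root = { .root = &root };
    struct vmsa_domain dom = { .mmu = &root };

    if (init_context(&dom))
        return 1;
    destroy_context(&dom);
    return 0;
}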
494 struct ipmmu_vmsa_device *mmu = domain->mmu; in ipmmu_domain_irq() local
516 dev_err_ratelimited(mmu->dev, "Multiple TLB hits @0x%lx\n", in ipmmu_domain_irq()
519 dev_err_ratelimited(mmu->dev, "Page Table Walk Abort @0x%lx\n", in ipmmu_domain_irq()
531 if (!report_iommu_fault(&domain->io_domain, mmu->dev, iova, 0)) in ipmmu_domain_irq()
534 dev_err_ratelimited(mmu->dev, in ipmmu_domain_irq()
543 struct ipmmu_vmsa_device *mmu = dev; in ipmmu_irq() local
548 spin_lock_irqsave(&mmu->lock, flags); in ipmmu_irq()
553 for (i = 0; i < mmu->num_ctx; i++) { in ipmmu_irq()
554 if (!mmu->domains[i]) in ipmmu_irq()
556 if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) in ipmmu_irq()
560 spin_unlock_irqrestore(&mmu->lock, flags); in ipmmu_irq()
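All contexts of an instance share one interrupt line, so the top-level handler walks mmu->domains[] under the lock and lets each active context check its own status. The sketch below keeps that dispatch shape; domain_irq() is a stand-in that pretends a fault was pending.

#include <stdio.h>

#define NUM_CTX 8

enum irqreturn { IRQ_NONE, IRQ_HANDLED };

/* One slot per hardware context; NULL means the context is unused.
 * The driver guards this array with mmu->lock. */
static void *domains[NUM_CTX];

/* Stand-in for ipmmu_domain_irq(), which reads the context's status
 * register and reports faults. */
static enum irqreturn domain_irq(void *domain)
{
    (void)domain;
    return IRQ_HANDLED;  /* pretend a fault was pending */
}

/* Mirrors ipmmu_irq(): one interrupt line for all contexts, so poll
 * every active one and accumulate the result. */
static enum irqreturn mmu_irq(void)
{
    enum irqreturn status = IRQ_NONE;

    for (int i = 0; i < NUM_CTX; i++) {
        if (!domains[i])
            continue;
        if (domain_irq(domains[i]) == IRQ_HANDLED)
            status = IRQ_HANDLED;
    }
    return status;
}

int main(void)
{
    int dummy;

    domains[3] = &dummy;
    return mmu_irq() == IRQ_HANDLED ? 0 : 1;
}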
621 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); in ipmmu_attach_device() local
626 if (!mmu) { in ipmmu_attach_device()
633 if (!domain->mmu) { in ipmmu_attach_device()
635 domain->mmu = mmu; in ipmmu_attach_device()
639 domain->mmu = NULL; in ipmmu_attach_device()
644 } else if (domain->mmu != mmu) { in ipmmu_attach_device()
650 dev_name(mmu->dev), dev_name(domain->mmu->dev)); in ipmmu_attach_device()
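The attach logic is first-writer-wins: the first device binds the domain to its IPMMU instance and triggers context init (with domain->mmu reset to NULL if that fails); any later device behind a different instance is refused, since a hardware context cannot span IPMMUs. A sketch of just that decision:

#include <stdio.h>

struct vmsa_dev { const char *name; };
struct vmsa_domain { struct vmsa_dev *mmu; };

/* Mirrors the core of ipmmu_attach_device(); context init and its
 * rollback are represented by comments only. */
static int attach(struct vmsa_domain *domain, struct vmsa_dev *mmu)
{
    if (!mmu)
        return -1;                /* device not behind an IPMMU */
    if (!domain->mmu) {
        domain->mmu = mmu;        /* first attach: claim the instance */
        /* real code: ipmmu_domain_init_context(domain), with
         * domain->mmu = NULL on failure */
    } else if (domain->mmu != mmu) {
        fprintf(stderr, "can't attach: domain already on %s\n",
                domain->mmu->name);
        return -1;
    }
    return 0;
}

int main(void)
{
    struct vmsa_dev a = { "ipmmu-a" }, b = { "ipmmu-b" };
    struct vmsa_domain dom = { 0 };

    attach(&dom, &a);
    return attach(&dom, &b) ? 0 : 1;  /* second instance must be refused */
}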
704 if (domain->mmu) in ipmmu_flush_iotlb_all()
810 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); in ipmmu_init_arm_mapping() local
822 if (!mmu->mapping) { in ipmmu_init_arm_mapping()
828 dev_err(mmu->dev, "failed to create ARM IOMMU mapping\n"); in ipmmu_init_arm_mapping()
833 mmu->mapping = mapping; in ipmmu_init_arm_mapping()
837 ret = arm_iommu_attach_device(dev, mmu->mapping); in ipmmu_init_arm_mapping()
846 if (mmu->mapping) in ipmmu_init_arm_mapping()
847 arm_iommu_release_mapping(mmu->mapping); in ipmmu_init_arm_mapping()
854 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); in ipmmu_probe_device() local
859 if (!mmu) in ipmmu_probe_device()
862 return &mmu->iommu; in ipmmu_probe_device()
883 struct ipmmu_vmsa_device *mmu = to_ipmmu(dev); in ipmmu_find_group() local
886 if (mmu->group) in ipmmu_find_group()
887 return iommu_group_ref_get(mmu->group); in ipmmu_find_group()
891 mmu->group = group; in ipmmu_find_group()
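ipmmu_find_group() keeps a single iommu_group per IPMMU instance: allocate it lazily on first call, hand out references afterwards. A sketch with a toy refcounted group standing in for the iommu_group API:

#include <stdio.h>

/* Stand-in for struct iommu_group: just a refcount. */
struct group { int refs; };

static struct group the_group;

static struct group *group_alloc(void)
{
    the_group.refs = 1;
    return &the_group;
}

static struct group *group_ref_get(struct group *g)
{
    g->refs++;
    return g;
}

/* Mirrors ipmmu_find_group(): every device behind one IPMMU instance
 * lands in the same group, created lazily on first use. */
static struct group *find_group(struct group **cached)
{
    if (*cached)
        return group_ref_get(*cached);
    *cached = group_alloc();
    return *cached;
}

int main(void)
{
    struct group *cached = NULL;

    find_group(&cached);
    find_group(&cached);
    printf("refs: %d\n", cached->refs);  /* 2 */
    return 0;
}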
919 static void ipmmu_device_reset(struct ipmmu_vmsa_device *mmu) in ipmmu_device_reset() argument
924 for (i = 0; i < mmu->num_ctx; ++i) in ipmmu_device_reset()
925 ipmmu_ctx_write(mmu, i, IMCTR, 0); in ipmmu_device_reset()
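Reset is simply "write 0 to every context's IMCTR", which turns off translation and interrupt generation for all num_ctx contexts until they are programmed again. A short sketch of that loop:

#include <stdio.h>

#define NUM_CTX 8
#define IMCTR   0x0  /* context control register, offset 0 in the bank */

/* Stand-in for ipmmu_ctx_write(). */
static void ctx_write(unsigned int ctx, unsigned int reg, unsigned int val)
{
    printf("ctx %u: reg 0x%x <- 0x%x\n", ctx, reg, val);
}

/* Mirrors ipmmu_device_reset(): disable every context. */
static void device_reset(void)
{
    for (unsigned int i = 0; i < NUM_CTX; i++)
        ctx_write(i, IMCTR, 0);
}

int main(void)
{
    device_reset();
    return 0;
}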
1000 struct ipmmu_vmsa_device *mmu; in ipmmu_probe() local
1005 mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); in ipmmu_probe()
1006 if (!mmu) { in ipmmu_probe()
1011 mmu->dev = &pdev->dev; in ipmmu_probe()
1012 spin_lock_init(&mmu->lock); in ipmmu_probe()
1013 bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); in ipmmu_probe()
1014 mmu->features = of_device_get_match_data(&pdev->dev); in ipmmu_probe()
1015 memset(mmu->utlb_ctx, IPMMU_CTX_INVALID, mmu->features->num_utlbs); in ipmmu_probe()
1020 mmu->base = devm_ioremap_resource(&pdev->dev, res); in ipmmu_probe()
1021 if (IS_ERR(mmu->base)) in ipmmu_probe()
1022 return PTR_ERR(mmu->base); in ipmmu_probe()
1036 if (mmu->features->use_ns_alias_offset) in ipmmu_probe()
1037 mmu->base += IM_NS_ALIAS_OFFSET; in ipmmu_probe()
1039 mmu->num_ctx = min(IPMMU_CTX_MAX, mmu->features->number_of_contexts); in ipmmu_probe()
1045 if (!mmu->features->has_cache_leaf_nodes || in ipmmu_probe()
1047 mmu->root = mmu; in ipmmu_probe()
1049 mmu->root = ipmmu_find_root(); in ipmmu_probe()
1054 if (!mmu->root) in ipmmu_probe()
1058 if (ipmmu_is_root(mmu)) { in ipmmu_probe()
1064 dev_name(&pdev->dev), mmu); in ipmmu_probe()
1070 ipmmu_device_reset(mmu); in ipmmu_probe()
1072 if (mmu->features->reserved_context) { in ipmmu_probe()
1074 set_bit(0, mmu->ctx); in ipmmu_probe()
1083 if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) { in ipmmu_probe()
1084 ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, in ipmmu_probe()
1089 iommu_device_set_ops(&mmu->iommu, &ipmmu_ops); in ipmmu_probe()
1090 iommu_device_set_fwnode(&mmu->iommu, in ipmmu_probe()
1093 ret = iommu_device_register(&mmu->iommu); in ipmmu_probe()
1109 platform_set_drvdata(pdev, mmu); in ipmmu_probe()
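Probe splits duties by role: every instance resolves its root (deferring when the root hasn't probed yet), the root requests the shared IRQ, resets the contexts and optionally reserves context 0, and only instances that front devices register with the IOMMU core. A condensed sketch of that control flow, with setup details elided and an explicit is_root_instance flag replacing the driver's device-tree checks:

#include <stdbool.h>
#include <stdio.h>

struct vmsa_dev {
    struct vmsa_dev *root;
    bool has_cache_leaf_nodes;   /* SoC splits root and cache instances */
    bool reserved_context;       /* keep context 0 for the root */
    unsigned long ctx_bitmap;
};

/* Condensed probe flow; IRQ setup, reset and sysfs registration are
 * represented by comments/printf only. */
static int probe(struct vmsa_dev *mmu, bool is_root_instance,
                 struct vmsa_dev *root_or_null)
{
    if (!mmu->has_cache_leaf_nodes || is_root_instance)
        mmu->root = mmu;
    else
        mmu->root = root_or_null;      /* ipmmu_find_root() */
    if (!mmu->root)
        return -1;                     /* root not ready: defer probe */

    if (mmu->root == mmu) {
        /* root duties: request the shared IRQ, reset all contexts */
        if (mmu->reserved_context)
            mmu->ctx_bitmap |= 1UL;    /* set_bit(0, mmu->ctx) */
    }

    if (!mmu->has_cache_leaf_nodes || mmu->root != mmu)
        printf("register with the IOMMU core\n");
    return 0;
}

int main(void)
{
    struct vmsa_dev root = { .has_cache_leaf_nodes = true,
                             .reserved_context = true };
    struct vmsa_dev leaf = { .has_cache_leaf_nodes = true };

    probe(&root, true, NULL);
    probe(&leaf, false, &root);
    return 0;
}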
1116 struct ipmmu_vmsa_device *mmu = platform_get_drvdata(pdev); in ipmmu_remove() local
1118 iommu_device_sysfs_remove(&mmu->iommu); in ipmmu_remove()
1119 iommu_device_unregister(&mmu->iommu); in ipmmu_remove()
1121 arm_iommu_release_mapping(mmu->mapping); in ipmmu_remove()
1123 ipmmu_device_reset(mmu); in ipmmu_remove()
1131 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); in ipmmu_resume_noirq() local
1134 /* Reset root MMU and restore contexts */ in ipmmu_resume_noirq()
1135 if (ipmmu_is_root(mmu)) { in ipmmu_resume_noirq()
1136 ipmmu_device_reset(mmu); in ipmmu_resume_noirq()
1138 for (i = 0; i < mmu->num_ctx; i++) { in ipmmu_resume_noirq()
1139 if (!mmu->domains[i]) in ipmmu_resume_noirq()
1142 ipmmu_domain_setup_context(mmu->domains[i]); in ipmmu_resume_noirq()
1147 for (i = 0; i < mmu->features->num_utlbs; i++) { in ipmmu_resume_noirq()
1148 if (mmu->utlb_ctx[i] == IPMMU_CTX_INVALID) in ipmmu_resume_noirq()
1151 ipmmu_utlb_enable(mmu->root->domains[mmu->utlb_ctx[i]], i); in ipmmu_resume_noirq()
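Resume rebuilds hardware state in two passes on the root: reprogram every context that still has a live domain, then re-route each uTLB whose saved utlb_ctx[] slot isn't IPMMU_CTX_INVALID. A sketch of that replay order:

#include <stdio.h>

#define NUM_CTX     8
#define NUM_UTLBS   8
#define CTX_INVALID 0xff   /* stands in for IPMMU_CTX_INVALID */

static void *domains[NUM_CTX];            /* live domain per context */
static unsigned char utlb_ctx[NUM_UTLBS]; /* saved uTLB -> context map */

/* Stand-ins for ipmmu_domain_setup_context() and ipmmu_utlb_enable(). */
static void setup_context(int ctx)
{
    printf("reprogram context %d\n", ctx);
}

static void utlb_enable(int utlb, int ctx)
{
    printf("re-route uTLB %d -> context %d\n", utlb, ctx);
}

/* Mirrors ipmmu_resume_noirq() on the root: the hardware lost its
 * registers across suspend, so first rebuild every live context, then
 * re-point each uTLB at the context recorded in utlb_ctx[]. */
static void resume_noirq(void)
{
    for (int i = 0; i < NUM_CTX; i++)
        if (domains[i])
            setup_context(i);

    for (int i = 0; i < NUM_UTLBS; i++)
        if (utlb_ctx[i] != CTX_INVALID)
            utlb_enable(i, utlb_ctx[i]);
}

int main(void)
{
    int dummy;

    for (int i = 0; i < NUM_UTLBS; i++)
        utlb_ctx[i] = CTX_INVALID;
    domains[0] = &dummy;  /* pretend context 0 has a domain */
    utlb_ctx[2] = 0;      /* pretend uTLB 2 was routed to context 0 */
    resume_noirq();
    return 0;
}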