Lines Matching +full:mmu +full:- +full:500

1 // SPDX-License-Identifier: GPL-2.0-only
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-mapping.h>
41 #include "arm-smmu.h"
42 #include "../../dma-iommu.h"
51 #define QCOM_DUMMY_VAL -1
74 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_get()
75 return pm_runtime_resume_and_get(smmu->dev); in arm_smmu_rpm_get()
82 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_put()
83 pm_runtime_put_autosuspend(smmu->dev); in arm_smmu_rpm_put()
98 struct pci_bus *bus = to_pci_dev(dev)->bus; in dev_get_dev_node()
101 bus = bus->parent; in dev_get_dev_node()
102 return of_node_get(bus->bridge->parent->of_node); in dev_get_dev_node()
105 return of_node_get(dev->of_node); in dev_get_dev_node()
117 struct device_node *np = it->node; in __find_legacy_master_phandle()
120 of_for_each_phandle(it, err, dev->of_node, "mmu-masters", in __find_legacy_master_phandle()
121 "#stream-id-cells", -1) in __find_legacy_master_phandle()
122 if (it->node == np) { in __find_legacy_master_phandle()
126 it->node = np; in __find_legacy_master_phandle()
127 return err == -ENOENT ? 0 : err; in __find_legacy_master_phandle()
142 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) { in arm_smmu_register_legacy_master()
144 return -ENODEV; in arm_smmu_register_legacy_master()
153 return -ENODEV; in arm_smmu_register_legacy_master()
158 /* "mmu-masters" assumes Stream ID == Requester ID */ in arm_smmu_register_legacy_master()
165 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, in arm_smmu_register_legacy_master()
172 return -ENOMEM; in arm_smmu_register_legacy_master()
184 return -ENODEV; in arm_smmu_register_legacy_master()
200 if (smmu->impl && unlikely(smmu->impl->tlb_sync)) in __arm_smmu_tlb_sync()
201 return smmu->impl->tlb_sync(smmu, page, sync, status); in __arm_smmu_tlb_sync()
205 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { in __arm_smmu_tlb_sync()
213 dev_err_ratelimited(smmu->dev, in __arm_smmu_tlb_sync()
214 "TLB sync timed out -- SMMU may be deadlocked\n"); in __arm_smmu_tlb_sync()
221 spin_lock_irqsave(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
224 spin_unlock_irqrestore(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
229 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_sync_context()
232 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
233 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), in arm_smmu_tlb_sync_context()
235 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
246 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, in arm_smmu_tlb_inv_context_s1()
247 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); in arm_smmu_tlb_inv_context_s1()
254 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context_s2()
258 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_inv_context_s2()
266 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s1()
267 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_range_s1()
268 int idx = cfg->cbndx; in arm_smmu_tlb_inv_range_s1()
270 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s1()
273 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_tlb_inv_range_s1()
275 iova |= cfg->asid; in arm_smmu_tlb_inv_range_s1()
279 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
282 iova |= (u64)cfg->asid << 48; in arm_smmu_tlb_inv_range_s1()
286 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
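The stage-1 range invalidation above builds the value written to the per-page TLBIVA register by folding the ASID into the address: for the AArch32 formats the ASID lands in the low bits, for AArch64 in bits [63:48], and the elided lines first strip the 12-bit page offset from the IOVA. A small standalone illustration of the 64-bit case; the helper name and the page-offset shift are assumptions drawn from the surrounding code:

#include <stdint.h>
#include <stdio.h>

static uint64_t tlbiva_operand_aarch64(uint64_t iova, uint16_t asid)
{
	/* mirrors "iova |= (u64)cfg->asid << 48" above, after iova >>= 12 */
	return (iova >> 12) | ((uint64_t)asid << 48);
}

int main(void)
{
	/* e.g. invalidate IOVA 0x80042000 in ASID 5 */
	printf("TLBIVA operand = 0x%016llx\n",
	       (unsigned long long)tlbiva_operand_aarch64(0x80042000ULL, 5));
	return 0;
}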
294 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s2()
295 int idx = smmu_domain->cfg.cbndx; in arm_smmu_tlb_inv_range_s2()
297 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s2()
302 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_tlb_inv_range_s2()
307 } while (size -= granule); in arm_smmu_tlb_inv_range_s2()
314 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_walk_s1()
316 if (cfg->flush_walk_prefer_tlbiasid) { in arm_smmu_tlb_inv_walk_s1()
355 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
358 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
366 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_add_page_s2_v1()
368 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_add_page_s2_v1()
371 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_add_page_s2_v1()
398 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_context_fault()
399 int idx = smmu_domain->cfg.cbndx; in arm_smmu_context_fault()
413 if (ret == -ENOSYS) in arm_smmu_context_fault()
414 dev_err_ratelimited(smmu->dev, in arm_smmu_context_fault()
440 dev_err(smmu->dev, in arm_smmu_global_fault()
441 …"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may h… in arm_smmu_global_fault()
444 dev_err(smmu->dev, in arm_smmu_global_fault()
446 dev_err(smmu->dev, in arm_smmu_global_fault()
458 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_context_bank()
459 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; in arm_smmu_init_context_bank()
460 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_init_context_bank()
462 cb->cfg = cfg; in arm_smmu_init_context_bank()
466 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
467 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; in arm_smmu_init_context_bank()
469 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg); in arm_smmu_init_context_bank()
470 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg); in arm_smmu_init_context_bank()
471 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_init_context_bank()
472 cb->tcr[1] |= ARM_SMMU_TCR2_AS; in arm_smmu_init_context_bank()
474 cb->tcr[0] |= ARM_SMMU_TCR_EAE; in arm_smmu_init_context_bank()
477 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg); in arm_smmu_init_context_bank()
482 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
483 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr; in arm_smmu_init_context_bank()
484 cb->ttbr[1] = 0; in arm_smmu_init_context_bank()
486 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
487 cfg->asid); in arm_smmu_init_context_bank()
488 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
489 cfg->asid); in arm_smmu_init_context_bank()
491 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) in arm_smmu_init_context_bank()
492 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
494 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
497 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; in arm_smmu_init_context_bank()
500 /* MAIRs (stage-1 only) */ in arm_smmu_init_context_bank()
502 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
503 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; in arm_smmu_init_context_bank()
504 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; in arm_smmu_init_context_bank()
506 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair; in arm_smmu_init_context_bank()
507 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32; in arm_smmu_init_context_bank()
516 struct arm_smmu_cb *cb = &smmu->cbs[idx]; in arm_smmu_write_context_bank()
517 struct arm_smmu_cfg *cfg = cb->cfg; in arm_smmu_write_context_bank()
525 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_write_context_bank()
528 if (smmu->version > ARM_SMMU_V1) { in arm_smmu_write_context_bank()
529 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_write_context_bank()
533 /* 16-bit VMIDs live in CBA2R */ in arm_smmu_write_context_bank()
534 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_write_context_bank()
535 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid); in arm_smmu_write_context_bank()
541 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar); in arm_smmu_write_context_bank()
542 if (smmu->version < ARM_SMMU_V2) in arm_smmu_write_context_bank()
543 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx); in arm_smmu_write_context_bank()
554 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { in arm_smmu_write_context_bank()
555 /* 8-bit VMIDs live in CBAR */ in arm_smmu_write_context_bank()
556 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid); in arm_smmu_write_context_bank()
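The two branches above decide where the VMID is programmed: with ARM_SMMU_FEAT_VMID16 the 16-bit value goes into CBA2R, otherwise the 8-bit value is packed into CBAR next to the type (and, on SMMUv1, the interrupt index). A standalone sketch of just that choice; the bit positions used here (CBA2R VMID16 at [31:16], CBAR VMID at [7:0]) are assumptions about the register layout, not values quoted from arm-smmu.h:

#include <stdbool.h>
#include <stdint.h>

struct cb_attr_regs {
	uint32_t cbar;
	uint32_t cba2r;
};

static void pack_vmid(struct cb_attr_regs *r, uint16_t vmid, bool have_vmid16)
{
	if (have_vmid16)
		r->cba2r |= (uint32_t)vmid << 16;	/* 16-bit VMIDs live in CBA2R */
	else
		r->cbar |= vmid & 0xffu;		/* 8-bit VMIDs live in CBAR */
}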
565 if (stage1 && smmu->version > ARM_SMMU_V1) in arm_smmu_write_context_bank()
566 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]); in arm_smmu_write_context_bank()
567 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]); in arm_smmu_write_context_bank()
570 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_write_context_bank()
571 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid); in arm_smmu_write_context_bank()
572 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
573 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]); in arm_smmu_write_context_bank()
575 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
578 cb->ttbr[1]); in arm_smmu_write_context_bank()
581 /* MAIRs (stage-1 only) */ in arm_smmu_write_context_bank()
583 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]); in arm_smmu_write_context_bank()
584 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]); in arm_smmu_write_context_bank()
595 if (smmu->impl && smmu->impl->write_sctlr) in arm_smmu_write_context_bank()
596 smmu->impl->write_sctlr(smmu, idx, reg); in arm_smmu_write_context_bank()
605 if (smmu->impl && smmu->impl->alloc_context_bank) in arm_smmu_alloc_context_bank()
606 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_alloc_context_bank()
608 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); in arm_smmu_alloc_context_bank()
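Absent an implementation hook, context banks come from a simple bitmap allocator that skips the first `start` entries (stage-1 domains start past the stage-2-only banks, as arm_smmu_init_domain_context() below shows). A plain-C model of what __arm_smmu_alloc_bitmap() is expected to do; the real helper lives in arm-smmu.h and also has to be safe against concurrent allocators, which this sketch is not:

#include <errno.h>
#include <limits.h>

#define EXAMPLE_BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

/* Find the first clear bit in [start, end), set it and return its index,
 * or -ENOSPC when every bank in the range is already claimed. */
static int alloc_bitmap_example(unsigned long *map, int start, int end)
{
	for (int i = start; i < end; i++) {
		unsigned long bit = 1UL << (i % EXAMPLE_BITS_PER_LONG);

		if (!(map[i / EXAMPLE_BITS_PER_LONG] & bit)) {
			map[i / EXAMPLE_BITS_PER_LONG] |= bit;
			return i;
		}
	}
	return -ENOSPC;
}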
621 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_domain_context()
624 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
625 if (smmu_domain->smmu) in arm_smmu_init_domain_context()
628 if (domain->type == IOMMU_DOMAIN_IDENTITY) { in arm_smmu_init_domain_context()
629 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_init_domain_context()
630 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
650 * Note that you can't actually request stage-2 mappings. in arm_smmu_init_domain_context()
652 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_init_domain_context()
653 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_init_domain_context()
654 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_init_domain_context()
655 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_init_domain_context()
660 * the decision into the io-pgtable code where it arguably belongs, in arm_smmu_init_domain_context()
665 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) in arm_smmu_init_domain_context()
666 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; in arm_smmu_init_domain_context()
669 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && in arm_smmu_init_domain_context()
670 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) in arm_smmu_init_domain_context()
671 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; in arm_smmu_init_domain_context()
672 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && in arm_smmu_init_domain_context()
673 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | in arm_smmu_init_domain_context()
676 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64; in arm_smmu_init_domain_context()
678 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) { in arm_smmu_init_domain_context()
679 ret = -EINVAL; in arm_smmu_init_domain_context()
683 switch (smmu_domain->stage) { in arm_smmu_init_domain_context()
685 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; in arm_smmu_init_domain_context()
686 start = smmu->num_s2_context_banks; in arm_smmu_init_domain_context()
687 ias = smmu->va_size; in arm_smmu_init_domain_context()
688 oas = smmu->ipa_size; in arm_smmu_init_domain_context()
689 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
691 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) { in arm_smmu_init_domain_context()
700 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; in arm_smmu_init_domain_context()
708 cfg->cbar = CBAR_TYPE_S2_TRANS; in arm_smmu_init_domain_context()
710 ias = smmu->ipa_size; in arm_smmu_init_domain_context()
711 oas = smmu->pa_size; in arm_smmu_init_domain_context()
712 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
719 if (smmu->version == ARM_SMMU_V2) in arm_smmu_init_domain_context()
720 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; in arm_smmu_init_domain_context()
722 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; in arm_smmu_init_domain_context()
725 ret = -EINVAL; in arm_smmu_init_domain_context()
734 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
736 cfg->cbndx = ret; in arm_smmu_init_domain_context()
737 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_init_domain_context()
738 cfg->irptndx = atomic_inc_return(&smmu->irptndx); in arm_smmu_init_domain_context()
739 cfg->irptndx %= smmu->num_context_irqs; in arm_smmu_init_domain_context()
741 cfg->irptndx = cfg->cbndx; in arm_smmu_init_domain_context()
744 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) in arm_smmu_init_domain_context()
745 cfg->vmid = cfg->cbndx + 1; in arm_smmu_init_domain_context()
747 cfg->asid = cfg->cbndx; in arm_smmu_init_domain_context()
750 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_init_domain_context()
753 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, in arm_smmu_init_domain_context()
754 .tlb = smmu_domain->flush_ops, in arm_smmu_init_domain_context()
755 .iommu_dev = smmu->dev, in arm_smmu_init_domain_context()
758 if (smmu->impl && smmu->impl->init_context) { in arm_smmu_init_domain_context()
759 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev); in arm_smmu_init_domain_context()
764 if (smmu_domain->pgtbl_quirks) in arm_smmu_init_domain_context()
765 pgtbl_cfg.quirks |= smmu_domain->pgtbl_quirks; in arm_smmu_init_domain_context()
769 ret = -ENOMEM; in arm_smmu_init_domain_context()
774 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_init_domain_context()
777 domain->geometry.aperture_start = ~0UL << ias; in arm_smmu_init_domain_context()
778 domain->geometry.aperture_end = ~0UL; in arm_smmu_init_domain_context()
780 domain->geometry.aperture_end = (1UL << ias) - 1; in arm_smmu_init_domain_context()
783 domain->geometry.force_aperture = true; in arm_smmu_init_domain_context()
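The geometry above restricts the usable IOVA window to the input address size: with the TTBR1 quirk the domain covers the upper end of the address space, otherwise it runs from zero up to 2^ias - 1. A quick worked example for ias = 48, assuming an LP64 build so unsigned long is 64 bits:

#include <stdio.h>

int main(void)
{
	unsigned long ias = 48;

	/* TTBR1 case: aperture starts at the bottom of the upper VA region */
	printf("aperture_start = 0x%lx\n", ~0UL << ias);	/* 0xffff000000000000 */
	/* TTBR0 case: aperture covers [0, 2^48 - 1] */
	printf("aperture_end   = 0x%lx\n", (1UL << ias) - 1);	/* 0xffffffffffff */
	return 0;
}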
787 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_init_domain_context()
791 * handler seeing a half-initialised domain state. in arm_smmu_init_domain_context()
793 irq = smmu->irqs[cfg->irptndx]; in arm_smmu_init_domain_context()
795 if (smmu->impl && smmu->impl->context_fault) in arm_smmu_init_domain_context()
796 context_fault = smmu->impl->context_fault; in arm_smmu_init_domain_context()
800 ret = devm_request_irq(smmu->dev, irq, context_fault, in arm_smmu_init_domain_context()
801 IRQF_SHARED, "arm-smmu-context-fault", domain); in arm_smmu_init_domain_context()
803 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", in arm_smmu_init_domain_context()
804 cfg->irptndx, irq); in arm_smmu_init_domain_context()
805 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX; in arm_smmu_init_domain_context()
808 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
811 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_init_domain_context()
815 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_init_domain_context()
816 smmu_domain->smmu = NULL; in arm_smmu_init_domain_context()
818 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
825 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_destroy_domain_context()
826 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_destroy_domain_context()
829 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_destroy_domain_context()
840 smmu->cbs[cfg->cbndx].cfg = NULL; in arm_smmu_destroy_domain_context()
841 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_destroy_domain_context()
843 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) { in arm_smmu_destroy_domain_context()
844 irq = smmu->irqs[cfg->irptndx]; in arm_smmu_destroy_domain_context()
845 devm_free_irq(smmu->dev, irq, domain); in arm_smmu_destroy_domain_context()
848 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_destroy_domain_context()
849 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_destroy_domain_context()
872 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
873 spin_lock_init(&smmu_domain->cb_lock); in arm_smmu_domain_alloc()
875 return &smmu_domain->domain; in arm_smmu_domain_alloc()
892 struct arm_smmu_smr *smr = smmu->smrs + idx; in arm_smmu_write_smr()
893 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) | in arm_smmu_write_smr()
894 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask); in arm_smmu_write_smr()
896 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) in arm_smmu_write_smr()
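arm_smmu_write_smr() packs the stream ID and the match mask into a single register and, unless the extended stream ID (EXIDS) scheme is active, also sets a valid bit (with EXIDS the valid bit moves into S2CR, as the next function shows). A standalone sketch of the encoding; the field widths assumed here (ID in [14:0], MASK in [30:16], VALID in bit 31) match the FIELD_PREP() users in this listing but are not quoted from the header:

#include <stdbool.h>
#include <stdint.h>

static uint32_t encode_smr(uint16_t id, uint16_t mask, bool valid)
{
	uint32_t reg = (id & 0x7fffu) | ((uint32_t)(mask & 0x7fffu) << 16);

	if (valid)
		reg |= 1u << 31;	/* SMR valid bit, only meaningful without EXIDS */
	return reg;
}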
903 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; in arm_smmu_write_s2cr()
906 if (smmu->impl && smmu->impl->write_s2cr) { in arm_smmu_write_s2cr()
907 smmu->impl->write_s2cr(smmu, idx); in arm_smmu_write_s2cr()
911 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) | in arm_smmu_write_s2cr()
912 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) | in arm_smmu_write_s2cr()
913 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); in arm_smmu_write_s2cr()
915 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && in arm_smmu_write_s2cr()
916 smmu->smrs[idx].valid) in arm_smmu_write_s2cr()
924 if (smmu->smrs) in arm_smmu_write_sme()
937 if (!smmu->smrs) in arm_smmu_test_smr_masks()
947 for (i = 0; i < smmu->num_mapping_groups; i++) in arm_smmu_test_smr_masks()
948 if (!smmu->smrs[i].valid) in arm_smmu_test_smr_masks()
957 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask); in arm_smmu_test_smr_masks()
960 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr); in arm_smmu_test_smr_masks()
962 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask); in arm_smmu_test_smr_masks()
965 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr); in arm_smmu_test_smr_masks()
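The probe above finds out how many stream ID and mask bits the hardware actually implements by writing an all-ones pattern into a spare SMR and reading it back: unimplemented bits read as zero, so the read-back value is used directly as smmu->streamid_mask and smmu->smr_mask_mask. A self-contained model of the technique, where fake_smr() stands in for the register and pretends only 10 bits of each field are wired up:

#include <stdint.h>
#include <stdio.h>

/* Model register: the hardware keeps only 10 bits of the ID and MASK fields. */
static uint32_t fake_smr(uint32_t written)
{
	return written & (0x3ffu | (0x3ffu << 16));
}

int main(void)
{
	uint32_t rb, streamid_mask, smr_mask_mask;

	rb = fake_smr(0x7fffu);			/* write all-ones into the ID field */
	streamid_mask = rb & 0x7fffu;		/* reads back as 0x3ff */

	rb = fake_smr(streamid_mask << 16);	/* reuse it to probe the MASK field */
	smr_mask_mask = (rb >> 16) & 0x7fffu;	/* reads back as 0x3ff */

	printf("streamid_mask=0x%x smr_mask_mask=0x%x\n", streamid_mask, smr_mask_mask);
	return 0;
}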
970 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_find_sme()
971 int i, free_idx = -ENOSPC; in arm_smmu_find_sme()
978 for (i = 0; i < smmu->num_mapping_groups; ++i) { in arm_smmu_find_sme()
1004 return -EINVAL; in arm_smmu_find_sme()
1012 if (--smmu->s2crs[idx].count) in arm_smmu_free_sme()
1015 smmu->s2crs[idx] = s2cr_init_val; in arm_smmu_free_sme()
1016 if (smmu->smrs) in arm_smmu_free_sme()
1017 smmu->smrs[idx].valid = false; in arm_smmu_free_sme()
1026 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_alloc_smes()
1027 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_master_alloc_smes()
1030 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1033 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_master_alloc_smes()
1034 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_master_alloc_smes()
1037 ret = -EEXIST; in arm_smmu_master_alloc_smes()
1046 if (smrs && smmu->s2crs[idx].count == 0) { in arm_smmu_master_alloc_smes()
1051 smmu->s2crs[idx].count++; in arm_smmu_master_alloc_smes()
1052 cfg->smendx[i] = (s16)idx; in arm_smmu_master_alloc_smes()
1059 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1063 while (i--) { in arm_smmu_master_alloc_smes()
1064 arm_smmu_free_sme(smmu, cfg->smendx[i]); in arm_smmu_master_alloc_smes()
1065 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_alloc_smes()
1067 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1074 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_free_smes()
1077 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1081 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_free_smes()
1083 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1090 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_add_master()
1091 struct arm_smmu_s2cr *s2cr = smmu->s2crs; in arm_smmu_domain_add_master()
1092 u8 cbndx = smmu_domain->cfg.cbndx; in arm_smmu_domain_add_master()
1096 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_domain_add_master()
1121 if (!fwspec || fwspec->ops != &arm_smmu_ops) { in arm_smmu_attach_dev()
1123 return -ENXIO; in arm_smmu_attach_dev()
1128 * domains between of_xlate() and probe_device() - we have no way to cope in arm_smmu_attach_dev()
1135 return -ENODEV; in arm_smmu_attach_dev()
1137 smmu = cfg->smmu; in arm_smmu_attach_dev()
1152 if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
1155 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); in arm_smmu_attach_dev()
1156 ret = -EINVAL; in arm_smmu_attach_dev()
1171 * to 5-10sec worth of reprogramming the context bank, while in arm_smmu_attach_dev()
1174 pm_runtime_set_autosuspend_delay(smmu->dev, 20); in arm_smmu_attach_dev()
1175 pm_runtime_use_autosuspend(smmu->dev); in arm_smmu_attach_dev()
1186 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map_pages()
1187 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_map_pages()
1191 return -ENODEV; in arm_smmu_map_pages()
1194 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, gfp, mapped); in arm_smmu_map_pages()
1204 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_unmap_pages()
1205 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_unmap_pages()
1212 ret = ops->unmap_pages(ops, iova, pgsize, pgcount, iotlb_gather); in arm_smmu_unmap_pages()
1221 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_flush_iotlb_all()
1223 if (smmu_domain->flush_ops) { in arm_smmu_flush_iotlb_all()
1225 smmu_domain->flush_ops->tlb_flush_all(smmu_domain); in arm_smmu_flush_iotlb_all()
1234 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iotlb_sync()
1240 if (smmu->version == ARM_SMMU_V2 || in arm_smmu_iotlb_sync()
1241 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iotlb_sync()
1252 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iova_to_phys_hard()
1253 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_iova_to_phys_hard()
1254 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys_hard()
1255 struct device *dev = smmu->dev; in arm_smmu_iova_to_phys_hard()
1260 int ret, idx = cfg->cbndx; in arm_smmu_iova_to_phys_hard()
1267 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1269 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_iova_to_phys_hard()
1277 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1282 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys_hard()
1286 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1304 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys()
1309 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && in arm_smmu_iova_to_phys()
1310 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iova_to_phys()
1313 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
1323 return cfg->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_capable()
1351 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() in arm_smmu_probe_device()
1358 } else if (fwspec && fwspec->ops == &arm_smmu_ops) { in arm_smmu_probe_device()
1359 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
1361 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
1364 ret = -EINVAL; in arm_smmu_probe_device()
1365 for (i = 0; i < fwspec->num_ids; i++) { in arm_smmu_probe_device()
1366 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_probe_device()
1367 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_probe_device()
1369 if (sid & ~smmu->streamid_mask) { in arm_smmu_probe_device()
1371 sid, smmu->streamid_mask); in arm_smmu_probe_device()
1374 if (mask & ~smmu->smr_mask_mask) { in arm_smmu_probe_device()
1376 mask, smmu->smr_mask_mask); in arm_smmu_probe_device()
1381 ret = -ENOMEM; in arm_smmu_probe_device()
1387 cfg->smmu = smmu; in arm_smmu_probe_device()
1389 while (i--) in arm_smmu_probe_device()
1390 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_probe_device()
1402 device_link_add(dev, smmu->dev, in arm_smmu_probe_device()
1405 return &smmu->iommu; in arm_smmu_probe_device()
1420 ret = arm_smmu_rpm_get(cfg->smmu); in arm_smmu_release_device()
1426 arm_smmu_rpm_put(cfg->smmu); in arm_smmu_release_device()
1438 smmu = cfg->smmu; in arm_smmu_probe_finalize()
1440 if (smmu->impl && smmu->impl->probe_finalize) in arm_smmu_probe_finalize()
1441 smmu->impl->probe_finalize(smmu, dev); in arm_smmu_probe_finalize()
1448 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_device_group()
1452 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1454 if (group && smmu->s2crs[idx].group && in arm_smmu_device_group()
1455 group != smmu->s2crs[idx].group) { in arm_smmu_device_group()
1456 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1457 return ERR_PTR(-EINVAL); in arm_smmu_device_group()
1460 group = smmu->s2crs[idx].group; in arm_smmu_device_group()
1464 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1478 smmu->s2crs[idx].group = group; in arm_smmu_device_group()
1480 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_device_group()
1489 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
1490 if (smmu_domain->smmu) in arm_smmu_enable_nesting()
1491 ret = -EPERM; in arm_smmu_enable_nesting()
1493 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_enable_nesting()
1494 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_enable_nesting()
1505 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
1506 if (smmu_domain->smmu) in arm_smmu_set_pgtable_quirks()
1507 ret = -EPERM; in arm_smmu_set_pgtable_quirks()
1509 smmu_domain->pgtbl_quirks = quirks; in arm_smmu_set_pgtable_quirks()
1510 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_set_pgtable_quirks()
1519 if (args->args_count > 0) in arm_smmu_of_xlate()
1520 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]); in arm_smmu_of_xlate()
1522 if (args->args_count > 1) in arm_smmu_of_xlate()
1523 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]); in arm_smmu_of_xlate()
1524 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) in arm_smmu_of_xlate()
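of_xlate() folds the one- or two-cell iommus specifier (plus an optional stream-match-mask property) into one u32 fwspec ID, using the same SMR_ID/SMR_MASK fields that arm_smmu_master_alloc_smes() above pulls back apart with FIELD_GET(). A small round-trip sketch under the assumed [14:0]/[30:16] layout; pack_fwid() and unpack_fwid() are illustrative names, not functions from this file:

#include <assert.h>
#include <stdint.h>

static uint32_t pack_fwid(uint16_t sid, uint16_t mask)
{
	return (sid & 0x7fffu) | ((uint32_t)(mask & 0x7fffu) << 16);
}

static void unpack_fwid(uint32_t fwid, uint16_t *sid, uint16_t *mask)
{
	*sid = fwid & 0x7fffu;
	*mask = (fwid >> 16) & 0x7fffu;
}

int main(void)
{
	uint16_t sid, mask;

	/* e.g. iommus = <&smmu 0x840>; stream-match-mask = <0x3f>; */
	unpack_fwid(pack_fwid(0x840, 0x3f), &sid, &mask);
	assert(sid == 0x840 && mask == 0x3f);
	return 0;
}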
1541 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
1549 const struct arm_smmu_impl *impl = cfg->smmu->impl; in arm_smmu_def_domain_type()
1554 if (impl && impl->def_domain_type) in arm_smmu_def_domain_type()
1555 return impl->def_domain_type(dev); in arm_smmu_def_domain_type()
1570 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1598 for (i = 0; i < smmu->num_mapping_groups; ++i) in arm_smmu_device_reset()
1602 for (i = 0; i < smmu->num_context_banks; ++i) { in arm_smmu_device_reset()
1633 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_device_reset()
1636 if (smmu->features & ARM_SMMU_FEAT_EXIDS) in arm_smmu_device_reset()
1639 if (smmu->impl && smmu->impl->reset) in arm_smmu_device_reset()
1640 smmu->impl->reset(smmu); in arm_smmu_device_reset()
1670 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_cfg_probe()
1673 dev_notice(smmu->dev, "probing hardware configuration...\n"); in arm_smmu_device_cfg_probe()
1674 dev_notice(smmu->dev, "SMMUv%d with:\n", in arm_smmu_device_cfg_probe()
1675 smmu->version == ARM_SMMU_V2 ? 2 : 1); in arm_smmu_device_cfg_probe()
1687 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_cfg_probe()
1688 dev_notice(smmu->dev, "\tstage 1 translation\n"); in arm_smmu_device_cfg_probe()
1692 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_cfg_probe()
1693 dev_notice(smmu->dev, "\tstage 2 translation\n"); in arm_smmu_device_cfg_probe()
1697 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; in arm_smmu_device_cfg_probe()
1698 dev_notice(smmu->dev, "\tnested translation\n"); in arm_smmu_device_cfg_probe()
1701 if (!(smmu->features & in arm_smmu_device_cfg_probe()
1703 dev_err(smmu->dev, "\tno translation support!\n"); in arm_smmu_device_cfg_probe()
1704 return -ENODEV; in arm_smmu_device_cfg_probe()
1708 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) { in arm_smmu_device_cfg_probe()
1709 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; in arm_smmu_device_cfg_probe()
1710 dev_notice(smmu->dev, "\taddress translation ops\n"); in arm_smmu_device_cfg_probe()
1721 dev_notice(smmu->dev, "\t%scoherent table walk\n", in arm_smmu_device_cfg_probe()
1722 cttw_fw ? "" : "non-"); in arm_smmu_device_cfg_probe()
1724 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1728 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) { in arm_smmu_device_cfg_probe()
1729 smmu->features |= ARM_SMMU_FEAT_EXIDS; in arm_smmu_device_cfg_probe()
1734 smmu->streamid_mask = size - 1; in arm_smmu_device_cfg_probe()
1736 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; in arm_smmu_device_cfg_probe()
1739 dev_err(smmu->dev, in arm_smmu_device_cfg_probe()
1740 "stream-matching supported, but no SMRs present!\n"); in arm_smmu_device_cfg_probe()
1741 return -ENODEV; in arm_smmu_device_cfg_probe()
1744 /* Zero-initialised to mark as invalid */ in arm_smmu_device_cfg_probe()
1745 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), in arm_smmu_device_cfg_probe()
1747 if (!smmu->smrs) in arm_smmu_device_cfg_probe()
1748 return -ENOMEM; in arm_smmu_device_cfg_probe()
1750 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1753 /* s2cr->type == 0 means translation, so initialise explicitly */ in arm_smmu_device_cfg_probe()
1754 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), in arm_smmu_device_cfg_probe()
1756 if (!smmu->s2crs) in arm_smmu_device_cfg_probe()
1757 return -ENOMEM; in arm_smmu_device_cfg_probe()
1759 smmu->s2crs[i] = s2cr_init_val; in arm_smmu_device_cfg_probe()
1761 smmu->num_mapping_groups = size; in arm_smmu_device_cfg_probe()
1762 mutex_init(&smmu->stream_map_mutex); in arm_smmu_device_cfg_probe()
1763 spin_lock_init(&smmu->global_sync_lock); in arm_smmu_device_cfg_probe()
1765 if (smmu->version < ARM_SMMU_V2 || in arm_smmu_device_cfg_probe()
1767 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; in arm_smmu_device_cfg_probe()
1769 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S; in arm_smmu_device_cfg_probe()
1774 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12; in arm_smmu_device_cfg_probe()
1778 if (smmu->numpage != 2 * size << smmu->pgshift) in arm_smmu_device_cfg_probe()
1779 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1781 2 * size << smmu->pgshift, smmu->numpage); in arm_smmu_device_cfg_probe()
1783 smmu->numpage = size; in arm_smmu_device_cfg_probe()
1785 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id); in arm_smmu_device_cfg_probe()
1786 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id); in arm_smmu_device_cfg_probe()
1787 if (smmu->num_s2_context_banks > smmu->num_context_banks) { in arm_smmu_device_cfg_probe()
1788 dev_err(smmu->dev, "impossible number of S2 context banks!\n"); in arm_smmu_device_cfg_probe()
1789 return -ENODEV; in arm_smmu_device_cfg_probe()
1791 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", in arm_smmu_device_cfg_probe()
1792 smmu->num_context_banks, smmu->num_s2_context_banks); in arm_smmu_device_cfg_probe()
1793 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, in arm_smmu_device_cfg_probe()
1794 sizeof(*smmu->cbs), GFP_KERNEL); in arm_smmu_device_cfg_probe()
1795 if (!smmu->cbs) in arm_smmu_device_cfg_probe()
1796 return -ENOMEM; in arm_smmu_device_cfg_probe()
1801 smmu->ipa_size = size; in arm_smmu_device_cfg_probe()
1805 smmu->pa_size = size; in arm_smmu_device_cfg_probe()
1808 smmu->features |= ARM_SMMU_FEAT_VMID16; in arm_smmu_device_cfg_probe()
1815 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) in arm_smmu_device_cfg_probe()
1816 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1819 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_device_cfg_probe()
1820 smmu->va_size = smmu->ipa_size; in arm_smmu_device_cfg_probe()
1821 if (smmu->version == ARM_SMMU_V1_64K) in arm_smmu_device_cfg_probe()
1822 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1825 smmu->va_size = arm_smmu_id_size_to_bits(size); in arm_smmu_device_cfg_probe()
1827 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; in arm_smmu_device_cfg_probe()
1829 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; in arm_smmu_device_cfg_probe()
1831 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1834 if (smmu->impl && smmu->impl->cfg_probe) { in arm_smmu_device_cfg_probe()
1835 ret = smmu->impl->cfg_probe(smmu); in arm_smmu_device_cfg_probe()
1841 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) in arm_smmu_device_cfg_probe()
1842 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; in arm_smmu_device_cfg_probe()
1843 if (smmu->features & in arm_smmu_device_cfg_probe()
1845 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_cfg_probe()
1846 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K) in arm_smmu_device_cfg_probe()
1847 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_cfg_probe()
1848 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) in arm_smmu_device_cfg_probe()
1849 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_cfg_probe()
1851 if (arm_smmu_ops.pgsize_bitmap == -1UL) in arm_smmu_device_cfg_probe()
1852 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1854 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1855 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", in arm_smmu_device_cfg_probe()
1856 smmu->pgsize_bitmap); in arm_smmu_device_cfg_probe()
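The bitmap assembled above is literally an OR of the supported page and block sizes, which is what the final dev_notice() prints. A worked example for an SMMU offering the AArch64 4K and 64K granules, spelling out the numeric values behind the kernel's SZ_* constants:

#include <stdio.h>

int main(void)
{
	unsigned long bitmap = 0;

	bitmap |= 0x1000UL | 0x200000UL | 0x40000000UL;	/* SZ_4K | SZ_2M | SZ_1G */
	bitmap |= 0x10000UL | 0x20000000UL;		/* SZ_64K | SZ_512M */

	printf("Supported page sizes: 0x%08lx\n", bitmap);	/* 0x60211000 */
	return 0;
}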
1859 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) in arm_smmu_device_cfg_probe()
1860 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", in arm_smmu_device_cfg_probe()
1861 smmu->va_size, smmu->ipa_size); in arm_smmu_device_cfg_probe()
1863 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) in arm_smmu_device_cfg_probe()
1864 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", in arm_smmu_device_cfg_probe()
1865 smmu->ipa_size, smmu->pa_size); in arm_smmu_device_cfg_probe()
1886 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1887 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1888 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1889 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1890 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1891 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1892 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1893 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1906 smmu->version = ARM_SMMU_V1; in acpi_smmu_get_data()
1907 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1910 smmu->version = ARM_SMMU_V1_64K; in acpi_smmu_get_data()
1911 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1914 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1915 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1918 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1919 smmu->model = ARM_MMU500; in acpi_smmu_get_data()
1922 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1923 smmu->model = CAVIUM_SMMUV2; in acpi_smmu_get_data()
1926 ret = -ENODEV; in acpi_smmu_get_data()
1935 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
1942 iort_smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_device_acpi_probe()
1944 ret = acpi_smmu_get_data(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
1952 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) in arm_smmu_device_acpi_probe()
1953 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_acpi_probe()
1961 return -ENODEV; in arm_smmu_device_acpi_probe()
1969 struct device *dev = smmu->dev; in arm_smmu_device_dt_probe()
1972 if (of_property_read_u32(dev->of_node, "#global-interrupts", global_irqs)) in arm_smmu_device_dt_probe()
1973 return dev_err_probe(dev, -ENODEV, in arm_smmu_device_dt_probe()
1974 "missing #global-interrupts property\n"); in arm_smmu_device_dt_probe()
1978 smmu->version = data->version; in arm_smmu_device_dt_probe()
1979 smmu->model = data->model; in arm_smmu_device_dt_probe()
1981 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); in arm_smmu_device_dt_probe()
1984 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n", in arm_smmu_device_dt_probe()
1992 return -ENODEV; in arm_smmu_device_dt_probe()
1995 if (of_dma_is_coherent(dev->of_node)) in arm_smmu_device_dt_probe()
1996 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_dt_probe()
2009 iort_get_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_smr()
2026 for (i = 0; i < rmr->num_sids; i++) { in arm_smmu_rmr_install_bypass_smr()
2027 idx = arm_smmu_find_sme(smmu, rmr->sids[i], ~0); in arm_smmu_rmr_install_bypass_smr()
2031 if (smmu->s2crs[idx].count == 0) { in arm_smmu_rmr_install_bypass_smr()
2032 smmu->smrs[idx].id = rmr->sids[i]; in arm_smmu_rmr_install_bypass_smr()
2033 smmu->smrs[idx].mask = 0; in arm_smmu_rmr_install_bypass_smr()
2034 smmu->smrs[idx].valid = true; in arm_smmu_rmr_install_bypass_smr()
2036 smmu->s2crs[idx].count++; in arm_smmu_rmr_install_bypass_smr()
2037 smmu->s2crs[idx].type = S2CR_TYPE_BYPASS; in arm_smmu_rmr_install_bypass_smr()
2038 smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT; in arm_smmu_rmr_install_bypass_smr()
2044 dev_notice(smmu->dev, "\tpreserved %d boot mapping%s\n", cnt, in arm_smmu_rmr_install_bypass_smr()
2046 iort_put_rmr_sids(dev_fwnode(smmu->dev), &rmr_list); in arm_smmu_rmr_install_bypass_smr()
2053 struct device *dev = &pdev->dev; in arm_smmu_device_probe()
2061 return -ENOMEM; in arm_smmu_device_probe()
2063 smmu->dev = dev; in arm_smmu_device_probe()
2065 if (dev->of_node) in arm_smmu_device_probe()
2072 smmu->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in arm_smmu_device_probe()
2073 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
2074 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
2075 smmu->ioaddr = res->start; in arm_smmu_device_probe()
2081 smmu->numpage = resource_size(res); in arm_smmu_device_probe()
2089 smmu->num_context_irqs = num_irqs - global_irqs - pmu_irqs; in arm_smmu_device_probe()
2090 if (smmu->num_context_irqs <= 0) in arm_smmu_device_probe()
2091 return dev_err_probe(dev, -ENODEV, in arm_smmu_device_probe()
2095 smmu->irqs = devm_kcalloc(dev, smmu->num_context_irqs, in arm_smmu_device_probe()
2096 sizeof(*smmu->irqs), GFP_KERNEL); in arm_smmu_device_probe()
2097 if (!smmu->irqs) in arm_smmu_device_probe()
2098 return dev_err_probe(dev, -ENOMEM, "failed to allocate %d irqs\n", in arm_smmu_device_probe()
2099 smmu->num_context_irqs); in arm_smmu_device_probe()
2101 for (i = 0; i < smmu->num_context_irqs; i++) { in arm_smmu_device_probe()
2106 smmu->irqs[i] = irq; in arm_smmu_device_probe()
2109 err = devm_clk_bulk_get_all(dev, &smmu->clks); in arm_smmu_device_probe()
2114 smmu->num_clks = err; in arm_smmu_device_probe()
2116 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks); in arm_smmu_device_probe()
2124 if (smmu->version == ARM_SMMU_V2) { in arm_smmu_device_probe()
2125 if (smmu->num_context_banks > smmu->num_context_irqs) { in arm_smmu_device_probe()
2128 smmu->num_context_irqs, smmu->num_context_banks); in arm_smmu_device_probe()
2129 return -ENODEV; in arm_smmu_device_probe()
2133 smmu->num_context_irqs = smmu->num_context_banks; in arm_smmu_device_probe()
2136 if (smmu->impl && smmu->impl->global_fault) in arm_smmu_device_probe()
2137 global_fault = smmu->impl->global_fault; in arm_smmu_device_probe()
2148 "arm-smmu global fault", smmu); in arm_smmu_device_probe()
2155 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, in arm_smmu_device_probe()
2156 "smmu.%pa", &smmu->ioaddr); in arm_smmu_device_probe()
2162 err = iommu_device_register(&smmu->iommu, &arm_smmu_ops, dev); in arm_smmu_device_probe()
2165 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_probe()
2178 * We want to avoid touching dev->power.lock in fastpaths unless in arm_smmu_device_probe()
2179 * it's really going to do something useful - pm_runtime_enabled() in arm_smmu_device_probe()
2183 if (dev->pm_domain) { in arm_smmu_device_probe()
2196 return -ENODEV; in arm_smmu_device_remove()
2198 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) in arm_smmu_device_remove()
2199 dev_notice(&pdev->dev, "disabling translation\n"); in arm_smmu_device_remove()
2201 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
2202 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
2209 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_device_remove()
2210 pm_runtime_force_suspend(smmu->dev); in arm_smmu_device_remove()
2212 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_device_remove()
2214 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_device_remove()
2228 ret = clk_bulk_enable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_resume()
2241 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_suspend()
2251 ret = clk_bulk_prepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_resume()
2260 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_resume()
2278 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_pm_suspend()
2290 .name = "arm-smmu",
2303 MODULE_ALIAS("platform:arm-smmu");