Lines Matching +full:mmu +full:- +full:500

1 // SPDX-License-Identifier: GPL-2.0-only
10 * - SMMUv1 and v2 implementations
11 * - Stream-matching and stream-indexing
12 * - v7/v8 long-descriptor format
13 * - Non-secure access to the SMMU
14 * - Context fault reporting
15 * - Extended Stream ID (16 bit)
18 #define pr_fmt(fmt) "arm-smmu: " fmt
24 #include <linux/dma-iommu.h>
25 #include <linux/dma-mapping.h>
44 #include "arm-smmu.h"
53 #define QCOM_DUMMY_VAL -1
76 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_get()
77 return pm_runtime_get_sync(smmu->dev); in arm_smmu_rpm_get()
84 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_rpm_put()
85 pm_runtime_put_autosuspend(smmu->dev); in arm_smmu_rpm_put()
102 struct pci_bus *bus = to_pci_dev(dev)->bus; in dev_get_dev_node()
105 bus = bus->parent; in dev_get_dev_node()
106 return of_node_get(bus->bridge->parent->of_node); in dev_get_dev_node()
109 return of_node_get(dev->of_node); in dev_get_dev_node()
121 struct device_node *np = it->node; in __find_legacy_master_phandle()
124 of_for_each_phandle(it, err, dev->of_node, "mmu-masters", in __find_legacy_master_phandle()
125 "#stream-id-cells", -1) in __find_legacy_master_phandle()
126 if (it->node == np) { in __find_legacy_master_phandle()
130 it->node = np; in __find_legacy_master_phandle()
131 return err == -ENOENT ? 0 : err; in __find_legacy_master_phandle()
146 if (!np || !of_find_property(np, "#stream-id-cells", NULL)) { in arm_smmu_register_legacy_master()
148 return -ENODEV; in arm_smmu_register_legacy_master()
157 return -ENODEV; in arm_smmu_register_legacy_master()
162 /* "mmu-masters" assumes Stream ID == Requester ID */ in arm_smmu_register_legacy_master()
169 err = iommu_fwspec_init(dev, &smmu_dev->of_node->fwnode, in arm_smmu_register_legacy_master()
176 return -ENOMEM; in arm_smmu_register_legacy_master()
202 return -ENODEV; in arm_smmu_register_legacy_master()
218 if (smmu->impl && unlikely(smmu->impl->tlb_sync)) in __arm_smmu_tlb_sync()
219 return smmu->impl->tlb_sync(smmu, page, sync, status); in __arm_smmu_tlb_sync()
223 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { in __arm_smmu_tlb_sync()
231 dev_err_ratelimited(smmu->dev, in __arm_smmu_tlb_sync()
232 "TLB sync timed out -- SMMU may be deadlocked\n"); in __arm_smmu_tlb_sync()
239 spin_lock_irqsave(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
242 spin_unlock_irqrestore(&smmu->global_sync_lock, flags); in arm_smmu_tlb_sync_global()
247 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_sync_context()
250 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
251 __arm_smmu_tlb_sync(smmu, ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx), in arm_smmu_tlb_sync_context()
253 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_tlb_sync_context()
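
The __arm_smmu_tlb_sync() lines above implement a spin-then-back-off poll: a short busy-spin (TLB_SPIN_COUNT) nested inside a growing-delay loop, with a ratelimited warning if the sync never completes. Below is a stand-alone sketch of that pattern; sync_complete() is a hypothetical stand-in for reading the sync status register, and the outer bound mirrors a TLB_LOOP_TIMEOUT-style constant that is assumed here rather than quoted from the driver.

/* Illustrative sketch only, not driver code. */
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define TLB_SPIN_COUNT      10       /* busy-spin budget, as in the listing above */
#define TLB_LOOP_TIMEOUT    1000000  /* assumed outer-loop bound */

static bool sync_complete(void)
{
    static int countdown = 25;       /* pretend the hardware finishes after 25 polls */
    return --countdown <= 0;
}

static int tlb_sync_poll(void)
{
    for (unsigned int delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
        /* busy-spin first: most syncs complete almost immediately */
        for (int spin = TLB_SPIN_COUNT; spin > 0; spin--)
            if (sync_complete())
                return 0;
        usleep(delay);               /* back off before polling again */
    }
    fprintf(stderr, "TLB sync timed out -- SMMU may be deadlocked\n");
    return -1;
}

int main(void)
{
    return tlb_sync_poll() ? 1 : 0;
}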
264 arm_smmu_cb_write(smmu_domain->smmu, smmu_domain->cfg.cbndx, in arm_smmu_tlb_inv_context_s1()
265 ARM_SMMU_CB_S1_TLBIASID, smmu_domain->cfg.asid); in arm_smmu_tlb_inv_context_s1()
272 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_context_s2()
276 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_inv_context_s2()
284 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s1()
285 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_tlb_inv_range_s1()
286 int idx = cfg->cbndx; in arm_smmu_tlb_inv_range_s1()
288 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s1()
291 if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_tlb_inv_range_s1()
293 iova |= cfg->asid; in arm_smmu_tlb_inv_range_s1()
297 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
300 iova |= (u64)cfg->asid << 48; in arm_smmu_tlb_inv_range_s1()
304 } while (size -= granule); in arm_smmu_tlb_inv_range_s1()
312 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_inv_range_s2()
313 int idx = smmu_domain->cfg.cbndx; in arm_smmu_tlb_inv_range_s2()
315 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_inv_range_s2()
320 if (smmu_domain->cfg.fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_tlb_inv_range_s2()
325 } while (size -= granule); in arm_smmu_tlb_inv_range_s2()
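
In arm_smmu_tlb_inv_range_s1()/_s2() above, the per-page invalidation operand folds the context identifier in with the address: AArch32-format contexts OR cfg->asid into the low bits, while AArch64-format contexts shift it into bits [63:48] of a page-number operand and then step by the granule. A minimal sketch of the AArch64-format loop follows; the IOVA, ASID, and sizes are example values and printf() stands in for the register write.

/* Illustrative sketch only, not driver code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t iova = 0x80000000ULL;   /* example IOVA */
    uint64_t size = 0x4000;          /* bytes to invalidate */
    uint64_t granule = 0x1000;       /* leaf page size */
    uint16_t asid = 5;

    /* AArch64 format: operand is a page number with the ASID in bits [63:48] */
    uint64_t op = (iova >> 12) | ((uint64_t)asid << 48);

    do {
        printf("TLBIVAL 0x%016llx\n", (unsigned long long)op);
        op += granule >> 12;         /* advance one page */
    } while (size -= granule);

    return 0;
}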
382 * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
385 * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
393 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_tlb_add_page_s2_v1()
395 if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) in arm_smmu_tlb_add_page_s2_v1()
398 arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid); in arm_smmu_tlb_add_page_s2_v1()
428 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_context_fault()
429 int idx = smmu_domain->cfg.cbndx; in arm_smmu_context_fault()
439 dev_err_ratelimited(smmu->dev, in arm_smmu_context_fault()
465 dev_err(smmu->dev, in arm_smmu_global_fault()
466 …"Blocked unknown Stream ID 0x%hx; boot with \"arm-smmu.disable_bypass=0\" to allow, but this may h… in arm_smmu_global_fault()
469 dev_err(smmu->dev, in arm_smmu_global_fault()
471 dev_err(smmu->dev, in arm_smmu_global_fault()
483 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_context_bank()
484 struct arm_smmu_cb *cb = &smmu_domain->smmu->cbs[cfg->cbndx]; in arm_smmu_init_context_bank()
485 bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_init_context_bank()
487 cb->cfg = cfg; in arm_smmu_init_context_bank()
491 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
492 cb->tcr[0] = pgtbl_cfg->arm_v7s_cfg.tcr; in arm_smmu_init_context_bank()
494 cb->tcr[0] = arm_smmu_lpae_tcr(pgtbl_cfg); in arm_smmu_init_context_bank()
495 cb->tcr[1] = arm_smmu_lpae_tcr2(pgtbl_cfg); in arm_smmu_init_context_bank()
496 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_init_context_bank()
497 cb->tcr[1] |= ARM_SMMU_TCR2_AS; in arm_smmu_init_context_bank()
499 cb->tcr[0] |= ARM_SMMU_TCR_EAE; in arm_smmu_init_context_bank()
502 cb->tcr[0] = arm_smmu_lpae_vtcr(pgtbl_cfg); in arm_smmu_init_context_bank()
507 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
508 cb->ttbr[0] = pgtbl_cfg->arm_v7s_cfg.ttbr; in arm_smmu_init_context_bank()
509 cb->ttbr[1] = 0; in arm_smmu_init_context_bank()
511 cb->ttbr[0] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
512 cfg->asid); in arm_smmu_init_context_bank()
513 cb->ttbr[1] = FIELD_PREP(ARM_SMMU_TTBRn_ASID, in arm_smmu_init_context_bank()
514 cfg->asid); in arm_smmu_init_context_bank()
516 if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) in arm_smmu_init_context_bank()
517 cb->ttbr[1] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
519 cb->ttbr[0] |= pgtbl_cfg->arm_lpae_s1_cfg.ttbr; in arm_smmu_init_context_bank()
522 cb->ttbr[0] = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; in arm_smmu_init_context_bank()
525 /* MAIRs (stage-1 only) */ in arm_smmu_init_context_bank()
527 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_init_context_bank()
528 cb->mair[0] = pgtbl_cfg->arm_v7s_cfg.prrr; in arm_smmu_init_context_bank()
529 cb->mair[1] = pgtbl_cfg->arm_v7s_cfg.nmrr; in arm_smmu_init_context_bank()
531 cb->mair[0] = pgtbl_cfg->arm_lpae_s1_cfg.mair; in arm_smmu_init_context_bank()
532 cb->mair[1] = pgtbl_cfg->arm_lpae_s1_cfg.mair >> 32; in arm_smmu_init_context_bank()
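
The TTBR assignments above use FIELD_PREP() from <linux/bitfield.h> to drop cfg->asid into the ASID field of the shadow TTBR values. Here is a user-space sketch of the FIELD_PREP()/FIELD_GET() idiom; the mask assumes the ASID field occupies bits [63:48], which is illustrative rather than quoted from arm-smmu.h.

/* Illustrative sketch only; minimal user-space equivalents of the kernel macros. */
#include <stdint.h>
#include <stdio.h>

#define TTBR_ASID_MASK  (0xffffULL << 48)

#define FIELD_PREP(mask, val)  (((uint64_t)(val) << __builtin_ctzll(mask)) & (mask))
#define FIELD_GET(mask, reg)   (((uint64_t)(reg) & (mask)) >> __builtin_ctzll(mask))

int main(void)
{
    uint64_t ttbr = 0x0000000081234000ULL;   /* example page-table base address */
    uint16_t asid = 0x2a;

    ttbr |= FIELD_PREP(TTBR_ASID_MASK, asid);
    printf("ttbr = 0x%016llx, asid read back = 0x%llx\n",
           (unsigned long long)ttbr,
           (unsigned long long)FIELD_GET(TTBR_ASID_MASK, ttbr));
    return 0;
}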
541 struct arm_smmu_cb *cb = &smmu->cbs[idx]; in arm_smmu_write_context_bank()
542 struct arm_smmu_cfg *cfg = cb->cfg; in arm_smmu_write_context_bank()
550 stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; in arm_smmu_write_context_bank()
553 if (smmu->version > ARM_SMMU_V1) { in arm_smmu_write_context_bank()
554 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_write_context_bank()
558 /* 16-bit VMIDs live in CBA2R */ in arm_smmu_write_context_bank()
559 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_write_context_bank()
560 reg |= FIELD_PREP(ARM_SMMU_CBA2R_VMID16, cfg->vmid); in arm_smmu_write_context_bank()
566 reg = FIELD_PREP(ARM_SMMU_CBAR_TYPE, cfg->cbar); in arm_smmu_write_context_bank()
567 if (smmu->version < ARM_SMMU_V2) in arm_smmu_write_context_bank()
568 reg |= FIELD_PREP(ARM_SMMU_CBAR_IRPTNDX, cfg->irptndx); in arm_smmu_write_context_bank()
579 } else if (!(smmu->features & ARM_SMMU_FEAT_VMID16)) { in arm_smmu_write_context_bank()
580 /* 8-bit VMIDs live in CBAR */ in arm_smmu_write_context_bank()
581 reg |= FIELD_PREP(ARM_SMMU_CBAR_VMID, cfg->vmid); in arm_smmu_write_context_bank()
590 if (stage1 && smmu->version > ARM_SMMU_V1) in arm_smmu_write_context_bank()
591 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR2, cb->tcr[1]); in arm_smmu_write_context_bank()
592 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TCR, cb->tcr[0]); in arm_smmu_write_context_bank()
595 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_S) { in arm_smmu_write_context_bank()
596 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_CONTEXTIDR, cfg->asid); in arm_smmu_write_context_bank()
597 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
598 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_TTBR1, cb->ttbr[1]); in arm_smmu_write_context_bank()
600 arm_smmu_cb_writeq(smmu, idx, ARM_SMMU_CB_TTBR0, cb->ttbr[0]); in arm_smmu_write_context_bank()
603 cb->ttbr[1]); in arm_smmu_write_context_bank()
606 /* MAIRs (stage-1 only) */ in arm_smmu_write_context_bank()
608 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR0, cb->mair[0]); in arm_smmu_write_context_bank()
609 arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_S1_MAIR1, cb->mair[1]); in arm_smmu_write_context_bank()
627 if (smmu->impl && smmu->impl->alloc_context_bank) in arm_smmu_alloc_context_bank()
628 return smmu->impl->alloc_context_bank(smmu_domain, smmu, dev, start); in arm_smmu_alloc_context_bank()
630 return __arm_smmu_alloc_bitmap(smmu->context_map, start, smmu->num_context_banks); in arm_smmu_alloc_context_bank()
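
Absent an implementation hook, context banks are handed out by __arm_smmu_alloc_bitmap() over smmu->context_map. Below is a simplified, non-atomic sketch of that find-first-zero-and-set allocation (the real helper loops over find_next_zero_bit()/test_and_set_bit()); MAX_CBS and the start index are example values.

/* Illustrative sketch only, not driver code. */
#include <errno.h>
#include <stdio.h>

#define MAX_CBS        128
#define BITS_PER_WORD  (8 * sizeof(unsigned long))

static unsigned long context_map[MAX_CBS / BITS_PER_WORD];

static int alloc_bitmap(unsigned long *map, int start, int end)
{
    for (int idx = start; idx < end; idx++) {
        unsigned long bit = 1UL << (idx % BITS_PER_WORD);
        unsigned long *word = &map[idx / BITS_PER_WORD];

        if (!(*word & bit)) {
            *word |= bit;            /* claim this context bank */
            return idx;
        }
    }
    return -ENOSPC;
}

int main(void)
{
    /* e.g. start past the stage-2-only banks, as init_domain_context does */
    printf("allocated context bank %d\n", alloc_bitmap(context_map, 2, MAX_CBS));
    return 0;
}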
643 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_init_domain_context()
646 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
647 if (smmu_domain->smmu) in arm_smmu_init_domain_context()
650 if (domain->type == IOMMU_DOMAIN_IDENTITY) { in arm_smmu_init_domain_context()
651 smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS; in arm_smmu_init_domain_context()
652 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
672 * Note that you can't actually request stage-2 mappings. in arm_smmu_init_domain_context()
674 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) in arm_smmu_init_domain_context()
675 smmu_domain->stage = ARM_SMMU_DOMAIN_S2; in arm_smmu_init_domain_context()
676 if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) in arm_smmu_init_domain_context()
677 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_init_domain_context()
682 * the decision into the io-pgtable code where it arguably belongs, in arm_smmu_init_domain_context()
687 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L) in arm_smmu_init_domain_context()
688 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L; in arm_smmu_init_domain_context()
691 (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) && in arm_smmu_init_domain_context()
692 (smmu_domain->stage == ARM_SMMU_DOMAIN_S1)) in arm_smmu_init_domain_context()
693 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S; in arm_smmu_init_domain_context()
694 if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) && in arm_smmu_init_domain_context()
695 (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K | in arm_smmu_init_domain_context()
698 cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64; in arm_smmu_init_domain_context()
700 if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) { in arm_smmu_init_domain_context()
701 ret = -EINVAL; in arm_smmu_init_domain_context()
705 switch (smmu_domain->stage) { in arm_smmu_init_domain_context()
707 cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; in arm_smmu_init_domain_context()
708 start = smmu->num_s2_context_banks; in arm_smmu_init_domain_context()
709 ias = smmu->va_size; in arm_smmu_init_domain_context()
710 oas = smmu->ipa_size; in arm_smmu_init_domain_context()
711 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
713 } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) { in arm_smmu_init_domain_context()
722 smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops; in arm_smmu_init_domain_context()
730 cfg->cbar = CBAR_TYPE_S2_TRANS; in arm_smmu_init_domain_context()
732 ias = smmu->ipa_size; in arm_smmu_init_domain_context()
733 oas = smmu->pa_size; in arm_smmu_init_domain_context()
734 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) { in arm_smmu_init_domain_context()
741 if (smmu->version == ARM_SMMU_V2) in arm_smmu_init_domain_context()
742 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2; in arm_smmu_init_domain_context()
744 smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1; in arm_smmu_init_domain_context()
747 ret = -EINVAL; in arm_smmu_init_domain_context()
756 smmu_domain->smmu = smmu; in arm_smmu_init_domain_context()
758 cfg->cbndx = ret; in arm_smmu_init_domain_context()
759 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_init_domain_context()
760 cfg->irptndx = atomic_inc_return(&smmu->irptndx); in arm_smmu_init_domain_context()
761 cfg->irptndx %= smmu->num_context_irqs; in arm_smmu_init_domain_context()
763 cfg->irptndx = cfg->cbndx; in arm_smmu_init_domain_context()
766 if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2) in arm_smmu_init_domain_context()
767 cfg->vmid = cfg->cbndx + 1; in arm_smmu_init_domain_context()
769 cfg->asid = cfg->cbndx; in arm_smmu_init_domain_context()
772 .pgsize_bitmap = smmu->pgsize_bitmap, in arm_smmu_init_domain_context()
775 .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK, in arm_smmu_init_domain_context()
776 .tlb = smmu_domain->flush_ops, in arm_smmu_init_domain_context()
777 .iommu_dev = smmu->dev, in arm_smmu_init_domain_context()
780 if (smmu->impl && smmu->impl->init_context) { in arm_smmu_init_domain_context()
781 ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg, dev); in arm_smmu_init_domain_context()
786 if (smmu_domain->non_strict) in arm_smmu_init_domain_context()
791 ret = -ENOMEM; in arm_smmu_init_domain_context()
796 domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; in arm_smmu_init_domain_context()
799 domain->geometry.aperture_start = ~0UL << ias; in arm_smmu_init_domain_context()
800 domain->geometry.aperture_end = ~0UL; in arm_smmu_init_domain_context()
802 domain->geometry.aperture_end = (1UL << ias) - 1; in arm_smmu_init_domain_context()
805 domain->geometry.force_aperture = true; in arm_smmu_init_domain_context()
809 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_init_domain_context()
813 * handler seeing a half-initialised domain state. in arm_smmu_init_domain_context()
815 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; in arm_smmu_init_domain_context()
817 if (smmu->impl && smmu->impl->context_fault) in arm_smmu_init_domain_context()
818 context_fault = smmu->impl->context_fault; in arm_smmu_init_domain_context()
822 ret = devm_request_irq(smmu->dev, irq, context_fault, in arm_smmu_init_domain_context()
823 IRQF_SHARED, "arm-smmu-context-fault", domain); in arm_smmu_init_domain_context()
825 dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n", in arm_smmu_init_domain_context()
826 cfg->irptndx, irq); in arm_smmu_init_domain_context()
827 cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX; in arm_smmu_init_domain_context()
830 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
833 smmu_domain->pgtbl_ops = pgtbl_ops; in arm_smmu_init_domain_context()
837 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_init_domain_context()
838 smmu_domain->smmu = NULL; in arm_smmu_init_domain_context()
840 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_init_domain_context()
847 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_destroy_domain_context()
848 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_destroy_domain_context()
851 if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_destroy_domain_context()
862 smmu->cbs[cfg->cbndx].cfg = NULL; in arm_smmu_destroy_domain_context()
863 arm_smmu_write_context_bank(smmu, cfg->cbndx); in arm_smmu_destroy_domain_context()
865 if (cfg->irptndx != ARM_SMMU_INVALID_IRPTNDX) { in arm_smmu_destroy_domain_context()
866 irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; in arm_smmu_destroy_domain_context()
867 devm_free_irq(smmu->dev, irq, domain); in arm_smmu_destroy_domain_context()
870 free_io_pgtable_ops(smmu_domain->pgtbl_ops); in arm_smmu_destroy_domain_context()
871 __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); in arm_smmu_destroy_domain_context()
894 iommu_get_dma_cookie(&smmu_domain->domain))) { in arm_smmu_domain_alloc()
899 mutex_init(&smmu_domain->init_mutex); in arm_smmu_domain_alloc()
900 spin_lock_init(&smmu_domain->cb_lock); in arm_smmu_domain_alloc()
902 return &smmu_domain->domain; in arm_smmu_domain_alloc()
920 struct arm_smmu_smr *smr = smmu->smrs + idx; in arm_smmu_write_smr()
921 u32 reg = FIELD_PREP(ARM_SMMU_SMR_ID, smr->id) | in arm_smmu_write_smr()
922 FIELD_PREP(ARM_SMMU_SMR_MASK, smr->mask); in arm_smmu_write_smr()
924 if (!(smmu->features & ARM_SMMU_FEAT_EXIDS) && smr->valid) in arm_smmu_write_smr()
931 struct arm_smmu_s2cr *s2cr = smmu->s2crs + idx; in arm_smmu_write_s2cr()
932 u32 reg = FIELD_PREP(ARM_SMMU_S2CR_TYPE, s2cr->type) | in arm_smmu_write_s2cr()
933 FIELD_PREP(ARM_SMMU_S2CR_CBNDX, s2cr->cbndx) | in arm_smmu_write_s2cr()
934 FIELD_PREP(ARM_SMMU_S2CR_PRIVCFG, s2cr->privcfg); in arm_smmu_write_s2cr()
936 if (smmu->features & ARM_SMMU_FEAT_EXIDS && smmu->smrs && in arm_smmu_write_s2cr()
937 smmu->smrs[idx].valid) in arm_smmu_write_s2cr()
945 if (smmu->smrs) in arm_smmu_write_sme()
958 if (!smmu->smrs) in arm_smmu_test_smr_masks()
968 for (i = 0; i < smmu->num_mapping_groups; i++) in arm_smmu_test_smr_masks()
969 if (!smmu->smrs[i].valid) in arm_smmu_test_smr_masks()
978 smr = FIELD_PREP(ARM_SMMU_SMR_ID, smmu->streamid_mask); in arm_smmu_test_smr_masks()
981 smmu->streamid_mask = FIELD_GET(ARM_SMMU_SMR_ID, smr); in arm_smmu_test_smr_masks()
983 smr = FIELD_PREP(ARM_SMMU_SMR_MASK, smmu->streamid_mask); in arm_smmu_test_smr_masks()
986 smmu->smr_mask_mask = FIELD_GET(ARM_SMMU_SMR_MASK, smr); in arm_smmu_test_smr_masks()
991 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_find_sme()
992 int i, free_idx = -ENOSPC; in arm_smmu_find_sme()
999 for (i = 0; i < smmu->num_mapping_groups; ++i) { in arm_smmu_find_sme()
1025 return -EINVAL; in arm_smmu_find_sme()
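
The SMR/S2CR writes and arm_smmu_find_sme() above implement stream matching: a valid SMR holds an ID plus a mask in which set bits mean "don't care", and an incoming Stream ID hits the entry when every unmasked bit agrees. A small sketch of that match test follows; the ID/mask values are examples.

/* Illustrative sketch only, not driver code. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct smr { uint16_t id, mask; bool valid; };

static bool smr_matches(const struct smr *smr, uint16_t sid)
{
    return smr->valid && !((smr->id ^ sid) & ~smr->mask);
}

int main(void)
{
    struct smr smr = { .id = 0x400, .mask = 0x3f, .valid = true };

    printf("0x421 matches: %d\n", smr_matches(&smr, 0x421));  /* 1: within the masked range */
    printf("0x481 matches: %d\n", smr_matches(&smr, 0x481));  /* 0: differs in an unmasked bit */
    return 0;
}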
1033 if (--smmu->s2crs[idx].count) in arm_smmu_free_sme()
1036 smmu->s2crs[idx] = s2cr_init_val; in arm_smmu_free_sme()
1037 if (smmu->smrs) in arm_smmu_free_sme()
1038 smmu->smrs[idx].valid = false; in arm_smmu_free_sme()
1047 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_alloc_smes()
1048 struct arm_smmu_smr *smrs = smmu->smrs; in arm_smmu_master_alloc_smes()
1051 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1054 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_master_alloc_smes()
1055 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_master_alloc_smes()
1058 ret = -EEXIST; in arm_smmu_master_alloc_smes()
1067 if (smrs && smmu->s2crs[idx].count == 0) { in arm_smmu_master_alloc_smes()
1072 smmu->s2crs[idx].count++; in arm_smmu_master_alloc_smes()
1073 cfg->smendx[i] = (s16)idx; in arm_smmu_master_alloc_smes()
1080 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1084 while (i--) { in arm_smmu_master_alloc_smes()
1085 arm_smmu_free_sme(smmu, cfg->smendx[i]); in arm_smmu_master_alloc_smes()
1086 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_alloc_smes()
1088 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_alloc_smes()
1095 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_master_free_smes()
1098 mutex_lock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
1102 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_master_free_smes()
1104 mutex_unlock(&smmu->stream_map_mutex); in arm_smmu_master_free_smes()
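
arm_smmu_master_alloc_smes()/arm_smmu_free_sme() above refcount stream-map entries, so masters presenting an identical ID/mask share one SMR and only the last release invalidates it. A much-simplified sketch of that bookkeeping (no conflict detection, no hardware writes); the table size is arbitrary.

/* Illustrative sketch only, not driver code. */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SME 8

static struct sme { uint16_t id, mask; bool valid; int count; } smes[NUM_SME];

static int alloc_sme(uint16_t id, uint16_t mask)
{
    int free_idx = -ENOSPC;

    for (int i = 0; i < NUM_SME; i++) {
        if (!smes[i].valid) {
            if (free_idx < 0)
                free_idx = i;
        } else if (smes[i].id == id && smes[i].mask == mask) {
            smes[i].count++;         /* share the existing entry */
            return i;
        }
    }
    if (free_idx >= 0)
        smes[free_idx] = (struct sme){ .id = id, .mask = mask,
                                       .valid = true, .count = 1 };
    return free_idx;
}

static void free_sme(int idx)
{
    if (--smes[idx].count == 0)
        smes[idx].valid = false;     /* last user gone: invalidate */
}

int main(void)
{
    int a = alloc_sme(0x400, 0), b = alloc_sme(0x400, 0);

    printf("a=%d b=%d shared=%d\n", a, b, a == b);
    free_sme(b);
    free_sme(a);
    return 0;
}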
1111 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_domain_add_master()
1112 struct arm_smmu_s2cr *s2cr = smmu->s2crs; in arm_smmu_domain_add_master()
1113 u8 cbndx = smmu_domain->cfg.cbndx; in arm_smmu_domain_add_master()
1117 if (smmu_domain->stage == ARM_SMMU_DOMAIN_BYPASS) in arm_smmu_domain_add_master()
1142 if (!fwspec || fwspec->ops != &arm_smmu_ops) { in arm_smmu_attach_dev()
1144 return -ENXIO; in arm_smmu_attach_dev()
1149 * domains between of_xlate() and probe_device() - we have no way to cope in arm_smmu_attach_dev()
1156 return -ENODEV; in arm_smmu_attach_dev()
1158 smmu = cfg->smmu; in arm_smmu_attach_dev()
1173 if (smmu_domain->smmu != smmu) { in arm_smmu_attach_dev()
1176 dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); in arm_smmu_attach_dev()
1177 ret = -EINVAL; in arm_smmu_attach_dev()
1192 * to 5-10sec worth of reprogramming the context bank, while in arm_smmu_attach_dev()
1195 pm_runtime_set_autosuspend_delay(smmu->dev, 20); in arm_smmu_attach_dev()
1196 pm_runtime_use_autosuspend(smmu->dev); in arm_smmu_attach_dev()
1206 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_map()
1207 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_map()
1211 return -ENODEV; in arm_smmu_map()
1214 ret = ops->map(ops, iova, paddr, size, prot, gfp); in arm_smmu_map()
1223 struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; in arm_smmu_unmap()
1224 struct arm_smmu_device *smmu = to_smmu_domain(domain)->smmu; in arm_smmu_unmap()
1231 ret = ops->unmap(ops, iova, size, gather); in arm_smmu_unmap()
1240 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_flush_iotlb_all()
1242 if (smmu_domain->flush_ops) { in arm_smmu_flush_iotlb_all()
1244 smmu_domain->flush_ops->tlb_flush_all(smmu_domain); in arm_smmu_flush_iotlb_all()
1253 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iotlb_sync()
1259 if (smmu->version == ARM_SMMU_V2 || in arm_smmu_iotlb_sync()
1260 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iotlb_sync()
1271 struct arm_smmu_device *smmu = smmu_domain->smmu; in arm_smmu_iova_to_phys_hard()
1272 struct arm_smmu_cfg *cfg = &smmu_domain->cfg; in arm_smmu_iova_to_phys_hard()
1273 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys_hard()

1274 struct device *dev = smmu->dev; in arm_smmu_iova_to_phys_hard()
1279 int ret, idx = cfg->cbndx; in arm_smmu_iova_to_phys_hard()
1285 spin_lock_irqsave(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1287 if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) in arm_smmu_iova_to_phys_hard()
1295 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1299 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys_hard()
1303 spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); in arm_smmu_iova_to_phys_hard()
1319 struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; in arm_smmu_iova_to_phys()
1321 if (domain->type == IOMMU_DOMAIN_IDENTITY) in arm_smmu_iova_to_phys()
1327 if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && in arm_smmu_iova_to_phys()
1328 smmu_domain->stage == ARM_SMMU_DOMAIN_S1) in arm_smmu_iova_to_phys()
1331 return ops->iova_to_phys(ops, iova); in arm_smmu_iova_to_phys()
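
arm_smmu_iova_to_phys() above chooses between the hardware ATS path (arm_smmu_iova_to_phys_hard(), which programs a translation request and reads back the result) and a software lookup via ops->iova_to_phys(). Below is a toy two-level table walk showing what the software path does conceptually; the table geometry, VALID encoding, and addresses are simplified stand-ins, not the io-pgtable format.

/* Illustrative sketch only, not driver code. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512                  /* 9 index bits per level, 4 KiB granule */
#define VALID   1ULL

static uint64_t *alloc_table(void)
{
    return calloc(ENTRIES, sizeof(uint64_t));
}

static uint64_t walk(uint64_t *l1, uint64_t iova)
{
    uint64_t l1e = l1[(iova >> 21) & (ENTRIES - 1)];
    if (!(l1e & VALID))
        return 0;

    uint64_t *l2 = (uint64_t *)(uintptr_t)(l1e & ~VALID);
    uint64_t l2e = l2[(iova >> 12) & (ENTRIES - 1)];
    if (!(l2e & VALID))
        return 0;

    return (l2e & ~0xfffULL) | (iova & 0xfff);
}

int main(void)
{
    uint64_t *l1 = alloc_table(), *l2 = alloc_table();

    /* map example IOVA 0x40201000 to example PA 0x80045000 */
    l1[(0x40201000ULL >> 21) & (ENTRIES - 1)] = (uint64_t)(uintptr_t)l2 | VALID;
    l2[(0x40201000ULL >> 12) & (ENTRIES - 1)] = 0x80045000ULL | VALID;

    printf("0x40201000 -> 0x%llx\n", (unsigned long long)walk(l1, 0x40201000ULL));
    free(l2);
    free(l1);
    return 0;
}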
1370 * If dev->iommu_fwspec is initially NULL, arm_smmu_register_legacy_master() in arm_smmu_probe_device()
1377 } else if (fwspec && fwspec->ops == &arm_smmu_ops) { in arm_smmu_probe_device()
1378 smmu = arm_smmu_get_by_fwnode(fwspec->iommu_fwnode); in arm_smmu_probe_device()
1380 return ERR_PTR(-ENODEV); in arm_smmu_probe_device()
1383 ret = -EINVAL; in arm_smmu_probe_device()
1384 for (i = 0; i < fwspec->num_ids; i++) { in arm_smmu_probe_device()
1385 u16 sid = FIELD_GET(ARM_SMMU_SMR_ID, fwspec->ids[i]); in arm_smmu_probe_device()
1386 u16 mask = FIELD_GET(ARM_SMMU_SMR_MASK, fwspec->ids[i]); in arm_smmu_probe_device()
1388 if (sid & ~smmu->streamid_mask) { in arm_smmu_probe_device()
1390 sid, smmu->streamid_mask); in arm_smmu_probe_device()
1393 if (mask & ~smmu->smr_mask_mask) { in arm_smmu_probe_device()
1395 mask, smmu->smr_mask_mask); in arm_smmu_probe_device()
1400 ret = -ENOMEM; in arm_smmu_probe_device()
1406 cfg->smmu = smmu; in arm_smmu_probe_device()
1408 while (i--) in arm_smmu_probe_device()
1409 cfg->smendx[i] = INVALID_SMENDX; in arm_smmu_probe_device()
1421 device_link_add(dev, smmu->dev, in arm_smmu_probe_device()
1424 return &smmu->iommu; in arm_smmu_probe_device()
1440 if (!fwspec || fwspec->ops != &arm_smmu_ops) in arm_smmu_release_device()
1444 smmu = cfg->smmu; in arm_smmu_release_device()
1463 struct arm_smmu_device *smmu = cfg->smmu; in arm_smmu_device_group()
1468 if (group && smmu->s2crs[idx].group && in arm_smmu_device_group()
1469 group != smmu->s2crs[idx].group) in arm_smmu_device_group()
1470 return ERR_PTR(-EINVAL); in arm_smmu_device_group()
1472 group = smmu->s2crs[idx].group; in arm_smmu_device_group()
1488 smmu->s2crs[idx].group = group; in arm_smmu_device_group()
1498 switch (domain->type) { in arm_smmu_domain_get_attr()
1502 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED); in arm_smmu_domain_get_attr()
1505 return -ENODEV; in arm_smmu_domain_get_attr()
1511 *(int *)data = smmu_domain->non_strict; in arm_smmu_domain_get_attr()
1514 return -ENODEV; in arm_smmu_domain_get_attr()
1518 return -EINVAL; in arm_smmu_domain_get_attr()
1528 mutex_lock(&smmu_domain->init_mutex); in arm_smmu_domain_set_attr()
1530 switch (domain->type) { in arm_smmu_domain_set_attr()
1534 if (smmu_domain->smmu) { in arm_smmu_domain_set_attr()
1535 ret = -EPERM; in arm_smmu_domain_set_attr()
1540 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; in arm_smmu_domain_set_attr()
1542 smmu_domain->stage = ARM_SMMU_DOMAIN_S1; in arm_smmu_domain_set_attr()
1545 ret = -ENODEV; in arm_smmu_domain_set_attr()
1551 smmu_domain->non_strict = *(int *)data; in arm_smmu_domain_set_attr()
1554 ret = -ENODEV; in arm_smmu_domain_set_attr()
1558 ret = -EINVAL; in arm_smmu_domain_set_attr()
1561 mutex_unlock(&smmu_domain->init_mutex); in arm_smmu_domain_set_attr()
1569 if (args->args_count > 0) in arm_smmu_of_xlate()
1570 fwid |= FIELD_PREP(ARM_SMMU_SMR_ID, args->args[0]); in arm_smmu_of_xlate()
1572 if (args->args_count > 1) in arm_smmu_of_xlate()
1573 fwid |= FIELD_PREP(ARM_SMMU_SMR_MASK, args->args[1]); in arm_smmu_of_xlate()
1574 else if (!of_property_read_u32(args->np, "stream-match-mask", &mask)) in arm_smmu_of_xlate()
1591 list_add_tail(&region->list, head); in arm_smmu_get_resv_regions()
1599 const struct arm_smmu_impl *impl = cfg->smmu->impl; in arm_smmu_def_domain_type()
1601 if (impl && impl->def_domain_type) in arm_smmu_def_domain_type()
1602 return impl->def_domain_type(dev); in arm_smmu_def_domain_type()
1626 .pgsize_bitmap = -1UL, /* Restricted during device attach */
1642 for (i = 0; i < smmu->num_mapping_groups; ++i) in arm_smmu_device_reset()
1646 for (i = 0; i < smmu->num_context_banks; ++i) { in arm_smmu_device_reset()
1677 if (smmu->features & ARM_SMMU_FEAT_VMID16) in arm_smmu_device_reset()
1680 if (smmu->features & ARM_SMMU_FEAT_EXIDS) in arm_smmu_device_reset()
1683 if (smmu->impl && smmu->impl->reset) in arm_smmu_device_reset()
1684 smmu->impl->reset(smmu); in arm_smmu_device_reset()
1714 bool cttw_reg, cttw_fw = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_cfg_probe()
1717 dev_notice(smmu->dev, "probing hardware configuration...\n"); in arm_smmu_device_cfg_probe()
1718 dev_notice(smmu->dev, "SMMUv%d with:\n", in arm_smmu_device_cfg_probe()
1719 smmu->version == ARM_SMMU_V2 ? 2 : 1); in arm_smmu_device_cfg_probe()
1731 smmu->features |= ARM_SMMU_FEAT_TRANS_S1; in arm_smmu_device_cfg_probe()
1732 dev_notice(smmu->dev, "\tstage 1 translation\n"); in arm_smmu_device_cfg_probe()
1736 smmu->features |= ARM_SMMU_FEAT_TRANS_S2; in arm_smmu_device_cfg_probe()
1737 dev_notice(smmu->dev, "\tstage 2 translation\n"); in arm_smmu_device_cfg_probe()
1741 smmu->features |= ARM_SMMU_FEAT_TRANS_NESTED; in arm_smmu_device_cfg_probe()
1742 dev_notice(smmu->dev, "\tnested translation\n"); in arm_smmu_device_cfg_probe()
1745 if (!(smmu->features & in arm_smmu_device_cfg_probe()
1747 dev_err(smmu->dev, "\tno translation support!\n"); in arm_smmu_device_cfg_probe()
1748 return -ENODEV; in arm_smmu_device_cfg_probe()
1752 ((smmu->version < ARM_SMMU_V2) || !(id & ARM_SMMU_ID0_ATOSNS))) { in arm_smmu_device_cfg_probe()
1753 smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; in arm_smmu_device_cfg_probe()
1754 dev_notice(smmu->dev, "\taddress translation ops\n"); in arm_smmu_device_cfg_probe()
1765 dev_notice(smmu->dev, "\t%scoherent table walk\n", in arm_smmu_device_cfg_probe()
1766 cttw_fw ? "" : "non-"); in arm_smmu_device_cfg_probe()
1768 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1772 if (smmu->version == ARM_SMMU_V2 && id & ARM_SMMU_ID0_EXIDS) { in arm_smmu_device_cfg_probe()
1773 smmu->features |= ARM_SMMU_FEAT_EXIDS; in arm_smmu_device_cfg_probe()
1778 smmu->streamid_mask = size - 1; in arm_smmu_device_cfg_probe()
1780 smmu->features |= ARM_SMMU_FEAT_STREAM_MATCH; in arm_smmu_device_cfg_probe()
1783 dev_err(smmu->dev, in arm_smmu_device_cfg_probe()
1784 "stream-matching supported, but no SMRs present!\n"); in arm_smmu_device_cfg_probe()
1785 return -ENODEV; in arm_smmu_device_cfg_probe()
1788 /* Zero-initialised to mark as invalid */ in arm_smmu_device_cfg_probe()
1789 smmu->smrs = devm_kcalloc(smmu->dev, size, sizeof(*smmu->smrs), in arm_smmu_device_cfg_probe()
1791 if (!smmu->smrs) in arm_smmu_device_cfg_probe()
1792 return -ENOMEM; in arm_smmu_device_cfg_probe()
1794 dev_notice(smmu->dev, in arm_smmu_device_cfg_probe()
1797 /* s2cr->type == 0 means translation, so initialise explicitly */ in arm_smmu_device_cfg_probe()
1798 smmu->s2crs = devm_kmalloc_array(smmu->dev, size, sizeof(*smmu->s2crs), in arm_smmu_device_cfg_probe()
1800 if (!smmu->s2crs) in arm_smmu_device_cfg_probe()
1801 return -ENOMEM; in arm_smmu_device_cfg_probe()
1803 smmu->s2crs[i] = s2cr_init_val; in arm_smmu_device_cfg_probe()
1805 smmu->num_mapping_groups = size; in arm_smmu_device_cfg_probe()
1806 mutex_init(&smmu->stream_map_mutex); in arm_smmu_device_cfg_probe()
1807 spin_lock_init(&smmu->global_sync_lock); in arm_smmu_device_cfg_probe()
1809 if (smmu->version < ARM_SMMU_V2 || in arm_smmu_device_cfg_probe()
1811 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_L; in arm_smmu_device_cfg_probe()
1813 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH32_S; in arm_smmu_device_cfg_probe()
1818 smmu->pgshift = (id & ARM_SMMU_ID1_PAGESIZE) ? 16 : 12; in arm_smmu_device_cfg_probe()
1822 if (smmu->numpage != 2 * size << smmu->pgshift) in arm_smmu_device_cfg_probe()
1823 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1825 2 * size << smmu->pgshift, smmu->numpage); in arm_smmu_device_cfg_probe()
1827 smmu->numpage = size; in arm_smmu_device_cfg_probe()
1829 smmu->num_s2_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMS2CB, id); in arm_smmu_device_cfg_probe()
1830 smmu->num_context_banks = FIELD_GET(ARM_SMMU_ID1_NUMCB, id); in arm_smmu_device_cfg_probe()
1831 if (smmu->num_s2_context_banks > smmu->num_context_banks) { in arm_smmu_device_cfg_probe()
1832 dev_err(smmu->dev, "impossible number of S2 context banks!\n"); in arm_smmu_device_cfg_probe()
1833 return -ENODEV; in arm_smmu_device_cfg_probe()
1835 dev_notice(smmu->dev, "\t%u context banks (%u stage-2 only)\n", in arm_smmu_device_cfg_probe()
1836 smmu->num_context_banks, smmu->num_s2_context_banks); in arm_smmu_device_cfg_probe()
1837 smmu->cbs = devm_kcalloc(smmu->dev, smmu->num_context_banks, in arm_smmu_device_cfg_probe()
1838 sizeof(*smmu->cbs), GFP_KERNEL); in arm_smmu_device_cfg_probe()
1839 if (!smmu->cbs) in arm_smmu_device_cfg_probe()
1840 return -ENOMEM; in arm_smmu_device_cfg_probe()
1845 smmu->ipa_size = size; in arm_smmu_device_cfg_probe()
1849 smmu->pa_size = size; in arm_smmu_device_cfg_probe()
1852 smmu->features |= ARM_SMMU_FEAT_VMID16; in arm_smmu_device_cfg_probe()
1859 if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) in arm_smmu_device_cfg_probe()
1860 dev_warn(smmu->dev, in arm_smmu_device_cfg_probe()
1863 if (smmu->version < ARM_SMMU_V2) { in arm_smmu_device_cfg_probe()
1864 smmu->va_size = smmu->ipa_size; in arm_smmu_device_cfg_probe()
1865 if (smmu->version == ARM_SMMU_V1_64K) in arm_smmu_device_cfg_probe()
1866 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1869 smmu->va_size = arm_smmu_id_size_to_bits(size); in arm_smmu_device_cfg_probe()
1871 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_4K; in arm_smmu_device_cfg_probe()
1873 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_16K; in arm_smmu_device_cfg_probe()
1875 smmu->features |= ARM_SMMU_FEAT_FMT_AARCH64_64K; in arm_smmu_device_cfg_probe()
1878 if (smmu->impl && smmu->impl->cfg_probe) { in arm_smmu_device_cfg_probe()
1879 ret = smmu->impl->cfg_probe(smmu); in arm_smmu_device_cfg_probe()
1885 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) in arm_smmu_device_cfg_probe()
1886 smmu->pgsize_bitmap |= SZ_4K | SZ_64K | SZ_1M | SZ_16M; in arm_smmu_device_cfg_probe()
1887 if (smmu->features & in arm_smmu_device_cfg_probe()
1889 smmu->pgsize_bitmap |= SZ_4K | SZ_2M | SZ_1G; in arm_smmu_device_cfg_probe()
1890 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_16K) in arm_smmu_device_cfg_probe()
1891 smmu->pgsize_bitmap |= SZ_16K | SZ_32M; in arm_smmu_device_cfg_probe()
1892 if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH64_64K) in arm_smmu_device_cfg_probe()
1893 smmu->pgsize_bitmap |= SZ_64K | SZ_512M; in arm_smmu_device_cfg_probe()
1895 if (arm_smmu_ops.pgsize_bitmap == -1UL) in arm_smmu_device_cfg_probe()
1896 arm_smmu_ops.pgsize_bitmap = smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1898 arm_smmu_ops.pgsize_bitmap |= smmu->pgsize_bitmap; in arm_smmu_device_cfg_probe()
1899 dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", in arm_smmu_device_cfg_probe()
1900 smmu->pgsize_bitmap); in arm_smmu_device_cfg_probe()
1903 if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) in arm_smmu_device_cfg_probe()
1904 dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", in arm_smmu_device_cfg_probe()
1905 smmu->va_size, smmu->ipa_size); in arm_smmu_device_cfg_probe()
1907 if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) in arm_smmu_device_cfg_probe()
1908 dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", in arm_smmu_device_cfg_probe()
1909 smmu->ipa_size, smmu->pa_size); in arm_smmu_device_cfg_probe()
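
arm_smmu_device_cfg_probe() above derives most of the device geometry from the ID registers; ID1 alone yields the register page shift, the expected register-space size, and the context-bank counts. A stand-alone sketch of that decoding follows, with field positions taken from my reading of the SMMUv2 IDR1 layout and a made-up register value.

/* Illustrative sketch only, not driver code. */
#include <stdint.h>
#include <stdio.h>

#define ID1_PAGESIZE            (1u << 31)
#define ID1_NUMPAGENDXB(x)      (((x) >> 28) & 0x7)
#define ID1_NUMS2CB(x)          (((x) >> 16) & 0xff)
#define ID1_NUMCB(x)            ((x) & 0xff)

int main(void)
{
    uint32_t id1 = 0xb0020848;       /* example SMMU_IDR1 value */
    unsigned int pgshift = (id1 & ID1_PAGESIZE) ? 16 : 12;
    unsigned int size = 1u << ID1_NUMPAGENDXB(id1);

    printf("pgshift=%u, register space=%u bytes, s2cbs=%u, cbs=%u\n",
           pgshift, (2 * size) << pgshift,
           ID1_NUMS2CB(id1), ID1_NUMCB(id1));
    return 0;
}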
1930 { .compatible = "arm,smmu-v1", .data = &smmu_generic_v1 },
1931 { .compatible = "arm,smmu-v2", .data = &smmu_generic_v2 },
1932 { .compatible = "arm,mmu-400", .data = &smmu_generic_v1 },
1933 { .compatible = "arm,mmu-401", .data = &arm_mmu401 },
1934 { .compatible = "arm,mmu-500", .data = &arm_mmu500 },
1935 { .compatible = "cavium,smmu-v2", .data = &cavium_smmuv2 },
1936 { .compatible = "nvidia,smmu-500", .data = &arm_mmu500 },
1937 { .compatible = "qcom,smmu-v2", .data = &qcom_smmuv2 },
1950 smmu->version = ARM_SMMU_V1; in acpi_smmu_get_data()
1951 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1954 smmu->version = ARM_SMMU_V1_64K; in acpi_smmu_get_data()
1955 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1958 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1959 smmu->model = GENERIC_SMMU; in acpi_smmu_get_data()
1962 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1963 smmu->model = ARM_MMU500; in acpi_smmu_get_data()
1966 smmu->version = ARM_SMMU_V2; in acpi_smmu_get_data()
1967 smmu->model = CAVIUM_SMMUV2; in acpi_smmu_get_data()
1970 ret = -ENODEV; in acpi_smmu_get_data()
1979 struct device *dev = smmu->dev; in arm_smmu_device_acpi_probe()
1986 iort_smmu = (struct acpi_iort_smmu *)node->node_data; in arm_smmu_device_acpi_probe()
1988 ret = acpi_smmu_get_data(iort_smmu->model, smmu); in arm_smmu_device_acpi_probe()
1993 smmu->num_global_irqs = 1; in arm_smmu_device_acpi_probe()
1995 if (iort_smmu->flags & ACPI_IORT_SMMU_COHERENT_WALK) in arm_smmu_device_acpi_probe()
1996 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_acpi_probe()
2004 return -ENODEV; in arm_smmu_device_acpi_probe()
2012 struct device *dev = &pdev->dev; in arm_smmu_device_dt_probe()
2015 if (of_property_read_u32(dev->of_node, "#global-interrupts", in arm_smmu_device_dt_probe()
2016 &smmu->num_global_irqs)) { in arm_smmu_device_dt_probe()
2017 dev_err(dev, "missing #global-interrupts property\n"); in arm_smmu_device_dt_probe()
2018 return -ENODEV; in arm_smmu_device_dt_probe()
2022 smmu->version = data->version; in arm_smmu_device_dt_probe()
2023 smmu->model = data->model; in arm_smmu_device_dt_probe()
2025 legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); in arm_smmu_device_dt_probe()
2028 pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n", in arm_smmu_device_dt_probe()
2036 return -ENODEV; in arm_smmu_device_dt_probe()
2039 if (of_dma_is_coherent(dev->of_node)) in arm_smmu_device_dt_probe()
2040 smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; in arm_smmu_device_dt_probe()
2096 struct device *dev = &pdev->dev; in arm_smmu_device_probe()
2103 return -ENOMEM; in arm_smmu_device_probe()
2105 smmu->dev = dev; in arm_smmu_device_probe()
2107 if (dev->of_node) in arm_smmu_device_probe()
2116 ioaddr = res->start; in arm_smmu_device_probe()
2117 smmu->base = devm_ioremap_resource(dev, res); in arm_smmu_device_probe()
2118 if (IS_ERR(smmu->base)) in arm_smmu_device_probe()
2119 return PTR_ERR(smmu->base); in arm_smmu_device_probe()
2124 smmu->numpage = resource_size(res); in arm_smmu_device_probe()
2133 if (num_irqs > smmu->num_global_irqs) in arm_smmu_device_probe()
2134 smmu->num_context_irqs++; in arm_smmu_device_probe()
2137 if (!smmu->num_context_irqs) { in arm_smmu_device_probe()
2139 num_irqs, smmu->num_global_irqs + 1); in arm_smmu_device_probe()
2140 return -ENODEV; in arm_smmu_device_probe()
2143 smmu->irqs = devm_kcalloc(dev, num_irqs, sizeof(*smmu->irqs), in arm_smmu_device_probe()
2145 if (!smmu->irqs) { in arm_smmu_device_probe()
2147 return -ENOMEM; in arm_smmu_device_probe()
2154 return -ENODEV; in arm_smmu_device_probe()
2155 smmu->irqs[i] = irq; in arm_smmu_device_probe()
2158 err = devm_clk_bulk_get_all(dev, &smmu->clks); in arm_smmu_device_probe()
2163 smmu->num_clks = err; in arm_smmu_device_probe()
2165 err = clk_bulk_prepare_enable(smmu->num_clks, smmu->clks); in arm_smmu_device_probe()
2173 if (smmu->version == ARM_SMMU_V2) { in arm_smmu_device_probe()
2174 if (smmu->num_context_banks > smmu->num_context_irqs) { in arm_smmu_device_probe()
2177 smmu->num_context_irqs, smmu->num_context_banks); in arm_smmu_device_probe()
2178 return -ENODEV; in arm_smmu_device_probe()
2182 smmu->num_context_irqs = smmu->num_context_banks; in arm_smmu_device_probe()
2185 if (smmu->impl && smmu->impl->global_fault) in arm_smmu_device_probe()
2186 global_fault = smmu->impl->global_fault; in arm_smmu_device_probe()
2190 for (i = 0; i < smmu->num_global_irqs; ++i) { in arm_smmu_device_probe()
2191 err = devm_request_irq(smmu->dev, smmu->irqs[i], in arm_smmu_device_probe()
2194 "arm-smmu global fault", in arm_smmu_device_probe()
2198 i, smmu->irqs[i]); in arm_smmu_device_probe()
2203 err = iommu_device_sysfs_add(&smmu->iommu, smmu->dev, NULL, in arm_smmu_device_probe()
2210 iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops); in arm_smmu_device_probe()
2211 iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); in arm_smmu_device_probe()
2213 err = iommu_device_register(&smmu->iommu); in arm_smmu_device_probe()
2224 * We want to avoid touching dev->power.lock in fastpaths unless in arm_smmu_device_probe()
2225 * it's really going to do something useful - pm_runtime_enabled() in arm_smmu_device_probe()
2229 if (dev->pm_domain) { in arm_smmu_device_probe()
2250 return -ENODEV; in arm_smmu_device_remove()
2252 if (!bitmap_empty(smmu->context_map, ARM_SMMU_MAX_CBS)) in arm_smmu_device_remove()
2253 dev_notice(&pdev->dev, "disabling translation\n"); in arm_smmu_device_remove()
2256 iommu_device_unregister(&smmu->iommu); in arm_smmu_device_remove()
2257 iommu_device_sysfs_remove(&smmu->iommu); in arm_smmu_device_remove()
2264 if (pm_runtime_enabled(smmu->dev)) in arm_smmu_device_remove()
2265 pm_runtime_force_suspend(smmu->dev); in arm_smmu_device_remove()
2267 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_device_remove()
2269 clk_bulk_unprepare(smmu->num_clks, smmu->clks); in arm_smmu_device_remove()
2283 ret = clk_bulk_enable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_resume()
2296 clk_bulk_disable(smmu->num_clks, smmu->clks); in arm_smmu_runtime_suspend()
2325 .name = "arm-smmu",
2338 MODULE_ALIAS("platform:arm-smmu");