Lines Matching +full:mmu +full:- +full:500 +full:s

1 // SPDX-License-Identifier: GPL-2.0-only
2 // Copyright (C) 2019-2020 NVIDIA CORPORATION. All rights reserved.
12 #include "arm-smmu.h"
15 * Tegra194 has three ARM MMU-500 instances.
18 * non-isochronous HW devices.
23 * memory client. This is necessary to allow for use cases such as seamlessly
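Read together, the matched comment lines describe the file's whole design: two of the three MMU-500 instances are programmed as mirrors of each other, and the driver additionally coordinates with the Tegra memory controller so each memory client gets the right SID override (for example, to hand over the firmware-initialized display controller to the kernel). The rest of the matches hang off a small wrapper around the generic arm_smmu_device. A sketch of that state, reconstructed from the fields the matched lines dereference (MAX_SMMU_INSTANCES and the to_nvidia_smmu() helper are part of this reconstruction and may differ across kernel versions):

#include <soc/tegra/mc.h>

#include "arm-smmu.h"

#define MAX_SMMU_INSTANCES 2

struct nvidia_smmu {
        struct arm_smmu_device smmu;    /* must come first: the core driver only sees this */
        void __iomem *bases[MAX_SMMU_INSTANCES];  /* one MMIO base per mirrored instance */
        unsigned int num_instances;
        struct tegra_mc *mc;            /* memory controller handle, for SID overrides */
};

#define to_nvidia_smmu(s) container_of(s, struct nvidia_smmu, smmu)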
52 return nvidia_smmu->bases[inst] + (page << smmu->pgshift); in nvidia_smmu_page()
69 for (i = 0; i < nvidia->num_instances; i++) { in nvidia_smmu_write_reg()
90 for (i = 0; i < nvidia->num_instances; i++) { in nvidia_smmu_write_reg64()
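Lines 52, 69 and 90 are the heart of the mirroring scheme: nvidia_smmu_page() turns an (instance, page) pair into an MMIO address, and every register write is replayed on each instance so the two MMU-500s never drift apart. A sketch of the write path under the assumptions above (the 64-bit variant on line 90 differs only in using writeq_relaxed()):

static inline void __iomem *nvidia_smmu_page(struct arm_smmu_device *smmu,
                                             unsigned int inst, int page)
{
        return to_nvidia_smmu(smmu)->bases[inst] + (page << smmu->pgshift);
}

static void nvidia_smmu_write_reg(struct arm_smmu_device *smmu,
                                  int page, int offset, u32 val)
{
        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        unsigned int i;

        /* Replay the write on every instance so they stay identical. */
        for (i = 0; i < nvidia->num_instances; i++)
                writel_relaxed(val, nvidia_smmu_page(smmu, i, page) + offset);
}

Reads, by contrast, only need to consult instance 0: if the mirroring invariant holds, all instances answer identically.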
108 for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) { in nvidia_smmu_tlb_sync()
112 for (i = 0; i < nvidia->num_instances; i++) { in nvidia_smmu_tlb_sync()
128 dev_err_ratelimited(smmu->dev, in nvidia_smmu_tlb_sync()
129 "TLB sync timed out -- SMMU may be deadlocked\n"); in nvidia_smmu_tlb_sync()
137 for (i = 0; i < nvidia->num_instances; i++) { in nvidia_smmu_reset()
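Line 137 applies the same per-instance loop at reset: the global fault status register is read back and written on each instance to clear any latched faults, not just on instance 0. Roughly (register names from arm-smmu.h):

static int nvidia_smmu_reset(struct arm_smmu_device *smmu)
{
        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        unsigned int i;

        for (i = 0; i < nvidia->num_instances; i++) {
                void __iomem *reg = nvidia_smmu_page(smmu, i, ARM_SMMU_GR0) +
                                    ARM_SMMU_GR0_sGFSR;

                /* Clear the global FSR by writing back its current value. */
                writel_relaxed(readl_relaxed(reg), reg);
        }

        return 0;
}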
165 dev_err_ratelimited(smmu->dev, in nvidia_smmu_global_fault_inst()
167 dev_err_ratelimited(smmu->dev, in nvidia_smmu_global_fault_inst()
182 for (inst = 0; inst < nvidia->num_instances; inst++) { in nvidia_smmu_global_fault()
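Global faults latch per instance, so the handler around lines 165-182 cannot assume instance 0 raised the interrupt: a per-instance helper reads and logs GFSR/GFSYNRx (the dev_err_ratelimited() calls on lines 165 and 167) and returns IRQ_NONE when its instance is clean, while the top-level handler sweeps every instance. A sketch of the sweep:

static irqreturn_t nvidia_smmu_global_fault(int irq, void *dev)
{
        struct arm_smmu_device *smmu = dev;
        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        irqreturn_t ret = IRQ_NONE;
        unsigned int inst;

        for (inst = 0; inst < nvidia->num_instances; inst++) {
                /* Handle (and clear) the fault on whichever instance raised it. */
                if (nvidia_smmu_global_fault_inst(irq, smmu, inst) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }

        return ret;
}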
200 void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx); in nvidia_smmu_context_fault_bank()
210 dev_err_ratelimited(smmu->dev, in nvidia_smmu_context_fault_bank()
229 smmu = smmu_domain->smmu; in nvidia_smmu_context_fault()
232 for (inst = 0; inst < nvidia->num_instances; inst++) { in nvidia_smmu_context_fault()
239 for (idx = 0; idx < smmu->num_context_banks; idx++) { in nvidia_smmu_context_fault()
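Context faults get the same treatment one level down. The interrupt line is shared between all context banks, so the handler walks every (instance, bank) pair; line 200 also shows how a bank's registers are located: context-bank pages start right after the smmu->numpage global pages. A hedged reconstruction of the outer handler (the per-bank helper reads FSR/FSYNR0/FAR and clears FSR, per lines 200-210):

static irqreturn_t nvidia_smmu_context_fault(int irq, void *dev)
{
        struct iommu_domain *domain = dev;
        struct arm_smmu_domain *smmu_domain =
                container_of(domain, struct arm_smmu_domain, domain);
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        irqreturn_t ret = IRQ_NONE;
        unsigned int inst;
        int idx;

        for (inst = 0; inst < nvidia->num_instances; inst++) {
                /* The IRQ is shared by all contexts: scan every bank. */
                for (idx = 0; idx < smmu->num_context_banks; idx++) {
                        if (nvidia_smmu_context_fault_bank(irq, smmu, idx,
                                                           inst) == IRQ_HANDLED)
                                ret = IRQ_HANDLED;
                }
        }

        return ret;
}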
255 err = tegra_mc_probe_device(nvidia->mc, dev); in nvidia_smmu_probe_finalize()
257 dev_err(smmu->dev, "memory controller probe failed for %s: %d\n", in nvidia_smmu_probe_finalize()
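Lines 255-257 are the memory-controller handshake promised in the header comment: after a client device is probed, tegra_mc_probe_device() programs its SID override in the MC. A sketch of the hook; the error is only logged, and the probe itself is not failed:

static void nvidia_smmu_probe_finalize(struct arm_smmu_device *smmu,
                                       struct device *dev)
{
        struct nvidia_smmu *nvidia = to_nvidia_smmu(smmu);
        int err;

        /* Program the SID override for this memory client. */
        err = tegra_mc_probe_device(nvidia->mc, dev);
        if (err < 0)
                dev_err(smmu->dev,
                        "memory controller probe failed for %s: %d\n",
                        dev_name(dev), err);
}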
265 struct arm_smmu_device *smmu = smmu_domain->smmu; in nvidia_smmu_init_context()
266 const struct device_node *np = smmu->dev->of_node; in nvidia_smmu_init_context()
280 if (of_device_is_compatible(np, "nvidia,tegra234-smmu") || in nvidia_smmu_init_context()
281 of_device_is_compatible(np, "nvidia,tegra194-smmu")) { in nvidia_smmu_init_context()
282 smmu->pgsize_bitmap = PAGE_SIZE; in nvidia_smmu_init_context()
283 pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap; in nvidia_smmu_init_context()
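Lines 280-283 carry the one policy decision visible in this listing: on Tegra194 and Tegra234 the supported page-size bitmap is clamped to PAGE_SIZE, which disables large (block) mappings. The upstream rationale is a walk-cache invalidation erratum on these SoCs: the walk-cache index for an IOVA differs between translation and invalidation requests, so a freed PMD entry could be hit stale after an unmap. A sketch of the hook:

static int nvidia_smmu_init_context(struct arm_smmu_domain *smmu_domain,
                                    struct io_pgtable_cfg *pgtbl_cfg,
                                    struct device *dev)
{
        struct arm_smmu_device *smmu = smmu_domain->smmu;
        const struct device_node *np = smmu->dev->of_node;

        /*
         * Work around the walk-cache invalidation erratum by limiting
         * mappings to PAGE_SIZE (no block mappings) on these SoCs.
         */
        if (of_device_is_compatible(np, "nvidia,tegra234-smmu") ||
            of_device_is_compatible(np, "nvidia,tegra194-smmu")) {
                smmu->pgsize_bitmap = PAGE_SIZE;
                pgtbl_cfg->pgsize_bitmap = smmu->pgsize_bitmap;
        }

        return 0;
}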
310 struct device *dev = smmu->dev; in nvidia_smmu_impl_init()
317 return ERR_PTR(-ENOMEM); in nvidia_smmu_impl_init()
319 nvidia_smmu->mc = devm_tegra_memory_controller_get(dev); in nvidia_smmu_impl_init()
320 if (IS_ERR(nvidia_smmu->mc)) in nvidia_smmu_impl_init()
321 return ERR_CAST(nvidia_smmu->mc); in nvidia_smmu_impl_init()
323 /* Instance 0 is ioremapped by arm-smmu.c. */ in nvidia_smmu_impl_init()
324 nvidia_smmu->bases[0] = smmu->base; in nvidia_smmu_impl_init()
325 nvidia_smmu->num_instances++; in nvidia_smmu_impl_init()
332 nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res); in nvidia_smmu_impl_init()
333 if (IS_ERR(nvidia_smmu->bases[i])) in nvidia_smmu_impl_init()
334 return ERR_CAST(nvidia_smmu->bases[i]); in nvidia_smmu_impl_init()
336 nvidia_smmu->num_instances++; in nvidia_smmu_impl_init()
339 if (nvidia_smmu->num_instances == 1) in nvidia_smmu_impl_init()
340 nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl; in nvidia_smmu_impl_init()
342 nvidia_smmu->smmu.impl = &nvidia_smmu_impl; in nvidia_smmu_impl_init()
344 return &nvidia_smmu->smmu; in nvidia_smmu_impl_init()
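Finally, lines 310-344 show how it all gets wired up. The wrapper is grown in place around the arm_smmu_device the core already allocated, instance 0 reuses the mapping arm-smmu.c created (lines 323-324), any further "reg" entries become extra mirrored instances, and a single-instance system falls back to nvidia_smmu_single_impl, which can skip the mirroring hooks while keeping the page-size workaround. A reconstruction under the same assumptions (the devm_krealloc() in-place growth follows the matched control flow but is part of the sketch):

struct arm_smmu_device *nvidia_smmu_impl_init(struct arm_smmu_device *smmu)
{
        struct platform_device *pdev = to_platform_device(smmu->dev);
        struct device *dev = smmu->dev;
        struct nvidia_smmu *nvidia_smmu;
        struct resource *res;
        unsigned int i;

        /* Grow the core structure in place so existing pointers stay valid. */
        nvidia_smmu = devm_krealloc(dev, smmu, sizeof(*nvidia_smmu), GFP_KERNEL);
        if (!nvidia_smmu)
                return ERR_PTR(-ENOMEM);

        nvidia_smmu->mc = devm_tegra_memory_controller_get(dev);
        if (IS_ERR(nvidia_smmu->mc))
                return ERR_CAST(nvidia_smmu->mc);

        /* Instance 0 is ioremapped by arm-smmu.c. */
        nvidia_smmu->bases[0] = smmu->base;
        nvidia_smmu->num_instances++;

        for (i = 1; i < MAX_SMMU_INSTANCES; i++) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, i);
                if (!res)
                        break;

                nvidia_smmu->bases[i] = devm_ioremap_resource(dev, res);
                if (IS_ERR(nvidia_smmu->bases[i]))
                        return ERR_CAST(nvidia_smmu->bases[i]);

                nvidia_smmu->num_instances++;
        }

        if (nvidia_smmu->num_instances == 1)
                nvidia_smmu->smmu.impl = &nvidia_smmu_single_impl;
        else
                nvidia_smmu->smmu.impl = &nvidia_smmu_impl;

        return &nvidia_smmu->smmu;
}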