Lines Matching +full:smmu +full:- +full:v3
1 // SPDX-License-Identifier: GPL-2.0
11 #include "arm-smmu-v3.h"
12 #include "../../iommu-sva-lib.h"
13 #include "../../io-pgtable-arm.h"
40 * Check if the CPU ASID is available on the SMMU side. If a private context descriptor is using it, try to replace it.
49 struct arm_smmu_device *smmu; in arm_smmu_share_asid() local
56 if (cd->mm) { in arm_smmu_share_asid()
57 if (WARN_ON(cd->mm != mm)) in arm_smmu_share_asid()
58 return ERR_PTR(-EINVAL); in arm_smmu_share_asid()
60 refcount_inc(&cd->refs); in arm_smmu_share_asid()
65 smmu = smmu_domain->smmu; in arm_smmu_share_asid()
68 XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL); in arm_smmu_share_asid()
70 return ERR_PTR(-ENOSPC); in arm_smmu_share_asid()
73 * which isn't assigned yet. We'll do an invalidate-all on the old ASID later, so it doesn't matter. in arm_smmu_share_asid()
76 cd->asid = new_asid; in arm_smmu_share_asid()
85 arm_smmu_tlb_inv_asid(smmu, asid); in arm_smmu_share_asid()
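Taken together, the fragments above are the ASID-sharing path: when the mm's CPU ASID is already held by a private SMMU context, that context is migrated to a freshly allocated ASID and the old ASID is invalidated, so the CPU and the SMMU can share it. A minimal sketch of how those lines fit together, with locking and the full CD rewrite elided; the arm_smmu_asid_xa xarray and the s1_cfg.cd layout used by container_of() come from the driver header rather than the lines above, so treat this as illustrative, not verbatim driver code:

static struct arm_smmu_ctx_desc *
arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
{
        int ret;
        u32 new_asid;
        struct arm_smmu_ctx_desc *cd;
        struct arm_smmu_device *smmu;
        struct arm_smmu_domain *smmu_domain;

        cd = xa_load(&arm_smmu_asid_xa, asid);  /* who owns this ASID? */
        if (!cd)
                return NULL;                    /* free on the SMMU side */

        if (cd->mm) {
                /* Already shared with this mm: just take a reference. */
                if (WARN_ON(cd->mm != mm))
                        return ERR_PTR(-EINVAL);
                refcount_inc(&cd->refs);
                return cd;
        }

        /* A private context holds it: move that context to a new ASID. */
        smmu_domain = container_of(cd, struct arm_smmu_domain, s1_cfg.cd);
        smmu = smmu_domain->smmu;
        ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
                       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
        if (ret)
                return ERR_PTR(-ENOSPC);
        cd->asid = new_asid;

        /* Rewrite the private context's CD, then flush the old ASID. */
        arm_smmu_write_ctx_desc(smmu_domain, 0, cd);
        arm_smmu_tlb_inv_asid(smmu, asid);
        xa_erase(&arm_smmu_asid_xa, asid);
        return NULL;
}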
101 return ERR_PTR(-ESRCH); in arm_smmu_alloc_shared_cd()
105 err = -ENOMEM; in arm_smmu_alloc_shared_cd()
109 refcount_set(&cd->refs, 1); in arm_smmu_alloc_shared_cd()
124 tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) | in arm_smmu_alloc_shared_cd()
142 err = -EINVAL; in arm_smmu_alloc_shared_cd()
150 cd->ttbr = virt_to_phys(mm->pgd); in arm_smmu_alloc_shared_cd()
151 cd->tcr = tcr; in arm_smmu_alloc_shared_cd()
156 cd->mair = read_sysreg(mair_el1); in arm_smmu_alloc_shared_cd()
157 cd->asid = asid; in arm_smmu_alloc_shared_cd()
158 cd->mm = mm; in arm_smmu_alloc_shared_cd()
175 arm64_mm_context_put(cd->mm); in arm_smmu_free_shared_cd()
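These lines build and release the shared context descriptor: the SMMU is pointed at the process page tables and given the same translation parameters the CPU uses, so device and CPU walk a single set of tables, and freeing the CD unpins the ASID again via arm64_mm_context_put(). A condensed, illustrative sketch of the allocation side; the real function also calls arm_smmu_share_asid() and fills in the remaining TCR fields, and vabits_actual plus the CTXDESC_CD_* macros come from arm64 and the driver header:

static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{
        u16 asid;
        struct arm_smmu_ctx_desc *cd;

        /* Pin the mm's CPU ASID so it cannot be rolled over underneath us. */
        asid = arm64_mm_context_get(mm);
        if (!asid)
                return ERR_PTR(-ESRCH);

        cd = kzalloc(sizeof(*cd), GFP_KERNEL);
        if (!cd) {
                arm64_mm_context_put(mm);
                return ERR_PTR(-ENOMEM);
        }
        refcount_set(&cd->refs, 1);

        /* Mirror the CPU's stage-1 configuration. */
        cd->ttbr = virt_to_phys(mm->pgd);
        cd->tcr  = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual);
        cd->mair = read_sysreg(mair_el1);
        cd->asid = asid;
        cd->mm   = mm;
        return cd;
}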
185 struct arm_smmu_domain *smmu_domain = smmu_mn->domain; in arm_smmu_mm_invalidate_range()
186 size_t size = end - start; in arm_smmu_mm_invalidate_range()
188 if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) in arm_smmu_mm_invalidate_range()
189 arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid, in arm_smmu_mm_invalidate_range()
191 arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size); in arm_smmu_mm_invalidate_range()
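This is the MMU-notifier hook that keeps the device in sync with the CPU when mappings are torn down: unless the SMMU supports BTM (broadcast TLB maintenance, in which case CPU TLBIs already reach it), the range is invalidated in the SMMU TLB by ASID, and the device ATCs are always invalidated by PASID. A sketch, assuming the driver's usual granule and leaf arguments for arm_smmu_tlb_inv_range_asid(), which are not visible in the lines above; note the notifier's end address is exclusive, so the size is end - start:

static void arm_smmu_mm_invalidate_range(struct mmu_notifier *mn,
                                         struct mm_struct *mm,
                                         unsigned long start, unsigned long end)
{
        struct arm_smmu_mmu_notifier *smmu_mn =
                container_of(mn, struct arm_smmu_mmu_notifier, mn);
        struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
        size_t size = end - start;      /* end is exclusive */

        /* With BTM, CPU broadcast TLBIs already invalidate the SMMU TLB. */
        if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
                arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,
                                            PAGE_SIZE, false, smmu_domain);

        /* Device ATCs are never covered by broadcast TLBI. */
        arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
}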
197 struct arm_smmu_domain *smmu_domain = smmu_mn->domain; in arm_smmu_mm_release()
200 if (smmu_mn->cleared) { in arm_smmu_mm_release()
209 arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd); in arm_smmu_mm_release()
211 arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid); in arm_smmu_mm_release()
212 arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); in arm_smmu_mm_release()
214 smmu_mn->cleared = true; in arm_smmu_mm_release()
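When the process address space goes away on exit or exec, DMA has to be quiesced before the page tables are freed. The fragments above do this by pointing the PASID at the global quiet_cd, so further transactions fail safely instead of walking freed tables, then flushing the TLB and ATC once and recording that in smmu_mn->cleared so the later unbind does not invalidate again. A sketch with locking elided:

static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
        struct arm_smmu_mmu_notifier *smmu_mn =
                container_of(mn, struct arm_smmu_mmu_notifier, mn);
        struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

        if (smmu_mn->cleared)
                return;

        /* Stop new translations for this PASID before the tables vanish. */
        arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

        /* Flush anything the SMMU or the device may still have cached. */
        arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
        arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

        smmu_mn->cleared = true;
}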
238 list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) { in arm_smmu_mmu_notifier_get()
239 if (smmu_mn->mn.mm == mm) { in arm_smmu_mmu_notifier_get()
240 refcount_inc(&smmu_mn->refs); in arm_smmu_mmu_notifier_get()
251 ret = -ENOMEM; in arm_smmu_mmu_notifier_get()
255 refcount_set(&smmu_mn->refs, 1); in arm_smmu_mmu_notifier_get()
256 smmu_mn->cd = cd; in arm_smmu_mmu_notifier_get()
257 smmu_mn->domain = smmu_domain; in arm_smmu_mmu_notifier_get()
258 smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops; in arm_smmu_mmu_notifier_get()
260 ret = mmu_notifier_register(&smmu_mn->mn, mm); in arm_smmu_mmu_notifier_get()
266 ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd); in arm_smmu_mmu_notifier_get()
270 list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers); in arm_smmu_mmu_notifier_get()
275 mmu_notifier_put(&smmu_mn->mn); in arm_smmu_mmu_notifier_get()
283 struct mm_struct *mm = smmu_mn->mn.mm; in arm_smmu_mmu_notifier_put()
284 struct arm_smmu_ctx_desc *cd = smmu_mn->cd; in arm_smmu_mmu_notifier_put()
285 struct arm_smmu_domain *smmu_domain = smmu_mn->domain; in arm_smmu_mmu_notifier_put()
287 if (!refcount_dec_and_test(&smmu_mn->refs)) in arm_smmu_mmu_notifier_put()
290 list_del(&smmu_mn->list); in arm_smmu_mmu_notifier_put()
291 arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL); in arm_smmu_mmu_notifier_put()
297 if (!smmu_mn->cleared) { in arm_smmu_mmu_notifier_put()
298 arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid); in arm_smmu_mmu_notifier_put()
299 arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0); in arm_smmu_mmu_notifier_put()
303 mmu_notifier_put(&smmu_mn->mn); in arm_smmu_mmu_notifier_put()
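arm_smmu_mmu_notifier_get()/put() keep one refcounted notifier per (domain, mm) pair: get either finds an existing entry on the domain's mmu_notifiers list or allocates one, registers it with the mm and installs the shared CD at the PASID; put undoes that in reverse once the last reference drops. The one subtlety is the cleared flag: if mm_release() already quiesced the PASID, put must not invalidate again. A sketch of the teardown side, locking elided, using the arm_smmu_free_shared_cd() helper named in the CD free fragment above:

static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
{
        struct mm_struct *mm = smmu_mn->mn.mm;
        struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
        struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

        if (!refcount_dec_and_test(&smmu_mn->refs))
                return;

        list_del(&smmu_mn->list);
        arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

        /* Skip the flush if mm_release() already did it. */
        if (!smmu_mn->cleared) {
                arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
                arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);
        }

        /* The notifier is freed after a grace period; the CD right away. */
        mmu_notifier_put(&smmu_mn->mn);
        arm_smmu_free_shared_cd(cd);
}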
316 if (!master || !master->sva_enabled) in __arm_smmu_sva_bind()
317 return ERR_PTR(-ENODEV); in __arm_smmu_sva_bind()
320 list_for_each_entry(bond, &master->bonds, list) { in __arm_smmu_sva_bind()
321 if (bond->mm == mm) { in __arm_smmu_sva_bind()
322 refcount_inc(&bond->refs); in __arm_smmu_sva_bind()
323 return &bond->sva; in __arm_smmu_sva_bind()
329 return ERR_PTR(-ENOMEM); in __arm_smmu_sva_bind()
332 ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1); in __arm_smmu_sva_bind()
336 bond->mm = mm; in __arm_smmu_sva_bind()
337 bond->sva.dev = dev; in __arm_smmu_sva_bind()
338 refcount_set(&bond->refs, 1); in __arm_smmu_sva_bind()
340 bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm); in __arm_smmu_sva_bind()
341 if (IS_ERR(bond->smmu_mn)) { in __arm_smmu_sva_bind()
342 ret = PTR_ERR(bond->smmu_mn); in __arm_smmu_sva_bind()
346 list_add(&bond->list, &master->bonds); in __arm_smmu_sva_bind()
347 return &bond->sva; in __arm_smmu_sva_bind()
363 if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1) in arm_smmu_sva_bind()
364 return ERR_PTR(-EINVAL); in arm_smmu_sva_bind()
377 if (refcount_dec_and_test(&bond->refs)) { in arm_smmu_sva_unbind()
378 list_del(&bond->list); in arm_smmu_sva_unbind()
379 arm_smmu_mmu_notifier_put(bond->smmu_mn); in arm_smmu_sva_unbind()
380 iommu_sva_free_pasid(bond->mm); in arm_smmu_sva_unbind()
390 return bond->mm->pasid; in arm_smmu_sva_get_pasid()
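For completeness, this is roughly how a device driver reaches the bind/unbind paths above through the generic SVA API. The device and function names here are hypothetical; only iommu_sva_bind_device() (which still took a drvdata argument at this point), iommu_sva_get_pasid() and iommu_sva_unbind_device() are real interfaces:

/* Hypothetical driver code: attach a process address space to the device,
 * program the PASID into it, and detach again later. */
static int mydev_attach_mm(struct device *dev, struct mm_struct *mm)
{
        struct iommu_sva *handle;
        u32 pasid;

        handle = iommu_sva_bind_device(dev, mm, NULL);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        pasid = iommu_sva_get_pasid(handle);    /* ends up as bond->mm->pasid */
        /* ... tell the device to tag its DMA with this PASID ... */

        iommu_sva_unbind_device(handle);        /* drops the bond reference */
        return 0;
}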
393 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) in arm_smmu_sva_supported() argument
403 if ((smmu->features & feat_mask) != feat_mask) in arm_smmu_sva_supported()
406 if (!(smmu->pgsize_bitmap & PAGE_SIZE)) in arm_smmu_sva_supported()
417 if (smmu->oas < oas) in arm_smmu_sva_supported()
423 if (smmu->asid_bits < asid_bits) in arm_smmu_sva_supported()
431 asid_bits--; in arm_smmu_sva_supported()
432 dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) - in arm_smmu_sva_supported()
433 num_possible_cpus() - 2); in arm_smmu_sva_supported()
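The checks above gate SVA on the SMMU being able to follow the CPU's translation regime: it must be I/O-coherent (plus VAX for 52-bit VAs), support the kernel page size, and have at least the CPU's output-address and ASID widths; the final dev_dbg() estimates how many address spaces can be bound once per-CPU reserved ASIDs are subtracted. A condensed sketch, with the CPU-side limits (read from the sanitised ID registers in the real code) shown as placeholders:

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
        /* Placeholders: the driver derives these from ID_AA64MMFR0_EL1. */
        unsigned long cpu_oas = 48, cpu_asid_bits = 16;
        u32 feat_mask = ARM_SMMU_FEAT_COHERENCY;

        if (vabits_actual == 52)
                feat_mask |= ARM_SMMU_FEAT_VAX; /* 52-bit VA needs VAX */

        if ((smmu->features & feat_mask) != feat_mask)
                return false;
        if (!(smmu->pgsize_bitmap & PAGE_SIZE))
                return false;
        if (smmu->oas < cpu_oas)                /* must cover the CPU PA range */
                return false;
        if (smmu->asid_bits < cpu_asid_bits)    /* bigger is fine, smaller is not */
                return false;
        return true;
}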
441 if (master->num_streams != 1) in arm_smmu_master_iopf_supported()
444 return master->stall_enabled; in arm_smmu_master_iopf_supported()
449 if (!(master->smmu->features & ARM_SMMU_FEAT_SVA)) in arm_smmu_master_sva_supported()
453 return master->ssid_bits; in arm_smmu_master_sva_supported()
461 enabled = master->sva_enabled; in arm_smmu_master_sva_enabled()
469 struct device *dev = master->dev; in arm_smmu_master_sva_enable_iopf()
473 * Others have device-specific fault handlers and don't need IOPF. in arm_smmu_master_sva_enable_iopf()
478 if (!master->iopf_enabled) in arm_smmu_master_sva_enable_iopf()
479 return -EINVAL; in arm_smmu_master_sva_enable_iopf()
481 ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev); in arm_smmu_master_sva_enable_iopf()
487 iopf_queue_remove_device(master->smmu->evtq.iopf, dev); in arm_smmu_master_sva_enable_iopf()
495 struct device *dev = master->dev; in arm_smmu_master_sva_disable_iopf()
497 if (!master->iopf_enabled) in arm_smmu_master_sva_disable_iopf()
501 iopf_queue_remove_device(master->smmu->evtq.iopf, dev); in arm_smmu_master_sva_disable_iopf()
511 master->sva_enabled = true; in arm_smmu_master_enable_sva()
520 if (!list_empty(&master->bonds)) { in arm_smmu_master_disable_sva()
521 dev_err(master->dev, "cannot disable SVA, device is bound\n"); in arm_smmu_master_disable_sva()
523 return -EBUSY; in arm_smmu_master_disable_sva()
526 master->sva_enabled = false; in arm_smmu_master_disable_sva()
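Finally, per-master enablement: SVA can only be switched on after IOPF (the I/O page fault queue) is set up for the device, and can only be switched off while no process is bound. A sketch of the two entry points, assuming the file-local sva_lock mutex that serialises these paths:

int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
        int ret;

        mutex_lock(&sva_lock);
        ret = arm_smmu_master_sva_enable_iopf(master);
        if (!ret)
                master->sva_enabled = true;
        mutex_unlock(&sva_lock);

        return ret;
}

int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
        mutex_lock(&sva_lock);
        if (!list_empty(&master->bonds)) {
                /* A process is still bound through __arm_smmu_sva_bind(). */
                dev_err(master->dev, "cannot disable SVA, device is bound\n");
                mutex_unlock(&sva_lock);
                return -EBUSY;
        }
        arm_smmu_master_sva_disable_iopf(master);
        master->sva_enabled = false;
        mutex_unlock(&sva_lock);

        return 0;
}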