Lines matching "smmu-v3": excerpts from arm-smmu-v3-sva.c, the SMMUv3 shared virtual addressing (SVA) support code. The matching lines below are non-contiguous fragments, not complete functions, and are grouped by the function they appear in.
// SPDX-License-Identifier: GPL-2.0

#include "arm-smmu-v3.h"
#include "../../iommu-sva-lib.h"
#include "../../io-pgtable-arm.h"
/* Matching lines in arm_smmu_share_asid(): */

 * Check if the CPU ASID is available on the SMMU side. If a private context

	struct arm_smmu_device *smmu;

	if (cd->mm) {
		if (WARN_ON(cd->mm != mm))
			return ERR_PTR(-EINVAL);

		refcount_inc(&cd->refs);

	smmu = smmu_domain->smmu;

		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);

		return ERR_PTR(-ENOSPC);

	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID

	cd->asid = new_asid;

	arm_smmu_tlb_inv_asid(smmu, asid);
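The xa_alloc() call above (visible only through its continuation line) picks a replacement ASID from the range the SMMU can address, 1 to 2^asid_bits - 1, after which the old CPU ASID is invalidated with arm_smmu_tlb_inv_asid(). A minimal, self-contained sketch of that allocation idiom follows; the xarray name, helper name, and error handling are illustrative assumptions, not the driver's code.

/*
 * Sketch of the xa_alloc()/XA_LIMIT() idiom used above: pick a free ID in
 * [1, 2^asid_bits - 1] and store @entry under it.
 */
#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(example_asid_xa);	/* allocation starts at 1 */

static int example_alloc_asid(void *entry, unsigned int asid_bits, u32 *out_asid)
{
	int ret;

	ret = xa_alloc(&example_asid_xa, out_asid, entry,
		       XA_LIMIT(1, (1 << asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return -ENOSPC;	/* mirror the fragment's error mapping */
	return 0;
}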
/* Matching lines in arm_smmu_alloc_shared_cd(): */

		err = -ESRCH;

		err = -ENOMEM;

	refcount_set(&cd->refs, 1);

	tcr = FIELD_PREP(CTXDESC_CD_0_TCR_T0SZ, 64ULL - vabits_actual) |

		err = -EINVAL;

	cd->ttbr = virt_to_phys(mm->pgd);
	cd->tcr = tcr;

	cd->mair = read_sysreg(mair_el1);
	cd->asid = asid;
	cd->mm = mm;
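The shared context descriptor mirrors the CPU's translation setup: TTBR comes from the mm's pgd, MAIR is read straight from MAIR_EL1, and the TCR's T0SZ field encodes the virtual address width as 64 - vabits_actual (16 for 48-bit VAs). Below is a hedged sketch of the FIELD_PREP() packing; the mask name and its bit range, and the helper name, are stand-ins for illustration.

/*
 * Sketch of the FIELD_PREP() packing used above. With 48-bit virtual
 * addresses (vabits_actual == 48), T0SZ is 64 - 48 = 16. The mask below
 * assumes T0SZ occupies bits [5:0] of CD word 0.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_CD_0_TCR_T0SZ	GENMASK_ULL(5, 0)

static u64 example_pack_t0sz(unsigned int vabits)
{
	return FIELD_PREP(EXAMPLE_CD_0_TCR_T0SZ, 64ULL - vabits);
}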
/* Matching lines in arm_smmu_free_shared_cd(): */

	arm64_mm_context_put(cd->mm);
	mmdrop(cd->mm);
/* Matching lines in arm_smmu_mm_invalidate_range(): */

	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	size = end - start;

	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM))
		arm_smmu_tlb_inv_range_asid(start, size, smmu_mn->cd->asid,

	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, start, size);
/* Matching lines in arm_smmu_mm_release(): */

	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (smmu_mn->cleared) {

	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, &quiet_cd);

	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
	arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	smmu_mn->cleared = true;
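arm_smmu_mm_invalidate_range() and arm_smmu_mm_release() are MMU notifier callbacks: later fragments set smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops and register with mmu_notifier_register(). The actual ops table is not among the matched lines; the sketch below shows how such callbacks are plausibly wired up, assuming the .invalidate_range/.release/.free_notifier hooks of this kernel generation, with hypothetical example_* names.

/*
 * Sketch of an mmu_notifier_ops table wiring callbacks like the two above.
 */
#include <linux/mmu_notifier.h>
#include <linux/slab.h>

static void example_mm_invalidate_range(struct mmu_notifier *mn,
					struct mm_struct *mm,
					unsigned long start, unsigned long end)
{
	/* invalidate IOMMU TLB and ATC entries covering [start, end) */
}

static void example_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* quiesce DMA for this address space before it is torn down */
}

static void example_free_notifier(struct mmu_notifier *mn)
{
	kfree(mn);	/* deferred free, runs after an RCU grace period */
}

static const struct mmu_notifier_ops example_mmu_notifier_ops = {
	.invalidate_range	= example_mm_invalidate_range,
	.release		= example_mm_release,
	.free_notifier		= example_free_notifier,
};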
/* Matching lines in arm_smmu_mmu_notifier_get(): */

	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
		if (smmu_mn->mn.mm == mm) {
			refcount_inc(&smmu_mn->refs);

		ret = -ENOMEM;

	refcount_set(&smmu_mn->refs, 1);
	smmu_mn->cd = cd;
	smmu_mn->domain = smmu_domain;
	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;

	ret = mmu_notifier_register(&smmu_mn->mn, mm);

	ret = arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, cd);

	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);

	mmu_notifier_put(&smmu_mn->mn);
/* Matching lines in arm_smmu_mmu_notifier_put(): */

	struct mm_struct *mm = smmu_mn->mn.mm;
	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;

	if (!refcount_dec_and_test(&smmu_mn->refs))

	list_del(&smmu_mn->list);
	arm_smmu_write_ctx_desc(smmu_domain, mm->pasid, NULL);

	if (!smmu_mn->cleared) {
		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
		arm_smmu_atc_inv_domain(smmu_domain, mm->pasid, 0, 0);

	mmu_notifier_put(&smmu_mn->mn);
/* Matching lines in __arm_smmu_sva_bind(): */

	if (!master || !master->sva_enabled)
		return ERR_PTR(-ENODEV);

	list_for_each_entry(bond, &master->bonds, list) {
		if (bond->mm == mm) {
			refcount_inc(&bond->refs);
			return &bond->sva;

		return ERR_PTR(-ENOMEM);

	ret = iommu_sva_alloc_pasid(mm, 1, (1U << master->ssid_bits) - 1);

	bond->mm = mm;
	bond->sva.dev = dev;
	refcount_set(&bond->refs, 1);

	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
	if (IS_ERR(bond->smmu_mn)) {
		ret = PTR_ERR(bond->smmu_mn);

	list_add(&bond->list, &master->bonds);
	return &bond->sva;
/* Matching lines in arm_smmu_sva_bind(): */

	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
		return ERR_PTR(-EINVAL);
/* Matching lines in arm_smmu_sva_unbind(): */

	if (refcount_dec_and_test(&bond->refs)) {
		list_del(&bond->list);
		arm_smmu_mmu_notifier_put(bond->smmu_mn);
/* Matching line in arm_smmu_sva_get_pasid(): */

	return bond->mm->pasid;
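These bind/unbind/get_pasid fragments are the SMMUv3 backend of the generic IOMMU SVA API that device drivers call. A rough driver-side view is sketched below; the helper name is hypothetical, and the prototypes (including the drvdata argument to iommu_sva_bind_device()) follow the kernel generation this file appears to belong to, so treat it as a sketch rather than a template.

/*
 * Driver-side flow that ends up in the arm_smmu_sva_* callbacks above.
 */
#include <linux/iommu.h>
#include <linux/sched.h>

static int example_attach_process(struct device *dev)
{
	struct iommu_sva *handle;
	u32 pasid;

	/* Bind the current process's address space to the device */
	handle = iommu_sva_bind_device(dev, current->mm, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* PASID to put in the device's DMA descriptors (mm->pasid above) */
	pasid = iommu_sva_get_pasid(handle);

	/* ... issue DMA transactions tagged with @pasid ... */

	iommu_sva_unbind_device(handle);
	return 0;
}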
/* Matching lines in arm_smmu_sva_supported(): */

bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)

	if ((smmu->features & feat_mask) != feat_mask)

	if (!(smmu->pgsize_bitmap & PAGE_SIZE))

	if (smmu->oas < oas)

	if (smmu->asid_bits < asid_bits)

		asid_bits--;
	dev_dbg(smmu->dev, "%d shared contexts\n", (1 << asid_bits) -
		num_possible_cpus() - 2);
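As a worked example of the dev_dbg() arithmetic: with asid_bits == 16, kernel page-table isolation enabled (the asid_bits-- above halves the usable space to 2^15), and 8 possible CPUs, the message reports 32768 - 8 - 2 = 32758 shared contexts, roughly the number of ASIDs the arm64 allocator can pin for SVA after keeping back per-CPU reserved ASIDs plus two more. The exact meaning of the subtracted terms is an assumption based on the matching lines, not something the fragment states.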
/* Matching lines in arm_smmu_master_iopf_supported(): */

	if (master->num_streams != 1)

	return master->stall_enabled;
/* Matching lines in arm_smmu_master_sva_supported(): */

	if (!(master->smmu->features & ARM_SMMU_FEAT_SVA))

	return master->ssid_bits;
/* Matching line in arm_smmu_master_sva_enabled(): */

	enabled = master->sva_enabled;
/* Matching lines in arm_smmu_master_sva_enable_iopf(): */

	struct device *dev = master->dev;

	 * Others have device-specific fault handlers and don't need IOPF.

	if (!master->iopf_enabled)
		return -EINVAL;

	ret = iopf_queue_add_device(master->smmu->evtq.iopf, dev);

	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
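The "if (!master->iopf_enabled) return -EINVAL" check means a driver that wants recoverable I/O page faults must enable IOPF on the device before enabling SVA. A hedged sketch of that driver-side ordering, using the iommu_dev_enable_feature() interface of this kernel generation; the helper name and error unwinding are illustrative.

/*
 * Enable features in the order the check above enforces: IOPF, then SVA.
 */
#include <linux/iommu.h>

static int example_enable_sva_with_iopf(struct device *dev)
{
	int ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_IOPF);
	if (ret)
		return ret;

	ret = iommu_dev_enable_feature(dev, IOMMU_DEV_FEAT_SVA);
	if (ret)
		iommu_dev_disable_feature(dev, IOMMU_DEV_FEAT_IOPF);

	return ret;
}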
/* Matching lines in arm_smmu_master_sva_disable_iopf(): */

	struct device *dev = master->dev;

	if (!master->iopf_enabled)

	iopf_queue_remove_device(master->smmu->evtq.iopf, dev);
/* Matching line in arm_smmu_master_enable_sva(): */

	master->sva_enabled = true;
/* Matching lines in arm_smmu_master_disable_sva(): */

	if (!list_empty(&master->bonds)) {
		dev_err(master->dev, "cannot disable SVA, device is bound\n");

		return -EBUSY;

	master->sva_enabled = false;