Lines matching full:as in drivers/iommu/tegra-smmu.c

277 struct tegra_smmu_as *as; in tegra_smmu_domain_alloc() local
282 as = kzalloc(sizeof(*as), GFP_KERNEL); in tegra_smmu_domain_alloc()
283 if (!as) in tegra_smmu_domain_alloc()
286 as->attr = SMMU_PD_READABLE | SMMU_PD_WRITABLE | SMMU_PD_NONSECURE; in tegra_smmu_domain_alloc()
288 as->pd = alloc_page(GFP_KERNEL | __GFP_DMA | __GFP_ZERO); in tegra_smmu_domain_alloc()
289 if (!as->pd) { in tegra_smmu_domain_alloc()
290 kfree(as); in tegra_smmu_domain_alloc()
294 as->count = kcalloc(SMMU_NUM_PDE, sizeof(u32), GFP_KERNEL); in tegra_smmu_domain_alloc()
295 if (!as->count) { in tegra_smmu_domain_alloc()
296 __free_page(as->pd); in tegra_smmu_domain_alloc()
297 kfree(as); in tegra_smmu_domain_alloc()
301 as->pts = kcalloc(SMMU_NUM_PDE, sizeof(*as->pts), GFP_KERNEL); in tegra_smmu_domain_alloc()
302 if (!as->pts) { in tegra_smmu_domain_alloc()
303 kfree(as->count); in tegra_smmu_domain_alloc()
304 __free_page(as->pd); in tegra_smmu_domain_alloc()
305 kfree(as); in tegra_smmu_domain_alloc()
309 spin_lock_init(&as->lock); in tegra_smmu_domain_alloc()
312 as->domain.geometry.aperture_start = 0; in tegra_smmu_domain_alloc()
313 as->domain.geometry.aperture_end = 0xffffffff; in tegra_smmu_domain_alloc()
314 as->domain.geometry.force_aperture = true; in tegra_smmu_domain_alloc()
316 return &as->domain; in tegra_smmu_domain_alloc()
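
The fragments above show tegra_smmu_domain_alloc() building the address space in stages: the as structure itself, its page-directory page, the per-PDE use counters and the page-table pointer array, with each later failure freeing whatever was already allocated. A minimal userspace C sketch of that staged-unwind pattern follows; the names and the NUM_PDE value are placeholders, not the driver's code, and the driver unwinds inline at each failure site rather than through goto labels. tegra_smmu_domain_free() releases the same three allocations again in reverse.

#include <stdlib.h>

#define NUM_PDE 1024	/* assumed SMMU_NUM_PDE: one counter and one page-table
			   pointer per page-directory entry */

struct as_sketch {
	void *pd;		/* page directory (alloc_page() in the driver) */
	unsigned int *count;	/* per-PDE use counters (as->count) */
	void **pts;		/* per-PDE page-table pointers (as->pts) */
};

static struct as_sketch *as_alloc(void)
{
	struct as_sketch *as = calloc(1, sizeof(*as));

	if (!as)
		return NULL;

	as->pd = calloc(1, 4096);		/* zeroed, like __GFP_ZERO */
	if (!as->pd)
		goto free_as;

	as->count = calloc(NUM_PDE, sizeof(*as->count));
	if (!as->count)
		goto free_pd;

	as->pts = calloc(NUM_PDE, sizeof(*as->pts));
	if (!as->pts)
		goto free_count;

	return as;

free_count:
	free(as->count);
free_pd:
	free(as->pd);
free_as:
	free(as);
	return NULL;
}
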
321 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_domain_free() local
325 WARN_ON_ONCE(as->use_count); in tegra_smmu_domain_free()
326 kfree(as->count); in tegra_smmu_domain_free()
327 kfree(as->pts); in tegra_smmu_domain_free()
328 kfree(as); in tegra_smmu_domain_free()
409 struct tegra_smmu_as *as) in tegra_smmu_as_prepare() argument
416 if (as->use_count > 0) { in tegra_smmu_as_prepare()
417 as->use_count++; in tegra_smmu_as_prepare()
421 as->pd_dma = dma_map_page(smmu->dev, as->pd, 0, SMMU_SIZE_PD, in tegra_smmu_as_prepare()
423 if (dma_mapping_error(smmu->dev, as->pd_dma)) { in tegra_smmu_as_prepare()
429 if (!smmu_dma_addr_valid(smmu, as->pd_dma)) { in tegra_smmu_as_prepare()
434 err = tegra_smmu_alloc_asid(smmu, &as->id); in tegra_smmu_as_prepare()
438 smmu_flush_ptc(smmu, as->pd_dma, 0); in tegra_smmu_as_prepare()
439 smmu_flush_tlb_asid(smmu, as->id); in tegra_smmu_as_prepare()
441 smmu_writel(smmu, as->id & 0x7f, SMMU_PTB_ASID); in tegra_smmu_as_prepare()
442 value = SMMU_PTB_DATA_VALUE(as->pd_dma, as->attr); in tegra_smmu_as_prepare()
446 as->smmu = smmu; in tegra_smmu_as_prepare()
447 as->use_count++; in tegra_smmu_as_prepare()
454 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_prepare()
462 struct tegra_smmu_as *as) in tegra_smmu_as_unprepare() argument
466 if (--as->use_count > 0) { in tegra_smmu_as_unprepare()
471 tegra_smmu_free_asid(smmu, as->id); in tegra_smmu_as_unprepare()
473 dma_unmap_page(smmu->dev, as->pd_dma, SMMU_SIZE_PD, DMA_TO_DEVICE); in tegra_smmu_as_unprepare()
475 as->smmu = NULL; in tegra_smmu_as_unprepare()
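
tegra_smmu_as_prepare() and tegra_smmu_as_unprepare() gate the expensive work on as->use_count: only the first attach DMA-maps the page directory, allocates an ASID and programs SMMU_PTB_ASID/SMMU_PTB_DATA, and only the last detach tears that down. A hedged sketch of the counting, with stub hw_enable()/hw_disable() hooks standing in for that hardware work (the helpers are illustrative, not driver functions):

/* Stubs for the real 0->1 work: dma_map_page() on the page directory,
 * tegra_smmu_alloc_asid(), the PTC/TLB flushes and the PTB register writes. */
static int hw_enable(unsigned int *id)  { *id = 0; return 0; }
static void hw_disable(unsigned int id) { (void)id; }

struct as_refs {
	unsigned int use_count;
	unsigned int id;	/* ASID, only valid while use_count > 0 */
};

static int as_prepare(struct as_refs *as)
{
	if (as->use_count > 0) {	/* already live: just add a reference */
		as->use_count++;
		return 0;
	}

	if (hw_enable(&as->id))		/* first user: program the hardware */
		return -1;

	as->use_count++;
	return 0;
}

static void as_unprepare(struct as_refs *as)
{
	if (--as->use_count > 0)	/* other devices still use this AS */
		return;

	hw_disable(as->id);		/* last user gone: release the ASID etc. */
}
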
485 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_attach_dev() local
493 err = tegra_smmu_as_prepare(smmu, as); in tegra_smmu_attach_dev()
497 tegra_smmu_enable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
507 tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_attach_dev()
508 tegra_smmu_as_unprepare(smmu, as); in tegra_smmu_attach_dev()
517 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_detach_dev() local
518 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_detach_dev()
525 tegra_smmu_disable(smmu, fwspec->ids[index], as->id); in tegra_smmu_detach_dev()
526 tegra_smmu_as_unprepare(smmu, as); in tegra_smmu_detach_dev()
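
tegra_smmu_attach_dev() enables each stream ID in the device's fwspec and, on failure, walks back over the IDs it already enabled (the disable/unprepare calls in the error path above). A sketch of that enable-all-or-roll-back loop, with hypothetical enable_stream()/disable_stream() helpers and the per-ID prepare/unprepare step omitted for brevity:

/* Placeholders for the driver's tegra_smmu_enable()/tegra_smmu_disable(),
 * which program the ASID for one memory-client stream ID. */
static int  enable_stream(unsigned int id)  { (void)id; return 0; }
static void disable_stream(unsigned int id) { (void)id; }

static int attach_all(const unsigned int *ids, unsigned int num_ids)
{
	unsigned int index;
	int err = 0;

	for (index = 0; index < num_ids; index++) {
		err = enable_stream(ids[index]);
		if (err)
			goto disable;
	}

	return 0;

disable:
	/* Roll back only the streams that were already enabled. */
	while (index--)
		disable_stream(ids[index]);

	return err;
}
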
530 static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pde() argument
534 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pde()
535 u32 *pd = page_address(as->pd); in tegra_smmu_set_pde()
542 dma_sync_single_range_for_device(smmu->dev, as->pd_dma, offset, in tegra_smmu_set_pde()
546 smmu_flush_ptc(smmu, as->pd_dma, offset); in tegra_smmu_set_pde()
547 smmu_flush_tlb_section(smmu, as->id, iova); in tegra_smmu_set_pde()
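
tegra_smmu_set_pde() updates one page-directory word and then makes the change visible in order: sync just that word to the device, invalidate the SMMU's page-table cache for it, and flush the TLB entry for the affected section. A sketch of that ordering with no-op placeholder hooks (the real calls are dma_sync_single_range_for_device(), smmu_flush_ptc() and smmu_flush_tlb_section()):

#include <stdint.h>
#include <stddef.h>

static void sync_pd_for_device(size_t offset, size_t len)  { (void)offset; (void)len; }
static void flush_ptc(size_t offset)                       { (void)offset; }
static void flush_tlb_section(unsigned int pd_index)       { (void)pd_index; }

static void set_pde(uint32_t *pd, unsigned int pd_index, uint32_t value)
{
	size_t offset = pd_index * sizeof(*pd);

	pd[pd_index] = value;			/* update the CPU copy first */
	sync_pd_for_device(offset, sizeof(*pd));/* push that one word to the device */
	flush_ptc(offset);			/* drop the SMMU's cached PDE */
	flush_tlb_section(pd_index);		/* drop cached translations */
}
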
558 static u32 *tegra_smmu_pte_lookup(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_pte_lookup() argument
562 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_lookup()
566 pt_page = as->pts[pd_index]; in tegra_smmu_pte_lookup()
570 pd = page_address(as->pd); in tegra_smmu_pte_lookup()
576 static u32 *as_get_pte(struct tegra_smmu_as *as, dma_addr_t iova, in as_get_pte() argument
580 struct tegra_smmu *smmu = as->smmu; in as_get_pte()
582 if (!as->pts[pde]) { in as_get_pte()
599 as->pts[pde] = page; in as_get_pte()
601 tegra_smmu_set_pde(as, iova, SMMU_MK_PDE(dma, SMMU_PDE_ATTR | in as_get_pte()
606 u32 *pd = page_address(as->pd); in as_get_pte()
611 return tegra_smmu_pte_offset(as->pts[pde], iova); in as_get_pte()
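
tegra_smmu_pte_lookup() and as_get_pte() walk a two-level table: the top bits of the 32-bit IOVA pick the page-directory entry (and the as->pts[] slot), the next bits pick the PTE inside that page table; a lookup returns NULL when as->pts[pd_index] is empty, while as_get_pte() allocates a table on demand. Assuming 4 KiB pages and 1024-entry directory and page tables (10 + 10 + 12 bits, matching the 0-0xffffffff aperture above), the index arithmetic looks like this; the helper names are illustrative:

#include <stdint.h>

#define SMMU_PTE_SHIFT	12u	/* assumed: 4 KiB pages */
#define SMMU_PDE_SHIFT	22u	/* assumed: 1024 PTEs per page table */

/* Which page-directory entry (and as->pts[] slot) covers this IOVA. */
static unsigned int iova_pd_index(uint32_t iova)
{
	return iova >> SMMU_PDE_SHIFT;
}

/* Which PTE inside that page table maps this IOVA. */
static unsigned int iova_pt_index(uint32_t iova)
{
	return (iova >> SMMU_PTE_SHIFT) &
	       ((1u << (SMMU_PDE_SHIFT - SMMU_PTE_SHIFT)) - 1);
}
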
614 static void tegra_smmu_pte_get_use(struct tegra_smmu_as *as, unsigned long iova) in tegra_smmu_pte_get_use() argument
618 as->count[pd_index]++; in tegra_smmu_pte_get_use()
621 static void tegra_smmu_pte_put_use(struct tegra_smmu_as *as, unsigned long iova) in tegra_smmu_pte_put_use() argument
624 struct page *page = as->pts[pde]; in tegra_smmu_pte_put_use()
630 if (--as->count[pde] == 0) { in tegra_smmu_pte_put_use()
631 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_pte_put_use()
632 u32 *pd = page_address(as->pd); in tegra_smmu_pte_put_use()
635 tegra_smmu_set_pde(as, iova, 0); in tegra_smmu_pte_put_use()
639 as->pts[pde] = NULL; in tegra_smmu_pte_put_use()
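
Every mapped PTE bumps as->count[] for its page-directory entry; when the counter drops back to zero, tegra_smmu_pte_put_use() clears the PDE, unmaps and frees the page-table page and resets as->pts[pde]. A small sketch of that per-PDE bookkeeping, with plain free() standing in for the DMA unmap and page free:

#include <stdlib.h>

/* One slot per page-directory entry, mirroring as->count[] and as->pts[]. */
struct pde_slot {
	unsigned int count;	/* live PTEs in this page table */
	void *pt_page;		/* page-table page, NULL when absent */
};

static void pte_get_use(struct pde_slot *slot)
{
	slot->count++;
}

static void pte_put_use(struct pde_slot *slot)
{
	if (--slot->count > 0)
		return;

	/* Last PTE in this section is gone: the driver clears the PDE
	 * (tegra_smmu_set_pde(as, iova, 0)) and releases the page. */
	free(slot->pt_page);
	slot->pt_page = NULL;
}
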
643 static void tegra_smmu_set_pte(struct tegra_smmu_as *as, unsigned long iova, in tegra_smmu_set_pte() argument
646 struct tegra_smmu *smmu = as->smmu; in tegra_smmu_set_pte()
654 smmu_flush_tlb_group(smmu, as->id, iova); in tegra_smmu_set_pte()
658 static struct page *as_get_pde_page(struct tegra_smmu_as *as, in as_get_pde_page() argument
663 struct page *page = as->pts[pde]; in as_get_pde_page()
675 spin_unlock_irqrestore(&as->lock, *flags); in as_get_pde_page()
680 spin_lock_irqsave(&as->lock, *flags); in as_get_pde_page()
687 if (as->pts[pde]) { in as_get_pde_page()
691 page = as->pts[pde]; in as_get_pde_page()
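
as_get_pde_page() may have to allocate a page-table page while the caller holds as->lock, so for a sleeping allocation it drops the lock, allocates, re-takes the lock and re-checks as->pts[pde] in case another thread installed a page in the meantime. A userspace sketch of that drop-allocate-recheck pattern, using a pthread mutex in place of the driver's IRQ-safe spinlock and installing the page directly for brevity (the driver leaves installation to as_get_pte()):

#include <pthread.h>
#include <stdlib.h>

#define NUM_PDE 1024

static pthread_mutex_t as_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pts[NUM_PDE];		/* stands in for as->pts[] */

/* Called with as_lock held; returns with it held again. */
static void *get_pde_page(unsigned int pde)
{
	void *page = pts[pde];

	if (page)
		return page;

	/* The allocation may sleep, so it cannot happen under the lock. */
	pthread_mutex_unlock(&as_lock);
	page = calloc(1, 4096);
	pthread_mutex_lock(&as_lock);

	/* Another thread may have installed a page while the lock was
	 * dropped; if so, discard ours and use theirs. */
	if (pts[pde]) {
		free(page);
		return pts[pde];
	}

	pts[pde] = page;
	return page;			/* NULL if the allocation failed */
}
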
702 struct tegra_smmu_as *as = to_smmu_as(domain); in __tegra_smmu_map() local
708 page = as_get_pde_page(as, iova, gfp, flags); in __tegra_smmu_map()
712 pte = as_get_pte(as, iova, &pte_dma, page); in __tegra_smmu_map()
718 tegra_smmu_pte_get_use(as, iova); in __tegra_smmu_map()
728 tegra_smmu_set_pte(as, iova, pte, pte_dma, in __tegra_smmu_map()
738 struct tegra_smmu_as *as = to_smmu_as(domain); in __tegra_smmu_unmap() local
742 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); in __tegra_smmu_unmap()
746 tegra_smmu_set_pte(as, iova, pte, pte_dma, 0); in __tegra_smmu_unmap()
747 tegra_smmu_pte_put_use(as, iova); in __tegra_smmu_unmap()
755 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_map() local
759 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_map()
761 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_map()
769 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_unmap() local
772 spin_lock_irqsave(&as->lock, flags); in tegra_smmu_unmap()
774 spin_unlock_irqrestore(&as->lock, flags); in tegra_smmu_unmap()
782 struct tegra_smmu_as *as = to_smmu_as(domain); in tegra_smmu_iova_to_phys() local
787 pte = tegra_smmu_pte_lookup(as, iova, &pte_dma); in tegra_smmu_iova_to_phys()
791 pfn = *pte & as->smmu->pfn_mask; in tegra_smmu_iova_to_phys()
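
tegra_smmu_iova_to_phys() masks the PTE with as->smmu->pfn_mask to recover the page frame number and rebuilds the physical address from that frame plus the offset inside the page. A sketch assuming 4 KiB SMMU pages:

#include <stdint.h>

#define SMMU_PAGE_SHIFT	12u	/* assumed 4 KiB SMMU pages */

static uint64_t pte_to_phys(uint32_t pte, uint32_t pfn_mask, uint32_t iova)
{
	uint64_t pfn = pte & pfn_mask;			/* strip attribute bits */

	return (pfn << SMMU_PAGE_SHIFT) |		/* frame base ...        */
	       (iova & ((1u << SMMU_PAGE_SHIFT) - 1));	/* ... plus page offset  */
}
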
954 * the SMMU parent device is the same as the MC, so the reference count in tegra_smmu_of_xlate()