Lines Matching refs:rk_domain

681 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_iova_to_phys() local
687 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
689 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_iova_to_phys()
701 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_iova_to_phys()
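
The rk_iommu_iova_to_phys() lines above read one directory entry (DTE) from rk_domain->dt and then the page-table entry (PTE) it points to, all under dt_lock. Below is a minimal, self-contained sketch of the 32-bit IOVA split that drives this two-level walk; the 10/10/12-bit split is assumed from this driver family's 1024-entry tables and 4 KiB pages, and the helper names are illustrative, not the driver's.

    #include <stdint.h>

    /* Assumed IOVA layout: [31:22] DTE index, [21:12] PTE index, [11:0] offset. */
    static inline uint32_t iova_dte_index(uint32_t iova)   { return (iova >> 22) & 0x3ff; }
    static inline uint32_t iova_pte_index(uint32_t iova)   { return (iova >> 12) & 0x3ff; }
    static inline uint32_t iova_page_offset(uint32_t iova) { return iova & 0xfff; }

    /* Combine an already-validated PTE with the page offset, as the locked
     * section of rk_iommu_iova_to_phys() ultimately does; the PTE is assumed
     * to carry the physical page address in its upper 20 bits. */
    static inline uint64_t pte_to_phys(uint32_t pte, uint32_t iova)
    {
            return (uint64_t)(pte & 0xfffff000u) | iova_page_offset(iova);
    }
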
706 static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain, in rk_iommu_zap_iova() argument
713 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
714 list_for_each(pos, &rk_domain->iommus) { in rk_iommu_zap_iova()
732 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_zap_iova()
735 static void rk_iommu_zap_iova_first_last(struct rk_iommu_domain *rk_domain, in rk_iommu_zap_iova_first_last() argument
738 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE); in rk_iommu_zap_iova_first_last()
740 rk_iommu_zap_iova(rk_domain, iova + size - SPAGE_SIZE, in rk_iommu_zap_iova_first_last()
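
rk_iommu_zap_iova_first_last() invalidates only the first and the last small page of a freshly mapped range rather than the whole run, presumably because only the boundary pages of a new mapping can share a directory or page-table entry, and hence a cached translation, with an existing neighbouring mapping. A hedged restatement of the pattern, with the per-page zap helper left abstract:

    #include <stddef.h>
    #include <stdint.h>

    #define SPAGE_SIZE 4096u   /* the driver's small-page size */

    /* Zap only the end points of [iova, iova + size); 'zap_one' stands in for
     * rk_iommu_zap_iova() walking every attached IOMMU instance. */
    static void zap_first_last(void (*zap_one)(uint32_t iova, size_t size),
                               uint32_t iova, size_t size)
    {
            zap_one(iova, SPAGE_SIZE);
            if (size > SPAGE_SIZE)
                    zap_one(iova + size - SPAGE_SIZE, SPAGE_SIZE);
    }
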
744 static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain, in rk_dte_get_page_table() argument
752 assert_spin_locked(&rk_domain->dt_lock); in rk_dte_get_page_table()
755 dte_addr = &rk_domain->dt[dte_index]; in rk_dte_get_page_table()
774 rk_table_flush(rk_domain, in rk_dte_get_page_table()
775 rk_domain->dt_dma + dte_index * sizeof(u32), 1); in rk_dte_get_page_table()
781 static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain, in rk_iommu_unmap_iova() argument
788 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_unmap_iova()
798 rk_table_flush(rk_domain, pte_dma, pte_count); in rk_iommu_unmap_iova()
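
rk_iommu_unmap_iova() clears a contiguous run of PTEs and then flushes that slice of the page table (rk_table_flush()) so the hardware observes the change. A rough sketch of the clear loop; the all-zero "invalid" encoding is an assumption, and the flush step is elided:

    #include <stddef.h>
    #include <stdint.h>

    #define SPAGE_SIZE 4096u

    /* Clear size/SPAGE_SIZE entries starting at pte_addr and report how many
     * bytes were unmapped (the driver returns this to the IOMMU core). */
    static size_t clear_pte_range(uint32_t *pte_addr, size_t size)
    {
            size_t pte_count = size / SPAGE_SIZE;
            size_t i;

            for (i = 0; i < pte_count; i++)
                    pte_addr[i] = 0;          /* assumed "not present" encoding */

            /* caller: flush pte_addr[0..pte_count) to memory, then return */
            return pte_count * SPAGE_SIZE;
    }
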
803 static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr, in rk_iommu_map_iova() argument
811 assert_spin_locked(&rk_domain->dt_lock); in rk_iommu_map_iova()
824 rk_table_flush(rk_domain, pte_dma, pte_total); in rk_iommu_map_iova()
832 rk_iommu_zap_iova_first_last(rk_domain, iova, size); in rk_iommu_map_iova()
837 rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, in rk_iommu_map_iova()
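
rk_iommu_map_iova() fills the PTE range, flushes it, and zaps the boundary pages; if it finds a slot already in use it rolls back through rk_iommu_unmap_iova() and fails the whole request. A hedged sketch of that fill-or-roll-back shape (the validity bit and address encoding are assumptions):

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SPAGE_SIZE 4096u

    static int fill_pte_range(uint32_t *pte_addr, uint32_t paddr, size_t size)
    {
            size_t pte_total = size / SPAGE_SIZE;
            size_t i;

            for (i = 0; i < pte_total; i++) {
                    if (pte_addr[i] & 1u) {          /* assumed "valid" bit */
                            while (i--)              /* undo partial progress */
                                    pte_addr[i] = 0;
                            return -EADDRINUSE;
                    }
                    pte_addr[i] = (paddr + i * SPAGE_SIZE) | 1u;
            }
            /* caller: flush pte_addr[0..pte_total), then zap first/last page */
            return 0;
    }
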
851 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_map() local
858 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_map()
867 page_table = rk_dte_get_page_table(rk_domain, iova); in rk_iommu_map()
869 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
873 dte_index = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_map()
878 ret = rk_iommu_map_iova(rk_domain, pte_addr, pte_dma, iova, in rk_iommu_map()
881 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_map()
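
rk_iommu_map() is the driver's map callback: it takes dt_lock, lets rk_dte_get_page_table() create and flush a second-level table on demand, fills the PTEs, and unlocks. From the consumer side this is reached through the generic IOMMU API; a minimal, hedged caller sketch follows (the exact iommu_map() signature varies by kernel version, and recent kernels take an extra gfp_t argument):

    #include <linux/errno.h>
    #include <linux/iommu.h>
    #include <linux/sizes.h>

    /* Hypothetical caller: map one 4 KiB page at a chosen IOVA for a master
     * device already attached to (and translated by) a Rockchip IOMMU domain. */
    static int example_map_one_page(struct device *dev, unsigned long iova,
                                    phys_addr_t paddr)
    {
            struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

            if (!domain)
                    return -ENODEV;

            /* On recent kernels add GFP_KERNEL as the final argument. */
            return iommu_map(domain, iova, paddr, SZ_4K,
                             IOMMU_READ | IOMMU_WRITE);
    }
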
889 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_unmap() local
897 spin_lock_irqsave(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
906 dte = rk_domain->dt[rk_iova_dte_index(iova)]; in rk_iommu_unmap()
909 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
916 unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, pte_dma, size); in rk_iommu_unmap()
918 spin_unlock_irqrestore(&rk_domain->dt_lock, flags); in rk_iommu_unmap()
921 rk_iommu_zap_iova(rk_domain, iova, unmap_size); in rk_iommu_unmap()
954 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_enable() local
971 rk_ops->dma_addr_dte(rk_domain->dt_dma)); in rk_iommu_enable()
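
rk_iommu_enable() is where a domain's tables are handed to the hardware: the DMA address of the directory table, transformed by rk_ops->dma_addr_dte() where the IP revision requires it, is written into each MMU instance's directory-address register (the driver also resets the MMU and enables paging around this write). A purely conceptual sketch with a hypothetical register offset:

    #include <linux/io.h>
    #include <linux/types.h>

    #define MMU_DTE_ADDR    0x00    /* hypothetical offset for this sketch */

    /* Point one ioremapped MMU instance at the domain's directory table.
     * dt_dma fits in 32 bits because the table is allocated with GFP_DMA32. */
    static void point_mmu_at_dt(void __iomem *base, dma_addr_t dt_dma)
    {
            writel((u32)dt_dma, base + MMU_DTE_ADDR);
    }
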
989 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_detach_device() local
1006 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_detach_device()
1008 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_detach_device()
1022 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_attach_device() local
1045 spin_lock_irqsave(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
1046 list_add_tail(&iommu->node, &rk_domain->iommus); in rk_iommu_attach_device()
1047 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags); in rk_iommu_attach_device()
1064 struct rk_iommu_domain *rk_domain; in rk_iommu_domain_alloc() local
1072 rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL); in rk_iommu_domain_alloc()
1073 if (!rk_domain) in rk_iommu_domain_alloc()
1081 rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32); in rk_iommu_domain_alloc()
1082 if (!rk_domain->dt) in rk_iommu_domain_alloc()
1085 rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt, in rk_iommu_domain_alloc()
1087 if (dma_mapping_error(dma_dev, rk_domain->dt_dma)) { in rk_iommu_domain_alloc()
1092 spin_lock_init(&rk_domain->iommus_lock); in rk_iommu_domain_alloc()
1093 spin_lock_init(&rk_domain->dt_lock); in rk_iommu_domain_alloc()
1094 INIT_LIST_HEAD(&rk_domain->iommus); in rk_iommu_domain_alloc()
1096 rk_domain->domain.geometry.aperture_start = 0; in rk_iommu_domain_alloc()
1097 rk_domain->domain.geometry.aperture_end = DMA_BIT_MASK(32); in rk_iommu_domain_alloc()
1098 rk_domain->domain.geometry.force_aperture = true; in rk_iommu_domain_alloc()
1100 return &rk_domain->domain; in rk_iommu_domain_alloc()
1103 free_page((unsigned long)rk_domain->dt); in rk_iommu_domain_alloc()
1105 kfree(rk_domain); in rk_iommu_domain_alloc()
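
The rk_iommu_domain_alloc() lines show the whole setup: one zeroed DMA32 page for the directory table, a streaming DMA mapping of it so the hardware can fetch entries, the two spinlocks and the iommus list, and a 32-bit aperture. A condensed reassembly of that sequence, hedged where the listing does not show the details (the dma_map_single() size and direction, the error-label names, and the trimmed struct definition are assumptions):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    #define SPAGE_SIZE 4096

    struct rk_iommu_domain {            /* fields per the listing; the real   */
            u32 *dt;                    /* struct also embeds an iommu_domain */
            dma_addr_t dt_dma;
            spinlock_t iommus_lock;
            spinlock_t dt_lock;
            struct list_head iommus;
    };

    /* Sketch, not the driver's exact function; dma_dev stands for the driver's
     * shared DMA-capable device. */
    static struct rk_iommu_domain *example_domain_alloc(struct device *dma_dev)
    {
            struct rk_iommu_domain *rk_domain;

            rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
            if (!rk_domain)
                    return NULL;

            /* GFP_DMA32 keeps the directory table below 4 GiB so its address
             * fits the hardware's directory-address register. */
            rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
            if (!rk_domain->dt)
                    goto err_free_domain;

            /* Size and direction are assumed; the listing truncates this call. */
            rk_domain->dt_dma = dma_map_single(dma_dev, rk_domain->dt,
                                               SPAGE_SIZE, DMA_TO_DEVICE);
            if (dma_mapping_error(dma_dev, rk_domain->dt_dma))
                    goto err_free_dt;

            spin_lock_init(&rk_domain->iommus_lock);
            spin_lock_init(&rk_domain->dt_lock);
            INIT_LIST_HEAD(&rk_domain->iommus);
            return rk_domain;

    err_free_dt:
            free_page((unsigned long)rk_domain->dt);
    err_free_domain:
            kfree(rk_domain);
            return NULL;
    }
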
1112 struct rk_iommu_domain *rk_domain = to_rk_domain(domain); in rk_iommu_domain_free() local
1115 WARN_ON(!list_empty(&rk_domain->iommus)); in rk_iommu_domain_free()
1118 u32 dte = rk_domain->dt[i]; in rk_iommu_domain_free()
1128 dma_unmap_single(dma_dev, rk_domain->dt_dma, in rk_iommu_domain_free()
1130 free_page((unsigned long)rk_domain->dt); in rk_iommu_domain_free()
1132 kfree(rk_domain); in rk_iommu_domain_free()
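
rk_iommu_domain_free() undoes all of the above: it warns if IOMMU instances are still attached, walks every directory entry to release any remaining second-level tables, then unmaps and frees the directory page and the domain itself. A hedged sketch of the per-entry walk; the entry count, validity test, address extraction, and DMA direction follow from the assumed two-level layout rather than from the listing:

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>
    #include <linux/io.h>           /* phys_to_virt() */

    #define NUM_DT_ENTRIES 1024     /* assumed: one 4 KiB page of 32-bit DTEs */
    #define SPAGE_SIZE     4096

    /* Free every second-level page table still referenced by the directory.
     * dte_is_valid() and dte_pt_address() stand in for the driver's helpers. */
    static void free_remaining_page_tables(struct device *dma_dev, u32 *dt,
                                           bool (*dte_is_valid)(u32),
                                           phys_addr_t (*dte_pt_address)(u32))
    {
            int i;

            for (i = 0; i < NUM_DT_ENTRIES; i++) {
                    u32 dte = dt[i];

                    if (!dte_is_valid(dte))
                            continue;

                    dma_unmap_single(dma_dev, dte_pt_address(dte),
                                     SPAGE_SIZE, DMA_TO_DEVICE);
                    free_page((unsigned long)phys_to_virt(dte_pt_address(dte)));
            }
    }
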