/Linux-v4.19/include/linux/
iova.h

     21  struct iova {  struct
     78      struct iova anchor;  /* rbtree lookup anchor */
    101  static inline unsigned long iova_size(struct iova *iova)  in iova_size() argument
    103      return iova->pfn_hi - iova->pfn_lo + 1;  in iova_size()
    116  static inline size_t iova_offset(struct iova_domain *iovad, dma_addr_t iova)  in iova_offset() argument
    118      return iova & iova_mask(iovad);  in iova_offset()
    126  static inline dma_addr_t iova_dma_addr(struct iova_domain *iovad, struct iova *iova)  in iova_dma_addr() argument
    128      return (dma_addr_t)iova->pfn_lo << iova_shift(iovad);  in iova_dma_addr()
    131  static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)  in iova_pfn() argument
    133      return iova >> iova_shift(iovad);  in iova_pfn()
    [all …]
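A quick userspace model of the arithmetic in these helpers. The kernel derives iova_shift()/iova_mask() from iovad->granule; the 4 KiB granule below is an assumption for the demo:

#include <stdio.h>
#include <stdint.h>

#define IOVA_SHIFT 12UL                         /* assumed 4 KiB granule */
#define IOVA_MASK  ((1UL << IOVA_SHIFT) - 1)

struct iova { unsigned long pfn_hi, pfn_lo; };

/* size in granule-sized pages, as in iova_size() above */
static unsigned long iova_size(const struct iova *iova)
{
	return iova->pfn_hi - iova->pfn_lo + 1;
}

int main(void)
{
	struct iova r = { .pfn_hi = 0x10f, .pfn_lo = 0x100 };
	unsigned long addr = 0x100123;

	printf("size:   %lu pages\n", iova_size(&r));          /* 16 */
	printf("dma:    0x%lx\n", r.pfn_lo << IOVA_SHIFT);     /* 0x100000 */
	printf("offset: 0x%lx\n", addr & IOVA_MASK);           /* 0x123 */
	printf("pfn:    0x%lx\n", addr >> IOVA_SHIFT);         /* 0x100 */
	return 0;
}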
iommu.h

    198      int (*map)(struct iommu_domain *domain, unsigned long iova,
    200      size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
    204                  unsigned long iova, size_t size);
    206      phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
    296  extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
    298  extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
    301                  unsigned long iova, size_t size);
    302  extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
    304  extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
    355                  unsigned long iova, int flags);
    [all …]
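These externs are the kernel's public IOMMU API. A minimal sketch of driving them from kernel code (assumes a device already sitting behind an IOMMU and a valid page-aligned physical address; error handling trimmed):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/iommu.h>
#include <linux/sizes.h>

static int demo_map_one_page(struct device *dev, phys_addr_t paddr)
{
	struct iommu_domain *domain;
	unsigned long iova = 0x10000000;	/* arbitrary device address */
	int ret;

	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret)
		goto out_free;

	/* one 4 KiB page, read/write from the device's point of view */
	ret = iommu_map(domain, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
	if (!ret) {
		/* walks the page table back: should return paddr */
		WARN_ON(iommu_iova_to_phys(domain, iova) != paddr);
		iommu_unmap(domain, iova, SZ_4K);  /* returns bytes unmapped */
	}

	iommu_detach_device(domain, dev);
out_free:
	iommu_domain_free(domain);
	return ret;
}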
/Linux-v4.19/drivers/iommu/
iova.c

    127  __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)  in __cached_rbnode_insert_update()
    136  __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)  in __cached_rbnode_delete_update()
    138      struct iova *cached_iova;  in __cached_rbnode_delete_update()
    140      cached_iova = rb_entry(iovad->cached32_node, struct iova, node);  in __cached_rbnode_delete_update()
    145      cached_iova = rb_entry(iovad->cached_node, struct iova, node);  in __cached_rbnode_delete_update()
    152  iova_insert_rbtree(struct rb_root *root, struct iova *iova,  in iova_insert_rbtree() argument
    160          struct iova *this = rb_entry(*new, struct iova, node);  in iova_insert_rbtree()
    164          if (iova->pfn_lo < this->pfn_lo)  in iova_insert_rbtree()
    166          else if (iova->pfn_lo > this->pfn_lo)  in iova_insert_rbtree()
    174      rb_link_node(&iova->node, parent, new);  in iova_insert_rbtree()
    [all …]
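iova_insert_rbtree() keeps allocations ordered by pfn_lo; its usual callers are the allocation entry points. A hedged sketch of the surrounding v4.19 API (the granule, start pfn, and limit are arbitrary demo values):

#include <linux/dma-mapping.h>
#include <linux/iova.h>
#include <linux/sizes.h>

static void demo_iova_alloc(void)
{
	struct iova_domain iovad;
	struct iova *iova;

	/* 4 KiB granule, allocations start at pfn 0x100 */
	init_iova_domain(&iovad, SZ_4K, 0x100);

	/* 16 pages, size-aligned, below 4 GiB; the result is linked
	 * into the rbtree shown above, keyed by pfn_lo */
	iova = alloc_iova(&iovad, 16, DMA_BIT_MASK(32) >> PAGE_SHIFT, true);
	if (iova)
		__free_iova(&iovad, iova);

	put_iova_domain(&iovad);
}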
io-pgtable-arm-v7s.c

    369              unsigned long iova, phys_addr_t paddr, int prot,  in arm_v7s_init_pte() argument
    385          tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl);  in arm_v7s_init_pte()
    386          if (WARN_ON(__arm_v7s_unmap(data, iova + i * sz,  in arm_v7s_init_pte()
    429  static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,  in __arm_v7s_map() argument
    438      ptep += ARM_V7S_LVL_IDX(iova, lvl);  in __arm_v7s_map()
    442          return arm_v7s_init_pte(data, iova, paddr, prot,  in __arm_v7s_map()
    473      return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);  in __arm_v7s_map()
    476  static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,  in arm_v7s_map() argument
    487      if (WARN_ON(upper_32_bits(iova) || upper_32_bits(paddr)))  in arm_v7s_map()
    490      ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);  in arm_v7s_map()
    [all …]
tegra-gart.c

     92  #define for_each_gart_pte(gart, iova) \  argument
     93      for (iova = gart->iovmm_base; \
     94           iova < gart->iovmm_base + GART_PAGE_SIZE * gart->page_count; \
     95           iova += GART_PAGE_SIZE)
    120      unsigned long iova;  in do_gart_setup() local
    122      for_each_gart_pte(gart, iova)  in do_gart_setup()
    123          gart_set_pte(gart, iova, data ? *(data++) : 0);  in do_gart_setup()
    132      unsigned long iova;  in gart_dump_table() local
    136      for_each_gart_pte(gart, iova) {  in gart_dump_table()
    139          pte = gart_read_pte(gart, iova);  in gart_dump_table()
    [all …]
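for_each_gart_pte() just steps a cursor across the GART's linear aperture one page at a time. The same pattern as a standalone program, with GART_PAGE_SIZE assumed to be 4 KiB:

#include <stdio.h>

#define GART_PAGE_SIZE 4096UL			/* assumption for the demo */

struct gart { unsigned long iovmm_base, page_count; };

#define for_each_gart_pte(gart, iova)				\
	for (iova = (gart)->iovmm_base;				\
	     iova < (gart)->iovmm_base +			\
		    GART_PAGE_SIZE * (gart)->page_count;	\
	     iova += GART_PAGE_SIZE)

int main(void)
{
	struct gart g = { .iovmm_base = 0x58000000, .page_count = 4 };
	unsigned long iova;

	for_each_gart_pte(&g, iova)
		printf("pte slot at iova 0x%lx\n", iova);
	return 0;
}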
io-pgtable-arm.c

    295                  unsigned long iova, size_t size, int lvl,
    319                  unsigned long iova, phys_addr_t paddr,  in arm_lpae_init_pte() argument
    337          tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);  in arm_lpae_init_pte()
    338          if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))  in arm_lpae_init_pte()
    378  static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,  in __arm_lpae_map() argument
    388      ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);  in __arm_lpae_map()
    392          return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);  in __arm_lpae_map()
    422      return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);  in __arm_lpae_map()
    466  static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,  in arm_lpae_map() argument
    478      if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||  in arm_lpae_map()
    [all …]
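The recursive walk above descends one table per level, indexing each table with ARM_LPAE_LVL_IDX(). A simplified userspace model of that index extraction for a 4 KiB granule (9 index bits per level, page offset in bits [11:0]; the real macro also handles a wider top level, which this ignores):

#include <stdio.h>
#include <stdint.h>

#define GRANULE_SHIFT   12          /* 4 KiB pages (assumed) */
#define BITS_PER_LEVEL  9           /* 512 entries per table  */

/* index into the table at 'lvl' (1..3, 3 = leaf) for this iova */
static unsigned int lpae_lvl_idx(uint64_t iova, int lvl)
{
	int shift = GRANULE_SHIFT + (3 - lvl) * BITS_PER_LEVEL;

	return (iova >> shift) & ((1 << BITS_PER_LEVEL) - 1);
}

int main(void)
{
	uint64_t iova = 0x12345678;

	for (int lvl = 1; lvl <= 3; lvl++)
		printf("lvl %d index: %u\n", lvl, lpae_lvl_idx(iova, lvl));
	return 0;
}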
exynos-iommu.c

    102  #define section_offs(iova) (iova & (SECT_SIZE - 1))  argument
    104  #define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))  argument
    106  #define spage_offs(iova) (iova & (SPAGE_SIZE - 1))  argument
    111  static u32 lv1ent_offset(sysmmu_iova_t iova)  in lv1ent_offset() argument
    113      return iova >> SECT_ORDER;  in lv1ent_offset()
    116  static u32 lv2ent_offset(sysmmu_iova_t iova)  in lv2ent_offset() argument
    118      return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);  in lv2ent_offset()
    186  static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)  in section_entry() argument
    188      return pgtable + lv1ent_offset(iova);  in section_entry()
    191  static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)  in page_entry() argument
    [all …]
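A userspace model of the two offset helpers above, with assumed Exynos SysMMU constants: 1 MiB sections (SECT_ORDER 20), 4 KiB small pages (SPAGE_ORDER 12), and 256-entry second-level tables:

#include <stdio.h>
#include <stdint.h>

#define SECT_ORDER      20          /* assumed: 1 MiB sections  */
#define SPAGE_ORDER     12          /* assumed: 4 KiB pages     */
#define NUM_LV2ENTRIES  256         /* assumed table size       */

static uint32_t lv1ent_offset(uint32_t iova) { return iova >> SECT_ORDER; }

static uint32_t lv2ent_offset(uint32_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

int main(void)
{
	uint32_t iova = 0x12345678;

	/* which 1 MiB section, and which 4 KiB page inside it */
	printf("lv1 index: %u\n", lv1ent_offset(iova));      /* 0x123 = 291 */
	printf("lv2 index: %u\n", lv2ent_offset(iova));      /* 0x45  = 69  */
	printf("page offs: 0x%x\n", iova & ((1u << SPAGE_ORDER) - 1));
	return 0;
}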
dma-iommu.c

     39      dma_addr_t iova;  member
    199          msi_page[i].iova = start;  in cookie_init_hw_msi_region()
    352      unsigned long shift, iova_len, iova = 0;  in iommu_dma_alloc_iova() local
    378          iova = alloc_iova_fast(iovad, iova_len,  in iommu_dma_alloc_iova()
    381      if (!iova)  in iommu_dma_alloc_iova()
    382          iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,  in iommu_dma_alloc_iova()
    385      return (dma_addr_t)iova << shift;  in iommu_dma_alloc_iova()
    389          dma_addr_t iova, size_t size)  in iommu_dma_free_iova() argument
    397      free_iova_fast(iovad, iova_pfn(iovad, iova),  in iommu_dma_free_iova()
    526      dma_addr_t iova;  in iommu_dma_alloc() local
    [all …]
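The retry visible in iommu_dma_alloc_iova(): first try to allocate below 4 GiB (cheaper for hardware with 32-bit fast paths), then fall back to the full DMA limit. A hedged sketch of that pattern; alloc_iova_fast() and DMA_BIT_MASK() are the real v4.19 APIs, and the real function also handles size rounding that is omitted here:

#include <linux/dma-mapping.h>
#include <linux/iova.h>

static dma_addr_t demo_alloc(struct iova_domain *iovad,
			     unsigned long iova_len, u64 dma_limit)
{
	unsigned long shift = iova_shift(iovad);
	unsigned long iova = 0;

	/* first shot: stay below 4 GiB if the limit allows more */
	if (dma_limit > DMA_BIT_MASK(32))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);
	/* fallback: anywhere up to the device's DMA limit */
	if (!iova)
		iova = alloc_iova_fast(iovad, iova_len,
				       dma_limit >> shift, true);

	return (dma_addr_t)iova << shift;
}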
rockchip-iommu.c

    257  static u32 rk_iova_dte_index(dma_addr_t iova)  in rk_iova_dte_index() argument
    259      return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;  in rk_iova_dte_index()
    262  static u32 rk_iova_pte_index(dma_addr_t iova)  in rk_iova_pte_index() argument
    264      return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;  in rk_iova_pte_index()
    267  static u32 rk_iova_page_offset(dma_addr_t iova)  in rk_iova_page_offset() argument
    269      return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;  in rk_iova_page_offset()
    304      dma_addr_t iova;  in rk_iommu_zap_lines() local
    306      for (iova = iova_start; iova < iova_end; iova += SPAGE_SIZE)  in rk_iommu_zap_lines()
    307          rk_iommu_write(iommu->bases[i], RK_MMU_ZAP_ONE_LINE, iova);  in rk_iommu_zap_lines()
    470  static void log_iova(struct rk_iommu *iommu, int index, dma_addr_t iova)  in log_iova() argument
    [all …]
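The three helpers above split a 32-bit IOVA into directory index, page-table index, and page offset. The mask and shift values below are assumptions for illustration (10-bit DTE index, 10-bit PTE index, 12-bit offset), not copied from the driver:

   31        22 21        12 11          0
  +------------+------------+-------------+
  | DTE index  | PTE index  | page offset |
  +------------+------------+-------------+

#include <stdio.h>
#include <stdint.h>

#define RK_IOVA_DTE_MASK   0xffc00000u   /* assumed values */
#define RK_IOVA_DTE_SHIFT  22
#define RK_IOVA_PTE_MASK   0x003ff000u
#define RK_IOVA_PTE_SHIFT  12
#define RK_IOVA_PAGE_MASK  0x00000fffu

int main(void)
{
	uint32_t iova = 0x12345678;

	printf("dte index:   %u\n", (iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT);
	printf("pte index:   %u\n", (iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT);
	printf("page offset: 0x%x\n", iova & RK_IOVA_PAGE_MASK);
	return 0;
}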
tegra-smmu.c

    152  static unsigned int iova_pd_index(unsigned long iova)  in iova_pd_index() argument
    154      return (iova >> SMMU_PDE_SHIFT) & (SMMU_NUM_PDE - 1);  in iova_pd_index()
    157  static unsigned int iova_pt_index(unsigned long iova)  in iova_pt_index() argument
    159      return (iova >> SMMU_PTE_SHIFT) & (SMMU_NUM_PTE - 1);  in iova_pt_index()
    215                  unsigned long iova)  in smmu_flush_tlb_section() argument
    220          SMMU_TLB_FLUSH_VA_SECTION(iova);  in smmu_flush_tlb_section()
    226                  unsigned long iova)  in smmu_flush_tlb_group() argument
    231          SMMU_TLB_FLUSH_VA_GROUP(iova);  in smmu_flush_tlb_group()
    511  static void tegra_smmu_set_pde(struct tegra_smmu_as *as, unsigned long iova,  in tegra_smmu_set_pde() argument
    514      unsigned int pd_index = iova_pd_index(iova);  in tegra_smmu_set_pde()
    [all …]
io-pgtable.h

     32      void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule,
    120      int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
    122      size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
    125                  unsigned long iova);
    180          unsigned long iova, size_t size, size_t granule, bool leaf)  in io_pgtable_tlb_add_flush() argument
    182      iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie);  in io_pgtable_tlb_add_flush()
s390-iommu.c

    267  static int s390_iommu_map(struct iommu_domain *domain, unsigned long iova,  in s390_iommu_map() argument
    279      rc = s390_iommu_update_trans(s390_domain, (unsigned long) paddr, iova,  in s390_iommu_map()
    286                  dma_addr_t iova)  in s390_iommu_iova_to_phys() argument
    293      if (iova < domain->geometry.aperture_start ||  in s390_iommu_iova_to_phys()
    294          iova > domain->geometry.aperture_end)  in s390_iommu_iova_to_phys()
    297      rtx = calc_rtx(iova);  in s390_iommu_iova_to_phys()
    298      sx = calc_sx(iova);  in s390_iommu_iova_to_phys()
    299      px = calc_px(iova);  in s390_iommu_iova_to_phys()
    317                  unsigned long iova, size_t size)  in s390_iommu_unmap() argument
    324      paddr = s390_iommu_iova_to_phys(domain, iova);  in s390_iommu_unmap()
    [all …]
/Linux-v4.19/drivers/fpga/
dfl-afu-dma-region.c

    179                  u64 iova, u64 size)  in dma_region_check_iova() argument
    181      if (!size && region->iova != iova)  in dma_region_check_iova()
    184      return (region->iova <= iova) &&  in dma_region_check_iova()
    185          (region->length + region->iova >= iova + size);  in dma_region_check_iova()
    204          (unsigned long long)region->iova);  in afu_dma_region_add()
    215          if (dma_region_check_iova(this, region->iova, region->length))  in afu_dma_region_add()
    218          if (region->iova < this->iova)  in afu_dma_region_add()
    220          else if (region->iova > this->iova)  in afu_dma_region_add()
    245          (unsigned long long)region->iova);  in afu_dma_region_remove()
    267          (unsigned long long)region->iova);  in afu_dma_region_destroy()
    [all …]
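The dma_region_check_iova() test above is plain interval containment: [iova, iova+size) must lie inside [region->iova, region->iova+region->length). A userspace check of the same expression:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct dma_region { uint64_t iova, length; };

static bool region_contains(const struct dma_region *region,
			    uint64_t iova, uint64_t size)
{
	/* reading of line 181: a zero-size query must hit the start */
	if (!size && region->iova != iova)
		return false;

	return region->iova <= iova &&
	       region->iova + region->length >= iova + size;
}

int main(void)
{
	struct dma_region r = { .iova = 0x1000, .length = 0x4000 };

	printf("%d\n", region_contains(&r, 0x2000, 0x1000));  /* 1 */
	printf("%d\n", region_contains(&r, 0x4000, 0x2000));  /* 0: runs past end */
	return 0;
}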
dfl-afu.h

     56      u64 iova;  member
     95                 u64 user_addr, u64 length, u64 *iova);
     96  int afu_dma_unmap_region(struct dfl_feature_platform_data *pdata, u64 iova);
     99                 u64 iova, u64 size);
/Linux-v4.19/include/trace/events/
iommu.h

     88      TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size),
     90      TP_ARGS(iova, paddr, size),
     93          __field(u64, iova)
     99          __entry->iova = iova;
    105          __entry->iova, __entry->paddr, __entry->size
    111      TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size),
    113      TP_ARGS(iova, size, unmapped_size),
    116          __field(u64, iova)
    122          __entry->iova = iova;
    128          __entry->iova, __entry->size, __entry->unmapped_size
    [all …]
/Linux-v4.19/drivers/vfio/
vfio_iommu_type1.c

     81      dma_addr_t iova;  /* Device address */  member
    101      dma_addr_t iova;  /* Device address */  member
    108      dma_addr_t iova;  member
    131          if (start + size <= dma->iova)  in vfio_find_dma()
    133          else if (start >= dma->iova + dma->size)  in vfio_find_dma()
    151          if (new->iova + new->size <= dma->iova)  in vfio_link_dma()
    169  static struct vfio_pfn *vfio_find_vpfn(struct vfio_dma *dma, dma_addr_t iova)  in vfio_find_vpfn() argument
    177          if (iova < vpfn->iova)  in vfio_find_vpfn()
    179          else if (iova > vpfn->iova)  in vfio_find_vpfn()
    198          if (new->iova < vpfn->iova)  in vfio_link_pfn()
    [all …]
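vfio_find_dma() descends an rbtree keyed by IOVA range: go left while the query ends before the node, right while it starts after it, otherwise the ranges overlap. A userspace model with a plain binary tree standing in for the kernel rbtree:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct vfio_dma {
	uint64_t iova, size;
	struct vfio_dma *left, *right;
};

static struct vfio_dma *find_dma(struct vfio_dma *node,
				 uint64_t start, uint64_t size)
{
	while (node) {
		if (start + size <= node->iova)
			node = node->left;       /* query ends before node */
		else if (start >= node->iova + node->size)
			node = node->right;      /* query starts after node */
		else
			return node;             /* overlap */
	}
	return NULL;
}

int main(void)
{
	struct vfio_dma lo = { .iova = 0x1000, .size = 0x1000 };
	struct vfio_dma hi = { .iova = 0x8000, .size = 0x2000 };
	struct vfio_dma root = { .iova = 0x4000, .size = 0x1000,
				 .left = &lo, .right = &hi };

	printf("%s\n", find_dma(&root, 0x8800, 0x100) ? "hit" : "miss"); /* hit */
	printf("%s\n", find_dma(&root, 0x6000, 0x100) ? "hit" : "miss"); /* miss */
	return 0;
}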
/Linux-v4.19/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c

     15                  unsigned long iova, size_t size)  in etnaviv_domain_unmap() argument
     20      if (!IS_ALIGNED(iova | size, pgsize)) {  in etnaviv_domain_unmap()
     22              iova, size, pgsize);  in etnaviv_domain_unmap()
     27          unmapped_page = domain->ops->unmap(domain, iova, pgsize);  in etnaviv_domain_unmap()
     31          iova += unmapped_page;  in etnaviv_domain_unmap()
     37                  unsigned long iova, phys_addr_t paddr,  in etnaviv_domain_map() argument
     40      unsigned long orig_iova = iova;  in etnaviv_domain_map()
     45      if (!IS_ALIGNED(iova | paddr | size, pgsize)) {  in etnaviv_domain_map()
     47              iova, &paddr, size, pgsize);  in etnaviv_domain_map()
     52          ret = domain->ops->map(domain, iova, paddr, pgsize, prot);  in etnaviv_domain_map()
    [all …]
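Two idioms from this file, modeled in userspace: checking several values for alignment in a single test by OR-ing them first, then mapping a large range one page at a time (map_one() is a stand-in for domain->ops->map(); the kernel code additionally unmaps what it already mapped on failure):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

/* stand-in for domain->ops->map(); always succeeds here */
static int map_one(uint64_t iova, uint64_t paddr, size_t pgsize)
{
	printf("map 0x%llx -> 0x%llx (%zu bytes)\n",
	       (unsigned long long)iova, (unsigned long long)paddr, pgsize);
	return 0;
}

static int domain_map(uint64_t iova, uint64_t paddr, size_t size,
		      size_t pgsize)
{
	/* one test covers all three: any misaligned bit survives the OR */
	if (!IS_ALIGNED(iova | paddr | size, pgsize))
		return -1;

	while (size) {
		if (map_one(iova, paddr, pgsize))
			return -1;
		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}
	return 0;
}

int main(void)
{
	return domain_map(0x10000, 0x40000, 0x3000, 0x1000);
}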
/Linux-v4.19/drivers/infiniband/sw/rxe/
rxe_mr.c

     54  int mem_check_range(struct rxe_mem *mem, u64 iova, size_t length)  in mem_check_range() argument
     62          if (iova < mem->iova ||  in mem_check_range()
     64              iova > mem->iova + mem->length - length)  in mem_check_range()
    162            u64 length, u64 iova, int access, struct ib_udata *udata,  in rxe_mem_init_user() argument
    227      mem->iova = iova;  in rxe_mem_init_user()
    266          u64 iova,  in lookup_iova() argument
    271      size_t offset = iova - mem->iova + mem->offset;  in lookup_iova()
    304  void *iova_to_vaddr(struct rxe_mem *mem, u64 iova, int length)  in iova_to_vaddr() argument
    317          addr = (void *)(uintptr_t)iova;  in iova_to_vaddr()
    321      if (mem_check_range(mem, iova, length)) {  in iova_to_vaddr()
    [all …]
/Linux-v4.19/drivers/gpu/drm/msm/
msm_gem_submit.c

    126          submit->bos[i].iova = submit_bo.presumed;  in submit_lookup_objects()
    176      submit->bos[i].iova = 0;  in submit_unlock_unpin_bo()
    269          uint64_t iova;  in submit_pin_objects() local
    273              submit->gpu->aspace, &iova);  in submit_pin_objects()
    280          if (iova == submit->bos[i].iova) {  in submit_pin_objects()
    283              submit->bos[i].iova = iova;  in submit_pin_objects()
    294          struct msm_gem_object **obj, uint64_t *iova, bool *valid)  in submit_bo() argument
    304      if (iova)  in submit_bo()
    305          *iova = submit->bos[idx].iova;  in submit_bo()
    341      uint64_t iova;  in submit_reloc() local
    [all …]
msm_iommu.c

     28          unsigned long iova, int flags, void *arg)  in msm_fault_handler() argument
     32          return iommu->base.handler(iommu->base.arg, iova, flags);  in msm_fault_handler()
     33      pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);  in msm_fault_handler()
     60  static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,  in msm_iommu_map() argument
     67      ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);  in msm_iommu_map()
     74  static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,  in msm_iommu_unmap() argument
     80      iommu_unmap(iommu->domain, iova, len);  in msm_iommu_unmap()
msm_rd.c

    316          uint64_t iova, uint32_t size)  in snapshot_buf() argument
    321      if (iova) {  in snapshot_buf()
    322          buf += iova - submit->bos[idx].iova;  in snapshot_buf()
    324          iova = submit->bos[idx].iova;  in snapshot_buf()
    333          (uint32_t[3]){ iova, size, iova >> 32 }, 12);  in snapshot_buf()
    393          uint64_t iova = submit->cmd[i].iova;  in msm_rd_dump_submit() local
    399              submit->cmd[i].iova, szd * 4);  in msm_rd_dump_submit()
    412          (uint32_t[3]){ iova, szd, iova >> 32 }, 12);  in msm_rd_dump_submit()
msm_mmu.h

     26      int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
     28      int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
     36      int (*handler)(void *arg, unsigned long iova, int flags);
     51          int (*handler)(void *arg, unsigned long iova, int flags))  in msm_mmu_set_fault_handler() argument
msm_gem_vma.c

     45      if (!aspace || !vma->iova)  in msm_gem_unmap_vma()
     50          aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);  in msm_gem_unmap_vma()
     57      vma->iova = 0;  in msm_gem_unmap_vma()
     80      vma->iova = vma->node.start << PAGE_SHIFT;  in msm_gem_map_vma()
     84      ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,  in msm_gem_map_vma()
/Linux-v4.19/drivers/soc/qcom/
qcom-geni-se.c

    613              dma_addr_t *iova)  in geni_se_tx_dma_prep() argument
    618      *iova = dma_map_single(wrapper->dev, buf, len, DMA_TO_DEVICE);  in geni_se_tx_dma_prep()
    619      if (dma_mapping_error(wrapper->dev, *iova))  in geni_se_tx_dma_prep()
    626      writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_TX_PTR_L);  in geni_se_tx_dma_prep()
    627      writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_TX_PTR_H);  in geni_se_tx_dma_prep()
    646              dma_addr_t *iova)  in geni_se_rx_dma_prep() argument
    651      *iova = dma_map_single(wrapper->dev, buf, len, DMA_FROM_DEVICE);  in geni_se_rx_dma_prep()
    652      if (dma_mapping_error(wrapper->dev, *iova))  in geni_se_rx_dma_prep()
    659      writel_relaxed(lower_32_bits(*iova), se->base + SE_DMA_RX_PTR_L);  in geni_se_rx_dma_prep()
    660      writel_relaxed(upper_32_bits(*iova), se->base + SE_DMA_RX_PTR_H);  in geni_se_rx_dma_prep()
    [all …]
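The pattern in geni_se_*_dma_prep(): map a streaming buffer, check the mapping for failure, then program the bus address into split low/high registers. A hedged kernel-style sketch; dma_map_single(), dma_mapping_error(), lower_32_bits()/upper_32_bits() and writel_relaxed() are the real APIs, while REG_PTR_L/H are hypothetical register offsets:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>

#define REG_PTR_L 0x30   /* hypothetical register offsets */
#define REG_PTR_H 0x34

static int demo_dma_prep(struct device *dev, void __iomem *base,
			 void *buf, size_t len, dma_addr_t *iova)
{
	*iova = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *iova))
		return -EIO;

	/* 64-bit bus address split across two 32-bit registers */
	writel_relaxed(lower_32_bits(*iova), base + REG_PTR_L);
	writel_relaxed(upper_32_bits(*iova), base + REG_PTR_H);
	return 0;
}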
/Linux-v4.19/drivers/s390/cio/
vfio_ccw_cp.c

     72                  u64 iova, unsigned int len)  in pfn_array_alloc_pin() argument
     82      pa->pa_iova = iova;  in pfn_array_alloc_pin()
     84      pa->pa_nr = ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;  in pfn_array_alloc_pin()
    158                  unsigned long iova)  in pfn_array_table_iova_pinned() argument
    161      unsigned long iova_pfn = iova >> PAGE_SHIFT;  in pfn_array_table_iova_pinned()
    204              void *to, u64 iova,  in copy_from_iova() argument
    212      ret = pfn_array_alloc_pin(&pa, mdev, iova, n);  in copy_from_iova()
    221      from += iova & (PAGE_SIZE - 1);  in copy_from_iova()
    222      m -= iova & (PAGE_SIZE - 1);  in copy_from_iova()
    239              struct ccw1 *to, u64 iova,  in copy_ccw_from_iova() argument
    [all …]
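The pa_nr computation at line 84 counts the pages spanned by a buffer that need not start on a page boundary: add the in-page offset to the length, then round up. A userspace check with 4 KiB pages assumed:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

static unsigned long pages_spanned(uint64_t iova, unsigned int len)
{
	return ((iova & ~PAGE_MASK) + len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
}

int main(void)
{
	/* 0x100 bytes ending exactly at a page boundary: 1 page */
	printf("%lu\n", pages_spanned(0x1f00, 0x100));
	/* same length shifted by one byte now straddles 2 pages */
	printf("%lu\n", pages_spanned(0x1f01, 0x100));
	return 0;
}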