Searched refs:ib_umem_start (Results 1 – 3 of 3) sorted by relevance
276  ib_umem_odp_unmap_dma_pages(umem_odp, ib_umem_start(umem_odp),            in ib_umem_odp_release()
367  if (user_virt < ib_umem_start(umem_odp) ||                                 in ib_umem_odp_map_dma_and_lock()
387  pfn_start_idx = (range.start - ib_umem_start(umem_odp)) >> PAGE_SHIFT;     in ib_umem_odp_map_dma_and_lock()
412  start_idx = (range.start - ib_umem_start(umem_odp)) >> page_shift;         in ib_umem_odp_map_dma_and_lock()
488  virt = max_t(u64, virt, ib_umem_start(umem_odp));                          in ib_umem_odp_unmap_dma_pages()
491  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;            in ib_umem_odp_unmap_dma_pages()
496  unsigned long pfn_idx = (addr - ib_umem_start(umem_odp)) >> PAGE_SHIFT;    in ib_umem_odp_unmap_dma_pages()
53   static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)    in ib_umem_start() (function definition)
66   return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>                in ib_umem_odp_num_pages()
194  ib_umem_odp_unmap_dma_pages(odp, ib_umem_start(odp),                       in dma_fence_odp_mr()
218  unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;              in free_implicit_child_mr()
264  unsigned long idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;              in destroy_unused_implicit_child_mr()
313  start = max_t(u64, ib_umem_start(umem_odp), range->start);                 in mlx5_ib_invalidate_range()
323  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;            in mlx5_ib_invalidate_range()
685  start_idx = (user_va - ib_umem_start(odp)) >> page_shift;                  in pagefault_real_mr()