Lines matching refs: odp (identifier cross-reference over the mlx5 implicit on-demand-paging code, drivers/infiniband/hw/mlx5/odp.c; each hit is the source line number, the matched line, and the enclosing function)

96 static int check_parent(struct ib_umem_odp *odp,  in check_parent()  argument
99 struct mlx5_ib_mr *mr = odp->private; in check_parent()
101 return mr && mr->parent == parent && !odp->dying; in check_parent()
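The three hits above are the whole of check_parent(), the filter used by the tree walks below: a node in the interval tree qualifies only if it is a live child of the given implicit MR. A reconstruction from the matched lines; only the brace layout and the placement of the second parameter are filled in:

    static int check_parent(struct ib_umem_odp *odp,
                            struct mlx5_ib_mr *parent)
    {
            struct mlx5_ib_mr *mr = odp->private;

            /* Keep the node only if it still has a backing MR, the MR
             * hangs off the expected parent, and teardown has not begun. */
            return mr && mr->parent == parent && !odp->dying;
    }
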
112 static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp) in odp_next() argument
114 struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent; in odp_next()
115 struct ib_ucontext_per_mm *per_mm = odp->per_mm; in odp_next()
120 rb = rb_next(&odp->interval_tree.rb); in odp_next()
123 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb); in odp_next()
124 if (check_parent(odp, parent)) in odp_next()
128 odp = NULL; in odp_next()
131 return odp; in odp_next()
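odp_next() advances to the next leaf of the same implicit MR: rb_next() steps through the per-MM interval tree and check_parent() skips nodes that belong to other MRs. A sketch of the walk these hits trace, assuming (from the per_mm reference at line 115) that the traversal runs under the per-MM umem rwsem and exits through goto labels:

    static struct ib_umem_odp *odp_next(struct ib_umem_odp *odp)
    {
            struct mlx5_ib_mr *mr = odp->private, *parent = mr->parent;
            struct ib_ucontext_per_mm *per_mm = odp->per_mm;
            struct rb_node *rb;

            down_read(&per_mm->umem_rwsem);       /* assumed locking */
            while (1) {
                    rb = rb_next(&odp->interval_tree.rb);
                    if (!rb)
                            goto not_found;
                    odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
                    /* Skip leaves of other MRs and leaves being torn down. */
                    if (check_parent(odp, parent))
                            goto end;
            }
    not_found:
            odp = NULL;
    end:
            up_read(&per_mm->umem_rwsem);
            return odp;
    }
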
138 struct ib_umem_odp *odp; in odp_lookup() local
142 odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length); in odp_lookup()
143 if (!odp) in odp_lookup()
147 if (check_parent(odp, parent)) in odp_lookup()
149 rb = rb_next(&odp->interval_tree.rb); in odp_lookup()
152 odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb); in odp_lookup()
153 if (ib_umem_start(odp) > start + length) in odp_lookup()
157 odp = NULL; in odp_lookup()
160 return odp; in odp_lookup()
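odp_lookup() is the companion ranged lookup: rbt_ib_umem_lookup() returns the first umem overlapping [start, start + length), and the loop then scans forward until a node passes check_parent() or starts beyond the end of the range. A sketch assembled from the hits; the derivation of per_mm, the locking, and the goto labels are assumptions:

    static struct ib_umem_odp *odp_lookup(u64 start, u64 length,
                                          struct mlx5_ib_mr *parent)
    {
            /* Assumed: the per-MM context is reached via the parent's umem. */
            struct ib_ucontext_per_mm *per_mm =
                    to_ib_umem_odp(parent->umem)->per_mm;
            struct ib_umem_odp *odp;
            struct rb_node *rb;

            down_read(&per_mm->umem_rwsem);       /* assumed locking */
            odp = rbt_ib_umem_lookup(&per_mm->umem_tree, start, length);
            if (!odp)
                    goto end;

            while (1) {
                    if (check_parent(odp, parent))
                            goto end;
                    rb = rb_next(&odp->interval_tree.rb);
                    if (!rb)
                            goto not_found;
                    odp = rb_entry(rb, struct ib_umem_odp, interval_tree.rb);
                    /* Past the end of the requested range: no match. */
                    if (ib_umem_start(odp) > start + length)
                            goto not_found;
            }
    not_found:
            odp = NULL;
    end:
            up_read(&per_mm->umem_rwsem);
            return odp;
    }
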
168 struct ib_umem_odp *odp; in mlx5_odp_populate_klm() local
204 odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE, in mlx5_odp_populate_klm()
210 if (odp && ib_umem_start(odp) == va) { in mlx5_odp_populate_klm()
211 struct mlx5_ib_mr *mtt = odp->private; in mlx5_odp_populate_klm()
214 odp = odp_next(odp); in mlx5_odp_populate_klm()
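mlx5_odp_populate_klm() puts both helpers to work: one ranged odp_lookup() seeds the scan, and each MLX5_IMR_MTT_SIZE slot then gets a KLM entry that points at the child MTT's key when a leaf starts exactly at that virtual address, or at the device's null mkey otherwise. A condensed sketch of the loop; the pklm/dev/i/va declarations and the null_mkey fallback are assumptions from the mlx5 driver of this era:

    odp = odp_lookup(offset * MLX5_IMR_MTT_SIZE,
                     nentries * MLX5_IMR_MTT_SIZE, mr);

    for (i = 0; i < nentries; i++, pklm++) {
            pklm->bcount = cpu_to_be32(MLX5_IMR_MTT_SIZE);
            va = (offset + i) * MLX5_IMR_MTT_SIZE;
            if (odp && ib_umem_start(odp) == va) {
                    struct mlx5_ib_mr *mtt = odp->private;

                    /* A child MTT backs this slot: point the entry at it. */
                    pklm->key = cpu_to_be32(mtt->ibmr.lkey);
                    odp = odp_next(odp);
            } else {
                    /* No leaf here: leave the slot on the null mkey. */
                    pklm->key = cpu_to_be32(dev->null_mkey);
            }
    }
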
225 struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp, work); in mr_leaf_free_action() local
226 int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT; in mr_leaf_free_action()
227 struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent; in mr_leaf_free_action()
243 ib_umem_odp_release(odp); in mr_leaf_free_action()
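mr_leaf_free_action() is the deferred-teardown side, run from the work item installed on each child umem at line 514 below: it recovers the leaf's slot index in the parent from ib_umem_start(), detaches the MTT, and releases the umem. Only the first three lines and the final release appear in the hits; the detach, the SRCU grace period, and the invalidation of the parent's slot are assumptions from the driver of this era:

    static void mr_leaf_free_action(struct work_struct *work)
    {
            struct ib_umem_odp *odp = container_of(work, struct ib_umem_odp,
                                                   work);
            int idx = ib_umem_start(odp) >> MLX5_IMR_MTT_SHIFT;
            struct mlx5_ib_mr *mr = odp->private, *imr = mr->parent;

            mr->parent = NULL;                    /* assumed: detach the leaf */
            synchronize_srcu(&mr->dev->mr_srcu);  /* assumed: drain faulters */

            ib_umem_odp_release(odp);
            if (imr->live)                        /* assumed: reset parent slot */
                    mlx5_ib_update_xlt(imr, idx, 1, 0,
                                       MLX5_IB_UPD_XLT_INDIRECT |
                                       MLX5_IB_UPD_XLT_INVALIDATE |
                                       MLX5_IB_UPD_XLT_ATOMIC);
    }
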
479 struct ib_umem_odp *odp, *result = NULL; in implicit_mr_get_data() local
486 odp = odp_lookup(addr, 1, mr); in implicit_mr_get_data()
489 io_virt, bcnt, addr, odp); in implicit_mr_get_data()
492 if (likely(odp)) { in implicit_mr_get_data()
496 odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE); in implicit_mr_get_data()
497 if (IS_ERR(odp)) { in implicit_mr_get_data()
499 return ERR_CAST(odp); in implicit_mr_get_data()
502 mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0, in implicit_mr_get_data()
506 ib_umem_odp_release(odp); in implicit_mr_get_data()
510 odp->private = mtt; in implicit_mr_get_data()
511 mtt->umem = &odp->umem; in implicit_mr_get_data()
514 INIT_WORK(&odp->work, mr_leaf_free_action); in implicit_mr_get_data()
525 result = odp; in implicit_mr_get_data()
529 odp = odp_next(odp); in implicit_mr_get_data()
530 if (odp && ib_umem_start(odp) != addr) in implicit_mr_get_data()
531 odp = NULL; in implicit_mr_get_data()
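implicit_mr_get_data() is where leaves come from. For every MLX5_IMR_MTT_SIZE slot touched by [io_virt, io_virt + bcnt) it reuses the leaf found by odp_lookup() or creates one: a child umem via ib_umem_odp_alloc_child() and a backing MTT via implicit_mr_alloc(), cross-linked and armed with the free work; a failed MTT allocation unwinds by releasing the fresh umem (lines 502-506). A condensed sketch of the per-slot loop; the locking, the trailing implicit_mr_alloc() arguments, and the bookkeeping between hits are assumptions:

    u64 addr = io_virt & MLX5_IMR_MTT_MASK;      /* assumed slot alignment */
    struct ib_umem_odp *odp, *result = NULL;
    struct mlx5_ib_mr *mtt;

    odp = odp_lookup(addr, 1, mr);
    next_mr:
    if (likely(odp)) {
            /* Slot already has a leaf: nothing to allocate. */
    } else {
            odp = ib_umem_odp_alloc_child(odp_mr, addr, MLX5_IMR_MTT_SIZE);
            if (IS_ERR(odp))
                    return ERR_CAST(odp);

            mtt = implicit_mr_alloc(mr->ibmr.pd, odp, 0,
                                    mr->access_flags); /* assumed args */
            if (IS_ERR(mtt)) {
                    ib_umem_odp_release(odp);    /* undo the child umem */
                    return ERR_CAST(mtt);
            }

            /* Cross-link umem and MTT and arm deferred teardown. */
            odp->private = mtt;
            mtt->umem = &odp->umem;
            INIT_WORK(&odp->work, mr_leaf_free_action);
    }

    if (likely(!result))
            result = odp;        /* hand back the first leaf in the range */

    addr += MLX5_IMR_MTT_SIZE;
    if (unlikely(addr < io_virt + bcnt)) {
            odp = odp_next(odp);
            if (odp && ib_umem_start(odp) != addr)
                    odp = NULL;  /* gap: allocate on the next pass */
            goto next_mr;
    }
    return result;
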
621 struct ib_umem_odp *odp; in pagefault_mr() local
625 odp = implicit_mr_get_data(mr, io_virt, bcnt); in pagefault_mr()
627 if (IS_ERR(odp)) in pagefault_mr()
628 return PTR_ERR(odp); in pagefault_mr()
629 mr = odp->private; in pagefault_mr()
631 odp = odp_mr; in pagefault_mr()
635 size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt); in pagefault_mr()
637 page_shift = odp->page_shift; in pagefault_mr()
642 if (prefetch && !downgrade && !odp->umem.writable) { in pagefault_mr()
650 if (odp->umem.writable && !downgrade) in pagefault_mr()
653 current_seq = READ_ONCE(odp->notifiers_seq); in pagefault_mr()
660 ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask, in pagefault_mr()
668 mutex_lock(&odp->umem_mutex); in pagefault_mr()
669 if (!ib_umem_mmu_notifier_retry(odp, current_seq)) { in pagefault_mr()
680 mutex_unlock(&odp->umem_mutex); in pagefault_mr()
701 next = odp_next(odp); in pagefault_mr()
707 odp = next; in pagefault_mr()
708 mr = odp->private; in pagefault_mr()
718 if (!wait_for_completion_timeout(&odp->notifier_completion, in pagefault_mr()
723 current_seq, odp->notifiers_seq, in pagefault_mr()
724 odp->notifiers_count); in pagefault_mr()
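pagefault_mr() ties the pieces together. An implicit MR is first resolved to a leaf via implicit_mr_get_data() (line 625) and each leaf is faulted in turn, hopping forward with odp_next() (line 701); a plain ODP MR uses its own umem (line 631). Each leaf then follows the classic mmu-notifier sequence pattern: sample notifiers_seq, pin and DMA-map the pages, and only publish the new translations if ib_umem_mmu_notifier_retry() confirms no invalidation raced in; on -EAGAIN the fault path waits on notifier_completion and warns with the sequence counters on timeout (lines 718-724). A condensed sketch of one leaf iteration; the declarations, the device-table update, the timeout value, and the exact warning text are assumptions:

    size = min_t(size_t, bcnt, ib_umem_end(odp) - io_virt);
    page_shift = odp->page_shift;

    access_mask = ODP_READ_ALLOWED_BIT;
    if (prefetch && !downgrade && !odp->umem.writable)
            return -EINVAL;      /* write prefetch needs a writable umem */
    if (odp->umem.writable && !downgrade)
            access_mask |= ODP_WRITE_ALLOWED_BIT;

    current_seq = READ_ONCE(odp->notifiers_seq);
    smp_rmb();                   /* assumed: order the sample before mapping */

    ret = ib_umem_odp_map_dma_pages(odp, io_virt, size, access_mask,
                                    current_seq);
    if (ret < 0)
            return ret;

    mutex_lock(&odp->umem_mutex);
    if (!ib_umem_mmu_notifier_retry(odp, current_seq))
            ret = 0;             /* assumed: push the new MTTs to the HCA here */
    else
            ret = -EAGAIN;       /* invalidation raced the fault: retry */
    mutex_unlock(&odp->umem_mutex);

    if (ret == -EAGAIN &&
        !wait_for_completion_timeout(&odp->notifier_completion, timeout))
            /* message text approximated from the hits at lines 723-724 */
            mlx5_ib_warn(dev, "timeout waiting for mmu notifier. seq %d against %d. notifiers_count=%d\n",
                         current_seq, odp->notifiers_seq,
                         odp->notifiers_count);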