Searched refs:umem_odp (Results 1 – 7 of 7) sorted by relevance
/Linux-v5.4/drivers/infiniband/core/
D umem_odp.c
    51  static void ib_umem_notifier_start_account(struct ib_umem_odp *umem_odp)
    53          mutex_lock(&umem_odp->umem_mutex);
    54          if (umem_odp->notifiers_count++ == 0)
    60                  reinit_completion(&umem_odp->notifier_completion);
    61          mutex_unlock(&umem_odp->umem_mutex);
    64  static void ib_umem_notifier_end_account(struct ib_umem_odp *umem_odp)
    66          mutex_lock(&umem_odp->umem_mutex);
    71          ++umem_odp->notifiers_seq;
    72          if (--umem_odp->notifiers_count == 0)
    73                  complete_all(&umem_odp->notifier_completion);
    [all …]
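The two helpers above bracket an MMU-notifier invalidation: the first notifier in re-arms notifier_completion so waiters block, and the last one out bumps notifiers_seq and completes it, letting page-fault handlers detect and wait out racing invalidations. Below is a minimal userspace sketch of that same counting pattern, with pthreads standing in for the kernel's mutex and completion; every name here is illustrative, not the kernel API.

/* Userspace sketch of the start/end accounting pattern above. */
#include <pthread.h>
#include <stdbool.h>

struct odp_account {
        pthread_mutex_t lock;
        pthread_cond_t  done;            /* stands in for notifier_completion */
        unsigned int    notifiers_count; /* invalidations currently in flight */
        unsigned int    notifiers_seq;   /* bumped once per finished range    */
        bool            complete;        /* completion state                  */
};

#define ODP_ACCOUNT_INIT \
        { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0, true }

static void start_account(struct odp_account *a)
{
        pthread_mutex_lock(&a->lock);
        /* First notifier in: re-arm the completion so waiters block. */
        if (a->notifiers_count++ == 0)
                a->complete = false;     /* ~ reinit_completion() */
        pthread_mutex_unlock(&a->lock);
}

static void end_account(struct odp_account *a)
{
        pthread_mutex_lock(&a->lock);
        /* The seq bump lets fault handlers spot a racing invalidation. */
        ++a->notifiers_seq;
        if (--a->notifiers_count == 0) {
                a->complete = true;      /* ~ complete_all() */
                pthread_cond_broadcast(&a->done);
        }
        pthread_mutex_unlock(&a->lock);
}

static void wait_until_idle(struct odp_account *a)
{
        pthread_mutex_lock(&a->lock);
        while (!a->complete)
                pthread_cond_wait(&a->done, &a->lock);
        pthread_mutex_unlock(&a->lock);
}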
D Makefile
    38  ib_uverbs-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o
/Linux-v5.4/include/rdma/
D ib_umem_odp.h
    92  static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
    94          return umem_odp->interval_tree.start;
    98  static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
   100          return umem_odp->interval_tree.last + 1;
   103  static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
   105          return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
   106                  umem_odp->page_shift;
   139  void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
   141  int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
   145  void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
   [all …]
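The inline helpers above derive everything from the umem's interval-tree node: interval_tree.last is the last covered byte, so ib_umem_end() is exclusive (+1), and the page count is just the byte span shifted down by page_shift. A standalone sketch of the same arithmetic, with kernel types swapped for stdint and the struct name made up:

/* Sketch of the ib_umem_start/end/num_pages address math above. */
#include <stdint.h>
#include <stdio.h>

struct umem_span {
        uint64_t start;      /* first covered byte (inclusive)  */
        uint64_t last;       /* last covered byte (inclusive)   */
        unsigned page_shift; /* e.g. 12 for 4 KiB pages         */
};

static uint64_t umem_start(const struct umem_span *u) { return u->start; }
static uint64_t umem_end(const struct umem_span *u)   { return u->last + 1; }

static size_t umem_num_pages(const struct umem_span *u)
{
        return (umem_end(u) - umem_start(u)) >> u->page_shift;
}

int main(void)
{
        /* A 64 KiB region of 4 KiB pages comes out to 16 pages. */
        struct umem_span u = { .start = 0x10000, .last = 0x1ffff,
                               .page_shift = 12 };
        printf("%zu pages\n", umem_num_pages(&u));
        return 0;
}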
D ib_verbs.h
  2425  void (*invalidate_range)(struct ib_umem_odp *umem_odp,
/Linux-v5.4/drivers/infiniband/hw/mlx5/
D odp.c
   250  void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
   260          if (!umem_odp) {
   265          mr = umem_odp->private;
   270          start = max_t(u64, ib_umem_start(umem_odp), start);
   271          end = min_t(u64, ib_umem_end(umem_odp), end);
   279          mutex_lock(&umem_odp->umem_mutex);
   280          for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
   281                  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
   288                  if (umem_odp->dma_list[idx] &
   317          ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
   [all …]
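The hits above show the shape of mlx5_ib_invalidate_range(): clamp the notifier's range to the umem's own bounds, then walk it one page at a time, indexing dma_list to find slots that were actually mapped; the flag tested at line 288 and the MTT teardown are elided in the hit list. A userspace sketch of that clamp-and-walk structure, under the assumption that ODP_PAGE_PRESENT and unmap_page() are hypothetical stand-ins, not the kernel's real bits:

/* Sketch of the clamp-and-walk loop in mlx5_ib_invalidate_range(). */
#include <stdint.h>

#define ODP_PAGE_PRESENT (1ULL << 0)  /* illustrative flag, not the kernel bit */

struct umem_odp_sketch {
        uint64_t start, end;   /* [start, end) covered by this umem */
        unsigned page_shift;
        uint64_t *dma_list;    /* one entry per page-sized slot     */
};

static void unmap_page(struct umem_odp_sketch *u, uint64_t idx)
{
        u->dma_list[idx] = 0;  /* stand-in for the real DMA unmap */
}

static uint64_t max64(uint64_t a, uint64_t b) { return a > b ? a : b; }
static uint64_t min64(uint64_t a, uint64_t b) { return a < b ? a : b; }

static void invalidate_range(struct umem_odp_sketch *u,
                             uint64_t start, uint64_t end)
{
        uint64_t addr, idx;

        /* Clamp the notifier's range to what this umem actually covers. */
        start = max64(u->start, start);
        end   = min64(u->end, end);

        /* The kernel holds umem_mutex around this loop; elided here. */
        for (addr = start; addr < end; addr += 1ULL << u->page_shift) {
                idx = (addr - u->start) >> u->page_shift;
                /* Only pages that were actually mapped need tearing down. */
                if (u->dma_list[idx] & ODP_PAGE_PRESENT)
                        unmap_page(u, idx);
        }
}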
D mr.c
  1579          struct ib_umem_odp *umem_odp = to_ib_umem_odp(umem);
  1595          if (!umem_odp->is_implicit_odp)
  1596                  mlx5_ib_invalidate_range(umem_odp,
  1597                                           ib_umem_start(umem_odp),
  1598                                           ib_umem_end(umem_odp));
  1606          ib_umem_odp_release(umem_odp);
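The dereg_mr() hits show the teardown ordering: an explicit (non-implicit) ODP umem is invalidated across its whole [ib_umem_start, ib_umem_end) range before ib_umem_odp_release() frees it, so no DMA mapping outlives the memory region. A short sketch of that ordering, reusing struct umem_odp_sketch and invalidate_range() from the previous example; release_umem() is a hypothetical stand-in for ib_umem_odp_release():

/* Sketch of the dereg ordering; builds on the previous sketch. */
static void release_umem(struct umem_odp_sketch *u)
{
        (void)u;  /* stand-in: the real release frees page/DMA state */
}

static void dereg_mr_sketch(struct umem_odp_sketch *u, int is_implicit)
{
        /* Implicit ODP umems have no directly mapped pages to zap. */
        if (!is_implicit)
                invalidate_range(u, u->start, u->end);
        /* Only after the range is dead is the umem itself released. */
        release_umem(u);
}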
D mlx5_ib.h
  1254  void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp, unsigned long start,
  1285  static inline void mlx5_ib_invalidate_range(struct ib_umem_odp *umem_odp,