Lines Matching refs:iova

127 __cached_rbnode_insert_update(struct iova_domain *iovad, struct iova *new)  in __cached_rbnode_insert_update()
136 __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free) in __cached_rbnode_delete_update()
138 struct iova *cached_iova; in __cached_rbnode_delete_update()
140 cached_iova = rb_entry(iovad->cached32_node, struct iova, node); in __cached_rbnode_delete_update()
145 cached_iova = rb_entry(iovad->cached_node, struct iova, node); in __cached_rbnode_delete_update()
152 iova_insert_rbtree(struct rb_root *root, struct iova *iova, in iova_insert_rbtree() argument
160 struct iova *this = rb_entry(*new, struct iova, node); in iova_insert_rbtree()
164 if (iova->pfn_lo < this->pfn_lo) in iova_insert_rbtree()
166 else if (iova->pfn_lo > this->pfn_lo) in iova_insert_rbtree()
174 rb_link_node(&iova->node, parent, new); in iova_insert_rbtree()
175 rb_insert_color(&iova->node, root); in iova_insert_rbtree()
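Lines 152-175 above show iova_insert_rbtree() walking the red-black tree by pfn_lo and linking the new node where the walk ends; a duplicate start pfn is treated as a caller bug and the insert is abandoned. Below is a minimal userspace sketch of the same ordered-insert walk over a plain binary search tree instead of the kernel's <linux/rbtree.h>; every name in it (toy_iova, toy_insert) is invented for illustration and rebalancing is omitted.

#include <stdio.h>

/* Userspace stand-in for struct iova: one [pfn_lo, pfn_hi] range node. */
struct toy_iova {
    unsigned long pfn_lo, pfn_hi;
    struct toy_iova *left, *right;
};

/* Mirrors the walk in iova_insert_rbtree(): descend by pfn_lo and link the
 * new node at the empty slot.  The kernel then calls rb_insert_color() to
 * rebalance; a plain BST skips that step. */
static void toy_insert(struct toy_iova **root, struct toy_iova *new)
{
    struct toy_iova **link = root;

    while (*link) {
        struct toy_iova *this = *link;

        if (new->pfn_lo < this->pfn_lo)
            link = &this->left;
        else if (new->pfn_lo > this->pfn_lo)
            link = &this->right;
        else
            return;            /* duplicate start pfn: refuse the insert */
    }
    *link = new;
}

int main(void)
{
    struct toy_iova a = { 0x100, 0x1ff, NULL, NULL };
    struct toy_iova b = { 0x300, 0x3ff, NULL, NULL };
    struct toy_iova *root = NULL;

    toy_insert(&root, &a);
    toy_insert(&root, &b);
    printf("root covers %lx-%lx\n", root->pfn_lo, root->pfn_hi);
    return 0;
}

Keying the tree on pfn_lo is enough because allocated iova ranges never overlap, so pfn_lo totally orders them.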
180 struct iova *new, bool size_aligned) in __alloc_and_insert_iova_range()
183 struct iova *curr_iova; in __alloc_and_insert_iova_range()
194 curr_iova = rb_entry(curr, struct iova, node); in __alloc_and_insert_iova_range()
200 curr_iova = rb_entry(curr, struct iova, node); in __alloc_and_insert_iova_range()
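Lines 180-200 show __alloc_and_insert_iova_range() stepping through existing entries with rb_entry() while hunting for a free gap; the listing omits the gap arithmetic itself. The sketch below is a hedged userspace illustration of the idea, over a sorted array of busy ranges rather than the kernel's rbtree walk from a cached node: scan from the top of the space downwards (the kernel also allocates from limit_pfn downwards) and place the request in the first aligned hole that fits. All names (toy_range, toy_alloc) are invented here; size must be at least 1 and align a power of two.

#include <stdio.h>

struct toy_range { unsigned long lo, hi; };   /* inclusive, sorted by lo */

/* Return the start pfn of an aligned hole that can hold 'size' pfns below
 * 'limit', or -1 if no hole fits. */
static long toy_alloc(const struct toy_range *busy, int n,
                      unsigned long size, unsigned long limit,
                      unsigned long align)
{
    unsigned long hole_hi = limit;

    for (int i = n - 1; i >= -1; i--) {
        unsigned long hole_lo = (i >= 0) ? busy[i].hi + 1 : 0;

        if (hole_hi >= hole_lo && hole_hi - hole_lo + 1 >= size) {
            unsigned long start = (hole_hi - (size - 1)) & ~(align - 1);

            if (start >= hole_lo)
                return (long)start;   /* aligned hole found */
        }
        if (i >= 0) {
            if (busy[i].lo == 0)
                break;                /* no free space below pfn 0 */
            hole_hi = busy[i].lo - 1;
        }
    }
    return -1;                        /* no room: the real allocator fails here */
}

int main(void)
{
    struct toy_range busy[] = { { 0x000, 0x0ff }, { 0x200, 0x2ff } };
    long start = toy_alloc(busy, 2, 0x80, 0x3ff, 0x80);

    printf("allocated start pfn = 0x%lx\n", (unsigned long)start);
    return 0;
}

The real function additionally starts its walk from iovad->cached_node / cached32_node so repeated allocations do not rescan the whole tree.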
226 struct iova *alloc_iova_mem(void) in alloc_iova_mem()
232 void free_iova_mem(struct iova *iova) in free_iova_mem() argument
234 if (iova->pfn_lo != IOVA_ANCHOR) in free_iova_mem()
235 kmem_cache_free(iova_cache, iova); in free_iova_mem()
244 "iommu_iova", sizeof(struct iova), 0, in iova_cache_get()
285 struct iova *
290 struct iova *new_iova; in alloc_iova()
309 static struct iova *
317 struct iova *iova = rb_entry(node, struct iova, node); in private_find_iova() local
319 if (pfn < iova->pfn_lo) in private_find_iova()
321 else if (pfn > iova->pfn_hi) in private_find_iova()
324 return iova; /* pfn falls within iova's range */ in private_find_iova()
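Lines 309-324 show private_find_iova() descending the tree: go left while the pfn is below a node's pfn_lo, right while it is above pfn_hi, and stop when the pfn falls inside a node's range. A small hedged userspace sketch of that lookup over the same toy BST shape (names invented here):

#include <stdio.h>

struct toy_iova {
    unsigned long pfn_lo, pfn_hi;
    struct toy_iova *left, *right;
};

/* Mirrors private_find_iova(): because allocated ranges never overlap, each
 * comparison rules out one whole subtree. */
static struct toy_iova *toy_find(struct toy_iova *node, unsigned long pfn)
{
    while (node) {
        if (pfn < node->pfn_lo)
            node = node->left;
        else if (pfn > node->pfn_hi)
            node = node->right;
        else
            return node;       /* pfn falls within this range */
    }
    return NULL;               /* not allocated: find_iova() returns NULL too */
}

int main(void)
{
    struct toy_iova low  = { 0x100, 0x1ff, NULL, NULL };
    struct toy_iova high = { 0x300, 0x3ff, NULL, NULL };
    struct toy_iova root = { 0x200, 0x2ff, &low, &high };

    printf("0x250 -> %p, 0x050 -> %p\n",
           (void *)toy_find(&root, 0x250), (void *)toy_find(&root, 0x050));
    return 0;
}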
330 static void private_free_iova(struct iova_domain *iovad, struct iova *iova) in private_free_iova() argument
333 __cached_rbnode_delete_update(iovad, iova); in private_free_iova()
334 rb_erase(&iova->node, &iovad->rbroot); in private_free_iova()
335 free_iova_mem(iova); in private_free_iova()
345 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn) in find_iova()
348 struct iova *iova; in find_iova() local
352 iova = private_find_iova(iovad, pfn); in find_iova()
354 return iova; in find_iova()
365 __free_iova(struct iova_domain *iovad, struct iova *iova) in __free_iova() argument
370 private_free_iova(iovad, iova); in __free_iova()
385 struct iova *iova = find_iova(iovad, pfn); in free_iova() local
387 if (iova) in free_iova()
388 __free_iova(iovad, iova); in free_iova()
408 struct iova *new_iova; in alloc_iova_fast()
585 struct iova *iova, *tmp; in put_iova_domain() local
589 rbtree_postorder_for_each_entry_safe(iova, tmp, &iovad->rbroot, node) in put_iova_domain()
590 free_iova_mem(iova); in put_iova_domain()
598 struct iova *iova = rb_entry(node, struct iova, node); in __is_range_overlap() local
600 if ((pfn_lo <= iova->pfn_hi) && (pfn_hi >= iova->pfn_lo)) in __is_range_overlap()
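Line 600 is the standard closed-interval overlap test: two inclusive pfn ranges intersect exactly when each one starts no later than the other ends. A tiny standalone check of that predicate (the helper name is mine, not the kernel's):

#include <assert.h>

/* Same predicate as line 600: [a_lo, a_hi] and [b_lo, b_hi] overlap iff
 * a_lo <= b_hi && a_hi >= b_lo (all bounds inclusive). */
static int ranges_overlap(unsigned long a_lo, unsigned long a_hi,
                          unsigned long b_lo, unsigned long b_hi)
{
    return a_lo <= b_hi && a_hi >= b_lo;
}

int main(void)
{
    assert(ranges_overlap(0x100, 0x1ff, 0x180, 0x280));   /* partial overlap */
    assert(ranges_overlap(0x100, 0x1ff, 0x1ff, 0x1ff));   /* touch at one pfn */
    assert(!ranges_overlap(0x100, 0x1ff, 0x200, 0x2ff));  /* adjacent, no overlap */
    return 0;
}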
605 static inline struct iova *
608 struct iova *iova; in alloc_and_init_iova() local
610 iova = alloc_iova_mem(); in alloc_and_init_iova()
611 if (iova) { in alloc_and_init_iova()
612 iova->pfn_lo = pfn_lo; in alloc_and_init_iova()
613 iova->pfn_hi = pfn_hi; in alloc_and_init_iova()
616 return iova; in alloc_and_init_iova()
619 static struct iova *
623 struct iova *iova; in __insert_new_range() local
625 iova = alloc_and_init_iova(pfn_lo, pfn_hi); in __insert_new_range()
626 if (iova) in __insert_new_range()
627 iova_insert_rbtree(&iovad->rbroot, iova, NULL); in __insert_new_range()
629 return iova; in __insert_new_range()
633 __adjust_overlap_range(struct iova *iova, in __adjust_overlap_range() argument
636 if (*pfn_lo < iova->pfn_lo) in __adjust_overlap_range()
637 iova->pfn_lo = *pfn_lo; in __adjust_overlap_range()
638 if (*pfn_hi > iova->pfn_hi) in __adjust_overlap_range()
639 *pfn_lo = iova->pfn_hi + 1; in __adjust_overlap_range()
650 struct iova *
656 struct iova *iova; in reserve_iova() local
666 iova = rb_entry(node, struct iova, node); in reserve_iova()
667 __adjust_overlap_range(iova, &pfn_lo, &pfn_hi); in reserve_iova()
668 if ((pfn_lo >= iova->pfn_lo) && in reserve_iova()
669 (pfn_hi <= iova->pfn_hi)) in reserve_iova()
680 iova = __insert_new_range(iovad, pfn_lo, pfn_hi); in reserve_iova()
684 return iova; in reserve_iova()
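Lines 633-684 show the reservation path: reserve_iova() walks the existing entries in order, lets __adjust_overlap_range() widen an overlapping entry downwards and push the request's pfn_lo past that entry, and only inserts a fresh entry for whatever part of the request is still uncovered. The sketch below is a hedged userspace illustration of just that arithmetic (toy names, no rbtree walk or locking):

#include <stdio.h>

struct toy_range { unsigned long lo, hi; };    /* inclusive pfn range */

/* Same arithmetic as __adjust_overlap_range(): grow the existing entry
 * downwards to cover the low end of the request, and move the request's
 * low end past the entry if the request still sticks out above it. */
static void adjust_overlap(struct toy_range *existing,
                           unsigned long *req_lo, unsigned long *req_hi)
{
    if (*req_lo < existing->lo)
        existing->lo = *req_lo;
    if (*req_hi > existing->hi)
        *req_lo = existing->hi + 1;
}

int main(void)
{
    /* Reserving [0x80, 0x2ff] against an existing entry [0x100, 0x1ff]:
     * the entry is widened down to 0x80, and reserve_iova() would then
     * insert a new entry for the uncovered tail [0x200, 0x2ff]. */
    struct toy_range existing = { 0x100, 0x1ff };
    unsigned long req_lo = 0x80, req_hi = 0x2ff;

    adjust_overlap(&existing, &req_lo, &req_hi);
    printf("existing now %lx-%lx, remaining request %lx-%lx\n",
           existing.lo, existing.hi, req_lo, req_hi);
    return 0;
}

Note that the helper never raises the existing entry's upper bound; any uncovered tail becomes a separate entry via __insert_new_range(), which is why the check at lines 668-669 can decide whether the request is already fully covered.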
703 struct iova *iova = rb_entry(node, struct iova, node); in copy_reserved_iova() local
704 struct iova *new_iova; in copy_reserved_iova()
706 if (iova->pfn_lo == IOVA_ANCHOR) in copy_reserved_iova()
709 new_iova = reserve_iova(to, iova->pfn_lo, iova->pfn_hi); in copy_reserved_iova()
712 iova->pfn_lo, iova->pfn_lo); in copy_reserved_iova()
718 struct iova *
719 split_and_remove_iova(struct iova_domain *iovad, struct iova *iova, in split_and_remove_iova() argument
723 struct iova *prev = NULL, *next = NULL; in split_and_remove_iova()
726 if (iova->pfn_lo < pfn_lo) { in split_and_remove_iova()
727 prev = alloc_and_init_iova(iova->pfn_lo, pfn_lo - 1); in split_and_remove_iova()
731 if (iova->pfn_hi > pfn_hi) { in split_and_remove_iova()
732 next = alloc_and_init_iova(pfn_hi + 1, iova->pfn_hi); in split_and_remove_iova()
737 __cached_rbnode_delete_update(iovad, iova); in split_and_remove_iova()
738 rb_erase(&iova->node, &iovad->rbroot); in split_and_remove_iova()
742 iova->pfn_lo = pfn_lo; in split_and_remove_iova()
746 iova->pfn_hi = pfn_hi; in split_and_remove_iova()
750 return iova; in split_and_remove_iova()
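Lines 718-750 show split_and_remove_iova() carving [pfn_lo, pfn_hi] out of an existing entry: allocate a 'prev' node for any part below the cut and a 'next' node for any part above it, unlink the original, and keep only the requested middle. The range arithmetic is the interesting part; here is a small hedged sketch of it (toy names; allocation-failure handling, rbtree reinsertion and locking are omitted):

#include <stdio.h>

struct toy_range { unsigned long lo, hi; };   /* inclusive pfn range */

/* Same arithmetic as split_and_remove_iova(): given an allocated range and a
 * cut [cut_lo, cut_hi] inside it, compute the leftover pieces.  Returns how
 * many leftovers were produced (0, 1 or 2). */
static int toy_split(struct toy_range span,
                     unsigned long cut_lo, unsigned long cut_hi,
                     struct toy_range out[2])
{
    int n = 0;

    if (span.lo < cut_lo)                       /* piece below the cut */
        out[n++] = (struct toy_range){ span.lo, cut_lo - 1 };
    if (span.hi > cut_hi)                       /* piece above the cut */
        out[n++] = (struct toy_range){ cut_hi + 1, span.hi };
    return n;
}

int main(void)
{
    struct toy_range pieces[2];
    int n = toy_split((struct toy_range){ 0x100, 0x3ff }, 0x200, 0x2ff, pieces);

    for (int i = 0; i < n; i++)
        printf("leftover %lx-%lx\n", pieces[i].lo, pieces[i].hi);
    return 0;
}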
802 struct iova *iova = private_find_iova(iovad, mag->pfns[i]); in iova_magazine_free_pfns() local
804 BUG_ON(!iova); in iova_magazine_free_pfns()
805 private_free_iova(iovad, iova); in iova_magazine_free_pfns()
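Lines 802-805 show the magazine (rcache) flush path: for every pfn stashed in a magazine, look the entry up with private_find_iova() and release it with private_free_iova(); BUG_ON(!iova) asserts that a cached pfn always still has a backing entry, and the magazine is marked empty afterwards. A hedged userspace sketch of that drain loop, with the tree lookup replaced by a flat table so it stays self-contained (all names below are invented):

#include <stdio.h>
#include <assert.h>

#define TOY_MAG_SIZE 4

/* Toy stand-in for struct iova_magazine: a small batch of cached pfns. */
struct toy_magazine {
    unsigned long size;
    unsigned long pfns[TOY_MAG_SIZE];
};

/* Toy "domain": which pfns currently have a live allocation. */
static unsigned long live_pfns[] = { 0x100, 0x180, 0x200, 0x280 };
static int live[4] = { 1, 1, 1, 1 };

/* Mirrors iova_magazine_free_pfns(): every stashed pfn must still have a
 * backing entry; each one is looked up and released, then the magazine is
 * emptied. */
static void toy_magazine_free_pfns(struct toy_magazine *mag)
{
    for (unsigned long i = 0; i < mag->size; i++) {
        int found = -1;

        for (int j = 0; j < 4; j++)
            if (live[j] && live_pfns[j] == mag->pfns[i])
                found = j;
        assert(found >= 0);        /* stand-in for BUG_ON(!iova) */
        live[found] = 0;           /* stand-in for private_free_iova() */
    }
    mag->size = 0;
}

int main(void)
{
    struct toy_magazine mag = { .size = 2, .pfns = { 0x180, 0x280 } };

    toy_magazine_free_pfns(&mag);
    printf("magazine drained, size=%lu\n", mag.size);
    return 0;
}

The point the listing makes is that the magazine stores raw pfns, not struct iova pointers, so flushing it costs one tree lookup per cached pfn.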