Lines matching refs: va (identifier cross-reference; the fragments appear to come from the Linux kernel's mm/vmalloc.c)

374 va_size(struct vmap_area *va)  in va_size()  argument
376 return (va->va_end - va->va_start); in va_size()
382 struct vmap_area *va; in get_subtree_max_size() local
384 va = rb_entry_safe(node, struct vmap_area, rb_node); in get_subtree_max_size()
385 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
392 compute_subtree_max_size(struct vmap_area *va) in compute_subtree_max_size() argument
394 return max3(va_size(va), in compute_subtree_max_size()
395 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
396 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
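
For orientation, here is a sketch of how these three helpers read in full, assuming the fragments come from mm/vmalloc.c of a ~v5.4 kernel and its augmented rbtree of free vmap areas (struct vmap_area, rb_entry_safe() and max3() are kernel-internal; comments abridged):

static __always_inline unsigned long
va_size(struct vmap_area *va)
{
        return (va->va_end - va->va_start);
}

static __always_inline unsigned long
get_subtree_max_size(struct rb_node *node)
{
        struct vmap_area *va;

        va = rb_entry_safe(node, struct vmap_area, rb_node);
        return va ? va->subtree_max_size : 0;
}

/* Largest free size found in this node's subtree. */
static __always_inline unsigned long
compute_subtree_max_size(struct vmap_area *va)
{
        return max3(va_size(va),
                get_subtree_max_size(va->rb_node.rb_left),
                get_subtree_max_size(va->rb_node.rb_right));
}
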
418 struct vmap_area *va; in __find_vmap_area() local
420 va = rb_entry(n, struct vmap_area, rb_node); in __find_vmap_area()
421 if (addr < va->va_start) in __find_vmap_area()
423 else if (addr >= va->va_end) in __find_vmap_area()
426 return va; in __find_vmap_area()
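
The fragments above are a plain rbtree lookup by address; a sketch of the whole function, assuming the file-local vmap_area_root busy tree:

static struct vmap_area *__find_vmap_area(unsigned long addr)
{
        struct rb_node *n = vmap_area_root.rb_node;

        while (n) {
                struct vmap_area *va;

                va = rb_entry(n, struct vmap_area, rb_node);
                if (addr < va->va_start)
                        n = n->rb_left;
                else if (addr >= va->va_end)
                        n = n->rb_right;
                else
                        return va;
        }

        return NULL;
}
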
437 find_va_links(struct vmap_area *va, in find_va_links() argument
467 if (va->va_start < tmp_va->va_end && in find_va_links()
468 va->va_end <= tmp_va->va_start) in find_va_links()
470 else if (va->va_end > tmp_va->va_start && in find_va_links()
471 va->va_start >= tmp_va->va_end) in find_va_links()
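
These two conditions pick the descent direction while locating the insertion point for a new area; a sketch of the surrounding walk, reconstructed under the same ~v5.4 assumption (the empty-tree shortcut, the do/while loop and the BUG() on overlap are not shown in the fragments):

static __always_inline struct rb_node **
find_va_links(struct vmap_area *va,
        struct rb_root *root, struct rb_node *from,
        struct rb_node **parent)
{
        struct vmap_area *tmp_va;
        struct rb_node **link;

        if (root) {
                link = &root->rb_node;
                if (unlikely(!*link)) {
                        *parent = NULL;
                        return link;
                }
        } else {
                link = &from;
        }

        /*
         * Walk to the bottom of the tree; "link" ends up pointing at
         * the slot where the new va->rb_node should be attached.
         */
        do {
                tmp_va = rb_entry(*link, struct vmap_area, rb_node);

                /* Overlapping areas are a bug. */
                if (va->va_start < tmp_va->va_end &&
                                va->va_end <= tmp_va->va_start)
                        link = &(*link)->rb_left;
                else if (va->va_end > tmp_va->va_start &&
                                va->va_start >= tmp_va->va_end)
                        link = &(*link)->rb_right;
                else
                        BUG();
        } while (*link);

        *parent = &tmp_va->rb_node;
        return link;
}
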
500 link_va(struct vmap_area *va, struct rb_root *root, in link_va() argument
514 rb_link_node(&va->rb_node, parent, link); in link_va()
527 rb_insert_augmented(&va->rb_node, in link_va()
529 va->subtree_max_size = 0; in link_va()
531 rb_insert_color(&va->rb_node, root); in link_va()
535 list_add(&va->list, head); in link_va()
539 unlink_va(struct vmap_area *va, struct rb_root *root) in unlink_va() argument
541 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in unlink_va()
545 rb_erase_augmented(&va->rb_node, in unlink_va()
548 rb_erase(&va->rb_node, root); in unlink_va()
550 list_del(&va->list); in unlink_va()
551 RB_CLEAR_NODE(&va->rb_node); in unlink_va()
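
A sketch of the link/unlink pair these fragments belong to; the augmented callbacks (free_vmap_area_rb_augment_cb) and the tree/list globals are assumed from the rest of the file, and comments are abridged:

static __always_inline void
link_va(struct vmap_area *va, struct rb_root *root,
        struct rb_node *parent, struct rb_node **link, struct list_head *head)
{
        /* Find the future previous list node of VA. */
        if (likely(parent)) {
                head = &rb_entry(parent, struct vmap_area, rb_node)->list;
                if (&parent->rb_right != link)
                        head = head->prev;
        }

        /* Insert to the rb-tree. */
        rb_link_node(&va->rb_node, parent, link);
        if (root == &free_vmap_area_root) {
                /*
                 * subtree_max_size is set to zero here; it is fixed up
                 * bottom-up by augment_tree_propagate_from() later.
                 */
                rb_insert_augmented(&va->rb_node,
                        root, &free_vmap_area_rb_augment_cb);
                va->subtree_max_size = 0;
        } else {
                rb_insert_color(&va->rb_node, root);
        }

        /* Keep the list sorted by address. */
        list_add(&va->list, head);
}

static __always_inline void
unlink_va(struct vmap_area *va, struct rb_root *root)
{
        if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
                return;

        if (root == &free_vmap_area_root)
                rb_erase_augmented(&va->rb_node,
                        root, &free_vmap_area_rb_augment_cb);
        else
                rb_erase(&va->rb_node, root);

        list_del(&va->list);
        RB_CLEAR_NODE(&va->rb_node);
}
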
558 struct vmap_area *va; in augment_tree_propagate_check() local
566 va = rb_entry(n, struct vmap_area, rb_node); in augment_tree_propagate_check()
567 size = va->subtree_max_size; in augment_tree_propagate_check()
571 va = rb_entry(node, struct vmap_area, rb_node); in augment_tree_propagate_check()
576 if (va_size(va) == size) { in augment_tree_propagate_check()
586 va = rb_entry(n, struct vmap_area, rb_node); in augment_tree_propagate_check()
588 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
624 augment_tree_propagate_from(struct vmap_area *va) in augment_tree_propagate_from() argument
626 struct rb_node *node = &va->rb_node; in augment_tree_propagate_from()
630 va = rb_entry(node, struct vmap_area, rb_node); in augment_tree_propagate_from()
631 new_va_sub_max_size = compute_subtree_max_size(va); in augment_tree_propagate_from()
639 if (va->subtree_max_size == new_va_sub_max_size) in augment_tree_propagate_from()
642 va->subtree_max_size = new_va_sub_max_size; in augment_tree_propagate_from()
643 node = rb_parent(&va->rb_node); in augment_tree_propagate_from()
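
A sketch of the bottom-up propagation loop the fragments come from; it walks toward the root and stops as soon as a node's cached subtree_max_size is already correct:

static __always_inline void
augment_tree_propagate_from(struct vmap_area *va)
{
        struct rb_node *node = &va->rb_node;
        unsigned long new_va_sub_max_size;

        while (node) {
                va = rb_entry(node, struct vmap_area, rb_node);
                new_va_sub_max_size = compute_subtree_max_size(va);

                /*
                 * If the recomputed maximum equals the cached one,
                 * the upper levels are already consistent; stop here.
                 */
                if (va->subtree_max_size == new_va_sub_max_size)
                        break;

                va->subtree_max_size = new_va_sub_max_size;
                node = rb_parent(&va->rb_node);
        }
}
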
652 insert_vmap_area(struct vmap_area *va, in insert_vmap_area() argument
658 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area()
659 link_va(va, root, parent, link, head); in insert_vmap_area()
663 insert_vmap_area_augment(struct vmap_area *va, in insert_vmap_area_augment() argument
671 link = find_va_links(va, NULL, from, &parent); in insert_vmap_area_augment()
673 link = find_va_links(va, root, NULL, &parent); in insert_vmap_area_augment()
675 link_va(va, root, parent, link, head); in insert_vmap_area_augment()
676 augment_tree_propagate_from(va); in insert_vmap_area_augment()
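
The two insert helpers shown above are thin wrappers around find_va_links()/link_va(); a sketch under the same assumption:

static void
insert_vmap_area(struct vmap_area *va,
        struct rb_root *root, struct list_head *head)
{
        struct rb_node **link;
        struct rb_node *parent;

        link = find_va_links(va, root, NULL, &parent);
        link_va(va, root, parent, link, head);
}

static void
insert_vmap_area_augment(struct vmap_area *va,
        struct rb_node *from, struct rb_root *root,
        struct list_head *head)
{
        struct rb_node **link;
        struct rb_node *parent;

        if (from)
                link = find_va_links(va, NULL, from, &parent);
        else
                link = find_va_links(va, root, NULL, &parent);

        link_va(va, root, parent, link, head);
        augment_tree_propagate_from(va);
}
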
686 merge_or_add_vmap_area(struct vmap_area *va, in merge_or_add_vmap_area() argument
699 link = find_va_links(va, root, NULL, &parent); in merge_or_add_vmap_area()
717 if (sibling->va_start == va->va_end) { in merge_or_add_vmap_area()
718 sibling->va_start = va->va_start; in merge_or_add_vmap_area()
724 kmem_cache_free(vmap_area_cachep, va); in merge_or_add_vmap_area()
727 va = sibling; in merge_or_add_vmap_area()
741 if (sibling->va_end == va->va_start) { in merge_or_add_vmap_area()
742 sibling->va_end = va->va_end; in merge_or_add_vmap_area()
748 unlink_va(va, root); in merge_or_add_vmap_area()
751 kmem_cache_free(vmap_area_cachep, va); in merge_or_add_vmap_area()
758 link_va(va, root, parent, link, head); in merge_or_add_vmap_area()
759 augment_tree_propagate_from(va); in merge_or_add_vmap_area()
764 is_within_this_va(struct vmap_area *va, unsigned long size, in is_within_this_va() argument
769 if (va->va_start > vstart) in is_within_this_va()
770 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
779 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
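
A sketch of the fit test, assuming the same ~v5.4 source; the overflow check between the fragments is reconstructed:

static __always_inline bool
is_within_this_va(struct vmap_area *va, unsigned long size,
        unsigned long align, unsigned long vstart)
{
        unsigned long nva_start_addr;

        if (va->va_start > vstart)
                nva_start_addr = ALIGN(va->va_start, align);
        else
                nva_start_addr = ALIGN(vstart, align);

        /* Can overflow for a huge size or alignment. */
        if (nva_start_addr + size < nva_start_addr ||
                        nva_start_addr < vstart)
                return false;

        return (nva_start_addr + size <= va->va_end);
}
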
791 struct vmap_area *va; in find_vmap_lowest_match() local
802 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
805 vstart < va->va_start) { in find_vmap_lowest_match()
808 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
809 return va; in find_vmap_lowest_match()
827 va = rb_entry(node, struct vmap_area, rb_node); in find_vmap_lowest_match()
828 if (is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_match()
829 return va; in find_vmap_lowest_match()
832 vstart <= va->va_start) { in find_vmap_lowest_match()
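
A sketch of the lowest-match search these fragments belong to: descend left while the left subtree can still hold the request, otherwise test the current node and, if needed, roll back up to the first usable right subtree (the length adjustment for alignment is reconstructed, not shown in the fragments):

static __always_inline struct vmap_area *
find_vmap_lowest_match(unsigned long size,
        unsigned long align, unsigned long vstart)
{
        struct vmap_area *va;
        struct rb_node *node;
        unsigned long length;

        /* Start from the root. */
        node = free_vmap_area_root.rb_node;

        /* Adjust the search size for alignment overhead. */
        length = size + align - 1;

        while (node) {
                va = rb_entry(node, struct vmap_area, rb_node);

                if (get_subtree_max_size(node->rb_left) >= length &&
                                vstart < va->va_start) {
                        node = node->rb_left;
                } else {
                        if (is_within_this_va(va, size, align, vstart))
                                return va;

                        /* Only descend right if it can hold the request. */
                        if (get_subtree_max_size(node->rb_right) >= length) {
                                node = node->rb_right;
                                continue;
                        }

                        /* Roll back to the first usable right subtree. */
                        while ((node = rb_parent(node))) {
                                va = rb_entry(node, struct vmap_area, rb_node);
                                if (is_within_this_va(va, size, align, vstart))
                                        return va;

                                if (get_subtree_max_size(node->rb_right) >= length &&
                                                vstart <= va->va_start) {
                                        node = node->rb_right;
                                        break;
                                }
                        }
                }
        }

        return NULL;
}
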
850 struct vmap_area *va; in find_vmap_lowest_linear_match() local
852 list_for_each_entry(va, &free_vmap_area_list, list) { in find_vmap_lowest_linear_match()
853 if (!is_within_this_va(va, size, align, vstart)) in find_vmap_lowest_linear_match()
856 return va; in find_vmap_lowest_linear_match()
890 classify_va_fit_type(struct vmap_area *va, in classify_va_fit_type() argument
896 if (nva_start_addr < va->va_start || in classify_va_fit_type()
897 nva_start_addr + size > va->va_end) in classify_va_fit_type()
901 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
902 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
906 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
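
A sketch of the classification, together with the fit_type enum it returns (the enum is defined elsewhere in the same file under this assumption):

enum fit_type {
        NOTHING_FIT = 0,
        FL_FIT_TYPE = 1,        /* full fit */
        LE_FIT_TYPE = 2,        /* left edge fit */
        RE_FIT_TYPE = 3,        /* right edge fit */
        NE_FIT_TYPE = 4         /* no edge fit */
};

static __always_inline enum fit_type
classify_va_fit_type(struct vmap_area *va,
        unsigned long nva_start_addr, unsigned long size)
{
        enum fit_type type;

        /* Check if it is within VA. */
        if (nva_start_addr < va->va_start ||
                        nva_start_addr + size > va->va_end)
                return NOTHING_FIT;

        /* Now classify. */
        if (va->va_start == nva_start_addr) {
                if (va->va_end == nva_start_addr + size)
                        type = FL_FIT_TYPE;
                else
                        type = LE_FIT_TYPE;
        } else if (va->va_end == nva_start_addr + size) {
                type = RE_FIT_TYPE;
        } else {
                type = NE_FIT_TYPE;
        }

        return type;
}
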
916 adjust_va_to_fit_type(struct vmap_area *va, in adjust_va_to_fit_type() argument
930 unlink_va(va, &free_vmap_area_root); in adjust_va_to_fit_type()
931 kmem_cache_free(vmap_area_cachep, va); in adjust_va_to_fit_type()
940 va->va_start += size; in adjust_va_to_fit_type()
949 va->va_end = nva_start_addr; in adjust_va_to_fit_type()
980 lva->va_start = va->va_start; in adjust_va_to_fit_type()
986 va->va_start = nva_start_addr + size; in adjust_va_to_fit_type()
992 augment_tree_propagate_from(va); in adjust_va_to_fit_type()
995 insert_vmap_area_augment(lva, &va->rb_node, in adjust_va_to_fit_type()
1011 struct vmap_area *va; in __alloc_vmap_area() local
1015 va = find_vmap_lowest_match(size, align, vstart); in __alloc_vmap_area()
1016 if (unlikely(!va)) in __alloc_vmap_area()
1019 if (va->va_start > vstart) in __alloc_vmap_area()
1020 nva_start_addr = ALIGN(va->va_start, align); in __alloc_vmap_area()
1029 type = classify_va_fit_type(va, nva_start_addr, size); in __alloc_vmap_area()
1034 ret = adjust_va_to_fit_type(va, nva_start_addr, size, type); in __alloc_vmap_area()
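
A sketch of the allocation path that ties the previous helpers together; under the same assumption it returns the new start address on success and vend on failure:

static __always_inline unsigned long
__alloc_vmap_area(unsigned long size, unsigned long align,
        unsigned long vstart, unsigned long vend)
{
        unsigned long nva_start_addr;
        struct vmap_area *va;
        enum fit_type type;
        int ret;

        va = find_vmap_lowest_match(size, align, vstart);
        if (unlikely(!va))
                return vend;

        if (va->va_start > vstart)
                nva_start_addr = ALIGN(va->va_start, align);
        else
                nva_start_addr = ALIGN(vstart, align);

        /* Check the "vend" restriction. */
        if (nva_start_addr + size > vend)
                return vend;

        /* Classify what we have found. */
        type = classify_va_fit_type(va, nva_start_addr, size);
        if (WARN_ON_ONCE(type == NOTHING_FIT))
                return vend;

        /* Update the free vmap_area. */
        ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
        if (ret)
                return vend;

        return nva_start_addr;
}
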
1054 struct vmap_area *va, *pva; in alloc_vmap_area() local
1067 va = kmem_cache_alloc_node(vmap_area_cachep, in alloc_vmap_area()
1069 if (unlikely(!va)) in alloc_vmap_area()
1076 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK); in alloc_vmap_area()
1114 va->va_start = addr; in alloc_vmap_area()
1115 va->va_end = addr + size; in alloc_vmap_area()
1116 va->vm = NULL; in alloc_vmap_area()
1117 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); in alloc_vmap_area()
1121 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
1122 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
1123 BUG_ON(va->va_end > vend); in alloc_vmap_area()
1125 return va; in alloc_vmap_area()
1148 kmem_cache_free(vmap_area_cachep, va); in alloc_vmap_area()
1164 static void __free_vmap_area(struct vmap_area *va) in __free_vmap_area() argument
1169 unlink_va(va, &vmap_area_root); in __free_vmap_area()
1174 merge_or_add_vmap_area(va, in __free_vmap_area()
1181 static void free_vmap_area(struct vmap_area *va) in free_vmap_area() argument
1184 __free_vmap_area(va); in free_vmap_area()
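
A sketch of the free path: unlink the area from the busy tree, then merge it back into (or add it to) the free tree, with free_vmap_area() supplying the locking:

static void __free_vmap_area(struct vmap_area *va)
{
        /* Remove from the busy tree/list. */
        unlink_va(va, &vmap_area_root);

        /* Merge with neighbors in the free tree, or just add it. */
        merge_or_add_vmap_area(va,
                &free_vmap_area_root, &free_vmap_area_list);
}

static void free_vmap_area(struct vmap_area *va)
{
        spin_lock(&vmap_area_lock);
        __free_vmap_area(va);
        spin_unlock(&vmap_area_lock);
}
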
1191 static void unmap_vmap_area(struct vmap_area *va) in unmap_vmap_area() argument
1193 vunmap_page_range(va->va_start, va->va_end); in unmap_vmap_area()
1249 struct vmap_area *va; in __purge_vmap_area_lazy() local
1268 llist_for_each_entry(va, valist, purge_list) { in __purge_vmap_area_lazy()
1269 if (va->va_start < start) in __purge_vmap_area_lazy()
1270 start = va->va_start; in __purge_vmap_area_lazy()
1271 if (va->va_end > end) in __purge_vmap_area_lazy()
1272 end = va->va_end; in __purge_vmap_area_lazy()
1279 llist_for_each_entry_safe(va, n_va, valist, purge_list) { in __purge_vmap_area_lazy()
1280 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
1287 merge_or_add_vmap_area(va, in __purge_vmap_area_lazy()
1327 static void free_vmap_area_noflush(struct vmap_area *va) in free_vmap_area_noflush() argument
1332 unlink_va(va, &vmap_area_root); in free_vmap_area_noflush()
1335 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> in free_vmap_area_noflush()
1339 llist_add(&va->purge_list, &vmap_purge_list); in free_vmap_area_noflush()
1348 static void free_unmap_vmap_area(struct vmap_area *va) in free_unmap_vmap_area() argument
1350 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
1351 unmap_vmap_area(va); in free_unmap_vmap_area()
1353 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
1355 free_vmap_area_noflush(va); in free_unmap_vmap_area()
1360 struct vmap_area *va; in find_vmap_area() local
1363 va = __find_vmap_area(addr); in find_vmap_area()
1366 return va; in find_vmap_area()
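
A sketch of the locked lookup wrapper shown above:

static struct vmap_area *find_vmap_area(unsigned long addr)
{
        struct vmap_area *va;

        spin_lock(&vmap_area_lock);
        va = __find_vmap_area(addr);
        spin_unlock(&vmap_area_lock);

        return va;
}
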
1406 struct vmap_area *va; member
1460 struct vmap_area *va; in new_vmap_block() local
1472 va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE, in new_vmap_block()
1475 if (IS_ERR(va)) { in new_vmap_block()
1477 return ERR_CAST(va); in new_vmap_block()
1483 free_vmap_area(va); in new_vmap_block()
1487 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
1489 vb->va = va; in new_vmap_block()
1498 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
1519 vb_idx = addr_to_vb_idx(vb->va->va_start); in free_vmap_block()
1525 free_vmap_area_noflush(vb->va); in free_vmap_block()
1603 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
1686 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
1740 struct vmap_area *va; in vm_unmap_ram() local
1754 va = find_vmap_area(addr); in vm_unmap_ram()
1755 BUG_ON(!va); in vm_unmap_ram()
1756 debug_check_no_locks_freed((void *)va->va_start, in vm_unmap_ram()
1757 (va->va_end - va->va_start)); in vm_unmap_ram()
1758 free_unmap_vmap_area(va); in vm_unmap_ram()
1789 struct vmap_area *va; in vm_map_ram() local
1790 va = alloc_vmap_area(size, PAGE_SIZE, in vm_map_ram()
1792 if (IS_ERR(va)) in vm_map_ram()
1795 addr = va->va_start; in vm_map_ram()
1902 struct vmap_area *va; in vmalloc_init() local
1925 va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT); in vmalloc_init()
1926 if (WARN_ON_ONCE(!va)) in vmalloc_init()
1929 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
1930 va->va_end = va->va_start + tmp->size; in vmalloc_init()
1931 va->vm = tmp; in vmalloc_init()
1932 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); in vmalloc_init()
2017 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
2022 vm->addr = (void *)va->va_start; in setup_vmalloc_vm()
2023 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm()
2025 va->vm = vm; in setup_vmalloc_vm()
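
A sketch of how the vm_struct is wired to its vmap_area, assuming the vmap_area_lock of this era guards the va->vm pointer:

static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
                              unsigned long flags, const void *caller)
{
        spin_lock(&vmap_area_lock);
        vm->flags = flags;
        vm->addr = (void *)va->va_start;
        vm->size = va->va_end - va->va_start;
        vm->caller = caller;
        va->vm = vm;
        spin_unlock(&vmap_area_lock);
}
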
2044 struct vmap_area *va; in __get_vm_area_node() local
2063 va = alloc_vmap_area(size, align, start, end, node, gfp_mask); in __get_vm_area_node()
2064 if (IS_ERR(va)) { in __get_vm_area_node()
2069 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2127 struct vmap_area *va; in find_vm_area() local
2129 va = find_vmap_area((unsigned long)addr); in find_vm_area()
2130 if (!va) in find_vm_area()
2133 return va->vm; in find_vm_area()
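
A sketch of the exported lookup that maps an address back to its vm_struct:

struct vm_struct *find_vm_area(const void *addr)
{
        struct vmap_area *va;

        va = find_vmap_area((unsigned long)addr);
        if (!va)
                return NULL;

        return va->vm;
}
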
2148 struct vmap_area *va; in remove_vm_area() local
2153 va = __find_vmap_area((unsigned long)addr); in remove_vm_area()
2154 if (va && va->vm) { in remove_vm_area()
2155 struct vm_struct *vm = va->vm; in remove_vm_area()
2157 va->vm = NULL; in remove_vm_area()
2161 free_unmap_vmap_area(va); in remove_vm_area()
2847 struct vmap_area *va; in vread() local
2858 list_for_each_entry(va, &vmap_area_list, list) { in vread()
2862 if (!va->vm) in vread()
2865 vm = va->vm; in vread()
2926 struct vmap_area *va; in vwrite() local
2938 list_for_each_entry(va, &vmap_area_list, list) { in vwrite()
2942 if (!va->vm) in vwrite()
2945 vm = va->vm; in vwrite()
3139 struct vmap_area *va, *tmp; in pvm_find_va_enclose_addr() local
3143 va = NULL; in pvm_find_va_enclose_addr()
3148 va = tmp; in pvm_find_va_enclose_addr()
3158 return va; in pvm_find_va_enclose_addr()
3171 pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align) in pvm_determine_end_from_reverse() argument
3176 if (likely(*va)) { in pvm_determine_end_from_reverse()
3177 list_for_each_entry_from_reverse((*va), in pvm_determine_end_from_reverse()
3179 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
3180 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
3218 struct vmap_area **vas, *va; in pcpu_get_vm_areas() local
3272 va = pvm_find_va_enclose_addr(vmalloc_end); in pcpu_get_vm_areas()
3273 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3286 if (va == NULL) in pcpu_get_vm_areas()
3293 if (base + end > va->va_end) { in pcpu_get_vm_areas()
3294 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3302 if (base + start < va->va_start) { in pcpu_get_vm_areas()
3303 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
3304 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3319 va = pvm_find_va_enclose_addr(base + end); in pcpu_get_vm_areas()
3329 va = pvm_find_va_enclose_addr(start); in pcpu_get_vm_areas()
3330 if (WARN_ON_ONCE(va == NULL)) in pcpu_get_vm_areas()
3334 type = classify_va_fit_type(va, start, size); in pcpu_get_vm_areas()
3339 ret = adjust_va_to_fit_type(va, start, size, type); in pcpu_get_vm_areas()
3344 va = vas[area]; in pcpu_get_vm_areas()
3345 va->va_start = start; in pcpu_get_vm_areas()
3346 va->va_end = start + size; in pcpu_get_vm_areas()
3348 insert_vmap_area(va, &vmap_area_root, &vmap_area_list); in pcpu_get_vm_areas()
3464 struct vmap_area *va; in show_purge_info() local
3470 llist_for_each_entry(va, head, purge_list) { in show_purge_info()
3472 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
3473 va->va_end - va->va_start); in show_purge_info()
3479 struct vmap_area *va; in s_show() local
3482 va = list_entry(p, struct vmap_area, list); in s_show()
3488 if (!va->vm) { in s_show()
3490 (void *)va->va_start, (void *)va->va_end, in s_show()
3491 va->va_end - va->va_start); in s_show()
3496 v = va->vm; in s_show()
3537 if (list_is_last(&va->list, &vmap_area_list)) in s_show()