Lines Matching +full:vm +full:-map in mm/vmalloc.c
1 // SPDX-License-Identifier: GPL-2.0-only
5 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
45 #include "pgalloc-track.h"
48 static unsigned int __ro_after_init ioremap_max_page_shift = BITS_PER_LONG - 1;
94 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) in free_work()
110 return -ENOMEM; in vmap_pte_range()
143 if ((end - addr) != PMD_SIZE) in vmap_try_huge_pmd()
167 return -ENOMEM; in vmap_pmd_range()
178 return -ENOMEM; in vmap_pmd_range()
179 } while (pmd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pmd_range()
193 if ((end - addr) != PUD_SIZE) in vmap_try_huge_pud()
217 return -ENOMEM; in vmap_pud_range()
229 return -ENOMEM; in vmap_pud_range()
230 } while (pud++, phys_addr += (next - addr), addr = next, addr != end); in vmap_pud_range()
244 if ((end - addr) != P4D_SIZE) in vmap_try_huge_p4d()
268 return -ENOMEM; in vmap_p4d_range()
280 return -ENOMEM; in vmap_p4d_range()
281 } while (p4d++, phys_addr += (next - addr), addr = next, addr != end); in vmap_p4d_range()
306 } while (pgd++, phys_addr += (next - addr), addr = next, addr != end); in vmap_range_noflush()
416 * or be re-mapped for something else, if TLB flushes are being delayed or
444 * vunmap_range - unmap kernel virtual addresses
445 * @addr: start of the VM area to unmap
446 * @end: end of the VM area to unmap (non-inclusive)
449 * caches. Any subsequent access to the address before it has been re-mapped
472 return -ENOMEM; in vmap_pages_pte_range()
477 return -EBUSY; in vmap_pages_pte_range()
479 return -ENOMEM; in vmap_pages_pte_range()
496 return -ENOMEM; in vmap_pages_pmd_range()
500 return -ENOMEM; in vmap_pages_pmd_range()
514 return -ENOMEM; in vmap_pages_pud_range()
518 return -ENOMEM; in vmap_pages_pud_range()
532 return -ENOMEM; in vmap_pages_p4d_range()
536 return -ENOMEM; in vmap_pages_p4d_range()
580 unsigned int i, nr = (end - addr) >> PAGE_SHIFT; in vmap_pages_range_noflush()
588 for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) { in vmap_pages_range_noflush()
604 * vmap_pages_range - map pages to a kernel virtual address
605 * @addr: start of the VM area to map
606 * @end: end of the VM area to map (non-inclusive)
608 * @pages: pages to map (always PAGE_SIZE pages)
613 * 0 on success, -errno on failure.
628 * ARM, x86-64 and sparc64 put modules in a special place, in is_vmalloc_or_module_addr()
703 * Map a vmalloc()-space virtual address to the physical page frame number.
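The comment above describes the vmalloc_to_pfn() lookup. A minimal caller-side sketch, not part of vmalloc.c (the buffer and its size are hypothetical), walking the physical frames behind a vmalloc'ed buffer with vmalloc_to_page()/vmalloc_to_pfn():

#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/vmalloc.h>

/* Print the physical frame backing each page of a vmalloc'ed buffer. */
static void walk_vmalloc_pages(void *buf, unsigned long size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + off);
		unsigned long pfn = vmalloc_to_pfn(buf + off);

		pr_info("offset %lu -> pfn %lu (page %p)\n", off, pfn, page);
	}
}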
744 * This augment red-black tree represents the free vmap space.
745 * All vmap_area objects in this tree are sorted by va->va_start
750 * of its sub-tree, right or left. Therefore it is possible to
765 return (va->va_end - va->va_start); in va_size()
774 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
784 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
785 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
811 if (tmp->va_end > addr) { in find_vmap_area_exceed_addr()
813 if (tmp->va_start <= addr) in find_vmap_area_exceed_addr()
816 n = n->rb_left; in find_vmap_area_exceed_addr()
818 n = n->rb_right; in find_vmap_area_exceed_addr()
832 if (addr < va->va_start) in __find_vmap_area()
833 n = n->rb_left; in __find_vmap_area()
834 else if (addr >= va->va_end) in __find_vmap_area()
835 n = n->rb_right; in __find_vmap_area()
860 link = &root->rb_node; in find_va_links()
872 * it link, where the new va->rb_node will be attached to. in find_va_links()
882 if (va->va_start < tmp_va->va_end && in find_va_links()
883 va->va_end <= tmp_va->va_start) in find_va_links()
884 link = &(*link)->rb_left; in find_va_links()
885 else if (va->va_end > tmp_va->va_start && in find_va_links()
886 va->va_start >= tmp_va->va_end) in find_va_links()
887 link = &(*link)->rb_right; in find_va_links()
889 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", in find_va_links()
890 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); in find_va_links()
896 *parent = &tmp_va->rb_node; in find_va_links()
907 * The red-black tree where we try to find VA neighbors in get_va_next_sibling()
914 list = &rb_entry(parent, struct vmap_area, rb_node)->list; in get_va_next_sibling()
915 return (&parent->rb_right == link ? list->next : list); in get_va_next_sibling()
927 head = &rb_entry(parent, struct vmap_area, rb_node)->list; in link_va()
928 if (&parent->rb_right != link) in link_va()
929 head = head->prev; in link_va()
932 /* Insert to the rb-tree */ in link_va()
933 rb_link_node(&va->rb_node, parent, link); in link_va()
937 * to the tree. We do not set va->subtree_max_size to in link_va()
946 rb_insert_augmented(&va->rb_node, in link_va()
948 va->subtree_max_size = 0; in link_va()
950 rb_insert_color(&va->rb_node, root); in link_va()
953 /* Address-sort this list */ in link_va()
954 list_add(&va->list, head); in link_va()
960 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in unlink_va()
964 rb_erase_augmented(&va->rb_node, in unlink_va()
967 rb_erase(&va->rb_node, root); in unlink_va()
969 list_del(&va->list); in unlink_va()
970 RB_CLEAR_NODE(&va->rb_node); in unlink_va()
982 if (computed_size != va->subtree_max_size) in augment_tree_propagate_check()
984 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
996 * - After VA has been inserted to the tree(free path);
997 * - After VA has been shrunk(allocation path);
998 * - After VA has been increased(merging path).
1004 * 4--8
1008 * 2--2 8--8
1014 * node becomes 4--6.
1024 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); in augment_tree_propagate_from()
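As a hedged restatement of the invariant described above, reusing the va_size() and get_subtree_max_size() helpers shown earlier (illustrative only; the real maintenance is done by the augment callbacks and compute_subtree_max_size()):

/*
 * Sketch: a node's subtree_max_size must equal the largest free block
 * found in the node itself or anywhere below it.
 */
static bool subtree_max_size_is_consistent(struct vmap_area *va)
{
	return va->subtree_max_size ==
		max3(va_size(va),
		     get_subtree_max_size(va->rb_node.rb_left),
		     get_subtree_max_size(va->rb_node.rb_right));
}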
1063 * Merge de-allocated chunk of VA memory with previous
1101 * |<------VA------>|<-----Next----->| in merge_or_add_vmap_area()
1107 if (sibling->va_start == va->va_end) { in merge_or_add_vmap_area()
1108 sibling->va_start = va->va_start; in merge_or_add_vmap_area()
1122 * |<-----Prev----->|<------VA------>| in merge_or_add_vmap_area()
1126 if (next->prev != head) { in merge_or_add_vmap_area()
1127 sibling = list_entry(next->prev, struct vmap_area, list); in merge_or_add_vmap_area()
1128 if (sibling->va_end == va->va_start) { in merge_or_add_vmap_area()
1139 sibling->va_end = va->va_end; in merge_or_add_vmap_area()
1174 if (va->va_start > vstart) in is_within_this_va()
1175 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
1184 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
1204 length = size + align - 1; in find_vmap_lowest_match()
1209 if (get_subtree_max_size(node->rb_left) >= length && in find_vmap_lowest_match()
1210 vstart < va->va_start) { in find_vmap_lowest_match()
1211 node = node->rb_left; in find_vmap_lowest_match()
1218 * sub-tree if it does not have a free block that is in find_vmap_lowest_match()
1221 if (get_subtree_max_size(node->rb_right) >= length) { in find_vmap_lowest_match()
1222 node = node->rb_right; in find_vmap_lowest_match()
1227 * OK. We roll back and find the first right sub-tree, in find_vmap_lowest_match()
1236 if (get_subtree_max_size(node->rb_right) >= length && in find_vmap_lowest_match()
1237 vstart <= va->va_start) { in find_vmap_lowest_match()
1238 node = node->rb_right; in find_vmap_lowest_match()
1301 if (nva_start_addr < va->va_start || in classify_va_fit_type()
1302 nva_start_addr + size > va->va_end) in classify_va_fit_type()
1306 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
1307 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
1311 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
1333 * |---------------| in adjust_va_to_fit_type()
1343 * |-------|-------| in adjust_va_to_fit_type()
1345 va->va_start += size; in adjust_va_to_fit_type()
1352 * |-------|-------| in adjust_va_to_fit_type()
1354 va->va_end = nva_start_addr; in adjust_va_to_fit_type()
1361 * |---|-------|---| in adjust_va_to_fit_type()
1366 * For percpu allocator we do not do any pre-allocation in adjust_va_to_fit_type()
1392 return -1; in adjust_va_to_fit_type()
1398 lva->va_start = va->va_start; in adjust_va_to_fit_type()
1399 lva->va_end = nva_start_addr; in adjust_va_to_fit_type()
1404 va->va_start = nva_start_addr + size; in adjust_va_to_fit_type()
1406 return -1; in adjust_va_to_fit_type()
1413 insert_vmap_area_augment(lva, &va->rb_node, in adjust_va_to_fit_type()
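A plain-C restatement of the diagrams above, mirroring the checks visible in classify_va_fit_type() rather than replacing its real enum (the _SKETCH names are made up for illustration):

/*
 * How the requested block [nva_start_addr, nva_start_addr + size) can sit
 * inside a free vmap_area [va_start, va_end):
 */
enum fit_type_sketch {
	FL_FIT_SKETCH,	/* block == free area: unlink the free area          */
	LE_FIT_SKETCH,	/* block starts at va_start: va_start += size        */
	RE_FIT_SKETCH,	/* block ends at va_end: va_end = nva_start_addr     */
	NE_FIT_SKETCH,	/* block in the middle: split into two free areas    */
};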
1437 if (va->va_start > vstart) in __alloc_vmap_area()
1438 nva_start_addr = ALIGN(va->va_start, align); in __alloc_vmap_area()
1493 * We do it in non-atomic context, thus it allows us to use more in preload_this_cpu_lock()
1526 return ERR_PTR(-EBUSY); in alloc_vmap_area()
1533 return ERR_PTR(-ENOMEM); in alloc_vmap_area()
1539 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
1553 va->va_start = addr; in alloc_vmap_area()
1554 va->va_end = addr + size; in alloc_vmap_area()
1555 va->vm = NULL; in alloc_vmap_area()
1561 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
1562 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
1563 BUG_ON(va->va_end > vend); in alloc_vmap_area()
1593 return ERR_PTR(-EBUSY); in alloc_vmap_area()
1642 /* for per-CPU blocks */
1657 * Purges all lazily-freed vmap areas.
1677 struct vmap_area, list)->va_start); in __purge_vmap_area_lazy()
1681 struct vmap_area, list)->va_end); in __purge_vmap_area_lazy()
1688 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
1689 unsigned long orig_start = va->va_start; in __purge_vmap_area_lazy()
1690 unsigned long orig_end = va->va_end; in __purge_vmap_area_lazy()
1693 * Finally insert or merge lazily-freed area. It is in __purge_vmap_area_lazy()
1705 va->va_start, va->va_end); in __purge_vmap_area_lazy()
1752 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> in free_vmap_area_noflush()
1773 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
1774 vunmap_range_noflush(va->va_start, va->va_end); in free_unmap_vmap_area()
1776 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
1800 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
1856 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
1871 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
1876 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1892 return ERR_PTR(-ENOMEM); in new_vmap_block()
1902 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
1903 spin_lock_init(&vb->lock); in new_vmap_block()
1904 vb->va = va; in new_vmap_block()
1907 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
1908 vb->dirty = 0; in new_vmap_block()
1909 vb->dirty_min = VMAP_BBMAP_BITS; in new_vmap_block()
1910 vb->dirty_max = 0; in new_vmap_block()
1911 INIT_LIST_HEAD(&vb->free_list); in new_vmap_block()
1913 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
1922 spin_lock(&vbq->lock); in new_vmap_block()
1923 list_add_tail_rcu(&vb->free_list, &vbq->free); in new_vmap_block()
1924 spin_unlock(&vbq->lock); in new_vmap_block()
1934 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
1937 free_vmap_area_noflush(vb->va); in free_vmap_block()
1949 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in purge_fragmented_blocks()
1951 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) in purge_fragmented_blocks()
1954 spin_lock(&vb->lock); in purge_fragmented_blocks()
1955 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { in purge_fragmented_blocks()
1956 vb->free = 0; /* prevent further allocs after releasing lock */ in purge_fragmented_blocks()
1957 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ in purge_fragmented_blocks()
1958 vb->dirty_min = 0; in purge_fragmented_blocks()
1959 vb->dirty_max = VMAP_BBMAP_BITS; in purge_fragmented_blocks()
1960 spin_lock(&vbq->lock); in purge_fragmented_blocks()
1961 list_del_rcu(&vb->free_list); in purge_fragmented_blocks()
1962 spin_unlock(&vbq->lock); in purge_fragmented_blocks()
1963 spin_unlock(&vb->lock); in purge_fragmented_blocks()
1964 list_add_tail(&vb->purge, &purge); in purge_fragmented_blocks()
1966 spin_unlock(&vb->lock); in purge_fragmented_blocks()
1971 list_del(&vb->purge); in purge_fragmented_blocks()
2005 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vb_alloc()
2008 spin_lock(&vb->lock); in vb_alloc()
2009 if (vb->free < (1UL << order)) { in vb_alloc()
2010 spin_unlock(&vb->lock); in vb_alloc()
2014 pages_off = VMAP_BBMAP_BITS - vb->free; in vb_alloc()
2015 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
2016 vb->free -= 1UL << order; in vb_alloc()
2017 if (vb->free == 0) { in vb_alloc()
2018 spin_lock(&vbq->lock); in vb_alloc()
2019 list_del_rcu(&vb->free_list); in vb_alloc()
2020 spin_unlock(&vbq->lock); in vb_alloc()
2023 spin_unlock(&vb->lock); in vb_alloc()
2049 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; in vb_free()
2057 spin_lock(&vb->lock); in vb_free()
2060 vb->dirty_min = min(vb->dirty_min, offset); in vb_free()
2061 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); in vb_free()
2063 vb->dirty += 1UL << order; in vb_free()
2064 if (vb->dirty == VMAP_BBMAP_BITS) { in vb_free()
2065 BUG_ON(vb->free); in vb_free()
2066 spin_unlock(&vb->lock); in vb_free()
2069 spin_unlock(&vb->lock); in vb_free()
2086 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in _vm_unmap_aliases()
2087 spin_lock(&vb->lock); in _vm_unmap_aliases()
2088 if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { in _vm_unmap_aliases()
2089 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
2092 s = va_start + (vb->dirty_min << PAGE_SHIFT); in _vm_unmap_aliases()
2093 e = va_start + (vb->dirty_max << PAGE_SHIFT); in _vm_unmap_aliases()
2100 spin_unlock(&vb->lock); in _vm_unmap_aliases()
2113 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2135 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2161 debug_check_no_locks_freed((void *)va->va_start, in vm_unmap_ram()
2162 (va->va_end - va->va_start)); in vm_unmap_ram()
2168 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2174 * faster than vmap so it's good. But if you mix long-life and short-life
2177 * the end. Please use this function for short-lived objects.
2199 addr = va->va_start; in vm_map_ram()
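A minimal sketch of the short-lived mapping pattern the vm_map_ram() comment recommends; not from vmalloc.c, it assumes the prot-less signature of recent kernels, and "pages"/"nr" come from a hypothetical caller:

#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static int copy_through_temp_mapping(struct page **pages, unsigned int nr,
				     const void *src, size_t len)
{
	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);

	if (!va)
		return -ENOMEM;

	memcpy(va, src, len);	/* short-lived use of the mapping */
	vm_unmap_ram(va, nr);	/* must pass the same page count  */
	return 0;
}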
2217 static inline unsigned int vm_area_page_order(struct vm_struct *vm) in vm_area_page_order() argument
2220 return vm->page_order; in vm_area_page_order()
2226 static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order) in set_vm_area_page_order() argument
2229 vm->page_order = order; in set_vm_area_page_order()
2236 * vm_area_add_early - add vmap area early during boot
2237 * @vm: vm_struct to add
2239 * This function is used to add fixed kernel vm area to vmlist before
2240 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
2245 void __init vm_area_add_early(struct vm_struct *vm) in vm_area_add_early() argument
2250 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { in vm_area_add_early()
2251 if (tmp->addr >= vm->addr) { in vm_area_add_early()
2252 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
2255 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
2257 vm->next = *p; in vm_area_add_early()
2258 *p = vm; in vm_area_add_early()
2262 * vm_area_register_early - register vmap area early during boot
2263 * @vm: vm_struct to register
2266 * This function is used to register kernel vm area before
2267 * vmalloc_init() is called. @vm->size and @vm->flags should contain
2269 * vm->addr contains the allocated address.
2273 void __init vm_area_register_early(struct vm_struct *vm, size_t align) in vm_area_register_early() argument
2279 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
2281 vm->addr = (void *)addr; in vm_area_register_early()
2283 vm_area_add_early(vm); in vm_area_register_early()
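A hedged boot-time sketch for the two early helpers documented above; the size and flags are made up, and the vm_struct is static because vmalloc_init() keeps a pointer to it:

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct vm_struct early_vm = {
	.size	= 4 * PAGE_SIZE,
	.flags	= VM_IOREMAP,	/* illustrative: whatever the area needs */
};

static void __init example_reserve_early_area(void)
{
	/* Picks an address in vmalloc space and chains early_vm onto vmlist. */
	vm_area_register_early(&early_vm, PAGE_SIZE);
	/* early_vm.addr is now valid, long before vmalloc_init() runs. */
}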
2294 * -|-----|.....|-----|-----|-----|.....|- in vmap_init_free_space()
2296 * |<--------------------------------->| in vmap_init_free_space()
2299 if (busy->va_start - vmap_start > 0) { in vmap_init_free_space()
2302 free->va_start = vmap_start; in vmap_init_free_space()
2303 free->va_end = busy->va_start; in vmap_init_free_space()
2311 vmap_start = busy->va_end; in vmap_init_free_space()
2314 if (vmap_end - vmap_start > 0) { in vmap_init_free_space()
2317 free->va_start = vmap_start; in vmap_init_free_space()
2318 free->va_end = vmap_end; in vmap_init_free_space()
2343 spin_lock_init(&vbq->lock); in vmalloc_init()
2344 INIT_LIST_HEAD(&vbq->free); in vmalloc_init()
2346 init_llist_head(&p->list); in vmalloc_init()
2347 INIT_WORK(&p->wq, free_work); in vmalloc_init()
2351 for (tmp = vmlist; tmp; tmp = tmp->next) { in vmalloc_init()
2356 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
2357 va->va_end = va->va_start + tmp->size; in vmalloc_init()
2358 va->vm = tmp; in vmalloc_init()
2369 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, in setup_vmalloc_vm_locked() argument
2372 vm->flags = flags; in setup_vmalloc_vm_locked()
2373 vm->addr = (void *)va->va_start; in setup_vmalloc_vm_locked()
2374 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm_locked()
2375 vm->caller = caller; in setup_vmalloc_vm_locked()
2376 va->vm = vm; in setup_vmalloc_vm_locked()
2379 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
2383 setup_vmalloc_vm_locked(vm, va, flags, caller); in setup_vmalloc_vm()
2387 static void clear_vm_uninitialized_flag(struct vm_struct *vm) in clear_vm_uninitialized_flag() argument
2391 * we should make sure that vm has proper values. in clear_vm_uninitialized_flag()
2395 vm->flags &= ~VM_UNINITIALIZED; in clear_vm_uninitialized_flag()
2429 kasan_unpoison_vmalloc((void *)va->va_start, requested_size); in __get_vm_area_node()
2445 * get_vm_area - reserve a contiguous kernel virtual area
2472 * find_vm_area - find a continuous kernel virtual area
2475 * Search for the kernel VM area starting at @addr, and return it.
2489 return va->vm; in find_vm_area()
2493 * remove_vm_area - find and remove a continuous kernel virtual area
2496 * Search for the kernel VM area starting at @addr, and remove it.
2497 * This function returns the found VM area, but using it is NOT safe
2510 if (va && va->vm) { in remove_vm_area()
2511 struct vm_struct *vm = va->vm; in remove_vm_area() local
2513 va->vm = NULL; in remove_vm_area()
2516 kasan_free_shadow(vm); in remove_vm_area()
2519 return vm; in remove_vm_area()
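find_vm_area() and remove_vm_area() are documented above; a small caller-side sketch (a hypothetical helper, only safe while the mapping is known to stay alive) that reports the vm_struct behind a vmalloc address:

#include <linux/printk.h>
#include <linux/vmalloc.h>

static void report_vm_area(const void *objp)
{
	struct vm_struct *area = find_vm_area(objp);

	if (!area) {
		pr_info("%p: no vm area found\n", objp);
		return;
	}

	pr_info("area %p: size %lu, nr_pages %u, flags %#lx\n",
		area->addr, area->size, area->nr_pages, area->flags);
}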
2532 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2533 if (page_address(area->pages[i])) in set_area_direct_map()
2534 set_direct_map(area->pages[i]); in set_area_direct_map()
2537 /* Handle removing and resetting vm mappings related to the vm_struct. */
2542 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2546 remove_vm_area(area->addr); in vm_remove_mappings()
2553 * If not deallocating pages, just do the flush of the VM area and in vm_remove_mappings()
2562 * If execution gets here, flush the vm mapping and reset the direct in vm_remove_mappings()
2563 * map. Find the start and end range of the direct mappings to make sure in vm_remove_mappings()
2564 * the vm_unmap_aliases() flush includes the direct map. in vm_remove_mappings()
2566 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_remove_mappings()
2567 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2579 * Set direct map to something invalid so that it won't be cached if in vm_remove_mappings()
2581 * reset the direct map permissions to the default. in vm_remove_mappings()
2601 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", in __vunmap()
2606 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2607 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2609 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); in __vunmap()
2617 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in __vunmap()
2618 struct page *page = area->pages[i]; in __vunmap()
2624 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2626 kvfree(area->pages); in __vunmap()
2642 if (llist_add((struct llist_node *)addr, &p->list)) in __vfree_deferred()
2643 schedule_work(&p->wq); in __vfree_deferred()
2647 * vfree_atomic - release memory allocated by vmalloc()
2673 * vfree - Release memory allocated by vmalloc()
2687 * conventions for vfree() arch-dependent would be a really bad idea).
2705 * vunmap - release virtual mapping obtained by vmap()
2723 * vmap - map an array of pages into virtually contiguous space
2725 * @count: number of pages to map
2726 * @flags: vm_area->flags
2754 addr = (unsigned long)area->addr; in vmap()
2757 vunmap(area->addr); in vmap()
2762 area->pages = pages; in vmap()
2763 area->nr_pages = count; in vmap()
2765 return area->addr; in vmap()
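The vmap()/vunmap() pair documented above in a minimal sketch; the page array is hypothetical and must hold "count" order-0 pages:

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_pages_contig(struct page **pages, unsigned int count)
{
	/* VM_MAP marks this as a plain page mapping; the pages are not consumed. */
	return vmap(pages, count, VM_MAP, PAGE_KERNEL);
}

static void unmap_pages_contig(void *addr)
{
	vunmap(addr);	/* tears down the mapping, does not free the pages */
}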
2780 if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) in vmap_pfn_apply()
2781 return -EINVAL; in vmap_pfn_apply()
2782 *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); in vmap_pfn_apply()
2787 * vmap_pfn - map an array of PFNs into virtually contiguous space
2789 * @count: number of pages to map
2804 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2809 return area->addr; in vmap_pfn()
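A hedged sketch for vmap_pfn(): the PFNs must not be backed by struct pages (see the pfn_valid() check above), e.g. frames of a PCI BAR. "bar_pfns" and "nr" are hypothetical, and the area is released later with vunmap():

#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_device_pfns(unsigned long *bar_pfns, unsigned int nr)
{
	/* Depends on CONFIG_VMAP_PFN; returns NULL on failure. */
	return vmap_pfn(bar_pfns, nr, pgprot_noncached(PAGE_KERNEL));
}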
2823 * For order-0 pages we make use of bulk allocator, if in vm_area_alloc_pages()
2833 * A maximum allowed request is hard-coded and is 100 in vm_area_alloc_pages()
2835 * long preemption off scenario in the bulk-allocator in vm_area_alloc_pages()
2838 nr_pages_request = min(100U, nr_pages - nr_allocated); in vm_area_alloc_pages()
2856 * high-order pages. in vm_area_alloc_pages()
2860 /* High-order pages or fallback path if "bulk" fails. */ in vm_area_alloc_pages()
2871 * Careful, we allocate and map page-order pages, but in vm_area_alloc_pages()
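The order-0 bulk step described above can be exercised with the bulk page allocator directly (vmalloc itself goes through the NUMA-aware variant); a hedged sketch where the caller must cope with a short count, just like the capped 100-page batches here:

#include <linux/gfp.h>

static unsigned long grab_order0_pages(struct page **array, unsigned long want)
{
	/* Populates array[0..ret-1]; ret may be smaller than want. */
	return alloc_pages_bulk_array(GFP_KERNEL, want, array);
}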
2890 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node()
2903 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, in __vmalloc_area_node()
2904 area->caller); in __vmalloc_area_node()
2906 area->pages = kmalloc_node(array_size, nested_gfp, node); in __vmalloc_area_node()
2909 if (!area->pages) { in __vmalloc_area_node()
2917 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); in __vmalloc_area_node()
2920 area->nr_pages = vm_area_alloc_pages(gfp_mask, node, in __vmalloc_area_node()
2921 page_order, nr_small_pages, area->pages); in __vmalloc_area_node()
2923 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2929 if (area->nr_pages != nr_small_pages) { in __vmalloc_area_node()
2932 area->nr_pages * PAGE_SIZE, page_order); in __vmalloc_area_node()
2936 if (vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
2939 "vmalloc error: size %lu, failed to map pages", in __vmalloc_area_node()
2940 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
2944 return area->addr; in __vmalloc_area_node()
2947 __vfree(area->addr); in __vmalloc_area_node()
2952 * __vmalloc_node_range - allocate virtually contiguous memory
2955 * @start: vm area range start
2956 * @end: vm area range end
2959 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2964 * allocator with @gfp_mask flags. Map them into contiguous
3051 * __vmalloc_node - allocate virtually contiguous memory
3059 * @gfp_mask flags. Map them into contiguous kernel virtual space.
3061 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3092 * vmalloc - allocate virtually contiguous memory
3096 * allocator and map them into contiguous kernel virtual space.
3111 * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3114 * Allocate enough non-huge pages to cover @size from the page level
3115 * allocator and map them into contiguous kernel virtual space.
3128 * vzalloc - allocate virtually contiguous memory with zero fill
3132 * allocator and map them into contiguous kernel virtual space.
3148 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3166 * vmalloc_node - allocate memory on a specific node
3171 * allocator and map them into contiguous kernel virtual space.
3186 * vzalloc_node - allocate memory on a specific node with zero fill
3191 * allocator and map them into contiguous kernel virtual space.
3216 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3220 * page level allocator and map them into contiguous kernel virtual space.
3232 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
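A minimal usage sketch for the vmalloc() family documented above; sizes and error handling are illustrative only:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int vmalloc_family_example(void)
{
	void *a = vmalloc(1 << 20);		/* virtually contiguous            */
	void *b = vzalloc(64 * 1024);		/* same, but zeroed                */
	void *c = vmalloc_node(PAGE_SIZE, 0);	/* prefer NUMA node 0              */

	if (!a || !b || !c)
		goto out;			/* vfree(NULL) is a no-op          */

	/* ... use the buffers; they are not physically contiguous ... */
out:
	vfree(a);
	vfree(b);
	vfree(c);
	return (a && b && c) ? 0 : -ENOMEM;
}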
3263 length = PAGE_SIZE - offset; in aligned_vread()
3275 /* We can expect USER0 is not used -- see vread() */ in aligned_vread()
3276 void *map = kmap_atomic(p); in aligned_vread() local
3277 memcpy(buf, map + offset, length); in aligned_vread()
3278 kunmap_atomic(map); in aligned_vread()
3285 count -= length; in aligned_vread()
3291 * vread() - read vmalloc area in a safe way.
3293 * @addr: vm address.
3299 * proper area of @buf. If there are memory holes, they'll be zero-filled.
3317 struct vm_struct *vm; in vread() local
3324 count = -(unsigned long) addr; in vread()
3332 if ((unsigned long)addr + count <= va->va_start) in vread()
3339 if (!va->vm) in vread()
3342 vm = va->vm; in vread()
3343 vaddr = (char *) vm->addr; in vread()
3344 if (addr >= vaddr + get_vm_area_size(vm)) in vread()
3352 count--; in vread()
3354 n = vaddr + get_vm_area_size(vm) - addr; in vread()
3357 if (!(vm->flags & VM_IOREMAP)) in vread()
3363 count -= n; in vread()
3370 /* zero-fill memory holes */ in vread()
3372 memset(buf, 0, buflen - (buf - buf_start)); in vread()
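A hedged sketch of how a reader such as /proc/kcore uses vread() as documented above; "kbuf" must be a plain kernel buffer, the return value counts bytes stored, and holes are zero-filled:

#include <linux/vmalloc.h>

static long read_vmalloc_window(char *kbuf, unsigned long vaddr, unsigned long len)
{
	long copied = vread(kbuf, (char *)vaddr, len);

	/* 0 means [vaddr, vaddr + len) overlaps no vmap/vmalloc area at all. */
	return copied;
}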
3378 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3383 * @size: size of map area
3385 * Returns: 0 for success, -Exxx on failure
3403 return -EINVAL; in remap_vmalloc_range_partial()
3408 return -EINVAL; in remap_vmalloc_range_partial()
3412 return -EINVAL; in remap_vmalloc_range_partial()
3414 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3415 return -EINVAL; in remap_vmalloc_range_partial()
3419 return -EINVAL; in remap_vmalloc_range_partial()
3432 size -= PAGE_SIZE; in remap_vmalloc_range_partial()
3435 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in remap_vmalloc_range_partial()
3441 * remap_vmalloc_range - map vmalloc pages to userspace
3442 * @vma: vma to cover (map full range of vma)
3444 * @pgoff: number of pages into addr before first page to map
3446 * Returns: 0 for success, -Exxx on failure
3457 return remap_vmalloc_range_partial(vma, vma->vm_start, in remap_vmalloc_range()
3459 vma->vm_end - vma->vm_start); in remap_vmalloc_range()
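The usual pairing for remap_vmalloc_range(), sketched for a hypothetical character device: a buffer allocated with vmalloc_user() (which sets VM_USERMAP) handed out from the driver's ->mmap() handler; "mydev_buf" and MYDEV_BUF_SIZE are made up:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *mydev_buf;	/* assumed: mydev_buf = vmalloc_user(MYDEV_BUF_SIZE); */

static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* pgoff 0: expose the buffer from its first page. */
	return remap_vmalloc_range(vma, mydev_buf, 0);
}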
3466 ret = remove_vm_area(area->addr); in free_vm_area()
3479 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3484 * i.e. va->va_start < addr && va->va_end < addr or NULL
3498 if (tmp->va_start <= addr) { in pvm_find_va_enclose_addr()
3500 if (tmp->va_end >= addr) in pvm_find_va_enclose_addr()
3503 n = n->rb_right; in pvm_find_va_enclose_addr()
3505 n = n->rb_left; in pvm_find_va_enclose_addr()
3513 * pvm_determine_end_from_reverse - find the highest aligned address
3516 * in - the VA we start the search(reverse order);
3517 * out - the VA with the highest aligned end address.
3525 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pvm_determine_end_from_reverse()
3531 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
3532 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
3541 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3550 * Percpu allocator wants to use congruent vm areas so that it can
3558 * does everything top-down and scans free blocks from the end looking
3569 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pcpu_get_vm_areas()
3600 if (vmalloc_end - vmalloc_start < last_end) { in pcpu_get_vm_areas()
3619 /* start scanning - we scan from the top, begin with the last area */ in pcpu_get_vm_areas()
3625 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3645 if (base + end > va->va_end) { in pcpu_get_vm_areas()
3646 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3654 if (base + start < va->va_start) { in pcpu_get_vm_areas()
3655 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
3656 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3665 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3697 va->va_start = start; in pcpu_get_vm_areas()
3698 va->va_end = start + size; in pcpu_get_vm_areas()
3705 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3708 kasan_unpoison_vmalloc((void *)vas[area]->va_start, in pcpu_get_vm_areas()
3712 /* insert all vm's */ in pcpu_get_vm_areas()
3732 while (area--) { in pcpu_get_vm_areas()
3733 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3734 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3739 va->va_start, va->va_end); in pcpu_get_vm_areas()
3783 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3784 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3789 va->va_start, va->va_end); in pcpu_get_vm_areas()
3800 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3819 struct vm_struct *vm; in vmalloc_dump_obj() local
3822 vm = find_vm_area(objp); in vmalloc_dump_obj()
3823 if (!vm) in vmalloc_dump_obj()
3825 pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n", in vmalloc_dump_obj()
3826 vm->nr_pages, (unsigned long)vm->addr, vm->caller); in vmalloc_dump_obj()
3858 unsigned int nr, *counters = m->private; in show_numa_info()
3863 if (v->flags & VM_UNINITIALIZED) in show_numa_info()
3870 for (nr = 0; nr < v->nr_pages; nr++) in show_numa_info()
3871 counters[page_to_nid(v->pages[nr])]++; in show_numa_info()
3885 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", in show_purge_info()
3886 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
3887 va->va_end - va->va_start); in show_purge_info()
3900 * s_show can encounter race with remove_vm_area, !vm on behalf in s_show()
3903 if (!va->vm) { in s_show()
3904 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", in s_show()
3905 (void *)va->va_start, (void *)va->va_end, in s_show()
3906 va->va_end - va->va_start); in s_show()
3911 v = va->vm; in s_show()
3913 seq_printf(m, "0x%pK-0x%pK %7ld", in s_show()
3914 v->addr, v->addr + v->size, v->size); in s_show()
3916 if (v->caller) in s_show()
3917 seq_printf(m, " %pS", v->caller); in s_show()
3919 if (v->nr_pages) in s_show()
3920 seq_printf(m, " pages=%d", v->nr_pages); in s_show()
3922 if (v->phys_addr) in s_show()
3923 seq_printf(m, " phys=%pa", &v->phys_addr); in s_show()
3925 if (v->flags & VM_IOREMAP) in s_show()
3928 if (v->flags & VM_ALLOC) in s_show()
3931 if (v->flags & VM_MAP) in s_show()
3934 if (v->flags & VM_USERMAP) in s_show()
3937 if (v->flags & VM_DMA_COHERENT) in s_show()
3938 seq_puts(m, " dma-coherent"); in s_show()
3940 if (is_vmalloc_addr(v->pages)) in s_show()
3949 if (list_is_last(&va->list, &vmap_area_list)) in s_show()