Lines Matching +full:vm +full:- +full:map

1 // SPDX-License-Identifier: GPL-2.0-only
5 * SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
43 #include "pgalloc-track.h"
66 llist_for_each_safe(llnode, t, llist_del_all(&p->list)) in free_work()
157 * unmap_kernel_range_noflush - unmap kernel VM area
158 * @start: start of the VM area to unmap
159 * @size: size of the VM area to unmap
161 * Unmap PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify
166 * for calling flush_cache_vunmap() on to-be-mapped areas before calling this
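Taken together with flush_tlb_kernel_range(), the caller-side sequence this comment describes can be sketched as below. This is a hypothetical demo_* helper, not in-tree code; it mirrors what free_unmap_vmap_area() does further down in this listing and assumes the v5.10-era unmap_kernel_range_noflush(addr, size) signature.

    #include <linux/vmalloc.h>
    #include <asm/cacheflush.h>     /* flush_cache_vunmap() */
    #include <asm/tlbflush.h>       /* flush_tlb_kernel_range() */

    /* Hypothetical helper: tear down a kernel VM range by hand. */
    static void demo_unmap_area(unsigned long addr, unsigned long size)
    {
            flush_cache_vunmap(addr, addr + size);          /* before unmapping */
            unmap_kernel_range_noflush(addr, size);         /* page tables only */
            flush_tlb_kernel_range(addr, addr + size);      /* caller's TLB flush */
    }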
205 return -ENOMEM; in vmap_pte_range()
210 return -EBUSY; in vmap_pte_range()
212 return -ENOMEM; in vmap_pte_range()
229 return -ENOMEM; in vmap_pmd_range()
233 return -ENOMEM; in vmap_pmd_range()
247 return -ENOMEM; in vmap_pud_range()
251 return -ENOMEM; in vmap_pud_range()
265 return -ENOMEM; in vmap_p4d_range()
269 return -ENOMEM; in vmap_p4d_range()
275 * map_kernel_range_noflush - map kernel VM area with the specified pages
276 * @addr: start of the VM area to map
277 * @size: size of the VM area to map
279 * @pages: pages to map
281 * Map PFN_UP(@size) pages at @addr. The VM area that @addr and @size specify should
286 * calling flush_cache_vmap() on to-be-mapped areas before calling this
290 * 0 on success, -errno on failure.
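The mapping side follows the same contract: the noflush variant only touches page tables and leaves the cache flush to the caller. A minimal sketch with a hypothetical demo_* helper; map_kernel_range(), used later by vmap() in this listing, wraps exactly this pairing.

    #include <linux/mm.h>
    #include <linux/vmalloc.h>
    #include <asm/cacheflush.h>     /* flush_cache_vmap() */

    /* Hypothetical helper: map @count pages at @addr with PAGE_KERNEL. */
    static int demo_map_pages(unsigned long addr, unsigned int count,
                              struct page **pages)
    {
            unsigned long size = (unsigned long)count << PAGE_SHIFT;
            int ret;

            ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
            if (ret)
                    return ret;                     /* -errno on failure */
            flush_cache_vmap(addr, addr + size);    /* caller's cache flush */
            return 0;
    }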
333 * ARM, x86-64 and sparc64 put modules in a special place, in is_vmalloc_or_module_addr()
397 * Map a vmalloc()-space virtual address to the physical page frame number.
435 * This augment red-black tree represents the free vmap space.
436 * All vmap_area objects in this tree are sorted by va->va_start
441 * of its sub-tree, right or left. Therefore it is possible to
456 return (va->va_end - va->va_start); in va_size()
465 return va ? va->subtree_max_size : 0; in get_subtree_max_size()
475 get_subtree_max_size(va->rb_node.rb_left), in compute_subtree_max_size()
476 get_subtree_max_size(va->rb_node.rb_right)); in compute_subtree_max_size()
501 if (addr < va->va_start) in __find_vmap_area()
502 n = n->rb_left; in __find_vmap_area()
503 else if (addr >= va->va_end) in __find_vmap_area()
504 n = n->rb_right; in __find_vmap_area()
529 link = &root->rb_node; in find_va_links()
541 * it link, where the new va->rb_node will be attached to. in find_va_links()
551 if (va->va_start < tmp_va->va_end && in find_va_links()
552 va->va_end <= tmp_va->va_start) in find_va_links()
553 link = &(*link)->rb_left; in find_va_links()
554 else if (va->va_end > tmp_va->va_start && in find_va_links()
555 va->va_start >= tmp_va->va_end) in find_va_links()
556 link = &(*link)->rb_right; in find_va_links()
558 WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n", in find_va_links()
559 va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end); in find_va_links()
565 *parent = &tmp_va->rb_node; in find_va_links()
576 * The red-black tree where we try to find VA neighbors in get_va_next_sibling()
583 list = &rb_entry(parent, struct vmap_area, rb_node)->list; in get_va_next_sibling()
584 return (&parent->rb_right == link ? list->next : list); in get_va_next_sibling()
596 head = &rb_entry(parent, struct vmap_area, rb_node)->list; in link_va()
597 if (&parent->rb_right != link) in link_va()
598 head = head->prev; in link_va()
601 /* Insert to the rb-tree */ in link_va()
602 rb_link_node(&va->rb_node, parent, link); in link_va()
606 * to the tree. We do not set va->subtree_max_size to in link_va()
615 rb_insert_augmented(&va->rb_node, in link_va()
617 va->subtree_max_size = 0; in link_va()
619 rb_insert_color(&va->rb_node, root); in link_va()
622 /* Address-sort this list */ in link_va()
623 list_add(&va->list, head); in link_va()
629 if (WARN_ON(RB_EMPTY_NODE(&va->rb_node))) in unlink_va()
633 rb_erase_augmented(&va->rb_node, in unlink_va()
636 rb_erase(&va->rb_node, root); in unlink_va()
638 list_del(&va->list); in unlink_va()
639 RB_CLEAR_NODE(&va->rb_node); in unlink_va()
651 if (computed_size != va->subtree_max_size) in augment_tree_propagate_check()
653 va_size(va), va->subtree_max_size); in augment_tree_propagate_check()
665 * - After VA has been inserted to the tree(free path);
666 * - After VA has been shrunk(allocation path);
667 * - After VA has been increased(merging path).
673 * 4--8
677 * 2--2 8--8
683 * node becomes 4--6.
693 free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL); in augment_tree_propagate_from()
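The invariant these fragments maintain (each node caches the largest free-block size found anywhere in its subtree) can be restated as a toy, self-contained recurrence. This is only an illustration, not the in-tree rb_insert_augmented()/rb_erase_augmented() callbacks.

    /* Toy node; the kernel uses struct vmap_area plus the augmented rb-tree API. */
    struct toy_node {
            unsigned long size;                     /* va_end - va_start */
            unsigned long subtree_max_size;
            struct toy_node *left, *right;
    };

    static unsigned long toy_subtree_max(struct toy_node *n)
    {
            return n ? n->subtree_max_size : 0;
    }

    /* Recomputed bottom-up after an insert (free), shrink (alloc) or merge. */
    static void toy_update(struct toy_node *n)
    {
            unsigned long m = n->size;

            if (toy_subtree_max(n->left) > m)
                    m = toy_subtree_max(n->left);
            if (toy_subtree_max(n->right) > m)
                    m = toy_subtree_max(n->right);
            n->subtree_max_size = m;
    }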
732 * Merge de-allocated chunk of VA memory with previous
770 * |<------VA------>|<-----Next----->| in merge_or_add_vmap_area()
776 if (sibling->va_start == va->va_end) { in merge_or_add_vmap_area()
777 sibling->va_start = va->va_start; in merge_or_add_vmap_area()
791 * |<-----Prev----->|<------VA------>| in merge_or_add_vmap_area()
795 if (next->prev != head) { in merge_or_add_vmap_area()
796 sibling = list_entry(next->prev, struct vmap_area, list); in merge_or_add_vmap_area()
797 if (sibling->va_end == va->va_start) { in merge_or_add_vmap_area()
808 sibling->va_end = va->va_end; in merge_or_add_vmap_area()
836 if (va->va_start > vstart) in is_within_this_va()
837 nva_start_addr = ALIGN(va->va_start, align); in is_within_this_va()
846 return (nva_start_addr + size <= va->va_end); in is_within_this_va()
866 length = size + align - 1; in find_vmap_lowest_match()
871 if (get_subtree_max_size(node->rb_left) >= length && in find_vmap_lowest_match()
872 vstart < va->va_start) { in find_vmap_lowest_match()
873 node = node->rb_left; in find_vmap_lowest_match()
880 * sub-tree if it does not have a free block that is in find_vmap_lowest_match()
883 if (get_subtree_max_size(node->rb_right) >= length) { in find_vmap_lowest_match()
884 node = node->rb_right; in find_vmap_lowest_match()
889 * OK. We roll back and find the first right sub-tree, in find_vmap_lowest_match()
898 if (get_subtree_max_size(node->rb_right) >= length && in find_vmap_lowest_match()
899 vstart <= va->va_start) { in find_vmap_lowest_match()
900 node = node->rb_right; in find_vmap_lowest_match()
963 if (nva_start_addr < va->va_start || in classify_va_fit_type()
964 nva_start_addr + size > va->va_end) in classify_va_fit_type()
968 if (va->va_start == nva_start_addr) { in classify_va_fit_type()
969 if (va->va_end == nva_start_addr + size) in classify_va_fit_type()
973 } else if (va->va_end == nva_start_addr + size) { in classify_va_fit_type()
995 * |---------------| in adjust_va_to_fit_type()
1005 * |-------|-------| in adjust_va_to_fit_type()
1007 va->va_start += size; in adjust_va_to_fit_type()
1014 * |-------|-------| in adjust_va_to_fit_type()
1016 va->va_end = nva_start_addr; in adjust_va_to_fit_type()
1023 * |---|-------|---| in adjust_va_to_fit_type()
1028 * For percpu allocator we do not do any pre-allocation in adjust_va_to_fit_type()
1054 return -1; in adjust_va_to_fit_type()
1060 lva->va_start = va->va_start; in adjust_va_to_fit_type()
1061 lva->va_end = nva_start_addr; in adjust_va_to_fit_type()
1066 va->va_start = nva_start_addr + size; in adjust_va_to_fit_type()
1068 return -1; in adjust_va_to_fit_type()
1075 insert_vmap_area_augment(lva, &va->rb_node, in adjust_va_to_fit_type()
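For reference, the four ways a request can sit inside a free VA, as classified above, can be sketched in a standalone restatement; the constant names mirror the in-tree fit_type enum, everything else is hypothetical.

    enum demo_fit_type { NOTHING_FIT, FL_FIT, LE_FIT, RE_FIT, NE_FIT };

    static enum demo_fit_type
    demo_classify(unsigned long va_start, unsigned long va_end,
                  unsigned long nva_start_addr, unsigned long size)
    {
            if (nva_start_addr < va_start || nva_start_addr + size > va_end)
                    return NOTHING_FIT;     /* request not inside this VA        */
            if (va_start == nva_start_addr)
                    return va_end == nva_start_addr + size ?
                            FL_FIT :        /* fills the whole VA, unlink it     */
                            LE_FIT;         /* cut from the left, bump va_start  */
            if (va_end == nva_start_addr + size)
                    return RE_FIT;          /* cut from the right, lower va_end  */
            return NE_FIT;                  /* middle cut, VA splits in two      */
    }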
1099 if (va->va_start > vstart) in __alloc_vmap_area()
1100 nva_start_addr = ALIGN(va->va_start, align); in __alloc_vmap_area()
1164 return ERR_PTR(-EBUSY); in alloc_vmap_area()
1171 return ERR_PTR(-ENOMEM); in alloc_vmap_area()
1177 kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask); in alloc_vmap_area()
1188 * The preload is done in non-atomic context, thus it allows us in alloc_vmap_area()
1220 va->va_start = addr; in alloc_vmap_area()
1221 va->va_end = addr + size; in alloc_vmap_area()
1222 va->vm = NULL; in alloc_vmap_area()
1229 BUG_ON(!IS_ALIGNED(va->va_start, align)); in alloc_vmap_area()
1230 BUG_ON(va->va_start < vstart); in alloc_vmap_area()
1231 BUG_ON(va->va_end > vend); in alloc_vmap_area()
1262 return ERR_PTR(-EBUSY); in alloc_vmap_area()
1311 /* for per-CPU blocks */
1324 * Purges all lazily-freed vmap areas.
1344 if (va->va_start < start) in __purge_vmap_area_lazy()
1345 start = va->va_start; in __purge_vmap_area_lazy()
1346 if (va->va_end > end) in __purge_vmap_area_lazy()
1347 end = va->va_end; in __purge_vmap_area_lazy()
1355 unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT; in __purge_vmap_area_lazy()
1356 unsigned long orig_start = va->va_start; in __purge_vmap_area_lazy()
1357 unsigned long orig_end = va->va_end; in __purge_vmap_area_lazy()
1360 * Finally insert or merge lazily-freed area. It is in __purge_vmap_area_lazy()
1372 va->va_start, va->va_end); in __purge_vmap_area_lazy()
1419 nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >> in free_vmap_area_noflush()
1423 llist_add(&va->purge_list, &vmap_purge_list); in free_vmap_area_noflush()
1434 flush_cache_vunmap(va->va_start, va->va_end); in free_unmap_vmap_area()
1435 unmap_kernel_range_noflush(va->va_start, va->va_end - va->va_start); in free_unmap_vmap_area()
1437 flush_tlb_kernel_range(va->va_start, va->va_end); in free_unmap_vmap_area()
1461 * to #define VMALLOC_SPACE (VMALLOC_END-VMALLOC_START). Guess
1517 addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1); in addr_to_vb_idx()
1532 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
1537 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1553 return ERR_PTR(-ENOMEM); in new_vmap_block()
1563 vaddr = vmap_block_vaddr(va->va_start, 0); in new_vmap_block()
1564 spin_lock_init(&vb->lock); in new_vmap_block()
1565 vb->va = va; in new_vmap_block()
1568 vb->free = VMAP_BBMAP_BITS - (1UL << order); in new_vmap_block()
1569 vb->dirty = 0; in new_vmap_block()
1570 vb->dirty_min = VMAP_BBMAP_BITS; in new_vmap_block()
1571 vb->dirty_max = 0; in new_vmap_block()
1572 INIT_LIST_HEAD(&vb->free_list); in new_vmap_block()
1574 vb_idx = addr_to_vb_idx(va->va_start); in new_vmap_block()
1583 spin_lock(&vbq->lock); in new_vmap_block()
1584 list_add_tail_rcu(&vb->free_list, &vbq->free); in new_vmap_block()
1585 spin_unlock(&vbq->lock); in new_vmap_block()
1595 tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start)); in free_vmap_block()
1598 free_vmap_area_noflush(vb->va); in free_vmap_block()
1610 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in purge_fragmented_blocks()
1612 if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) in purge_fragmented_blocks()
1615 spin_lock(&vb->lock); in purge_fragmented_blocks()
1616 if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { in purge_fragmented_blocks()
1617 vb->free = 0; /* prevent further allocs after releasing lock */ in purge_fragmented_blocks()
1618 vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ in purge_fragmented_blocks()
1619 vb->dirty_min = 0; in purge_fragmented_blocks()
1620 vb->dirty_max = VMAP_BBMAP_BITS; in purge_fragmented_blocks()
1621 spin_lock(&vbq->lock); in purge_fragmented_blocks()
1622 list_del_rcu(&vb->free_list); in purge_fragmented_blocks()
1623 spin_unlock(&vbq->lock); in purge_fragmented_blocks()
1624 spin_unlock(&vb->lock); in purge_fragmented_blocks()
1625 list_add_tail(&vb->purge, &purge); in purge_fragmented_blocks()
1627 spin_unlock(&vb->lock); in purge_fragmented_blocks()
1632 list_del(&vb->purge); in purge_fragmented_blocks()
1666 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in vb_alloc()
1669 spin_lock(&vb->lock); in vb_alloc()
1670 if (vb->free < (1UL << order)) { in vb_alloc()
1671 spin_unlock(&vb->lock); in vb_alloc()
1675 pages_off = VMAP_BBMAP_BITS - vb->free; in vb_alloc()
1676 vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); in vb_alloc()
1677 vb->free -= 1UL << order; in vb_alloc()
1678 if (vb->free == 0) { in vb_alloc()
1679 spin_lock(&vbq->lock); in vb_alloc()
1680 list_del_rcu(&vb->free_list); in vb_alloc()
1681 spin_unlock(&vbq->lock); in vb_alloc()
1684 spin_unlock(&vb->lock); in vb_alloc()
1710 offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT; in vb_free()
1718 spin_lock(&vb->lock); in vb_free()
1721 vb->dirty_min = min(vb->dirty_min, offset); in vb_free()
1722 vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); in vb_free()
1724 vb->dirty += 1UL << order; in vb_free()
1725 if (vb->dirty == VMAP_BBMAP_BITS) { in vb_free()
1726 BUG_ON(vb->free); in vb_free()
1727 spin_unlock(&vb->lock); in vb_free()
1730 spin_unlock(&vb->lock); in vb_free()
1747 list_for_each_entry_rcu(vb, &vbq->free, free_list) { in _vm_unmap_aliases()
1748 spin_lock(&vb->lock); in _vm_unmap_aliases()
1749 if (vb->dirty) { in _vm_unmap_aliases()
1750 unsigned long va_start = vb->va->va_start; in _vm_unmap_aliases()
1753 s = va_start + (vb->dirty_min << PAGE_SHIFT); in _vm_unmap_aliases()
1754 e = va_start + (vb->dirty_max << PAGE_SHIFT); in _vm_unmap_aliases()
1761 spin_unlock(&vb->lock); in _vm_unmap_aliases()
1774 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1796 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1822 debug_check_no_locks_freed((void *)va->va_start, in vm_unmap_ram()
1823 (va->va_end - va->va_start)); in vm_unmap_ram()
1829 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1835 * faster than vmap so it's good. But if you mix long-life and short-life
1838 * the end. Please use this function for short-lived objects.
1860 addr = va->va_start; in vm_map_ram()
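The comment above positions vm_map_ram()/vm_unmap_ram() as the fast path for short-lived mappings. A minimal sketch of that pairing, with a hypothetical demo_* helper and assuming the v5.10-era three-argument vm_map_ram():

    #include <linux/mm.h>
    #include <linux/numa.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    /* Hypothetical: zero @count pre-allocated pages via a transient mapping. */
    static int demo_zero_pages(struct page **pages, unsigned int count)
    {
            void *mem = vm_map_ram(pages, count, NUMA_NO_NODE);

            if (!mem)
                    return -ENOMEM;
            memset(mem, 0, (size_t)count << PAGE_SHIFT);
            vm_unmap_ram(mem, count);       /* count must match the map call */
            return 0;
    }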
1877 * vm_area_add_early - add vmap area early during boot
1878 * @vm: vm_struct to add
1880 * This function is used to add fixed kernel vm area to vmlist before
1881 * vmalloc_init() is called. @vm->addr, @vm->size, and @vm->flags
1886 void __init vm_area_add_early(struct vm_struct *vm) in vm_area_add_early() argument
1891 for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) { in vm_area_add_early()
1892 if (tmp->addr >= vm->addr) { in vm_area_add_early()
1893 BUG_ON(tmp->addr < vm->addr + vm->size); in vm_area_add_early()
1896 BUG_ON(tmp->addr + tmp->size > vm->addr); in vm_area_add_early()
1898 vm->next = *p; in vm_area_add_early()
1899 *p = vm; in vm_area_add_early()
1903 * vm_area_register_early - register vmap area early during boot
1904 * @vm: vm_struct to register
1907 * This function is used to register kernel vm area before
1908 * vmalloc_init() is called. @vm->size and @vm->flags should contain
1910 * vm->addr contains the allocated address.
1914 void __init vm_area_register_early(struct vm_struct *vm, size_t align) in vm_area_register_early() argument
1920 vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START; in vm_area_register_early()
1922 vm->addr = (void *)addr; in vm_area_register_early()
1924 vm_area_add_early(vm); in vm_area_register_early()
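A hedged sketch of how an early-boot user reserves space this way before vmalloc_init() runs (the percpu first-chunk code is an in-tree example of this pattern); the demo_* names are made up.

    #include <linux/sizes.h>
    #include <linux/vmalloc.h>

    static struct vm_struct demo_early_vm;          /* hypothetical early user */

    static void __init demo_reserve_early(void)
    {
            demo_early_vm.flags = VM_ALLOC;         /* set on entry, per the doc */
            demo_early_vm.size  = SZ_2M;
            vm_area_register_early(&demo_early_vm, PAGE_SIZE);
            /* demo_early_vm.addr now holds the chosen address. */
    }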
1935 * -|-----|.....|-----|-----|-----|.....|- in vmap_init_free_space()
1937 * |<--------------------------------->| in vmap_init_free_space()
1940 if (busy->va_start - vmap_start > 0) { in vmap_init_free_space()
1943 free->va_start = vmap_start; in vmap_init_free_space()
1944 free->va_end = busy->va_start; in vmap_init_free_space()
1952 vmap_start = busy->va_end; in vmap_init_free_space()
1955 if (vmap_end - vmap_start > 0) { in vmap_init_free_space()
1958 free->va_start = vmap_start; in vmap_init_free_space()
1959 free->va_end = vmap_end; in vmap_init_free_space()
1984 spin_lock_init(&vbq->lock); in vmalloc_init()
1985 INIT_LIST_HEAD(&vbq->free); in vmalloc_init()
1987 init_llist_head(&p->list); in vmalloc_init()
1988 INIT_WORK(&p->wq, free_work); in vmalloc_init()
1992 for (tmp = vmlist; tmp; tmp = tmp->next) { in vmalloc_init()
1997 va->va_start = (unsigned long)tmp->addr; in vmalloc_init()
1998 va->va_end = va->va_start + tmp->size; in vmalloc_init()
1999 va->vm = tmp; in vmalloc_init()
2011 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
2012 * @addr: start of the VM area to unmap
2013 * @size: size of the VM area to unmap
2027 static inline void setup_vmalloc_vm_locked(struct vm_struct *vm, in setup_vmalloc_vm_locked() argument
2030 vm->flags = flags; in setup_vmalloc_vm_locked()
2031 vm->addr = (void *)va->va_start; in setup_vmalloc_vm_locked()
2032 vm->size = va->va_end - va->va_start; in setup_vmalloc_vm_locked()
2033 vm->caller = caller; in setup_vmalloc_vm_locked()
2034 va->vm = vm; in setup_vmalloc_vm_locked()
2037 static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va, in setup_vmalloc_vm() argument
2041 setup_vmalloc_vm_locked(vm, va, flags, caller); in setup_vmalloc_vm()
2045 static void clear_vm_uninitialized_flag(struct vm_struct *vm) in clear_vm_uninitialized_flag() argument
2049 * we should make sure that vm has proper values. in clear_vm_uninitialized_flag()
2053 vm->flags &= ~VM_UNINITIALIZED; in clear_vm_uninitialized_flag()
2086 kasan_unpoison_vmalloc((void *)va->va_start, requested_size); in __get_vm_area_node()
2102 * get_vm_area - reserve a contiguous kernel virtual area
2127 * find_vm_area - find a continuous kernel virtual area
2130 * Search for the kernel VM area starting at @addr, and return it.
2144 return va->vm; in find_vm_area()
2148 * remove_vm_area - find and remove a continuous kernel virtual area
2151 * Search for the kernel VM area starting at @addr, and remove it.
2152 * This function returns the found VM area, but using it is NOT safe
2165 if (va && va->vm) { in remove_vm_area()
2166 struct vm_struct *vm = va->vm; in remove_vm_area() local
2168 va->vm = NULL; in remove_vm_area()
2171 kasan_free_shadow(vm); in remove_vm_area()
2174 return vm; in remove_vm_area()
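A minimal sketch of the reserve/lookup/release round trip using these helpers (hypothetical demo_* function; free_vm_area(), which appears later in this listing, pairs with get_vm_area()):

    #include <linux/bug.h>
    #include <linux/vmalloc.h>

    static int demo_reserve_and_lookup(void)
    {
            struct vm_struct *area, *found;

            area = get_vm_area(4 * PAGE_SIZE, VM_IOREMAP);  /* reserve VA only */
            if (!area)
                    return -ENOMEM;

            found = find_vm_area(area->addr);       /* same vm_struct back */
            WARN_ON(found != area);

            free_vm_area(area);     /* remove_vm_area() + kfree() of the vm_struct */
            return 0;
    }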
2186 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2187 if (page_address(area->pages[i])) in set_area_direct_map()
2188 set_direct_map(area->pages[i]); in set_area_direct_map()
2191 /* Handle removing and resetting vm mappings related to the vm_struct. */
2195 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2199 remove_vm_area(area->addr); in vm_remove_mappings()
2206 * If not deallocating pages, just do the flush of the VM area and in vm_remove_mappings()
2215 * If execution gets here, flush the vm mapping and reset the direct in vm_remove_mappings()
2216 * map. Find the start and end range of the direct mappings to make sure in vm_remove_mappings()
2217 * the vm_unmap_aliases() flush includes the direct map. in vm_remove_mappings()
2219 for (i = 0; i < area->nr_pages; i++) { in vm_remove_mappings()
2220 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2229 * Set direct map to something invalid so that it won't be cached if in vm_remove_mappings()
2231 * reset the direct map permissions to the default. in vm_remove_mappings()
2251 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", in __vunmap()
2256 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2257 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2259 kasan_poison_vmalloc(area->addr, area->size); in __vunmap()
2266 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
2267 struct page *page = area->pages[i]; in __vunmap()
2272 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2274 kvfree(area->pages); in __vunmap()
2291 if (llist_add((struct llist_node *)addr, &p->list)) in __vfree_deferred()
2292 schedule_work(&p->wq); in __vfree_deferred()
2296 * vfree_atomic - release memory allocated by vmalloc()
2322 * vfree - Release memory allocated by vmalloc()
2336 * conventions for vfree() arch-dependent would be a really bad idea).
2354 * vunmap - release virtual mapping obtained by vmap()
2372 * vmap - map an array of pages into virtually contiguous space
2374 * @count: number of pages to map
2375 * @flags: vm_area->flags
2402 if (map_kernel_range((unsigned long)area->addr, size, pgprot_nx(prot), in vmap()
2404 vunmap(area->addr); in vmap()
2409 area->pages = pages; in vmap()
2410 return area->addr; in vmap()
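A self-contained sketch of the vmap()/vunmap() round trip described above (hypothetical demo_* function): two discontiguous pages become one virtually contiguous mapping, and vunmap() drops only the mapping, never the pages.

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    static int demo_vmap_roundtrip(void)
    {
            struct page *pages[2] = { NULL, NULL };
            void *addr = NULL;
            int i;

            for (i = 0; i < 2; i++)
                    pages[i] = alloc_page(GFP_KERNEL);

            if (pages[0] && pages[1])
                    addr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);

            if (addr) {
                    memset(addr, 0, 2 * PAGE_SIZE); /* one contiguous view   */
                    vunmap(addr);                   /* pages are not freed   */
            }

            for (i = 0; i < 2; i++)
                    if (pages[i])
                            __free_page(pages[i]);

            return addr ? 0 : -ENOMEM;
    }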
2425 if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) in vmap_pfn_apply()
2426 return -EINVAL; in vmap_pfn_apply()
2427 *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); in vmap_pfn_apply()
2432 * vmap_pfn - map an array of PFNs into virtually contiguous space
2434 * @count: number of pages to map
2449 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2454 return area->addr; in vmap_pfn()
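vmap_pfn() is for PFNs with no struct page behind them (the pfn_valid() check above WARNs otherwise), e.g. device memory. A hedged sketch; pgprot_noncached() is chosen purely for illustration and the demo_* name is hypothetical.

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Hypothetical: map @count device PFNs (no struct page) uncached. */
    static void __iomem *demo_map_device_pfns(unsigned long *pfns,
                                              unsigned int count)
    {
            return (void __iomem *)vmap_pfn(pfns, count,
                                            pgprot_noncached(PAGE_KERNEL));
    }
    /* The returned address is released with vunmap(), as with vmap(). */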
2474 area->caller); in __vmalloc_area_node()
2480 remove_vm_area(area->addr); in __vmalloc_area_node()
2485 area->pages = pages; in __vmalloc_area_node()
2486 area->nr_pages = nr_pages; in __vmalloc_area_node()
2488 for (i = 0; i < area->nr_pages; i++) { in __vmalloc_area_node()
2498 area->nr_pages = i; in __vmalloc_area_node()
2499 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2502 area->pages[i] = page; in __vmalloc_area_node()
2506 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2508 if (map_kernel_range((unsigned long)area->addr, get_vm_area_size(area), in __vmalloc_area_node()
2512 return area->addr; in __vmalloc_area_node()
2517 (area->nr_pages*PAGE_SIZE), area->size); in __vmalloc_area_node()
2518 __vfree(area->addr); in __vmalloc_area_node()
2523 * __vmalloc_node_range - allocate virtually contiguous memory
2526 * @start: vm area range start
2527 * @end: vm area range end
2530 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
2535 * allocator with @gfp_mask flags. Map them into contiguous
2580 * __vmalloc_node - allocate virtually contiguous memory
2588 * @gfp_mask flags. Map them into contiguous kernel virtual space.
2590 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2621 * vmalloc - allocate virtually contiguous memory
2625 * allocator and map them into contiguous kernel virtual space.
2640 * vzalloc - allocate virtually contiguous memory with zero fill
2644 * allocator and map them into contiguous kernel virtual space.
2660 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2678 * vmalloc_node - allocate memory on a specific node
2683 * allocator and map them into contiguous kernel virtual space.
2698 * vzalloc_node - allocate memory on a specific node with zero fill
2703 * allocator and map them into contiguous kernel virtual space.
2728 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2732 * page level allocator and map them into contiguous kernel virtual space.
2744 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
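The allocation side of this family reduces to a simple pattern; a minimal sketch (hypothetical demo_* function) using vzalloc()/vfree():

    #include <linux/types.h>
    #include <linux/vmalloc.h>

    static int demo_vmalloc_buffer(void)
    {
            u32 *buf = vzalloc(64 * PAGE_SIZE);     /* virtually contiguous, zeroed */

            if (!buf)
                    return -ENOMEM;

            buf[0] = 0xdeadbeef;    /* backing pages need not be physically contiguous */

            vfree(buf);     /* may sleep; vfree_atomic() exists for atomic callers */
            return 0;
    }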
2775 length = PAGE_SIZE - offset; in aligned_vread()
2791 void *map = kmap_atomic(p); in aligned_vread() local
2792 memcpy(buf, map + offset, length); in aligned_vread()
2793 kunmap_atomic(map); in aligned_vread()
2800 count -= length; in aligned_vread()
2814 length = PAGE_SIZE - offset; in aligned_vwrite()
2830 void *map = kmap_atomic(p); in aligned_vwrite() local
2831 memcpy(map + offset, buf, length); in aligned_vwrite()
2832 kunmap_atomic(map); in aligned_vwrite()
2837 count -= length; in aligned_vwrite()
2843 * vread() - read vmalloc area in a safe way.
2845 * @addr: vm address.
2851 * proper area of @buf. If there are memory holes, they'll be zero-filled.
2869 struct vm_struct *vm; in vread() local
2876 count = -(unsigned long) addr; in vread()
2883 if (!va->vm) in vread()
2886 vm = va->vm; in vread()
2887 vaddr = (char *) vm->addr; in vread()
2888 if (addr >= vaddr + get_vm_area_size(vm)) in vread()
2896 count--; in vread()
2898 n = vaddr + get_vm_area_size(vm) - addr; in vread()
2901 if (!(vm->flags & VM_IOREMAP)) in vread()
2907 count -= n; in vread()
2914 /* zero-fill memory holes */ in vread()
2916 memset(buf, 0, buflen - (buf - buf_start)); in vread()
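A hedged sketch of the usual vread() pattern, modelled loosely on /proc/kcore (demo_* names are hypothetical): bounce through a kernel buffer, since vread() copies into kernel memory and zero-fills any holes it crosses.

    #include <linux/slab.h>
    #include <linux/uaccess.h>
    #include <linux/vmalloc.h>

    static ssize_t demo_read_vmalloc_to_user(char __user *ubuf, size_t len,
                                             char *vaddr)
    {
            char *kbuf = kzalloc(len, GFP_KERNEL);
            ssize_t ret = len;

            if (!kbuf)
                    return -ENOMEM;

            vread(kbuf, vaddr, len);        /* holes come back zero-filled */

            if (copy_to_user(ubuf, kbuf, len))
                    ret = -EFAULT;

            kfree(kbuf);
            return ret;
    }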
2922 * vwrite() - write vmalloc area in a safe way.
2924 * @addr: vm address.
2948 struct vm_struct *vm; in vwrite() local
2955 count = -(unsigned long) addr; in vwrite()
2963 if (!va->vm) in vwrite()
2966 vm = va->vm; in vwrite()
2967 vaddr = (char *) vm->addr; in vwrite()
2968 if (addr >= vaddr + get_vm_area_size(vm)) in vwrite()
2975 count--; in vwrite()
2977 n = vaddr + get_vm_area_size(vm) - addr; in vwrite()
2980 if (!(vm->flags & VM_IOREMAP)) { in vwrite()
2986 count -= n; in vwrite()
2996 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3001 * @size: size of map area
3003 * Returns: 0 for success, -Exxx on failure
3021 return -EINVAL; in remap_vmalloc_range_partial()
3026 return -EINVAL; in remap_vmalloc_range_partial()
3030 return -EINVAL; in remap_vmalloc_range_partial()
3032 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3033 return -EINVAL; in remap_vmalloc_range_partial()
3037 return -EINVAL; in remap_vmalloc_range_partial()
3050 size -= PAGE_SIZE; in remap_vmalloc_range_partial()
3053 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; in remap_vmalloc_range_partial()
3060 * remap_vmalloc_range - map vmalloc pages to userspace
3061 * @vma: vma to cover (map full range of vma)
3063 * @pgoff: number of pages into addr before first page to map
3065 * Returns: 0 for success, -Exxx on failure
3076 return remap_vmalloc_range_partial(vma, vma->vm_start, in remap_vmalloc_range()
3078 vma->vm_end - vma->vm_start); in remap_vmalloc_range()
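The canonical consumer of remap_vmalloc_range() is a driver mmap handler exposing a buffer allocated with vmalloc_user(), so the backing area carries VM_USERMAP as checked above. A hedged sketch with hypothetical demo_* names:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *demo_buf;  /* allocated elsewhere with vmalloc_user() */

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* Plain vmalloc() memory would be rejected: VM_USERMAP is required. */
            return remap_vmalloc_range(vma, demo_buf, 0);
    }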
3085 ret = remove_vm_area(area->addr); in free_vm_area()
3098 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3103 * i.e. va->va_start < addr && va->va_end < addr or NULL
3117 if (tmp->va_start <= addr) { in pvm_find_va_enclose_addr()
3119 if (tmp->va_end >= addr) in pvm_find_va_enclose_addr()
3122 n = n->rb_right; in pvm_find_va_enclose_addr()
3124 n = n->rb_left; in pvm_find_va_enclose_addr()
3132 * pvm_determine_end_from_reverse - find the highest aligned address
3135 * in - the VA we start the search(reverse order);
3136 * out - the VA with the highest aligned end address.
3143 unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pvm_determine_end_from_reverse()
3149 addr = min((*va)->va_end & ~(align - 1), vmalloc_end); in pvm_determine_end_from_reverse()
3150 if ((*va)->va_start < addr) in pvm_determine_end_from_reverse()
3159 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3168 * Percpu allocator wants to use congruent vm areas so that it can
3176 * does everything top-down and scans free blocks from the end looking
3187 const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1); in pcpu_get_vm_areas()
3218 if (vmalloc_end - vmalloc_start < last_end) { in pcpu_get_vm_areas()
3237 /* start scanning - we scan from the top, begin with the last area */ in pcpu_get_vm_areas()
3243 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3263 if (base + end > va->va_end) { in pcpu_get_vm_areas()
3264 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3272 if (base + start < va->va_start) { in pcpu_get_vm_areas()
3273 va = node_to_va(rb_prev(&va->rb_node)); in pcpu_get_vm_areas()
3274 base = pvm_determine_end_from_reverse(&va, align) - end; in pcpu_get_vm_areas()
3283 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3315 va->va_start = start; in pcpu_get_vm_areas()
3316 va->va_end = start + size; in pcpu_get_vm_areas()
3323 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3326 kasan_unpoison_vmalloc((void *)vas[area]->va_start, in pcpu_get_vm_areas()
3330 /* insert all vm's */ in pcpu_get_vm_areas()
3350 while (area--) { in pcpu_get_vm_areas()
3351 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3352 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3357 va->va_start, va->va_end); in pcpu_get_vm_areas()
3401 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3402 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3407 va->va_start, va->va_end); in pcpu_get_vm_areas()
3418 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3461 unsigned int nr, *counters = m->private; in show_numa_info()
3466 if (v->flags & VM_UNINITIALIZED) in show_numa_info()
3473 for (nr = 0; nr < v->nr_pages; nr++) in show_numa_info()
3474 counters[page_to_nid(v->pages[nr])]++; in show_numa_info()
3492 seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n", in show_purge_info()
3493 (void *)va->va_start, (void *)va->va_end, in show_purge_info()
3494 va->va_end - va->va_start); in show_purge_info()
3506 * s_show can encounter race with remove_vm_area, !vm on behalf in s_show()
3509 if (!va->vm) { in s_show()
3510 seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n", in s_show()
3511 (void *)va->va_start, (void *)va->va_end, in s_show()
3512 va->va_end - va->va_start); in s_show()
3517 v = va->vm; in s_show()
3519 seq_printf(m, "0x%pK-0x%pK %7ld", in s_show()
3520 v->addr, v->addr + v->size, v->size); in s_show()
3522 if (v->caller) in s_show()
3523 seq_printf(m, " %pS", v->caller); in s_show()
3525 if (v->nr_pages) in s_show()
3526 seq_printf(m, " pages=%d", v->nr_pages); in s_show()
3528 if (v->phys_addr) in s_show()
3529 seq_printf(m, " phys=%pa", &v->phys_addr); in s_show()
3531 if (v->flags & VM_IOREMAP) in s_show()
3534 if (v->flags & VM_ALLOC) in s_show()
3537 if (v->flags & VM_MAP) in s_show()
3540 if (v->flags & VM_USERMAP) in s_show()
3543 if (v->flags & VM_DMA_COHERENT) in s_show()
3544 seq_puts(m, " dma-coherent"); in s_show()
3546 if (is_vmalloc_addr(v->pages)) in s_show()
3558 if (list_is_last(&va->list, &vmap_area_list)) in s_show()