Lines Matching full:area
452 * @addr: start of the VM area to unmap
453 * @end: end of the VM area to unmap (non-inclusive)
622 * @addr: start of the VM area to map
623 * @end: end of the VM area to map (non-inclusive)
768 * find the lowest match of a free area.
1115 * free area is inserted. If VA has been merged, it is
1163 /* Point to the new merged area. */ in __merge_or_add_vmap_area()
1194 /* Point to the new merged area. */ in __merge_or_add_vmap_area()
1489 * Returns a start address of the newly allocated area, if success.
1566 * when fit type of free area is NE_FIT_TYPE. It guarantees that in preload_this_cpu_lock()
1759 * Finally insert or merge lazily-freed area. It is in __purge_vmap_area_lazy()
1808 * Free a vmap area, caller ensuring that the area has been unmapped
1837 * Free and unmap a vmap area
2307 * vm_area_add_early - add vmap area early during boot
2310 * This function is used to add a fixed kernel vm area to vmlist before
2333 * vm_area_register_early - register vmap area early during boot
2337 * This function is used to register a kernel vm area before
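The two matches above come from the kernel-doc for vm_area_add_early() and vm_area_register_early(), which reserve fixed virtual areas before vmalloc_init() runs. A minimal sketch of an early boot-time reservation, loosely modelled on the percpu first-chunk setup; the vm_early name and the 1 MB size are purely illustrative:

	#include <linux/init.h>
	#include <linux/vmalloc.h>

	/* Illustrative only: reserve a fixed vm area before vmalloc_init(). */
	static struct vm_struct vm_early;

	static void __init reserve_early_vm(void)
	{
		vm_early.flags = VM_ALLOC;
		vm_early.size  = 1UL << 20;	/* assumed size for the example */

		/* Picks an address for vm_early and links it into vmlist. */
		vm_area_register_early(&vm_early, PAGE_SIZE);
	}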
2482 struct vm_struct *area; in __get_vm_area_node() local
2494 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2495 if (unlikely(!area)) in __get_vm_area_node()
2503 kfree(area); in __get_vm_area_node()
2507 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2518 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, in __get_vm_area_node()
2521 return area; in __get_vm_area_node()
2533 * get_vm_area - reserve a contiguous kernel virtual area
2534 * @size: size of the area
2537 * Search an area of @size in the kernel virtual mapping area,
2538 * and reserve it for our purposes. Returns the area descriptor
2541 * Return: the area descriptor on success or %NULL on failure.
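The get_vm_area() kernel-doc above covers reserving kernel virtual address space without populating it. A minimal reserve/release sketch, assuming the caller only needs the address range; the wrapper names and the VM_IOREMAP flag choice are illustrative, not from the source:

	#include <linux/vmalloc.h>

	static void *reserve_kva(unsigned long size)
	{
		struct vm_struct *area;

		/* Reserve @size bytes of kernel virtual address space. */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		/* area->addr is the start of the reserved, still unmapped range. */
		return area->addr;
	}

	static void release_kva(void *addr)
	{
		struct vm_struct *area = find_vm_area(addr);

		/* free_vm_area() removes the area and kfree()s the descriptor. */
		if (area)
			free_vm_area(area);
	}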
2560 * find_vm_area - find a contiguous kernel virtual area
2563 * Search for the kernel VM area starting at @addr, and return it.
2567 * Return: the area descriptor on success or %NULL on failure.
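find_vm_area() only looks the descriptor up; it does not detach it from anything. A small hedged sketch of the lookup, with an illustrative helper name:

	#include <linux/types.h>
	#include <linux/vmalloc.h>

	/* Illustrative: does @addr start a vm area that may be mapped to userspace? */
	static bool is_usermap_area(const void *addr)
	{
		struct vm_struct *area = find_vm_area(addr);

		return area && (area->flags & VM_USERMAP);
	}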
2581 * remove_vm_area - find and remove a contiguous kernel virtual area
2584 * Search for the kernel VM area starting at @addr, and remove it.
2585 * This function returns the found VM area, but using it is NOT safe
2588 * Return: the area descriptor on success or %NULL on failure.
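As the comment notes, remove_vm_area() hands the detached descriptor back to the caller, who then owns both it and any backing pages. A hedged sketch of that tear-down pattern (the wrapper name is illustrative):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void detach_and_free(void *addr)
	{
		struct vm_struct *area = remove_vm_area(addr);

		if (!area)
			return;

		/*
		 * Any pages the area referenced are still the caller's
		 * responsibility at this point; only the descriptor is
		 * freed below.
		 */
		kfree(area);
	}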
2614 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
2620 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2621 if (page_address(area->pages[i])) in set_area_direct_map()
2622 set_direct_map(area->pages[i]); in set_area_direct_map()
2626 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) in vm_remove_mappings() argument
2629 unsigned int page_order = vm_area_page_order(area); in vm_remove_mappings()
2630 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2634 remove_vm_area(area->addr); in vm_remove_mappings()
2641 * If not deallocating pages, just do the flush of the VM area and in vm_remove_mappings()
2654 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_remove_mappings()
2655 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2671 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_remove_mappings()
2673 set_area_direct_map(area, set_direct_map_default_noflush); in vm_remove_mappings()
2678 struct vm_struct *area; in __vunmap() local
2687 area = find_vm_area(addr); in __vunmap()
2688 if (unlikely(!area)) { in __vunmap()
2689 WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n", in __vunmap()
2694 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2695 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2697 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); in __vunmap()
2699 vm_remove_mappings(area, deallocate_pages); in __vunmap()
2704 for (i = 0; i < area->nr_pages; i++) { in __vunmap()
2705 struct page *page = area->pages[i]; in __vunmap()
2716 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2718 kvfree(area->pages); in __vunmap()
2721 kfree(area); in __vunmap()
2768 * Free the virtually contiguous memory area starting at @addr, as obtained
2800 * Free the virtually contiguous memory area starting at @addr,
2827 * Return: the address of the area or %NULL on failure
2832 struct vm_struct *area; in vmap() local
2849 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
2850 if (!area) in vmap()
2853 addr = (unsigned long)area->addr; in vmap()
2856 vunmap(area->addr); in vmap()
2861 area->pages = pages; in vmap()
2862 area->nr_pages = count; in vmap()
2864 return area->addr; in vmap()
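The vmap() matches above map an already-allocated array of struct page pointers at one contiguous virtual address. A minimal sketch pairing vmap() with vunmap(); NR_DEMO_PAGES and the wrapper names are assumptions for the example:

	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	#define NR_DEMO_PAGES	4	/* assumed for the example */

	static void *map_demo_pages(struct page **pages)
	{
		/* Map NR_DEMO_PAGES discontiguous pages at one virtual address. */
		return vmap(pages, NR_DEMO_PAGES, VM_MAP, PAGE_KERNEL);
	}

	static void unmap_demo_pages(void *addr)
	{
		/* Releases the virtual range; the pages themselves are untouched. */
		vunmap(addr);
	}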
2897 struct vm_struct *area; in vmap_pfn() local
2899 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
2901 if (!area) in vmap_pfn()
2903 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2905 free_vm_area(area); in vmap_pfn()
2908 return area->addr; in vmap_pfn()
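vmap_pfn() (available with CONFIG_VMAP_PFN) is the variant for memory without struct page backing, taking raw PFNs instead. A hedged sketch; the helper name and the pgprot_noncached() choice are assumptions suited to device memory:

	#include <linux/vmalloc.h>

	static void *map_device_pfns(unsigned long *pfns, unsigned int count)
	{
		/* Map @count PFNs; returns NULL on failure, like vmap(). */
		return vmap_pfn(pfns, count, pgprot_noncached(PAGE_KERNEL));
	}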
3005 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
3011 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node()
3012 unsigned long size = get_vm_area_size(area); in __vmalloc_area_node()
3026 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, in __vmalloc_area_node()
3027 area->caller); in __vmalloc_area_node()
3029 area->pages = kmalloc_node(array_size, nested_gfp, node); in __vmalloc_area_node()
3032 if (!area->pages) { in __vmalloc_area_node()
3036 free_vm_area(area); in __vmalloc_area_node()
3040 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); in __vmalloc_area_node()
3041 page_order = vm_area_page_order(area); in __vmalloc_area_node()
3043 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, in __vmalloc_area_node()
3044 node, page_order, nr_small_pages, area->pages); in __vmalloc_area_node()
3046 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
3050 for (i = 0; i < area->nr_pages; i++) in __vmalloc_area_node()
3051 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); in __vmalloc_area_node()
3058 if (area->nr_pages != nr_small_pages) { in __vmalloc_area_node()
3061 area->nr_pages * PAGE_SIZE, page_order); in __vmalloc_area_node()
3075 ret = vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
3089 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3093 return area->addr; in __vmalloc_area_node()
3096 __vfree(area->addr); in __vmalloc_area_node()
3104 * @start: vm area range start
3105 * @end: vm area range end
3108 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
3126 * Return: the address of the area or %NULL on failure
3133 struct vm_struct *area; in __vmalloc_node_range() local
3173 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | in __vmalloc_node_range()
3176 if (!area) { in __vmalloc_node_range()
3213 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); in __vmalloc_node_range()
3230 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); in __vmalloc_node_range()
3237 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
3241 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
3243 return area->addr; in __vmalloc_node_range()
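__vmalloc_node_range() is the low-level allocator the fragments above belong to; the classic external caller is an arch module_alloc() implementation. A sketch in that spirit, assuming the usual arch-provided MODULES_VADDR/MODULES_END window and PAGE_KERNEL_EXEC protection:

	#include <linux/gfp.h>
	#include <linux/moduleloader.h>
	#include <linux/numa.h>
	#include <linux/vmalloc.h>

	/* Illustrative module_alloc()-style caller, not the real arch code. */
	static void *demo_module_alloc(unsigned long size)
	{
		return __vmalloc_node_range(size, MODULE_ALIGN,
					    MODULES_VADDR, MODULES_END,
					    GFP_KERNEL, PAGE_KERNEL_EXEC,
					    0, NUMA_NO_NODE,
					    __builtin_return_address(0));
	}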
3360 * The resulting memory area is zeroed so it can be mapped to userspace
3444 * The resulting memory area is 32bit addressable and zeroed so it can be
3477 * To do safe access to this _mapped_ area, we need in aligned_vread()
3500 * vread() - read vmalloc area in a safe way.
3505 * This function checks that addr is a valid vmalloc'ed area, and
3506 * copies data from that area to a given buffer. If the given memory range
3508 * proper area of @buf. If there are memory holes, they'll be zero-filled.
3509 * IOREMAP area is treated as a memory hole and no copy is done.
3512 * vm_struct area, returns 0. @buf should be kernel's buffer.
3515 * should know vmalloc() area is valid and can use memcpy().
3516 * This is for routines which have to access vmalloc area without
3521 * include any intersection with valid vmalloc area
3570 else /* IOREMAP area is treated as memory hole */ in vread()
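The vread() kernel-doc above describes the safe, hole-tolerant way to copy out of vmalloc space (used by /proc/kcore, for instance). A minimal hedged wrapper; the function name is illustrative:

	#include <linux/vmalloc.h>

	static long read_vmalloc_range(char *dst, char *vaddr, unsigned long len)
	{
		/*
		 * Holes and IOREMAP ranges come back zero-filled; the return
		 * value is the number of bytes put into @dst, or 0 if the
		 * range does not intersect any vm_struct area.
		 */
		return vread(dst, vaddr, len);
	}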
3594 * @size: size of map area
3598 * This function checks that @kaddr is a valid vmalloc'ed area,
3609 struct vm_struct *area; in remap_vmalloc_range_partial() local
3621 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
3622 if (!area) in remap_vmalloc_range_partial()
3625 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3629 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
3659 * This function checks that addr is a valid vmalloc'ed area, and
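remap_vmalloc_range() (built on the partial variant above) is what an mmap handler calls to expose a vmalloc_user() buffer to userspace. A hedged sketch of such a handler; demo_mmap and the use of file->private_data are assumptions for the example:

	#include <linux/fs.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		/* Assumed to have been set to a vmalloc_user() allocation. */
		void *buf = file->private_data;

		/* Fails unless the area carries VM_USERMAP or VM_DMA_COHERENT. */
		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
	}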
3674 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
3677 ret = remove_vm_area(area->addr); in free_vm_area()
3678 BUG_ON(ret != area); in free_vm_area()
3679 kfree(area); in free_vm_area()
3693 * Returns: vmap_area if it is found. If there is no such area
3753 * @offsets: array containing offset of each area
3754 * @sizes: array containing size of each area
3771 * base address is pulled down to fit the area. Scanning is repeated till
3783 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
3789 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3790 start = offsets[area]; in pcpu_get_vm_areas()
3791 end = start + sizes[area]; in pcpu_get_vm_areas()
3794 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
3795 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
3797 /* detect the area with the highest address */ in pcpu_get_vm_areas()
3799 last_area = area; in pcpu_get_vm_areas()
3801 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
3820 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3821 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
3822 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
3823 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
3829 /* start scanning - we scan from the top, begin with the last area */ in pcpu_get_vm_areas()
3830 area = term_area = last_area; in pcpu_get_vm_areas()
3831 start = offsets[area]; in pcpu_get_vm_areas()
3832 end = start + sizes[area]; in pcpu_get_vm_areas()
3857 term_area = area; in pcpu_get_vm_areas()
3867 term_area = area; in pcpu_get_vm_areas()
3872 * This area fits, move on to the previous one. If in pcpu_get_vm_areas()
3875 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3876 if (area == term_area) in pcpu_get_vm_areas()
3879 start = offsets[area]; in pcpu_get_vm_areas()
3880 end = start + sizes[area]; in pcpu_get_vm_areas()
3885 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3888 start = base + offsets[area]; in pcpu_get_vm_areas()
3889 size = sizes[area]; in pcpu_get_vm_areas()
3903 /* Allocated area. */ in pcpu_get_vm_areas()
3904 va = vas[area]; in pcpu_get_vm_areas()
3912 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3913 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3919 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3920 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); in pcpu_get_vm_areas()
3922 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
3933 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
3934 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, in pcpu_get_vm_areas()
3935 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); in pcpu_get_vm_areas()
3947 while (area--) { in pcpu_get_vm_areas()
3948 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3949 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3950 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
3955 vas[area] = NULL; in pcpu_get_vm_areas()
3965 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3966 if (vas[area]) in pcpu_get_vm_areas()
3969 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
3971 if (!vas[area]) in pcpu_get_vm_areas()
3979 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3980 if (vas[area]) in pcpu_get_vm_areas()
3981 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
3983 kfree(vms[area]); in pcpu_get_vm_areas()
3997 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3998 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3999 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4000 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4005 vas[area] = NULL; in pcpu_get_vm_areas()
4006 kfree(vms[area]); in pcpu_get_vm_areas()
4116 * of vmap area is being torn down or vm_map_ram allocation. in s_show()