Lines matching refs:area (mm/vmalloc.c). Each entry gives the source line, the matching code, and the enclosing function; "local" and "argument" mark how area is declared in that function. In every function below, area is a struct vm_struct pointer, except in pcpu_get_vm_areas() where it is a loop index.
2404 struct vm_struct *area; in __get_vm_area_node() local
2416 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2417 if (unlikely(!area)) in __get_vm_area_node()
2425 kfree(area); in __get_vm_area_node()
2431 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2433 return area; in __get_vm_area_node()
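
These references trace the vm_struct lifecycle inside __get_vm_area_node(): the structure is zero-allocated on the requested node (2416), freed again if reserving a vmap_area fails (2425), and otherwise initialized by setup_vmalloc_vm() (2431) before being returned. Outside callers reach this path through the exported get_vm_area()/get_vm_area_caller() wrappers. A minimal sketch, assuming a hypothetical demo_reserve() helper:

    #include <linux/vmalloc.h>

    /* Reserve vmalloc address space without backing it with pages yet;
     * get_vm_area() funnels into __get_vm_area_node() traced above. */
    static void *demo_reserve(unsigned long size)
    {
            struct vm_struct *area;

            area = get_vm_area(size, VM_IOREMAP);
            if (!area)
                    return NULL;
            return area->addr;      /* filled in by setup_vmalloc_vm() */
    }
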
2526 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
2532 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2533 if (page_address(area->pages[i])) in set_area_direct_map()
2534 set_direct_map(area->pages[i]); in set_area_direct_map()
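
set_area_direct_map() applies a caller-supplied fixup to the direct-map (linear-map) alias of every page backing the area. A commented restatement of the loop at lines 2532-2534, as a sketch:

    /* Apply a direct-map fixup to each backing page of the area. */
    static void sketch_set_area_direct_map(const struct vm_struct *area,
                                           int (*set_direct_map)(struct page *page))
    {
            int i;

            for (i = 0; i < area->nr_pages; i++)
                    /* Highmem pages have no linear-map alias to repair;
                     * page_address() returns NULL for them. */
                    if (page_address(area->pages[i]))
                            set_direct_map(area->pages[i]);
    }
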
2538 static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) in vm_remove_mappings() argument
2541 unsigned int page_order = vm_area_page_order(area); in vm_remove_mappings()
2542 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; in vm_remove_mappings()
2546 remove_vm_area(area->addr); in vm_remove_mappings()
2566 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_remove_mappings()
2567 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_remove_mappings()
2583 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_remove_mappings()
2585 set_area_direct_map(area, set_direct_map_default_noflush); in vm_remove_mappings()
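
vm_remove_mappings() enforces a safe teardown order for areas flagged VM_FLUSH_RESET_PERMS (typically executable allocations): the vmalloc mapping is removed (2546), the direct-map aliases are first made invalid (2583), the TLB is flushed, and only then are the aliases reset to default permissions (2585), so there is no window in which a stale writable alias of formerly executable memory is reachable. Callers opt in via set_vm_flush_reset_perms(); a minimal sketch, assuming set_memory_x() is available on the target architecture:

    #include <linux/vmalloc.h>
    #include <linux/set_memory.h>

    /* Hypothetical helper: executable buffer whose direct-map aliases
     * are scrubbed on vfree() via the paths above.
     * size must be a multiple of PAGE_SIZE. */
    static void *demo_alloc_exec(unsigned long size)
    {
            void *p = __vmalloc(size, GFP_KERNEL);

            if (!p)
                    return NULL;
            set_vm_flush_reset_perms(p);    /* sets VM_FLUSH_RESET_PERMS */
            set_memory_x((unsigned long)p, size >> PAGE_SHIFT);
            return p;
    }
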
2590 struct vm_struct *area; in __vunmap() local
2599 area = find_vm_area(addr); in __vunmap()
2600 if (unlikely(!area)) { in __vunmap()
2606 debug_check_no_locks_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2607 debug_check_no_obj_freed(area->addr, get_vm_area_size(area)); in __vunmap()
2609 kasan_poison_vmalloc(area->addr, get_vm_area_size(area)); in __vunmap()
2611 vm_remove_mappings(area, deallocate_pages); in __vunmap()
2614 unsigned int page_order = vm_area_page_order(area); in __vunmap()
2617 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in __vunmap()
2618 struct page *page = area->pages[i]; in __vunmap()
2624 atomic_long_sub(area->nr_pages, &nr_vmalloc_pages); in __vunmap()
2626 kvfree(area->pages); in __vunmap()
2629 kfree(area); in __vunmap()
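
__vunmap() is the common teardown behind both vunmap() and vfree(): it looks up the vm_struct for the address (2599), runs the debug and KASAN hooks over the whole area (2606-2609), tears down the mappings (2611), and, only when deallocate_pages is set (the vfree() case), walks the page array in 1 << page_order steps to free the backing pages (2617), adjusts the global nr_vmalloc_pages counter (2624), and releases the pages[] array (2626) and the vm_struct itself (2629). The ordinary round trip:

    #include <linux/vmalloc.h>
    #include <linux/sizes.h>

    static void demo_vmalloc_roundtrip(void)
    {
            void *buf = vmalloc(SZ_64K);

            if (buf)
                    vfree(buf);     /* __vunmap(buf, 1): pages + metadata */
    }
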
2740 struct vm_struct *area; in vmap() local
2750 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
2751 if (!area) in vmap()
2754 addr = (unsigned long)area->addr; in vmap()
2757 vunmap(area->addr); in vmap()
2762 area->pages = pages; in vmap()
2763 area->nr_pages = count; in vmap()
2765 return area->addr; in vmap()
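
vmap() is the non-allocating counterpart: the caller supplies the pages, and vmap() only reserves address space (2750) and maps them, unwinding with vunmap() if page-table population fails (2757). Lines 2762-2763 record the caller's array in the area; in mainline this happens under VM_MAP_PUT_PAGES, which transfers ownership of both the pages and the (kvmalloc'ed) array so that a later vfree() releases everything. A sketch, assuming a hypothetical demo_vmap():

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Map count order-0 pages contiguously; on success the area owns
     * pages[] and the pages, so vfree(addr) cleans up (see __vunmap). */
    static void *demo_vmap(unsigned int count)
    {
            struct page **pages;
            unsigned int i;
            void *addr;

            pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return NULL;

            for (i = 0; i < count; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto err;
            }

            addr = vmap(pages, count, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
            if (addr)
                    return addr;
    err:
            while (i--)
                    __free_page(pages[i]);
            kvfree(pages);
            return NULL;
    }
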
2798 struct vm_struct *area; in vmap_pfn() local
2800 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
2802 if (!area) in vmap_pfn()
2804 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2806 free_vm_area(area); in vmap_pfn()
2809 return area->addr; in vmap_pfn()
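
vmap_pfn() (CONFIG_VMAP_PFN) maps raw page-frame numbers, i.e. memory that need not have struct page backing, such as device memory: it reserves a VM_IOREMAP area (2800) and writes the PTEs directly with apply_to_page_range() (2804), releasing the area with free_vm_area() on failure (2806). A sketch, assuming pfns[] holds the frames to map:

    #include <linux/vmalloc.h>

    /* Map count PFNs write-combined; undo with vunmap(). */
    static void *demo_map_pfns(unsigned long *pfns, unsigned int count)
    {
            return vmap_pfn(pfns, count, pgprot_writecombine(PAGE_KERNEL));
    }
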
2885 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
2890 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node()
2891 unsigned long size = get_vm_area_size(area); in __vmalloc_area_node()
2903 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, in __vmalloc_area_node()
2904 area->caller); in __vmalloc_area_node()
2906 area->pages = kmalloc_node(array_size, nested_gfp, node); in __vmalloc_area_node()
2909 if (!area->pages) { in __vmalloc_area_node()
2913 free_vm_area(area); in __vmalloc_area_node()
2917 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); in __vmalloc_area_node()
2918 page_order = vm_area_page_order(area); in __vmalloc_area_node()
2920 area->nr_pages = vm_area_alloc_pages(gfp_mask, node, in __vmalloc_area_node()
2921 page_order, nr_small_pages, area->pages); in __vmalloc_area_node()
2923 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
2929 if (area->nr_pages != nr_small_pages) { in __vmalloc_area_node()
2932 area->nr_pages * PAGE_SIZE, page_order); in __vmalloc_area_node()
2936 if (vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
2940 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
2944 return area->addr; in __vmalloc_area_node()
2947 __vfree(area->addr); in __vmalloc_area_node()
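
__vmalloc_area_node() backs a reserved area with memory in three steps: allocate the pages[] metadata array, recursing into __vmalloc_node() when the array itself exceeds a page (2903) and using kmalloc_node() otherwise (2906); bulk-allocate the backing pages at the chosen page_order (2920); and map them with vmap_pages_range() (2936). A short allocation (2929) or a mapping failure both warn and unwind through __vfree() (2947). A standalone sketch of the metadata step, with the helper values reconstructed from the surrounding mainline source (not shown in this listing; GFP_RECLAIM_MASK is mm-internal):

    /* Allocate the pages[] array itself; nested_gfp keeps only the
     * caller's reclaim bits and adds __GFP_ZERO. */
    static struct page **sketch_alloc_pages_array(struct vm_struct *area,
                                                  gfp_t gfp_mask, int node)
    {
            unsigned long nr_small_pages = get_vm_area_size(area) >> PAGE_SHIFT;
            unsigned long array_size = nr_small_pages * sizeof(struct page *);
            gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

            if (array_size > PAGE_SIZE)     /* too big for kmalloc: recurse */
                    return __vmalloc_node(array_size, 1, nested_gfp, node,
                                          area->caller);
            return kmalloc_node(array_size, nested_gfp, node);
    }
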
2974 struct vm_struct *area; in __vmalloc_node_range() local
3013 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | in __vmalloc_node_range()
3016 if (!area) { in __vmalloc_node_range()
3023 addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node); in __vmalloc_node_range()
3032 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
3035 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
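
__vmalloc_node_range() is the top-level allocator that every vmalloc() flavor funnels into: it reserves the area with VM_ALLOC | VM_UNINITIALIZED plus the caller's flags (3013), populates it via __vmalloc_area_node() (3023), clears VM_UNINITIALIZED only once the area is fully set up (3032) so that concurrent readers such as /proc/vmallocinfo never see a half-built area, and registers the object with kmemleak (3035). Direct callers choose their own address range and protections; a sketch modeled on the module_alloc() pattern:

    #include <linux/vmalloc.h>
    #include <linux/numa.h>

    static void *demo_alloc_range(unsigned long size)
    {
            return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
                                        GFP_KERNEL, PAGE_KERNEL,
                                        VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                        __builtin_return_address(0));
    }
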
3398 struct vm_struct *area; in remap_vmalloc_range_partial() local
3410 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
3411 if (!area) in remap_vmalloc_range_partial()
3414 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3418 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
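
remap_vmalloc_range_partial() validates before remapping kernel memory into userspace: the kernel address must belong to a known area (3410), that area must have been created with VM_USERMAP or VM_DMA_COHERENT (3414), and the requested window must fit inside the area (3418). In practice the buffer comes from vmalloc_user(), which sets VM_USERMAP. A typical driver mmap handler, with demo_buf as a hypothetical module-level buffer:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *demo_buf;          /* allocated with vmalloc_user() */

    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            /* Wraps remap_vmalloc_range_partial(); fails with -EINVAL
             * unless demo_buf passes the VM_USERMAP check above. */
            return remap_vmalloc_range(vma, demo_buf, vma->vm_pgoff);
    }
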
3463 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
3466 ret = remove_vm_area(area->addr); in free_vm_area()
3467 BUG_ON(ret != area); in free_vm_area()
3468 kfree(area); in free_vm_area()
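
free_vm_area() is the teardown for areas obtained with get_vm_area(): remove_vm_area() unmaps the range and returns the vm_struct registered for that address, the BUG_ON (3467) asserts that the lookup found the same structure, and kfree() releases the metadata. Freeing whatever the caller mapped into the range remains the caller's job. The pairing, as a sketch:

    #include <linux/vmalloc.h>

    static void demo_area_roundtrip(void)
    {
            struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);

            if (!area)
                    return;
            /* ... populate area->addr, e.g. with ioremap_page_range() ... */
            free_vm_area(area);     /* unmaps and kfree()s the vm_struct */
    }
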
3572 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
3579 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3580 start = offsets[area]; in pcpu_get_vm_areas()
3581 end = start + sizes[area]; in pcpu_get_vm_areas()
3584 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
3585 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
3589 last_area = area; in pcpu_get_vm_areas()
3591 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
3610 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3611 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
3612 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
3613 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
3620 area = term_area = last_area; in pcpu_get_vm_areas()
3621 start = offsets[area]; in pcpu_get_vm_areas()
3622 end = start + sizes[area]; in pcpu_get_vm_areas()
3647 term_area = area; in pcpu_get_vm_areas()
3657 term_area = area; in pcpu_get_vm_areas()
3665 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
3666 if (area == term_area) in pcpu_get_vm_areas()
3669 start = offsets[area]; in pcpu_get_vm_areas()
3670 end = start + sizes[area]; in pcpu_get_vm_areas()
3675 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3678 start = base + offsets[area]; in pcpu_get_vm_areas()
3679 size = sizes[area]; in pcpu_get_vm_areas()
3696 va = vas[area]; in pcpu_get_vm_areas()
3704 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3705 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
3708 kasan_unpoison_vmalloc((void *)vas[area]->va_start, in pcpu_get_vm_areas()
3709 sizes[area]); in pcpu_get_vm_areas()
3714 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3715 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); in pcpu_get_vm_areas()
3717 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
3732 while (area--) { in pcpu_get_vm_areas()
3733 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3734 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3735 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
3740 vas[area] = NULL; in pcpu_get_vm_areas()
3750 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3751 if (vas[area]) in pcpu_get_vm_areas()
3754 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
3756 if (!vas[area]) in pcpu_get_vm_areas()
3764 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3765 if (vas[area]) in pcpu_get_vm_areas()
3766 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
3768 kfree(vms[area]); in pcpu_get_vm_areas()
3782 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
3783 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
3784 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
3785 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
3790 vas[area] = NULL; in pcpu_get_vm_areas()
3791 kfree(vms[area]); in pcpu_get_vm_areas()
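
In pcpu_get_vm_areas(), unlike everywhere above, area is not a vm_struct pointer but an index over the nr_vms requested regions (with area2 for the overlap checks and term_area terminating the backward search ring at 3647-3666). The function verifies alignment and non-overlap, finds one base address at which all regions fit at their fixed offsets, then installs a vmap_area/vm_struct pair per region; both error paths merge any partially inserted areas back into the free tree (3732-3740, 3782-3791). Its only caller is the percpu allocator; condensed from pcpu_create_chunk() in mm/percpu-vm.c:

    /* One vm_struct per percpu group, all at fixed offsets from a
     * common base, aligned to the percpu atom size. */
    struct vm_struct **vms;

    vms = pcpu_get_vm_areas(pcpu_group_offsets, pcpu_group_sizes,
                            pcpu_nr_groups, pcpu_atom_size);
    if (!vms)
            return NULL;            /* chunk creation fails */
    chunk->data = vms;
    chunk->base_addr = vms[0]->addr - pcpu_group_offsets[0];
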