Lines matching refs: area
Each entry below is a cross-reference hit for the identifier 'area' in the kernel's vmalloc code: the leading number is the line in the source file, followed by the matching statement and the enclosing function.
2576 struct vm_struct *area; in __get_vm_area_node() local
2588 area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node); in __get_vm_area_node()
2589 if (unlikely(!area)) in __get_vm_area_node()
2597 kfree(area); in __get_vm_area_node()
2601 setup_vmalloc_vm(area, va, flags, caller); in __get_vm_area_node()
2612 area->addr = kasan_unpoison_vmalloc(area->addr, requested_size, in __get_vm_area_node()
2615 return area; in __get_vm_area_node()
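Taken together, the __get_vm_area_node() fragments show the allocate-then-describe pattern behind every vmalloc area: the struct vm_struct descriptor is kzalloc'ed first, an address range is reserved for it, the two are tied together by setup_vmalloc_vm(), and the start of the range is unpoisoned for KASAN before the descriptor is returned. A rough reconstruction follows; the alloc_vmap_area() reservation step and its error handling are inferred from context, not quoted, and the KASAN call is conditional on the mapping type:

        /* Sketch reconstructed from the fragments above, not verbatim source. */
        static struct vm_struct *__get_vm_area_node(unsigned long size, unsigned long align,
                        unsigned long shift, unsigned long flags, unsigned long start,
                        unsigned long end, int node, gfp_t gfp_mask, const void *caller)
        {
                unsigned long requested_size = size;
                struct vm_struct *area;
                struct vmap_area *va;

                area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
                if (unlikely(!area))
                        return NULL;

                /* Assumed step: reserve a kernel address range for the mapping. */
                va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
                if (IS_ERR(va)) {
                        kfree(area);
                        return NULL;
                }

                setup_vmalloc_vm(area, va, flags, caller);

                /*
                 * Only non-VM_ALLOC mappings are unpoisoned here; VM_ALLOC areas
                 * are unpoisoned after their pages are mapped (see line 3337 below).
                 */
                if (!(flags & VM_ALLOC))
                        area->addr = kasan_unpoison_vmalloc(area->addr, requested_size,
                                                            KASAN_VMALLOC_PROT_NORMAL);

                return area;
        }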
2709 static inline void set_area_direct_map(const struct vm_struct *area, in set_area_direct_map() argument
2715 for (i = 0; i < area->nr_pages; i++) in set_area_direct_map()
2716 if (page_address(area->pages[i])) in set_area_direct_map()
2717 set_direct_map(area->pages[i]); in set_area_direct_map()
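The three set_area_direct_map() fragments are essentially the whole helper: it walks every backing page of the area and applies the supplied direct-map operation to each page that actually has a direct-map address. Filled out under that reading:

        /*
         * Sketch: apply set_direct_map() (e.g. set_direct_map_invalid_noflush)
         * to every backing page of the area that is present in the direct map.
         */
        static inline void set_area_direct_map(const struct vm_struct *area,
                                               int (*set_direct_map)(struct page *page))
        {
                int i;

                for (i = 0; i < area->nr_pages; i++)
                        if (page_address(area->pages[i]))
                                set_direct_map(area->pages[i]);
        }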
2723 static void vm_reset_perms(struct vm_struct *area) in vm_reset_perms() argument
2726 unsigned int page_order = vm_area_page_order(area); in vm_reset_perms()
2734 for (i = 0; i < area->nr_pages; i += 1U << page_order) { in vm_reset_perms()
2735 unsigned long addr = (unsigned long)page_address(area->pages[i]); in vm_reset_perms()
2752 set_area_direct_map(area, set_direct_map_invalid_noflush); in vm_reset_perms()
2754 set_area_direct_map(area, set_direct_map_default_noflush); in vm_reset_perms()
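vm_reset_perms() is the teardown side of VM_FLUSH_RESET_PERMS mappings. From the fragments: it computes the area's page order, walks the backing pages in page_order-sized steps to find the direct-map range that needs flushing, invalidates the direct-map entries, flushes, and then restores the default permissions. A condensed sketch; the range bookkeeping and the aliased-mapping flush in the middle are inferred from context rather than quoted:

        /* Sketch of vm_reset_perms(), reconstructed from the fragments above. */
        static void vm_reset_perms(struct vm_struct *area)
        {
                unsigned long start = ULONG_MAX, end = 0;
                unsigned int page_order = vm_area_page_order(area);
                int flush_dmap = 0;
                int i;

                /* Find the direct-map range covered by the backing pages. */
                for (i = 0; i < area->nr_pages; i += 1U << page_order) {
                        unsigned long addr = (unsigned long)page_address(area->pages[i]);

                        if (addr) {
                                unsigned long page_size = PAGE_SIZE << page_order;

                                start = min(addr, start);
                                end = max(addr + page_size, end);
                                flush_dmap = 1;
                        }
                }

                /*
                 * Invalidate the direct map, flush aliases/TLB entries for the
                 * range, then restore the default direct-map permissions.
                 */
                set_area_direct_map(area, set_direct_map_invalid_noflush);
                _vm_unmap_aliases(start, end, flush_dmap);      /* assumed flush step */
                set_area_direct_map(area, set_direct_map_default_noflush);
        }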
2897 struct vm_struct *area; in vmap() local
2917 area = get_vm_area_caller(size, flags, __builtin_return_address(0)); in vmap()
2918 if (!area) in vmap()
2921 addr = (unsigned long)area->addr; in vmap()
2924 vunmap(area->addr); in vmap()
2929 area->pages = pages; in vmap()
2930 area->nr_pages = count; in vmap()
2932 return area->addr; in vmap()
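The vmap() fragments show the exported helper that wraps this machinery: it reserves an area with get_vm_area_caller(), maps caller-provided pages into it, and records the page array in the vm_struct (lines 2929-2930) only when the caller passes VM_MAP_PUT_PAGES, in which case a later vfree() also releases the pages. A hypothetical round-trip caller that keeps ownership of its pages (demo_vmap_roundtrip and its error handling are illustrative, not from the source):

        #include <linux/vmalloc.h>
        #include <linux/slab.h>
        #include <linux/gfp.h>
        #include <linux/mm.h>
        #include <linux/string.h>

        /* Map `count` independently allocated pages into one contiguous range,
         * touch the mapping, then tear everything down again. */
        static int demo_vmap_roundtrip(unsigned int count)
        {
                struct page **pages;
                unsigned int i;
                void *va;
                int ret = -ENOMEM;

                pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
                if (!pages)
                        return -ENOMEM;

                for (i = 0; i < count; i++) {
                        pages[i] = alloc_page(GFP_KERNEL);
                        if (!pages[i])
                                goto out_pages;
                }

                /* VM_MAP: a plain page-backed mapping; PAGE_KERNEL: normal RW. */
                va = vmap(pages, count, VM_MAP, PAGE_KERNEL);
                if (!va)
                        goto out_pages;

                memset(va, 0, (size_t)count << PAGE_SHIFT);     /* touch the range */

                vunmap(va);     /* with plain VM_MAP the pages stay owned by us */
                ret = 0;

        out_pages:
                while (i--)
                        __free_page(pages[i]);
                kvfree(pages);
                return ret;
        }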
2971 struct vm_struct *area; in vmap_pfn() local
2973 area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP, in vmap_pfn()
2975 if (!area) in vmap_pfn()
2977 if (apply_to_page_range(&init_mm, (unsigned long)area->addr, in vmap_pfn()
2979 free_vm_area(area); in vmap_pfn()
2983 flush_cache_vmap((unsigned long)area->addr, in vmap_pfn()
2984 (unsigned long)area->addr + count * PAGE_SIZE); in vmap_pfn()
2986 return area->addr; in vmap_pfn()
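vmap_pfn() is the PFN-based sibling used for memory that has no struct page: it builds a VM_IOREMAP area (line 2973) and populates it with apply_to_page_range(). A hypothetical caller, assuming CONFIG_VMAP_PFN and a physically contiguous PFN range purely for brevity (demo_map_pfns is illustrative):

        #include <linux/vmalloc.h>
        #include <linux/slab.h>

        /* Map `count` page frames into a contiguous, uncached kernel range. */
        static void *demo_map_pfns(unsigned long first_pfn, unsigned int count)
        {
                unsigned long *pfns;
                unsigned int i;
                void *va;

                pfns = kcalloc(count, sizeof(*pfns), GFP_KERNEL);
                if (!pfns)
                        return NULL;

                for (i = 0; i < count; i++)
                        pfns[i] = first_pfn + i;

                va = vmap_pfn(pfns, count, pgprot_noncached(PAGE_KERNEL));
                kfree(pfns);            /* the PFN values are consumed by vmap_pfn() */
                return va;
        }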
3101 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask, in __vmalloc_area_node() argument
3107 unsigned long addr = (unsigned long)area->addr; in __vmalloc_area_node()
3108 unsigned long size = get_vm_area_size(area); in __vmalloc_area_node()
3122 area->pages = __vmalloc_node(array_size, 1, nested_gfp, node, in __vmalloc_area_node()
3123 area->caller); in __vmalloc_area_node()
3125 area->pages = kmalloc_node(array_size, nested_gfp, node); in __vmalloc_area_node()
3128 if (!area->pages) { in __vmalloc_area_node()
3132 free_vm_area(area); in __vmalloc_area_node()
3136 set_vm_area_page_order(area, page_shift - PAGE_SHIFT); in __vmalloc_area_node()
3137 page_order = vm_area_page_order(area); in __vmalloc_area_node()
3139 area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN, in __vmalloc_area_node()
3140 node, page_order, nr_small_pages, area->pages); in __vmalloc_area_node()
3142 atomic_long_add(area->nr_pages, &nr_vmalloc_pages); in __vmalloc_area_node()
3146 for (i = 0; i < area->nr_pages; i++) in __vmalloc_area_node()
3147 mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1); in __vmalloc_area_node()
3154 if (area->nr_pages != nr_small_pages) { in __vmalloc_area_node()
3168 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3182 ret = vmap_pages_range(addr, addr + size, prot, area->pages, in __vmalloc_area_node()
3196 area->nr_pages * PAGE_SIZE); in __vmalloc_area_node()
3200 return area->addr; in __vmalloc_area_node()
3203 vfree(area->addr); in __vmalloc_area_node()
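__vmalloc_area_node() is the core allocation path that fills a freshly reserved area with pages. The fragments outline its skeleton: allocate the page-pointer array (itself via vmalloc if larger than a page), record the huge-page order, bulk-allocate the pages, account them, bail out on partial allocation, and finally map them with vmap_pages_range(). A condensed reconstruction; the warning messages, the __GFP_NOFAIL retry handling and the memalloc scope plumbing of the real function are omitted:

        /* Skeleton reconstructed from the fragments above, not verbatim source. */
        static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
                                         pgprot_t prot, unsigned int page_shift, int node)
        {
                const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
                unsigned long addr = (unsigned long)area->addr;
                unsigned long size = get_vm_area_size(area);
                unsigned int nr_small_pages = size >> PAGE_SHIFT;
                unsigned long array_size;
                unsigned int page_order;
                int i;

                array_size = (unsigned long)nr_small_pages * sizeof(struct page *);

                /* The page-pointer array itself may need vmalloc when it is large. */
                if (array_size > PAGE_SIZE)
                        area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
                                                     area->caller);
                else
                        area->pages = kmalloc_node(array_size, nested_gfp, node);

                if (!area->pages) {
                        free_vm_area(area);
                        return NULL;
                }

                set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
                page_order = vm_area_page_order(area);

                area->nr_pages = vm_area_alloc_pages(gfp_mask | __GFP_NOWARN,
                                                     node, page_order, nr_small_pages,
                                                     area->pages);

                atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
                if (gfp_mask & __GFP_ACCOUNT)
                        for (i = 0; i < area->nr_pages; i++)
                                mod_memcg_page_state(area->pages[i], MEMCG_VMALLOC, 1);

                /* Partial allocation: bail out and let vfree() release what exists. */
                if (area->nr_pages != nr_small_pages)
                        goto fail;

                if (vmap_pages_range(addr, addr + size, prot, area->pages,
                                     page_shift) < 0)
                        goto fail;

                return area->addr;

        fail:
                vfree(area->addr);
                return NULL;
        }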
3240 struct vm_struct *area; in __vmalloc_node_range() local
3280 area = __get_vm_area_node(real_size, align, shift, VM_ALLOC | in __vmalloc_node_range()
3283 if (!area) { in __vmalloc_node_range()
3320 ret = __vmalloc_area_node(area, gfp_mask, prot, shift, node); in __vmalloc_node_range()
3337 area->addr = kasan_unpoison_vmalloc(area->addr, real_size, kasan_flags); in __vmalloc_node_range()
3344 clear_vm_uninitialized_flag(area); in __vmalloc_node_range()
3348 kmemleak_vmalloc(area, size, gfp_mask); in __vmalloc_node_range()
3350 return area->addr; in __vmalloc_node_range()
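__vmalloc_node_range() stitches the previous steps together: reserve the area (line 3280), populate it (line 3320), unpoison the VM_ALLOC mapping for KASAN (line 3337), then clear the uninitialized flag and inform kmemleak. A hypothetical caller in the style of an arch module_alloc(), shown only for the parameter layout (MODULES_VADDR, MODULES_END and MODULE_ALIGN are arch-provided constants; the flag choice is illustrative):

        #include <linux/vmalloc.h>
        #include <linux/moduleloader.h>
        #include <linux/numa.h>

        /* Hypothetical caller: allocate inside a dedicated VA window.
         * VM_FLUSH_RESET_PERMS is what later routes the free path through
         * vm_reset_perms() shown earlier in this listing. */
        static void *demo_alloc_in_window(unsigned long size)
        {
                return __vmalloc_node_range(size, MODULE_ALIGN,
                                            MODULES_VADDR, MODULES_END,
                                            GFP_KERNEL, PAGE_KERNEL,
                                            VM_FLUSH_RESET_PERMS, NUMA_NO_NODE,
                                            __builtin_return_address(0));
        }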
3856 struct vm_struct *area; in remap_vmalloc_range_partial() local
3868 area = find_vm_area(kaddr); in remap_vmalloc_range_partial()
3869 if (!area) in remap_vmalloc_range_partial()
3872 if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT))) in remap_vmalloc_range_partial()
3876 end_index > get_vm_area_size(area)) in remap_vmalloc_range_partial()
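remap_vmalloc_range_partial() is the bridge to userspace: find_vm_area() resolves the kernel address back to its vm_struct, and the remap is only allowed for areas explicitly marked VM_USERMAP or VM_DMA_COHERENT, and only within the area's size. In driver code this is usually reached through remap_vmalloc_range(); a hypothetical mmap handler (demo_mmap is illustrative, and the buffer is assumed to come from vmalloc_user(), which sets VM_USERMAP):

        #include <linux/fs.h>
        #include <linux/mm.h>
        #include <linux/vmalloc.h>

        /* Hypothetical mmap handler exposing a vmalloc_user() buffer to userspace. */
        static int demo_mmap(struct file *file, struct vm_area_struct *vma)
        {
                void *buf = file->private_data; /* assumed: set up via vmalloc_user() */

                return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
        }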
3921 void free_vm_area(struct vm_struct *area) in free_vm_area() argument
3924 ret = remove_vm_area(area->addr); in free_vm_area()
3925 BUG_ON(ret != area); in free_vm_area()
3926 kfree(area); in free_vm_area()
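free_vm_area() is the counterpart of get_vm_area()-style reservations: remove_vm_area() unmaps and unlinks the range (the BUG_ON insists the descriptor it returns is the one being freed), and the vm_struct is then kfree'd. A hypothetical reserve/release pair (demo_reserve_range is illustrative):

        #include <linux/vmalloc.h>

        /* Reserve a bare kernel address range with no backing pages, let some
         * other mechanism install PTEs into it, then release it again. */
        static int demo_reserve_range(unsigned long size)
        {
                struct vm_struct *area;

                area = get_vm_area(size, VM_IOREMAP);
                if (!area)
                        return -ENOMEM;

                /* ... install mappings into [area->addr, area->addr + size) ... */

                free_vm_area(area);     /* removes the vmap area and kfrees the struct */
                return 0;
        }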
4030 int area, area2, last_area, term_area; in pcpu_get_vm_areas() local
4036 for (last_area = 0, area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4037 start = offsets[area]; in pcpu_get_vm_areas()
4038 end = start + sizes[area]; in pcpu_get_vm_areas()
4041 BUG_ON(!IS_ALIGNED(offsets[area], align)); in pcpu_get_vm_areas()
4042 BUG_ON(!IS_ALIGNED(sizes[area], align)); in pcpu_get_vm_areas()
4046 last_area = area; in pcpu_get_vm_areas()
4048 for (area2 = area + 1; area2 < nr_vms; area2++) { in pcpu_get_vm_areas()
4067 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4068 vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL); in pcpu_get_vm_areas()
4069 vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL); in pcpu_get_vm_areas()
4070 if (!vas[area] || !vms[area]) in pcpu_get_vm_areas()
4077 area = term_area = last_area; in pcpu_get_vm_areas()
4078 start = offsets[area]; in pcpu_get_vm_areas()
4079 end = start + sizes[area]; in pcpu_get_vm_areas()
4104 term_area = area; in pcpu_get_vm_areas()
4114 term_area = area; in pcpu_get_vm_areas()
4122 area = (area + nr_vms - 1) % nr_vms; in pcpu_get_vm_areas()
4123 if (area == term_area) in pcpu_get_vm_areas()
4126 start = offsets[area]; in pcpu_get_vm_areas()
4127 end = start + sizes[area]; in pcpu_get_vm_areas()
4132 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4135 start = base + offsets[area]; in pcpu_get_vm_areas()
4136 size = sizes[area]; in pcpu_get_vm_areas()
4151 va = vas[area]; in pcpu_get_vm_areas()
4159 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4160 if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area])) in pcpu_get_vm_areas()
4166 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4167 insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list); in pcpu_get_vm_areas()
4169 setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC, in pcpu_get_vm_areas()
4180 for (area = 0; area < nr_vms; area++) in pcpu_get_vm_areas()
4181 vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr, in pcpu_get_vm_areas()
4182 vms[area]->size, KASAN_VMALLOC_PROT_NORMAL); in pcpu_get_vm_areas()
4194 while (area--) { in pcpu_get_vm_areas()
4195 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
4196 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4197 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4202 vas[area] = NULL; in pcpu_get_vm_areas()
4212 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4213 if (vas[area]) in pcpu_get_vm_areas()
4216 vas[area] = kmem_cache_zalloc( in pcpu_get_vm_areas()
4218 if (!vas[area]) in pcpu_get_vm_areas()
4226 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4227 if (vas[area]) in pcpu_get_vm_areas()
4228 kmem_cache_free(vmap_area_cachep, vas[area]); in pcpu_get_vm_areas()
4230 kfree(vms[area]); in pcpu_get_vm_areas()
4244 for (area = 0; area < nr_vms; area++) { in pcpu_get_vm_areas()
4245 orig_start = vas[area]->va_start; in pcpu_get_vm_areas()
4246 orig_end = vas[area]->va_end; in pcpu_get_vm_areas()
4247 va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root, in pcpu_get_vm_areas()
4252 vas[area] = NULL; in pcpu_get_vm_areas()
4253 kfree(vms[area]); in pcpu_get_vm_areas()
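pcpu_get_vm_areas() reserves several vmalloc ranges whose addresses keep fixed offsets from one congruent base, which is what the percpu allocator needs for its chunks; most of the references above are the backwards walk that searches for a base satisfying every offset, plus the unwind paths that merge the vmap areas back into the free tree on failure. A hypothetical caller, assuming CONFIG_SMP and offsets/sizes that satisfy the alignment BUG_ONs at lines 4041-4042 (demo_reserve_percpu_groups is illustrative):

        #include <linux/vmalloc.h>
        #include <linux/sizes.h>
        #include <linux/kernel.h>

        /* Reserve two groups that stay SZ_4M apart from a common base; both
         * offsets and sizes are SZ_2M-aligned as the BUG_ONs require. */
        static struct vm_struct **demo_reserve_percpu_groups(void)
        {
                static const unsigned long offsets[] = { 0, SZ_4M };
                static const size_t sizes[] = { SZ_2M, SZ_2M };
                struct vm_struct **vms;

                vms = pcpu_get_vm_areas(offsets, sizes, ARRAY_SIZE(sizes), SZ_2M);
                if (!vms)
                        return NULL;

                /* vms[i]->addr == base + offsets[i]; release with pcpu_free_vm_areas(). */
                return vms;
        }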