Lines Matching full:region
93 * region. This test is intentionally done in reverse order, in kobjsize()
407 * initialise the percpu counter for VM and region record slabs
419 * validate the region tree
420 * - the caller must hold the region lock
425 struct vm_region *region, *last; in validate_nommu_regions() local
437 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
440 BUG_ON(region->vm_end <= region->vm_start); in validate_nommu_regions()
441 BUG_ON(region->vm_top < region->vm_end); in validate_nommu_regions()
442 BUG_ON(region->vm_start < last->vm_top); in validate_nommu_regions()
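The three BUG_ON checks above (file lines 440-442) spell out the region tree's invariants: a region's mapped range must be non-empty (vm_start < vm_end), the underlying allocation must cover it (vm_end <= vm_top), and, walking regions in address order, no region may begin below the previous region's vm_top. Below is a minimal user-space sketch of the same walk; it checks a start-sorted array rather than the kernel's rbtree, and every name other than vm_start/vm_end/vm_top is illustrative.

#include <assert.h>
#include <stdio.h>

/* cut-down stand-in for the kernel's struct vm_region */
struct region {
	unsigned long vm_start;		/* start of the mapped range */
	unsigned long vm_end;		/* end of the part handed to the mapping */
	unsigned long vm_top;		/* end of the underlying allocation */
};

/* same checks as validate_nommu_regions(), over a start-sorted array */
static void validate_regions(const struct region *r, int n)
{
	const struct region *last = NULL;

	for (int i = 0; i < n; i++) {
		assert(r[i].vm_end > r[i].vm_start);	/* non-empty mapping */
		assert(r[i].vm_top >= r[i].vm_end);	/* allocation covers it */
		if (last)
			assert(r[i].vm_start >= last->vm_top);	/* no overlap */
		last = &r[i];
	}
}

int main(void)
{
	struct region regions[] = {
		{ 0x1000, 0x3000, 0x4000 },
		{ 0x4000, 0x5000, 0x5000 },
	};

	validate_regions(regions, 2);
	puts("region invariants hold");
	return 0;
}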
454 * add a region into the global tree
456 static void add_nommu_region(struct vm_region *region) in add_nommu_region() argument
468 if (region->vm_start < pregion->vm_start) in add_nommu_region()
470 else if (region->vm_start > pregion->vm_start) in add_nommu_region()
472 else if (pregion == region) in add_nommu_region()
478 rb_link_node(&region->vm_rb, parent, p); in add_nommu_region()
479 rb_insert_color(&region->vm_rb, &nommu_region_tree); in add_nommu_region()
485 * delete a region from the global tree
487 static void delete_nommu_region(struct vm_region *region) in delete_nommu_region() argument
492 rb_erase(&region->vm_rb, &nommu_region_tree); in delete_nommu_region()
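add_nommu_region() is an ordinary rbtree insertion keyed on vm_start: descend left or right according to how the new region's start compares with each node, and an equal key is only tolerated when the same region object is being re-added (the fragments above stop at the pregion == region test, so the handling of that case in the sketch is an assumption). delete_nommu_region() is the matching rb_erase(). The sketch below reproduces the ordering rule with a start-sorted singly-linked list standing in for the rbtree, purely to stay self-contained; add_region, delete_region and region_list are illustrative names.

#include <assert.h>
#include <stdio.h>

struct region {
	unsigned long vm_start, vm_end, vm_top;
	struct region *next;		/* stand-in for the rb_node linkage */
};

static struct region *region_list;	/* stand-in for nommu_region_tree */

/* keep the list sorted by vm_start; equal starts must be the same region */
static void add_region(struct region *new)
{
	struct region **p = &region_list;

	while (*p && (*p)->vm_start <= new->vm_start) {
		if ((*p)->vm_start == new->vm_start) {
			if (*p == new)
				return;		/* already present */
			assert(0);		/* two regions at one address */
		}
		p = &(*p)->next;
	}
	new->next = *p;
	*p = new;
}

static void delete_region(struct region *victim)
{
	struct region **p = &region_list;

	while (*p && *p != victim)
		p = &(*p)->next;
	assert(*p);			/* must have been added earlier */
	*p = victim->next;
}

int main(void)
{
	struct region a = { 0x4000, 0x5000, 0x5000, NULL };
	struct region b = { 0x1000, 0x3000, 0x4000, NULL };

	add_region(&a);
	add_region(&b);
	delete_region(&a);
	printf("remaining region starts at %#lx\n", region_list->vm_start);
	return 0;
}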
510 * release a reference to a region
511 * - the caller must hold the region semaphore for writing, which this releases
512 * - the region may not have been added to the tree yet, in which case vm_top
515 static void __put_nommu_region(struct vm_region *region) in __put_nommu_region() argument
520 if (--region->vm_usage == 0) { in __put_nommu_region()
521 if (region->vm_top > region->vm_start) in __put_nommu_region()
522 delete_nommu_region(region); in __put_nommu_region()
525 if (region->vm_file) in __put_nommu_region()
526 fput(region->vm_file); in __put_nommu_region()
530 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
531 free_page_series(region->vm_start, region->vm_top); in __put_nommu_region()
532 kmem_cache_free(vm_region_jar, region); in __put_nommu_region()
539 * release a reference to a region
541 static void put_nommu_region(struct vm_region *region) in put_nommu_region() argument
544 __put_nommu_region(region); in put_nommu_region()
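__put_nommu_region() drops the region's usage count while the caller holds the region semaphore; on the last reference it removes the region from the tree (the vm_top > vm_start test at line 521 guards against regions that were never added), drops the file reference with fput(), frees the backing page series for VM_MAPPED_COPY (private-copy) mappings, and hands the structure back to the vm_region_jar slab. A simplified user-space model of that teardown follows: free() and a plain flag stand in for free_page_series(), the slab cache and the rbtree, the file reference is omitted entirely, and the VM_MAPPED_COPY value is made up.

#include <stdio.h>
#include <stdlib.h>

#define VM_MAPPED_COPY	0x1	/* illustrative flag value */

struct region {
	int vm_usage;			/* reference count */
	unsigned long vm_flags;
	unsigned long vm_start, vm_end, vm_top;
	void *backing;			/* stands in for the page series */
	int in_tree;			/* stands in for rbtree membership */
};

static void delete_region(struct region *r)
{
	r->in_tree = 0;			/* kernel: delete_nommu_region() */
}

/* model of __put_nommu_region(): tear everything down on the last put */
static void put_region(struct region *r)
{
	if (--r->vm_usage == 0) {
		if (r->vm_top > r->vm_start)	/* was it ever added? */
			delete_region(r);
		if (r->vm_flags & VM_MAPPED_COPY)
			free(r->backing);	/* private copy: drop its pages */
		free(r);			/* back to the "slab" */
		puts("region released");
	}
}

int main(void)
{
	struct region *r = calloc(1, sizeof(*r));

	r->vm_usage = 2;
	r->vm_flags = VM_MAPPED_COPY;
	r->vm_start = 0x1000;
	r->vm_end = r->vm_top = 0x3000;
	r->backing = malloc(0x2000);
	r->in_tree = 1;

	put_region(r);		/* another user still holds a reference */
	put_region(r);		/* last user: region is torn down */
	return 0;
}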
947 struct vm_region *region, in do_mmap_private() argument
994 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
995 region->vm_start = (unsigned long) base; in do_mmap_private()
996 region->vm_end = region->vm_start + len; in do_mmap_private()
997 region->vm_top = region->vm_start + (total << PAGE_SHIFT); in do_mmap_private()
999 vma->vm_start = region->vm_start; in do_mmap_private()
1000 vma->vm_end = region->vm_start + len; in do_mmap_private()
1024 free_page_series(region->vm_start, region->vm_top); in do_mmap_private()
1025 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1026 region->vm_end = vma->vm_end = 0; in do_mmap_private()
1027 region->vm_top = 0; in do_mmap_private()
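In do_mmap_private() the region records both what was requested and what was actually allocated: vm_end = vm_start + len covers the requested length, while vm_top = vm_start + (total << PAGE_SHIFT) marks the top of the whole page series, and the error path (lines 1024-1027) zeroes every field again after freeing the pages. The sketch below shows only that arithmetic, assuming 4 KiB pages and rounding len up to whole pages for total; the real function may allocate a larger block first and trim the excess, which the sketch does not model.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

struct region {
	unsigned long vm_start, vm_end, vm_top;
};

/* fill in the region fields the way do_mmap_private() does once pages exist */
static void setup_private_region(struct region *r, unsigned long base,
				 unsigned long len)
{
	unsigned long total = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

	r->vm_start = base;
	r->vm_end = r->vm_start + len;			 /* requested length */
	r->vm_top = r->vm_start + (total << PAGE_SHIFT); /* allocated length */
}

int main(void)
{
	struct region r;

	setup_private_region(&r, 0x100000, 5000);	/* 5000 B -> 2 pages */
	printf("start %#lx end %#lx top %#lx (slack %lu bytes)\n",
	       r.vm_start, r.vm_end, r.vm_top, r.vm_top - r.vm_end);
	return 0;
}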
1050 struct vm_region *region; in do_mmap() local
1076 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); in do_mmap()
1077 if (!region) in do_mmap()
1087 region->vm_usage = 1; in do_mmap()
1088 region->vm_flags = vm_flags; in do_mmap()
1089 region->vm_pgoff = pgoff; in do_mmap()
1095 region->vm_file = get_file(file); in do_mmap()
1140 /* new mapping is not a subset of the region */ in do_mmap()
1146 /* we've found a region we can share */ in do_mmap()
1167 fput(region->vm_file); in do_mmap()
1168 kmem_cache_free(vm_region_jar, region); in do_mmap()
1169 region = pregion; in do_mmap()
1195 vma->vm_start = region->vm_start = addr; in do_mmap()
1196 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1201 vma->vm_region = region; in do_mmap()
1204 * - the region is filled in if NOMMU_MAP_DIRECT is still set in do_mmap()
1209 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1212 add_nommu_region(region); in do_mmap()
1218 memset((void *)region->vm_start, 0, in do_mmap()
1219 region->vm_end - region->vm_start); in do_mmap()
1229 /* we flush the region from the icache only when the first executable in do_mmap()
1231 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1232 flush_icache_user_range(region->vm_start, region->vm_end); in do_mmap()
1233 region->vm_icache_flushed = true; in do_mmap()
1243 if (region->vm_file) in do_mmap()
1244 fput(region->vm_file); in do_mmap()
1245 kmem_cache_free(vm_region_jar, region); in do_mmap()
1259 kmem_cache_free(vm_region_jar, region); in do_mmap()
1266 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n", in do_mmap()
1272 kmem_cache_free(vm_region_jar, region); in do_mmap()
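Taken together, the do_mmap() fragments show the region's life cycle at mapping time: a region is zeroed out of vm_region_jar, given an initial usage count, the caller's flags and page offset, and a reference on the backing file; for shareable file mappings the region tree is then searched, and if a compatible existing region is found (line 1146) the freshly allocated one is discarded with fput() and kmem_cache_free() and the shared region reused; otherwise the new region is filled in, backed via do_mmap_private() where a direct mapping is not possible, and added to the tree. The sketch below models only the share-or-create decision, keyed on a file identity and page range; this is far simpler than the real compatibility checks, and map_region, file_id and region_list are illustrative names.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12

struct region {
	int vm_usage;
	int file_id;			/* stand-in for region->vm_file */
	unsigned long vm_pgoff;		/* file offset, in pages */
	unsigned long vm_len;		/* mapped length, in bytes */
	struct region *next;
};

static struct region *region_list;	/* stand-in for nommu_region_tree */

/* reuse an existing shareable region when the new request fits inside it */
static struct region *map_region(int file_id, unsigned long pgoff,
				 unsigned long len)
{
	struct region *candidate = calloc(1, sizeof(*candidate));

	candidate->vm_usage = 1;
	candidate->file_id = file_id;
	candidate->vm_pgoff = pgoff;
	candidate->vm_len = len;

	for (struct region *r = region_list; r; r = r->next) {
		if (r->file_id == file_id && pgoff >= r->vm_pgoff &&
		    pgoff + (len >> PAGE_SHIFT) <=
		    r->vm_pgoff + (r->vm_len >> PAGE_SHIFT)) {
			r->vm_usage++;		/* found a region to share */
			free(candidate);	/* drop the unused one */
			return r;
		}
	}

	candidate->next = region_list;		/* no match: keep the new one */
	region_list = candidate;
	return candidate;
}

int main(void)
{
	struct region *a = map_region(3, 0, 0x4000);	/* pages 0-3 of file 3 */
	struct region *b = map_region(3, 1, 0x2000);	/* pages 1-2: a subset */

	printf("shared: %s, usage %d\n", a == b ? "yes" : "no", a->vm_usage);
	return 0;
}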
1341 struct vm_region *region; in split_vma() local
1346 * only a single usage on the region) */ in split_vma()
1353 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); in split_vma()
1354 if (!region) in split_vma()
1368 *region = *vma->vm_region; in split_vma()
1369 new->vm_region = region; in split_vma()
1374 region->vm_top = region->vm_end = new->vm_end = addr; in split_vma()
1376 region->vm_start = new->vm_start = addr; in split_vma()
1377 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
1406 kmem_cache_free(vm_region_jar, region); in split_vma()
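split_vma() on nommu also has to split the (singly-used) backing region: the whole structure is copied with *region = *vma->vm_region, then the range is divided at addr, with the lower half getting vm_top = vm_end = addr and the upper half getting vm_start = addr and vm_pgoff advanced by the pages it skipped (lines 1374-1377). Which half ends up attached to the new VMA depends on the new_below argument in the real function; the sketch below always hands the upper half to the new region and only demonstrates the boundary arithmetic, assuming 4 KiB pages.

#include <stdio.h>

#define PAGE_SHIFT	12

struct region {
	unsigned long vm_start, vm_end, vm_top, vm_pgoff;
};

/*
 * Split 'orig' at 'addr': 'orig' keeps the lower half, '*new' becomes the
 * upper half with its page offset advanced past the pages it no longer maps.
 */
static void split_region(struct region *orig, struct region *new,
			 unsigned long addr)
{
	unsigned long npages = (addr - orig->vm_start) >> PAGE_SHIFT;

	*new = *orig;				/* copy, as split_vma() does */

	orig->vm_top = orig->vm_end = addr;	/* lower half ends at addr */
	new->vm_start = addr;			/* upper half begins at addr */
	new->vm_pgoff += npages;		/* skip the lower half's pages */
}

int main(void)
{
	struct region low = { 0x10000, 0x18000, 0x18000, 4 };
	struct region high;

	split_region(&low, &high, 0x14000);
	printf("low  %#lx-%#lx pgoff %lu\n", low.vm_start, low.vm_end, low.vm_pgoff);
	printf("high %#lx-%#lx pgoff %lu\n", high.vm_start, high.vm_end, high.vm_pgoff);
	return 0;
}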
1418 struct vm_region *region; in shrink_vma() local
1431 /* cut the backing region down to size */ in shrink_vma()
1432 region = vma->vm_region; in shrink_vma()
1433 BUG_ON(region->vm_usage != 1); in shrink_vma()
1436 delete_nommu_region(region); in shrink_vma()
1437 if (from > region->vm_start) { in shrink_vma()
1438 to = region->vm_top; in shrink_vma()
1439 region->vm_top = region->vm_end = from; in shrink_vma()
1441 region->vm_start = to; in shrink_vma()
1443 add_nommu_region(region); in shrink_vma()
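shrink_vma() only handles regions with a single user (the BUG_ON at line 1433), and it trims the region while it is temporarily out of the tree: if the cut begins above vm_start the tail is dropped and vm_top = vm_end = from, otherwise the head is dropped and vm_start moves up to to; the released span is then handed back to free_page_series(). The sketch below covers the trimming arithmetic only, with the tree manipulation and page freeing reduced to a print.

#include <stdio.h>

struct region {
	unsigned long vm_start, vm_end, vm_top;
};

/* report which span would be handed to free_page_series() */
static void release_span(unsigned long from, unsigned long to)
{
	printf("releasing %#lx-%#lx\n", from, to);
}

/* trim [from, to) off one end of the region, as shrink_vma() does */
static void shrink_region(struct region *r, unsigned long from,
			  unsigned long to)
{
	if (from > r->vm_start) {		/* cutting the tail off */
		to = r->vm_top;
		r->vm_top = r->vm_end = from;
	} else {				/* cutting the head off */
		r->vm_start = to;
	}
	release_span(from, to);
}

int main(void)
{
	struct region r = { 0x20000, 0x28000, 0x28000 };

	shrink_region(&r, 0x26000, 0x28000);	/* unmap the last two pages */
	printf("region now %#lx-%#lx top %#lx\n", r.vm_start, r.vm_end, r.vm_top);
	return 0;
}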
1570 * as long as it stays within the region allocated by do_mmap_private() and the
1768 struct vm_region *region; in nommu_shrink_inode_mappings() local
1799 region = vma->vm_region; in nommu_shrink_inode_mappings()
1800 r_size = region->vm_top - region->vm_start; in nommu_shrink_inode_mappings()
1801 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; in nommu_shrink_inode_mappings()
1804 region->vm_top -= r_top - newsize; in nommu_shrink_inode_mappings()
1805 if (region->vm_end > region->vm_top) in nommu_shrink_inode_mappings()
1806 region->vm_end = region->vm_top; in nommu_shrink_inode_mappings()
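nommu_shrink_inode_mappings() gives back backing pages that a file truncation has left beyond the end of the file: r_top is where the region currently ends in file terms ((vm_pgoff << PAGE_SHIFT) plus the allocated size), and anything past newsize is shaved off vm_top, with vm_end clamped to the new top. The arithmetic is reproduced below, assuming 4 KiB pages; the real function also frees the surplus pages and runs under the appropriate locks.

#include <stdio.h>

#define PAGE_SHIFT	12

struct region {
	unsigned long vm_start, vm_end, vm_top, vm_pgoff;
};

/* pull the region's allocation back so it stops at the new file size */
static void shrink_to_file_size(struct region *r, unsigned long newsize)
{
	unsigned long r_size = r->vm_top - r->vm_start;
	unsigned long r_top = (r->vm_pgoff << PAGE_SHIFT) + r_size;

	if (r_top > newsize) {
		r->vm_top -= r_top - newsize;
		if (r->vm_end > r->vm_top)
			r->vm_end = r->vm_top;
	}
}

int main(void)
{
	/* region backs file pages 2..5, i.e. file offsets 0x2000-0x6000 */
	struct region r = { 0x40000, 0x44000, 0x44000, 2 };

	shrink_to_file_size(&r, 0x3000);	/* file truncated to 12 KiB */
	printf("end %#lx top %#lx\n", r.vm_end, r.vm_top);
	return 0;
}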