Lines Matching +full:memory +full:- +full:region
1 // SPDX-License-Identifier: GPL-2.0-only
6 * have any form of memory management unit (thus no virtual memory).
8 * See Documentation/admin-guide/mm/nommu-mmap.rst
10 * Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
11 * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
12 * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
14 * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
31 #include <linux/backing-dev.h>
68 * Return the total memory allocated for this pointer, not
95 * region. This test is intentionally done in reverse order, in kobjsize()
97 * PAGE_SIZE for 0-order pages. in kobjsize()
102 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
104 return vma->vm_end - vma->vm_start; in kobjsize()
115 * follow_pfn - look up PFN at a user virtual address
116 * @vma: memory mapping
122 * Returns zero and the pfn at @pfn on success, -ve otherwise.
127 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
128 return -EINVAL; in follow_pfn()
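The follow_pfn() fragments above cover the nommu PFN lookup for VM_IO/VM_PFNMAP mappings. A minimal sketch of a caller, assuming the mmap lock is already held; example_lookup_pfn() is illustrative and not part of nommu.c:

#include <linux/mm.h>

/* Illustrative caller: translate a user virtual address into a PFN.
 * The caller is assumed to hold mm->mmap_lock; returns 0 or -EINVAL. */
static int example_lookup_pfn(struct mm_struct *mm, unsigned long uaddr,
			      unsigned long *pfn)
{
	struct vm_area_struct *vma = find_vma(mm, uaddr);

	if (!vma || uaddr < vma->vm_start)
		return -EINVAL;

	/* follow_pfn() only accepts VM_IO/VM_PFNMAP mappings */
	return follow_pfn(vma, uaddr, pfn);
}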
175 mmap_write_lock(current->mm); in __vmalloc_user_flags()
176 vma = find_vma(current->mm, (unsigned long)ret); in __vmalloc_user_flags()
178 vma->vm_flags |= VM_USERMAP; in __vmalloc_user_flags()
179 mmap_write_unlock(current->mm); in __vmalloc_user_flags()
207 count = -(unsigned long) buf; in vread()
214 * vmalloc - allocate virtually contiguous memory
231 * vzalloc - allocate virtually contiguous memory with zero fill
237 * The memory allocated is set to zero.
249 * vmalloc_node - allocate memory on a specific node
266 * vzalloc_node - allocate memory on a specific node with zero fill
272 * The memory allocated is set to zero.
284 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
297 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
300 * The resulting memory area is 32bit addressable and zeroed so it can be
309 * We'll have to sort out the ZONE_DMA bits for 64-bit, in vmalloc_32_user()
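On a !MMU kernel the vmalloc() family above has no page tables to build, so these allocators hand back physically contiguous, kmalloc-backed memory. A hedged usage sketch; the size and helper name are illustrative:

#include <linux/vmalloc.h>

/* Illustrative: on nommu, "virtually contiguous" is also physically
 * contiguous, so large allocations fail sooner than on an MMU system. */
static void *example_alloc_table(void)
{
	void *tbl = vzalloc(16 * 1024);		/* zero-filled */

	if (!tbl)
		return NULL;
	/* ... populate tbl ...; release later with vfree(tbl) */
	return tbl;
}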
356 return -EINVAL; in vm_insert_page()
363 return -EINVAL; in vm_map_pages()
370 return -EINVAL; in vm_map_pages_zero()
377 * like trying to un-brk an area that has already been mapped
383 struct mm_struct *mm = current->mm; in SYSCALL_DEFINE1()
385 if (brk < mm->start_brk || brk > mm->context.end_brk) in SYSCALL_DEFINE1()
386 return mm->brk; in SYSCALL_DEFINE1()
388 if (mm->brk == brk) in SYSCALL_DEFINE1()
389 return mm->brk; in SYSCALL_DEFINE1()
394 if (brk <= mm->brk) { in SYSCALL_DEFINE1()
395 mm->brk = brk; in SYSCALL_DEFINE1()
400 * Ok, looks good - let it rip. in SYSCALL_DEFINE1()
402 flush_icache_user_range(mm->brk, brk); in SYSCALL_DEFINE1()
403 return mm->brk = brk; in SYSCALL_DEFINE1()
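The sys_brk() fragments show the nommu constraint: the break may only move inside the area reserved between mm->start_brk and mm->context.end_brk at exec time; shrinking just records the new break, growing additionally flushes the icache. A hedged userspace illustration with a hypothetical size:

#include <unistd.h>

/* Illustrative: on a nommu kernel, sbrk() succeeds only while the break
 * stays within the heap area reserved when the program was loaded;
 * beyond that, allocators typically fall back to mmap(). */
int example_grow_heap(void)
{
	if (sbrk(4096) == (void *)-1)	/* no room left in the brk region */
		return -1;
	return 0;
}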
407 * initialise the percpu counter for VM and region record slabs
419 * validate the region tree
420 * - the caller must hold the region lock
425 struct vm_region *region, *last; in validate_nommu_regions() local
433 BUG_ON(last->vm_end <= last->vm_start); in validate_nommu_regions()
434 BUG_ON(last->vm_top < last->vm_end); in validate_nommu_regions()
437 region = rb_entry(p, struct vm_region, vm_rb); in validate_nommu_regions()
440 BUG_ON(region->vm_end <= region->vm_start); in validate_nommu_regions()
441 BUG_ON(region->vm_top < region->vm_end); in validate_nommu_regions()
442 BUG_ON(region->vm_start < last->vm_top); in validate_nommu_regions()
454 * add a region into the global tree
456 static void add_nommu_region(struct vm_region *region) in add_nommu_region() argument
468 if (region->vm_start < pregion->vm_start) in add_nommu_region()
469 p = &(*p)->rb_left; in add_nommu_region()
470 else if (region->vm_start > pregion->vm_start) in add_nommu_region()
471 p = &(*p)->rb_right; in add_nommu_region()
472 else if (pregion == region) in add_nommu_region()
478 rb_link_node(&region->vm_rb, parent, p); in add_nommu_region()
479 rb_insert_color(&region->vm_rb, &nommu_region_tree); in add_nommu_region()
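add_nommu_region() above is an instance of the kernel's standard rbtree insertion idiom: descend from the root while remembering the link slot and parent, then rb_link_node() and rb_insert_color(). A condensed sketch with a hypothetical node type (not the vm_region code itself):

#include <linux/rbtree.h>

struct demo_region {			/* hypothetical node type */
	struct rb_node rb;
	unsigned long start;
};

static void demo_insert(struct rb_root *root, struct demo_region *new)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct demo_region *cur = rb_entry(*p, struct demo_region, rb);

		parent = *p;
		if (new->start < cur->start)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}
	rb_link_node(&new->rb, parent, p);	/* hook into the empty slot */
	rb_insert_color(&new->rb, root);	/* rebalance and recolour */
}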
485 * delete a region from the global tree
487 static void delete_nommu_region(struct vm_region *region) in delete_nommu_region() argument
492 rb_erase(&region->vm_rb, &nommu_region_tree); in delete_nommu_region()
510 * release a reference to a region
511 * - the caller must hold the region semaphore for writing, which this releases
512 * - the region may not have been added to the tree yet, in which case vm_top
515 static void __put_nommu_region(struct vm_region *region) in __put_nommu_region() argument
520 if (--region->vm_usage == 0) { in __put_nommu_region()
521 if (region->vm_top > region->vm_start) in __put_nommu_region()
522 delete_nommu_region(region); in __put_nommu_region()
525 if (region->vm_file) in __put_nommu_region()
526 fput(region->vm_file); in __put_nommu_region()
528 /* IO memory and memory shared directly out of the pagecache in __put_nommu_region()
530 if (region->vm_flags & VM_MAPPED_COPY) in __put_nommu_region()
531 free_page_series(region->vm_start, region->vm_top); in __put_nommu_region()
532 kmem_cache_free(vm_region_jar, region); in __put_nommu_region()
539 * release a reference to a region
541 static void put_nommu_region(struct vm_region *region) in put_nommu_region() argument
544 __put_nommu_region(region); in put_nommu_region()
551 * - should be called with mm->mmap_lock held writelocked
559 BUG_ON(!vma->vm_region); in add_vma_to_mm()
561 mm->map_count++; in add_vma_to_mm()
562 vma->vm_mm = mm; in add_vma_to_mm()
565 if (vma->vm_file) { in add_vma_to_mm()
566 mapping = vma->vm_file->f_mapping; in add_vma_to_mm()
570 vma_interval_tree_insert(vma, &mapping->i_mmap); in add_vma_to_mm()
577 p = &mm->mm_rb.rb_node; in add_vma_to_mm()
584 if (vma->vm_start < pvma->vm_start) in add_vma_to_mm()
585 p = &(*p)->rb_left; in add_vma_to_mm()
586 else if (vma->vm_start > pvma->vm_start) { in add_vma_to_mm()
588 p = &(*p)->rb_right; in add_vma_to_mm()
589 } else if (vma->vm_end < pvma->vm_end) in add_vma_to_mm()
590 p = &(*p)->rb_left; in add_vma_to_mm()
591 else if (vma->vm_end > pvma->vm_end) { in add_vma_to_mm()
593 p = &(*p)->rb_right; in add_vma_to_mm()
595 p = &(*p)->rb_left; in add_vma_to_mm()
598 p = &(*p)->rb_right; in add_vma_to_mm()
603 rb_link_node(&vma->vm_rb, parent, p); in add_vma_to_mm()
604 rb_insert_color(&vma->vm_rb, &mm->mm_rb); in add_vma_to_mm()
621 struct mm_struct *mm = vma->vm_mm; in delete_vma_from_mm()
624 mm->map_count--; in delete_vma_from_mm()
627 if (curr->vmacache.vmas[i] == vma) { in delete_vma_from_mm()
634 if (vma->vm_file) { in delete_vma_from_mm()
635 mapping = vma->vm_file->f_mapping; in delete_vma_from_mm()
639 vma_interval_tree_remove(vma, &mapping->i_mmap); in delete_vma_from_mm()
645 rb_erase(&vma->vm_rb, &mm->mm_rb); in delete_vma_from_mm()
655 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
656 vma->vm_ops->close(vma); in delete_vma()
657 if (vma->vm_file) in delete_vma()
658 fput(vma->vm_file); in delete_vma()
659 put_nommu_region(vma->vm_region); in delete_vma()
665 * - should be called with mm->mmap_lock at least held readlocked
678 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma()
679 if (vma->vm_start > addr) in find_vma()
681 if (vma->vm_end > addr) { in find_vma()
693 * - we don't extend stack VMAs under NOMMU conditions
702 * - not supported under NOMMU conditions
706 return -ENOMEM; in expand_stack()
711 * - should be called with mm->mmap_lock at least held readlocked
727 for (vma = mm->mmap; vma; vma = vma->vm_next) { in find_vma_exact()
728 if (vma->vm_start < addr) in find_vma_exact()
730 if (vma->vm_start > addr) in find_vma_exact()
732 if (vma->vm_end == end) { in find_vma_exact()
758 return -EINVAL; in validate_mmap_request()
762 return -EINVAL; in validate_mmap_request()
765 return -EINVAL; in validate_mmap_request()
770 return -ENOMEM; in validate_mmap_request()
774 return -EOVERFLOW; in validate_mmap_request()
778 if (!file->f_op->mmap) in validate_mmap_request()
779 return -ENODEV; in validate_mmap_request()
782 * - we support chardevs that provide their own "memory" in validate_mmap_request()
783 * - we support files/blockdevs that are memory backed in validate_mmap_request()
785 if (file->f_op->mmap_capabilities) { in validate_mmap_request()
786 capabilities = file->f_op->mmap_capabilities(file); in validate_mmap_request()
790 switch (file_inode(file)->i_mode & S_IFMT) { in validate_mmap_request()
804 return -EINVAL; in validate_mmap_request()
810 if (!file->f_op->get_unmapped_area) in validate_mmap_request()
812 if (!(file->f_mode & FMODE_CAN_READ)) in validate_mmap_request()
816 if (!(file->f_mode & FMODE_READ)) in validate_mmap_request()
817 return -EACCES; in validate_mmap_request()
822 !(file->f_mode & FMODE_WRITE)) in validate_mmap_request()
823 return -EACCES; in validate_mmap_request()
826 (file->f_mode & FMODE_WRITE)) in validate_mmap_request()
827 return -EACCES; in validate_mmap_request()
830 return -ENODEV; in validate_mmap_request()
835 /* we're going to read the file into private memory we in validate_mmap_request()
838 return -ENODEV; in validate_mmap_request()
854 return -EINVAL; in validate_mmap_request()
861 if (path_noexec(&file->f_path)) { in validate_mmap_request()
863 return -EPERM; in validate_mmap_request()
866 if (current->personality & READ_IMPLIES_EXEC) { in validate_mmap_request()
878 /* anonymous mappings are always memory backed and can be in validate_mmap_request()
885 (current->personality & READ_IMPLIES_EXEC)) in validate_mmap_request()
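validate_mmap_request() above is where nommu mmap() policy is enforced: private file mappings are always possible because the data is copied in, while shared file mappings need the backing store to support direct mapping. A hedged userspace illustration of the visible behaviour; the helper is not from nommu.c:

#include <sys/mman.h>

/* Illustrative: on nommu, a MAP_PRIVATE file mapping is a copy of the
 * file contents, so later changes to the file are not seen through it.
 * MAP_SHARED on an ordinary file typically fails unless the filesystem
 * or device can map it directly (ramfs, romfs, chardevs, ...). */
void *example_map_config(int fd, size_t len)
{
	return mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
}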
911 /* vm_flags |= mm->def_flags; */ in determine_vm_flags()
914 /* attempt to share read-only copies of mapped file chunks */ in determine_vm_flags()
920 * if possible - used for chardevs, ramfs/tmpfs/shmfs and in determine_vm_flags()
928 * it's being traced - otherwise breakpoints set in it may interfere in determine_vm_flags()
931 if ((flags & MAP_PRIVATE) && current->ptrace) in determine_vm_flags()
945 ret = call_mmap(vma->vm_file, vma); in do_mmap_shared_file()
947 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
950 if (ret != -ENOSYS) in do_mmap_shared_file()
953 /* getting -ENOSYS indicates that direct mmap isn't possible (as in do_mmap_shared_file()
956 return -ENODEV; in do_mmap_shared_file()
963 struct vm_region *region, in do_mmap_private() argument
972 * shared mappings on devices or memory in do_mmap_private()
973 * - VM_MAYSHARE will be set if it may attempt to share in do_mmap_private()
976 ret = call_mmap(vma->vm_file, vma); in do_mmap_private()
979 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); in do_mmap_private()
980 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
983 if (ret != -ENOSYS) in do_mmap_private()
992 /* allocate some memory to hold the mapping in do_mmap_private()
993 * - note that this may not return a page-aligned address if the object in do_mmap_private()
1000 /* we don't want to allocate a power-of-2 sized page set */ in do_mmap_private()
1001 if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages) in do_mmap_private()
1010 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
1011 region->vm_start = (unsigned long) base; in do_mmap_private()
1012 region->vm_end = region->vm_start + len; in do_mmap_private()
1013 region->vm_top = region->vm_start + (total << PAGE_SHIFT); in do_mmap_private()
1015 vma->vm_start = region->vm_start; in do_mmap_private()
1016 vma->vm_end = region->vm_start + len; in do_mmap_private()
1018 if (vma->vm_file) { in do_mmap_private()
1022 fpos = vma->vm_pgoff; in do_mmap_private()
1025 ret = kernel_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
1031 memset(base + ret, 0, len - ret); in do_mmap_private()
1040 free_page_series(region->vm_start, region->vm_top); in do_mmap_private()
1041 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1042 region->vm_end = vma->vm_end = 0; in do_mmap_private()
1043 region->vm_top = 0; in do_mmap_private()
1048 len, current->pid, current->comm); in do_mmap_private()
1050 return -ENOMEM; in do_mmap_private()
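The do_mmap_private() fragments allocate the backing pages themselves and avoid keeping a full power-of-2 block when the trim heuristic allows. A hedged worked example of the arithmetic, using hypothetical numbers:

/* Hypothetical example: a 5-page request.
 *   order = get_order(len) = 3, so total = 1 << order = 8 pages
 *   point = len >> PAGE_SHIFT = 5 pages actually needed
 * If sysctl_nr_trim_pages is non-zero and total - point = 3 >= it, the
 * allocation is trimmed to total = point = 5 pages.  Afterwards:
 *   region->vm_end = region->vm_start + len                (the mapping)
 *   region->vm_top = region->vm_start + (total << PAGE_SHIFT)
 * so vm_top records how much was really allocated (8 or 5 pages here),
 * which is what free_page_series() releases later.
 */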
1066 struct vm_region *region; in do_mmap() local
1090 region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL); in do_mmap()
1091 if (!region) in do_mmap()
1094 vma = vm_area_alloc(current->mm); in do_mmap()
1098 region->vm_usage = 1; in do_mmap()
1099 region->vm_flags = vm_flags; in do_mmap()
1100 region->vm_pgoff = pgoff; in do_mmap()
1102 vma->vm_flags = vm_flags; in do_mmap()
1103 vma->vm_pgoff = pgoff; in do_mmap()
1106 region->vm_file = get_file(file); in do_mmap()
1107 vma->vm_file = get_file(file); in do_mmap()
1114 * - we can only share with a superset match on most regular files in do_mmap()
1115 * - shared mappings on character devices and memory backed files are in do_mmap()
1124 pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap()
1130 if (!(pregion->vm_flags & VM_MAYSHARE)) in do_mmap()
1134 if (file_inode(pregion->vm_file) != in do_mmap()
1138 if (pregion->vm_pgoff >= pgend) in do_mmap()
1141 rpglen = pregion->vm_end - pregion->vm_start; in do_mmap()
1142 rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT; in do_mmap()
1143 rpgend = pregion->vm_pgoff + rpglen; in do_mmap()
1149 if ((pregion->vm_pgoff != pgoff || rpglen != pglen) && in do_mmap()
1150 !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) { in do_mmap()
1151 /* new mapping is not a subset of the region */ in do_mmap()
1157 /* we've found a region we can share */ in do_mmap()
1158 pregion->vm_usage++; in do_mmap()
1159 vma->vm_region = pregion; in do_mmap()
1160 start = pregion->vm_start; in do_mmap()
1161 start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT; in do_mmap()
1162 vma->vm_start = start; in do_mmap()
1163 vma->vm_end = start + len; in do_mmap()
1165 if (pregion->vm_flags & VM_MAPPED_COPY) in do_mmap()
1166 vma->vm_flags |= VM_MAPPED_COPY; in do_mmap()
1170 vma->vm_region = NULL; in do_mmap()
1171 vma->vm_start = 0; in do_mmap()
1172 vma->vm_end = 0; in do_mmap()
1173 pregion->vm_usage--; in do_mmap()
1178 fput(region->vm_file); in do_mmap()
1179 kmem_cache_free(vm_region_jar, region); in do_mmap()
1180 region = pregion; in do_mmap()
1186 * - this is the hook for quasi-memory character devices to in do_mmap()
1190 addr = file->f_op->get_unmapped_area(file, addr, len, in do_mmap()
1194 if (ret != -ENOSYS) in do_mmap()
1200 ret = -ENODEV; in do_mmap()
1206 vma->vm_start = region->vm_start = addr; in do_mmap()
1207 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1212 vma->vm_region = region; in do_mmap()
1215 * - the region is filled in if NOMMU_MAP_DIRECT is still set in do_mmap()
1217 if (file && vma->vm_flags & VM_SHARED) in do_mmap()
1220 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1223 add_nommu_region(region); in do_mmap()
1226 if (!vma->vm_file && in do_mmap()
1229 memset((void *)region->vm_start, 0, in do_mmap()
1230 region->vm_end - region->vm_start); in do_mmap()
1233 result = vma->vm_start; in do_mmap()
1235 current->mm->total_vm += len >> PAGE_SHIFT; in do_mmap()
1238 add_vma_to_mm(current->mm, vma); in do_mmap()
1240 /* we flush the region from the icache only when the first executable in do_mmap()
1242 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1243 flush_icache_user_range(region->vm_start, region->vm_end); in do_mmap()
1244 region->vm_icache_flushed = true; in do_mmap()
1254 if (region->vm_file) in do_mmap()
1255 fput(region->vm_file); in do_mmap()
1256 kmem_cache_free(vm_region_jar, region); in do_mmap()
1257 if (vma->vm_file) in do_mmap()
1258 fput(vma->vm_file); in do_mmap()
1265 ret = -EINVAL; in do_mmap()
1269 kmem_cache_free(vm_region_jar, region); in do_mmap()
1271 len, current->pid); in do_mmap()
1273 return -ENOMEM; in do_mmap()
1276 pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n", in do_mmap()
1277 len, current->pid); in do_mmap()
1279 return -ENOMEM; in do_mmap()
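The sharing test in the do_mmap() fragments above (pgoff/pgend against the candidate region's vm_pgoff/rpgend) accepts a new mapping only when it is the same window as, or a subset of, an existing shareable region. A hedged worked example with hypothetical page offsets:

/* Hypothetical example: an existing shareable region maps file pages
 * [4, 12): pregion->vm_pgoff = 4, rpglen = 8, rpgend = 12.
 * A new request for pages [6, 10): pgoff = 6, pglen = 4, pgend = 10.
 * It is not identical, but 6 >= 4 and 10 <= 12, so it is a subset and
 * the region is shared:
 *   vma->vm_start = pregion->vm_start + ((6 - 4) << PAGE_SHIFT)
 * A request for pages [2, 6) fails the subset test and, if no other
 * region matches, ends up with its own region instead.
 */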
1287 unsigned long retval = -EBADF; in ksys_mmap_pgoff()
1326 return -EFAULT; in SYSCALL_DEFINE1()
1328 return -EINVAL; in SYSCALL_DEFINE1()
1343 struct vm_region *region; in split_vma() local
1347 * only a single usage on the region) */ in split_vma()
1348 if (vma->vm_file) in split_vma()
1349 return -ENOMEM; in split_vma()
1351 if (mm->map_count >= sysctl_max_map_count) in split_vma()
1352 return -ENOMEM; in split_vma()
1354 region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL); in split_vma()
1355 if (!region) in split_vma()
1356 return -ENOMEM; in split_vma()
1360 kmem_cache_free(vm_region_jar, region); in split_vma()
1361 return -ENOMEM; in split_vma()
1365 *region = *vma->vm_region; in split_vma()
1366 new->vm_region = region; in split_vma()
1368 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1371 region->vm_top = region->vm_end = new->vm_end = addr; in split_vma()
1373 region->vm_start = new->vm_start = addr; in split_vma()
1374 region->vm_pgoff = new->vm_pgoff += npages; in split_vma()
1377 if (new->vm_ops && new->vm_ops->open) in split_vma()
1378 new->vm_ops->open(new); in split_vma()
1382 delete_nommu_region(vma->vm_region); in split_vma()
1384 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1385 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1387 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1388 vma->vm_region->vm_top = addr; in split_vma()
1390 add_nommu_region(vma->vm_region); in split_vma()
1391 add_nommu_region(new->vm_region); in split_vma()
1406 struct vm_region *region; in shrink_vma() local
1411 if (from > vma->vm_start) in shrink_vma()
1412 vma->vm_end = from; in shrink_vma()
1414 vma->vm_start = to; in shrink_vma()
1417 /* cut the backing region down to size */ in shrink_vma()
1418 region = vma->vm_region; in shrink_vma()
1419 BUG_ON(region->vm_usage != 1); in shrink_vma()
1422 delete_nommu_region(region); in shrink_vma()
1423 if (from > region->vm_start) { in shrink_vma()
1424 to = region->vm_top; in shrink_vma()
1425 region->vm_top = region->vm_end = from; in shrink_vma()
1427 region->vm_start = to; in shrink_vma()
1429 add_nommu_region(region); in shrink_vma()
1438 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
1449 return -EINVAL; in do_munmap()
1458 pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n", in do_munmap()
1459 current->pid, current->comm, in do_munmap()
1460 start, start + len - 1); in do_munmap()
1463 return -EINVAL; in do_munmap()
1466 /* we're allowed to split an anonymous VMA but not a file-backed one */ in do_munmap()
1467 if (vma->vm_file) { in do_munmap()
1469 if (start > vma->vm_start) in do_munmap()
1470 return -EINVAL; in do_munmap()
1471 if (end == vma->vm_end) in do_munmap()
1473 vma = vma->vm_next; in do_munmap()
1475 return -EINVAL; in do_munmap()
1478 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1480 if (start < vma->vm_start || end > vma->vm_end) in do_munmap()
1481 return -EINVAL; in do_munmap()
1483 return -EINVAL; in do_munmap()
1484 if (end != vma->vm_end && offset_in_page(end)) in do_munmap()
1485 return -EINVAL; in do_munmap()
1486 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
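do_munmap() above captures the nommu restriction that an unmap must cover exactly one VMA and that only anonymous VMAs may be split. A hedged userspace illustration; addresses and sizes are hypothetical:

#include <sys/mman.h>

/* Illustrative: 'map' is a 4-page file-backed mapping on a nommu kernel. */
int example_unmap(char *map, long pagesz)
{
	/* Unmapping a strict subrange of a file-backed VMA would need a
	 * split, which nommu refuses: this returns -1 with errno EINVAL. */
	if (munmap(map + pagesz, pagesz) == 0)
		return 0;

	/* Unmapping the whole VMA is fine. */
	return munmap(map, 4 * pagesz);
}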
1502 struct mm_struct *mm = current->mm; in vm_munmap()
1527 mm->total_vm = 0; in exit_mmap()
1529 while ((vma = mm->mmap)) { in exit_mmap()
1530 mm->mmap = vma->vm_next; in exit_mmap()
1539 return -ENOMEM; in vm_brk()
1547 * as long as it stays within the region allocated by do_mmap_private() and the
1562 return (unsigned long) -EINVAL; in do_mremap()
1565 return -EINVAL; in do_mremap()
1568 return (unsigned long) -EINVAL; in do_mremap()
1570 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1572 return (unsigned long) -EINVAL; in do_mremap()
1574 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1575 return (unsigned long) -EFAULT; in do_mremap()
1577 if (vma->vm_flags & VM_MAYSHARE) in do_mremap()
1578 return (unsigned long) -EPERM; in do_mremap()
1580 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1581 return (unsigned long) -ENOMEM; in do_mremap()
1583 /* all checks complete - do it */ in do_mremap()
1584 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1585 return vma->vm_start; in do_mremap()
1594 mmap_write_lock(current->mm); in SYSCALL_DEFINE5()
1596 mmap_write_unlock(current->mm); in SYSCALL_DEFINE5()
1610 return -EINVAL; in remap_pfn_range()
1612 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1620 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1622 pfn += vma->vm_pgoff; in vm_iomap_memory()
1623 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
1630 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1632 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1633 return -EINVAL; in remap_vmalloc_range()
1635 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1636 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
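remap_vmalloc_range() above only accepts buffers whose allocation was tagged VM_USERMAP, which is what vmalloc_user()/__vmalloc_user_flags() arrange earlier in the file. A hedged sketch of the usual driver pairing; the buffer pointer and handler name are illustrative:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *example_buf;		/* allocated with vmalloc_user() */

/* Illustrative .mmap handler: expose a vmalloc_user() buffer to userspace. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, 0);
}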
1645 return -ENOMEM; in arch_get_unmapped_area()
1676 if (addr + len >= vma->vm_end) in __access_remote_vm()
1677 len = vma->vm_end - addr; in __access_remote_vm()
1680 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
1683 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
1698 * access_remote_vm - access another process' address space
1715 * - source/target buffer must be kernel space
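access_remote_vm() above is the nommu implementation of reading or writing another process's address space, the same interface /proc/<pid>/mem and ptrace peeks go through. A minimal hedged sketch of a caller; the helper and its error handling are illustrative:

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>

/* Illustrative: copy 'len' bytes from a remote task's address space
 * into a kernel buffer; returns the number of bytes copied. */
static int example_peek(struct task_struct *tsk, unsigned long addr,
			void *buf, int len)
{
	struct mm_struct *mm = get_task_mm(tsk);
	int copied = 0;

	if (mm) {
		copied = access_remote_vm(mm, addr, buf, len, FOLL_FORCE);
		mmput(mm);
	}
	return copied;
}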
1737 * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1751 struct vm_region *region; in nommu_shrink_inode_mappings() local
1756 high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; in nommu_shrink_inode_mappings()
1759 i_mmap_lock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1762 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
1763 /* found one - only interested if it's shared out of the page in nommu_shrink_inode_mappings()
1765 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
1766 i_mmap_unlock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1768 return -ETXTBSY; /* not quite true, but near enough */ in nommu_shrink_inode_mappings()
1772 /* reduce any regions that overlap the dead zone - if in existence, in nommu_shrink_inode_mappings()
1778 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
1779 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
1782 region = vma->vm_region; in nommu_shrink_inode_mappings()
1783 r_size = region->vm_top - region->vm_start; in nommu_shrink_inode_mappings()
1784 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size; in nommu_shrink_inode_mappings()
1787 region->vm_top -= r_top - newsize; in nommu_shrink_inode_mappings()
1788 if (region->vm_end > region->vm_top) in nommu_shrink_inode_mappings()
1789 region->vm_end = region->vm_top; in nommu_shrink_inode_mappings()
1793 i_mmap_unlock_read(inode->i_mapping); in nommu_shrink_inode_mappings()
1801 * This is intended to prevent a user from starting a single memory hogging
1805 * The default value is min(3% of free memory, 128MB)
1812 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); in init_user_reserve()
1823 * to log in and kill a memory hogging process.
1833 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10); in init_admin_reserve()
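init_user_reserve() and init_admin_reserve() above derive the reserve sysctls from the free-page count expressed in KiB. A hedged sketch of the arithmetic behind the "min(3% of free memory, 128MB)" default; the constants follow that comment rather than being quoted from the listing:

#include <linux/mm.h>
#include <linux/vmstat.h>

static int __init example_init_user_reserve(void)
{
	unsigned long free_kbytes;

	/* free pages -> KiB: shift by PAGE_SHIFT - 10 */
	free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);

	/* free_kbytes / 32 is roughly 3.1% of free memory; 1UL << 17 KiB = 128 MiB */
	sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
	return 0;
}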