Lines matching refs: vma (identifier cross-reference for the nommu memory manager, mm/nommu.c). Each entry gives the source line number, the matching code, and the function it appears in, annotated "local" or "argument".

98 struct vm_area_struct *vma; in kobjsize() local
100 vma = find_vma(current->mm, (unsigned long)objp); in kobjsize()
101 if (vma) in kobjsize()
102 return vma->vm_end - vma->vm_start; in kobjsize()
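On nommu kernels, kobjsize() falls back to the lookup above for pointers the slab and page allocators do not claim: the object's size is just the extent of its containing mapping. A minimal sketch of that pattern (the helper name is made up; find_vma() needs the mmap lock held):

    #include <linux/mm.h>

    /* hypothetical helper mirroring lines 98-102 */
    static unsigned long vma_extent_of(const void *objp)
    {
            struct vm_area_struct *vma;
            unsigned long size = 0;

            mmap_read_lock(current->mm);
            vma = find_vma(current->mm, (unsigned long)objp);
            if (vma)
                    size = vma->vm_end - vma->vm_start;
            mmap_read_unlock(current->mm);
            return size;
    }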
122 int follow_pfn(struct vm_area_struct *vma, unsigned long address, in follow_pfn() argument
125 if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) in follow_pfn()
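The flag check at line 125 is essentially the whole nommu implementation: there are no page tables to walk, so the PFN can only be derived from the address itself, and only VM_IO/VM_PFNMAP mappings may be treated that way. A plausible completion of the body:

    int follow_pfn(struct vm_area_struct *vma, unsigned long address,
                   unsigned long *pfn)
    {
            if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
                    return -EINVAL;

            /* nommu: addresses are physical, so the PFN falls out directly */
            *pfn = address >> PAGE_SHIFT;
            return 0;
    }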
171 struct vm_area_struct *vma; in __vmalloc_user_flags() local
174 vma = find_vma(current->mm, (unsigned long)ret); in __vmalloc_user_flags()
175 if (vma) in __vmalloc_user_flags()
176 vma->vm_flags |= VM_USERMAP; in __vmalloc_user_flags()
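The VM_USERMAP set here is half of a handshake with remap_vmalloc_range() (line 1655 below): only buffers whose kernel-side VMA carries the flag may later be mapped to userspace. A hypothetical driver pairing, assuming demo_buf was allocated with vmalloc_user() at open time:

    #include <linux/fs.h>
    #include <linux/vmalloc.h>

    static void *demo_buf;  /* assumed: vmalloc_user(DEMO_LEN) in open() */

    /* vmalloc_user() goes through the path above and tags the allocation
     * with VM_USERMAP, so this mmap handler is allowed to succeed */
    static int demo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            return remap_vmalloc_range(vma, demo_buf, 0);
    }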
353 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, in vm_insert_page() argument
360 int vm_map_pages(struct vm_area_struct *vma, struct page **pages, in vm_map_pages() argument
367 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages, in vm_map_pages_zero() argument
547 void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas) in vma_mas_store() argument
549 mas_set_range(mas, vma->vm_start, vma->vm_end - 1); in vma_mas_store()
550 mas_store_prealloc(mas, vma); in vma_mas_store()
553 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas) in vma_mas_remove() argument
555 mas->index = vma->vm_start; in vma_mas_remove()
556 mas->last = vma->vm_end - 1; in vma_mas_remove()
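Both helpers encode the same two conventions: the maple tree stores inclusive ranges, so a VMA covering [vm_start, vm_end) is indexed as [vm_start, vm_end - 1], and the caller preallocates nodes so the store itself cannot fail. A plausible completion of vma_mas_remove(), including the store the listing omits:

    void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas)
    {
            mas->index = vma->vm_start;
            mas->last = vma->vm_end - 1;
            /* storing NULL over the range erases the entry; nodes were
             * preallocated, so this cannot fail here */
            mas_store_prealloc(mas, NULL);
    }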
560 static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm) in setup_vma_to_mm() argument
563 vma->vm_mm = mm; in setup_vma_to_mm()
566 if (vma->vm_file) { in setup_vma_to_mm()
567 struct address_space *mapping = vma->vm_file->f_mapping; in setup_vma_to_mm()
571 vma_interval_tree_insert(vma, &mapping->i_mmap); in setup_vma_to_mm()
585 struct vm_area_struct *vma) in mas_add_vma_to_mm() argument
587 BUG_ON(!vma->vm_region); in mas_add_vma_to_mm()
589 setup_vma_to_mm(vma, mm); in mas_add_vma_to_mm()
592 vma_mas_store(vma, mas); in mas_add_vma_to_mm()
601 static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) in add_vma_to_mm() argument
603 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end); in add_vma_to_mm()
605 if (mas_preallocate(&mas, vma, GFP_KERNEL)) { in add_vma_to_mm()
610 mas_add_vma_to_mm(&mas, mm, vma); in add_vma_to_mm()
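The mas_preallocate() at line 605 is the standard two-phase maple-tree update: reserve nodes while failure is still recoverable, then perform the store that must not fail once mm state starts changing. The whole function, reconstructed with an assumed error message:

    static int add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
    {
            MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end);

            if (mas_preallocate(&mas, vma, GFP_KERNEL)) {
                    pr_warn("Allocation of vma tree for process %d failed\n",
                            current->pid);
                    return -ENOMEM;
            }
            mas_add_vma_to_mm(&mas, mm, vma);  /* cannot fail past here */
            return 0;
    }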
614 static void cleanup_vma_from_mm(struct vm_area_struct *vma) in cleanup_vma_from_mm() argument
616 vma->vm_mm->map_count--; in cleanup_vma_from_mm()
618 if (vma->vm_file) { in cleanup_vma_from_mm()
620 mapping = vma->vm_file->f_mapping; in cleanup_vma_from_mm()
624 vma_interval_tree_remove(vma, &mapping->i_mmap); in cleanup_vma_from_mm()
632 static int delete_vma_from_mm(struct vm_area_struct *vma) in delete_vma_from_mm() argument
634 MA_STATE(mas, &vma->vm_mm->mm_mt, 0, 0); in delete_vma_from_mm()
636 if (mas_preallocate(&mas, vma, GFP_KERNEL)) { in delete_vma_from_mm()
641 cleanup_vma_from_mm(vma); in delete_vma_from_mm()
644 vma_mas_remove(vma, &mas); in delete_vma_from_mm()
651 static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) in delete_vma() argument
653 if (vma->vm_ops && vma->vm_ops->close) in delete_vma()
654 vma->vm_ops->close(vma); in delete_vma()
655 if (vma->vm_file) in delete_vma()
656 fput(vma->vm_file); in delete_vma()
657 put_nommu_region(vma->vm_region); in delete_vma()
658 vm_area_free(vma); in delete_vma()
697 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
710 struct vm_area_struct *vma; in find_vma_exact() local
714 vma = mas_walk(&mas); in find_vma_exact()
715 if (!vma) in find_vma_exact()
717 if (vma->vm_start != addr) in find_vma_exact()
719 if (vma->vm_end != end) in find_vma_exact()
722 return vma; in find_vma_exact()
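find_vma_exact() is the strict lookup used by do_mremap() (line 1593 below): unlike find_vma(), it rejects any mapping whose bounds do not match the request exactly. Reconstructed from the lines above, with the assumed MA_STATE setup:

    static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
                                                 unsigned long addr,
                                                 unsigned long len)
    {
            struct vm_area_struct *vma;
            unsigned long end = addr + len;
            MA_STATE(mas, &mm->mm_mt, addr, addr);

            vma = mas_walk(&mas);  /* VMA containing addr, if any */
            if (!vma)
                    return NULL;
            if (vma->vm_start != addr)
                    return NULL;
            if (vma->vm_end != end)
                    return NULL;
            return vma;
    }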
925 static int do_mmap_shared_file(struct vm_area_struct *vma) in do_mmap_shared_file() argument
929 ret = call_mmap(vma->vm_file, vma); in do_mmap_shared_file()
931 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_shared_file()
946 static int do_mmap_private(struct vm_area_struct *vma, in do_mmap_private() argument
960 ret = call_mmap(vma->vm_file, vma); in do_mmap_private()
963 BUG_ON(!(vma->vm_flags & VM_MAYSHARE)); in do_mmap_private()
964 vma->vm_region->vm_top = vma->vm_region->vm_end; in do_mmap_private()
994 region->vm_flags = vma->vm_flags |= VM_MAPPED_COPY; in do_mmap_private()
999 vma->vm_start = region->vm_start; in do_mmap_private()
1000 vma->vm_end = region->vm_start + len; in do_mmap_private()
1002 if (vma->vm_file) { in do_mmap_private()
1006 fpos = vma->vm_pgoff; in do_mmap_private()
1009 ret = kernel_read(vma->vm_file, base, len, &fpos); in do_mmap_private()
1018 vma_set_anonymous(vma); in do_mmap_private()
1025 region->vm_start = vma->vm_start = 0; in do_mmap_private()
1026 region->vm_end = vma->vm_end = 0; in do_mmap_private()
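Without an MMU there is no demand paging, so a private file mapping must be materialised eagerly: the driver's ->mmap is tried first (it may provide a direct mapping), and otherwise the contents are copied in synchronously. The copy step implied by lines 1002-1009, sketched with assumed locals base, len and an assumed unwind label:

    if (vma->vm_file) {
            loff_t fpos = vma->vm_pgoff;
            fpos <<= PAGE_SHIFT;

            ret = kernel_read(vma->vm_file, base, len, &fpos);
            if (ret < 0)
                    goto error_free;

            /* zero the tail so nothing stale leaks past EOF */
            if (ret < len)
                    memset(base + ret, 0, len - ret);
    }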
1049 struct vm_area_struct *vma; in do_mmap() local
1080 vma = vm_area_alloc(current->mm); in do_mmap()
1081 if (!vma) in do_mmap()
1084 if (mas_preallocate(&mas, vma, GFP_KERNEL)) in do_mmap()
1091 vma->vm_flags = vm_flags; in do_mmap()
1092 vma->vm_pgoff = pgoff; in do_mmap()
1096 vma->vm_file = get_file(file); in do_mmap()
1148 vma->vm_region = pregion; in do_mmap()
1151 vma->vm_start = start; in do_mmap()
1152 vma->vm_end = start + len; in do_mmap()
1155 vma->vm_flags |= VM_MAPPED_COPY; in do_mmap()
1157 ret = do_mmap_shared_file(vma); in do_mmap()
1159 vma->vm_region = NULL; in do_mmap()
1160 vma->vm_start = 0; in do_mmap()
1161 vma->vm_end = 0; in do_mmap()
1195 vma->vm_start = region->vm_start = addr; in do_mmap()
1196 vma->vm_end = region->vm_end = addr + len; in do_mmap()
1201 vma->vm_region = region; in do_mmap()
1206 if (file && vma->vm_flags & VM_SHARED) in do_mmap()
1207 ret = do_mmap_shared_file(vma); in do_mmap()
1209 ret = do_mmap_private(vma, region, len, capabilities); in do_mmap()
1215 if (!vma->vm_file && in do_mmap()
1222 result = vma->vm_start; in do_mmap()
1227 mas_add_vma_to_mm(&mas, current->mm, vma); in do_mmap()
1231 if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) { in do_mmap()
1246 if (vma->vm_file) in do_mmap()
1247 fput(vma->vm_file); in do_mmap()
1248 vm_area_free(vma); in do_mmap()
1273 vm_area_free(vma); in do_mmap()
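Two details of do_mmap() stand out in the lines above: a shared file mapping may find an existing nommu region for the same object and simply take a reference to it (line 1148) rather than allocating a new one, and once the VMA is installed, executable mappings have their instruction cache flushed exactly once per region. The icache step, sketched around line 1231 with the flush helper assumed:

    if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
            flush_icache_user_range(region->vm_start, region->vm_end);
            region->vm_icache_flushed = true;
    }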
1337 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
1343 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_end); in split_vma()
1347 if (vma->vm_file) in split_vma()
1357 new = vm_area_dup(vma); in split_vma()
1361 if (mas_preallocate(&mas, vma, GFP_KERNEL)) { in split_vma()
1368 *region = *vma->vm_region; in split_vma()
1371 npages = (addr - vma->vm_start) >> PAGE_SHIFT; in split_vma()
1384 delete_nommu_region(vma->vm_region); in split_vma()
1386 vma->vm_region->vm_start = vma->vm_start = addr; in split_vma()
1387 vma->vm_region->vm_pgoff = vma->vm_pgoff += npages; in split_vma()
1389 vma->vm_region->vm_end = vma->vm_end = addr; in split_vma()
1390 vma->vm_region->vm_top = addr; in split_vma()
1392 add_nommu_region(vma->vm_region); in split_vma()
1396 setup_vma_to_mm(vma, mm); in split_vma()
1398 mas_set_range(&mas, vma->vm_start, vma->vm_end - 1); in split_vma()
1399 mas_store(&mas, vma); in split_vma()
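On nommu, a split must divide both the VMA and its private backing region, and npages (line 1371) shifts vm_pgoff for whichever half ends up holding the tail of the file. A reconstruction of the branch the listing elides, assuming new is the duplicate VMA and new_below selects which side of addr it keeps:

    if (new_below) {
            /* duplicate takes the head; original keeps the tail */
            region->vm_top = region->vm_end = new->vm_end = addr;
    } else {
            /* duplicate takes the tail and inherits the shifted offset */
            region->vm_start = new->vm_start = addr;
            region->vm_pgoff = new->vm_pgoff += npages;
    }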
1415 struct vm_area_struct *vma, in shrink_vma() argument
1422 if (delete_vma_from_mm(vma)) in shrink_vma()
1424 if (from > vma->vm_start) in shrink_vma()
1425 vma->vm_end = from; in shrink_vma()
1427 vma->vm_start = to; in shrink_vma()
1428 if (add_vma_to_mm(mm, vma)) in shrink_vma()
1432 region = vma->vm_region; in shrink_vma()
1458 struct vm_area_struct *vma; in do_munmap() local
1469 vma = mas_find(&mas, end - 1); in do_munmap()
1470 if (!vma) { in do_munmap()
1482 if (vma->vm_file) { in do_munmap()
1484 if (start > vma->vm_start) in do_munmap()
1486 if (end == vma->vm_end) in do_munmap()
1488 vma = mas_next(&mas, end - 1); in do_munmap()
1489 } while (vma); in do_munmap()
1493 if (start == vma->vm_start && end == vma->vm_end) in do_munmap()
1495 if (start < vma->vm_start || end > vma->vm_end) in do_munmap()
1499 if (end != vma->vm_end && offset_in_page(end)) in do_munmap()
1501 if (start != vma->vm_start && end != vma->vm_end) { in do_munmap()
1502 ret = split_vma(mm, vma, start, 1); in do_munmap()
1506 return shrink_vma(mm, vma, start, end); in do_munmap()
1510 if (delete_vma_from_mm(vma)) in do_munmap()
1512 delete_vma(mm, vma); in do_munmap()
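The decision tree in do_munmap() reduces to three cases: an exact match is deleted outright, a range chopping one end goes to shrink_vma(), and a hole in the middle needs split_vma() first. The exact-match case, restated from lines 1493-1512 with its assumed error handling:

    if (start == vma->vm_start && end == vma->vm_end) {
            if (delete_vma_from_mm(vma))
                    return -ENOMEM;  /* tree preallocation failed */
            delete_vma(mm, vma);
            return 0;
    }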
1539 struct vm_area_struct *vma; in exit_mmap() local
1551 for_each_vma(vmi, vma) { in exit_mmap()
1552 cleanup_vma_from_mm(vma); in exit_mmap()
1553 delete_vma(mm, vma); in exit_mmap()
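Teardown just walks every VMA and destroys each in turn; there is no per-VMA tree erase because the whole maple tree is dropped afterwards. The loop reconstructed, assuming a VMA_ITERATOR named vmi over mm and the usual reschedule point:

    VMA_ITERATOR(vmi, mm, 0);

    for_each_vma(vmi, vma) {
            cleanup_vma_from_mm(vma);
            delete_vma(mm, vma);
            cond_resched();  /* teardown can be long on big maps */
    }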
1579 struct vm_area_struct *vma; in do_mremap() local
1593 vma = find_vma_exact(current->mm, addr, old_len); in do_mremap()
1594 if (!vma) in do_mremap()
1597 if (vma->vm_end != vma->vm_start + old_len) in do_mremap()
1600 if (vma->vm_flags & VM_MAYSHARE) in do_mremap()
1603 if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start) in do_mremap()
1607 vma->vm_end = vma->vm_start + new_len; in do_mremap()
1608 return vma->vm_start; in do_mremap()
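Everything in do_mremap() above is a constraint check: the mapping must match exactly, must not be shared, and the new length must fit in slack the backing region already owns, because nothing can be relocated without an MMU. A hypothetical userspace consequence, assuming a nommu target:

    #define _GNU_SOURCE
    #include <sys/mman.h>

    static void demo(void)
    {
            void *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            /* shrinking in place succeeds: vm_end just moves down */
            void *q = mremap(p, 8192, 4096, 0);

            /* growing past the backing region fails even with
             * MREMAP_MAYMOVE: there is nowhere to move pages to */
            void *r = mremap(p, 4096, 65536, MREMAP_MAYMOVE);
            (void)q; (void)r;
    }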
1623 struct page *follow_page(struct vm_area_struct *vma, unsigned long address, in follow_page() argument
1629 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, in remap_pfn_range() argument
1635 vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; in remap_pfn_range()
1640 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) in vm_iomap_memory() argument
1643 unsigned long vm_len = vma->vm_end - vma->vm_start; in vm_iomap_memory()
1645 pfn += vma->vm_pgoff; in vm_iomap_memory()
1646 return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); in vm_iomap_memory()
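vm_iomap_memory() is the convenience wrapper drivers call from ->mmap: it turns a physical start/len pair into the io_remap_pfn_range() call at line 1646. A hypothetical caller (DEMO_PHYS_BASE and DEMO_PHYS_LEN are made-up constants):

    /* expose a fixed physical window to userspace from a char device */
    static int demo_io_mmap(struct file *file, struct vm_area_struct *vma)
    {
            return vm_iomap_memory(vma, DEMO_PHYS_BASE, DEMO_PHYS_LEN);
    }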
1650 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, in remap_vmalloc_range() argument
1653 unsigned int size = vma->vm_end - vma->vm_start; in remap_vmalloc_range()
1655 if (!(vma->vm_flags & VM_USERMAP)) in remap_vmalloc_range()
1658 vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT)); in remap_vmalloc_range()
1659 vma->vm_end = vma->vm_start + size; in remap_vmalloc_range()
1683 struct vm_area_struct *vma; in __access_remote_vm() local
1690 vma = find_vma(mm, addr); in __access_remote_vm()
1691 if (vma) { in __access_remote_vm()
1693 if (addr + len >= vma->vm_end) in __access_remote_vm()
1694 len = vma->vm_end - addr; in __access_remote_vm()
1697 if (write && vma->vm_flags & VM_MAYWRITE) in __access_remote_vm()
1698 copy_to_user_page(vma, NULL, addr, in __access_remote_vm()
1700 else if (!write && vma->vm_flags & VM_MAYREAD) in __access_remote_vm()
1701 copy_from_user_page(vma, NULL, addr, in __access_remote_vm()
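Remote access (ptrace, /proc/<pid>/mem) needs no page pinning on nommu: the target address is directly dereferenceable, so after len is clamped to the VMA (line 1693) the arch copy helpers do all the work, gated by VM_MAYWRITE/VM_MAYREAD. The core restated, with the buf argument assumed:

    if (write && vma->vm_flags & VM_MAYWRITE)
            copy_to_user_page(vma, NULL, addr,
                              (void *)addr, buf, len);
    else if (!write && vma->vm_flags & VM_MAYREAD)
            copy_from_user_page(vma, NULL, addr,
                                buf, (void *)addr, len);
    else
            len = 0;  /* protection forbids it: copy nothing */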
1767 struct vm_area_struct *vma; in nommu_shrink_inode_mappings() local
1779 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) { in nommu_shrink_inode_mappings()
1782 if (vma->vm_flags & VM_SHARED) { in nommu_shrink_inode_mappings()
1795 vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) { in nommu_shrink_inode_mappings()
1796 if (!(vma->vm_flags & VM_SHARED)) in nommu_shrink_inode_mappings()
1799 region = vma->vm_region; in nommu_shrink_inode_mappings()
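Truncation ends the listing with a two-pass scan of the inode's i_mmap tree: the first pass (line 1779) aborts the shrink if any shared mapping still covers the doomed range, and the second (line 1795) trims the unused tail of each shared region so backing store can actually be freed. A hedged sketch of the trim, with newsize, r_size and r_top assumed from the surrounding function:

    r_size = region->vm_top - region->vm_start;
    r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;

    if (r_top > newsize) {
            /* hand back the excess; vm_end may not exceed vm_top */
            region->vm_top -= r_top - newsize;
            if (region->vm_end > region->vm_top)
                    region->vm_end = region->vm_top;
    }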