Lines matching refs:vma (references to the identifier vma)
80 struct vm_area_struct *vma, struct vm_area_struct *prev,
90 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
92 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
95 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
96 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
101 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
107 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
110 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
114 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
122 void unlink_file_vma(struct vm_area_struct *vma) in unlink_file_vma() argument
124 struct file *file = vma->vm_file; in unlink_file_vma()
129 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma()
137 static void remove_vma(struct vm_area_struct *vma, bool unreachable) in remove_vma() argument
140 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
141 vma->vm_ops->close(vma); in remove_vma()
142 if (vma->vm_file) in remove_vma()
143 fput(vma->vm_file); in remove_vma()
144 mpol_put(vma_policy(vma)); in remove_vma()
146 __vm_area_free(vma); in remove_vma()
148 vm_area_free(vma); in remove_vma()
285 struct vm_area_struct *vma; in validate_mm() local
289 for_each_vma(vmi, vma) { in validate_mm()
291 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
299 if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) in validate_mm()
302 if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) in validate_mm()
308 dump_vma(vma); in validate_mm()
309 pr_emerg("tree range: %px start %lx end %lx\n", vma, in validate_mm()
317 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
350 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_pre_update_vma() argument
354 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
359 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_post_update_vma() argument
363 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
371 struct vm_area_struct *vma; in count_vma_pages_range() local
374 for_each_vma_range(vmi, vma, end) { in count_vma_pages_range()
375 unsigned long vm_start = max(addr, vma->vm_start); in count_vma_pages_range()
376 unsigned long vm_end = min(end, vma->vm_end); in count_vma_pages_range()
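A minimal standalone sketch (not kernel code; the helper name and the 4 KiB page-size assumption are mine) of the clamping done in count_vma_pages_range() above: only the portion of each vma that overlaps the queried [addr, end) window contributes pages.

	static unsigned long overlap_pages(unsigned long addr, unsigned long end,
					   unsigned long vm_start, unsigned long vm_end)
	{
		unsigned long lo = addr > vm_start ? addr : vm_start;	/* max(addr, vma->vm_start) */
		unsigned long hi = end < vm_end ? end : vm_end;		/* min(end, vma->vm_end) */

		/* guard is only for the standalone sketch; the kernel loop
		 * already iterates overlapping vmas, so hi > lo there */
		return hi > lo ? (hi - lo) >> 12 : 0;			/* assumes 4 KiB pages */
	}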
384 static void __vma_link_file(struct vm_area_struct *vma, in __vma_link_file() argument
387 if (vma->vm_flags & VM_SHARED) in __vma_link_file()
391 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
395 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) in vma_link() argument
400 vma_iter_config(&vmi, vma->vm_start, vma->vm_end); in vma_link()
401 if (vma_iter_prealloc(&vmi, vma)) in vma_link()
404 vma_start_write(vma); in vma_link()
406 vma_iter_store(&vmi, vma); in vma_link()
408 if (vma->vm_file) { in vma_link()
409 mapping = vma->vm_file->f_mapping; in vma_link()
411 __vma_link_file(vma, mapping); in vma_link()
429 struct vm_area_struct *vma, struct vm_area_struct *next, in init_multi_vma_prep() argument
433 vp->vma = vma; in init_multi_vma_prep()
434 vp->anon_vma = vma->anon_vma; in init_multi_vma_prep()
441 vp->file = vma->vm_file; in init_multi_vma_prep()
443 vp->mapping = vma->vm_file->f_mapping; in init_multi_vma_prep()
453 struct vm_area_struct *vma) in init_vma_prep() argument
455 init_multi_vma_prep(vp, vma, NULL, NULL, NULL); in init_vma_prep()
466 uprobe_munmap(vp->vma, vp->vma->vm_start, vp->vma->vm_end); in vma_prepare()
487 anon_vma_interval_tree_pre_update_vma(vp->vma); in vma_prepare()
494 vma_interval_tree_remove(vp->vma, &vp->mapping->i_mmap); in vma_prepare()
517 vma_interval_tree_insert(vp->vma, &vp->mapping->i_mmap); in vma_complete()
537 anon_vma_interval_tree_post_update_vma(vp->vma); in vma_complete()
545 uprobe_mmap(vp->vma); in vma_complete()
560 anon_vma_merge(vp->vma, vp->remove); in vma_complete()
564 WARN_ON_ONCE(vp->vma->vm_end < vp->remove->vm_end); in vma_complete()
630 int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, in vma_expand() argument
638 vma_start_write(vma); in vma_expand()
639 if (next && (vma != next) && (end == next->vm_end)) { in vma_expand()
644 ret = dup_anon_vma(vma, next, &anon_dup); in vma_expand()
649 init_multi_vma_prep(&vp, vma, NULL, remove_next ? next : NULL, NULL); in vma_expand()
652 next != vma && end > next->vm_start); in vma_expand()
654 VM_WARN_ON(vma->vm_start < start || vma->vm_end > end); in vma_expand()
658 if (vma_iter_prealloc(vmi, vma)) in vma_expand()
662 vma_adjust_trans_huge(vma, start, end, 0); in vma_expand()
663 vma->vm_start = start; in vma_expand()
664 vma->vm_end = end; in vma_expand()
665 vma->vm_pgoff = pgoff; in vma_expand()
666 vma_iter_store(vmi, vma); in vma_expand()
668 vma_complete(&vp, vmi, vma->vm_mm); in vma_expand()
686 int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, in vma_shrink() argument
691 WARN_ON((vma->vm_start != start) && (vma->vm_end != end)); in vma_shrink()
693 if (vma->vm_start < start) in vma_shrink()
694 vma_iter_config(vmi, vma->vm_start, start); in vma_shrink()
696 vma_iter_config(vmi, end, vma->vm_end); in vma_shrink()
701 vma_start_write(vma); in vma_shrink()
703 init_vma_prep(&vp, vma); in vma_shrink()
705 vma_adjust_trans_huge(vma, start, end, 0); in vma_shrink()
708 vma->vm_start = start; in vma_shrink()
709 vma->vm_end = end; in vma_shrink()
710 vma->vm_pgoff = pgoff; in vma_shrink()
711 vma_complete(&vp, vmi, vma->vm_mm); in vma_shrink()
720 static inline bool is_mergeable_vma(struct vm_area_struct *vma, in is_mergeable_vma() argument
733 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
735 if (vma->vm_file != file) in is_mergeable_vma()
737 if (may_remove_vma && vma->vm_ops && vma->vm_ops->close) in is_mergeable_vma()
739 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) in is_mergeable_vma()
741 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name)) in is_mergeable_vma()
747 struct anon_vma *anon_vma2, struct vm_area_struct *vma) in is_mergeable_anon_vma() argument
753 if ((!anon_vma1 || !anon_vma2) && (!vma || in is_mergeable_anon_vma()
754 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
773 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_before() argument
778 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, true) && in can_vma_merge_before()
779 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_before()
780 if (vma->vm_pgoff == vm_pgoff) in can_vma_merge_before()
796 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_after() argument
801 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name, false) && in can_vma_merge_after()
802 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_after()
804 vm_pglen = vma_pages(vma); in can_vma_merge_after()
805 if (vma->vm_pgoff + vm_pglen == vm_pgoff) in can_vma_merge_after()
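The two checks above differ only in their page-offset condition: can_vma_merge_before() needs the incoming region's file offset to land exactly on vma->vm_pgoff, while can_vma_merge_after() needs it to continue exactly where vma's pages end. A standalone sketch of that contiguity rule follows; the struct and function names here are illustrative, not kernel API.

	#include <stdbool.h>

	struct region {				/* hypothetical stand-in for a vma */
		unsigned long pgoff;		/* file offset of first page, in pages */
		unsigned long pages;		/* mapping length, in pages */
	};

	/* merge-after shape: vma->vm_pgoff + vma_pages(vma) == vm_pgoff */
	static bool contiguous_after(const struct region *prev, unsigned long next_pgoff)
	{
		return prev->pgoff + prev->pages == next_pgoff;
	}

	/* merge-before shape: vma->vm_pgoff == vm_pgoff
	 * (the caller has already folded the length adjustment into vm_pgoff) */
	static bool contiguous_before(const struct region *next, unsigned long new_pgoff)
	{
		return next->pgoff == new_pgoff;
	}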
872 struct vm_area_struct *vma, *adjust, *remove, *remove2; in vma_merge() local
932 res = vma = prev; in vma_merge()
974 vma = next; /* case 3 */ in vma_merge()
991 if (vma_start < vma->vm_start || vma_end > vma->vm_end) in vma_merge()
1001 if (vma_iter_prealloc(vmi, vma)) in vma_merge()
1004 init_multi_vma_prep(&vp, vma, adjust, remove, remove2); in vma_merge()
1009 vma_adjust_trans_huge(vma, vma_start, vma_end, adj_start); in vma_merge()
1011 vma->vm_start = vma_start; in vma_merge()
1012 vma->vm_end = vma_end; in vma_merge()
1013 vma->vm_pgoff = vma_pgoff; in vma_merge()
1016 vma_iter_store(vmi, vma); in vma_merge()
1104 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) in find_mergeable_anon_vma() argument
1106 MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end); in find_mergeable_anon_vma()
1113 anon_vma = reusable_anon_vma(next, vma, next); in find_mergeable_anon_vma()
1119 VM_BUG_ON_VMA(prev != vma, vma); in find_mergeable_anon_vma()
1123 anon_vma = reusable_anon_vma(prev, prev, vma); in find_mergeable_anon_vma()
1463 static bool vma_is_shared_writable(struct vm_area_struct *vma) in vma_is_shared_writable() argument
1465 return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == in vma_is_shared_writable()
1469 static bool vma_fs_can_writeback(struct vm_area_struct *vma) in vma_fs_can_writeback() argument
1472 if (vma->vm_flags & VM_PFNMAP) in vma_fs_can_writeback()
1475 return vma->vm_file && vma->vm_file->f_mapping && in vma_fs_can_writeback()
1476 mapping_can_writeback(vma->vm_file->f_mapping); in vma_fs_can_writeback()
1483 bool vma_needs_dirty_tracking(struct vm_area_struct *vma) in vma_needs_dirty_tracking() argument
1486 if (!vma_is_shared_writable(vma)) in vma_needs_dirty_tracking()
1490 if (vm_ops_needs_writenotify(vma->vm_ops)) in vma_needs_dirty_tracking()
1497 return vma_fs_can_writeback(vma); in vma_needs_dirty_tracking()
1506 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) in vma_wants_writenotify() argument
1509 if (!vma_is_shared_writable(vma)) in vma_wants_writenotify()
1513 if (vm_ops_needs_writenotify(vma->vm_ops)) in vma_wants_writenotify()
1519 pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) in vma_wants_writenotify()
1526 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) in vma_wants_writenotify()
1530 if (userfaultfd_wp(vma)) in vma_wants_writenotify()
1534 return vma_fs_can_writeback(vma); in vma_wants_writenotify()
1696 struct vm_area_struct *vma, *prev; in generic_get_unmapped_area() local
1708 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area()
1710 (!vma || addr + len <= vm_start_gap(vma)) && in generic_get_unmapped_area()
1743 struct vm_area_struct *vma, *prev; in generic_get_unmapped_area_topdown() local
1758 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area_topdown()
1760 (!vma || addr + len <= vm_start_gap(vma)) && in generic_get_unmapped_area_topdown()
1898 struct vm_area_struct *vma; in find_vma_prev() local
1901 vma = mas_walk(&mas); in find_vma_prev()
1903 if (!vma) in find_vma_prev()
1904 vma = mas_next(&mas, ULONG_MAX); in find_vma_prev()
1905 return vma; in find_vma_prev()
1913 static int acct_stack_growth(struct vm_area_struct *vma, in acct_stack_growth() argument
1916 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
1920 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
1928 if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) in acct_stack_growth()
1932 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
1933 vma->vm_end - size; in acct_stack_growth()
1934 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
1952 static int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
1954 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
1958 MA_STATE(mas, &mm->mm_mt, vma->vm_start, address); in expand_upwards()
1960 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
1976 next = find_vma_intersection(mm, vma->vm_end, gap_addr); in expand_upwards()
1986 __mas_set_range(&mas, vma->vm_start, address - 1); in expand_upwards()
1987 if (mas_preallocate(&mas, vma, GFP_KERNEL)) in expand_upwards()
1991 if (unlikely(anon_vma_prepare(vma))) { in expand_upwards()
1997 vma_start_write(vma); in expand_upwards()
2003 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
2006 if (address > vma->vm_end) { in expand_upwards()
2009 size = address - vma->vm_start; in expand_upwards()
2010 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
2013 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
2014 error = acct_stack_growth(vma, size, grow); in expand_upwards()
2026 if (vma->vm_flags & VM_LOCKED) in expand_upwards()
2028 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
2029 anon_vma_interval_tree_pre_update_vma(vma); in expand_upwards()
2030 vma->vm_end = address; in expand_upwards()
2032 mas_store_prealloc(&mas, vma); in expand_upwards()
2033 anon_vma_interval_tree_post_update_vma(vma); in expand_upwards()
2036 perf_event_mmap(vma); in expand_upwards()
2040 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
2041 khugepaged_enter_vma(vma, vma->vm_flags); in expand_upwards()
2052 int expand_downwards(struct vm_area_struct *vma, unsigned long address) in expand_downwards() argument
2054 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2055 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); in expand_downwards()
2059 if (!(vma->vm_flags & VM_GROWSDOWN)) in expand_downwards()
2077 mas_next_range(&mas, vma->vm_start); in expand_downwards()
2079 __mas_set_range(&mas, address, vma->vm_end - 1); in expand_downwards()
2080 if (mas_preallocate(&mas, vma, GFP_KERNEL)) in expand_downwards()
2084 if (unlikely(anon_vma_prepare(vma))) { in expand_downwards()
2090 vma_start_write(vma); in expand_downwards()
2096 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
2099 if (address < vma->vm_start) { in expand_downwards()
2102 size = vma->vm_end - address; in expand_downwards()
2103 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
2106 if (grow <= vma->vm_pgoff) { in expand_downwards()
2107 error = acct_stack_growth(vma, size, grow); in expand_downwards()
2119 if (vma->vm_flags & VM_LOCKED) in expand_downwards()
2121 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2122 anon_vma_interval_tree_pre_update_vma(vma); in expand_downwards()
2123 vma->vm_start = address; in expand_downwards()
2124 vma->vm_pgoff -= grow; in expand_downwards()
2126 mas_store_prealloc(&mas, vma); in expand_downwards()
2127 anon_vma_interval_tree_post_update_vma(vma); in expand_downwards()
2130 perf_event_mmap(vma); in expand_downwards()
2134 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
2135 khugepaged_enter_vma(vma, vma->vm_flags); in expand_downwards()
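Both growth paths compute the delta in pages before calling acct_stack_growth(): upward growth only moves vm_end, while downward growth also rewinds vm_pgoff by the same amount, which is why expand_downwards() insists on grow <= vma->vm_pgoff. A sketch of just that arithmetic, standalone and with a 4 KiB page size assumed:

	#define EX_PAGE_SHIFT 12	/* assumption for the example: 4 KiB pages */

	/* expand_upwards(): grow = (address - vma->vm_end) >> PAGE_SHIFT */
	static unsigned long grow_up_pages(unsigned long vm_end, unsigned long address)
	{
		return (address - vm_end) >> EX_PAGE_SHIFT;
	}

	/* expand_downwards(): grow = (vma->vm_start - address) >> PAGE_SHIFT;
	 * the caller then does vma->vm_pgoff -= grow, so grow must not exceed vm_pgoff */
	static unsigned long grow_down_pages(unsigned long vm_start, unsigned long address)
	{
		return (vm_start - address) >> EX_PAGE_SHIFT;
	}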
2158 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) in expand_stack_locked() argument
2160 return expand_upwards(vma, address); in expand_stack_locked()
2165 struct vm_area_struct *vma, *prev; in find_extend_vma_locked() local
2168 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma_locked()
2169 if (vma && (vma->vm_start <= addr)) in find_extend_vma_locked()
2170 return vma; in find_extend_vma_locked()
2180 int expand_stack_locked(struct vm_area_struct *vma, unsigned long address) in expand_stack_locked() argument
2182 if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) in expand_stack_locked()
2184 return expand_downwards(vma, address); in expand_stack_locked()
2189 struct vm_area_struct *vma; in find_extend_vma_locked() local
2193 vma = find_vma(mm, addr); in find_extend_vma_locked()
2194 if (!vma) in find_extend_vma_locked()
2196 if (vma->vm_start <= addr) in find_extend_vma_locked()
2197 return vma; in find_extend_vma_locked()
2198 start = vma->vm_start; in find_extend_vma_locked()
2199 if (expand_stack_locked(vma, addr)) in find_extend_vma_locked()
2201 if (vma->vm_flags & VM_LOCKED) in find_extend_vma_locked()
2202 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma_locked()
2203 return vma; in find_extend_vma_locked()
2215 static inline bool vma_expand_ok(struct vm_area_struct *vma, unsigned long addr) in vma_expand_ok() argument
2217 return REGION_NUMBER(addr) == REGION_NUMBER(vma->vm_start) && in vma_expand_ok()
2226 static inline int vma_expand_up(struct vm_area_struct *vma, unsigned long addr) in vma_expand_up() argument
2228 if (!vma_expand_ok(vma, addr)) in vma_expand_up()
2230 if (vma->vm_end != (addr & PAGE_MASK)) in vma_expand_up()
2232 return expand_upwards(vma, addr); in vma_expand_up()
2235 static inline bool vma_expand_down(struct vm_area_struct *vma, unsigned long addr) in vma_expand_down() argument
2237 if (!vma_expand_ok(vma, addr)) in vma_expand_down()
2239 return expand_downwards(vma, addr); in vma_expand_down()
2244 #define vma_expand_up(vma,addr) expand_upwards(vma, addr) argument
2245 #define vma_expand_down(vma, addr) (-EFAULT) argument
2249 #define vma_expand_up(vma,addr) (-EFAULT) argument
2250 #define vma_expand_down(vma, addr) expand_downwards(vma, addr) argument
2267 struct vm_area_struct *vma, *prev; in expand_stack() local
2273 vma = find_vma_prev(mm, addr, &prev); in expand_stack()
2274 if (vma && vma->vm_start <= addr) in expand_stack()
2278 vma = prev; in expand_stack()
2282 if (vma && !vma_expand_down(vma, addr)) in expand_stack()
2290 return vma; in expand_stack()
2302 struct vm_area_struct *vma; in remove_mt() local
2306 mas_for_each(mas, vma, ULONG_MAX) { in remove_mt()
2307 long nrpages = vma_pages(vma); in remove_mt()
2309 if (vma->vm_flags & VM_ACCOUNT) in remove_mt()
2311 vm_stat_account(mm, vma->vm_flags, -nrpages); in remove_mt()
2312 remove_vma(vma, false); in remove_mt()
2323 struct vm_area_struct *vma, struct vm_area_struct *prev, in unmap_region() argument
2333 unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked); in unmap_region()
2335 free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2346 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, in __split_vma() argument
2353 WARN_ON(vma->vm_start >= addr); in __split_vma()
2354 WARN_ON(vma->vm_end <= addr); in __split_vma()
2356 if (vma->vm_ops && vma->vm_ops->may_split) { in __split_vma()
2357 err = vma->vm_ops->may_split(vma, addr); in __split_vma()
2362 new = vm_area_dup(vma); in __split_vma()
2370 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
2378 err = vma_dup_policy(vma, new); in __split_vma()
2382 err = anon_vma_clone(new, vma); in __split_vma()
2392 vma_start_write(vma); in __split_vma()
2395 init_vma_prep(&vp, vma); in __split_vma()
2398 vma_adjust_trans_huge(vma, vma->vm_start, addr, 0); in __split_vma()
2401 vma->vm_start = addr; in __split_vma()
2402 vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT; in __split_vma()
2404 vma->vm_end = addr; in __split_vma()
2408 vma_complete(&vp, vmi, vma->vm_mm); in __split_vma()
2428 int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, in split_vma() argument
2431 if (vma->vm_mm->map_count >= sysctl_max_map_count) in split_vma()
2434 return __split_vma(vmi, vma, addr, new_below); in split_vma()
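When __split_vma() above carves a mapping at addr, whichever half keeps the higher addresses must advance its file offset by the pages it no longer covers at the front; that is the ((addr - vm_start) >> PAGE_SHIFT) adjustment visible in the listing. A standalone illustration, assuming 4 KiB pages and a made-up helper name:

	/* pgoff of the upper half after splitting [vm_start, vm_end) at addr */
	static unsigned long upper_half_pgoff(unsigned long vm_start,
					      unsigned long vm_pgoff,
					      unsigned long addr)
	{
		return vm_pgoff + ((addr - vm_start) >> 12);	/* assumes 4 KiB pages */
	}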
2452 do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, in do_vmi_align_munmap() argument
2474 if (start > vma->vm_start) { in do_vmi_align_munmap()
2481 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in do_vmi_align_munmap()
2484 error = __split_vma(vmi, vma, start, 1); in do_vmi_align_munmap()
2493 next = vma; in do_vmi_align_munmap()
2575 unmap_region(mm, &mas_detach, vma, prev, next, start, end, count, in do_vmi_align_munmap()
2623 struct vm_area_struct *vma; in do_vmi_munmap() local
2636 vma = vma_find(vmi, end); in do_vmi_munmap()
2637 if (!vma) { in do_vmi_munmap()
2643 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); in do_vmi_munmap()
2667 struct vm_area_struct *vma = NULL; in mmap_region() local
2720 vma = next; in mmap_region()
2726 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, in mmap_region()
2727 pgoff, vma->vm_userfaultfd_ctx, NULL) : in mmap_region()
2731 vma = prev; in mmap_region()
2738 if (vma && in mmap_region()
2739 !vma_expand(&vmi, vma, merge_start, merge_end, vm_pgoff, next)) { in mmap_region()
2740 khugepaged_enter_vma(vma, vm_flags); in mmap_region()
2744 if (vma == prev) in mmap_region()
2753 vma = vm_area_alloc(mm); in mmap_region()
2754 if (!vma) { in mmap_region()
2760 vma->vm_start = addr; in mmap_region()
2761 vma->vm_end = end; in mmap_region()
2762 vm_flags_init(vma, vm_flags); in mmap_region()
2763 vma->vm_page_prot = vm_get_page_prot(vm_flags); in mmap_region()
2764 vma->vm_pgoff = pgoff; in mmap_region()
2773 vma->vm_file = get_file(file); in mmap_region()
2774 error = call_mmap(file, vma); in mmap_region()
2783 if (WARN_ON((addr != vma->vm_start))) in mmap_region()
2791 if (unlikely(vm_flags != vma->vm_flags && prev)) { in mmap_region()
2792 merge = vma_merge(&vmi, mm, prev, vma->vm_start, in mmap_region()
2793 vma->vm_end, vma->vm_flags, NULL, in mmap_region()
2794 vma->vm_file, vma->vm_pgoff, NULL, in mmap_region()
2804 fput(vma->vm_file); in mmap_region()
2805 vm_area_free(vma); in mmap_region()
2806 vma = merge; in mmap_region()
2808 vm_flags = vma->vm_flags; in mmap_region()
2813 vm_flags = vma->vm_flags; in mmap_region()
2815 error = shmem_zero_setup(vma); in mmap_region()
2819 vma_set_anonymous(vma); in mmap_region()
2822 if (map_deny_write_exec(vma, vma->vm_flags)) { in mmap_region()
2829 if (!arch_validate_flags(vma->vm_flags)) in mmap_region()
2833 if (vma_iter_prealloc(&vmi, vma)) in mmap_region()
2837 vma_start_write(vma); in mmap_region()
2838 vma_iter_store(&vmi, vma); in mmap_region()
2840 if (vma->vm_file) { in mmap_region()
2841 i_mmap_lock_write(vma->vm_file->f_mapping); in mmap_region()
2842 if (vma->vm_flags & VM_SHARED) in mmap_region()
2843 mapping_allow_writable(vma->vm_file->f_mapping); in mmap_region()
2845 flush_dcache_mmap_lock(vma->vm_file->f_mapping); in mmap_region()
2846 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); in mmap_region()
2847 flush_dcache_mmap_unlock(vma->vm_file->f_mapping); in mmap_region()
2848 i_mmap_unlock_write(vma->vm_file->f_mapping); in mmap_region()
2855 khugepaged_enter_vma(vma, vma->vm_flags); in mmap_region()
2861 file = vma->vm_file; in mmap_region()
2862 ksm_add_vma(vma); in mmap_region()
2864 perf_event_mmap(vma); in mmap_region()
2868 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || in mmap_region()
2869 is_vm_hugetlb_page(vma) || in mmap_region()
2870 vma == get_gate_vma(current->mm)) in mmap_region()
2871 vm_flags_clear(vma, VM_LOCKED_MASK); in mmap_region()
2877 uprobe_mmap(vma); in mmap_region()
2886 vm_flags_set(vma, VM_SOFTDIRTY); in mmap_region()
2888 vma_set_page_prot(vma); in mmap_region()
2894 if (file && vma->vm_ops && vma->vm_ops->close) in mmap_region()
2895 vma->vm_ops->close(vma); in mmap_region()
2897 if (file || vma->vm_file) { in mmap_region()
2899 fput(vma->vm_file); in mmap_region()
2900 vma->vm_file = NULL; in mmap_region()
2902 vma_iter_set(&vmi, vma->vm_end); in mmap_region()
2904 unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start, in mmap_region()
2905 vma->vm_end, vma->vm_end, true); in mmap_region()
2910 vm_area_free(vma); in mmap_region()
2957 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
2980 vma = vma_lookup(mm, start); in SYSCALL_DEFINE5()
2982 if (!vma || !(vma->vm_flags & VM_SHARED)) in SYSCALL_DEFINE5()
2985 if (start + size > vma->vm_end) { in SYSCALL_DEFINE5()
2986 VMA_ITERATOR(vmi, mm, vma->vm_end); in SYSCALL_DEFINE5()
2987 struct vm_area_struct *next, *prev = vma; in SYSCALL_DEFINE5()
2994 if (next->vm_file != vma->vm_file) in SYSCALL_DEFINE5()
2997 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
3010 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
3011 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
3012 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
3016 if (vma->vm_flags & VM_LOCKED) in SYSCALL_DEFINE5()
3019 file = get_file(vma->vm_file); in SYSCALL_DEFINE5()
3020 ret = do_mmap(vma->vm_file, start, size, in SYSCALL_DEFINE5()
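The SYSCALL_DEFINE5() listing above rebuilds a PROT_* mask from the vma's VM_* flags before handing the range back to do_mmap(). A userspace-flavoured sketch of that translation; the EX_VM_* bit values are assumptions for the example, not taken from kernel headers.

	#include <sys/mman.h>		/* PROT_READ, PROT_WRITE, PROT_EXEC */

	#define EX_VM_READ	0x1UL	/* assumed bit values for the sketch */
	#define EX_VM_WRITE	0x2UL
	#define EX_VM_EXEC	0x4UL

	static unsigned long vm_flags_to_prot(unsigned long vm_flags)
	{
		unsigned long prot = 0;

		prot |= vm_flags & EX_VM_READ  ? PROT_READ  : 0;
		prot |= vm_flags & EX_VM_WRITE ? PROT_WRITE : 0;
		prot |= vm_flags & EX_VM_EXEC  ? PROT_EXEC  : 0;
		return prot;
	}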
3047 int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, in do_vma_munmap() argument
3051 struct mm_struct *mm = vma->vm_mm; in do_vma_munmap()
3054 return do_vmi_align_munmap(vmi, vma, mm, start, end, uf, unlock); in do_vma_munmap()
3069 static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, in do_brk_flags() argument
3093 if (vma && vma->vm_end == addr && !vma_policy(vma) && in do_brk_flags()
3094 can_vma_merge_after(vma, flags, NULL, NULL, in do_brk_flags()
3096 vma_iter_config(vmi, vma->vm_start, addr + len); in do_brk_flags()
3097 if (vma_iter_prealloc(vmi, vma)) in do_brk_flags()
3100 vma_start_write(vma); in do_brk_flags()
3102 init_vma_prep(&vp, vma); in do_brk_flags()
3104 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); in do_brk_flags()
3105 vma->vm_end = addr + len; in do_brk_flags()
3106 vm_flags_set(vma, VM_SOFTDIRTY); in do_brk_flags()
3107 vma_iter_store(vmi, vma); in do_brk_flags()
3110 khugepaged_enter_vma(vma, flags); in do_brk_flags()
3114 if (vma) in do_brk_flags()
3117 vma = vm_area_alloc(mm); in do_brk_flags()
3118 if (!vma) in do_brk_flags()
3121 vma_set_anonymous(vma); in do_brk_flags()
3122 vma->vm_start = addr; in do_brk_flags()
3123 vma->vm_end = addr + len; in do_brk_flags()
3124 vma->vm_pgoff = addr >> PAGE_SHIFT; in do_brk_flags()
3125 vm_flags_init(vma, flags); in do_brk_flags()
3126 vma->vm_page_prot = vm_get_page_prot(flags); in do_brk_flags()
3127 vma_start_write(vma); in do_brk_flags()
3128 if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL)) in do_brk_flags()
3133 ksm_add_vma(vma); in do_brk_flags()
3135 perf_event_mmap(vma); in do_brk_flags()
3140 vm_flags_set(vma, VM_SOFTDIRTY); in do_brk_flags()
3144 vm_area_free(vma); in do_brk_flags()
3153 struct vm_area_struct *vma = NULL; in vm_brk_flags() local
3181 vma = vma_prev(&vmi); in vm_brk_flags()
3182 ret = do_brk_flags(&vmi, vma, addr, len, flags); in vm_brk_flags()
3207 struct vm_area_struct *vma; in exit_mmap() local
3218 vma = mas_find(&mas, ULONG_MAX); in exit_mmap()
3219 if (!vma) { in exit_mmap()
3230 unmap_vmas(&tlb, &mas, vma, 0, ULONG_MAX, ULONG_MAX, false); in exit_mmap()
3240 mas_set(&mas, vma->vm_end); in exit_mmap()
3241 free_pgtables(&tlb, &mas, vma, FIRST_USER_ADDRESS, in exit_mmap()
3250 mas_set(&mas, vma->vm_end); in exit_mmap()
3252 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
3253 nr_accounted += vma_pages(vma); in exit_mmap()
3254 remove_vma(vma, true); in exit_mmap()
3257 } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); in exit_mmap()
3271 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
3273 unsigned long charged = vma_pages(vma); in insert_vm_struct()
3276 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) in insert_vm_struct()
3279 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
3295 if (vma_is_anonymous(vma)) { in insert_vm_struct()
3296 BUG_ON(vma->anon_vma); in insert_vm_struct()
3297 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
3300 if (vma_link(mm, vma)) { in insert_vm_struct()
3316 struct vm_area_struct *vma = *vmap; in copy_vma() local
3317 unsigned long vma_start = vma->vm_start; in copy_vma()
3318 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3327 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { in copy_vma()
3336 new_vma = vma_merge(&vmi, mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3337 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in copy_vma()
3338 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in copy_vma()
3358 *vmap = vma = new_vma; in copy_vma()
3360 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
3362 new_vma = vm_area_dup(vma); in copy_vma()
3368 if (vma_dup_policy(vma, new_vma)) in copy_vma()
3370 if (anon_vma_clone(new_vma, vma)) in copy_vma()
3444 static void special_mapping_close(struct vm_area_struct *vma) in special_mapping_close() argument
3448 static const char *special_mapping_name(struct vm_area_struct *vma) in special_mapping_name() argument
3450 return ((struct vm_special_mapping *)vma->vm_private_data)->name; in special_mapping_name()
3466 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) in special_mapping_split() argument
3494 struct vm_area_struct *vma = vmf->vma; in special_mapping_fault() local
3498 if (vma->vm_ops == &legacy_special_mapping_vmops) { in special_mapping_fault()
3499 pages = vma->vm_private_data; in special_mapping_fault()
3501 struct vm_special_mapping *sm = vma->vm_private_data; in special_mapping_fault()
3504 return sm->fault(sm, vmf->vma, vmf); in special_mapping_fault()
3529 struct vm_area_struct *vma; in __install_special_mapping() local
3531 vma = vm_area_alloc(mm); in __install_special_mapping()
3532 if (unlikely(vma == NULL)) in __install_special_mapping()
3535 vma->vm_start = addr; in __install_special_mapping()
3536 vma->vm_end = addr + len; in __install_special_mapping()
3538 vm_flags_init(vma, (vm_flags | mm->def_flags | in __install_special_mapping()
3540 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
3542 vma->vm_ops = ops; in __install_special_mapping()
3543 vma->vm_private_data = priv; in __install_special_mapping()
3545 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3549 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
3551 perf_event_mmap(vma); in __install_special_mapping()
3553 return vma; in __install_special_mapping()
3556 vm_area_free(vma); in __install_special_mapping()
3560 bool vma_is_special_mapping(const struct vm_area_struct *vma, in vma_is_special_mapping() argument
3563 return vma->vm_private_data == sm && in vma_is_special_mapping()
3564 (vma->vm_ops == &special_mapping_vmops || in vma_is_special_mapping()
3565 vma->vm_ops == &legacy_special_mapping_vmops); in vma_is_special_mapping()
3590 struct vm_area_struct *vma = __install_special_mapping( in install_special_mapping() local
3594 return PTR_ERR_OR_ZERO(vma); in install_special_mapping()
3680 struct vm_area_struct *vma; in mm_take_all_locks() local
3694 mas_for_each(&mas, vma, ULONG_MAX) { in mm_take_all_locks()
3697 vma_start_write(vma); in mm_take_all_locks()
3701 mas_for_each(&mas, vma, ULONG_MAX) { in mm_take_all_locks()
3704 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3705 is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3706 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3710 mas_for_each(&mas, vma, ULONG_MAX) { in mm_take_all_locks()
3713 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3714 !is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3715 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3719 mas_for_each(&mas, vma, ULONG_MAX) { in mm_take_all_locks()
3722 if (vma->anon_vma) in mm_take_all_locks()
3723 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
3776 struct vm_area_struct *vma; in mm_drop_all_locks() local
3783 mas_for_each(&mas, vma, ULONG_MAX) { in mm_drop_all_locks()
3784 if (vma->anon_vma) in mm_drop_all_locks()
3785 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
3787 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
3788 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()