Lines matching refs:vma
79 struct vm_area_struct *vma, struct vm_area_struct *prev,
89 void vma_set_page_prot(struct vm_area_struct *vma) in vma_set_page_prot() argument
91 unsigned long vm_flags = vma->vm_flags; in vma_set_page_prot()
94 vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags); in vma_set_page_prot()
95 if (vma_wants_writenotify(vma, vm_page_prot)) { in vma_set_page_prot()
100 WRITE_ONCE(vma->vm_page_prot, vm_page_prot); in vma_set_page_prot()
106 static void __remove_shared_vm_struct(struct vm_area_struct *vma, in __remove_shared_vm_struct() argument
109 if (vma->vm_flags & VM_SHARED) in __remove_shared_vm_struct()
113 vma_interval_tree_remove(vma, &mapping->i_mmap); in __remove_shared_vm_struct()
121 void unlink_file_vma(struct vm_area_struct *vma) in unlink_file_vma() argument
123 struct file *file = vma->vm_file; in unlink_file_vma()
128 __remove_shared_vm_struct(vma, file, mapping); in unlink_file_vma()
136 static void remove_vma(struct vm_area_struct *vma) in remove_vma() argument
139 if (vma->vm_ops && vma->vm_ops->close) in remove_vma()
140 vma->vm_ops->close(vma); in remove_vma()
141 if (vma->vm_file) in remove_vma()
142 fput(vma->vm_file); in remove_vma()
143 mpol_put(vma_policy(vma)); in remove_vma()
144 vm_area_free(vma); in remove_vma()
165 static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma,
329 struct vm_area_struct *vma; in validate_mm() local
334 mas_for_each(&mas, vma, ULONG_MAX) { in validate_mm()
336 struct anon_vma *anon_vma = vma->anon_vma; in validate_mm()
341 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in validate_mm()
375 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_pre_update_vma() argument
379 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_pre_update_vma()
384 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma) in anon_vma_interval_tree_post_update_vma() argument
388 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in anon_vma_interval_tree_post_update_vma()
396 struct vm_area_struct *vma; in count_vma_pages_range() local
399 for_each_vma_range(vmi, vma, end) { in count_vma_pages_range()
400 unsigned long vm_start = max(addr, vma->vm_start); in count_vma_pages_range()
401 unsigned long vm_end = min(end, vma->vm_end); in count_vma_pages_range()
409 static void __vma_link_file(struct vm_area_struct *vma, in __vma_link_file() argument
412 if (vma->vm_flags & VM_SHARED) in __vma_link_file()
416 vma_interval_tree_insert(vma, &mapping->i_mmap); in __vma_link_file()
430 void vma_mas_store(struct vm_area_struct *vma, struct ma_state *mas) in vma_mas_store() argument
432 trace_vma_store(mas->tree, vma); in vma_mas_store()
433 mas_set_range(mas, vma->vm_start, vma->vm_end - 1); in vma_mas_store()
434 mas_store_prealloc(mas, vma); in vma_mas_store()
446 void vma_mas_remove(struct vm_area_struct *vma, struct ma_state *mas) in vma_mas_remove() argument
448 trace_vma_mas_szero(mas->tree, vma->vm_start, vma->vm_end - 1); in vma_mas_remove()
449 mas->index = vma->vm_start; in vma_mas_remove()
450 mas->last = vma->vm_end - 1; in vma_mas_remove()
470 static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma) in vma_link() argument
475 if (mas_preallocate(&mas, vma, GFP_KERNEL)) in vma_link()
478 if (vma->vm_file) { in vma_link()
479 mapping = vma->vm_file->f_mapping; in vma_link()
483 vma_mas_store(vma, &mas); in vma_link()
486 __vma_link_file(vma, mapping); in vma_link()
512 inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma, in vma_expand() argument
516 struct mm_struct *mm = vma->vm_mm; in vma_expand()
519 struct anon_vma *anon_vma = vma->anon_vma; in vma_expand()
520 struct file *file = vma->vm_file; in vma_expand()
523 if (next && (vma != next) && (end == next->vm_end)) { in vma_expand()
525 if (next->anon_vma && !vma->anon_vma) { in vma_expand()
529 vma->anon_vma = anon_vma; in vma_expand()
530 error = anon_vma_clone(vma, next); in vma_expand()
537 VM_BUG_ON(next && !remove_next && next != vma && end > next->vm_start); in vma_expand()
539 VM_BUG_ON(vma->vm_start < start || vma->vm_end > end); in vma_expand()
541 if (mas_preallocate(mas, vma, GFP_KERNEL)) in vma_expand()
544 vma_adjust_trans_huge(vma, start, end, 0); in vma_expand()
549 uprobe_munmap(vma, vma->vm_start, vma->vm_end); in vma_expand()
555 anon_vma_interval_tree_pre_update_vma(vma); in vma_expand()
560 vma_interval_tree_remove(vma, root); in vma_expand()
563 vma->vm_start = start; in vma_expand()
564 vma->vm_end = end; in vma_expand()
565 vma->vm_pgoff = pgoff; in vma_expand()
567 vma_mas_store(vma, mas); in vma_expand()
570 vma_interval_tree_insert(vma, root); in vma_expand()
580 anon_vma_interval_tree_post_update_vma(vma); in vma_expand()
586 uprobe_mmap(vma); in vma_expand()
595 anon_vma_merge(vma, next); in vma_expand()
615 int __vma_adjust(struct vm_area_struct *vma, unsigned long start, in __vma_adjust() argument
619 struct mm_struct *mm = vma->vm_mm; in __vma_adjust()
621 struct vm_area_struct *next = find_vma(mm, vma->vm_end); in __vma_adjust()
622 struct vm_area_struct *orig_vma = vma; in __vma_adjust()
626 struct file *file = vma->vm_file; in __vma_adjust()
654 swap(vma, next); in __vma_adjust()
656 VM_WARN_ON(expand != vma); in __vma_adjust()
670 importer = vma; in __vma_adjust()
686 importer = vma; in __vma_adjust()
688 } else if (end < vma->vm_end) { in __vma_adjust()
694 adjust_next = -(vma->vm_end - end); in __vma_adjust()
695 exporter = vma; in __vma_adjust()
715 if (mas_preallocate(&mas, vma, GFP_KERNEL)) in __vma_adjust()
722 uprobe_munmap(vma, vma->vm_start, vma->vm_end); in __vma_adjust()
739 anon_vma = vma->anon_vma; in __vma_adjust()
746 anon_vma_interval_tree_pre_update_vma(vma); in __vma_adjust()
753 vma_interval_tree_remove(vma, root); in __vma_adjust()
758 if (start != vma->vm_start) { in __vma_adjust()
759 if ((vma->vm_start < start) && in __vma_adjust()
761 vma_mas_szero(&mas, vma->vm_start, start); in __vma_adjust()
762 VM_WARN_ON(insert && insert->vm_start > vma->vm_start); in __vma_adjust()
766 vma->vm_start = start; in __vma_adjust()
768 if (end != vma->vm_end) { in __vma_adjust()
769 if (vma->vm_end > end) { in __vma_adjust()
771 vma_mas_szero(&mas, end, vma->vm_end); in __vma_adjust()
774 insert->vm_end < vma->vm_end); in __vma_adjust()
779 vma->vm_end = end; in __vma_adjust()
783 vma_mas_store(vma, &mas); in __vma_adjust()
785 vma->vm_pgoff = pgoff; in __vma_adjust()
795 vma_interval_tree_insert(vma, root); in __vma_adjust()
815 anon_vma_interval_tree_post_update_vma(vma); in __vma_adjust()
823 uprobe_mmap(vma); in __vma_adjust()
836 anon_vma_merge(vma, next); in __vma_adjust()
840 BUG_ON(vma->vm_end < next->vm_end); in __vma_adjust()
866 static inline int is_mergeable_vma(struct vm_area_struct *vma, in is_mergeable_vma() argument
879 if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY) in is_mergeable_vma()
881 if (vma->vm_file != file) in is_mergeable_vma()
883 if (vma->vm_ops && vma->vm_ops->close) in is_mergeable_vma()
885 if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx)) in is_mergeable_vma()
887 if (!anon_vma_name_eq(anon_vma_name(vma), anon_name)) in is_mergeable_vma()
894 struct vm_area_struct *vma) in is_mergeable_anon_vma() argument
900 if ((!anon_vma1 || !anon_vma2) && (!vma || in is_mergeable_anon_vma()
901 list_is_singular(&vma->anon_vma_chain))) in is_mergeable_anon_vma()
918 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_before() argument
924 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && in can_vma_merge_before()
925 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_before()
926 if (vma->vm_pgoff == vm_pgoff) in can_vma_merge_before()
940 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, in can_vma_merge_after() argument
946 if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx, anon_name) && in can_vma_merge_after()
947 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) { in can_vma_merge_after()
949 vm_pglen = vma_pages(vma); in can_vma_merge_after()
950 if (vma->vm_pgoff + vm_pglen == vm_pgoff) in can_vma_merge_after()
1140 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma) in find_mergeable_anon_vma() argument
1142 MA_STATE(mas, &vma->vm_mm->mm_mt, vma->vm_end, vma->vm_end); in find_mergeable_anon_vma()
1149 anon_vma = reusable_anon_vma(next, vma, next); in find_mergeable_anon_vma()
1155 VM_BUG_ON_VMA(prev != vma, vma); in find_mergeable_anon_vma()
1159 anon_vma = reusable_anon_vma(prev, prev, vma); in find_mergeable_anon_vma()
1501 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) in vma_wants_writenotify() argument
1503 vm_flags_t vm_flags = vma->vm_flags; in vma_wants_writenotify()
1504 const struct vm_operations_struct *vm_ops = vma->vm_ops; in vma_wants_writenotify()
1524 if (vma_soft_dirty_enabled(vma) && !is_vm_hugetlb_page(vma)) in vma_wants_writenotify()
1532 return vma->vm_file && vma->vm_file->f_mapping && in vma_wants_writenotify()
1533 mapping_can_writeback(vma->vm_file->f_mapping); in vma_wants_writenotify()
1650 struct vm_area_struct *vma, *prev; in generic_get_unmapped_area() local
1662 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area()
1664 (!vma || addr + len <= vm_start_gap(vma)) && in generic_get_unmapped_area()
1697 struct vm_area_struct *vma, *prev; in generic_get_unmapped_area_topdown() local
1712 vma = find_vma_prev(mm, addr, &prev); in generic_get_unmapped_area_topdown()
1714 (!vma || addr + len <= vm_start_gap(vma)) && in generic_get_unmapped_area_topdown()
1852 struct vm_area_struct *vma; in find_vma_prev() local
1855 vma = mas_walk(&mas); in find_vma_prev()
1857 if (!vma) in find_vma_prev()
1858 vma = mas_next(&mas, ULONG_MAX); in find_vma_prev()
1859 return vma; in find_vma_prev()
1867 static int acct_stack_growth(struct vm_area_struct *vma, in acct_stack_growth() argument
1870 struct mm_struct *mm = vma->vm_mm; in acct_stack_growth()
1874 if (!may_expand_vm(mm, vma->vm_flags, grow)) in acct_stack_growth()
1882 if (mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) in acct_stack_growth()
1886 new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start : in acct_stack_growth()
1887 vma->vm_end - size; in acct_stack_growth()
1888 if (is_hugepage_only_range(vma->vm_mm, new_start, size)) in acct_stack_growth()
1906 int expand_upwards(struct vm_area_struct *vma, unsigned long address) in expand_upwards() argument
1908 struct mm_struct *mm = vma->vm_mm; in expand_upwards()
1914 if (!(vma->vm_flags & VM_GROWSUP)) in expand_upwards()
1930 next = find_vma_intersection(mm, vma->vm_end, gap_addr); in expand_upwards()
1937 if (mas_preallocate(&mas, vma, GFP_KERNEL)) in expand_upwards()
1941 if (unlikely(anon_vma_prepare(vma))) { in expand_upwards()
1951 anon_vma_lock_write(vma->anon_vma); in expand_upwards()
1954 if (address > vma->vm_end) { in expand_upwards()
1957 size = address - vma->vm_start; in expand_upwards()
1958 grow = (address - vma->vm_end) >> PAGE_SHIFT; in expand_upwards()
1961 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) { in expand_upwards()
1962 error = acct_stack_growth(vma, size, grow); in expand_upwards()
1974 if (vma->vm_flags & VM_LOCKED) in expand_upwards()
1976 vm_stat_account(mm, vma->vm_flags, grow); in expand_upwards()
1977 anon_vma_interval_tree_pre_update_vma(vma); in expand_upwards()
1978 vma->vm_end = address; in expand_upwards()
1980 vma_mas_store(vma, &mas); in expand_upwards()
1981 anon_vma_interval_tree_post_update_vma(vma); in expand_upwards()
1984 perf_event_mmap(vma); in expand_upwards()
1988 anon_vma_unlock_write(vma->anon_vma); in expand_upwards()
1989 khugepaged_enter_vma(vma, vma->vm_flags); in expand_upwards()
1998 int expand_downwards(struct vm_area_struct *vma, unsigned long address) in expand_downwards() argument
2000 struct mm_struct *mm = vma->vm_mm; in expand_downwards()
2001 MA_STATE(mas, &mm->mm_mt, vma->vm_start, vma->vm_start); in expand_downwards()
2018 if (mas_preallocate(&mas, vma, GFP_KERNEL)) in expand_downwards()
2022 if (unlikely(anon_vma_prepare(vma))) { in expand_downwards()
2032 anon_vma_lock_write(vma->anon_vma); in expand_downwards()
2035 if (address < vma->vm_start) { in expand_downwards()
2038 size = vma->vm_end - address; in expand_downwards()
2039 grow = (vma->vm_start - address) >> PAGE_SHIFT; in expand_downwards()
2042 if (grow <= vma->vm_pgoff) { in expand_downwards()
2043 error = acct_stack_growth(vma, size, grow); in expand_downwards()
2055 if (vma->vm_flags & VM_LOCKED) in expand_downwards()
2057 vm_stat_account(mm, vma->vm_flags, grow); in expand_downwards()
2058 anon_vma_interval_tree_pre_update_vma(vma); in expand_downwards()
2059 vma->vm_start = address; in expand_downwards()
2060 vma->vm_pgoff -= grow; in expand_downwards()
2062 vma_mas_store(vma, &mas); in expand_downwards()
2063 anon_vma_interval_tree_post_update_vma(vma); in expand_downwards()
2066 perf_event_mmap(vma); in expand_downwards()
2070 anon_vma_unlock_write(vma->anon_vma); in expand_downwards()
2071 khugepaged_enter_vma(vma, vma->vm_flags); in expand_downwards()
2093 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2095 return expand_upwards(vma, address); in expand_stack()
2101 struct vm_area_struct *vma, *prev; in find_extend_vma() local
2104 vma = find_vma_prev(mm, addr, &prev); in find_extend_vma()
2105 if (vma && (vma->vm_start <= addr)) in find_extend_vma()
2106 return vma; in find_extend_vma()
2114 int expand_stack(struct vm_area_struct *vma, unsigned long address) in expand_stack() argument
2116 return expand_downwards(vma, address); in expand_stack()
2122 struct vm_area_struct *vma; in find_extend_vma() local
2126 vma = find_vma(mm, addr); in find_extend_vma()
2127 if (!vma) in find_extend_vma()
2129 if (vma->vm_start <= addr) in find_extend_vma()
2130 return vma; in find_extend_vma()
2131 if (!(vma->vm_flags & VM_GROWSDOWN)) in find_extend_vma()
2133 start = vma->vm_start; in find_extend_vma()
2134 if (expand_stack(vma, addr)) in find_extend_vma()
2136 if (vma->vm_flags & VM_LOCKED) in find_extend_vma()
2137 populate_vma_page_range(vma, addr, start, NULL); in find_extend_vma()
2138 return vma; in find_extend_vma()
2153 struct vm_area_struct *vma; in remove_mt() local
2157 mas_for_each(mas, vma, ULONG_MAX) { in remove_mt()
2158 long nrpages = vma_pages(vma); in remove_mt()
2160 if (vma->vm_flags & VM_ACCOUNT) in remove_mt()
2162 vm_stat_account(mm, vma->vm_flags, -nrpages); in remove_mt()
2163 remove_vma(vma); in remove_mt()
2175 struct vm_area_struct *vma, struct vm_area_struct *prev, in unmap_region() argument
2184 unmap_vmas(&tlb, mt, vma, start, end); in unmap_region()
2185 free_pgtables(&tlb, mt, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, in unmap_region()
2194 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in __split_vma() argument
2201 if (vma->vm_ops && vma->vm_ops->may_split) { in __split_vma()
2202 err = vma->vm_ops->may_split(vma, addr); in __split_vma()
2207 new = vm_area_dup(vma); in __split_vma()
2215 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); in __split_vma()
2218 err = vma_dup_policy(vma, new); in __split_vma()
2222 err = anon_vma_clone(new, vma); in __split_vma()
2233 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff + in __split_vma()
2236 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); in __split_vma()
2263 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, in split_vma() argument
2269 return __split_vma(mm, vma, addr, new_below); in split_vma()
2272 static inline int munmap_sidetree(struct vm_area_struct *vma, in munmap_sidetree() argument
2275 mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1); in munmap_sidetree()
2276 if (mas_store_gfp(mas_detach, vma, GFP_KERNEL)) in munmap_sidetree()
2279 if (vma->vm_flags & VM_LOCKED) in munmap_sidetree()
2280 vma->vm_mm->locked_vm -= vma_pages(vma); in munmap_sidetree()
2298 do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma, in do_mas_align_munmap() argument
2310 if (mas_preallocate(mas, vma, GFP_KERNEL)) in do_mas_align_munmap()
2323 if (start > vma->vm_start) { in do_mas_align_munmap()
2330 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count) in do_mas_align_munmap()
2337 error = __split_vma(mm, vma, start, 0); in do_mas_align_munmap()
2342 vma = mas_walk(mas); in do_mas_align_munmap()
2369 if (vma == next) in do_mas_align_munmap()
2370 vma = split; in do_mas_align_munmap()
2440 unmap_region(mm, &mt_detach, vma, prev, next, start, end); in do_mas_align_munmap()
2481 struct vm_area_struct *vma; in do_mas_munmap() local
2494 vma = mas_find(mas, end - 1); in do_mas_munmap()
2495 if (!vma) in do_mas_munmap()
2498 return do_mas_align_munmap(mas, vma, mm, start, end, uf, downgrade); in do_mas_munmap()
2520 struct vm_area_struct *vma = NULL; in mmap_region() local
2570 vma = next; in mmap_region()
2576 (vma ? can_vma_merge_after(prev, vm_flags, vma->anon_vma, file, in mmap_region()
2577 pgoff, vma->vm_userfaultfd_ctx, NULL) : in mmap_region()
2581 vma = prev; in mmap_region()
2587 if (vma && in mmap_region()
2588 !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) { in mmap_region()
2589 khugepaged_enter_vma(vma, vm_flags); in mmap_region()
2601 vma = vm_area_alloc(mm); in mmap_region()
2602 if (!vma) { in mmap_region()
2607 vma->vm_start = addr; in mmap_region()
2608 vma->vm_end = end; in mmap_region()
2609 vma->vm_flags = vm_flags; in mmap_region()
2610 vma->vm_page_prot = vm_get_page_prot(vm_flags); in mmap_region()
2611 vma->vm_pgoff = pgoff; in mmap_region()
2620 vma->vm_file = get_file(file); in mmap_region()
2621 error = call_mmap(file, vma); in mmap_region()
2629 if (WARN_ON((addr != vma->vm_start))) { in mmap_region()
2639 if (unlikely(vm_flags != vma->vm_flags && prev)) { in mmap_region()
2640 merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags, in mmap_region()
2641 NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX, NULL); in mmap_region()
2650 fput(vma->vm_file); in mmap_region()
2651 vm_area_free(vma); in mmap_region()
2652 vma = merge; in mmap_region()
2654 vm_flags = vma->vm_flags; in mmap_region()
2659 vm_flags = vma->vm_flags; in mmap_region()
2661 error = shmem_zero_setup(vma); in mmap_region()
2665 vma_set_anonymous(vma); in mmap_region()
2669 if (!arch_validate_flags(vma->vm_flags)) { in mmap_region()
2673 else if (vma->vm_file) in mmap_region()
2679 if (mas_preallocate(&mas, vma, GFP_KERNEL)) { in mmap_region()
2683 else if (vma->vm_file) in mmap_region()
2689 if (vma->vm_file) in mmap_region()
2690 i_mmap_lock_write(vma->vm_file->f_mapping); in mmap_region()
2692 vma_mas_store(vma, &mas); in mmap_region()
2694 if (vma->vm_file) { in mmap_region()
2695 if (vma->vm_flags & VM_SHARED) in mmap_region()
2696 mapping_allow_writable(vma->vm_file->f_mapping); in mmap_region()
2698 flush_dcache_mmap_lock(vma->vm_file->f_mapping); in mmap_region()
2699 vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap); in mmap_region()
2700 flush_dcache_mmap_unlock(vma->vm_file->f_mapping); in mmap_region()
2701 i_mmap_unlock_write(vma->vm_file->f_mapping); in mmap_region()
2708 khugepaged_enter_vma(vma, vma->vm_flags); in mmap_region()
2714 file = vma->vm_file; in mmap_region()
2716 perf_event_mmap(vma); in mmap_region()
2720 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) || in mmap_region()
2721 is_vm_hugetlb_page(vma) || in mmap_region()
2722 vma == get_gate_vma(current->mm)) in mmap_region()
2723 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in mmap_region()
2729 uprobe_mmap(vma); in mmap_region()
2738 vma->vm_flags |= VM_SOFTDIRTY; in mmap_region()
2740 vma_set_page_prot(vma); in mmap_region()
2746 if (vma->vm_ops && vma->vm_ops->close) in mmap_region()
2747 vma->vm_ops->close(vma); in mmap_region()
2749 fput(vma->vm_file); in mmap_region()
2750 vma->vm_file = NULL; in mmap_region()
2753 unmap_region(mm, mas.tree, vma, prev, next, vma->vm_start, vma->vm_end); in mmap_region()
2757 vm_area_free(vma); in mmap_region()
2812 struct vm_area_struct *vma; in SYSCALL_DEFINE5() local
2835 vma = vma_lookup(mm, start); in SYSCALL_DEFINE5()
2837 if (!vma || !(vma->vm_flags & VM_SHARED)) in SYSCALL_DEFINE5()
2840 if (start + size > vma->vm_end) { in SYSCALL_DEFINE5()
2841 VMA_ITERATOR(vmi, mm, vma->vm_end); in SYSCALL_DEFINE5()
2842 struct vm_area_struct *next, *prev = vma; in SYSCALL_DEFINE5()
2849 if (next->vm_file != vma->vm_file) in SYSCALL_DEFINE5()
2852 if (next->vm_flags != vma->vm_flags) in SYSCALL_DEFINE5()
2865 prot |= vma->vm_flags & VM_READ ? PROT_READ : 0; in SYSCALL_DEFINE5()
2866 prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0; in SYSCALL_DEFINE5()
2867 prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0; in SYSCALL_DEFINE5()
2871 if (vma->vm_flags & VM_LOCKED) in SYSCALL_DEFINE5()
2874 file = get_file(vma->vm_file); in SYSCALL_DEFINE5()
2875 ret = do_mmap(vma->vm_file, start, size, in SYSCALL_DEFINE5()
2899 static int do_brk_munmap(struct ma_state *mas, struct vm_area_struct *vma, in do_brk_munmap() argument
2903 struct mm_struct *mm = vma->vm_mm; in do_brk_munmap()
2907 ret = do_mas_align_munmap(mas, vma, mm, newbrk, oldbrk, uf, true); in do_brk_munmap()
2924 static int do_brk_flags(struct ma_state *mas, struct vm_area_struct *vma, in do_brk_flags() argument
2948 if (vma && vma->vm_end == addr && !vma_policy(vma) && in do_brk_flags()
2949 can_vma_merge_after(vma, flags, NULL, NULL, in do_brk_flags()
2951 mas_set_range(mas, vma->vm_start, addr + len - 1); in do_brk_flags()
2952 if (mas_preallocate(mas, vma, GFP_KERNEL)) in do_brk_flags()
2955 vma_adjust_trans_huge(vma, vma->vm_start, addr + len, 0); in do_brk_flags()
2956 if (vma->anon_vma) { in do_brk_flags()
2957 anon_vma_lock_write(vma->anon_vma); in do_brk_flags()
2958 anon_vma_interval_tree_pre_update_vma(vma); in do_brk_flags()
2960 vma->vm_end = addr + len; in do_brk_flags()
2961 vma->vm_flags |= VM_SOFTDIRTY; in do_brk_flags()
2962 mas_store_prealloc(mas, vma); in do_brk_flags()
2964 if (vma->anon_vma) { in do_brk_flags()
2965 anon_vma_interval_tree_post_update_vma(vma); in do_brk_flags()
2966 anon_vma_unlock_write(vma->anon_vma); in do_brk_flags()
2968 khugepaged_enter_vma(vma, flags); in do_brk_flags()
2973 vma = vm_area_alloc(mm); in do_brk_flags()
2974 if (!vma) in do_brk_flags()
2977 vma_set_anonymous(vma); in do_brk_flags()
2978 vma->vm_start = addr; in do_brk_flags()
2979 vma->vm_end = addr + len; in do_brk_flags()
2980 vma->vm_pgoff = addr >> PAGE_SHIFT; in do_brk_flags()
2981 vma->vm_flags = flags; in do_brk_flags()
2982 vma->vm_page_prot = vm_get_page_prot(flags); in do_brk_flags()
2983 mas_set_range(mas, vma->vm_start, addr + len - 1); in do_brk_flags()
2984 if (mas_store_gfp(mas, vma, GFP_KERNEL)) in do_brk_flags()
2989 perf_event_mmap(vma); in do_brk_flags()
2994 vma->vm_flags |= VM_SOFTDIRTY; in do_brk_flags()
2999 vm_area_free(vma); in do_brk_flags()
3008 struct vm_area_struct *vma = NULL; in vm_brk_flags() local
3036 vma = mas_prev(&mas, 0); in vm_brk_flags()
3037 ret = do_brk_flags(&mas, vma, addr, len, flags); in vm_brk_flags()
3062 struct vm_area_struct *vma; in exit_mmap() local
3073 vma = mas_find(&mas, ULONG_MAX); in exit_mmap()
3074 if (!vma) { in exit_mmap()
3085 unmap_vmas(&tlb, &mm->mm_mt, vma, 0, ULONG_MAX); in exit_mmap()
3094 free_pgtables(&tlb, &mm->mm_mt, vma, FIRST_USER_ADDRESS, in exit_mmap()
3104 if (vma->vm_flags & VM_ACCOUNT) in exit_mmap()
3105 nr_accounted += vma_pages(vma); in exit_mmap()
3106 remove_vma(vma); in exit_mmap()
3109 } while ((vma = mas_find(&mas, ULONG_MAX)) != NULL); in exit_mmap()
3123 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) in insert_vm_struct() argument
3125 unsigned long charged = vma_pages(vma); in insert_vm_struct()
3128 if (find_vma_intersection(mm, vma->vm_start, vma->vm_end)) in insert_vm_struct()
3131 if ((vma->vm_flags & VM_ACCOUNT) && in insert_vm_struct()
3147 if (vma_is_anonymous(vma)) { in insert_vm_struct()
3148 BUG_ON(vma->anon_vma); in insert_vm_struct()
3149 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT; in insert_vm_struct()
3152 if (vma_link(mm, vma)) { in insert_vm_struct()
3168 struct vm_area_struct *vma = *vmap; in copy_vma() local
3169 unsigned long vma_start = vma->vm_start; in copy_vma()
3170 struct mm_struct *mm = vma->vm_mm; in copy_vma()
3179 if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) { in copy_vma()
3188 new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags, in copy_vma()
3189 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in copy_vma()
3190 vma->vm_userfaultfd_ctx, anon_vma_name(vma)); in copy_vma()
3210 *vmap = vma = new_vma; in copy_vma()
3212 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); in copy_vma()
3214 new_vma = vm_area_dup(vma); in copy_vma()
3220 if (vma_dup_policy(vma, new_vma)) in copy_vma()
3222 if (anon_vma_clone(new_vma, vma)) in copy_vma()
3298 static void special_mapping_close(struct vm_area_struct *vma) in special_mapping_close() argument
3302 static const char *special_mapping_name(struct vm_area_struct *vma) in special_mapping_name() argument
3304 return ((struct vm_special_mapping *)vma->vm_private_data)->name; in special_mapping_name()
3320 static int special_mapping_split(struct vm_area_struct *vma, unsigned long addr) in special_mapping_split() argument
3348 struct vm_area_struct *vma = vmf->vma; in special_mapping_fault() local
3352 if (vma->vm_ops == &legacy_special_mapping_vmops) { in special_mapping_fault()
3353 pages = vma->vm_private_data; in special_mapping_fault()
3355 struct vm_special_mapping *sm = vma->vm_private_data; in special_mapping_fault()
3358 return sm->fault(sm, vmf->vma, vmf); in special_mapping_fault()
3383 struct vm_area_struct *vma; in __install_special_mapping() local
3386 vma = vm_area_alloc(mm); in __install_special_mapping()
3387 if (unlikely(vma == NULL)) in __install_special_mapping()
3390 vma->vm_start = addr; in __install_special_mapping()
3391 vma->vm_end = addr + len; in __install_special_mapping()
3393 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY; in __install_special_mapping()
3394 vma->vm_flags &= VM_LOCKED_CLEAR_MASK; in __install_special_mapping()
3395 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); in __install_special_mapping()
3397 vma->vm_ops = ops; in __install_special_mapping()
3398 vma->vm_private_data = priv; in __install_special_mapping()
3400 ret = insert_vm_struct(mm, vma); in __install_special_mapping()
3404 vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); in __install_special_mapping()
3406 perf_event_mmap(vma); in __install_special_mapping()
3409 return vma; in __install_special_mapping()
3412 vm_area_free(vma); in __install_special_mapping()
3417 bool vma_is_special_mapping(const struct vm_area_struct *vma, in vma_is_special_mapping() argument
3420 return vma->vm_private_data == sm && in vma_is_special_mapping()
3421 (vma->vm_ops == &special_mapping_vmops || in vma_is_special_mapping()
3422 vma->vm_ops == &legacy_special_mapping_vmops); in vma_is_special_mapping()
3447 struct vm_area_struct *vma = __install_special_mapping( in install_special_mapping() local
3451 return PTR_ERR_OR_ZERO(vma); in install_special_mapping()
3536 struct vm_area_struct *vma; in mm_take_all_locks() local
3544 mas_for_each(&mas, vma, ULONG_MAX) { in mm_take_all_locks()
3547 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3548 is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3549 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3553 mas_for_each(&mas, vma, ULONG_MAX) { in mm_take_all_locks()
3556 if (vma->vm_file && vma->vm_file->f_mapping && in mm_take_all_locks()
3557 !is_vm_hugetlb_page(vma)) in mm_take_all_locks()
3558 vm_lock_mapping(mm, vma->vm_file->f_mapping); in mm_take_all_locks()
3562 mas_for_each(&mas, vma, ULONG_MAX) { in mm_take_all_locks()
3565 if (vma->anon_vma) in mm_take_all_locks()
3566 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_take_all_locks()
3619 struct vm_area_struct *vma; in mm_drop_all_locks() local
3626 mas_for_each(&mas, vma, ULONG_MAX) { in mm_drop_all_locks()
3627 if (vma->anon_vma) in mm_drop_all_locks()
3628 list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) in mm_drop_all_locks()
3630 if (vma->vm_file && vma->vm_file->f_mapping) in mm_drop_all_locks()
3631 vm_unlock_mapping(vma->vm_file->f_mapping); in mm_drop_all_locks()