Lines matching refs: vma (identifier cross-references in the Linux kernel's mm/mprotect.c)
38 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd, in change_pte_range() argument
61 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in change_pte_range()
64 if (prot_numa && !(vma->vm_flags & VM_SHARED) && in change_pte_range()
65 atomic_read(&vma->vm_mm->mm_users) == 1) in change_pte_range()
68 flush_tlb_batched_pending(vma->vm_mm); in change_pte_range()
83 page = vm_normal_page(vma, addr, oldpte); in change_pte_range()
88 if (is_cow_mapping(vma->vm_flags) && in change_pte_range()
112 oldpte = ptep_modify_prot_start(vma, addr, pte); in change_pte_range()
120 !(vma->vm_flags & VM_SOFTDIRTY))) { in change_pte_range()
123 ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent); in change_pte_range()
138 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
152 set_pte_at(vma->vm_mm, addr, pte, newpte); in change_pte_range()
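The change_pte_range() lines above show the kernel's standard locked PTE-walk pattern. Below is a minimal sketch of that pattern, assuming a ~v5.x kernel; the name sketch_change_ptes() is ours, and the NUMA-hinting skip for single-threaded private mappings, the swap-entry rewrites, and the dirty accounting visible in the real function are all omitted.

#include <linux/mm.h>

static unsigned long sketch_change_ptes(struct vm_area_struct *vma,
					pmd_t *pmd, unsigned long addr,
					unsigned long end, pgprot_t newprot)
{
	unsigned long pages = 0;
	spinlock_t *ptl;
	pte_t *pte;

	/* Map the PTE page and take its page-table lock in one step. */
	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	do {
		pte_t oldpte = *pte;

		if (pte_present(oldpte)) {
			pte_t ptent;

			/* The start/commit bracket lets architectures
			 * batch or virtualize the update of a live PTE. */
			oldpte = ptep_modify_prot_start(vma, addr, pte);
			ptent = pte_modify(oldpte, newprot);
			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
			pages++;
		}
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap_unlock(pte - 1, ptl);

	return pages;
}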
164 static inline unsigned long change_pmd_range(struct vm_area_struct *vma, in change_pmd_range() argument
189 vma, vma->vm_mm, addr, end); in change_pmd_range()
195 __split_huge_pmd(vma, pmd, addr, false, NULL); in change_pmd_range()
197 int nr_ptes = change_huge_pmd(vma, pmd, addr, in change_pmd_range()
212 this_pages = change_pte_range(vma, pmd, addr, next, newprot, in change_pmd_range()
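At the PMD level (change_pmd_range()), transparent huge pages need special casing: a huge PMD fully covered by the range can be changed in one shot, while a partially covered one must be split back to PTEs first. A hedged sketch, continuing the file above; the trailing change_huge_pmd() argument (prot_numa on older kernels, cp_flags on newer ones) is passed as 0 here, and the nr_ptes return convention follows the listing.

#include <linux/huge_mm.h>	/* HPAGE_PMD_SIZE, __split_huge_pmd() */

static unsigned long sketch_change_pmds(struct vm_area_struct *vma,
					pud_t *pud, unsigned long addr,
					unsigned long end, pgprot_t newprot)
{
	unsigned long pages = 0, next;
	pmd_t *pmd = pmd_offset(pud, addr);

	do {
		next = pmd_addr_end(addr, end);

		if (pmd_trans_huge(*pmd)) {
			if (next - addr != HPAGE_PMD_SIZE) {
				/* Partial stride: demote the huge PMD to
				 * PTEs and fall through to the PTE loop. */
				__split_huge_pmd(vma, pmd, addr, false, NULL);
			} else {
				int nr_ptes = change_huge_pmd(vma, pmd, addr,
							      newprot, 0);
				if (nr_ptes) {
					if (nr_ptes == HPAGE_PMD_NR)
						pages += HPAGE_PMD_NR;
					continue; /* whole huge PMD handled */
				}
				/* fall through: the huge PMD just split */
			}
		} else if (pmd_none_or_clear_bad(pmd)) {
			continue;	/* nothing mapped in this stride */
		}

		pages += sketch_change_ptes(vma, pmd, addr, next, newprot);
	} while (pmd++, addr = next, addr != end);

	return pages;
}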
227 static inline unsigned long change_pud_range(struct vm_area_struct *vma, in change_pud_range() argument
240 pages += change_pmd_range(vma, pud, addr, next, newprot, in change_pud_range()
247 static inline unsigned long change_p4d_range(struct vm_area_struct *vma, in change_p4d_range() argument
260 pages += change_pud_range(vma, p4d, addr, next, newprot, in change_p4d_range()
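change_pud_range() and change_p4d_range() are near-identical boilerplate: step through the range in PUD- or P4D-sized strides, skip empty entries, and recurse one level down. The PUD level of that pattern looks roughly like this (the P4D level swaps in p4d_offset()/p4d_addr_end()):

static unsigned long sketch_change_puds(struct vm_area_struct *vma,
					p4d_t *p4d, unsigned long addr,
					unsigned long end, pgprot_t newprot)
{
	unsigned long pages = 0, next;
	pud_t *pud = pud_offset(p4d, addr);

	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;	/* nothing mapped in this stride */
		pages += sketch_change_pmds(vma, pud, addr, next, newprot);
	} while (pud++, addr = next, addr != end);

	return pages;
}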
267 static unsigned long change_protection_range(struct vm_area_struct *vma, in change_protection_range() argument
271 struct mm_struct *mm = vma->vm_mm; in change_protection_range()
279 flush_cache_range(vma, addr, end); in change_protection_range()
285 pages += change_p4d_range(vma, pgd, addr, next, newprot, in change_protection_range()
291 flush_tlb_range(vma, start, end); in change_protection_range()
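change_protection_range() owns the top of the walk and the flush discipline: caches are flushed before any PTE changes (needed on virtually tagged caches), and the TLB is flushed once at the end, and only if something was actually rewritten. A sketch under the same assumptions; for brevity it folds the P4D level into the PGD loop, which matches 4-level page-table configurations where p4d_offset() is a no-op:

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

static unsigned long sketch_change_range(struct vm_area_struct *vma,
					 unsigned long addr, unsigned long end,
					 pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long start = addr, next, pages = 0;

	BUG_ON(addr >= end);
	flush_cache_range(vma, addr, end);	/* write back before PTEs change */
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		/* The P4D level repeats the same stride loop one level up;
		 * it is folded away here. */
		pages += sketch_change_puds(vma, p4d_offset(pgd, addr),
					    addr, next, newprot);
	} while (pgd++, addr = next, addr != end);

	/* Only pay for a TLB flush if some entry actually changed. */
	if (pages)
		flush_tlb_range(vma, start, end);

	return pages;
}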
297 unsigned long change_protection(struct vm_area_struct *vma, unsigned long start, in change_protection() argument
303 if (is_vm_hugetlb_page(vma)) in change_protection()
304 pages = hugetlb_change_protection(vma, start, end, newprot); in change_protection()
306 pages = change_protection_range(vma, start, end, newprot, dirty_accountable, prot_numa); in change_protection()
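change_protection() itself is just a dispatcher: hugetlb VMAs are mapped by huge entries at higher page-table levels, so they get their own walker instead of the generic one. A sketch of that split, with our sketch_change_range() standing in for change_protection_range():

#include <linux/hugetlb.h>

static unsigned long sketch_change_protection(struct vm_area_struct *vma,
					      unsigned long start,
					      unsigned long end,
					      pgprot_t newprot)
{
	if (is_vm_hugetlb_page(vma))
		return hugetlb_change_protection(vma, start, end, newprot);
	return sketch_change_range(vma, start, end, newprot);
}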
339 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, in mprotect_fixup() argument
342 struct mm_struct *mm = vma->vm_mm; in mprotect_fixup()
343 unsigned long oldflags = vma->vm_flags; in mprotect_fixup()
351 *pprev = vma; in mprotect_fixup()
361 (vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) && in mprotect_fixup()
394 pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT); in mprotect_fixup()
396 vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma), in mprotect_fixup()
397 vma->vm_userfaultfd_ctx); in mprotect_fixup()
399 vma = *pprev; in mprotect_fixup()
400 VM_WARN_ON((vma->vm_flags ^ newflags) & ~VM_SOFTDIRTY); in mprotect_fixup()
404 *pprev = vma; in mprotect_fixup()
406 if (start != vma->vm_start) { in mprotect_fixup()
407 error = split_vma(mm, vma, start, 1); in mprotect_fixup()
412 if (end != vma->vm_end) { in mprotect_fixup()
413 error = split_vma(mm, vma, end, 0); in mprotect_fixup()
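Before mprotect_fixup() touches any page tables it reshapes the VMA tree: it first tries to merge [start, end) with its neighbours under the new flags (the vma_merge() call above), and only if that fails does it split the VMA so the region is covered exactly. A sketch of that step, assuming the pre-maple-tree vma_merge() signature the listing shows; sketch_adjust_vma() is our name, not the kernel's.

static int sketch_adjust_vma(struct mm_struct *mm,
			     struct vm_area_struct **vmap,
			     struct vm_area_struct **pprev,
			     unsigned long start, unsigned long end,
			     unsigned long newflags)
{
	struct vm_area_struct *vma = *vmap;
	pgoff_t pgoff = vma->vm_pgoff +
			((start - vma->vm_start) >> PAGE_SHIFT);
	struct vm_area_struct *merged;
	int error;

	merged = vma_merge(mm, *pprev, start, end, newflags,
			   vma->anon_vma, vma->vm_file, pgoff,
			   vma_policy(vma), vma->vm_userfaultfd_ctx);
	if (merged) {
		*vmap = *pprev = merged;
		return 0;
	}

	*pprev = vma;
	if (start != vma->vm_start) {
		/* New VMA takes [vm_start, start); vma keeps the rest. */
		error = split_vma(mm, vma, start, 1);
		if (error)
			return error;
	}
	if (end != vma->vm_end) {
		/* New VMA takes [end, vm_end); vma keeps [.., end). */
		error = split_vma(mm, vma, end, 0);
		if (error)
			return error;
	}
	return 0;
}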
423 vma->vm_flags = newflags; in mprotect_fixup()
424 dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot); in mprotect_fixup()
425 vma_set_page_prot(vma); in mprotect_fixup()
427 change_protection(vma, start, end, vma->vm_page_prot, in mprotect_fixup()
436 populate_vma_page_range(vma, start, end, NULL); in mprotect_fixup()
441 perf_event_mmap(vma); in mprotect_fixup()
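Once the VMA covers exactly [start, end), the commit step publishes the new flags, recomputes vm_page_prot (vma_wants_writenotify() can force read-only PTEs on writable shared mappings so the first write faults and can be accounted), rewrites the page tables, and notifies perf. A sketch of just that tail; the real function also repopulates mlocked ranges and updates RSS accounting.

#include <linux/perf_event.h>

static void sketch_commit_prot(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end,
			       unsigned long newflags)
{
	int dirty_accountable;

	vma->vm_flags = newflags;
	/* Write-notify may demand read-only PTEs even though the VMA
	 * itself is writable. */
	dirty_accountable = vma_wants_writenotify(vma, vma->vm_page_prot);
	vma_set_page_prot(vma);

	change_protection(vma, start, end, vma->vm_page_prot,
			  dirty_accountable, 0);
	perf_event_mmap(vma);
}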
456 struct vm_area_struct *vma, *prev; in do_mprotect_pkey() local
492 vma = find_vma(current->mm, start); in do_mprotect_pkey()
494 if (!vma) in do_mprotect_pkey()
496 prev = vma->vm_prev; in do_mprotect_pkey()
498 if (vma->vm_start >= end) in do_mprotect_pkey()
500 start = vma->vm_start; in do_mprotect_pkey()
502 if (!(vma->vm_flags & VM_GROWSDOWN)) in do_mprotect_pkey()
505 if (vma->vm_start > start) in do_mprotect_pkey()
508 end = vma->vm_end; in do_mprotect_pkey()
510 if (!(vma->vm_flags & VM_GROWSUP)) in do_mprotect_pkey()
514 if (start > vma->vm_start) in do_mprotect_pkey()
515 prev = vma; in do_mprotect_pkey()
525 if (rier && (vma->vm_flags & VM_MAYEXEC)) in do_mprotect_pkey()
536 new_vma_pkey = arch_override_mprotect_pkey(vma, prot, pkey); in do_mprotect_pkey()
538 newflags |= (vma->vm_flags & ~mask_off_old_flags); in do_mprotect_pkey()
546 error = security_file_mprotect(vma, reqprot, prot); in do_mprotect_pkey()
550 tmp = vma->vm_end; in do_mprotect_pkey()
553 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags); in do_mprotect_pkey()
563 vma = prev->vm_next; in do_mprotect_pkey()
564 if (!vma || vma->vm_start != nstart) { in do_mprotect_pkey()
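do_mprotect_pkey() drives the whole thing: find the first VMA at or after start, then walk forward, fixing up one VMA (or the covered part of it) at a time, and fail with -ENOMEM on any hole. A sketch of that loop, assuming the pre-maple-tree vm_prev/vm_next links the listing shows; the pkey override, VM_GROWSDOWN/VM_GROWSUP handling, and LSM checks are omitted.

static int sketch_mprotect_walk(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long newflags)
{
	struct vm_area_struct *vma, *prev;
	unsigned long nstart, tmp;
	int error;

	vma = find_vma(mm, start);
	if (!vma || vma->vm_start >= end)
		return -ENOMEM;
	prev = vma->vm_prev;

	for (nstart = start; ; ) {
		/* Clamp to this VMA; a later VMA handles the rest. */
		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			return error;
		nstart = tmp;
		if (nstart >= end)
			return 0;
		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart)
			return -ENOMEM;	/* hole in the address space */
	}
}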