Lines Matching +full:tlb +full:- +full:split

// SPDX-License-Identifier: GPL-2.0

 * struct wp_walk - Private struct for pagetable walk callbacks
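The matched accesses below (wpwalk->range, ->tlbflush_start, ->tlbflush_end, ->total) suggest roughly this layout; a sketch, with member ordering and comments inferred rather than taken from the file:

	struct wp_walk {
		struct mmu_notifier_range range;	/* Range for mmu notifiers */
		unsigned long tlbflush_start;		/* Address of first modified pte */
		unsigned long tlbflush_end;		/* Address of last modified pte + 1 */
		unsigned long total;			/* Total number of modified ptes */
	};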
 * wp_pte - Write-protect a pte
 * ...
 * The function write-protects a pte and records the range in
 * virtual address space of touched ptes for efficient range TLB flushes.
In wp_pte():
	struct wp_walk *wpwalk = walk->private;
	...
	pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);
	...
	ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);
	wpwalk->total++;
	wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
	wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
	...
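Assembled, the callback plausibly reads as below; a sketch based on the matched fragments and the standard mm_walk_ops pte_entry signature. The pte_write() gate, pte_wrprotect() and the addr + PAGE_SIZE continuation are inferred, not shown by the matches:

	static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end,
			  struct mm_walk *walk)
	{
		struct wp_walk *wpwalk = walk->private;
		pte_t ptent = *pte;

		if (pte_write(ptent)) {
			pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

			ptent = pte_wrprotect(old_pte);
			ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

			/* Count the pte and grow the range staged for the TLB flush */
			wpwalk->total++;
			wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
			wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
						   addr + PAGE_SIZE);
		}

		return 0;
	}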
 * struct clean_walk - Private struct for the clean_record_pte function.
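A plausible layout, inferred from the cwalk-> accesses in the fragment below; the embedded struct wp_walk base (so walk->private can serve both levels) is an assumption:

	struct clean_walk {
		struct wp_walk base;	/* walk->private points at this member */
		pgoff_t bitmap_pgoff;	/* Page offset of the first bit in @bitmap */
		unsigned long *bitmap;	/* One bit per page offset in the range */
		pgoff_t start;		/* First bit set, relative to @bitmap_pgoff */
		pgoff_t end;		/* Last bit set + 1 */
	};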
 * clean_record_pte - Clean a pte and record its address space offset in a
 * ...
 * virtual address space of touched ptes for efficient TLB flushes.
In clean_record_pte():
	struct wp_walk *wpwalk = walk->private;
	...
	pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
		walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
	pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);
	...
	ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

	wpwalk->total++;
	wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
	wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
	...
	__set_bit(pgoff, cwalk->bitmap);
	cwalk->start = min(cwalk->start, pgoff);
	cwalk->end = max(cwalk->end, pgoff + 1);
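As a whole the callback plausibly reads as follows; the pte_dirty() gate, pte_mkclean() and the container_of() helper for reaching the enclosing struct clean_walk are assumptions filled in from context:

	#define to_clean_walk(_wpwalk) container_of(_wpwalk, struct clean_walk, base)

	static int clean_record_pte(pte_t *pte, unsigned long addr,
				    unsigned long end, struct mm_walk *walk)
	{
		struct wp_walk *wpwalk = walk->private;
		struct clean_walk *cwalk = to_clean_walk(wpwalk);
		pte_t ptent = *pte;

		if (pte_dirty(ptent)) {
			/* Page offset in the address_space, relative to the bitmap */
			pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) +
				walk->vma->vm_pgoff - cwalk->bitmap_pgoff;
			pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte);

			ptent = pte_mkclean(old_pte);
			ptep_modify_prot_commit(walk->vma, addr, pte, old_pte, ptent);

			/* Record the virtual span for the range TLB flush */
			wpwalk->total++;
			wpwalk->tlbflush_start = min(wpwalk->tlbflush_start, addr);
			wpwalk->tlbflush_end = max(wpwalk->tlbflush_end,
						   addr + PAGE_SIZE);

			/* Record the cleaned page in the caller's bitmap */
			__set_bit(pgoff, cwalk->bitmap);
			cwalk->start = min(cwalk->start, pgoff);
			cwalk->end = max(cwalk->end, pgoff + 1);
		}

		return 0;
	}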
 * wp_clean_pmd_entry - The pagewalk pmd callback.
 * ...
 * Dirty-tracking should take place on the PTE level, so
 * ...
 * Furthermore, never split huge pmds, since that currently
 * ...

In wp_clean_pmd_entry():
	walk->action = ACTION_AGAIN;
	...
	walk->action = ACTION_CONTINUE;
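The two matched action assignments fit the usual pmd-callback pattern: retry when the pmd was racing toward none, and step over (never split) a huge pmd. A sketch with the surrounding control flow filled in as an assumption; helper names vary by kernel version (pmd_read_atomic() in older trees, pmdp_get_lockless() in newer ones):

	static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr,
				      unsigned long end, struct mm_walk *walk)
	{
		pmd_t pmdval = pmd_read_atomic(pmd);

		/* Stable pte level underneath: let the walk descend normally */
		if (!pmd_trans_unstable(&pmdval))
			return 0;

		if (pmd_none(pmdval)) {
			/* Raced with a clearing operation: re-examine the entry */
			walk->action = ACTION_AGAIN;
			return 0;
		}

		/* Huge pmd, present or migrated: warn if write-enabled, never split */
		walk->action = ACTION_CONTINUE;
		if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval))
			WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval));

		return 0;
	}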
 * wp_clean_pud_entry - The pagewalk pud callback.
 * ...
 * Dirty-tracking should take place on the PTE level, so
 * ...
 * Furthermore, never split huge puds, since that currently
 * ...

In wp_clean_pud_entry():
	walk->action = ACTION_AGAIN;
	...
	walk->action = ACTION_CONTINUE;
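The pud callback presumably mirrors the pmd one a level up; the same caveats apply, and everything outside the two matched assignments is an assumption:

	static int wp_clean_pud_entry(pud_t *pud, unsigned long addr,
				      unsigned long end, struct mm_walk *walk)
	{
		pud_t pudval = READ_ONCE(*pud);

		if (!pud_trans_unstable(&pudval))
			return 0;

		if (pud_none(pudval)) {
			walk->action = ACTION_AGAIN;
			return 0;
		}

		/* Huge pud: warn if write-enabled or dirty, never split */
		walk->action = ACTION_CONTINUE;
		if (pud_trans_huge(pudval) || pud_devmap(pudval))
			WARN_ON(pud_write(pudval) || pud_dirty(pudval));

		return 0;
	}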
 * wp_clean_pre_vma - The pagewalk pre_vma callback.
 * ...
 * The pre_vma callback performs the cache flush, stages the tlb flush
 * ...

In wp_clean_pre_vma():
	struct wp_walk *wpwalk = walk->private;

	wpwalk->tlbflush_start = end;
	wpwalk->tlbflush_end = start;

	mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
				walk->vma, walk->mm, start, end);
	mmu_notifier_invalidate_range_start(&wpwalk->range);
	flush_cache_range(walk->vma, start, end);
	...
	inc_tlb_flush_pending(walk->mm);
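The matches cover nearly the whole callback; a sketch with the inferred pre_vma signature. Note the deliberately inverted initialization: starting with tlbflush_start = end and tlbflush_end = start lets the min()/max() updates in the pte callbacks shrink-wrap the flush range to the ptes actually touched, and leaves end <= start when nothing was touched:

	static int wp_clean_pre_vma(unsigned long start, unsigned long end,
				    struct mm_walk *walk)
	{
		struct wp_walk *wpwalk = walk->private;

		/* Inverted on purpose; pte callbacks shrink-wrap the real span */
		wpwalk->tlbflush_start = end;
		wpwalk->tlbflush_end = start;

		mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
					walk->vma, walk->mm, start, end);
		mmu_notifier_invalidate_range_start(&wpwalk->range);
		flush_cache_range(walk->vma, start, end);

		/* Tell concurrent unmappers that a TLB flush is pending for this mm */
		inc_tlb_flush_pending(walk->mm);

		return 0;
	}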
 * wp_clean_post_vma - The pagewalk post_vma callback.
 * ...
 * The post_vma callback performs the tlb flush and calls necessary mmu
 * ...

In wp_clean_post_vma():
	struct wp_walk *wpwalk = walk->private;

	if (mm_tlb_flush_nested(walk->mm))
		flush_tlb_range(walk->vma, wpwalk->range.start,
				wpwalk->range.end);
	else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
		flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
				wpwalk->tlbflush_end);

	mmu_notifier_invalidate_range_end(&wpwalk->range);
	dec_tlb_flush_pending(walk->mm);
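Only the signature is missing from the matches; a sketch assuming the void post_vma signature. When mm_tlb_flush_nested() reports another pending flush, the shrink-wrapped span can no longer be trusted, so the whole staged range is flushed instead:

	static void wp_clean_post_vma(struct mm_walk *walk)
	{
		struct wp_walk *wpwalk = walk->private;

		if (mm_tlb_flush_nested(walk->mm))
			/* Concurrent flushes pending: flush the full staged range */
			flush_tlb_range(walk->vma, wpwalk->range.start,
					wpwalk->range.end);
		else if (wpwalk->tlbflush_end > wpwalk->tlbflush_start)
			/* Flush only the span the pte callbacks actually touched */
			flush_tlb_range(walk->vma, wpwalk->tlbflush_start,
					wpwalk->tlbflush_end);

		mmu_notifier_invalidate_range_end(&wpwalk->range);
		dec_tlb_flush_pending(walk->mm);
	}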
 * wp_clean_test_walk - The pagewalk test_walk callback.
 * ...
 * Won't perform dirty-tracking on COW, read-only or HUGETLB vmas.

In wp_clean_test_walk():
	unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

	/* Skip non-applicable VMAs */
	...
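The flag test itself did not match the query; a sketch of what "skip COW, read-only or HUGETLB vmas" plausibly translates to, with the exact flag combination an assumption (test_walk returns nonzero to skip a vma):

	static int wp_clean_test_walk(unsigned long start, unsigned long end,
				      struct mm_walk *walk)
	{
		unsigned long vm_flags = READ_ONCE(walk->vma->vm_flags);

		/* Skip non-applicable VMAs: require shared + writable, no hugetlb */
		if ((vm_flags & (VM_SHARED | VM_MAYWRITE | VM_HUGETLB)) !=
		    (VM_SHARED | VM_MAYWRITE))
			return 1;

		return 0;
	}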
 * wp_shared_mapping_range - Write-protect all ptes in an address space range
 * ...
 * Note: This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * encountering transhuge write-enabled entries, though, and can easily be
 * ...
 * Return: The number of ptes actually write-protected. Note that
 * already write-protected ptes are not counted.
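Tying the callbacks together: a sketch of the ops table and the entry point, assuming the walk is driven by walk_page_mapping() under the mapping's i_mmap lock; the locking and the exact driver helper are assumptions:

	static const struct mm_walk_ops wp_walk_ops = {
		.pte_entry = wp_pte,
		.pmd_entry = wp_clean_pmd_entry,
		.pud_entry = wp_clean_pud_entry,
		.test_walk = wp_clean_test_walk,
		.pre_vma = wp_clean_pre_vma,
		.post_vma = wp_clean_post_vma,
	};

	unsigned long wp_shared_mapping_range(struct address_space *mapping,
					      pgoff_t first_index, pgoff_t nr)
	{
		struct wp_walk wpwalk = { .total = 0 };

		i_mmap_lock_read(mapping);
		WARN_ON(walk_page_mapping(mapping, first_index, nr, &wp_walk_ops,
					  &wpwalk));
		i_mmap_unlock_read(mapping);

		return wpwalk.total;
	}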
 * clean_record_shared_mapping_range - Clean and record all ptes in an
 * ...
 * additional are added, it first needs to write-protect the address-space
 * ...
 * pfn_mkwrite(). And then after a TLB flush following the write-protection
 * ...
 * This function currently skips transhuge page-table entries, since
 * it's intended for dirty-tracking on the PTE level. It will warn on
 * ...
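A sketch of the entry point this kernel-doc describes, mirroring wp_shared_mapping_range but with the bitmap bookkeeping. The clean_walk_ops table (the same callbacks with clean_record_pte as .pte_entry) and the seeding of start/end when no bit is set yet are assumptions:

	unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
							pgoff_t first_index, pgoff_t nr,
							pgoff_t bitmap_pgoff,
							unsigned long *bitmap,
							pgoff_t *start, pgoff_t *end)
	{
		bool none_set = (*start >= *end);
		struct clean_walk cwalk = {
			.base = { .total = 0 },
			.bitmap_pgoff = bitmap_pgoff,
			.bitmap = bitmap,
			/* Seed so the min()/max() updates work from a clean slate */
			.start = none_set ? nr : *start,
			.end = none_set ? 0 : *end,
		};

		i_mmap_lock_read(mapping);
		WARN_ON(walk_page_mapping(mapping, first_index, nr, &clean_walk_ops,
					  &cwalk.base));
		i_mmap_unlock_read(mapping);

		*start = cwalk.start;
		*end = cwalk.end;

		return cwalk.base.total;
	}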