
Searched refs:pvmw (Results 1 – 9 of 9) sorted by relevance

/Linux-v4.19/mm/
page_vma_mapped.c
10 static inline bool not_found(struct page_vma_mapped_walk *pvmw) in not_found() argument
12 page_vma_mapped_walk_done(pvmw); in not_found()
16 static bool map_pte(struct page_vma_mapped_walk *pvmw) in map_pte() argument
18 pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); in map_pte()
19 if (!(pvmw->flags & PVMW_SYNC)) { in map_pte()
20 if (pvmw->flags & PVMW_MIGRATION) { in map_pte()
21 if (!is_swap_pte(*pvmw->pte)) in map_pte()
24 if (!pte_present(*pvmw->pte)) in map_pte()
28 pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); in map_pte()
29 spin_lock(pvmw->ptl); in map_pte()
[all …]
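
map_pte() above keys off the two walk flags: without PVMW_SYNC the PTE lock is taken lazily, and with PVMW_MIGRATION the walk matches migration (swap) entries instead of present PTEs. For reference, the flag definitions in v4.19's include/linux/rmap.h (comments paraphrased):

    #define PVMW_SYNC       (1 << 0)  /* avoid racy checks: always take the PTE lock */
    #define PVMW_MIGRATION  (1 << 1)  /* match migration entries, not present PTEs */
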
rmap.c
757 struct page_vma_mapped_walk pvmw = { in page_referenced_one() local
764 while (page_vma_mapped_walk(&pvmw)) { in page_referenced_one()
765 address = pvmw.address; in page_referenced_one()
768 page_vma_mapped_walk_done(&pvmw); in page_referenced_one()
773 if (pvmw.pte) { in page_referenced_one()
775 pvmw.pte)) { in page_referenced_one()
789 pvmw.pmd)) in page_referenced_one()
886 struct page_vma_mapped_walk pvmw = { in page_mkclean_one() local
902 while (page_vma_mapped_walk(&pvmw)) { in page_mkclean_one()
906 cstart = address = pvmw.address; in page_mkclean_one()
[all …]
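
page_referenced_one() above is the canonical loop-style caller: each true return from page_vma_mapped_walk() yields one mapping of the page in this VMA, either a PTE or, for a PMD-mapped THP, a bare PMD with pvmw.pte left NULL. A minimal sketch of that pattern, using the young-bit helpers v4.19's rmap.c calls (accounting and the VM_LOCKED early exit omitted):

    struct page_vma_mapped_walk pvmw = {
            .page = page,
            .vma = vma,
            .address = address,
    };
    int referenced = 0;

    while (page_vma_mapped_walk(&pvmw)) {
            address = pvmw.address;
            if (pvmw.pte) {
                    /* PTE-mapped: test-and-clear the accessed bit */
                    if (ptep_clear_flush_young_notify(vma, address, pvmw.pte))
                            referenced++;
            } else {
                    /* pte == NULL: the walk stopped at a PMD-mapped THP */
                    if (pmdp_clear_flush_young_notify(vma, address, pvmw.pmd))
                            referenced++;
            }
    }
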
page_idle.c
58 struct page_vma_mapped_walk pvmw = { in page_idle_clear_pte_refs_one() local
65 while (page_vma_mapped_walk(&pvmw)) { in page_idle_clear_pte_refs_one()
66 addr = pvmw.address; in page_idle_clear_pte_refs_one()
67 if (pvmw.pte) { in page_idle_clear_pte_refs_one()
72 if (ptep_clear_young_notify(vma, addr, pvmw.pte)) in page_idle_clear_pte_refs_one()
75 if (pmdp_clear_young_notify(vma, addr, pvmw.pmd)) in page_idle_clear_pte_refs_one()
ksm.c
1039 struct page_vma_mapped_walk pvmw = { in write_protect_page() local
1048 pvmw.address = page_address_in_vma(page, vma); in write_protect_page()
1049 if (pvmw.address == -EFAULT) in write_protect_page()
1054 mmun_start = pvmw.address; in write_protect_page()
1055 mmun_end = pvmw.address + PAGE_SIZE; in write_protect_page()
1058 if (!page_vma_mapped_walk(&pvmw)) in write_protect_page()
1060 if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?")) in write_protect_page()
1063 if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || in write_protect_page()
1064 (pte_protnone(*pvmw.pte) && pte_savedwrite(*pvmw.pte)) || in write_protect_page()
1069 flush_cache_page(vma, pvmw.address, page_to_pfn(page)); in write_protect_page()
[all …]
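
write_protect_page() is the single-hit variant: a KSM candidate page is mapped at most once in the VMA, so the walk is called once rather than looped, after computing the address with page_address_in_vma(). A sketch of that shape (the write-protect body summarized in a comment; mmu-notifier bracketing omitted):

    pvmw.address = page_address_in_vma(page, vma);
    if (pvmw.address == -EFAULT)
            goto out;

    if (!page_vma_mapped_walk(&pvmw))       /* no loop: at most one mapping */
            goto out;
    if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
            goto out_unlock;

    /* ... ptep_clear_flush(), pte_wrprotect(), set_pte_at_notify() ... */

    out_unlock:
            page_vma_mapped_walk_done(&pvmw);
    out:
            return err;
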
migrate.c
206 struct page_vma_mapped_walk pvmw = { in remove_migration_pte() local
217 while (page_vma_mapped_walk(&pvmw)) { in remove_migration_pte()
221 new = page - pvmw.page->index + in remove_migration_pte()
222 linear_page_index(vma, pvmw.address); in remove_migration_pte()
226 if (!pvmw.pte) { in remove_migration_pte()
228 remove_migration_pmd(&pvmw, new); in remove_migration_pte()
235 if (pte_swp_soft_dirty(*pvmw.pte)) in remove_migration_pte()
241 entry = pte_to_swp_entry(*pvmw.pte); in remove_migration_pte()
260 set_huge_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte); in remove_migration_pte()
262 hugepage_add_anon_rmap(new, vma, pvmw.address); in remove_migration_pte()
[all …]
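
remove_migration_pte() is the only caller above that needs non-default walk flags. Its initializer body is elided at line 206; as best I recall the v4.19 source, it asks for both synchronous locking and migration-entry matching — treat this as a hedged reconstruction, not a quote:

    struct page_vma_mapped_walk pvmw = {
            .page = old,                            /* page being migrated away from */
            .vma = vma,
            .address = addr,
            .flags = PVMW_SYNC | PVMW_MIGRATION,    /* match migration entries, hold ptl */
    };
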
huge_memory.c
2873 void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, in set_pmd_migration_entry() argument
2876 struct vm_area_struct *vma = pvmw->vma; in set_pmd_migration_entry()
2878 unsigned long address = pvmw->address; in set_pmd_migration_entry()
2883 if (!(pvmw->pmd && !pvmw->pte)) in set_pmd_migration_entry()
2887 pmdval = *pvmw->pmd; in set_pmd_migration_entry()
2888 pmdp_invalidate(vma, address, pvmw->pmd); in set_pmd_migration_entry()
2895 set_pmd_at(mm, address, pvmw->pmd, pmdswp); in set_pmd_migration_entry()
2900 void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) in remove_migration_pmd() argument
2902 struct vm_area_struct *vma = pvmw->vma; in remove_migration_pmd()
2904 unsigned long address = pvmw->address; in remove_migration_pmd()
[all …]
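
set_pmd_migration_entry() handles the case the PTE-level callers warn about: the walk stopped at a PMD-mapped THP (pvmw->pmd set, pvmw->pte NULL). A sketch of the shape, with the steps elided between lines 2888 and 2895 summarized rather than quoted (make_migration_entry() and swp_entry_to_pmd() are the v4.19 helpers; dirty and soft-dirty propagation omitted):

    if (!(pvmw->pmd && !pvmw->pte))
            return;                         /* only act on PMD-mapped THPs */

    flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
    pmdval = *pvmw->pmd;
    pmdp_invalidate(vma, address, pvmw->pmd);
    /* encode the page as a migration entry and install it in the PMD */
    entry = make_migration_entry(page, pmd_write(pmdval));
    pmdswp = swp_entry_to_pmd(entry);
    set_pmd_at(mm, address, pvmw->pmd, pmdswp);
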
/Linux-v4.19/include/linux/
rmap.h
215 static inline void page_vma_mapped_walk_done(struct page_vma_mapped_walk *pvmw) in page_vma_mapped_walk_done() argument
217 if (pvmw->pte) in page_vma_mapped_walk_done()
218 pte_unmap(pvmw->pte); in page_vma_mapped_walk_done()
219 if (pvmw->ptl) in page_vma_mapped_walk_done()
220 spin_unlock(pvmw->ptl); in page_vma_mapped_walk_done()
223 bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw);
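
Lines 217–220 show why the walk needs an explicit terminator: each successful step leaves pvmw->pte mapped and pvmw->ptl held. A false return from page_vma_mapped_walk() cleans up internally, but any early exit from the loop must call page_vma_mapped_walk_done() first, as page_referenced_one() does at rmap.c line 768 above. A minimal sketch of the early-exit shape (stop_early is a hypothetical condition):

    while (page_vma_mapped_walk(&pvmw)) {
            if (stop_early) {                         /* hypothetical */
                    page_vma_mapped_walk_done(&pvmw); /* pte_unmap + spin_unlock */
                    break;
            }
            /* ... per-mapping work ... */
    }
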
swapops.h
265 extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
268 extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
296 static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw, in set_pmd_migration_entry() argument
302 static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, in remove_migration_pmd() argument
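
Lines 265–268 and 296–302 are the two halves of a config guard: the extern prototypes bind to the real implementations in huge_memory.c when the architecture supports THP migration, while the inline stubs cover kernels built without it. If memory serves for v4.19, the guard is CONFIG_ARCH_ENABLE_THP_MIGRATION and the stub bodies are just BUILD_BUG(), making any reachable call a compile-time error. Schematically:

    #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
    extern void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                    struct page *page);
    extern void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                    struct page *new);
    #else
    static inline void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
                    struct page *page)
    {
            BUILD_BUG();    /* no caller should be compiled in */
    }
    static inline void remove_migration_pmd(struct page_vma_mapped_walk *pvmw,
                    struct page *new)
    {
            BUILD_BUG();
    }
    #endif
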
/Linux-v4.19/kernel/events/
uprobes.c
158 struct page_vma_mapped_walk pvmw = { in __replace_page() local
181 if (!page_vma_mapped_walk(&pvmw)) { in __replace_page()
185 VM_BUG_ON_PAGE(addr != pvmw.address, old_page); in __replace_page()
197 flush_cache_page(vma, addr, pte_pfn(*pvmw.pte)); in __replace_page()
198 ptep_clear_flush_notify(vma, addr, pvmw.pte); in __replace_page()
199 set_pte_at_notify(mm, addr, pvmw.pte, in __replace_page()
205 page_vma_mapped_walk_done(&pvmw); in __replace_page()
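
__replace_page() is the single-PTE case: uprobes locates the one mapping of the old page and swaps in its breakpoint copy under the PTE lock. The set_pte_at_notify() call at line 199 is truncated above; a hedged sketch of the replace step, assuming the new PTE is built with mk_pte(new_page, vma->vm_page_prot) in the usual idiom — treat that argument list as an assumption, not a quote:

    flush_cache_page(vma, addr, pte_pfn(*pvmw.pte));
    ptep_clear_flush_notify(vma, addr, pvmw.pte);
    set_pte_at_notify(mm, addr, pvmw.pte,
                      mk_pte(new_page, vma->vm_page_prot));  /* assumed arguments */

    page_vma_mapped_walk_done(&pvmw);                        /* drop ptl, unmap pte */
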