Lines matching refs: vma
136 m_next_vma(struct proc_maps_private *priv, struct vm_area_struct *vma) in m_next_vma() argument
138 if (vma == priv->tail_vma) in m_next_vma()
140 return vma->vm_next ?: priv->tail_vma; in m_next_vma()
143 static void m_cache_vma(struct seq_file *m, struct vm_area_struct *vma) in m_cache_vma() argument
146 m->version = m_next_vma(m->private, vma) ? vma->vm_end : -1UL; in m_cache_vma()
154 struct vm_area_struct *vma; in m_start() local
178 vma = find_vma(mm, last_addr - 1); in m_start()
179 if (vma && vma->vm_start <= last_addr) in m_start()
180 vma = m_next_vma(priv, vma); in m_start()
181 if (vma) in m_start()
182 return vma; in m_start()
187 for (vma = mm->mmap; pos; pos--) { in m_start()
188 m->version = vma->vm_start; in m_start()
189 vma = vma->vm_next; in m_start()
191 return vma; in m_start()
268 static int is_stack(struct vm_area_struct *vma) in is_stack() argument
275 return vma->vm_start <= vma->vm_mm->start_stack && in is_stack()
276 vma->vm_end >= vma->vm_mm->start_stack; in is_stack()
300 show_map_vma(struct seq_file *m, struct vm_area_struct *vma) in show_map_vma() argument
302 struct mm_struct *mm = vma->vm_mm; in show_map_vma()
303 struct file *file = vma->vm_file; in show_map_vma()
304 vm_flags_t flags = vma->vm_flags; in show_map_vma()
312 struct inode *inode = file_inode(vma->vm_file); in show_map_vma()
315 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; in show_map_vma()
318 start = vma->vm_start; in show_map_vma()
319 end = vma->vm_end; in show_map_vma()
332 if (vma->vm_ops && vma->vm_ops->name) { in show_map_vma()
333 name = vma->vm_ops->name(vma); in show_map_vma()
338 name = arch_vma_name(vma); in show_map_vma()
345 if (vma->vm_start <= mm->brk && in show_map_vma()
346 vma->vm_end >= mm->start_brk) { in show_map_vma()
351 if (is_stack(vma)) in show_map_vma()
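The show_map_vma() references above format one line of /proc/<pid>/maps per VMA (these helpers appear to be the kernel's fs/proc/task_mmu.c): start/end addresses, permission flags, file offset, device, inode and a name, with the heap detected by the brk-range check and the stack by is_stack(). As an illustration only (not taken from this listing), the table can be dumped from userspace; the buffer size below is an arbitrary choice:

    #include <stdio.h>

    /* Hedged example: print this process's own VMA table as emitted by
     * show_map_vma(); typical lines end in a path, "[heap]" or "[stack]". */
    int main(void)
    {
        char line[512];
        FILE *f = fopen("/proc/self/maps", "r");

        if (!f)
            return 1;
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
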
513 walk->vma->vm_file->f_mapping, addr, end); in smaps_pte_hole()
525 struct vm_area_struct *vma = walk->vma; in smaps_pte_entry() local
526 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pte_entry()
530 page = vm_normal_page(vma, addr, *pte); in smaps_pte_entry()
553 page = find_get_entry(vma->vm_file->f_mapping, in smaps_pte_entry()
554 linear_page_index(vma, addr)); in smaps_pte_entry()
577 struct vm_area_struct *vma = walk->vma; in smaps_pmd_entry() local
578 bool locked = !!(vma->vm_flags & VM_LOCKED); in smaps_pmd_entry()
582 page = follow_trans_huge_pmd(vma, addr, pmd, FOLL_DUMP); in smaps_pmd_entry()
605 struct vm_area_struct *vma = walk->vma; in smaps_pte_range() local
609 ptl = pmd_trans_huge_lock(pmd, vma); in smaps_pte_range()
624 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in smaps_pte_range()
633 static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma) in show_smap_vma_flags() argument
697 if (vma->vm_flags & (1UL << i)) { in show_smap_vma_flags()
712 struct vm_area_struct *vma = walk->vma; in smaps_hugetlb_range() local
716 page = vm_normal_page(vma, addr, *pte); in smaps_hugetlb_range()
729 mss->shared_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
731 mss->private_hugetlb += huge_page_size(hstate_vma(vma)); in smaps_hugetlb_range()
750 static void smap_gather_stats(struct vm_area_struct *vma, in smap_gather_stats() argument
756 if (vma->vm_file && shmem_mapping(vma->vm_file->f_mapping)) { in smap_gather_stats()
767 unsigned long shmem_swapped = shmem_swap_usage(vma); in smap_gather_stats()
769 if (!shmem_swapped || (vma->vm_flags & VM_SHARED) || in smap_gather_stats()
770 !(vma->vm_flags & VM_WRITE)) { in smap_gather_stats()
774 walk_page_vma(vma, &smaps_shmem_walk_ops, mss); in smap_gather_stats()
780 walk_page_vma(vma, &smaps_walk_ops, mss); in smap_gather_stats()
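smap_gather_stats() accumulates one VMA's smaps counters by walking its page tables; shmem-backed mappings take the smaps_shmem_walk_ops path so swap entries that were never faulted in can still be counted via shmem_swap_usage(). A minimal sketch of how such a walker is wired up, assuming the mm_walk_ops callback API used by these functions (the exact ops tables live elsewhere in the file and may differ in detail):

    static const struct mm_walk_ops smaps_walk_ops = {
        .pmd_entry     = smaps_pte_range,     /* PMD/PTE-level accounting */
        .hugetlb_entry = smaps_hugetlb_range, /* hugetlbfs mappings */
    };

    /* The shmem variant also visits holes to account swapped-out pages: */
    static const struct mm_walk_ops smaps_shmem_walk_ops = {
        .pmd_entry     = smaps_pte_range,
        .hugetlb_entry = smaps_hugetlb_range,
        .pte_hole      = smaps_pte_hole,
    };
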
827 struct vm_area_struct *vma = v; in show_smap() local
832 smap_gather_stats(vma, &mss); in show_smap()
834 show_map_vma(m, vma); in show_smap()
836 SEQ_PUT_DEC("Size: ", vma->vm_end - vma->vm_start); in show_smap()
837 SEQ_PUT_DEC(" kB\nKernelPageSize: ", vma_kernel_pagesize(vma)); in show_smap()
838 SEQ_PUT_DEC(" kB\nMMUPageSize: ", vma_mmu_pagesize(vma)); in show_smap()
844 transparent_hugepage_enabled(vma)); in show_smap()
847 seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma)); in show_smap()
848 show_smap_vma_flags(m, vma); in show_smap()
850 m_cache_vma(m, vma); in show_smap()
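show_smap() prints the per-VMA header via show_map_vma() and then the size fields seen above; for reference, the corresponding slice of /proc/<pid>/smaps output looks roughly like this (values are illustrative, and ProtectionKey only appears on architectures with protection keys enabled):

    Size:                132 kB
    KernelPageSize:        4 kB
    MMUPageSize:           4 kB
    ...
    THPeligible:           0
    ProtectionKey:         0
    VmFlags: rd wr mr mw me ac
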
860 struct vm_area_struct *vma; in show_smaps_rollup() local
882 for (vma = priv->mm->mmap; vma; vma = vma->vm_next) { in show_smaps_rollup()
883 smap_gather_stats(vma, &mss); in show_smaps_rollup()
884 last_vma_end = vma->vm_end; in show_smaps_rollup()
988 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
1002 old_pte = ptep_modify_prot_start(vma, addr, pte); in clear_soft_dirty()
1005 ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent); in clear_soft_dirty()
1008 set_pte_at(vma->vm_mm, addr, pte, ptent); in clear_soft_dirty()
1012 static inline void clear_soft_dirty(struct vm_area_struct *vma, in clear_soft_dirty() argument
1019 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
1026 old = pmdp_invalidate(vma, addr, pmdp); in clear_soft_dirty_pmd()
1035 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1038 set_pmd_at(vma->vm_mm, addr, pmdp, pmd); in clear_soft_dirty_pmd()
1042 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma, in clear_soft_dirty_pmd() argument
1052 struct vm_area_struct *vma = walk->vma; in clear_refs_pte_range() local
1057 ptl = pmd_trans_huge_lock(pmd, vma); in clear_refs_pte_range()
1060 clear_soft_dirty_pmd(vma, addr, pmd); in clear_refs_pte_range()
1070 pmdp_test_and_clear_young(vma, addr, pmd); in clear_refs_pte_range()
1081 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); in clear_refs_pte_range()
1086 clear_soft_dirty(vma, addr, pte); in clear_refs_pte_range()
1093 page = vm_normal_page(vma, addr, ptent); in clear_refs_pte_range()
1098 ptep_test_and_clear_young(vma, addr, pte); in clear_refs_pte_range()
1111 struct vm_area_struct *vma = walk->vma; in clear_refs_test_walk() local
1113 if (vma->vm_flags & VM_PFNMAP) in clear_refs_test_walk()
1122 if (cp->type == CLEAR_REFS_ANON && vma->vm_file) in clear_refs_test_walk()
1124 if (cp->type == CLEAR_REFS_MAPPED && !vma->vm_file) in clear_refs_test_walk()
1140 struct vm_area_struct *vma; in clear_refs_write() local
1189 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
1190 if (!(vma->vm_flags & VM_SOFTDIRTY)) in clear_refs_write()
1215 for (vma = mm->mmap; vma; vma = vma->vm_next) { in clear_refs_write()
1216 vma->vm_flags &= ~VM_SOFTDIRTY; in clear_refs_write()
1217 vma_set_page_prot(vma); in clear_refs_write()
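The clear_soft_dirty*() and clear_refs_* references implement writes to /proc/<pid>/clear_refs: clear_refs_write() drops VM_SOFTDIRTY from every VMA while the page-table walk clears the per-PTE/PMD soft-dirty (or accessed) bits. A hedged userspace example of the documented soft-dirty workflow, writing the CLEAR_REFS_SOFT_DIRTY value 4:

    #include <stdio.h>

    /* Write "4" to /proc/self/clear_refs so that subsequent writes to any
     * page show up as soft-dirty in /proc/self/pagemap (bit 55). */
    int main(void)
    {
        FILE *f = fopen("/proc/self/clear_refs", "w");

        if (!f)
            return 1;
        fputs("4", f);
        fclose(f);
        return 0;
    }
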
1292 struct vm_area_struct *vma = find_vma(walk->mm, addr); in pagemap_pte_hole() local
1297 if (vma) in pagemap_pte_hole()
1298 hole_end = min(end, vma->vm_start); in pagemap_pte_hole()
1308 if (!vma) in pagemap_pte_hole()
1312 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pte_hole()
1314 for (; addr < min(end, vma->vm_end); addr += PAGE_SIZE) { in pagemap_pte_hole()
1325 struct vm_area_struct *vma, unsigned long addr, pte_t pte) in pte_to_pagemap_entry() argument
1334 page = vm_normal_page(vma, addr, pte); in pte_to_pagemap_entry()
1357 if (vma->vm_flags & VM_SOFTDIRTY) in pte_to_pagemap_entry()
1366 struct vm_area_struct *vma = walk->vma; in pagemap_pmd_range() local
1373 ptl = pmd_trans_huge_lock(pmdp, vma); in pagemap_pmd_range()
1379 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_pmd_range()
1443 pme = pte_to_pagemap_entry(pm, vma, addr, *pte); in pagemap_pmd_range()
1462 struct vm_area_struct *vma = walk->vma; in pagemap_hugetlb_range() local
1467 if (vma->vm_flags & VM_SOFTDIRTY) in pagemap_hugetlb_range()
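The pagemap_* and pte_to_pagemap_entry() references build the 64-bit /proc/<pid>/pagemap entries, one per virtual page. Below is a hedged reader for a single address, relying only on the documented bit layout (bit 63 present, bit 62 swapped, bit 55 soft-dirty, low 55 bits the PFN when present); variable names are illustrative, and on recent kernels the PFN reads as 0 without CAP_SYS_ADMIN:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long psize = sysconf(_SC_PAGESIZE);
        uintptr_t vaddr = (uintptr_t)&psize;  /* any mapped address */
        uint64_t entry;
        int fd = open("/proc/self/pagemap", O_RDONLY);

        if (fd < 0)
            return 1;
        /* One 8-byte entry per virtual page, indexed by page number. */
        if (pread(fd, &entry, sizeof(entry),
                  (vaddr / psize) * sizeof(entry)) != sizeof(entry))
            return 1;
        printf("present=%d swapped=%d soft-dirty=%d pfn=%llu\n",
               (int)(entry >> 63 & 1), (int)(entry >> 62 & 1),
               (int)(entry >> 55 & 1),
               (unsigned long long)(entry & ((1ULL << 55) - 1)));
        close(fd);
        return 0;
    }
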
1694 static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma, in can_gather_numa_stats() argument
1703 page = vm_normal_page(vma, addr, pte); in can_gather_numa_stats()
1719 struct vm_area_struct *vma, in can_gather_numa_stats_pmd() argument
1728 page = vm_normal_page_pmd(vma, addr, pmd); in can_gather_numa_stats_pmd()
1747 struct vm_area_struct *vma = walk->vma; in gather_pte_stats() local
1753 ptl = pmd_trans_huge_lock(pmd, vma); in gather_pte_stats()
1757 page = can_gather_numa_stats_pmd(*pmd, vma, addr); in gather_pte_stats()
1770 struct page *page = can_gather_numa_stats(*pte, vma, addr); in gather_pte_stats()
1820 struct vm_area_struct *vma = v; in show_numa_map() local
1822 struct file *file = vma->vm_file; in show_numa_map()
1823 struct mm_struct *mm = vma->vm_mm; in show_numa_map()
1834 pol = __get_vma_policy(vma, vma->vm_start); in show_numa_map()
1842 seq_printf(m, "%08lx %s", vma->vm_start, buffer); in show_numa_map()
1847 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { in show_numa_map()
1849 } else if (is_stack(vma)) { in show_numa_map()
1853 if (is_vm_hugetlb_page(vma)) in show_numa_map()
1857 walk_page_vma(vma, &show_numa_ops, md); in show_numa_map()
1877 if (md->active < md->pages && !is_vm_hugetlb_page(vma)) in show_numa_map()
1887 seq_printf(m, " kernelpagesize_kB=%lu", vma_kernel_pagesize(vma) >> 10); in show_numa_map()
1890 m_cache_vma(m, vma); in show_numa_map()
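show_numa_map() emits /proc/<pid>/numa_maps: the VMA start address, the mempolicy string from __get_vma_policy(), an optional file=/heap/stack/huge tag, assorted page counters, then per-node page counts and kernelpagesize_kB. An illustrative line (values invented) might look like:

    7f2a4c000000 default file=/usr/lib/libc.so.6 mapped=120 mapmax=42 N0=80 N1=40 kernelpagesize_kB=4
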