/Linux-v4.19/arch/arm/lib/
uaccess_with_memcpy.c
      33  spinlock_t *ptl;  in pin_page_for_write() local
      58  ptl = &current->mm->page_table_lock;  in pin_page_for_write()
      59  spin_lock(ptl);  in pin_page_for_write()
      62  spin_unlock(ptl);  in pin_page_for_write()
      67  *ptlp = ptl;  in pin_page_for_write()
      74  pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);  in pin_page_for_write()
      77  pte_unmap_unlock(pte, ptl);  in pin_page_for_write()
      82  *ptlp = ptl;  in pin_page_for_write()
     105  spinlock_t *ptl;  in __copy_to_user_memcpy() local
     108  while (!pin_page_for_write(to, &pte, &ptl)) {  in __copy_to_user_memcpy()
     [all …]

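The hits in pin_page_for_write() show the two ways the lock is taken: directly through current->mm->page_table_lock, and through pte_offset_map_lock(), which maps the PTE page and takes its lock in one step. A minimal sketch of the latter pairing, assuming the caller has already validated the pgd/pud/pmd for the address (the function name is hypothetical):

    #include <linux/mm.h>

    /* Hypothetical helper: the canonical split-PTL pairing. */
    static bool example_pte_present(struct mm_struct *mm, pmd_t *pmd,
                                    unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *pte;
            bool ret;

            /* Map the PTE page and take the page table lock for it. */
            pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
            ret = pte_present(*pte);        /* examine under the lock */
            /* Drop the lock and unmap the PTE page in one step. */
            pte_unmap_unlock(pte, ptl);
            return ret;
    }
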
/Linux-v4.19/mm/
page_vma_mapped.c
      28  pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);  in map_pte()
      29  spin_lock(pvmw->ptl);  in map_pte()
     139  pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);  in page_vma_mapped_walk()
     140  spin_lock(pvmw->ptl);  in page_vma_mapped_walk()
     163  pvmw->ptl = pmd_lock(mm, pvmw->pmd);  in page_vma_mapped_walk()
     185  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk()
     186  pvmw->ptl = NULL;  in page_vma_mapped_walk()
     210  if (pvmw->ptl) {  in page_vma_mapped_walk()
     211  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk()
     212  pvmw->ptl = NULL;  in page_vma_mapped_walk()
     [all …]

huge_memory.c
     575  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in __do_huge_pmd_anonymous_page()
     589  spin_unlock(vmf->ptl);  in __do_huge_pmd_anonymous_page()
     607  spin_unlock(vmf->ptl);  in __do_huge_pmd_anonymous_page()
     613  spin_unlock(vmf->ptl);  in __do_huge_pmd_anonymous_page()
     695  vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);  in do_huge_pmd_anonymous_page()
     701  spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
     703  spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
     709  spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
     713  spin_unlock(vmf->ptl);  in do_huge_pmd_anonymous_page()
     734  spinlock_t *ptl;  in insert_pfn_pmd() local
     [all …]

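At the pmd level there is no map/unmap step: pmd_lock() returns the held lock and every exit path above pairs it with a plain spin_unlock(). A minimal sketch of that pattern (the function name is hypothetical):

    #include <linux/mm.h>

    static bool example_pmd_is_huge(struct vm_area_struct *vma, pmd_t *pmd)
    {
            spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd); /* held on return */
            bool huge = pmd_trans_huge(*pmd);            /* pmd stable here */

            spin_unlock(ptl);
            return huge;
    }
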
memory.c
     652  spinlock_t *ptl;  in __pte_alloc() local
     672  ptl = pmd_lock(mm, pmd);  in __pte_alloc()
     678  spin_unlock(ptl);  in __pte_alloc()
    1291  spinlock_t *ptl;  in zap_pte_range() local
    1299  start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in zap_pte_range()
    1395  pte_unmap_unlock(start_pte, ptl);  in zap_pte_range()
    1663  spinlock_t **ptl)  in __get_locked_pte() argument
    1682  return pte_alloc_map_lock(mm, pmd, addr, ptl);  in __get_locked_pte()
    1698  spinlock_t *ptl;  in insert_page() local
    1705  pte = get_locked_pte(mm, addr, &ptl);  in insert_page()
    [all …]

mincore.c
     117  spinlock_t *ptl;  in mincore_pte_range() local
     123  ptl = pmd_trans_huge_lock(pmd, vma);  in mincore_pte_range()
     124  if (ptl) {  in mincore_pte_range()
     126  spin_unlock(ptl);  in mincore_pte_range()
     135  ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);  in mincore_pte_range()
     165  pte_unmap_unlock(ptep - 1, ptl);  in mincore_pte_range()

migrate.c
     311  spinlock_t *ptl)  in __migration_entry_wait() argument
     317  spin_lock(ptl);  in __migration_entry_wait()
     337  pte_unmap_unlock(ptep, ptl);  in __migration_entry_wait()
     342  pte_unmap_unlock(ptep, ptl);  in __migration_entry_wait()
     348  spinlock_t *ptl = pte_lockptr(mm, pmd);  in migration_entry_wait() local
     350  __migration_entry_wait(mm, ptep, ptl);  in migration_entry_wait()
     356  spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), mm, pte);  in migration_entry_wait_huge() local
     357  __migration_entry_wait(mm, pte, ptl);  in migration_entry_wait_huge()
     363  spinlock_t *ptl;  in pmd_migration_entry_wait() local
     366  ptl = pmd_lock(mm, pmd);  in pmd_migration_entry_wait()
     [all …]

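The migrate.c hits show a split of responsibilities: the callers only resolve the lock pointer with pte_lockptr() or huge_pte_lockptr(), and the shared helper does the actual locking and release. A minimal sketch mirroring that shape (both function names are hypothetical):

    #include <linux/mm.h>

    /* Helper locks, inspects, and releases; the caller never locks. */
    static void example_wait_helper(pte_t *ptep, spinlock_t *ptl)
    {
            spin_lock(ptl);
            /* ... examine *ptep under the lock ... */
            pte_unmap_unlock(ptep, ptl);
    }

    static void example_wait(struct mm_struct *mm, pmd_t *pmd, pte_t *ptep)
    {
            spinlock_t *ptl = pte_lockptr(mm, pmd); /* no locking yet */

            example_wait_helper(ptep, ptl);
    }
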
gup.c
      79  spinlock_t *ptl;  in follow_page_pte() local
      86  ptep = pte_offset_map_lock(mm, pmd, address, &ptl);  in follow_page_pte()
     102  pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
     109  pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
     145  pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
     202  pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
     205  pte_unmap_unlock(ptep, ptl);  in follow_page_pte()
     216  spinlock_t *ptl;  in follow_pmd_mask() local
     260  ptl = pmd_lock(mm, pmd);  in follow_pmd_mask()
     262  spin_unlock(ptl);  in follow_pmd_mask()
     [all …]

userfaultfd.c
      32  spinlock_t *ptl;  in mcopy_atomic_pte() local
      77  dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  in mcopy_atomic_pte()
      91  pte_unmap_unlock(dst_pte, ptl);  in mcopy_atomic_pte()
      96  pte_unmap_unlock(dst_pte, ptl);  in mcopy_atomic_pte()
     109  spinlock_t *ptl;  in mfill_zeropage_pte() local
     115  dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl);  in mfill_zeropage_pte()
     123  pte_unmap_unlock(dst_pte, ptl);  in mfill_zeropage_pte()

hugetlb.c
    3325  spinlock_t *ptl;  in __unmap_hugepage_range() local
    3354  ptl = huge_pte_lock(h, mm, ptep);  in __unmap_hugepage_range()
    3356  spin_unlock(ptl);  in __unmap_hugepage_range()
    3366  spin_unlock(ptl);  in __unmap_hugepage_range()
    3376  spin_unlock(ptl);  in __unmap_hugepage_range()
    3388  spin_unlock(ptl);  in __unmap_hugepage_range()
    3407  spin_unlock(ptl);  in __unmap_hugepage_range()
    3526  struct page *pagecache_page, spinlock_t *ptl)  in hugetlb_cow() argument
    3568  spin_unlock(ptl);  in hugetlb_cow()
    3584  spin_lock(ptl);  in hugetlb_cow()
    [all …]

madvise.c
     207  spinlock_t *ptl;  in swapin_walk_pmd_entry() local
     209  orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);  in swapin_walk_pmd_entry()
     211  pte_unmap_unlock(orig_pte, ptl);  in swapin_walk_pmd_entry()
     317  spinlock_t *ptl;  in madvise_free_pte_range() local
     332  orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in madvise_free_pte_range()
     374  pte_unmap_unlock(orig_pte, ptl);  in madvise_free_pte_range()
     378  pte_offset_map_lock(mm, pmd, addr, &ptl);  in madvise_free_pte_range()
     383  pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in madvise_free_pte_range()
     437  pte_unmap_unlock(orig_pte, ptl);  in madvise_free_pte_range()

pagewalk.c
      93  spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);  in walk_pud_range() local
      95  if (ptl) {  in walk_pud_range()
      97  spin_unlock(ptl);  in walk_pud_range()

khugepaged.c
     637  spinlock_t *ptl)  in __collapse_huge_page_copy() argument
     652  spin_lock(ptl);  in __collapse_huge_page_copy()
     658  spin_unlock(ptl);  in __collapse_huge_page_copy()
     670  spin_lock(ptl);  in __collapse_huge_page_copy()
     677  spin_unlock(ptl);  in __collapse_huge_page_copy()
    1109  spinlock_t *ptl;  in khugepaged_scan_pmd() local
    1122  pte = pte_offset_map_lock(mm, pmd, address, &ptl);  in khugepaged_scan_pmd()
    1212  pte_unmap_unlock(pte, ptl);  in khugepaged_scan_pmd()
    1274  spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);  in retract_page_tables() local
    1277  spin_unlock(ptl);  in retract_page_tables()

/Linux-v4.19/arch/arm/mm/
fault-armv.c
      74  static inline void do_pte_lock(spinlock_t *ptl)  in do_pte_lock() argument
      80  spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);  in do_pte_lock()
      83  static inline void do_pte_unlock(spinlock_t *ptl)  in do_pte_unlock() argument
      85  spin_unlock(ptl);  in do_pte_unlock()
      88  static inline void do_pte_lock(spinlock_t *ptl) {}  in do_pte_lock() argument
      89  static inline void do_pte_unlock(spinlock_t *ptl) {}  in do_pte_unlock() argument
      95  spinlock_t *ptl;  in adjust_pte() local
     119  ptl = pte_lockptr(vma->vm_mm, pmd);  in adjust_pte()
     121  do_pte_lock(ptl);  in adjust_pte()
     125  do_pte_unlock(ptl);  in adjust_pte()

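The fault-armv.c hits take a second PTE lock while another is already held, which is why do_pte_lock() uses spin_lock_nested() to tell lockdep the recursion is intentional. A minimal sketch of that idea (hypothetical function; assumes the caller already holds the first PTE lock):

    #include <linux/spinlock.h>

    static void example_lock_second_ptl(spinlock_t *ptl)
    {
            /* Nested annotation: a PTE lock is already held elsewhere. */
            spin_lock_nested(ptl, SINGLE_DEPTH_NESTING);
            /* ... update the second mapping ... */
            spin_unlock(ptl);
    }
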
/Linux-v4.19/arch/s390/mm/
pgtable.c
     756  spinlock_t *ptl;  in set_guest_storage_key() local
     765  ptl = pmd_lock(mm, pmdp);  in set_guest_storage_key()
     767  spin_unlock(ptl);  in set_guest_storage_key()
     779  spin_unlock(ptl);  in set_guest_storage_key()
     782  spin_unlock(ptl);  in set_guest_storage_key()
     784  ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);  in set_guest_storage_key()
     812  pte_unmap_unlock(ptep, ptl);  in set_guest_storage_key()
     857  spinlock_t *ptl;  in reset_guest_reference_bit() local
     868  ptl = pmd_lock(mm, pmdp);  in reset_guest_reference_bit()
     870  spin_unlock(ptl);  in reset_guest_reference_bit()
     [all …]

gmap.c
     544  spinlock_t *ptl;  in __gmap_link() local
     600  ptl = pmd_lock(mm, pmd);  in __gmap_link()
     622  spin_unlock(ptl);  in __gmap_link()
     677  spinlock_t *ptl;  in __gmap_zap() local
     686  ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);  in __gmap_zap()
     689  pte_unmap_unlock(ptep, ptl);  in __gmap_zap()
     846  spinlock_t **ptl)  in gmap_pte_op_walk() argument
     855  return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);  in gmap_pte_op_walk()
     891  static void gmap_pte_op_end(spinlock_t *ptl)  in gmap_pte_op_end() argument
     893  if (ptl)  in gmap_pte_op_end()
     [all …]

/Linux-v4.19/arch/powerpc/mm/
hugetlbpage.c
      56  unsigned int pshift, spinlock_t *ptl)  in __hugepte_alloc() argument
      86  spin_lock(ptl);  in __hugepte_alloc()
     116  spin_unlock(ptl);  in __hugepte_alloc()
     132  spinlock_t *ptl;  in huge_pte_alloc() local
     145  ptl = &mm->page_table_lock;  in huge_pte_alloc()
     153  ptl = pud_lockptr(mm, pu);  in huge_pte_alloc()
     162  ptl = pmd_lockptr(mm, pm);  in huge_pte_alloc()
     169  ptl = &mm->page_table_lock;  in huge_pte_alloc()
     175  ptl = pud_lockptr(mm, pu);  in huge_pte_alloc()
     180  ptl = pmd_lockptr(mm, pm);  in huge_pte_alloc()
     [all …]

subpage-prot.c
      67  spinlock_t *ptl;  in hpte_flush_range() local
      78  pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in hpte_flush_range()
      86  pte_unmap_unlock(pte - 1, ptl);  in hpte_flush_range()

/Linux-v4.19/fs/proc/
task_mmu.c
     572  spinlock_t *ptl;  in smaps_pte_range() local
     574  ptl = pmd_trans_huge_lock(pmd, vma);  in smaps_pte_range()
     575  if (ptl) {  in smaps_pte_range()
     578  spin_unlock(ptl);  in smaps_pte_range()
     589  pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);  in smaps_pte_range()
     592  pte_unmap_unlock(pte - 1, ptl);  in smaps_pte_range()
     993  spinlock_t *ptl;  in clear_refs_pte_range() local
     996  ptl = pmd_trans_huge_lock(pmd, vma);  in clear_refs_pte_range()
     997  if (ptl) {  in clear_refs_pte_range()
    1013  spin_unlock(ptl);  in clear_refs_pte_range()
    [all …]

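smaps_pte_range() and mincore_pte_range() above share the same shape: pmd_trans_huge_lock() returns the held pmd lock if the pmd maps a huge page, or NULL so the caller can fall back to the PTE level. A minimal sketch of that branch (hypothetical function):

    #include <linux/mm.h>
    #include <linux/huge_mm.h>

    static void example_walk_pmd(struct vm_area_struct *vma, pmd_t *pmd,
                                 unsigned long addr, unsigned long end)
    {
            spinlock_t *ptl;

            ptl = pmd_trans_huge_lock(pmd, vma);
            if (ptl) {
                    /* ... the range is covered by one huge pmd ... */
                    spin_unlock(ptl);
                    return;
            }
            /* ... otherwise walk PTEs with pte_offset_map_lock() ... */
    }
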
/Linux-v4.19/Documentation/vm/
split_page_table_lock.rst
      63  This field shares storage with page->ptl.
      80  page->ptl
      83  page->ptl is used to access split page table lock, where 'page' is struct
      92  - if size of spinlock_t is bigger then size of long, we use page->ptl as
     100  Please, never access page->ptl directly -- use appropriate helper.

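The document's closing rule is visible in all the listings above: page->ptl may hold a spinlock_t or a pointer to one depending on configuration, so callers always go through helpers such as pte_lockptr() and pmd_lockptr(). A minimal sketch of the helper-based access (hypothetical function):

    #include <linux/mm.h>

    static void example_locked_pte_update(struct mm_struct *mm, pmd_t *pmd)
    {
            spinlock_t *ptl = pte_lockptr(mm, pmd); /* right: via helper */
            /* Wrong: dereferencing page->ptl directly, whose layout
             * changes with the split-lock and lockdep configuration. */

            spin_lock(ptl);
            /* ... modify PTEs covered by this pmd ... */
            spin_unlock(ptl);
    }
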
/Linux-v4.19/include/linux/
rmap.h
     211  spinlock_t *ptl;  member
     219  if (pvmw->ptl)  in page_vma_mapped_walk_done()
     220  spin_unlock(pvmw->ptl);  in page_vma_mapped_walk_done()

mm.h
     368  spinlock_t *ptl;  /* Page table lock.  member
    1694  spinlock_t **ptl);
    1696  spinlock_t **ptl)  in get_locked_pte() argument
    1699  __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));  in get_locked_pte()
    1832  return page->ptl;  in ptlock_ptr()
    1850  return &page->ptl;  in ptlock_ptr()
    1868  VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);  in ptlock_init()
    1926  #define pte_unmap_unlock(pte, ptl) do { \  argument
    1927  spin_unlock(ptl); \
    1992  spinlock_t *ptl = pmd_lockptr(mm, pmd);  in pmd_lock() local
    [all …]

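get_locked_pte(), shown here as the mm.h wrapper and at its insert_page() call site in memory.c above, allocates intermediate page tables as needed and returns the PTE with its lock already held, or NULL on allocation failure. A minimal sketch of a caller (hypothetical function):

    #include <linux/errno.h>
    #include <linux/mm.h>

    static int example_with_locked_pte(struct mm_struct *mm,
                                       unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *pte = get_locked_pte(mm, addr, &ptl);

            if (!pte)
                    return -ENOMEM; /* page table allocation failed */
            /* ... read or modify *pte under the lock ... */
            pte_unmap_unlock(pte, ptl);
            return 0;
    }
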
hugetlb.h
     628  spinlock_t *ptl;  in huge_pte_lock() local
     630  ptl = huge_pte_lockptr(h, mm, pte);  in huge_pte_lock()
     631  spin_lock(ptl);  in huge_pte_lock()
     632  return ptl;  in huge_pte_lock()

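hugetlb.h defines huge_pte_lock() as lockptr-then-lock, and callers such as __unmap_hugepage_range() above release it with a plain spin_unlock(); there is no unmap step for hugetlb PTEs. A minimal sketch of a caller (hypothetical function):

    #include <linux/hugetlb.h>

    static void example_touch_huge_pte(struct hstate *h,
                                       struct mm_struct *mm, pte_t *ptep)
    {
            spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

            /* ... the huge PTE is stable while ptl is held ... */
            spin_unlock(ptl);
    }
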
/Linux-v4.19/arch/m68k/kernel/
sys_m68k.c
     470  spinlock_t *ptl;  in sys_atomic_cmpxchg_32() local
     480  pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);  in sys_atomic_cmpxchg_32()
     483  pte_unmap_unlock(pte, ptl);  in sys_atomic_cmpxchg_32()
     495  pte_unmap_unlock(pte, ptl);  in sys_atomic_cmpxchg_32()

/Linux-v4.19/arch/x86/xen/
mmu_pv.c
     690  spinlock_t *ptl = NULL;  in xen_pte_lock() local
     693  ptl = ptlock_ptr(page);  in xen_pte_lock()
     694  spin_lock_nest_lock(ptl, &mm->page_table_lock);  in xen_pte_lock()
     697  return ptl;  in xen_pte_lock()
     702  spinlock_t *ptl = v;  in xen_pte_unlock() local
     703  spin_unlock(ptl);  in xen_pte_unlock()
     732  spinlock_t *ptl;  in xen_pin_page() local
     756  ptl = NULL;  in xen_pin_page()
     758  ptl = xen_pte_lock(page, mm);  in xen_pin_page()
     764  if (ptl) {  in xen_pin_page()
     [all …]

/Linux-v4.19/arch/sh/mm/
cache-sh5.c
     390  spinlock_t *ptl;  in sh64_dcache_purge_user_pages() local
     408  pte = pte_offset_map_lock(mm, pmd, addr, &ptl);  in sh64_dcache_purge_user_pages()
     416  pte_unmap_unlock(pte - 1, ptl);  in sh64_dcache_purge_user_pages()