Lines matching the full-text search terms "tlb" and "split"

1 // SPDX-License-Identifier: GPL-2.0-or-later
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
27 #include <asm/tlb.h>
29 #include <asm/pte-walk.h>
41 return current->thread.regs && TRAP(current->thread.regs) == 0x400; in is_exec_fault()
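
These fragments appear to come from the powerpc page-table code (arch/powerpc/mm/pgtable.c). On powerpc, exception vector 0x400 is the instruction storage interrupt, so line 41 is testing whether the current fault was an instruction fetch. A minimal sketch of the surrounding helper, grounded in the matched line itself (kernel-internal context assumed):

    static inline int is_exec_fault(void)
    {
            /* Vector 0x400 is the instruction storage interrupt;
             * thread.regs is NULL for kernel threads, which never
             * take exec faults from userspace. */
            return current->thread.regs && TRAP(current->thread.regs) == 0x400;
    }
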
76 /* Server-style MMU handles coherency when hashing if HW exec permission
77 * is supported per page (currently 64-bit only). If not, then we always
93 if (!test_bit(PG_dcache_clean, &pg->flags)) { in set_pte_filter_hash()
95 set_bit(PG_dcache_clean, &pg->flags); in set_pte_filter_hash()
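
The test_bit/set_bit pair on PG_dcache_clean at lines 93-95 is lazy I-cache/D-cache synchronization: the first time a page is mapped where instruction fetch must be coherent, the kernel flushes the D-cache and invalidates the I-cache for that page, then records the work in the page flags so later mappings skip the flush. A condensed sketch of the pattern, assuming helpers such as pte_looks_normal(), maybe_pte_to_page() and flush_dcache_icache_page() from the same file; the exact CPU-feature checks vary by kernel version:

    static pte_t set_pte_filter_hash(pte_t pte)
    {
            struct page *pg;

            /* Hash MMUs with per-page HW exec permission handle coherency
             * when hashing; only cores without it need help here. */
            if (!pte_looks_normal(pte) ||
                cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                    return pte;

            pg = maybe_pte_to_page(pte);
            if (!pg)
                    return pte;

            if (!test_bit(PG_dcache_clean, &pg->flags)) {
                    flush_dcache_icache_page(pg);   /* one-time flush */
                    set_bit(PG_dcache_clean, &pg->flags);
            }
            return pte;
    }
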
128 if (test_bit(PG_dcache_clean, &pg->flags)) in set_pte_filter()
134 set_bit(PG_dcache_clean, &pg->flags); in set_pte_filter()
163 if (WARN_ON(!(vma->vm_flags & VM_EXEC))) in set_access_flags_filter()
173 if (test_bit(PG_dcache_clean, &pg->flags)) in set_access_flags_filter()
178 set_bit(PG_dcache_clean, &pg->flags); in set_access_flags_filter()
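
set_access_flags_filter() applies the same lazy-coherency idea from the access-fault path: on an exec fault against a PTE that is not yet executable, it verifies the VMA really is executable (the WARN_ON at line 163 catches the opposite case, which do_page_fault() should have rejected earlier), flushes caches once if the page is not yet marked clean, and only then returns a PTE with execute permission added. A condensed sketch under the same assumptions as above:

    static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
                                         int dirty)
    {
            struct page *pg;

            /* Only exec faults on not-yet-executable PTEs need work. */
            if (dirty || pte_exec(pte) || !is_exec_fault())
                    return pte;

            if (WARN_ON(!(vma->vm_flags & VM_EXEC)))
                    return pte;     /* exec fault on a non-exec VMA */

            pg = maybe_pte_to_page(pte);
            if (unlikely(!pg))
                    goto bail;

            if (test_bit(PG_dcache_clean, &pg->flags))
                    goto bail;      /* already synchronized */

            flush_dcache_icache_page(pg);
            set_bit(PG_dcache_clean, &pg->flags);

    bail:
            return pte_mkexec(pte);
    }
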
192 * tlb flush for this update. in set_pte_at()
196 /* Note: mm->context.id might not yet have been assigned as in set_pte_at()
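
Both fragments at lines 192 and 196 are from set_pte_at(): it deliberately does no TLB flush (it may only install a PTE over an invalid one, which the warning below checks), and the filtering step must not rely on mm->context.id because the context may not have been activated yet. A sketch of how the pieces fit together; pte_hw_valid() and __set_pte_at() are the helpers this file uses, though their exact signatures differ across versions:

    void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
                    pte_t pte)
    {
            /* Make sure the hardware valid bit is not already set:
             * we do no tlb flush for this update. */
            VM_WARN_ON(pte_hw_valid(*ptep) && !pte_protnone(*ptep));

            /* Note: mm->context.id might not yet have been assigned,
             * as this context might not have been activated yet when
             * this is called, so the filter cannot depend on it. */
            pte = set_pte_filter(pte);

            /* Perform the setting of the PTE */
            __set_pte_at(mm, addr, ptep, pte, 0);
    }
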
220 assert_pte_locked(vma->vm_mm, address); in ptep_set_access_flags()
235 * TLB entry. Without this, platforms that don't do a write of the TLB in huge_ptep_set_access_flags()
236 * entry in the TLB miss handler asm will fault ad infinitum. in huge_ptep_set_access_flags()
252 assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep)); in huge_ptep_set_access_flags()
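
The comment at lines 235-236 documents a subtle contract in huge_ptep_set_access_flags(): returning 1 forces the generic fault code to call update_mmu_cache(), which preloads the TLB entry. On embedded platforms whose TLB miss handler asm never writes entries itself, skipping that preload would leave the stale entry in place and the task would retake the same fault forever. A condensed sketch of the function's two build-time shapes; the Book3S-64 branch matches the assert at line 252, and names such as HUGETLB_NEED_PRELOAD follow the powerpc tree:

    int huge_ptep_set_access_flags(struct vm_area_struct *vma,
                                   unsigned long addr, pte_t *ptep,
                                   pte_t pte, int dirty)
    {
    #ifdef HUGETLB_NEED_PRELOAD
            /* The "return 1" forces a call of update_mmu_cache(), which
             * will write a TLB entry.  Without this, platforms that don't
             * write the TLB entry in the TLB miss handler asm would fault
             * ad infinitum. */
            ptep_set_access_flags(vma, addr, ptep, pte, dirty);
            return 1;
    #else
            struct hstate *h = hstate_vma(vma);
            int changed;

            assert_spin_locked(huge_pte_lockptr(h, vma->vm_mm, ptep));
            pte = set_access_flags_filter(pte, vma, dirty);
            changed = !pte_same(*ptep, pte);
            if (changed)
                    __ptep_set_access_flags(vma, ptep, pte, addr,
                                            hstate_get_psize(h));
            return changed;
    #endif
    }
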
274 pte_basic_t *entry = &ptep->pte; in set_huge_pte_at()
279 * tlb flush for this update. in set_huge_pte_at()
305 pgd = mm->pgd + pgd_index(addr); in assert_pte_locked()
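
assert_pte_locked(), whose walk starts at line 305, is a debug assertion: it descends pgd -> p4d -> pud -> pmd by hand for the given address and checks that the spinlock protecting the PTE page is actually held, BUG-ing out if any level is absent. A sketch of the walk, assuming the usual folded-level offset helpers:

    void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
    {
            pgd_t *pgd;
            p4d_t *p4d;
            pud_t *pud;
            pmd_t *pmd;

            if (mm == &init_mm)
                    return;         /* kernel mappings take no PTE lock */

            pgd = mm->pgd + pgd_index(addr);
            BUG_ON(pgd_none(*pgd));
            p4d = p4d_offset(pgd, addr);
            BUG_ON(p4d_none(*p4d));
            pud = pud_offset(p4d, addr);
            BUG_ON(pud_none(*pud));
            pmd = pmd_offset(pud, addr);
            BUG_ON(!pmd_present(*pmd));
            assert_spin_locked(pte_lockptr(mm, pmd));
    }
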
344 * when we have MSR[EE] = 0 but the paca->irq_soft_mask = IRQS_ENABLED
365 * value doesn't get updated by a parallel THP split/collapse, in __find_linux_pte()
423 * A hugepage split is captured by this condition, see in __find_linux_pte()
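
The last fragments are from __find_linux_pte(), the lockless walker. It must be called with interrupts hard-disabled; the note at line 344 covers the lazy-interrupt case where MSR[EE] = 0 even though paca->irq_soft_mask still reads IRQS_ENABLED. Each pmd is snapshotted with READ_ONCE() so a parallel THP split/collapse, page fault or unmap cannot change the value mid-inspection, and line 423's comment ties a hugepage split to one such bail-out condition. A sketch of the pmd-level step only; walk_pmd_lockless() is an illustrative name, not the kernel's:

    /* Condensed: the pmd step of a lockless walk.  The caller must have
     * interrupts hard-disabled so the page tables cannot be freed under us. */
    static pte_t *walk_pmd_lockless(pud_t *pudp, unsigned long ea, bool *is_thp)
    {
            pmd_t pmd, *pmdp = pmd_offset(pudp, ea);

            /* Work on a local copy so the value doesn't get updated by a
             * parallel THP split/collapse while we inspect it. */
            pmd = READ_ONCE(*pmdp);

            /* A split or collapse in progress shows up as a cleared entry;
             * bail out and let the caller retry rather than follow a
             * half-rewritten pmd. */
            if (pmd_none(pmd))
                    return NULL;

            if (pmd_trans_huge(pmd)) {
                    if (is_thp)
                            *is_thp = true;
                    return (pte_t *)pmdp;   /* huge leaf: pmd acts as pte */
            }

            return pte_offset_kernel(pmdp, ea);
    }
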