Lines Matching +full:i +full:- +full:tlb +full:- +full:size
1 // SPDX-License-Identifier: GPL-2.0
6 #include <asm/tlb.h>
11 phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
23 void paravirt_tlb_remove_table(struct mmu_gather *tlb, void *table)
25 tlb_remove_page(tlb, table);
39 return -EINVAL;
48 return -EINVAL;
53 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
57 paravirt_tlb_remove_table(tlb, pte);
61 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
66 * NOTE! For PAE, any changes to the top page-directory-pointer-table
70 tlb->need_flush_all = 1;
73 paravirt_tlb_remove_table(tlb, page);
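
The need_flush_all flag set in ___pmd_free_tlb() above is consumed later, when the mmu_gather is finally flushed. As an illustrative sketch only (modelled on the x86 tlb_flush() hook in asm/tlb.h; simplified, field names as in the generic mmu_gather, not quoted from this file), the flag forces a full-address-space flush instead of a ranged one:

static inline void tlb_flush(struct mmu_gather *tlb)
{
        unsigned long start = 0UL, end = TLB_FLUSH_ALL;

        /* Only narrow the flush range when no full flush was requested. */
        if (!tlb->fullmm && !tlb->need_flush_all) {
                start = tlb->start;
                end = tlb->end;
        }

        flush_tlb_mm_range(tlb->mm, start, end, PAGE_SHIFT, tlb->freed_tables);
}
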
77 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
80 paravirt_tlb_remove_table(tlb, virt_to_page(pud));
84 void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
87 paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
97 list_add(&page->lru, &pgd_list);
104 list_del(&page->lru);
115 virt_to_page(pgd)->pt_mm = mm;
120 return page->pt_mm;
126 ptes in non-PAE, or shared PMD in PAE), then just copy the
154 * List of all pgd's needed for non-PAE so it can invalidate entries
157 * tactic would be needed. This is essentially codepath-based locking
161 * -- nyc
166 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
167 * updating the top-level pagetable entries to guarantee the
169 * all 4 top-level entries are used almost immediately in a
170 * new process's life, we just pre-populate them here.
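
The pgd_list mentioned above is what lets the kernel propagate a change in the shared kernel mappings (for example a new vmalloc-area pgd entry) into every process's page directory. A hedged sketch of that propagation pattern, loosely based on the historical 32-bit vmalloc sync code (the function name is hypothetical and the locking is simplified):

static void sync_kernel_pgd_entry(unsigned long address)
{
        pgd_t *pgd_ref = pgd_offset_k(address);
        struct page *page;

        spin_lock(&pgd_lock);
        list_for_each_entry(page, &pgd_list, lru) {
                pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(address);

                /* Copy the reference kernel entry into this process's pgd. */
                if (pgd_none(*pgd))
                        set_pgd(pgd, *pgd_ref);
        }
        spin_unlock(&pgd_lock);
}
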
180 * We allocate separate PMDs for the kernel part of the user page-table
181 * when PTI is enabled. We need them to map the per-process LDT into the
182 * user-space page-table.
197 * According to Intel App note "TLBs, Paging-Structure Caches,
198 * and Their Invalidation", April 2007, document 317080-001,
200 * TLB via cr3 if the top-level pgd is changed...
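
The Intel note quoted above is why the PAE flavour of pud_populate() ends with a full TLB flush after installing the new PDPT entry. A rough sketch of that shape (simplified and illustrative, not quoted verbatim from the file):

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* At the PDPT level almost everything except _PAGE_PRESENT is reserved. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /* Changing a top-level PAE entry requires a cr3 reload (= TLB flush). */
        flush_tlb_mm(mm);
}
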
206 /* No need to prepopulate any pagetable entries in non-PAE modes. */
215 int i;
217 for (i = 0; i < count; i++)
218 if (pmds[i]) {
219 pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
220 free_page((unsigned long)pmds[i]);
227 int i;
234 for (i = 0; i < count; i++) {
245 pmds[i] = pmd;
250 return -ENOMEM;
279 int i;
281 for (i = 0; i < PREALLOCATED_PMDS; i++)
282 mop_up_one_pmd(mm, &pgdp[i]);
291 for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
292 mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
300 int i;
302 if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
308 for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
309 pmd_t *pmd = pmds[i];
311 if (i >= KERNEL_PGD_BOUNDARY)
312 memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
327 int i;
335 for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
336 pmd_t *pmd = pmds[i];
379 * page for pgd. We can just allocate 32 bytes for the pgd.
380 * During boot time, we create a 32-byte slab for pgd table allocation.
398 * a 32-byte slab for pgd to save memory space.
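
As a hedged illustration of the 32-byte pgd slab described above: when the kernel pmd is shared, a PAE pgd needs only 4 entries of 8 bytes, so a tiny slab cache can replace the full page. The sketch below shows the general pattern with kmem_cache_create()/kmem_cache_alloc(); the exact size, alignment, GFP flags and fallback used by the file may differ:

static struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
        /* 4 PAE pgd entries * 8 bytes each = 32 bytes per pgd. */
        pgd_cache = kmem_cache_create("pgd_cache", 32, 32, SLAB_PANIC, NULL);
}

static pgd_t *_pgd_alloc(void)
{
        /* Without a shared kernel pmd (e.g. Xen PV), fall back to a whole page. */
        if (!SHARED_KERNEL_PMD)
                return (pgd_t *)__get_free_page(GFP_KERNEL);

        return kmem_cache_alloc(pgd_cache, GFP_KERNEL);
}
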
435 mm->pgd = pgd;
447 * Make sure that pre-populating the pmds is atomic with
510 * We had a write-protection fault here and changed the pmd
511 * to be more permissive. No need to flush the TLB for that,
513 * worst-case we'll generate a spurious fault.
530 * We had a write-protection fault here and changed the pud
531 * to be more permissive. No need to flush the TLB for that,
533 * worst-case we'll generate a spurious fault.
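
The same reasoning holds one level down at the pte: after an access/dirty fault the entry only becomes more permissive, so the helper just installs the new value and reports whether anything changed, with no TLB flush. A minimal sketch of that pattern (simplified from the usual x86 shape):

int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(*ptep, entry);

        if (changed && dirty)
                set_pte(ptep, entry);   /* more permissive, so no flush needed */

        return changed;
}
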
548 (unsigned long *) &ptep->pte);
585 * On x86 CPUs, clearing the accessed bit without a TLB flush
590 * So as a performance optimization don't flush the TLB when
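
Because of the reasoning above, the "flush" variant can simply delegate to the non-flushing ptep_test_and_clear_young() helper. A minimal sketch of that arrangement (illustrative; the real body carries the full comment):

int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        /*
         * Skipping the TLB flush cannot corrupt data; at worst a stale
         * accessed bit makes page aging slightly less accurate.
         */
        return ptep_test_and_clear_young(vma, address, ptep);
}
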
627 * reserve_top_address - reserves a hole in the top of kernel address space
628 * @reserve: size of hole to reserve
637 __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
639 -reserve, __FIXADDR_TOP + PAGE_SIZE);
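
A worked example of the computation above, using hypothetical values and assuming a 32-bit PAE build (PMD_SHIFT == 21, PAGE_SIZE == 4096):

        reserve_top_address(256 << 20);         /* reserve 256 MiB */
        /*
         * -reserve as a 32-bit unsigned long  = 0xf0000000
         * round_down(0xf0000000, 1 << 21)     = 0xf0000000  (already 2 MiB aligned)
         * __FIXADDR_TOP = 0xf0000000 - 0x1000 = 0xeffff000
         */
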
678 * p4d_set_huge - setup kernel P4D mapping
680 * No 512GB pages yet -- always return 0
688 * p4d_clear_huge - clear kernel P4D mapping when it is set
690 * No 512GB pages yet -- always return 0
698 * pud_set_huge - setup kernel PUD mapping
703 * - MTRRs are disabled, or
705 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
707 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
710 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
724 /* Bail out if we are on a populated non-leaf entry: */
736 * pmd_set_huge - setup kernel PMD mapping
749 …pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR over…
754 /* Bail out if we are on a populated non-leaf entry: */
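
The fallback advice in the comments above describes what ioremap-style callers do: attempt the largest page size first and drop down a level whenever the helper declines. A hedged sketch of that caller pattern (the wrapper function and the 4 KiB fallback are hypothetical; pud_set_huge() and pmd_set_huge() are the helpers documented above):

static void map_region(unsigned long addr, phys_addr_t phys,
                       unsigned long size, pgprot_t prot,
                       pud_t *pud, pmd_t *pmd)
{
        if (size >= PUD_SIZE && pud_set_huge(pud, phys, prot))
                return;                         /* mapped with 1 GiB pages */

        if (size >= PMD_SIZE && pmd_set_huge(pmd, phys, prot))
                return;                         /* mapped with 2 MiB pages */

        map_with_4k_ptes(addr, phys, size, prot);       /* hypothetical fallback */
}
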
766 * pud_clear_huge - clear kernel PUD mapping when it is set
781 * pmd_clear_huge - clear kernel PMD mapping when it is set
797 * pud_free_pmd_page - Clear pud entry and free pmd page.
801 * Context: The pud range has been unmapped and TLB purged.
810 int i;
817 for (i = 0; i < PTRS_PER_PMD; i++) {
818 pmd_sv[i] = pmd[i];
819 if (!pmd_none(pmd[i]))
820 pmd_clear(&pmd[i]);
825 /* INVLPG to clear all paging-structure caches */
826 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
828 for (i = 0; i < PTRS_PER_PMD; i++) {
829 if (!pmd_none(pmd_sv[i])) {
830 pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
844 * pmd_free_pte_page - Clear pmd entry and free pte page.
848 * Context: The pmd range has been unmapped and TLB purged.
858 /* INVLPG to clear all paging-structure caches */
859 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
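
Per the "Context" lines above, these helpers are for remappers that have already unmapped and flushed the range and now want to replace a previously populated non-leaf entry with a huge mapping. A hedged sketch of that usage (the wrapper is hypothetical; pmd_free_pte_page() and pmd_set_huge() are the helpers shown in this file):

static int remap_pmd_as_huge(pmd_t *pmd, unsigned long addr,
                             phys_addr_t phys, pgprot_t prot)
{
        /* Clear and free any old 4 KiB page table hanging off this pmd. */
        if (!pmd_free_pte_page(pmd, addr))
                return 0;

        return pmd_set_huge(pmd, phys, prot);
}
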
869 * Disable free page handling on x86-PAE. This assures that ioremap()
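
On 32-bit PAE the "free page handling" is effectively disabled, as the comment above says: the helper only reports success when the entry is already empty, so ioremap() never tears down a pte page that might be shared through the pgd syncing described earlier. A minimal sketch of that variant (illustrative, consistent with the comment):

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
        /* Succeed only if there is nothing to free; never touch a live table. */
        return pmd_none(*pmd);
}
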