Lines Matching +full:tlb +full:- +full:split
1 /* SPDX-License-Identifier: GPL-2.0 */
40 (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
65 return ((2ULL << (e - s)) - 1) << s; in rsvd_bits()
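The rsvd_bits() line above builds the inclusive mask of bits s..e. A minimal standalone sketch of the same formula (plain C; rsvd_bits_sketch is an illustrative name, not the kernel helper itself):

#include <stdint.h>
#include <stdio.h>

/*
 * Build the inclusive mask of bits s..e the same way the matched line does.
 * Using "2ULL << (e - s)" instead of "1ULL << (e - s + 1)" keeps the shift
 * count at most 63 even when the full 0..63 range is requested, so the
 * expression stays well-defined.
 */
static inline uint64_t rsvd_bits_sketch(int s, int e)
{
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	/* Bits 3..5 inclusive: 0b111000 == 0x38. */
	printf("0x%llx\n", (unsigned long long)rsvd_bits_sketch(3, 5));
	/* A typical MMU-style use: mark physical-address bits 52..62 reserved. */
	printf("0x%llx\n", (unsigned long long)rsvd_bits_sketch(52, 62));
	return 0;
}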
86 if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE)) in kvm_mmu_reload()
108 u64 root_hpa = vcpu->arch.mmu->root_hpa; in kvm_mmu_load_pgd()
114 vcpu->arch.mmu->shadow_root_level); in kvm_mmu_load_pgd()
124 if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault)) in kvm_mmu_do_page_fault()
127 return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault); in kvm_mmu_do_page_fault()
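The two kvm_mmu_do_page_fault() lines above compare the mmu->page_fault pointer against kvm_tdp_page_fault and, on a match, call the handler directly instead of through the pointer, presumably to avoid a retpolined indirect call on the hot path. A minimal standalone sketch of that devirtualization pattern, with illustrative names (fault_handler_t, tdp_fault, do_fault) that are not the kernel's:

#include <stdio.h>

/* Illustrative types and names, not the kernel's. */
typedef int (*fault_handler_t)(unsigned long gpa, unsigned int err);

static int tdp_fault(unsigned long gpa, unsigned int err)
{
	printf("tdp fault at 0x%lx, error code 0x%x\n", gpa, err);
	return 0;
}

/*
 * If the pointer holds the handler expected in the common configuration,
 * call it directly so the compiler emits a plain direct call; otherwise
 * fall back to the indirect call.
 */
static int do_fault(fault_handler_t handler, unsigned long gpa, unsigned int err)
{
	if (handler == tdp_fault)
		return tdp_fault(gpa, err);	/* direct call */
	return handler(gpa, err);		/* indirect call */
}

int main(void)
{
	return do_fault(tdp_fault, 0x1000, 0x2);
}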
131 * Currently, we have two kinds of write protection: a) the first one
132 * write-protects guest pages to sync guest modifications, b) the other one is
135 * 1) the first case clears the MMU-writable bit.
136 * 2) the first case requires flushing the TLB immediately to avoid corrupting
138 * mmu-lock. The other case does not need to flush the TLB until returning
139 * the dirty bitmap to userspace, since it only write-protects the page
141 * missed, so it can flush the TLB outside of mmu-lock.
143 * So there is a problem: the first case can run into stale TLB entries left
144 * by the other case, which write-protects pages without flushing the TLB
146 * it flush the TLB if we try to write-protect an spte whose MMU-writable bit
147 * is set; this works since the other case never touches the MMU-writable bit.
150 * changed) we need to check whether an spte with MMU-writable set becomes
151 * read-only; if that happens, we need to flush the TLB. Fortunately,
154 * The rules for using MMU-writable and PT_WRITABLE_MASK:
155 * - if we want to know whether a writable TLB entry may exist, or whether the spte can be
156 * made writable in the MMU mapping, check MMU-writable; this is the most
158 * - if we fix a page fault on the spte or do write protection for dirty logging,
161 * TODO: introduce APIs to split these two cases.
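A minimal standalone sketch of the rule the comment spells out, using made-up bit positions and names (SPTE_WRITABLE, SPTE_MMU_WRITABLE, wrprot_spte_needs_flush), not the kernel's real spte layout: when the first kind of write protection clears an spte that was MMU-writable, a writable TLB entry may still exist, so the caller must flush.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Made-up bit positions; the real spte layout is defined elsewhere in KVM. */
#define SPTE_WRITABLE		(1ULL << 1)	/* hardware-writable bit */
#define SPTE_MMU_WRITABLE	(1ULL << 2)	/* MMU-writable tracking bit */

/*
 * First kind of write protection: clear both bits.  A writable TLB entry
 * can only exist if MMU-writable was set, so that bit decides whether the
 * caller must flush before relying on the protection.
 */
static bool wrprot_spte_needs_flush(uint64_t *sptep)
{
	uint64_t old = *sptep;

	*sptep = old & ~(SPTE_WRITABLE | SPTE_MMU_WRITABLE);
	return old & SPTE_MMU_WRITABLE;
}

int main(void)
{
	uint64_t spte = SPTE_WRITABLE | SPTE_MMU_WRITABLE;

	if (wrprot_spte_needs_flush(&spte))
		printf("flush the TLB before trusting the write protection\n");
	return 0;
}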
186 * If CPL = 3, SMAP applies to all supervisor-mode data accesses in permission_fault()
196 unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC); in permission_fault()
198 (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1)); in permission_fault()
199 bool fault = (mmu->permissions[index] >> pte_access) & 1; in permission_fault()
203 if (unlikely(mmu->pkru_mask)) { in permission_fault()
212 pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3; in permission_fault()
216 ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT)); in permission_fault()
218 pkru_bits &= mmu->pkru_mask >> offset; in permission_fault()
219 errcode |= -pkru_bits & PFERR_PK_MASK; in permission_fault()
223 return -(u32)fault & errcode; in permission_fault()
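Both "-pkru_bits & PFERR_PK_MASK" and the final "return -(u32)fault & errcode" above rely on the same branchless select: negating a 0/1 value in unsigned arithmetic produces an all-zeros or all-ones mask. A small standalone illustration (select_errcode is an illustrative name):

#include <assert.h>
#include <stdint.h>

/*
 * Branchless select: -(uint32_t)1 is 0xffffffff and -(uint32_t)0 is 0,
 * so the AND keeps errcode only when fault is set.
 */
static uint32_t select_errcode(int fault, uint32_t errcode)
{
	return -(uint32_t)fault & errcode;
}

int main(void)
{
	assert(select_errcode(1, 0x5) == 0x5);	/* fault: return the error code */
	assert(select_errcode(0, 0x5) == 0);	/* no fault: return zero */
	return 0;
}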
240 return smp_load_acquire(&kvm->arch.memslots_have_rmaps); in kvm_memslots_have_rmaps()
246 return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) - in gfn_to_index()
254 return gfn_to_index(slot->base_gfn + npages - 1, in __kvm_mmu_slot_lpages()
255 slot->base_gfn, level) + 1; in __kvm_mmu_slot_lpages()
261 return __kvm_mmu_slot_lpages(slot, slot->npages, level); in kvm_mmu_slot_lpages()
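The helpers above count how many level-sized regions a memslot touches: take the index of the slot's last gfn relative to base_gfn at the given level and add one. A standalone re-derivation with simplified names (HPAGE_GFN_SHIFT, slot_lpages; assuming the usual 9 gfn bits per level):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* 9 gfn bits per level above 4K: level 1 = 4K, level 2 = 2M, level 3 = 1G. */
#define HPAGE_GFN_SHIFT(level)	(((level) - 1) * 9)

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> HPAGE_GFN_SHIFT(level));
}

static unsigned long slot_lpages(gfn_t base_gfn, unsigned long npages, int level)
{
	/* Index of the slot's last gfn at this level, plus one. */
	return gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
}

int main(void)
{
	/*
	 * A 2 MiB slot (0x200 gfns) starting at gfn 0x100 is not 2 MiB-aligned,
	 * so it straddles two 2 MiB regions and needs two level-2 entries.
	 */
	printf("%lu\n", slot_lpages(0x100, 0x200, 2));	/* prints 2 */
	return 0;
}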
266 atomic64_add(count, &kvm->stat.pages[level - 1]); in kvm_update_page_stats()
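kvm_update_page_stats() above adjusts a per-level page counter atomically; level is 1-based while the stats array is 0-based, and count can be negative on unmap. A rough standalone analogue using C11 atomics (pages_per_level and update_page_stats are illustrative names, not KVM's):

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in for the per-level page counters: [0]=4K, [1]=2M, [2]=1G. */
static _Atomic long long pages_per_level[3];

/* level is 1-based, the array is 0-based; count may be negative on unmap. */
static void update_page_stats(int level, long long count)
{
	atomic_fetch_add(&pages_per_level[level - 1], count);
}

int main(void)
{
	update_page_stats(1, 512);	/* map 512 4K pages */
	update_page_stats(2, 1);	/* map one 2M page */
	update_page_stats(1, -512);	/* unmap the 4K pages again */
	printf("%lld %lld\n", (long long)pages_per_level[0],
	       (long long)pages_per_level[1]);	/* prints "0 1" */
	return 0;
}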