Lines Matching +full:supervisor +full:- +full:level

   1  /* SPDX-License-Identifier: GPL-2.0 */
  57  return ((2ULL << (e - s)) - 1) << s;   [in rsvd_bits()]
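Line 57 is the body of KVM's reserved-bit helper: it builds a 64-bit mask with bits s through e (inclusive) set. A minimal standalone sketch of the same formula (the harness and the chosen bit range are illustrative, not from the kernel):

#include <stdio.h>
#include <stdint.h>

/* Build a mask with bits s..e (inclusive) set. Writing the width term as
 * (2ULL << (e - s)) - 1 rather than (1ULL << (e - s + 1)) - 1 keeps the
 * shift count at most 63 even when the range spans all 64 bits. */
static uint64_t rsvd_bits(int s, int e)
{
	if (e < s)
		return 0;
	return ((2ULL << (e - s)) - 1) << s;
}

int main(void)
{
	/* Example: bits 52..62 of a PTE, a typical reserved range. */
	printf("%#llx\n", (unsigned long long)rsvd_bits(52, 62));
	return 0;
}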
  61   * The number of non-reserved physical address bits irrespective of features
  81  return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;   [in kvm_mmu_max_gfn()]
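Line 81 turns an address-bit count into the largest usable guest frame number. A worked sketch, assuming 4 KiB pages and an example 52-bit guest-physical address width (the real value comes from CPU feature handling):

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB base pages */

int main(void)
{
	int max_gpa_bits = 52;	/* assumed guest-physical address width */

	/* 2^(52-12) page frames fit below 2^52 bytes, so the highest
	 * valid guest frame number is 2^40 - 1. */
	uint64_t max_gfn = (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
	printf("max gfn = %#llx\n", (unsigned long long)max_gfn);
	return 0;
}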
 129  if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))   [in kvm_mmu_reload()]
 151  u64 root_hpa = vcpu->arch.mmu->root.hpa;   [in kvm_mmu_load_pgd()]
 157  vcpu->arch.mmu->root_role.level);   [in kvm_mmu_load_pgd()]
 166   * be stale. Refresh CR0.WP and the metadata on-demand when checking   [in kvm_mmu_refresh_passthrough_bits()]
 172  if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)   [in kvm_mmu_refresh_passthrough_bits()]
 195   * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.   [in permission_fault()]
 196   * For implicit supervisor accesses, SMAP cannot be overridden.   [in permission_fault()]
 198   * SMAP works on supervisor accesses only, and not_smap can   [in permission_fault()]
 214  fault = (mmu->permissions[index] >> pte_access) & 1;   [in permission_fault()]
 217  if (unlikely(mmu->pkru_mask)) {   [in permission_fault()]
 226  pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;   [in permission_fault()]
 230  ((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));   [in permission_fault()]
 232  pkru_bits &= mmu->pkru_mask >> offset;   [in permission_fault()]
 233  errcode |= -pkru_bits & PFERR_PK_MASK;   [in permission_fault()]
 237  return -(u32)fault & errcode;   [in permission_fault()]
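The permission_fault() matches lean on two branchless idioms: PKRU packs an Access-Disable and a Write-Disable bit per protection key, recovered by shifting right by 2 * key, and -(u32)fault turns a 0/1 flag into an all-zeros or all-ones mask so the accumulated error code is returned only on an actual fault. A standalone sketch of both idioms using the standard x86 page-fault error-code bit positions; the simplified key check is illustrative, the real function also folds in SMAP/SMEP and the per-MMU permission bitmap:

#include <stdio.h>
#include <stdint.h>

#define PFERR_WRITE_MASK   (1u << 1)	/* write access */
#define PFERR_USER_MASK    (1u << 2)	/* user-mode access */
#define PFERR_PK_MASK      (1u << 5)	/* protection-key violation */

/* PKRU holds an Access-Disable and a Write-Disable bit per 4-bit key. */
static uint32_t pkey_fault_bits(uint32_t pkru, int pkey, int write)
{
	uint32_t pkru_bits = (pkru >> (pkey * 2)) & 3;	/* AD = bit 0, WD = bit 1 */

	/* Only writes care about Write-Disable, so drop WD for reads. */
	if (!write)
		pkru_bits &= 1;
	return pkru_bits;
}

int main(void)
{
	uint32_t pkru = 2u << (5 * 2);	/* WD set for key 5, everything else open */
	uint32_t errcode = PFERR_USER_MASK | PFERR_WRITE_MASK;
	uint32_t fault = pkey_fault_bits(pkru, 5, /*write=*/1) != 0;

	if (fault)
		errcode |= PFERR_PK_MASK;

	/* -(u32)fault is 0 when fault == 0 and 0xffffffff when fault == 1,
	 * so the error code survives only on an actual fault. */
	printf("%#x\n", -(uint32_t)fault & errcode);
	return 0;
}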
 255  return smp_load_acquire(&kvm->arch.shadow_root_allocated);   [in kvm_shadow_root_allocated()]
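The acquire load at line 255 pairs with a release store on the writer side: once a reader sees shadow_root_allocated as true, it is also guaranteed to see the allocations made before the flag was set. A userspace C11 analogue of that publish/observe pattern (all names here are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct shadow_state {
	void *rmap;		/* payload set up before publication */
	atomic_bool allocated;	/* publication flag */
};

/* Writer: initialize the payload, then release-store the flag. */
static void publish(struct shadow_state *s, void *rmap)
{
	s->rmap = rmap;
	atomic_store_explicit(&s->allocated, true, memory_order_release);
}

/* Reader: an acquire load of the flag guarantees that the payload
 * written before the release store is visible once the flag is true. */
static void *consume(struct shadow_state *s)
{
	if (atomic_load_explicit(&s->allocated, memory_order_acquire))
		return s->rmap;
	return NULL;
}

int main(void)
{
	static struct shadow_state s;
	int payload = 42;

	publish(&s, &payload);
	return consume(&s) == &payload ? 0 : 1;
}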
 269  static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)   [in gfn_to_index()]
 272  return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -   [in gfn_to_index()]
 273  (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));   [in gfn_to_index()]
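gfn_to_index() maps a guest frame number to an index within a slot's per-level array. Assuming the x86 definition where each level above 4 KiB covers 9 more GFN bits, i.e. KVM_HPAGE_GFN_SHIFT(level) == (level - 1) * 9 with level 1 = 4 KiB, 2 = 2 MiB, 3 = 1 GiB, a standalone sketch:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gfn_t;

/* x86: each page-table level covers 9 bits of GFN above the 4 KiB base. */
#define KVM_HPAGE_GFN_SHIFT(level)	(((level) - 1) * 9)

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* Which level-sized large page of the slot contains gfn? */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

int main(void)
{
	/* A slot starting at gfn 0x1000: gfn 0x1a00 lives in 2 MiB page
	 * index (0x1a00 >> 9) - (0x1000 >> 9) = 0xd - 0x8 = 5. */
	printf("%llu\n",
	       (unsigned long long)gfn_to_index(0x1a00, 0x1000, 2));
	return 0;
}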
 278  int level)   [in __kvm_mmu_slot_lpages()]
 280  return gfn_to_index(slot->base_gfn + npages - 1,   [in __kvm_mmu_slot_lpages()]
 281  slot->base_gfn, level) + 1;   [in __kvm_mmu_slot_lpages()]
 285  kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)   [in kvm_mmu_slot_lpages()]
 287  return __kvm_mmu_slot_lpages(slot, slot->npages, level);   [in kvm_mmu_slot_lpages()]
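Lines 280-287 count the level-sized pages a slot spans: take the index of the slot's last gfn relative to its base and add one. Note that the count depends on alignment, not just size. A sketch reusing the assumed shift macro from above:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t gfn_t;

#define KVM_HPAGE_GFN_SHIFT(level)	(((level) - 1) * 9)

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

/* Number of level-sized large pages needed to cover npages 4 KiB pages
 * starting at base_gfn: index of the last page, plus one. */
static unsigned long slot_lpages(gfn_t base_gfn, unsigned long npages,
				 int level)
{
	return gfn_to_index(base_gfn + npages - 1, base_gfn, level) + 1;
}

int main(void)
{
	/* 1024 pages (4 MiB) starting at gfn 0x1ff: misaligned, so the
	 * range straddles three 2 MiB regions rather than two. */
	printf("%lu\n", slot_lpages(0x1ff, 1024, 2));
	return 0;
}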
 290  static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)   [in kvm_update_page_stats()]
 292  atomic64_add(count, &kvm->stat.pages[level - 1]);   [in kvm_update_page_stats()]
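Line 292 keeps one atomic counter per mapping level; level is 1-based, hence the pages[level - 1] index, and count can be negative when mappings are destroyed. A userspace analogue with C11 atomics (the three x86 page sizes are an assumption):

#include <stdatomic.h>
#include <stdio.h>
#include <stdint.h>

/* One counter per mapping size: 4 KiB, 2 MiB, 1 GiB (x86 sizes assumed). */
static _Atomic int64_t pages[3];

/* level is 1-based (1 = 4K, 2 = 2M, 3 = 1G); count is negative on unmap. */
static void update_page_stats(int level, int64_t count)
{
	atomic_fetch_add(&pages[level - 1], count);
}

int main(void)
{
	update_page_stats(2, 1);	/* one 2 MiB mapping created */
	update_page_stats(2, -1);	/* ...and destroyed again */
	update_page_stats(1, 512);	/* replaced by 512 4 KiB mappings */
	printf("%lld %lld\n",
	       (long long)atomic_load(&pages[0]),
	       (long long)atomic_load(&pages[1]));
	return 0;
}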
 303  if (mmu != &vcpu->arch.nested_mmu)   [in kvm_translate_gpa()]