Lines Matching refs:mmu

2184 || vcpu->arch.mmu.sync_page(vcpu, sp) == 0) { in __kvm_sync_page()
2378 role = vcpu->arch.mmu.base_role; in kvm_mmu_get_page()
2384 if (!vcpu->arch.mmu.direct_map in kvm_mmu_get_page()
2385 && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) { in kvm_mmu_get_page()
2460 iterator->level = vcpu->arch.mmu.shadow_root_level; in shadow_walk_init_using_root()
2463 vcpu->arch.mmu.root_level < PT64_ROOT_4LEVEL && in shadow_walk_init_using_root()
2464 !vcpu->arch.mmu.direct_map) in shadow_walk_init_using_root()
2472 BUG_ON(root != vcpu->arch.mmu.root_hpa); in shadow_walk_init_using_root()
2475 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3]; in shadow_walk_init_using_root()
2486 shadow_walk_init_using_root(iterator, vcpu, vcpu->arch.mmu.root_hpa, in shadow_walk_init()
3098 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in __direct_map()
3313 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in fast_page_fault()
3487 struct kvm_mmu *mmu = &vcpu->arch.mmu; in kvm_mmu_free_roots() local
3493 if (!(free_active_root && VALID_PAGE(mmu->root_hpa))) { in kvm_mmu_free_roots()
3496 VALID_PAGE(mmu->prev_roots[i].hpa)) in kvm_mmu_free_roots()
3507 mmu_free_root_page(vcpu->kvm, &mmu->prev_roots[i].hpa, in kvm_mmu_free_roots()
3511 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && in kvm_mmu_free_roots()
3512 (mmu->root_level >= PT64_ROOT_4LEVEL || mmu->direct_map)) { in kvm_mmu_free_roots()
3513 mmu_free_root_page(vcpu->kvm, &mmu->root_hpa, in kvm_mmu_free_roots()
3517 if (mmu->pae_root[i] != 0) in kvm_mmu_free_roots()
3519 &mmu->pae_root[i], in kvm_mmu_free_roots()
3521 mmu->root_hpa = INVALID_PAGE; in kvm_mmu_free_roots()
3547 if (vcpu->arch.mmu.shadow_root_level >= PT64_ROOT_4LEVEL) { in mmu_alloc_direct_roots()
3554 vcpu->arch.mmu.shadow_root_level, 1, ACC_ALL); in mmu_alloc_direct_roots()
3557 vcpu->arch.mmu.root_hpa = __pa(sp->spt); in mmu_alloc_direct_roots()
3558 } else if (vcpu->arch.mmu.shadow_root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_direct_roots()
3560 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_direct_roots()
3573 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK; in mmu_alloc_direct_roots()
3575 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_direct_roots()
3589 root_gfn = vcpu->arch.mmu.get_cr3(vcpu) >> PAGE_SHIFT; in mmu_alloc_shadow_roots()
3598 if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3599 hpa_t root = vcpu->arch.mmu.root_hpa; in mmu_alloc_shadow_roots()
3609 vcpu->arch.mmu.shadow_root_level, 0, ACC_ALL); in mmu_alloc_shadow_roots()
3613 vcpu->arch.mmu.root_hpa = root; in mmu_alloc_shadow_roots()
3623 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) in mmu_alloc_shadow_roots()
3627 hpa_t root = vcpu->arch.mmu.pae_root[i]; in mmu_alloc_shadow_roots()
3630 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) { in mmu_alloc_shadow_roots()
3631 pdptr = vcpu->arch.mmu.get_pdptr(vcpu, i); in mmu_alloc_shadow_roots()
3633 vcpu->arch.mmu.pae_root[i] = 0; in mmu_alloc_shadow_roots()
3651 vcpu->arch.mmu.pae_root[i] = root | pm_mask; in mmu_alloc_shadow_roots()
3653 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root); in mmu_alloc_shadow_roots()
3659 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_4LEVEL) { in mmu_alloc_shadow_roots()
3660 if (vcpu->arch.mmu.lm_root == NULL) { in mmu_alloc_shadow_roots()
3672 lm_root[0] = __pa(vcpu->arch.mmu.pae_root) | pm_mask; in mmu_alloc_shadow_roots()
3674 vcpu->arch.mmu.lm_root = lm_root; in mmu_alloc_shadow_roots()
3677 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.lm_root); in mmu_alloc_shadow_roots()
3685 if (vcpu->arch.mmu.direct_map) in mmu_alloc_roots()
3696 if (vcpu->arch.mmu.direct_map) in kvm_mmu_sync_roots()
3699 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in kvm_mmu_sync_roots()
3704 if (vcpu->arch.mmu.root_level >= PT64_ROOT_4LEVEL) { in kvm_mmu_sync_roots()
3705 hpa_t root = vcpu->arch.mmu.root_hpa; in kvm_mmu_sync_roots()
3737 hpa_t root = vcpu->arch.mmu.pae_root[i]; in kvm_mmu_sync_roots()
3777 static bool is_rsvd_bits_set(struct kvm_mmu *mmu, u64 gpte, int level) in is_rsvd_bits_set() argument
3779 return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level); in is_rsvd_bits_set()
3782 static bool is_shadow_zero_bits_set(struct kvm_mmu *mmu, u64 spte, int level) in is_shadow_zero_bits_set() argument
3784 return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level); in is_shadow_zero_bits_set()
3811 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in walk_shadow_page_get_mmio_spte()
3828 reserved |= is_shadow_zero_bits_set(&vcpu->arch.mmu, spte, in walk_shadow_page_get_mmio_spte()
3907 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa)) in shadow_page_table_clear_flood()
3934 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in nonpaging_page_fault()
3947 arch.direct_map = vcpu->arch.mmu.direct_map; in kvm_arch_setup_async_pf()
3948 arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu); in kvm_arch_setup_async_pf()
4054 MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa)); in tdp_page_fault()
4130 struct kvm_mmu *mmu = &vcpu->arch.mmu; in cached_root_available() local
4132 root.cr3 = mmu->get_cr3(vcpu); in cached_root_available()
4133 root.hpa = mmu->root_hpa; in cached_root_available()
4136 swap(root, mmu->prev_roots[i]); in cached_root_available()
4144 mmu->root_hpa = root.hpa; in cached_root_available()
4153 struct kvm_mmu *mmu = &vcpu->arch.mmu; in fast_cr3_switch() local
4160 if (mmu->shadow_root_level >= PT64_ROOT_4LEVEL && in fast_cr3_switch()
4161 mmu->root_level >= PT64_ROOT_4LEVEL) { in fast_cr3_switch()
4190 page_header(mmu->root_hpa)); in fast_cr3_switch()
4222 vcpu->arch.mmu.inject_page_fault(vcpu, fault); in inject_page_fault()
4242 static inline bool is_last_gpte(struct kvm_mmu *mmu, in is_last_gpte() argument
4250 gpte &= level - mmu->last_nonleaf_level; in is_last_gpte()
4514 struct kvm_mmu *mmu, bool ept) in update_permission_bitmask() argument
4526 for (byte = 0; byte < ARRAY_SIZE(mmu->permissions); ++byte) { in update_permission_bitmask()
4550 if (!mmu->nx) in update_permission_bitmask()
4581 mmu->permissions[byte] = ff | uf | wf | smepf | smapf; in update_permission_bitmask()
4609 static void update_pkru_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in update_pkru_bitmask() argument
4616 mmu->pkru_mask = 0; in update_pkru_bitmask()
4622 mmu->pkru_mask = 0; in update_pkru_bitmask()
4628 for (bit = 0; bit < ARRAY_SIZE(mmu->permissions); ++bit) { in update_pkru_bitmask()
4656 mmu->pkru_mask |= (pkey_bits & 3) << pfec; in update_pkru_bitmask()
4660 static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu) in update_last_nonleaf_level() argument
4662 unsigned root_level = mmu->root_level; in update_last_nonleaf_level()
4664 mmu->last_nonleaf_level = root_level; in update_last_nonleaf_level()
4666 mmu->last_nonleaf_level++; in update_last_nonleaf_level()
4743 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_tdp_mmu()
4815 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_mmu()
4835 union kvm_mmu_page_role role = vcpu->arch.mmu.base_role; in kvm_calc_shadow_ept_root_page_role()
4849 struct kvm_mmu *context = &vcpu->arch.mmu; in kvm_init_shadow_ept_mmu()
4876 struct kvm_mmu *context = &vcpu->arch.mmu; in init_kvm_softmmu()
4933 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_init_mmu()
4936 vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; in kvm_init_mmu()
4985 WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_unload()
4999 vcpu->arch.mmu.update_pte(vcpu, sp, spte, new); in mmu_pte_write_new_pte()
5179 !((sp->role.word ^ vcpu->arch.mmu.base_role.word) in kvm_mmu_pte_write()
5197 if (vcpu->arch.mmu.direct_map) in kvm_mmu_unprotect_page_virt()
5233 bool direct = vcpu->arch.mmu.direct_map; in kvm_mmu_page_fault()
5236 if (vcpu->arch.mmu.direct_map) { in kvm_mmu_page_fault()
5249 r = vcpu->arch.mmu.page_fault(vcpu, cr2, lower_32_bits(error_code), in kvm_mmu_page_fault()
5266 if (vcpu->arch.mmu.direct_map && in kvm_mmu_page_fault()
5314 struct kvm_mmu *mmu = &vcpu->arch.mmu; in kvm_mmu_invlpg() local
5321 mmu->invlpg(vcpu, gva, mmu->root_hpa); in kvm_mmu_invlpg()
5335 if (VALID_PAGE(mmu->prev_roots[i].hpa)) in kvm_mmu_invlpg()
5336 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); in kvm_mmu_invlpg()
5345 struct kvm_mmu *mmu = &vcpu->arch.mmu; in kvm_mmu_invpcid_gva() local
5350 mmu->invlpg(vcpu, gva, mmu->root_hpa); in kvm_mmu_invpcid_gva()
5355 if (VALID_PAGE(mmu->prev_roots[i].hpa) && in kvm_mmu_invpcid_gva()
5356 pcid == kvm_get_pcid(vcpu, mmu->prev_roots[i].cr3)) { in kvm_mmu_invpcid_gva()
5357 mmu->invlpg(vcpu, gva, mmu->prev_roots[i].hpa); in kvm_mmu_invpcid_gva()
5389 free_page((unsigned long)vcpu->arch.mmu.pae_root); in free_mmu_pages()
5390 free_page((unsigned long)vcpu->arch.mmu.lm_root); in free_mmu_pages()
5410 vcpu->arch.mmu.pae_root = page_address(page); in alloc_mmu_pages()
5412 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE; in alloc_mmu_pages()
5421 vcpu->arch.walk_mmu = &vcpu->arch.mmu; in kvm_mmu_create()
5422 vcpu->arch.mmu.root_hpa = INVALID_PAGE; in kvm_mmu_create()
5423 vcpu->arch.mmu.translate_gpa = translate_gpa; in kvm_mmu_create()
5427 vcpu->arch.mmu.prev_roots[i] = KVM_MMU_ROOT_INFO_INVALID; in kvm_mmu_create()
5434 MMU_WARN_ON(VALID_PAGE(vcpu->arch.mmu.root_hpa)); in kvm_mmu_setup()