Lines matching refs: kvm_mmu_page (arch/x86/kvm/mmu/tdp_mmu.c); a hedged lifecycle sketch follows the listing.
54 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp) in tdp_mmu_free_sp()
70 struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page, in tdp_mmu_free_sp_rcu_callback()
76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root, in kvm_tdp_mmu_put_root()
107 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm, in tdp_mmu_next_root()
108 struct kvm_mmu_page *prev_root, in tdp_mmu_next_root()
111 struct kvm_mmu_page *next_root; in tdp_mmu_next_root()
181 static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu) in tdp_mmu_alloc_sp()
183 struct kvm_mmu_page *sp; in tdp_mmu_alloc_sp()
191 static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep, in tdp_mmu_init_sp()
206 static void tdp_mmu_init_child_sp(struct kvm_mmu_page *child_sp, in tdp_mmu_init_child_sp()
209 struct kvm_mmu_page *parent_sp; in tdp_mmu_init_child_sp()
224 struct kvm_mmu_page *root; in kvm_tdp_mmu_get_vcpu_root_hpa()
262 static void tdp_account_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_account_mmu_page()
268 static void tdp_unaccount_mmu_page(struct kvm *kvm, struct kvm_mmu_page *sp) in tdp_unaccount_mmu_page()
283 static void tdp_mmu_unlink_sp(struct kvm *kvm, struct kvm_mmu_page *sp, in tdp_mmu_unlink_sp()
322 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt)); in handle_removed_pt()
701 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in __tdp_mmu_zap_root()
727 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_root()
763 bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_tdp_mmu_zap_sp()
791 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_leafs()
833 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_leafs()
843 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_all()
867 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_invalidated_roots()
912 struct kvm_mmu_page *root; in kvm_tdp_mmu_invalidate_all_roots()
956 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(iter->sptep)); in tdp_mmu_map_handle_target_level()
1016 struct kvm_mmu_page *sp, bool shared) in tdp_mmu_link_sp()
1035 struct kvm_mmu_page *sp, bool shared);
1046 struct kvm_mmu_page *sp; in kvm_tdp_mmu_map()
1126 struct kvm_mmu_page *root; in kvm_tdp_mmu_unmap_gfn_range()
1142 struct kvm_mmu_page *root; in kvm_tdp_mmu_handle_gfn()
1271 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range()
1312 struct kvm_mmu_page *root; in kvm_tdp_mmu_wrprot_slot()
1324 static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(gfp_t gfp) in __tdp_mmu_alloc_sp_for_split()
1326 struct kvm_mmu_page *sp; in __tdp_mmu_alloc_sp_for_split()
1343 static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm, in tdp_mmu_alloc_sp_for_split()
1347 struct kvm_mmu_page *sp; in tdp_mmu_alloc_sp_for_split()
1384 struct kvm_mmu_page *sp, bool shared) in tdp_mmu_split_huge_page()
1422 struct kvm_mmu_page *root, in tdp_mmu_split_huge_pages_root()
1426 struct kvm_mmu_page *sp = NULL; in tdp_mmu_split_huge_pages_root()
1495 struct kvm_mmu_page *root; in kvm_tdp_mmu_try_split_huge_pages()
1516 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range()
1559 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_slot()
1578 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked()
1631 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_pt_masked()
1638 struct kvm_mmu_page *root, in zap_collapsible_spte_range()
1695 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_collapsible_sptes()
1708 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn()
1748 struct kvm_mmu_page *root; in kvm_tdp_mmu_write_protect_gfn()
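
For orientation, the allocation/initialization/teardown path that several of these hits fall in looks roughly like the sketch below. This is a hedged approximation of the tdp_mmu_alloc_sp(), tdp_mmu_init_sp(), tdp_mmu_free_sp() and tdp_mmu_free_sp_rcu_callback() helpers named in the listing, written from memory of recent upstream kernels; exact field, cache and tracepoint details vary by kernel version and are not taken from this listing.

/*
 * Hedged sketch of the kvm_mmu_page lifecycle in the TDP MMU; names and
 * fields follow recent upstream kernels and may differ in other versions.
 */

/* Allocate the page-table page and its kvm_mmu_page header from the
 * vCPU's pre-filled per-vCPU memory caches. */
static struct kvm_mmu_page *tdp_mmu_alloc_sp(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *sp;

	sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
	sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);

	return sp;
}

/* Initialize the header: record the role, the GFN the table maps, and the
 * parent SPTE that will point at it, and mark it as a TDP MMU page. */
static void tdp_mmu_init_sp(struct kvm_mmu_page *sp, tdp_ptep_t sptep,
			    gfn_t gfn, union kvm_mmu_page_role role)
{
	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

	sp->role = role;
	sp->gfn = gfn;
	sp->ptep = sptep;
	sp->tdp_mmu_page = true;
}

/* Free both halves of the shadow page once it is unreachable. */
static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
{
	free_page((unsigned long)sp->spt);
	kmem_cache_free(mmu_page_header_cache, sp);
}

/* Roots are freed via RCU so lockless walkers can still dereference them. */
static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
{
	struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
					       rcu_head);

	tdp_mmu_free_sp(sp);
}

The RCU callback is the interesting design point: TDP MMU page-table walks can run under rcu_read_lock() without holding mmu_lock, so a page's backing memory must outlive any walker that may still hold a pointer to it, and the final free is therefore deferred to an RCU grace period.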