Lines matching references to root. Each hit shows the source line number, the matching code, the enclosing function, and whether root is a function argument or a local variable of that function.

76 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,  in kvm_tdp_mmu_put_root()  argument
81 if (!refcount_dec_and_test(&root->tdp_mmu_root_count)) in kvm_tdp_mmu_put_root()
89 KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm); in kvm_tdp_mmu_put_root()
92 list_del_rcu(&root->link); in kvm_tdp_mmu_put_root()
94 call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback); in kvm_tdp_mmu_put_root()
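Read together, these hits outline the root release path: nothing happens until the last reference is dropped, the root is expected to already be invalid at that point, and it is unlinked with list_del_rcu() and freed only after an RCU grace period. A hedged reconstruction of that shape; the lockdep assertion and the tdp_mmu_pages_lock spinlock around the list removal are assumptions, not shown in the hits above:

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared)
{
	/* Assumed: mmu_lock is held in the mode matching @shared. */
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	/* Only the last put tears the root down. */
	if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
		return;

	/* A valid TDP MMU root should never reach a zero refcount. */
	KVM_BUG_ON(!is_tdp_mmu_page(root) || !root->role.invalid, kvm);

	/* Assumed: the roots list is protected by tdp_mmu_pages_lock. */
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_del_rcu(&root->link);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

	/* Free the shadow page once concurrent RCU walkers are done with it. */
	call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
}
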
224 struct kvm_mmu_page *root; in kvm_tdp_mmu_get_vcpu_root_hpa() local
232 for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) { in kvm_tdp_mmu_get_vcpu_root_hpa()
233 if (root->role.word == role.word && in kvm_tdp_mmu_get_vcpu_root_hpa()
234 kvm_tdp_mmu_get_root(root)) in kvm_tdp_mmu_get_vcpu_root_hpa()
238 root = tdp_mmu_alloc_sp(vcpu); in kvm_tdp_mmu_get_vcpu_root_hpa()
239 tdp_mmu_init_sp(root, NULL, 0, role); in kvm_tdp_mmu_get_vcpu_root_hpa()
248 refcount_set(&root->tdp_mmu_root_count, 2); in kvm_tdp_mmu_get_vcpu_root_hpa()
251 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots); in kvm_tdp_mmu_get_vcpu_root_hpa()
255 return __pa(root->spt); in kvm_tdp_mmu_get_vcpu_root_hpa()
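These hits trace the vCPU root lookup: an existing root with a matching role is reused if a reference can be taken on it, otherwise a fresh shadow page is allocated, initialized, given a refcount of 2 (one for the vCPU, one held by the TDP MMU itself until the root is invalidated), published on tdp_mmu_roots, and its physical address returned. A sketch of that flow; the write-lock assertion and the spinlock around list publication are assumptions beyond what the hits show:

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
	union kvm_mmu_page_role role = vcpu->arch.mmu->root_role;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_page *root;

	/* Assumed: callers hold mmu_lock for write. */
	lockdep_assert_held_write(&kvm->mmu_lock);

	/* Reuse an existing root with the same role if it can be pinned. */
	for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
		if (root->role.word == role.word &&
		    kvm_tdp_mmu_get_root(root))
			goto out;
	}

	root = tdp_mmu_alloc_sp(vcpu);
	tdp_mmu_init_sp(root, NULL, 0, role);

	/* One reference for the vCPU, one held by the TDP MMU itself. */
	refcount_set(&root->tdp_mmu_root_count, 2);

	/* Assumed: publication is serialized by tdp_mmu_pages_lock. */
	spin_lock(&kvm->arch.tdp_mmu_pages_lock);
	list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
	spin_unlock(&kvm->arch.tdp_mmu_pages_lock);

out:
	return __pa(root->spt);
}
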
643 for_each_tdp_pte(_iter, root_to_sp(_mmu->root.hpa), _start, _end)
701 static void __tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in __tdp_mmu_zap_root() argument
709 for_each_tdp_pte_min_level(iter, root, zap_level, start, end) { in __tdp_mmu_zap_root()
727 static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_root() argument
741 WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count)); in tdp_mmu_zap_root()
757 __tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G); in tdp_mmu_zap_root()
758 __tdp_mmu_zap_root(kvm, root, shared, root->role.level); in tdp_mmu_zap_root()
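The two back-to-back __tdp_mmu_zap_root() calls are the notable part: the root is zapped in two passes, first at 1 GiB granularity so the walk can yield without tearing down huge subtrees in one go, then from the root's own level down to clean up what remains. A hedged sketch of the surrounding function; the lockdep assertion and the RCU read-side critical section are assumptions not visible in the hits:

static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
			     bool shared)
{
	/*
	 * The caller must hold a reference; zapping an unreachable root
	 * could race with the root being freed.
	 */
	WARN_ON_ONCE(!refcount_read(&root->tdp_mmu_root_count));

	/* Assumed: mmu_lock is held in the mode matching @shared. */
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	rcu_read_lock();

	/*
	 * Pass 1: zap at 1 GiB granularity to bound the work done between
	 * rescheduling points.  Pass 2: zap the remaining upper-level
	 * entries starting from the root's own level.
	 */
	__tdp_mmu_zap_root(kvm, root, shared, PG_LEVEL_1G);
	__tdp_mmu_zap_root(kvm, root, shared, root->role.level);

	rcu_read_unlock();
}
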
791 static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root, in tdp_mmu_zap_leafs() argument
802 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_4K, start, end) { in tdp_mmu_zap_leafs()
833 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_leafs() local
835 for_each_tdp_mmu_root_yield_safe(kvm, root, false) in kvm_tdp_mmu_zap_leafs()
836 flush = tdp_mmu_zap_leafs(kvm, root, start, end, true, flush); in kvm_tdp_mmu_zap_leafs()
843 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_all() local
857 for_each_tdp_mmu_root_yield_safe(kvm, root, false) in kvm_tdp_mmu_zap_all()
858 tdp_mmu_zap_root(kvm, root, false); in kvm_tdp_mmu_zap_all()
867 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_invalidated_roots() local
871 for_each_tdp_mmu_root_yield_safe(kvm, root, true) { in kvm_tdp_mmu_zap_invalidated_roots()
872 if (!root->tdp_mmu_scheduled_root_to_zap) in kvm_tdp_mmu_zap_invalidated_roots()
875 root->tdp_mmu_scheduled_root_to_zap = false; in kvm_tdp_mmu_zap_invalidated_roots()
876 KVM_BUG_ON(!root->role.invalid, kvm); in kvm_tdp_mmu_zap_invalidated_roots()
887 tdp_mmu_zap_root(kvm, root, true); in kvm_tdp_mmu_zap_invalidated_roots()
894 kvm_tdp_mmu_put_root(kvm, root, true); in kvm_tdp_mmu_zap_invalidated_roots()
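Here the per-root flag set by kvm_tdp_mmu_invalidate_all_roots() (see its hits below) is consumed: roots not scheduled for zapping are skipped, scheduled roots must already be invalid, and each is zapped before the TDP MMU's own reference to it is dropped. A sketch under the assumption that this runs with mmu_lock held for read, which is consistent with shared == true in the calls above:

void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	/* Assumed: this path takes mmu_lock for read. */
	read_lock(&kvm->mmu_lock);

	for_each_tdp_mmu_root_yield_safe(kvm, root, true) {
		if (!root->tdp_mmu_scheduled_root_to_zap)
			continue;

		root->tdp_mmu_scheduled_root_to_zap = false;
		KVM_BUG_ON(!root->role.invalid, kvm);

		tdp_mmu_zap_root(kvm, root, true);

		/*
		 * Drop the TDP MMU's own reference only after the zap, so
		 * the root stays reachable while it is being torn down.
		 */
		kvm_tdp_mmu_put_root(kvm, root, true);
	}

	read_unlock(&kvm->mmu_lock);
}
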
912 struct kvm_mmu_page *root; in kvm_tdp_mmu_invalidate_all_roots() local
934 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) { in kvm_tdp_mmu_invalidate_all_roots()
941 if (!root->role.invalid) { in kvm_tdp_mmu_invalidate_all_roots()
942 root->tdp_mmu_scheduled_root_to_zap = true; in kvm_tdp_mmu_invalidate_all_roots()
943 root->role.invalid = true; in kvm_tdp_mmu_invalidate_all_roots()
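The invalidation side is deliberately minimal: it only flags still-valid roots, both marking them invalid (so valid-only iterators skip them and no new vCPU picks them up) and scheduling them for the deferred zap shown above. A sketch, with the list walk taken directly from the hits and the locking requirements assumed:

void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
{
	struct kvm_mmu_page *root;

	/*
	 * Assumed: callers either hold mmu_lock for write or own the last
	 * reference to the VM, so nothing can concurrently get/put roots.
	 */
	list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link) {
		/*
		 * Already-invalid roots may still be on the list (a holder
		 * of a reference can keep a zapped root alive); only flag
		 * roots that are still valid.
		 */
		if (!root->role.invalid) {
			root->tdp_mmu_scheduled_root_to_zap = true;
			root->role.invalid = true;
		}
	}
}
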
1126 struct kvm_mmu_page *root; in kvm_tdp_mmu_unmap_gfn_range() local
1128 __for_each_tdp_mmu_root_yield_safe(kvm, root, range->slot->as_id, false, false) in kvm_tdp_mmu_unmap_gfn_range()
1129 flush = tdp_mmu_zap_leafs(kvm, root, range->start, range->end, in kvm_tdp_mmu_unmap_gfn_range()
1142 struct kvm_mmu_page *root; in kvm_tdp_mmu_handle_gfn() local
1150 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) { in kvm_tdp_mmu_handle_gfn()
1153 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end) in kvm_tdp_mmu_handle_gfn()
1271 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range() argument
1282 for_each_tdp_pte_min_level(iter, root, min_level, start, end) { in wrprot_gfn_range()
1312 struct kvm_mmu_page *root; in kvm_tdp_mmu_wrprot_slot() local
1317 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_wrprot_slot()
1318 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
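The slot-wide write-protect caller shows the per-slot pattern that also recurs below for dirty-log clearing (kvm_tdp_mmu_clear_dirty_slot) and hugepage collapsing (kvm_tdp_mmu_zap_collapsible_sptes): iterate only valid roots for the slot's address space, yield-safely with mmu_lock held for read, and OR together whether any SPTE was changed. A sketch; the read-lock assertion and the tail of the wrprot_gfn_range() argument list beyond what the hits show are assumptions:

bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	/* Assumed: callers hold mmu_lock for read (shared == true below). */
	lockdep_assert_held_read(&kvm->mmu_lock);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
		spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
					     slot->base_gfn + slot->npages,
					     min_level);

	return spte_set;
}
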
1422 struct kvm_mmu_page *root, in tdp_mmu_split_huge_pages_root() argument
1443 for_each_tdp_pte_min_level(iter, root, target_level + 1, start, end) { in tdp_mmu_split_huge_pages_root()
1495 struct kvm_mmu_page *root; in kvm_tdp_mmu_try_split_huge_pages() local
1500 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) { in kvm_tdp_mmu_try_split_huge_pages()
1501 r = tdp_mmu_split_huge_pages_root(kvm, root, start, end, target_level, shared); in kvm_tdp_mmu_try_split_huge_pages()
1503 kvm_tdp_mmu_put_root(kvm, root, shared); in kvm_tdp_mmu_try_split_huge_pages()
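This is the one caller in the listing that puts a root explicitly inside its loop: if splitting fails on a root (for example on an allocation error), the reference the valid-root iterator holds on the current root must be dropped before abandoning the walk. A sketch assuming an early break on a non-zero return code r, which is not visible in the hits:

void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared)
{
	struct kvm_mmu_page *root;
	int r = 0;

	/* Assumed: mmu_lock is held in the mode matching @shared. */
	kvm_lockdep_assert_mmu_lock_held(kvm, shared);

	for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, shared) {
		r = tdp_mmu_split_huge_pages_root(kvm, root, start, end,
						  target_level, shared);
		if (r) {
			/*
			 * The yield-safe iterator holds a reference on the
			 * current root; drop it before bailing out.
			 */
			kvm_tdp_mmu_put_root(kvm, root, shared);
			break;
		}
	}
}
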
1516 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range() argument
1525 tdp_root_for_each_leaf_pte(iter, root, start, end) { in clear_dirty_gfn_range()
1559 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_slot() local
1564 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_clear_dirty_slot()
1565 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
1578 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked() argument
1589 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), in clear_dirty_pt_masked()
1631 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_pt_masked() local
1633 for_each_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_clear_dirty_pt_masked()
1634 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); in kvm_tdp_mmu_clear_dirty_pt_masked()
1638 struct kvm_mmu_page *root, in zap_collapsible_spte_range() argument
1648 for_each_tdp_pte_min_level(iter, root, PG_LEVEL_2M, start, end) { in zap_collapsible_spte_range()
1695 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_collapsible_sptes() local
1699 for_each_valid_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true) in kvm_tdp_mmu_zap_collapsible_sptes()
1700 zap_collapsible_spte_range(kvm, root, slot); in kvm_tdp_mmu_zap_collapsible_sptes()
1708 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn() argument
1719 for_each_tdp_pte_min_level(iter, root, min_level, gfn, gfn + 1) { in write_protect_gfn()
1748 struct kvm_mmu_page *root; in kvm_tdp_mmu_write_protect_gfn() local
1752 for_each_tdp_mmu_root(kvm, root, slot->as_id) in kvm_tdp_mmu_write_protect_gfn()
1753 spte_set |= write_protect_gfn(kvm, root, gfn, min_level); in kvm_tdp_mmu_write_protect_gfn()
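Finally, single-GFN write protection walks every root in the slot's address space without the yield-safe or valid-only machinery, since only one gfn is touched per root. A sketch; the write-lock assertion is an assumption beyond what the hits show:

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level)
{
	struct kvm_mmu_page *root;
	bool spte_set = false;

	/* Assumed: callers hold mmu_lock for write. */
	lockdep_assert_held_write(&kvm->mmu_lock);

	for_each_tdp_mmu_root(kvm, root, slot->as_id)
		spte_set |= write_protect_gfn(kvm, root, gfn, min_level);

	return spte_set;
}
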