Lines Matching full:root (KVM x86 TDP MMU)

64 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
67 void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root) in kvm_tdp_mmu_free_root() argument
73 WARN_ON(root->root_count); in kvm_tdp_mmu_free_root()
74 WARN_ON(!root->tdp_mmu_page); in kvm_tdp_mmu_free_root()
76 list_del(&root->link); in kvm_tdp_mmu_free_root()
78 zap_gfn_range(kvm, root, 0, max_gfn, false); in kvm_tdp_mmu_free_root()
80 free_page((unsigned long)root->spt); in kvm_tdp_mmu_free_root()
81 kmem_cache_free(mmu_page_header_cache, root); in kvm_tdp_mmu_free_root()
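
The matches in kvm_tdp_mmu_free_root() outline the teardown order: sanity-check that the root is unreferenced, unlink it from the root list, zap everything it maps, then free the page-table page and its header. A minimal sketch assembled from the matched lines; the max_gfn bound is not part of the matches and is an assumption here, and locking is omitted:

	void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
	{
		/* Assumption: upper bound of the guest physical address space. */
		gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);

		/* The root must be unreferenced and must be a TDP MMU page. */
		WARN_ON(root->root_count);
		WARN_ON(!root->tdp_mmu_page);

		/* Unlink the root so no new walker can find it... */
		list_del(&root->link);

		/* ...then zap its mappings before releasing the memory. */
		zap_gfn_range(kvm, root, 0, max_gfn, false);

		free_page((unsigned long)root->spt);
		kmem_cache_free(mmu_page_header_cache, root);
	}
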
118 struct kvm_mmu_page *root; in get_tdp_mmu_vcpu_root() local
124 /* Check for an existing root before allocating a new one. */ in get_tdp_mmu_vcpu_root()
125 for_each_tdp_mmu_root(kvm, root) { in get_tdp_mmu_vcpu_root()
126 if (root->role.word == role.word) { in get_tdp_mmu_vcpu_root()
127 kvm_mmu_get_root(kvm, root); in get_tdp_mmu_vcpu_root()
129 return root; in get_tdp_mmu_vcpu_root()
133 root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level); in get_tdp_mmu_vcpu_root()
134 root->root_count = 1; in get_tdp_mmu_vcpu_root()
136 list_add(&root->link, &kvm->arch.tdp_mmu_roots); in get_tdp_mmu_vcpu_root()
140 return root; in get_tdp_mmu_vcpu_root()
145 struct kvm_mmu_page *root; in kvm_tdp_mmu_get_vcpu_root_hpa() local
147 root = get_tdp_mmu_vcpu_root(vcpu); in kvm_tdp_mmu_get_vcpu_root_hpa()
148 if (!root) in kvm_tdp_mmu_get_vcpu_root_hpa()
151 return __pa(root->spt); in kvm_tdp_mmu_get_vcpu_root_hpa()
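
The get_tdp_mmu_vcpu_root() matches show the lookup-or-allocate pattern for vCPU roots: reuse an existing root whose role matches, otherwise allocate a new one, pin it with root_count = 1, and publish it on tdp_mmu_roots. kvm_tdp_mmu_get_vcpu_root_hpa() then returns the physical address of the root table. A sketch of that flow; the role derivation, the locking, and the INVALID_PAGE failure path are assumptions not shown in the matches:

	static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
	{
		/* Assumption: role for the vCPU's current MMU context. */
		union kvm_mmu_page_role role = vcpu->arch.mmu->mmu_role.base;
		struct kvm *kvm = vcpu->kvm;
		struct kvm_mmu_page *root;

		/* Check for an existing root before allocating a new one. */
		for_each_tdp_mmu_root(kvm, root) {
			if (root->role.word == role.word) {
				kvm_mmu_get_root(kvm, root);
				return root;
			}
		}

		root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
		root->root_count = 1;

		list_add(&root->link, &kvm->arch.tdp_mmu_roots);

		return root;
	}

	hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
	{
		struct kvm_mmu_page *root = get_tdp_mmu_vcpu_root(vcpu);

		if (!root)
			return INVALID_PAGE;	/* Assumed failure sentinel. */

		return __pa(root->spt);
	}
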
316 struct kvm_mmu_page *root = sptep_to_sp(root_pt); in __tdp_mmu_set_spte() local
317 int as_id = kvm_mmu_page_as_id(root); in __tdp_mmu_set_spte()
392 * non-root pages mapping GFNs strictly within that range. Returns true if
401 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in zap_gfn_range() argument
407 tdp_root_for_each_pte(iter, root, start, end) { in zap_gfn_range()
433 * non-root pages mapping GFNs strictly within that range. Returns true if
439 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_gfn_range() local
442 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_zap_gfn_range()
444 * Take a reference on the root so that it cannot be freed if in kvm_tdp_mmu_zap_gfn_range()
447 kvm_mmu_get_root(kvm, root); in kvm_tdp_mmu_zap_gfn_range()
449 flush |= zap_gfn_range(kvm, root, start, end, true); in kvm_tdp_mmu_zap_gfn_range()
451 kvm_mmu_put_root(kvm, root); in kvm_tdp_mmu_zap_gfn_range()
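
kvm_tdp_mmu_zap_gfn_range() shows the reference pattern that recurs in most of the root-walking operations below: pin each root with kvm_mmu_get_root() so it cannot be freed while the walk runs (and possibly yields the MMU lock), do the per-root work, then drop the pin with kvm_mmu_put_root(). A sketch built from the matched lines, with the function signature assumed:

	bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
	{
		struct kvm_mmu_page *root;
		bool flush = false;

		for_each_tdp_mmu_root(kvm, root) {
			/*
			 * Take a reference on the root so that it cannot be
			 * freed while this thread operates on it.
			 */
			kvm_mmu_get_root(kvm, root);

			flush |= zap_gfn_range(kvm, root, start, end, true);

			kvm_mmu_put_root(kvm, root);
		}

		return flush;
	}
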
603 struct kvm_mmu_page *root, gfn_t start, in kvm_tdp_mmu_handle_hva_range() argument
608 struct kvm_mmu_page *root; in kvm_tdp_mmu_handle_hva_range() local
612 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_handle_hva_range()
614 * Take a reference on the root so that it cannot be freed if in kvm_tdp_mmu_handle_hva_range()
617 kvm_mmu_get_root(kvm, root); in kvm_tdp_mmu_handle_hva_range()
619 as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_handle_hva_range()
637 ret |= handler(kvm, memslot, root, gfn_start, in kvm_tdp_mmu_handle_hva_range()
641 kvm_mmu_put_root(kvm, root); in kvm_tdp_mmu_handle_hva_range()
649 struct kvm_mmu_page *root, gfn_t start, in zap_gfn_range_hva_wrapper() argument
652 return zap_gfn_range(kvm, root, start, end, false); in zap_gfn_range_hva_wrapper()
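
kvm_tdp_mmu_handle_hva_range() funnels HVA-based notifier callbacks into per-root handlers: for each root it takes a reference, resolves the affected memslot and GFN range, and invokes the handler with the root and the GFN bounds. The handler shape below is inferred from the matched argument lines; the memslot and data parameters and the int return are assumptions. zap_gfn_range_hva_wrapper() is the zap handler from the listing:

	/* Assumed handler shape, inferred from the matched argument lines. */
	typedef int (*tdp_hva_handler_t)(struct kvm *kvm,
					 struct kvm_memory_slot *slot,
					 struct kvm_mmu_page *root,
					 gfn_t start, gfn_t end,
					 unsigned long data);

	static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     struct kvm_mmu_page *root,
					     gfn_t start, gfn_t end,
					     unsigned long data)
	{
		/*
		 * The final flag is assumed to mean "can yield"; both this
		 * wrapper and kvm_tdp_mmu_free_root() pass false, while
		 * kvm_tdp_mmu_zap_gfn_range() passes true.
		 */
		return zap_gfn_range(kvm, root, start, end, false);
	}
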
667 struct kvm_mmu_page *root, gfn_t start, gfn_t end, in age_gfn_range() argument
674 tdp_root_for_each_leaf_pte(iter, root, start, end) { in age_gfn_range()
714 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, in test_age_gfn() argument
719 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) in test_age_gfn()
739 struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused, in set_tdp_spte() argument
752 tdp_root_for_each_pte(iter, root, gfn, gfn + 1) { in set_tdp_spte()
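
The age/access and PTE-change handlers operate on a single GFN and use the leaf iterators directly. A hypothetical sketch of test_age_gfn() along those lines; is_accessed_spte() and struct tdp_iter are assumed helpers, and only the iteration over gfn..gfn+1 comes from the matches:

	static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
				struct kvm_mmu_page *root, gfn_t gfn,
				gfn_t unused, unsigned long unused2)
	{
		struct tdp_iter iter;
		int young = 0;

		/* Walk only the leaf SPTE(s) covering this single GFN. */
		tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
			if (is_accessed_spte(iter.old_spte))
				young = 1;

		return young;
	}
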
792 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in wrprot_gfn_range() argument
801 for_each_tdp_pte_min_level(iter, root->spt, root->role.level, in wrprot_gfn_range()
825 struct kvm_mmu_page *root; in kvm_tdp_mmu_wrprot_slot() local
829 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_wrprot_slot()
830 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_wrprot_slot()
835 * Take a reference on the root so that it cannot be freed if in kvm_tdp_mmu_wrprot_slot()
838 kvm_mmu_get_root(kvm, root); in kvm_tdp_mmu_wrprot_slot()
840 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
843 kvm_mmu_put_root(kvm, root); in kvm_tdp_mmu_wrprot_slot()
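
kvm_tdp_mmu_wrprot_slot() adds one step to the pattern: slot-scoped operations first compare the root's address-space id against the slot's and skip roots from a different address space before taking the reference. The same shape repeats below in the dirty-logging, collapsible-SPTE, and write-protect functions. A sketch; the as_id comparison, the end GFN, and the min_level parameter are assumptions:

	bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
				     struct kvm_memory_slot *slot, int min_level)
	{
		struct kvm_mmu_page *root;
		int root_as_id;
		bool spte_set = false;

		for_each_tdp_mmu_root(kvm, root) {
			root_as_id = kvm_mmu_page_as_id(root);
			/* Assumption: skip roots from another address space. */
			if (root_as_id != slot->as_id)
				continue;

			kvm_mmu_get_root(kvm, root);

			spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
						     slot->base_gfn + slot->npages,
						     min_level);

			kvm_mmu_put_root(kvm, root);
		}

		return spte_set;
	}
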
856 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_gfn_range() argument
863 tdp_root_for_each_leaf_pte(iter, root, start, end) { in clear_dirty_gfn_range()
893 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_slot() local
897 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_clear_dirty_slot()
898 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_clear_dirty_slot()
903 * Take a reference on the root so that it cannot be freed if in kvm_tdp_mmu_clear_dirty_slot()
906 kvm_mmu_get_root(kvm, root); in kvm_tdp_mmu_clear_dirty_slot()
908 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
911 kvm_mmu_put_root(kvm, root); in kvm_tdp_mmu_clear_dirty_slot()
924 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root, in clear_dirty_pt_masked() argument
930 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask), in clear_dirty_pt_masked()
969 struct kvm_mmu_page *root; in kvm_tdp_mmu_clear_dirty_pt_masked() local
973 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_clear_dirty_pt_masked()
974 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_clear_dirty_pt_masked()
978 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot); in kvm_tdp_mmu_clear_dirty_pt_masked()
987 static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root, in set_dirty_gfn_range() argument
994 tdp_root_for_each_pte(iter, root, start, end) { in set_dirty_gfn_range()
1016 struct kvm_mmu_page *root; in kvm_tdp_mmu_slot_set_dirty() local
1020 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_slot_set_dirty()
1021 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_slot_set_dirty()
1026 * Take a reference on the root so that it cannot be freed if in kvm_tdp_mmu_slot_set_dirty()
1029 kvm_mmu_get_root(kvm, root); in kvm_tdp_mmu_slot_set_dirty()
1031 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_slot_set_dirty()
1034 kvm_mmu_put_root(kvm, root); in kvm_tdp_mmu_slot_set_dirty()
1044 struct kvm_mmu_page *root, in zap_collapsible_spte_range() argument
1051 tdp_root_for_each_pte(iter, root, start, end) { in zap_collapsible_spte_range()
1077 struct kvm_mmu_page *root; in kvm_tdp_mmu_zap_collapsible_sptes() local
1080 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_zap_collapsible_sptes()
1081 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_zap_collapsible_sptes()
1086 * Take a reference on the root so that it cannot be freed if in kvm_tdp_mmu_zap_collapsible_sptes()
1089 kvm_mmu_get_root(kvm, root); in kvm_tdp_mmu_zap_collapsible_sptes()
1091 zap_collapsible_spte_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_zap_collapsible_sptes()
1094 kvm_mmu_put_root(kvm, root); in kvm_tdp_mmu_zap_collapsible_sptes()
1103 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root, in write_protect_gfn() argument
1110 tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) { in write_protect_gfn()
1132 struct kvm_mmu_page *root; in kvm_tdp_mmu_write_protect_gfn() local
1137 for_each_tdp_mmu_root(kvm, root) { in kvm_tdp_mmu_write_protect_gfn()
1138 root_as_id = kvm_mmu_page_as_id(root); in kvm_tdp_mmu_write_protect_gfn()
1142 spte_set |= write_protect_gfn(kvm, root, gfn); in kvm_tdp_mmu_write_protect_gfn()
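
Like kvm_tdp_mmu_clear_dirty_pt_masked(), kvm_tdp_mmu_write_protect_gfn() shows no kvm_mmu_get_root()/kvm_mmu_put_root() pair in its matches, consistent with a walk that never drops the MMU lock and therefore does not need to pin the roots. A sketch under that assumption; the as_id check and the lockdep assertion are likewise assumed:

	bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
					   struct kvm_memory_slot *slot, gfn_t gfn)
	{
		struct kvm_mmu_page *root;
		int root_as_id;
		bool spte_set = false;

		/* Assumption: caller holds the MMU lock for the whole walk. */
		lockdep_assert_held(&kvm->mmu_lock);

		for_each_tdp_mmu_root(kvm, root) {
			root_as_id = kvm_mmu_page_as_id(root);
			if (root_as_id != slot->as_id)
				continue;

			spte_set |= write_protect_gfn(kvm, root, gfn);
		}

		return spte_set;
	}
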