Lines Matching refs:rmap_head
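
This listing collects every reference to struct kvm_rmap_head in KVM's x86 MMU: the reverse map ("rmap") from a guest frame number to all shadow PTEs (sptes) that map it. Each head packs its state into a single unsigned long, and the matched lines show the encoding directly: bit 0 clear means val holds one spte pointer, bit 0 set means val (with the bit masked off) points to a chained pte_list_desc holding several sptes. The shapes below are reconstructed from that usage; the slot count per descriptor is an assumption, not something visible in the listing.

/* Approximate shapes, inferred from the usage shown below; the real kernel
 * definitions may differ in detail.  PTE_LIST_EXT (slots per descriptor)
 * is assumed here. */
#define PTE_LIST_EXT 3

struct kvm_rmap_head {
	unsigned long val;	/* bit 0 clear: single spte pointer;
				 * bit 0 set:   pte_list_desc pointer | 1 */
};

struct pte_list_desc {
	unsigned long *sptes[PTE_LIST_EXT];	/* u64 * in the kernel */
	struct pte_list_desc *more;		/* next descriptor in the chain */
};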

841 			struct kvm_rmap_head *rmap_head)  in pte_list_add()  argument
846 if (!rmap_head->val) { in pte_list_add()
848 rmap_head->val = (unsigned long)spte; in pte_list_add()
849 } else if (!(rmap_head->val & 1)) { in pte_list_add()
852 desc->sptes[0] = (u64 *)rmap_head->val; in pte_list_add()
854 rmap_head->val = (unsigned long)desc | 1; in pte_list_add()
858 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); in pte_list_add()
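
pte_list_add() (lines 841-858 above) is where that encoding gets built up: an empty head stores the spte pointer itself, a second mapping promotes the head to a descriptor with bit 0 set, and further mappings are appended to the descriptor chain. A user-space sketch of the same three branches, using the approximate structures above (sketch_* names are illustrative, not the kernel's; the kernel version also returns how many sptes are now on the list, and allocates descriptors from a per-vCPU cache rather than calloc()):

#include <stdlib.h>

static void sketch_pte_list_add(struct kvm_rmap_head *rmap_head,
				unsigned long *spte)
{
	struct pte_list_desc *desc;
	int i;

	if (!rmap_head->val) {
		/* First mapping: store the spte pointer directly. */
		rmap_head->val = (unsigned long)spte;
	} else if (!(rmap_head->val & 1)) {
		/* Second mapping: promote to a descriptor, tag bit 0. */
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (unsigned long *)rmap_head->val;
		desc->sptes[1] = spte;
		rmap_head->val = (unsigned long)desc | 1;
	} else {
		/* Already a descriptor chain: append, growing if needed. */
		desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
		while (desc->sptes[PTE_LIST_EXT - 1] && desc->more)
			desc = desc->more;
		if (desc->sptes[PTE_LIST_EXT - 1]) {
			desc->more = calloc(1, sizeof(*desc));
			desc = desc->more;
		}
		for (i = 0; desc->sptes[i]; i++)
			;
		desc->sptes[i] = spte;
	}
}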
877 pte_list_desc_remove_entry(struct kvm_rmap_head *rmap_head, in pte_list_desc_remove_entry() argument
890 rmap_head->val = 0; in pte_list_desc_remove_entry()
895 rmap_head->val = (unsigned long)desc->more | 1; in pte_list_desc_remove_entry()
899 static void __pte_list_remove(u64 *spte, struct kvm_rmap_head *rmap_head) in __pte_list_remove() argument
905 if (!rmap_head->val) { in __pte_list_remove()
908 } else if (!(rmap_head->val & 1)) { in __pte_list_remove()
910 if ((u64 *)rmap_head->val != spte) { in __pte_list_remove()
914 rmap_head->val = 0; in __pte_list_remove()
917 desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); in __pte_list_remove()
922 pte_list_desc_remove_entry(rmap_head, in __pte_list_remove()
935 static void pte_list_remove(struct kvm_rmap_head *rmap_head, u64 *sptep) in pte_list_remove() argument
938 __pte_list_remove(sptep, rmap_head); in pte_list_remove()
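
Removal (lines 877-938) undoes the same encoding. An empty head, or a single entry that does not match, is a bug; a matching single entry just clears val; in the descriptor case the entry is located, the hole is filled with the descriptor's last entry (that compaction is pte_list_desc_remove_entry()'s job, and it keeps every descriptor packed from slot 0), and a descriptor that becomes empty is unlinked, either re-tagging val with the next descriptor or clearing the head. Continuing the sketch above:

static void sketch_pte_list_remove(struct kvm_rmap_head *rmap_head,
				   unsigned long *spte)
{
	struct pte_list_desc *desc, *prev = NULL;
	int i, j;

	if (!rmap_head->val)
		return;				/* bug: nothing mapped here */

	if (!(rmap_head->val & 1)) {
		if ((unsigned long *)rmap_head->val == spte)
			rmap_head->val = 0;	/* last mapping is gone */
		return;				/* a mismatch would also be a bug */
	}

	desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	for (; desc; prev = desc, desc = desc->more) {
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; i++) {
			if (desc->sptes[i] != spte)
				continue;
			/* Fill the hole with this descriptor's last entry. */
			for (j = PTE_LIST_EXT - 1; !desc->sptes[j]; j--)
				;
			desc->sptes[i] = desc->sptes[j];
			desc->sptes[j] = NULL;
			if (j != 0)
				return;
			/* Descriptor is now empty: unlink and free it. */
			if (prev)
				prev->more = desc->more;
			else if (desc->more)
				rmap_head->val = (unsigned long)desc->more | 1;
			else
				rmap_head->val = 0;
			free(desc);
			return;
		}
	}
}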
972 struct kvm_rmap_head *rmap_head; in rmap_add() local
976 rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_add()
977 return pte_list_add(vcpu, spte, rmap_head); in rmap_add()
984 struct kvm_rmap_head *rmap_head; in rmap_remove() local
988 rmap_head = gfn_to_rmap(kvm, gfn, sp); in rmap_remove()
989 __pte_list_remove(spte, rmap_head); in rmap_remove()
1009 static u64 *rmap_get_first(struct kvm_rmap_head *rmap_head, in rmap_get_first() argument
1014 if (!rmap_head->val) in rmap_get_first()
1017 if (!(rmap_head->val & 1)) { in rmap_get_first()
1019 sptep = (u64 *)rmap_head->val; in rmap_get_first()
1023 iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul); in rmap_get_first()
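
rmap_get_first() (lines 1009-1023) seeds the iterator used by every for_each_rmap_spte() walker below: with bit 0 clear it hands back the single spte and leaves the descriptor pointer NULL, otherwise it starts at slot 0 of the first descriptor. Because removal keeps each descriptor packed, the matching get-next step can stop at the first NULL slot. A sketch of the pair plus the wrapper macro, continuing the model above (sketch_* names are illustrative):

struct sketch_rmap_iter {
	struct pte_list_desc *desc;	/* NULL while walking a single-spte head */
	int pos;			/* slot index within desc */
};

static unsigned long *sketch_rmap_get_first(struct kvm_rmap_head *rmap_head,
					    struct sketch_rmap_iter *iter)
{
	if (!rmap_head->val)
		return NULL;
	if (!(rmap_head->val & 1)) {
		iter->desc = NULL;
		return (unsigned long *)rmap_head->val;
	}
	iter->desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
	iter->pos = 0;
	return iter->desc->sptes[0];
}

static unsigned long *sketch_rmap_get_next(struct sketch_rmap_iter *iter)
{
	if (!iter->desc)
		return NULL;		/* the single-entry case is exhausted */
	if (iter->pos + 1 < PTE_LIST_EXT && iter->desc->sptes[iter->pos + 1])
		return iter->desc->sptes[++iter->pos];
	iter->desc = iter->desc->more;	/* descriptors are packed, so move on */
	iter->pos = 0;
	return iter->desc ? iter->desc->sptes[0] : NULL;
}

/* for_each_rmap_spte() in the listing is a for-loop over this first/next pair. */
#define sketch_for_each_rmap_spte(head, iter, spte)			\
	for ((spte) = sketch_rmap_get_first((head), (iter));		\
	     (spte); (spte) = sketch_rmap_get_next((iter)))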
1128 struct kvm_rmap_head *rmap_head, in __rmap_write_protect() argument
1135 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_write_protect()
1168 static bool __rmap_clear_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) in __rmap_clear_dirty() argument
1174 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_clear_dirty()
1199 static bool __rmap_set_dirty(struct kvm *kvm, struct kvm_rmap_head *rmap_head) in __rmap_set_dirty() argument
1205 for_each_rmap_spte(rmap_head, &iter, sptep) in __rmap_set_dirty()
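
__rmap_write_protect(), __rmap_clear_dirty() and __rmap_set_dirty() (lines 1128-1205) all share one shape: walk every spte mapping the gfn with for_each_rmap_spte(), flip a permission or dirty bit in each, and report whether anything changed so the caller can decide about a TLB flush. A sketch of that shape; the bit below is purely illustrative, and the real handlers use spte helpers that know the actual spte layout:

#define SKETCH_DIRTY_BIT	(1ul << 9)	/* illustrative, not the real layout */

static int sketch_rmap_clear_dirty(struct kvm_rmap_head *rmap_head)
{
	struct sketch_rmap_iter iter;
	unsigned long *sptep;
	int flush = 0;

	sketch_for_each_rmap_spte(rmap_head, &iter, sptep) {
		if (*sptep & SKETCH_DIRTY_BIT) {
			*sptep &= ~SKETCH_DIRTY_BIT;
			flush = 1;
		}
	}
	return flush;	/* caller flushes TLBs only if something changed */
}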
1226 struct kvm_rmap_head *rmap_head; in kvm_mmu_write_protect_pt_masked() local
1232 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_write_protect_pt_masked()
1234 __rmap_write_protect(kvm, rmap_head, false); in kvm_mmu_write_protect_pt_masked()
1255 struct kvm_rmap_head *rmap_head; in kvm_mmu_clear_dirty_pt_masked() local
1261 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_clear_dirty_pt_masked()
1263 __rmap_clear_dirty(kvm, rmap_head); in kvm_mmu_clear_dirty_pt_masked()
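
kvm_mmu_write_protect_pt_masked() and kvm_mmu_clear_dirty_pt_masked() (lines 1226-1263) apply those handlers to one 4K-level rmap head per set bit of a dirty-bitmap word: __ffs(mask) yields the position of the lowest set bit and mask &= mask - 1 clears it. A sketch of that bit walk; handle_gfn() is a hypothetical stand-in for the __gfn_to_rmap() lookup plus handler call:

static void sketch_handle_masked_gfns(unsigned long base_gfn,
				      unsigned long gfn_offset,
				      unsigned long mask,
				      void (*handle_gfn)(unsigned long gfn))
{
	while (mask) {
		/* __builtin_ctzl() plays the role of the kernel's __ffs(). */
		handle_gfn(base_gfn + gfn_offset + __builtin_ctzl(mask));
		mask &= mask - 1;	/* clear the lowest set bit */
	}
}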
1295 struct kvm_rmap_head *rmap_head; in kvm_mmu_slot_gfn_write_protect() local
1300 rmap_head = __gfn_to_rmap(gfn, i, slot); in kvm_mmu_slot_gfn_write_protect()
1301 write_protected |= __rmap_write_protect(kvm, rmap_head, true); in kvm_mmu_slot_gfn_write_protect()
1319 static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head) in kvm_zap_rmapp() argument
1325 while ((sptep = rmap_get_first(rmap_head, &iter))) { in kvm_zap_rmapp()
1328 pte_list_remove(rmap_head, sptep); in kvm_zap_rmapp()
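
kvm_zap_rmapp() (lines 1319-1328) is the one walker that cannot use for_each_rmap_spte(): pte_list_remove() rewrites the list out from under the iterator, so it repeatedly takes rmap_get_first() until the head is empty. In terms of the sketches above:

static int sketch_zap_all(struct kvm_rmap_head *rmap_head)
{
	struct sketch_rmap_iter iter;
	unsigned long *sptep;
	int flush = 0;

	/* Restart from the head on every pass; removal invalidates the iterator. */
	while ((sptep = sketch_rmap_get_first(rmap_head, &iter)) != NULL) {
		sketch_pte_list_remove(rmap_head, sptep);
		flush = 1;
	}
	return flush;
}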
1335 static int kvm_unmap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_unmap_rmapp() argument
1339 return kvm_zap_rmapp(kvm, rmap_head); in kvm_unmap_rmapp()
1342 static int kvm_set_pte_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_set_pte_rmapp() argument
1357 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_set_pte_rmapp()
1364 pte_list_remove(rmap_head, sptep); in kvm_set_pte_rmapp()
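
kvm_set_pte_rmapp() (lines 1342-1364) mixes the two styles: it walks with for_each_rmap_spte(), but whenever it drops an spte with pte_list_remove() it restarts the walk from the head, since the removal may have rewritten the descriptor it was standing on. kvm_mmu_zap_collapsible_spte() at the end of the listing relies on the same remove-and-restart pattern. The shape, with should_drop() a hypothetical predicate:

static void sketch_remove_matching(struct kvm_rmap_head *rmap_head,
				   int (*should_drop)(unsigned long *sptep))
{
	struct sketch_rmap_iter iter;
	unsigned long *sptep;

restart:
	sketch_for_each_rmap_spte(rmap_head, &iter, sptep) {
		if (should_drop(sptep)) {
			sketch_pte_list_remove(rmap_head, sptep);
			goto restart;	/* iterator is stale after removal */
		}
	}
}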
1456 struct kvm_rmap_head *rmap_head, in kvm_handle_hva_range() argument
1501 struct kvm_rmap_head *rmap_head, in kvm_handle_hva() argument
1534 static int kvm_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_age_rmapp() argument
1542 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_age_rmapp()
1549 static int kvm_test_age_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head, in kvm_test_age_rmapp() argument
1556 for_each_rmap_spte(rmap_head, &iter, sptep) in kvm_test_age_rmapp()
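
kvm_age_rmapp() and kvm_test_age_rmapp() (lines 1534-1556) follow the same walk as the dirty-tracking handlers, but for the accessed bit: the first clears it in every spte and reports whether any were young, the second only tests and can stop at the first hit. A compact sketch of the test variant (the bit is illustrative):

#define SKETCH_ACCESSED_BIT	(1ul << 8)	/* illustrative, not the real layout */

static int sketch_rmap_test_age(struct kvm_rmap_head *rmap_head)
{
	struct sketch_rmap_iter iter;
	unsigned long *sptep;

	sketch_for_each_rmap_spte(rmap_head, &iter, sptep)
		if (*sptep & SKETCH_ACCESSED_BIT)
			return 1;	/* at least one mapping was recently used */
	return 0;
}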
1566 struct kvm_rmap_head *rmap_head; in rmap_recycle() local
1571 rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp); in rmap_recycle()
1573 kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0); in rmap_recycle()
5205 typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
5508 struct kvm_rmap_head *rmap_head) in slot_rmap_write_protect() argument
5510 return __rmap_write_protect(kvm, rmap_head, false); in slot_rmap_write_protect()
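
The slot_level_handler typedef (line 5205) is how the __rmap_* walkers are plugged into whole-memslot operations: slot_rmap_write_protect() (lines 5508-5510) merely adapts __rmap_write_protect() to that signature, and a generic slot walker then invokes the handler on every rmap head of the slot, one level at a time, ORing the returned flush hints. A minimal sketch of that driving loop; the real walker also handles locking, rescheduling, and intermediate flushes, and the heads/nr_heads parameters here are purely illustrative:

typedef int (*sketch_slot_handler)(struct kvm_rmap_head *rmap_head);

static int sketch_walk_slot_level(struct kvm_rmap_head *heads,
				  unsigned long nr_heads,
				  sketch_slot_handler fn)
{
	int flush = 0;
	unsigned long i;

	for (i = 0; i < nr_heads; i++)
		flush |= fn(&heads[i]);
	return flush;	/* whether any handler asked for a TLB flush */
}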
5542 struct kvm_rmap_head *rmap_head) in kvm_mmu_zap_collapsible_spte() argument
5551 for_each_rmap_spte(rmap_head, &iter, sptep) { in kvm_mmu_zap_collapsible_spte()
5565 pte_list_remove(rmap_head, sptep); in kvm_mmu_zap_collapsible_spte()