Lines matching refs: kvm
98 int lpid = vcpu->kvm->arch.lpid; in kvmhv_copy_tofrom_guest_radix()
140 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_walk_radix_tree() local
174 ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte)); in kvmppc_mmu_walk_radix_tree()
236 struct kvm *kvm = vcpu->kvm; in kvmppc_mmu_radix_translate_table() local
252 ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry)); in kvmppc_mmu_radix_translate_table()
283 vcpu->kvm->arch.process_table, pid, &pte); in kvmppc_mmu_radix_xlate()
309 void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr, in kvmppc_radix_tlbie_page() argument
347 static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid) in kvmppc_radix_flush_pwc() argument
368 static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep, in kvmppc_radix_update_pte() argument
375 static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr, in kvmppc_radix_set_pte_at() argument
378 radix__set_pte_at(kvm->mm, addr, ptep, pte, 0); in kvmppc_radix_set_pte_at()
417 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa, in kvmppc_unmap_pte() argument
428 old = kvmppc_radix_update_pte(kvm, pte, ~0UL, 0, gpa, shift); in kvmppc_unmap_pte()
429 kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid); in kvmppc_unmap_pte()
432 if (lpid != kvm->arch.lpid) in kvmppc_unmap_pte()
436 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
443 kvm->stat.num_2M_pages--; in kvmppc_unmap_pte()
445 kvm->stat.num_1G_pages--; in kvmppc_unmap_pte()
450 kvmhv_remove_nest_rmap_range(kvm, memslot, gpa, hpa, page_size); in kvmppc_unmap_pte()
470 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full, in kvmppc_unmap_free_pte() argument
482 kvmppc_unmap_pte(kvm, p, in kvmppc_unmap_free_pte()
491 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full, in kvmppc_unmap_free_pmd() argument
505 kvmppc_unmap_pte(kvm, (pte_t *)p, in kvmppc_unmap_free_pmd()
513 kvmppc_unmap_free_pte(kvm, pte, full, lpid); in kvmppc_unmap_free_pmd()
520 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, in kvmppc_unmap_free_pud() argument
535 kvmppc_unmap_free_pmd(kvm, pmd, true, lpid); in kvmppc_unmap_free_pud()
539 pud_free(kvm->mm, pud); in kvmppc_unmap_free_pud()
542 void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) in kvmppc_free_pgtable_radix() argument
553 kvmppc_unmap_free_pud(kvm, pud, lpid); in kvmppc_free_pgtable_radix()
558 void kvmppc_free_radix(struct kvm *kvm) in kvmppc_free_radix() argument
560 if (kvm->arch.pgtable) { in kvmppc_free_radix()
561 kvmppc_free_pgtable_radix(kvm, kvm->arch.pgtable, in kvmppc_free_radix()
562 kvm->arch.lpid); in kvmppc_free_radix()
563 pgd_free(kvm->mm, kvm->arch.pgtable); in kvmppc_free_radix()
564 kvm->arch.pgtable = NULL; in kvmppc_free_radix()
568 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd, in kvmppc_unmap_free_pmd_entry_table() argument
579 kvmppc_radix_flush_pwc(kvm, lpid); in kvmppc_unmap_free_pmd_entry_table()
581 kvmppc_unmap_free_pte(kvm, pte, false, lpid); in kvmppc_unmap_free_pmd_entry_table()
584 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud, in kvmppc_unmap_free_pud_entry_table() argument
595 kvmppc_radix_flush_pwc(kvm, lpid); in kvmppc_unmap_free_pud_entry_table()
597 kvmppc_unmap_free_pmd(kvm, pmd, false, lpid); in kvmppc_unmap_free_pud_entry_table()
609 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, in kvmppc_create_pte() argument
629 new_pud = pud_alloc_one(kvm->mm, gpa); in kvmppc_create_pte()
641 spin_lock(&kvm->mmu_lock); in kvmppc_create_pte()
643 if (mmu_invalidate_retry(kvm, mmu_seq)) in kvmppc_create_pte()
651 p4d_populate(kvm->mm, p4d, new_pud); in kvmppc_create_pte()
667 kvmppc_radix_update_pte(kvm, (pte_t *)pud, in kvmppc_create_pte()
681 kvmppc_unmap_pte(kvm, (pte_t *)pud, hgpa, PUD_SHIFT, NULL, in kvmppc_create_pte()
691 kvmppc_unmap_free_pud_entry_table(kvm, pud, gpa, lpid); in kvmppc_create_pte()
693 kvmppc_radix_set_pte_at(kvm, gpa, (pte_t *)pud, pte); in kvmppc_create_pte()
695 kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); in kvmppc_create_pte()
702 pud_populate(kvm->mm, pud, new_pmd); in kvmppc_create_pte()
718 kvmppc_radix_update_pte(kvm, pmdp_ptep(pmd), in kvmppc_create_pte()
733 kvmppc_unmap_pte(kvm, pmdp_ptep(pmd), lgpa, PMD_SHIFT, NULL, in kvmppc_create_pte()
743 kvmppc_unmap_free_pmd_entry_table(kvm, pmd, gpa, lpid); in kvmppc_create_pte()
745 kvmppc_radix_set_pte_at(kvm, gpa, pmdp_ptep(pmd), pte); in kvmppc_create_pte()
747 kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); in kvmppc_create_pte()
754 pmd_populate(kvm->mm, pmd, new_ptep); in kvmppc_create_pte()
767 kvmppc_radix_update_pte(kvm, ptep, 0, pte_val(pte), gpa, 0); in kvmppc_create_pte()
771 kvmppc_radix_set_pte_at(kvm, gpa, ptep, pte); in kvmppc_create_pte()
773 kvmhv_insert_nest_rmap(kvm, rmapp, n_rmap); in kvmppc_create_pte()
777 spin_unlock(&kvm->mmu_lock); in kvmppc_create_pte()
779 pud_free(kvm->mm, new_pud); in kvmppc_create_pte()
787 bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing, in kvmppc_hv_handle_set_rc() argument
804 ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift); in kvmppc_hv_handle_set_rc()
806 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvmppc_hv_handle_set_rc()
809 kvmppc_radix_update_pte(kvm, ptep, 0, pgflags, gpa, shift); in kvmppc_hv_handle_set_rc()
821 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_instantiate_page() local
833 mmu_seq = kvm->mmu_invalidate_seq; in kvmppc_book3s_instantiate_page()
865 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
866 ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); in kvmppc_book3s_instantiate_page()
870 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_instantiate_page()
915 ret = kvmppc_create_pte(kvm, kvm->arch.pgtable, pte, gpa, level, in kvmppc_book3s_instantiate_page()
916 mmu_seq, kvm->arch.lpid, NULL, NULL); in kvmppc_book3s_instantiate_page()
931 kvm->stat.num_2M_pages++; in kvmppc_book3s_instantiate_page()
933 kvm->stat.num_1G_pages++; in kvmppc_book3s_instantiate_page()
942 struct kvm *kvm = vcpu->kvm; in kvmppc_book3s_radix_page_fault() local
970 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_book3s_radix_page_fault()
971 return kvmppc_send_page_to_uv(kvm, gfn); in kvmppc_book3s_radix_page_fault()
974 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_radix_page_fault()
1005 spin_lock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1006 if (kvmppc_hv_handle_set_rc(kvm, false, writing, in kvmppc_book3s_radix_page_fault()
1007 gpa, kvm->arch.lpid)) in kvmppc_book3s_radix_page_fault()
1009 spin_unlock(&kvm->mmu_lock); in kvmppc_book3s_radix_page_fault()
1026 void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_unmap_radix() argument
1033 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) { in kvm_unmap_radix()
1034 uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT); in kvm_unmap_radix()
1038 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvm_unmap_radix()
1040 kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, in kvm_unmap_radix()
1041 kvm->arch.lpid); in kvm_unmap_radix()
1045 bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_age_radix() argument
1054 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvm_age_radix()
1057 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvm_age_radix()
1059 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0, in kvm_age_radix()
1064 kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0, in kvm_age_radix()
1073 bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot, in kvm_test_age_radix() argument
1082 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvm_test_age_radix()
1085 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvm_test_age_radix()
1092 static int kvm_radix_test_clear_dirty(struct kvm *kvm, in kvm_radix_test_clear_dirty() argument
1102 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvm_radix_test_clear_dirty()
1109 ptep = find_kvm_secondary_pte_unlocked(kvm, gpa, &shift); in kvm_radix_test_clear_dirty()
1115 spin_lock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1127 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1134 old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0, in kvm_radix_test_clear_dirty()
1136 kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid); in kvm_radix_test_clear_dirty()
1139 kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0, in kvm_radix_test_clear_dirty()
1142 spin_unlock(&kvm->mmu_lock); in kvm_radix_test_clear_dirty()
1147 long kvmppc_hv_get_dirty_log_radix(struct kvm *kvm, in kvmppc_hv_get_dirty_log_radix() argument
1154 npages = kvm_radix_test_clear_dirty(kvm, memslot, i); in kvmppc_hv_get_dirty_log_radix()
1172 void kvmppc_radix_flush_memslot(struct kvm *kvm, in kvmppc_radix_flush_memslot() argument
1180 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START) in kvmppc_radix_flush_memslot()
1181 kvmppc_uvmem_drop_pages(memslot, kvm, true); in kvmppc_radix_flush_memslot()
1183 if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) in kvmppc_radix_flush_memslot()
1187 spin_lock(&kvm->mmu_lock); in kvmppc_radix_flush_memslot()
1189 ptep = find_kvm_secondary_pte(kvm, gpa, &shift); in kvmppc_radix_flush_memslot()
1191 kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot, in kvmppc_radix_flush_memslot()
1192 kvm->arch.lpid); in kvmppc_radix_flush_memslot()
1199 kvm->mmu_invalidate_seq++; in kvmppc_radix_flush_memslot()
1200 spin_unlock(&kvm->mmu_lock); in kvmppc_radix_flush_memslot()
1213 int kvmhv_get_rmmu_info(struct kvm *kvm, struct kvm_ppc_rmmu_info *info) in kvmhv_get_rmmu_info() argument
1240 int kvmppc_init_vm_radix(struct kvm *kvm) in kvmppc_init_vm_radix() argument
1242 kvm->arch.pgtable = pgd_alloc(kvm->mm); in kvmppc_init_vm_radix()
1243 if (!kvm->arch.pgtable) in kvmppc_init_vm_radix()
1259 struct kvm *kvm; member
1271 struct kvm *kvm = inode->i_private; in debugfs_radix_open() local
1278 kvm_get_kvm(kvm); in debugfs_radix_open()
1279 p->kvm = kvm; in debugfs_radix_open()
1290 kvm_put_kvm(p->kvm); in debugfs_radix_release()
1301 struct kvm *kvm; in debugfs_radix_read() local
1313 kvm = p->kvm; in debugfs_radix_read()
1314 if (!kvm_is_radix(kvm)) in debugfs_radix_read()
1350 p->lpid = kvmhv_nested_next_lpid(kvm, p->lpid); in debugfs_radix_read()
1357 pgt = kvm->arch.pgtable; in debugfs_radix_read()
1359 nested = kvmhv_get_nested(kvm, p->lpid, false); in debugfs_radix_read()
1462 void kvmhv_radix_debugfs_init(struct kvm *kvm) in kvmhv_radix_debugfs_init() argument
1464 debugfs_create_file("radix", 0400, kvm->debugfs_dentry, kvm, in kvmhv_radix_debugfs_init()
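The references above appear to come from the powerpc KVM radix MMU code (book3s_64_mmu_radix.c). The cluster around kvmppc_book3s_instantiate_page() and kvmppc_create_pte() reflects KVM's usual fault-handling discipline: snapshot kvm->mmu_invalidate_seq, resolve the host mapping outside the lock, then re-check the sequence count under kvm->mmu_lock with mmu_invalidate_retry() before installing the PTE. Below is a minimal, stand-alone sketch of that snapshot/recheck pattern; the struct, field, and function names are illustrative stand-ins, not the kernel's.

/*
 * Hypothetical user-space sketch of the snapshot/recheck pattern seen in
 * kvmppc_book3s_instantiate_page()/kvmppc_create_pte(): record an
 * invalidation sequence number, do the slow lookup unlocked, then verify
 * under the lock that no invalidation ran in between before committing.
 * All names here are illustrative; this is not kernel code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_mmu {
	pthread_mutex_t lock;          /* stands in for kvm->mmu_lock */
	unsigned long invalidate_seq;  /* stands in for kvm->mmu_invalidate_seq;
	                                  invalidators bump it under the lock */
	unsigned long pte;             /* the mapping we want to install */
};

/* stands in for mmu_invalidate_retry(): true if an invalidation happened */
static bool invalidate_retry(struct fake_mmu *m, unsigned long seq)
{
	return m->invalidate_seq != seq;
}

static int instantiate_page(struct fake_mmu *m, unsigned long new_pte)
{
	unsigned long seq;

	do {
		/* snapshot the sequence count before the unlocked slow path */
		seq = __atomic_load_n(&m->invalidate_seq, __ATOMIC_ACQUIRE);

		/* ... slow path: look up the host page outside the lock ... */

		pthread_mutex_lock(&m->lock);
		if (invalidate_retry(m, seq)) {
			/* an invalidation raced with us; drop the lock and retry */
			pthread_mutex_unlock(&m->lock);
			continue;
		}
		m->pte = new_pte;              /* safe to commit the mapping */
		pthread_mutex_unlock(&m->lock);
		return 0;
	} while (1);
}

int main(void)
{
	struct fake_mmu m = { .lock = PTHREAD_MUTEX_INITIALIZER };

	instantiate_page(&m, 0x1000);
	printf("installed pte 0x%lx at seq %lu\n", m.pte, m.invalidate_seq);
	return 0;
}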