Lines Matching refs:hpt
68 struct kvm_hpt_info hpt; member
73 unsigned long hpt = 0; in kvmppc_allocate_hpt() local
84 hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); in kvmppc_allocate_hpt()
85 memset((void *)hpt, 0, (1ul << order)); in kvmppc_allocate_hpt()
89 if (!hpt) in kvmppc_allocate_hpt()
90 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL in kvmppc_allocate_hpt()
93 if (!hpt) in kvmppc_allocate_hpt()
105 free_pages(hpt, order - PAGE_SHIFT); in kvmppc_allocate_hpt()
110 info->virt = hpt; in kvmppc_allocate_hpt()
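The kvmppc_allocate_hpt() lines above show a two-step allocation: a CMA-backed page first (lines 84-85), then a __get_free_pages() fallback (line 90), with 'order' counting log2 of the HPT size in bytes, so the buddy allocator receives order - PAGE_SHIFT (line 105). A minimal standalone sketch of that order arithmetic, assuming 64 KiB pages and the 16-byte HPTE stride implied by the (index << 4) accesses later in this listing; it is not kernel code:

/* Standalone sketch (not kernel code) of the order arithmetic in
 * kvmppc_allocate_hpt(): 'order' is log2 of the HPT size in bytes, the
 * buddy-allocator page order is order - PAGE_SHIFT, and with 16-byte
 * HPTEs the table holds 1 << (order - 4) entries.  PAGE_SHIFT = 16
 * (64 KiB pages) is an assumption here, not taken from the listing.
 */
#include <stdio.h>

#define PAGE_SHIFT 16          /* assumed 64 KiB pages */
#define HPTE_SHIFT 4           /* one HPTE is 16 bytes: hpt.virt + (i << 4) */

int main(void)
{
	unsigned long order = 26;                      /* example: 64 MiB HPT */
	unsigned long bytes = 1UL << order;            /* memset(.., 1ul << order), line 85 */
	unsigned long page_order = order - PAGE_SHIFT; /* __get_free_pages()/free_pages() arg */
	unsigned long nptes = 1UL << (order - HPTE_SHIFT);

	printf("HPT order %lu: %lu bytes, page order %lu, %lu HPTEs\n",
	       order, bytes, page_order, nptes);
	return 0;
}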
120 kvm->arch.hpt = *info; in kvmppc_set_hpt()
148 if (kvm->arch.hpt.order == order) { in kvmppc_alloc_reset_hpt()
152 memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); in kvmppc_alloc_reset_hpt()
161 if (kvm->arch.hpt.virt) { in kvmppc_alloc_reset_hpt()
162 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_alloc_reset_hpt()
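kvmppc_alloc_reset_hpt() reuses the existing table when the requested order matches (line 152 clears it in place) and otherwise frees the old table before allocating a fresh one (lines 161-162). A hedged sketch of that reuse-or-reallocate flow, with simplified stand-in types and plain calloc()/free() in place of the kernel allocators:

/* Sketch only: stand-in types and allocators, not the kernel's. */
#include <stdlib.h>
#include <string.h>

struct hpt_info { void *virt; unsigned long order; };

int reset_or_realloc(struct hpt_info *hpt, unsigned long order)
{
	if (hpt->virt && hpt->order == order) {
		/* same size requested: just clear it, as line 152 does */
		memset(hpt->virt, 0, 1UL << order);
		return 0;
	}
	if (hpt->virt) {                        /* lines 161-162: drop the old table */
		free(hpt->virt);
		hpt->virt = NULL;
	}
	hpt->virt = calloc(1, 1UL << order);    /* stand-in for kvmppc_allocate_hpt() */
	if (!hpt->virt)
		return -1;
	hpt->order = order;
	return 0;
}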
225 if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) in kvmppc_map_vrma()
226 npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; in kvmppc_map_vrma()
237 & kvmppc_hpt_mask(&kvm->arch.hpt); in kvmppc_map_vrma()
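kvmppc_map_vrma() never maps more pages than the HPT can hash into: npages is clamped to kvmppc_hpt_mask() + 1 (lines 225-226) and each hash value is folded into the table with & mask (line 237). A minimal sketch of that clamp-and-wrap pattern; the mask width and the hash mixing constant below are placeholders, not the kernel's values:

/* Sketch of the clamp-and-wrap in kvmppc_map_vrma(): bound the page count
 * by the hash mask, then wrap each hash into the table.
 */
#include <stdio.h>

int main(void)
{
	unsigned long hpt_mask = (1UL << 18) - 1;       /* stand-in for kvmppc_hpt_mask() */
	unsigned long npages = 1UL << 20;

	if (npages > hpt_mask + 1)                      /* lines 225-226 */
		npages = hpt_mask + 1;

	for (unsigned long i = 0; i < 3 && i < npages; i++) {
		unsigned long hash = (i * 0x9e3779b97f4a7c15ULL) & hpt_mask;  /* line 237 */
		printf("page %lu -> hash slot 0x%lx\n", i, hash);
	}
	return 0;
}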
374 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_mmu_book3s_64_hv_xlate()
378 gr = kvm->arch.hpt.rev[index].guest_rpte; in kvmppc_mmu_book3s_64_hv_xlate()
553 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_book3s_hv_page_fault()
554 rev = &kvm->arch.hpt.rev[index]; in kvmppc_book3s_hv_page_fault()
782 __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvmppc_unmap_hpte()
783 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvmppc_unmap_hpte()
841 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_unmap_rmapp()
901 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_age_rmapp()
921 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_age_rmapp()
972 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_age_rmapp()
989 hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); in kvm_test_age_rmapp()
1035 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_clear_dirty_npages()
1052 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_test_clear_dirty_npages()
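Every lookup path above (xlate, page fault, unmap, age, dirty tracking) computes the HPTE pointer the same way: kvm->arch.hpt.virt + (index << 4), i.e. 16 bytes per entry (two big-endian doublewords), with a parallel revmap array indexed by the same value holding the guest-visible RPTE (hpt.rev[index].guest_rpte). A standalone sketch of that parallel-array layout; field contents are placeholders:

/* Sketch of the HPT / revmap parallel arrays: each HPTE is two 64-bit
 * doublewords (16 bytes, hence index << 4), and rev[index] carries the
 * guest-visible RPTE for the same slot.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct revmap_entry { uint64_t guest_rpte; };      /* only the field seen above */

int main(void)
{
	unsigned long order = 18;                      /* 256 KiB table, example */
	unsigned long npte = 1UL << (order - 4);       /* 16 bytes per HPTE */
	uint64_t *virt = calloc(npte, 16);
	struct revmap_entry *rev = calloc(npte, sizeof(*rev));

	unsigned long index = 42;
	uint64_t *hptep = (uint64_t *)((char *)virt + (index << 4)); /* as in the listing */
	uint64_t gr = rev[index].guest_rpte;

	printf("hptep[0]=%llx hptep[1]=%llx guest_rpte=%llx\n",
	       (unsigned long long)hptep[0], (unsigned long long)hptep[1],
	       (unsigned long long)gr);
	free(virt);
	free(rev);
	return 0;
}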
1215 rc = kvmppc_allocate_hpt(&resize->hpt, resize->order); in resize_hpt_allocate()
1220 resize->hpt.virt); in resize_hpt_allocate()
1229 struct kvm_hpt_info *old = &kvm->arch.hpt; in resize_hpt_rehash_hpte()
1230 struct kvm_hpt_info *new = &resize->hpt; in resize_hpt_rehash_hpte()
1386 for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { in resize_hpt_rehash()
1408 hpt_tmp = kvm->arch.hpt; in resize_hpt_pivot()
1409 kvmppc_set_hpt(kvm, &resize->hpt); in resize_hpt_pivot()
1410 resize->hpt = hpt_tmp; in resize_hpt_pivot()
1431 if (resize->hpt.virt) in resize_hpt_release()
1432 kvmppc_free_hpt(&resize->hpt); in resize_hpt_release()
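The resize path allocates a second table into resize->hpt (line 1215), rehashes every old entry into it (the loop at line 1386), and then pivots by swapping structs: the live kvm->arch.hpt is saved, the new table is installed with kvmppc_set_hpt(), and the old one is parked in resize->hpt so resize_hpt_release() can free it (lines 1408-1410, 1431-1432). A hedged sketch of that swap-and-free-later pattern with stand-in types:

/* Sketch of the pivot in resize_hpt_pivot()/resize_hpt_release(): swap the
 * live table with the freshly rehashed one, then free whatever ended up in
 * resize->hpt.  Types and the allocator are stand-ins, not the kernel's.
 */
#include <stdlib.h>

struct hpt_info { void *virt; unsigned long order; };
struct resize_ctx { struct hpt_info hpt; };

void pivot(struct hpt_info *live, struct resize_ctx *resize)
{
	struct hpt_info tmp = *live;     /* hpt_tmp = kvm->arch.hpt       (line 1408) */
	*live = resize->hpt;             /* kvmppc_set_hpt(kvm, &resize->hpt) */
	resize->hpt = tmp;               /* resize->hpt = hpt_tmp         (line 1410) */
}

void release(struct resize_ctx *resize)
{
	if (resize->hpt.virt) {          /* lines 1431-1432 */
		free(resize->hpt.virt);  /* stand-in for kvmppc_free_hpt() */
		resize->hpt.virt = NULL;
	}
}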
1748 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_read()
1749 revp = kvm->arch.hpt.rev + i; in kvm_htab_read()
1764 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1774 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1790 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1811 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { in kvm_htab_read()
1873 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || in kvm_htab_write()
1874 i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) in kvm_htab_write()
1877 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_write()
2074 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in debugfs_htab_read()
2075 for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); in debugfs_htab_read()
2086 gr = kvm->arch.hpt.rev[i].guest_rpte; in debugfs_htab_read()
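kvm_htab_read()/kvm_htab_write() and debugfs_htab_read() walk the table with the same 16-byte stride, written as i * HPTE_SIZE rather than i << 4, and bound every scan by kvmppc_hpt_npte() (the write side also checks that i + n_valid + n_invalid stays inside the table, lines 1873-1874). A short sketch of such a bounded scan; the "valid" test is a placeholder, not the real HPTE_V_VALID check:

/* Sketch of a bounded HPT scan as in kvm_htab_read()/debugfs_htab_read():
 * advance by HPTE_SIZE bytes per slot and never run past npte entries.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define HPTE_SIZE 16

int main(void)
{
	unsigned long npte = 1024;                 /* stand-in for kvmppc_hpt_npte() */
	uint8_t *virt = calloc(npte, HPTE_SIZE);

	memset(virt + 7 * HPTE_SIZE, 0xff, 8);     /* pretend slot 7 is populated */

	for (unsigned long i = 0; i < npte; i++) {
		const uint64_t *hptp = (const uint64_t *)(virt + i * HPTE_SIZE);
		if (hptp[0])                       /* placeholder validity test */
			printf("slot %lu looks populated\n", i);
	}
	free(virt);
	return 0;
}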