Lines matching refs:hpt (each entry gives the source line number, the matching code, and the enclosing function, member, or local declaration):

67 	struct kvm_hpt_info hpt;  member
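For context, the hpt member above is a struct kvm_hpt_info, which carries everything the later hits operate on: the host mapping address of the guest hashed page table (HPT), its reverse-map array, and its size. A minimal sketch of the structure, with the field set assumed from the powerpc KVM headers rather than taken from this listing:

	/* Sketch of the HPT bookkeeping structure referenced above;
	 * field set assumed from the powerpc KVM headers. */
	struct kvm_hpt_info {
		unsigned long virt;		/* host (linear-mapping) address of the guest HPT */
		struct revmap_entry *rev;	/* reverse-map array, one entry per HPTE */
		u32 order;			/* HPT size is 2^order bytes */
		int cma;			/* 1 if the HPT was allocated from CMA */
	};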
72 unsigned long hpt = 0; in kvmppc_allocate_hpt() local
83 hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); in kvmppc_allocate_hpt()
84 memset((void *)hpt, 0, (1ul << order)); in kvmppc_allocate_hpt()
88 if (!hpt) in kvmppc_allocate_hpt()
89 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL in kvmppc_allocate_hpt()
92 if (!hpt) in kvmppc_allocate_hpt()
104 free_pages(hpt, order - PAGE_SHIFT); in kvmppc_allocate_hpt()
109 info->virt = hpt; in kvmppc_allocate_hpt()
119 kvm->arch.hpt = *info; in kvmppc_set_hpt()
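Taken together, the kvmppc_allocate_hpt() hits (lines 72-109) sketch the allocation path: try a physically contiguous CMA region first, fall back to the page allocator, and record the result in a kvm_hpt_info, which kvmppc_set_hpt() (line 119) then copies into kvm->arch.hpt. A condensed, hedged reconstruction, with the reverse-map allocation and most error handling elided:

	/* Hedged reconstruction of the allocation flow suggested by the
	 * kvmppc_allocate_hpt() hits above; reverse-map setup and some
	 * error paths are omitted. */
	static int allocate_hpt_sketch(struct kvm_hpt_info *info, u32 order)
	{
		struct page *page;
		unsigned long hpt = 0;
		int cma = 0;

		/* Prefer a contiguous CMA region for the HPT. */
		page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
		if (page) {
			hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
			memset((void *)hpt, 0, 1ul << order);
			cma = 1;
		}

		/* Fall back to the normal page allocator (line 89 is truncated
		 * in the listing; the call continues on the next source line). */
		if (!hpt)
			hpt = __get_free_pages(GFP_KERNEL | __GFP_ZERO | __GFP_RETRY_MAYFAIL,
					       order - PAGE_SHIFT);
		if (!hpt)
			return -ENOMEM;

		info->virt = hpt;
		info->order = order;
		info->cma = cma;
		return 0;
	}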
147 if (kvm->arch.hpt.order == order) { in kvmppc_alloc_reset_hpt()
151 memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); in kvmppc_alloc_reset_hpt()
160 if (kvm->arch.hpt.virt) { in kvmppc_alloc_reset_hpt()
161 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_alloc_reset_hpt()
224 if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) in kvmppc_map_vrma()
225 npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; in kvmppc_map_vrma()
236 & kvmppc_hpt_mask(&kvm->arch.hpt); in kvmppc_map_vrma()
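kvmppc_map_vrma() uses kvmppc_hpt_mask() twice: to clamp the number of VRMA pages (at most one HPTE per HPTE group) and to wrap the computed hash into the table. A hedged sketch of what the sizing helpers are assumed to return, given 16-byte HPTEs grouped eight to a 128-byte HPTE group; the names below are illustrative, not the kernel's:

	/* Illustrative sizing helpers (hypothetical names), assuming
	 * 16-byte HPTEs and 128-byte HPTE groups. */
	static inline unsigned long hpt_npte_sketch(struct kvm_hpt_info *hpt)
	{
		return 1UL << (hpt->order - 4);		/* total number of HPTEs */
	}

	static inline unsigned long hpt_mask_sketch(struct kvm_hpt_info *hpt)
	{
		return (1UL << (hpt->order - 7)) - 1;	/* index of the last HPTE group */
	}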
365 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_mmu_book3s_64_hv_xlate()
369 gr = kvm->arch.hpt.rev[index].guest_rpte; in kvmppc_mmu_book3s_64_hv_xlate()
530 hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); in kvmppc_book3s_hv_page_fault()
531 rev = &kvm->arch.hpt.rev[index]; in kvmppc_book3s_hv_page_fault()
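The translation (line 365) and page-fault (lines 530-531) hits share one idiom that recurs throughout the rest of the listing: an HPTE index becomes a pointer by shifting left by 4, since each hashed-page-table entry is 16 bytes (two big-endian doublewords), and the same index selects the software reverse-map entry that shadows the guest's view of the PTE. A small pair of helpers with hypothetical names, capturing just that idiom:

	/* Hypothetical helpers capturing the indexing idiom above:
	 * index -> byte offset is index << 4 because an HPTE is 16 bytes. */
	static inline __be64 *hpte_ptr(struct kvm_hpt_info *hpt, unsigned long index)
	{
		return (__be64 *)(hpt->virt + (index << 4));
	}

	static inline unsigned long hpte_guest_rpte(struct kvm_hpt_info *hpt,
						    unsigned long index)
	{
		return hpt->rev[index].guest_rpte;	/* shadowed guest second doubleword */
	}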
804 __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvmppc_unmap_hpte()
805 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvmppc_unmap_hpte()
863 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_unmap_rmapp()
918 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_age_rmapp()
938 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_age_rmapp()
981 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_age_rmapp()
998 hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); in kvm_test_age_rmapp()
1038 struct revmap_entry *rev = kvm->arch.hpt.rev; in kvm_test_clear_dirty_npages()
1055 hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); in kvm_test_clear_dirty_npages()
1218 rc = kvmppc_allocate_hpt(&resize->hpt, resize->order); in resize_hpt_allocate()
1223 resize->hpt.virt); in resize_hpt_allocate()
1232 struct kvm_hpt_info *old = &kvm->arch.hpt; in resize_hpt_rehash_hpte()
1233 struct kvm_hpt_info *new = &resize->hpt; in resize_hpt_rehash_hpte()
1389 for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { in resize_hpt_rehash()
1411 hpt_tmp = kvm->arch.hpt; in resize_hpt_pivot()
1412 kvmppc_set_hpt(kvm, &resize->hpt); in resize_hpt_pivot()
1413 resize->hpt = hpt_tmp; in resize_hpt_pivot()
1434 if (resize->hpt.virt) in resize_hpt_release()
1435 kvmppc_free_hpt(&resize->hpt); in resize_hpt_release()
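The resize_hpt_* hits outline the HPT resize sequence: allocate a second table (line 1218), rehash every entry of the live table into it (lines 1232-1389), then pivot by swapping the two kvm_hpt_info structures so the old table ends up in the resize context and is freed by resize_hpt_release() (lines 1434-1435). A hedged sketch of the pivot step, with the resize context's type name assumed from the surrounding code:

	/* Hedged sketch of the pivot implied by lines 1411-1413: the rehashed
	 * table is installed as kvm->arch.hpt and the old table is parked in
	 * resize->hpt for resize_hpt_release() to free. */
	static void pivot_sketch(struct kvm *kvm, struct kvm_resize_hpt *resize)
	{
		struct kvm_hpt_info hpt_tmp;

		hpt_tmp = kvm->arch.hpt;
		kvmppc_set_hpt(kvm, &resize->hpt);	/* install the new table */
		resize->hpt = hpt_tmp;			/* old table now owned by the resize context */
	}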
1751 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_read()
1752 revp = kvm->arch.hpt.rev + i; in kvm_htab_read()
1767 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1777 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1793 while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && in kvm_htab_read()
1814 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { in kvm_htab_read()
1876 if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || in kvm_htab_write()
1877 i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) in kvm_htab_write()
1880 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in kvm_htab_write()
2078 hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); in debugfs_htab_read()
2079 for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); in debugfs_htab_read()
2090 gr = kvm->arch.hpt.rev[i].guest_rpte; in debugfs_htab_read()
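Finally, the kvm_htab_read()/kvm_htab_write() and debugfs_htab_read() hits all walk the table the same way: kvmppc_hpt_npte() bounds the index, i * HPTE_SIZE (16 bytes) gives the byte offset of each entry, and the reverse map is walked in lock-step. A hedged loop skeleton with the per-entry work elided:

	/* Hedged skeleton of the full-table walk used by kvm_htab_read()
	 * and debugfs_htab_read(); per-entry processing is elided. */
	static void walk_hpt_sketch(struct kvm *kvm)
	{
		unsigned long i;
		__be64 *hptp;
		struct revmap_entry *revp;

		for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) {
			hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE));
			revp = kvm->arch.hpt.rev + i;
			/* ... inspect or serialise hptp[0], hptp[1] and revp->guest_rpte ... */
		}
	}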