Lines matching refs:pgt (each hit shows the source line number, the matching code, and the enclosing symbol)
49 struct kvm_pgtable *pgt; member
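The hit above is the walk-data struct's back-pointer to the table being walked. For orientation, a sketch of the struct kvm_pgtable it points at, with only the fields the lines below consume (layout as in the v5.10-era header; the comments are mine):

	struct kvm_pgtable {
		u32			ia_bits;	/* input-address width of the table */
		u32			start_level;	/* level of the (concatenated) root */
		kvm_pte_t		*pgd;		/* root table page(s) */
		struct kvm_s2_mmu	*mmu;		/* stage-2 only; NULL for the hyp table */
	};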
92 static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr) in __kvm_pgd_page_idx() argument
94 u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */ in __kvm_pgd_page_idx()
95 u64 mask = BIT(pgt->ia_bits) - 1; in __kvm_pgd_page_idx()
102 return __kvm_pgd_page_idx(data->pgt, data->addr); in kvm_pgd_page_idx()
107 struct kvm_pgtable pgt = { in kvm_pgd_pages() local
112 return __kvm_pgd_page_idx(&pgt, -1ULL) + 1; in kvm_pgd_pages()
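The two helpers above size a possibly concatenated PGD: __kvm_pgd_page_idx() masks the address to ia_bits and shifts by the granule shift of the level above start_level to find which root page it falls in, and kvm_pgd_pages() indexes the highest possible address (-1ULL) and adds one. A standalone sketch of the same arithmetic, assuming 4KiB granules; granule_shift() here is a userspace reconstruction for illustration, not the kernel's kvm_granule_shift():

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12			/* assuming 4KiB granules */
	#define BIT(n)		(1ULL << (n))

	/* Address bits covered by one entry at @level (arm64 levels 0..3). */
	static uint64_t granule_shift(int level)
	{
		return (uint64_t)(3 - level) * (PAGE_SHIFT - 3) + PAGE_SHIFT;
	}

	/* Which concatenated root page does @addr land in? For start_level 0
	 * the shift exceeds ia_bits, so the index is always 0 (one page). */
	static uint64_t pgd_page_idx(uint32_t ia_bits, int start_level, uint64_t addr)
	{
		uint64_t shift = granule_shift(start_level - 1);
		uint64_t mask = BIT(ia_bits) - 1;

		return (addr & mask) >> shift;
	}

	int main(void)
	{
		/* 40-bit IPA space starting at level 1: bit 39 selects one of
		 * two concatenated level-1 tables, so the count is 2. */
		printf("PGD pages: %" PRIu64 "\n", pgd_page_idx(40, 1, ~0ULL) + 1);
		return 0;
	}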
273 struct kvm_pgtable *pgt = data->pgt; in _kvm_pgtable_walk() local
274 u64 limit = BIT(pgt->ia_bits); in _kvm_pgtable_walk()
279 if (!pgt->pgd) in _kvm_pgtable_walk()
283 kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE]; in _kvm_pgtable_walk()
285 ret = __kvm_pgtable_walk(data, ptep, pgt->start_level); in _kvm_pgtable_walk()
293 int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_walk() argument
297 .pgt = pgt, in kvm_pgtable_walk()
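kvm_pgtable_walk() is the single traversal engine: it visits every entry covering [addr, addr + size) and invokes a caller-supplied callback. A hedged sketch of a leaf-counting walker, assuming the v5.10-era visitor signature; count_leaves() itself is hypothetical:

	static int count_leaves_cb(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
				   enum kvm_pgtable_walk_flags flag, void * const arg)
	{
		u64 *count = arg;

		(*count)++;	/* invoked once per valid leaf in the range */
		return 0;	/* a non-zero return aborts the walk */
	}

	static u64 count_leaves(struct kvm_pgtable *pgt, u64 addr, u64 size)
	{
		u64 count = 0;
		struct kvm_pgtable_walker walker = {
			.cb	= count_leaves_cb,
			.arg	= &count,
			.flags	= KVM_PGTABLE_WALK_LEAF,
		};

		return kvm_pgtable_walk(pgt, addr, size, &walker) ? 0 : count;
	}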
373 int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys, in kvm_pgtable_hyp_map() argument
390 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_hyp_map()
396 int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits) in kvm_pgtable_hyp_init() argument
400 pgt->pgd = (kvm_pte_t *)get_zeroed_page(GFP_KERNEL); in kvm_pgtable_hyp_init()
401 if (!pgt->pgd) in kvm_pgtable_hyp_init()
404 pgt->ia_bits = va_bits; in kvm_pgtable_hyp_init()
405 pgt->start_level = KVM_PGTABLE_MAX_LEVELS - levels; in kvm_pgtable_hyp_init()
406 pgt->mmu = NULL; in kvm_pgtable_hyp_init()
417 void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt) in kvm_pgtable_hyp_destroy() argument
424 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); in kvm_pgtable_hyp_destroy()
425 free_page((unsigned long)pgt->pgd); in kvm_pgtable_hyp_destroy()
426 pgt->pgd = NULL; in kvm_pgtable_hyp_destroy()
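The three hyp entry points above form the EL2 table's lifecycle: init allocates one zeroed PGD page and records the geometry, map installs mappings, and destroy walks the table freeing intermediate levels before releasing the PGD itself. A minimal sketch against the signatures as listed; the helper and its arguments are illustrative only:

	/* Hypothetical helper: build a throwaway hyp table with one mapping. */
	static int example_hyp_map_one(u64 va, u64 phys, u32 hyp_va_bits)
	{
		struct kvm_pgtable pgt;
		int ret;

		ret = kvm_pgtable_hyp_init(&pgt, hyp_va_bits);
		if (ret)
			return ret;

		/* One readable, executable page, e.g. for hyp text. */
		ret = kvm_pgtable_hyp_map(&pgt, va, PAGE_SIZE, phys,
					  KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_X);

		kvm_pgtable_hyp_destroy(&pgt);	/* frees table pages and the PGD */
		return ret;
	}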
616 int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size, in kvm_pgtable_stage2_map() argument
623 .mmu = pgt->mmu, in kvm_pgtable_stage2_map()
638 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_map()
697 int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_unmap() argument
701 .arg = pgt->mmu, in kvm_pgtable_stage2_unmap()
705 return kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_unmap()
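kvm_pgtable_stage2_map() takes a pre-filled kvm_mmu_memory_cache so the walker never allocates while the MMU lock is held; unmap only needs the range. A hedged sketch of mapping one guest page, assuming the v5.10-era signature with the cache as the final parameter:

	/* Hypothetical helper: map one writable page at @ipa. The caller must
	 * hold the mmu_lock and have topped up @mc (kvm_mmu_topup_memory_cache())
	 * so intermediate table pages can be drawn from it. */
	static int example_stage2_map_page(struct kvm_pgtable *pgt, u64 ipa,
					   u64 pfn, struct kvm_mmu_memory_cache *mc)
	{
		return kvm_pgtable_stage2_map(pgt, ipa, PAGE_SIZE,
					      pfn << PAGE_SHIFT,
					      KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W,
					      mc);
	}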
741 static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr, in stage2_update_leaf_attrs() argument
758 ret = kvm_pgtable_walk(pgt, addr, size, &walker); in stage2_update_leaf_attrs()
770 int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_wrprotect() argument
772 return stage2_update_leaf_attrs(pgt, addr, size, 0, in kvm_pgtable_stage2_wrprotect()
777 kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr) in kvm_pgtable_stage2_mkyoung() argument
780 stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0, in kvm_pgtable_stage2_mkyoung()
786 kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr) in kvm_pgtable_stage2_mkold() argument
789 stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF, in kvm_pgtable_stage2_mkold()
800 bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr) in kvm_pgtable_stage2_is_young() argument
803 stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL); in kvm_pgtable_stage2_is_young()
807 int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr, in kvm_pgtable_stage2_relax_perms() argument
823 ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level); in kvm_pgtable_stage2_relax_perms()
825 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level); in kvm_pgtable_stage2_relax_perms()
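All of the wrappers above funnel into stage2_update_leaf_attrs(), which walks a range flipping attribute bits in leaf PTEs and can report the old PTE and its level: wrprotect clears write permission across a range, the mkyoung/mkold/is_young trio manipulates the Access flag for a single address, and relax_perms additionally invalidates the TLB by IPA at the level it changed. A hedged sketch of how the Access-flag pair composes for page aging; kvm_age_one_ipa() is hypothetical and would run under the MMU lock:

	/* Report whether @ipa was referenced since the last scan, and clear
	 * the Access flag so the next guest access marks it young again. */
	static bool kvm_age_one_ipa(struct kvm_pgtable *pgt, u64 ipa)
	{
		if (!kvm_pgtable_stage2_is_young(pgt, ipa))
			return false;

		kvm_pgtable_stage2_mkold(pgt, ipa);	/* returns the old PTE */
		return true;
	}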
842 int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size) in kvm_pgtable_stage2_flush() argument
852 return kvm_pgtable_walk(pgt, addr, size, &walker); in kvm_pgtable_stage2_flush()
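kvm_pgtable_stage2_flush() performs data-cache maintenance on every valid leaf in a range, e.g. while the guest runs with its stage-1 MMU disabled. Covering the whole IPA space is just the full range, the same BIT(pgt->ia_bits) bound the destroy paths use; a sketch:

	/* Hypothetical helper: clean the caches for the entire IPA space. */
	static void example_stage2_flush_all(struct kvm_pgtable *pgt)
	{
		WARN_ON(kvm_pgtable_stage2_flush(pgt, 0, BIT(pgt->ia_bits)));
	}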
855 int kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm *kvm) in kvm_pgtable_stage2_init() argument
864 pgt->pgd = alloc_pages_exact(pgd_sz, GFP_KERNEL_ACCOUNT | __GFP_ZERO); in kvm_pgtable_stage2_init()
865 if (!pgt->pgd) in kvm_pgtable_stage2_init()
868 pgt->ia_bits = ia_bits; in kvm_pgtable_stage2_init()
869 pgt->start_level = start_level; in kvm_pgtable_stage2_init()
870 pgt->mmu = &kvm->arch.mmu; in kvm_pgtable_stage2_init()
894 void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt) in kvm_pgtable_stage2_destroy() argument
903 WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker)); in kvm_pgtable_stage2_destroy()
904 pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE; in kvm_pgtable_stage2_destroy()
905 free_pages_exact(pgt->pgd, pgd_sz); in kvm_pgtable_stage2_destroy()
906 pgt->pgd = NULL; in kvm_pgtable_stage2_destroy()
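The stage-2 lifecycle mirrors the hyp one, except the geometry is derived from the VM (hence the struct kvm argument) and the root may span several concatenated pages, sized with kvm_pgd_pages() exactly as the destroy path above shows. A minimal sketch against the listed pre-5.12 signatures:

	/* Hypothetical helper: bring a stage-2 table up and tear it down. */
	static int example_stage2_lifecycle(struct kvm *kvm)
	{
		struct kvm_pgtable *pgt;
		int ret;

		pgt = kzalloc(sizeof(*pgt), GFP_KERNEL);
		if (!pgt)
			return -ENOMEM;

		ret = kvm_pgtable_stage2_init(pgt, kvm);  /* allocates the PGD pages */
		if (ret) {
			kfree(pgt);
			return ret;
		}

		/* ... kvm_pgtable_stage2_map()/unmap() under the mmu_lock ... */

		kvm_pgtable_stage2_destroy(pgt);	/* unmaps all, frees the PGD */
		kfree(pgt);
		return 0;
	}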