/Linux-v5.10/arch/powerpc/kvm/

book3s_64_mmu.c
     192  struct kvmppc_pte *gpte, bool data,                              in kvmppc_mmu_book3s_64_xlate() argument
     213  gpte->eaddr = eaddr;                                             in kvmppc_mmu_book3s_64_xlate()
     214  gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data);  in kvmppc_mmu_book3s_64_xlate()
     215  gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff);  in kvmppc_mmu_book3s_64_xlate()
     216  gpte->raddr &= KVM_PAM;                                          in kvmppc_mmu_book3s_64_xlate()
     217  gpte->may_execute = true;                                        in kvmppc_mmu_book3s_64_xlate()
     218  gpte->may_read = true;                                           in kvmppc_mmu_book3s_64_xlate()
     219  gpte->may_write = true;                                          in kvmppc_mmu_book3s_64_xlate()
     220  gpte->page_size = MMU_PAGE_4K;                                   in kvmppc_mmu_book3s_64_xlate()
     221  gpte->wimg = HPTE_R_M;                                           in kvmppc_mmu_book3s_64_xlate()
     [all …]
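
The magic-page path above fills the translation with fixed RWX permissions and a 4K size. A minimal standalone sketch of the address math (not the kernel code), assuming a 4K page and using KVM_PAM_SKETCH as a stand-in for the kernel's KVM_PAM:

    #include <stdint.h>

    #define PAGE_OFFSET_MASK  0xfffULL               /* low 12 bits of a 4K page */
    #define KVM_PAM_SKETCH    0x0fffffffffffffffULL  /* assumed stand-in for KVM_PAM */

    static uint64_t magic_page_raddr(uint64_t eaddr, uint64_t magic_page_pa)
    {
            /* Keep the in-page offset, swap in the magic page's real
             * page frame, and clamp to the physical-address mask. */
            return (magic_page_pa | (eaddr & PAGE_OFFSET_MASK)) & KVM_PAM_SKETCH;
    }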

book3s_64_mmu_radix.c
     129  struct kvmppc_pte *gpte, u64 root,                   in kvmppc_mmu_walk_radix_tree() argument
     196  gpte->page_size = ps;                                in kvmppc_mmu_walk_radix_tree()
     197  gpte->page_shift = offset;                           in kvmppc_mmu_walk_radix_tree()
     199  gpte->eaddr = eaddr;                                 in kvmppc_mmu_walk_radix_tree()
     200  gpte->raddr = gpa;                                   in kvmppc_mmu_walk_radix_tree()
     203  gpte->may_read = !!(pte & _PAGE_READ);               in kvmppc_mmu_walk_radix_tree()
     204  gpte->may_write = !!(pte & _PAGE_WRITE);             in kvmppc_mmu_walk_radix_tree()
     205  gpte->may_execute = !!(pte & _PAGE_EXEC);            in kvmppc_mmu_walk_radix_tree()
     207  gpte->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);     in kvmppc_mmu_walk_radix_tree()
     224  struct kvmppc_pte *gpte, u64 table,                  in kvmppc_mmu_radix_translate_table() argument
     [all …]
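
The radix walk above derives guest permissions directly from the leaf PTE's flag bits. A self-contained sketch of that mapping; the flag values are assumed to match the Book3S-64 radix layout, and gpte_sketch is a simplified stand-in for the kernel's kvmppc_pte:

    #include <stdbool.h>
    #include <stdint.h>

    #define _PAGE_READ      0x00004ULL   /* assumed Book3S-64 radix flags */
    #define _PAGE_WRITE     0x00002ULL
    #define _PAGE_EXEC      0x00001ULL
    #define _PAGE_ACCESSED  0x00100ULL
    #define _PAGE_DIRTY     0x00080ULL

    struct gpte_sketch {                 /* simplified stand-in for kvmppc_pte */
            uint64_t eaddr, raddr, rc;
            bool may_read, may_write, may_execute;
    };

    static void fill_from_radix_pte(struct gpte_sketch *g,
                                    uint64_t pte, uint64_t eaddr, uint64_t gpa)
    {
            g->eaddr = eaddr;
            g->raddr = gpa;                       /* leaf pte supplies the frame */
            g->may_read    = !!(pte & _PAGE_READ);
            g->may_write   = !!(pte & _PAGE_WRITE);
            g->may_execute = !!(pte & _PAGE_EXEC);
            /* referenced/changed bits travel with the translation */
            g->rc = pte & (_PAGE_ACCESSED | _PAGE_DIRTY);
    }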

book3s_hv_nested.c
    1218  struct kvmppc_pte gpte,                                            in kvmhv_handle_nested_set_rc() argument
    1230  if (pgflags & ~gpte.rc)                                            in kvmhv_handle_nested_set_rc()
    1236  gpte.raddr, kvm->arch.lpid);                                       in kvmhv_handle_nested_set_rc()
    1286  struct kvmppc_pte gpte;                                            in __kvmhv_nested_page_fault() local
    1309  ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);  in __kvmhv_nested_page_fault()
    1325  ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);    in __kvmhv_nested_page_fault()
    1342  l1_shift = gpte.page_shift;                                        in __kvmhv_nested_page_fault()
    1349  gpa = gpte.raddr;                                                  in __kvmhv_nested_page_fault()
    1407  perm |= gpte.may_read ? 0UL : _PAGE_READ;                          in __kvmhv_nested_page_fault()
    1408  perm |= gpte.may_write ? 0UL : _PAGE_WRITE;                        in __kvmhv_nested_page_fault()
    [all …]
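
Two idioms recur in the nested-fault path above: `pgflags & ~gpte.rc` (line 1230) tests whether the access needs a referenced/changed bit the L1 translation does not have, and `perm` (lines 1407-1408) accumulates the permissions the gpte denies. A hedged sketch with assumed flag values and a hypothetical gpte_sketch type:

    #include <stdbool.h>
    #include <stdint.h>

    #define _PAGE_READ      0x00004ULL   /* assumed Book3S radix values */
    #define _PAGE_WRITE     0x00002ULL
    #define _PAGE_ACCESSED  0x00100ULL
    #define _PAGE_DIRTY     0x00080ULL

    struct gpte_sketch {
            uint64_t rc;                 /* R/C bits seen in L1's pte */
            bool may_read, may_write;
    };

    /* True if the access needs an R/C bit the L1 gpte does not grant. */
    static bool rc_bits_missing(const struct gpte_sketch *g, uint64_t pgflags)
    {
            return (pgflags & ~g->rc) != 0;
    }

    /* Mask of permissions the L1 gpte denies, as on lines 1407-1408. */
    static uint64_t denied_perms(const struct gpte_sketch *g)
    {
            uint64_t perm = 0;

            perm |= g->may_read  ? 0 : _PAGE_READ;
            perm |= g->may_write ? 0 : _PAGE_WRITE;
            return perm;
    }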

book3s_64_mmu_hv.c
     332  struct kvmppc_pte *gpte, bool data, bool iswrite)                  in kvmppc_mmu_book3s_64_hv_xlate() argument
     344  return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);   in kvmppc_mmu_book3s_64_hv_xlate()
     374  gpte->eaddr = eaddr;                                               in kvmppc_mmu_book3s_64_hv_xlate()
     375  gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);  in kvmppc_mmu_book3s_64_hv_xlate()
     383  gpte->may_read = hpte_read_permission(pp, key);                    in kvmppc_mmu_book3s_64_hv_xlate()
     384  gpte->may_write = hpte_write_permission(pp, key);                  in kvmppc_mmu_book3s_64_hv_xlate()
     385  gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));  in kvmppc_mmu_book3s_64_hv_xlate()
     391  gpte->may_read = 0;                                                in kvmppc_mmu_book3s_64_hv_xlate()
     393  gpte->may_write = 0;                                               in kvmppc_mmu_book3s_64_hv_xlate()
     397  gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);              in kvmppc_mmu_book3s_64_hv_xlate()
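
Line 385 above encodes the HPT execute rule: execute is allowed only if read is allowed and the HPTE's second doubleword marks the page neither no-execute (N) nor guarded (G). A tiny sketch, with the HPTE_R_N/HPTE_R_G values assumed to match the Book3S HPTE layout:

    #include <stdbool.h>
    #include <stdint.h>

    #define HPTE_R_N  0x0000000000000004ULL   /* assumed: no-execute bit */
    #define HPTE_R_G  0x0000000000000008ULL   /* assumed: guarded bit */

    static bool hpte_may_execute(bool may_read, uint64_t gr)
    {
            /* No execute from pages we can't read, nor from N/G pages. */
            return may_read && !(gr & (HPTE_R_N | HPTE_R_G));
    }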

/Linux-v5.10/arch/x86/kvm/mmu/

paging_tmpl.h
      99  static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl)                   in gpte_to_gfn_lvl() argument
     101  return (gpte & PT_LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;                       in gpte_to_gfn_lvl()
     105  unsigned gpte)                                                             in FNAME()
     117  mask |= (gpte >> (PT_GUEST_DIRTY_SHIFT - PT_WRITABLE_SHIFT)) &             in FNAME()
     131  static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte)  in FNAME()
     136  return __is_bad_mt_xwr(rsvd_check, gpte);                                  in FNAME()
     140  static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level)  in FNAME()
     142  return __is_rsvd_bits_set(&mmu->guest_rsvd_check, gpte, level) ||          in FNAME()
     143  FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte);                        in FNAME()
     191  u64 gpte)                                                                  in FNAME()
     [all …]
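
gpte_to_gfn_lvl() above masks off the per-level low bits of the guest PTE and shifts by PAGE_SHIFT to get a guest frame number. A standalone sketch for 4-level x86-64 paging; the mask macros here are stated assumptions, not the kernel's PT_LVL_ADDR_MASK:

    #include <stdint.h>

    #define PAGE_SHIFT        12
    #define LVL_SHIFT(l)      (PAGE_SHIFT + 9 * ((l) - 1))   /* 12, 21, 30, 39 */
    #define PHYS_MASK         ((1ULL << 52) - 1)             /* assumed PA width */
    #define LVL_ADDR_MASK(l)  (PHYS_MASK & ~((1ULL << LVL_SHIFT(l)) - 1))

    static uint64_t gpte_to_gfn_lvl_sketch(uint64_t gpte, int lvl)
    {
            /* Drop the per-level offset bits and the non-address high
             * bits, then shift down to a guest frame number. */
            return (gpte & LVL_ADDR_MASK(lvl)) >> PAGE_SHIFT;
    }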

mmu.c
     287  static gfn_t pse36_gfn_delta(u32 gpte)               in pse36_gfn_delta() argument
     291  return (gpte & PT32_DIR_PSE36_MASK) << shift;        in pse36_gfn_delta()
    3937  unsigned level, unsigned gpte)                       in is_last_gpte() argument
    3944  gpte &= level - mmu->last_nonleaf_level;             in is_last_gpte()
    3951  gpte |= level - PG_LEVEL_4K - 1;                     in is_last_gpte()
    3953  return gpte & PT_PAGE_SIZE_MASK;                     in is_last_gpte()
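
pse36_gfn_delta() above recovers the high physical-address bits that PSE-36 stores inside a 32-bit large-page PDE. A sketch assuming a 4-bit field (PTE bits 16:13 carrying physical address bits 35:32), which is what the shift arithmetic shown at line 291 implies; the constants are assumptions, not the kernel's macros:

    #include <stdint.h>

    #define PAGE_SHIFT            12
    #define PT32_DIR_PSE36_SHIFT  13
    #define PT32_DIR_PSE36_MASK   (0xfULL << PT32_DIR_PSE36_SHIFT)

    static uint64_t pse36_gfn_delta_sketch(uint32_t gpte)
    {
            /* PTE bits 16:13 land at gfn bits 23:20, i.e. physical
             * address bits 35:32 once the gfn is shifted by PAGE_SHIFT. */
            int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;   /* = 7 */

            return ((uint64_t)gpte & PT32_DIR_PSE36_MASK) << shift;
    }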

/Linux-v5.10/arch/powerpc/include/asm/

kvm_book3s.h
     186  struct kvmppc_pte *gpte, u64 root,
     189  struct kvmppc_pte *gpte, u64 table,
     192  struct kvmppc_pte *gpte, bool data, bool iswrite);

/Linux-v5.10/Documentation/virt/kvm/

mmu.rst
      54  gpte         guest pte (referring to gfns)
     168  first or second 512-gpte block in the guest page table. For second-level
     169  page tables, each 32-bit gpte is converted to two 64-bit sptes
     278  protected, and synchronize sptes to gptes when a gpte is written.
     342  - if successful, we can let the guest continue and modify the gpte
     381  we cannot map the permissions for gpte.u=1, gpte.w=0 to any spte (the
     399  spte.nx=gpte.nx back. For this to work, KVM forces EFER.NX to 1 when
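
Lines 168-169 above refer to shadowing a non-PAE guest table (1024 32-bit gptes) with two 512-entry 64-bit shadow pages, selected by quadrant. A sketch of the index split, with hypothetical helper names:

    /* Hypothetical helpers illustrating the quadrant split. */
    static int gpte_quadrant(unsigned int gpte_index)
    {
            return gpte_index >= 512;    /* first or second 512-gpte block */
    }

    static unsigned int gpte_spte_index(unsigned int gpte_index)
    {
            return gpte_index & 511;     /* slot within the chosen shadow page */
    }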

locking.rst
      65  | gpte = gfn1 |
      67  | spte is the shadow page table entry corresponding with gpte and |
      84  | | gpte is changed to point to |
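
The locking.rst fragments above come from its fast-page-fault discussion: a gpte can be changed to point at a new gfn while an spte update is running outside mmu_lock, so the spte must be updated with a compare-and-exchange that fails if it changed underneath us. A hedged C11 sketch of that guard, with hypothetical names and an assumed writable-bit position:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define SPTE_WRITABLE  (1ULL << 1)   /* assumed writable-bit position */

    static bool fast_make_writable(_Atomic uint64_t *sptep, uint64_t old_spte)
    {
            /* Fails if the spte changed since it was read, e.g. because
             * the guest rewrote the gpte and the spte was zapped. */
            return atomic_compare_exchange_strong(sptep, &old_spte,
                                                  old_spte | SPTE_WRITABLE);
    }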