Lines Matching refs:FNAME (arch/x86/kvm/mmu/paging_tmpl.h)

26 #define FNAME(name) paging##64_##name macro
45 #define FNAME(name) paging##32_##name macro
59 #define FNAME(name) ept_##name macro
77 #define gpte_to_gfn_lvl FNAME(gpte_to_gfn_lvl)
104 static inline void FNAME(protect_clean_gpte)(struct kvm_mmu *mmu, unsigned *access, in FNAME() function
122 static inline int FNAME(is_present_gpte)(unsigned long pte) in FNAME() function
131 static bool FNAME(is_bad_mt_xwr)(struct rsvd_bits_validate *rsvd_check, u64 gpte) in FNAME() function
140 static bool FNAME(is_rsvd_bits_set)(struct kvm_mmu *mmu, u64 gpte, int level) in FNAME() function
143 FNAME(is_bad_mt_xwr)(&mmu->guest_rsvd_check, gpte); in FNAME()
146 static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, in FNAME() function
189 static bool FNAME(prefetch_invalid_gpte)(struct kvm_vcpu *vcpu, in FNAME() function
193 if (!FNAME(is_present_gpte)(gpte)) in FNAME()
201 if (FNAME(is_rsvd_bits_set)(vcpu->arch.mmu, gpte, PG_LEVEL_4K)) in FNAME()
217 static inline unsigned FNAME(gpte_access)(u64 gpte) in FNAME() function
235 static int FNAME(update_accessed_dirty_bits)(struct kvm_vcpu *vcpu, in FNAME() function
287 ret = FNAME(cmpxchg_gpte)(vcpu, mmu, ptep_user, index, orig_pte, pte); in FNAME()
297 static inline unsigned FNAME(gpte_pkeys)(struct kvm_vcpu *vcpu, u64 gpte) in FNAME() function
311 static int FNAME(walk_addr_generic)(struct guest_walker *walker, in FNAME() function
344 if (!FNAME(is_present_gpte)(pte)) in FNAME()
412 if (unlikely(!FNAME(is_present_gpte)(pte))) in FNAME()
415 if (unlikely(FNAME(is_rsvd_bits_set)(mmu, pte, walker->level))) { in FNAME()
423 pte_pkey = FNAME(gpte_pkeys)(vcpu, pte); in FNAME()
427 walker->pt_access = FNAME(gpte_access)(pt_access ^ walk_nx_mask); in FNAME()
428 walker->pte_access = FNAME(gpte_access)(pte_access ^ walk_nx_mask); in FNAME()
446 FNAME(protect_clean_gpte)(mmu, &walker->pte_access, pte); in FNAME()
457 ret = FNAME(update_accessed_dirty_bits)(vcpu, mmu, walker, in FNAME()
511 static int FNAME(walk_addr)(struct guest_walker *walker, in FNAME() function
514 return FNAME(walk_addr_generic)(walker, vcpu, vcpu->arch.mmu, addr, in FNAME()
519 static int FNAME(walk_addr_nested)(struct guest_walker *walker, in FNAME() function
523 return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu, in FNAME()
529 FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in FNAME() function
536 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, spte, gpte)) in FNAME()
542 pte_access = sp->role.access & FNAME(gpte_access)(gpte); in FNAME()
543 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
560 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in FNAME() function
565 FNAME(prefetch_gpte)(vcpu, sp, spte, gpte, false); in FNAME()
568 static bool FNAME(gpte_changed)(struct kvm_vcpu *vcpu, in FNAME() function
591 static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw, in FNAME() function
617 if (!FNAME(prefetch_gpte)(vcpu, sp, spte, gptep[i], true)) in FNAME()
627 static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr, in FNAME() function
653 if (FNAME(gpte_changed)(vcpu, gw, top_level)) in FNAME()
678 if (FNAME(gpte_changed)(vcpu, gw, it.level - 1)) in FNAME()
723 FNAME(pte_prefetch)(vcpu, gw, it.sptep); in FNAME()
749 FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu, in FNAME() function
785 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gpa_t addr, u32 error_code, in FNAME() function
808 r = FNAME(walk_addr)(&walker, vcpu, addr, error_code); in FNAME()
832 is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu, in FNAME()
879 r = FNAME(fetch)(vcpu, addr, &walker, error_code, max_level, pfn, in FNAME()
889 static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp) in FNAME() function
901 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa) in FNAME() function
936 pte_gpa = FNAME(get_level1_sp_gpa)(sp); in FNAME()
951 FNAME(update_pte)(vcpu, sp, sptep, &gpte); in FNAME()
961 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gpa_t addr, u32 access, in FNAME() function
968 r = FNAME(walk_addr)(&walker, vcpu, addr, access); in FNAME()
981 static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gpa_t vaddr, in FNAME() function
994 r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr, access); in FNAME()
1019 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in FNAME() function
1029 first_pte_gpa = FNAME(get_level1_sp_gpa)(sp); in FNAME()
1046 if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) { in FNAME()
1059 pte_access &= FNAME(gpte_access)(gpte); in FNAME()
1060 FNAME(protect_clean_gpte)(vcpu->arch.mmu, &pte_access, gpte); in FNAME()
1095 #undef FNAME
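
The listing above traces the whole life cycle of the FNAME() macro in paging_tmpl.h: it is defined three times as a token-pasting prefix (lines 26, 45 and 59, one definition per guest paging mode), used throughout the guest page-table walker to name functions such as FNAME(walk_addr) and FNAME(page_fault), and removed again at line 1095 so the header can be compiled once more with a different PTTYPE. The sketch below is a minimal, self-contained illustration of that pattern compressed into a single file; only the three FNAME definitions match the listing, while the walk_addr body, the printf output and the addresses in main() are placeholders, not kernel code.

/*
 * Minimal sketch of the FNAME() template pattern (illustration only,
 * not the kernel source): one function body is compiled three times,
 * each time with FNAME() pasting a different prefix, so a single
 * template yields paging64_*, paging32_* and ept_* variants.
 */
#include <stdio.h>

/* first "inclusion": 64-bit guest paging */
#define FNAME(name) paging##64_##name
static int FNAME(walk_addr)(unsigned long addr)
{
	printf("paging64 walk of 0x%lx\n", addr);	/* stand-in for the real walker */
	return 0;
}
#undef FNAME	/* mirrors the #undef at line 1095 of the listing */

/* second "inclusion": 32-bit guest paging */
#define FNAME(name) paging##32_##name
static int FNAME(walk_addr)(unsigned long addr)
{
	printf("paging32 walk of 0x%lx\n", addr);
	return 0;
}
#undef FNAME

/* third "inclusion": EPT */
#define FNAME(name) ept_##name
static int FNAME(walk_addr)(unsigned long addr)
{
	printf("ept walk of 0x%lx\n", addr);
	return 0;
}
#undef FNAME

int main(void)
{
	/* three independently named copies of the same template body */
	paging64_walk_addr(0x1000);
	paging32_walk_addr(0x2000);
	ept_walk_addr(0x3000);
	return 0;
}

In the kernel the body is not duplicated as above: it lives in paging_tmpl.h itself, and that header is included several times with a different PTTYPE each time. That is why every function in the listing is static and why the #undef FNAME at the end of the header is required before the next inclusion can redefine it.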