Lines Matching full:fault in arch/x86/kvm/mmu/paging_tmpl.h (KVM's shadow-paging guest page table walker)

92 struct x86_exception fault;  member
249 ret = __try_cmpxchg_user(ptep_user, &orig_pte, pte, fault); in FNAME()
369 nested_access, &walker->fault); in FNAME()
373 * instruction) triggers a nested page fault. The exit in FNAME()
375 * "guest page access" as the nested page fault's cause, in FNAME()
433 real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
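The matches at lines 249 and 369/433 show the two ways the walk itself can fault: a racing accessed/dirty-bit update on the guest PTE, and a failed nested translation reported through &walker->fault. Below is a minimal userspace sketch of the first pattern, assuming a plain uint64_t stands in for a real guest PTE and set_accessed_bit() is a hypothetical helper; the kernel's __try_cmpxchg_user() jumps to a fault label instead of returning an error.

/* Sketch of the A/D-bit update at line 249: set the accessed bit with a
 * compare-exchange and retry the walk if the PTE changed under us.
 * Simplified stand-in types, not the kernel's. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PT_ACCESSED_MASK (1ULL << 5)	/* x86 accessed bit */

static int set_accessed_bit(_Atomic uint64_t *ptep)
{
	uint64_t orig = atomic_load(ptep);
	uint64_t newv = orig | PT_ACCESSED_MASK;

	/* Mirrors __try_cmpxchg_user(): only update if the PTE is
	 * unchanged; the caller retries the walk otherwise. */
	if (!atomic_compare_exchange_strong(ptep, &orig, newv))
		return -1;	/* stands in for "goto fault" */
	return 0;
}

int main(void)
{
	_Atomic uint64_t pte = 0x1000;

	if (set_accessed_bit(&pte))
		puts("raced, retry walk");
	printf("pte = 0x%llx\n", (unsigned long long)pte);
	return 0;
}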
443 * On a write fault, fold the dirty bit into accessed_dirty. in FNAME()
469 walker->fault.vector = PF_VECTOR; in FNAME()
470 walker->fault.error_code_valid = true; in FNAME()
471 walker->fault.error_code = errcode; in FNAME()
505 walker->fault.address = addr; in FNAME()
506 walker->fault.nested_page_fault = mmu != vcpu->arch.walk_mmu; in FNAME()
507 walker->fault.async_page_fault = false; in FNAME()
509 trace_kvm_mmu_walker_error(walker->fault.error_code); in FNAME()
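Lines 469-509 are the walker's single error path: every walk failure is reported as a #PF with a synthesized error code. A minimal sketch of that fill sequence follows; the struct and report_walk_fault() are simplified, hypothetical stand-ins for the kernel's struct x86_exception and the open-coded assignments above.

/* Sketch of the walker's fault-reporting path (lines 469-507). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PF_VECTOR 14

struct x86_exception {
	uint8_t  vector;
	bool     error_code_valid;
	uint16_t error_code;
	bool     nested_page_fault;
	bool     async_page_fault;
	uint64_t address;
};

/* Fill the exception the way walk_addr_generic() does before bailing:
 * a #PF carrying the faulting guest address and error code, flagged as
 * nested when the failing walk was not on the primary walk MMU. */
static void report_walk_fault(struct x86_exception *fault, uint64_t addr,
			      uint16_t errcode, bool nested)
{
	fault->vector = PF_VECTOR;
	fault->error_code_valid = true;
	fault->error_code = errcode;
	fault->address = addr;
	fault->nested_page_fault = nested;
	fault->async_page_fault = false;
}

int main(void)
{
	struct x86_exception fault;

	report_walk_fault(&fault, 0xdeadb000, 0x3 /* present|write */, false);
	printf("#PF at %#llx, error code %#x\n",
	       (unsigned long long)fault.address, fault.error_code);
	return 0;
}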
618 static int FNAME(fetch)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault, in FNAME()
625 gfn_t base_gfn = fault->gfn; in FNAME()
645 for (shadow_walk_init(&it, vcpu, fault->addr); in FNAME()
690 kvm_mmu_hugepage_adjust(vcpu, fault); in FNAME()
692 trace_kvm_mmu_spte_requested(fault); in FNAME()
701 if (fault->nx_huge_page_workaround_enabled) in FNAME()
702 disallowed_hugepage_adjust(fault, *it.sptep, it.level); in FNAME()
704 base_gfn = fault->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in FNAME()
705 if (it.level == fault->goal_level) in FNAME()
716 if (fault->huge_page_disallowed && in FNAME()
717 fault->req_level >= it.level) in FNAME()
721 if (WARN_ON_ONCE(it.level != fault->goal_level)) in FNAME()
724 ret = mmu_set_spte(vcpu, fault->slot, it.sptep, gw->pte_access, in FNAME()
725 base_gfn, fault->pfn, fault); in FNAME()
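Lines 618-725 are FNAME(fetch): descend the shadow page tables from the root, aligning the base gfn at each level, until the level requested by the fault is reached, then install the leaf SPTE. The sketch below models just that descent; fault_info, fetch() and PAGES_PER_HPAGE() are simplified stand-ins for the kernel's kvm_page_fault, shadow-walk iterator, and KVM_PAGES_PER_HPAGE().

/* Sketch of the fetch() descent (lines 645-725). */
#include <stdint.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL 1
#define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * 9))

struct fault_info {
	uint64_t gfn;
	int goal_level;		/* 1 = 4K, 2 = 2M, 3 = 1G */
};

static int fetch(struct fault_info *fault, int root_level)
{
	uint64_t base_gfn = fault->gfn;
	int level;

	for (level = root_level; level >= PT_PAGE_TABLE_LEVEL; level--) {
		/* Mirrors line 704: align the gfn to the mapping size. */
		base_gfn = fault->gfn & ~(PAGES_PER_HPAGE(level) - 1);
		if (level == fault->goal_level)
			break;	/* leaf reached: mmu_set_spte() goes here */
		/* non-leaf: link (or allocate) the next shadow page table */
	}
	if (level != fault->goal_level)
		return -1;	/* mirrors the WARN_ON_ONCE() at line 721 */
	printf("map gfn %#llx at level %d (base %#llx)\n",
	       (unsigned long long)fault->gfn, level,
	       (unsigned long long)base_gfn);
	return 0;
}

int main(void)
{
	struct fault_info f = { .gfn = 0x12345, .goal_level = 2 };
	return fetch(&f, 4);
}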
746 * @write_fault_to_shadow_pgtable will return true if the fault gfn is
777 * Page fault handler. There are several causes for a page fault:
784 * - normal guest page fault due to the guest pte marked not present, not
790 static int FNAME(page_fault)(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) in FNAME()
797 pgprintk("%s: addr %lx err %x\n", __func__, fault->addr, fault->error_code); in FNAME()
798 WARN_ON_ONCE(fault->is_tdp); in FNAME()
802 * If PFEC.RSVD is set, this is a shadow page fault. in FNAME()
805 r = FNAME(walk_addr)(&walker, vcpu, fault->addr, in FNAME()
806 fault->error_code & ~PFERR_RSVD_MASK); in FNAME()
812 pgprintk("%s: guest page fault\n", __func__); in FNAME()
813 if (!fault->prefetch) in FNAME()
814 kvm_inject_emulated_page_fault(vcpu, &walker.fault); in FNAME()
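Lines 812-814 are the walk-failure branch of the handler: if the guest's own page tables deny the access, the fault is reflected back into the guest, except for speculative prefetch faults, which must not disturb guest state. A small sketch of that decision, where inject_page_fault() is a hypothetical stand-in for kvm_inject_emulated_page_fault():

/* Sketch of the walk-failure path (lines 812-814). */
#include <stdbool.h>
#include <stdio.h>

struct x86_exception { int vector; };

/* Hypothetical stand-in for kvm_inject_emulated_page_fault(). */
static void inject_page_fault(const struct x86_exception *fault)
{
	printf("inject #PF (vector %d) into guest\n", fault->vector);
}

static int handle_walk_failure(bool prefetch, struct x86_exception *fault)
{
	if (!prefetch)
		inject_page_fault(fault);
	return 0;	/* handled: resume the guest, which takes the #PF */
}

int main(void)
{
	struct x86_exception f = { .vector = 14 };
	return handle_walk_failure(false, &f);
}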
819 fault->gfn = walker.gfn; in FNAME()
820 fault->slot = kvm_vcpu_gfn_to_memslot(vcpu, fault->gfn); in FNAME()
822 if (page_fault_handle_page_track(vcpu, fault)) { in FNAME()
823 shadow_page_table_clear_flood(vcpu, fault->addr); in FNAME()
834 &walker, fault->user, &vcpu->arch.write_fault_to_shadow_pgtable); in FNAME()
837 fault->max_level = PG_LEVEL_4K; in FNAME()
839 fault->max_level = walker.level; in FNAME()
844 r = kvm_faultin_pfn(vcpu, fault); in FNAME()
848 r = handle_abnormal_pfn(vcpu, fault, walker.pte_access); in FNAME()
856 if (fault->write && !(walker.pte_access & ACC_WRITE_MASK) && in FNAME()
857 !is_cr0_wp(vcpu->arch.mmu) && !fault->user && fault->slot) { in FNAME()
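Lines 856-857 are the CR0.WP=0 special case: a supervisor write to a guest-read-only page must still succeed when the guest runs with CR0.WP clear, so the handler grants write access on the shadow side. The predicate below is a hypothetical helper that restates that condition; the flag value is a simplified stand-in for KVM's ACC_WRITE_MASK.

/* Sketch of the CR0.WP=0 write-override check (lines 856-857). */
#include <stdbool.h>
#include <stdio.h>

#define ACC_WRITE_MASK (1 << 1)

static bool needs_wp_override(bool write_fault, unsigned pte_access,
			      bool cr0_wp, bool user_fault, bool has_slot)
{
	/* Only supervisor writes to present-but-read-only pages qualify,
	 * and only when CR0.WP is clear and the gfn has a memslot. */
	return write_fault && !(pte_access & ACC_WRITE_MASK) &&
	       !cr0_wp && !user_fault && has_slot;
}

int main(void)
{
	printf("override: %d\n",
	       needs_wp_override(true, 0, false, false, true));
	return 0;
}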
874 if (is_page_fault_stale(vcpu, fault, mmu_seq)) in FNAME()
880 r = FNAME(fetch)(vcpu, fault, &walker); in FNAME()
884 kvm_release_pfn_clean(fault->pfn); in FNAME()
979 *exception = walker.fault; in FNAME()
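Line 979 is from FNAME(gva_to_gpa): on a failed walk, the caller-supplied exception is filled from walker.fault and an invalid gpa is returned. The sketch below models that contract with simplified stand-in types; UNMAPPED_GVA and the gfn-to-gpa shift mirror KVM's conventions but are local definitions here.

/* Sketch of the gva_to_gpa() exception copy-out (line 979). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define UNMAPPED_GVA (~(uint64_t)0)

struct x86_exception { int vector; uint16_t error_code; };

struct guest_walker {
	bool ok;
	uint64_t gfn;
	struct x86_exception fault;
};

static uint64_t gva_to_gpa(const struct guest_walker *walker,
			   struct x86_exception *exception)
{
	if (walker->ok)
		return walker->gfn << 12;	/* gfn_to_gpa() */

	*exception = walker->fault;		/* line 979's copy-out */
	return UNMAPPED_GVA;
}

int main(void)
{
	struct guest_walker w = { .ok = false,
				  .fault = { .vector = 14, .error_code = 5 } };
	struct x86_exception e;

	if (gva_to_gpa(&w, &e) == UNMAPPED_GVA)
		printf("walk failed: #%d ec=%#x\n", e.vector, e.error_code);
	return 0;
}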