Lines matching refs: kvm_mmu_page
325 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in count_spte_clear()
409 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in __get_spte_lockless()
701 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
709 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn()
760 static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in account_shadowed()
779 void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) in account_huge_nx_page()
790 static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_shadowed()
807 void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp) in unaccount_huge_nx_page()
951 struct kvm_mmu_page *sp) in gfn_to_rmap()
971 struct kvm_mmu_page *sp; in rmap_add()
982 struct kvm_mmu_page *sp; in rmap_remove()
1090 struct kvm_mmu_page *sp = sptep_to_sp(sptep); in drop_large_spte()
1567 struct kvm_mmu_page *sp; in rmap_recycle()
1628 static void kvm_mmu_free_page(struct kvm_mmu_page *sp) in kvm_mmu_free_page()
1645 struct kvm_mmu_page *sp, u64 *parent_pte) in mmu_page_add_parent_pte()
1653 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp, in mmu_page_remove_parent_pte()
1659 static void drop_parent_pte(struct kvm_mmu_page *sp, in drop_parent_pte()
1666 static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu, int direct) in kvm_mmu_alloc_page()
1668 struct kvm_mmu_page *sp; in kvm_mmu_alloc_page()
1688 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp) in kvm_mmu_mark_parents_unsync()
1700 struct kvm_mmu_page *sp; in mark_unsync()
1713 struct kvm_mmu_page *sp) in nonpaging_sync_page()
1719 struct kvm_mmu_page *sp, u64 *spte, in nonpaging_update_pte()
1729 struct kvm_mmu_page *sp;
1735 static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp, in mmu_pages_add()
1751 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx) in clear_unsync_child_bit()
1758 static int __mmu_unsync_walk(struct kvm_mmu_page *sp, in __mmu_unsync_walk()
1764 struct kvm_mmu_page *child; in __mmu_unsync_walk()
1799 static int mmu_unsync_walk(struct kvm_mmu_page *sp, in mmu_unsync_walk()
1810 static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_unlink_unsync_page()
1818 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1833 static inline bool is_ept_sp(struct kvm_mmu_page *sp) in is_ept_sp()
1839 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in __kvm_sync_page()
1883 static bool is_obsolete_sp(struct kvm *kvm, struct kvm_mmu_page *sp) in is_obsolete_sp()
1889 static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in kvm_sync_page()
1900 struct kvm_mmu_page *s; in kvm_sync_pages()
1915 struct kvm_mmu_page *parent[PT64_ROOT_MAX_LEVEL];
1931 struct kvm_mmu_page *sp = pvec->page[n].sp; in mmu_pages_next()
1948 struct kvm_mmu_page *sp; in mmu_pages_first()
1971 struct kvm_mmu_page *sp; in mmu_pages_clear_parents()
1987 struct kvm_mmu_page *parent) in mmu_sync_children()
1990 struct kvm_mmu_page *sp; in mmu_sync_children()
2021 static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp) in __clear_sp_write_flooding_count()
2031 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu, in kvm_mmu_get_page()
2042 struct kvm_mmu_page *sp; in kvm_mmu_get_page()
2190 struct kvm_mmu_page *sp) in link_shadow_page()
2210 struct kvm_mmu_page *child; in validate_direct_spte()
2229 static int mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp, in mmu_page_zap_pte()
2233 struct kvm_mmu_page *child; in mmu_page_zap_pte()
2262 struct kvm_mmu_page *sp, in kvm_mmu_page_unlink_children()
2274 static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) in kvm_mmu_unlink_parents()
2284 struct kvm_mmu_page *parent, in mmu_zap_unsync_children()
2295 struct kvm_mmu_page *sp; in mmu_zap_unsync_children()
2308 struct kvm_mmu_page *sp, in __kvm_mmu_prepare_zap_page()
2365 static bool kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp, in kvm_mmu_prepare_zap_page()
2377 struct kvm_mmu_page *sp, *nsp; in kvm_mmu_commit_zap_page()
2403 struct kvm_mmu_page *sp, *tmp; in kvm_mmu_zap_oldest_mmu_pages()
2481 struct kvm_mmu_page *sp; in kvm_mmu_unprotect_page()
2501 static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp) in kvm_unsync_page()
2513 struct kvm_mmu_page *sp; in mmu_need_write_protect()
2577 struct kvm_mmu_page *sp; in set_spte()
2618 struct kvm_mmu_page *child; in mmu_set_spte()
2686 struct kvm_mmu_page *sp, in direct_pte_prefetch_many()
2714 struct kvm_mmu_page *sp, u64 *sptep) in __direct_pte_prefetch()
2738 struct kvm_mmu_page *sp; in direct_pte_prefetch()
2870 struct kvm_mmu_page *sp; in __direct_map()
2996 fast_pf_fix_direct_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp, in fast_pf_fix_direct_spte()
3049 struct kvm_mmu_page *sp; in fast_page_fault()
3148 struct kvm_mmu_page *sp; in mmu_free_root_page()
3229 struct kvm_mmu_page *sp; in mmu_alloc_root()
3383 struct kvm_mmu_page *sp; in kvm_mmu_sync_roots()
4815 struct kvm_mmu_page *sp, u64 *spte, in mmu_pte_write_new_pte()
4870 static bool detect_write_flooding(struct kvm_mmu_page *sp) in detect_write_flooding()
4887 static bool detect_write_misaligned(struct kvm_mmu_page *sp, gpa_t gpa, in detect_write_misaligned()
4911 static u64 *get_written_sptes(struct kvm_mmu_page *sp, gpa_t gpa, int *nspte) in get_written_sptes()
4963 struct kvm_mmu_page *sp; in kvm_mmu_pte_write()
5350 struct kvm_mmu_page *sp, *node; in kvm_zap_obsolete_pages()
5548 struct kvm_mmu_page *sp; in kvm_mmu_zap_collapsible_spte()
5665 struct kvm_mmu_page *sp, *node; in kvm_mmu_zap_all()
5883 sizeof(struct kvm_mmu_page), in kvm_mmu_module_init()
5970 struct kvm_mmu_page *sp; in kvm_recover_nx_lpages()
5990 struct kvm_mmu_page, in kvm_recover_nx_lpages()
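
For orientation, a simplified sketch of struct kvm_mmu_page itself follows. This is an approximation of the structure as it looked in kernels of this era; the authoritative definition lives in the KVM headers and its exact field set changes between versions. Note that the entries above with no enclosing function (1729, 1818, 1915) are file-scope declarations rather than uses inside a function body.

	/* Simplified sketch, not the authoritative definition. */
	struct kvm_mmu_page {
		struct list_head link;			/* active_mmu_pages / invalid list */
		struct hlist_node hash_link;		/* bucket in the gfn hash table */

		bool unsync;				/* SPTEs may be stale vs. guest PTEs */
		u8 mmu_valid_gen;			/* mismatch with kvm's gen => obsolete */

		gfn_t gfn;				/* guest frame number being shadowed */
		union kvm_mmu_page_role role;		/* level, access bits, quadrant, ... */

		u64 *spt;				/* the shadow page table itself */
		gfn_t *gfns;				/* per-SPTE gfns for indirect pages */

		unsigned int unsync_children;		/* unsync pages reachable below this one */
		struct kvm_rmap_head parent_ptes;	/* rmap of PTEs pointing at this page */
		DECLARE_BITMAP(unsync_child_bitmap, 512);
	};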
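
Several of the hits above (325, 409, 1090, 3049, ...) obtain the shadow page from a raw SPTE pointer via sptep_to_sp(). A minimal sketch of the underlying trick, assuming the allocation side stashes the kvm_mmu_page pointer in the backing struct page; the real helpers (to_shadow_page()/sptep_to_sp()) differ slightly across versions:

	static struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
	{
		/* SPTEs live inside a page-sized shadow table, so masking the
		 * pointer down to its page finds the table's struct page. */
		struct page *page = virt_to_page(sptep);

		/* kvm_mmu_alloc_page() stores the owning kvm_mmu_page in the
		 * page's private field with set_page_private(). */
		return (struct kvm_mmu_page *)page_private(page);
	}

This is why the MMU needs no side lookup table: any *sptep maps back to its kvm_mmu_page in O(1).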