Lines matching refs: invalid_list

1819 				     struct list_head *invalid_list);
1821 struct list_head *invalid_list);
1840 struct list_head *invalid_list) in __kvm_sync_page() argument
1844 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
1852 struct list_head *invalid_list, in kvm_mmu_remote_flush_or_zap() argument
1855 if (!remote_flush && list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
1858 if (!list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
1859 kvm_mmu_commit_zap_page(kvm, invalid_list); in kvm_mmu_remote_flush_or_zap()
1866 struct list_head *invalid_list, in kvm_mmu_flush_or_zap() argument
1869 if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) in kvm_mmu_flush_or_zap()
1890 struct list_head *invalid_list) in kvm_sync_page() argument
1893 return __kvm_sync_page(vcpu, sp, invalid_list); in kvm_sync_page()
1898 struct list_head *invalid_list) in kvm_sync_pages() argument
1908 ret |= kvm_sync_page(vcpu, s, invalid_list); in kvm_sync_pages()
1993 LIST_HEAD(invalid_list); in mmu_sync_children()
2008 flush |= kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
2012 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2018 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2046 LIST_HEAD(invalid_list); in kvm_mmu_get_page()
2080 if (!__kvm_sync_page(vcpu, sp, &invalid_list)) in kvm_mmu_get_page()
2083 WARN_ON(!list_empty(&invalid_list)); in kvm_mmu_get_page()
2115 flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); in kvm_mmu_get_page()
2119 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in kvm_mmu_get_page()
2230 u64 *spte, struct list_head *invalid_list) in mmu_page_zap_pte() argument
2250 if (tdp_enabled && invalid_list && in mmu_page_zap_pte()
2253 invalid_list); in mmu_page_zap_pte()
2263 struct list_head *invalid_list) in kvm_mmu_page_unlink_children() argument
2269 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list); in kvm_mmu_page_unlink_children()
2285 struct list_head *invalid_list) in mmu_zap_unsync_children() argument
2298 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2309 struct list_head *invalid_list, in __kvm_mmu_prepare_zap_page() argument
2316 *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); in __kvm_mmu_prepare_zap_page()
2317 *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list); in __kvm_mmu_prepare_zap_page()
2338 list_add(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2340 list_move(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2366 struct list_head *invalid_list) in kvm_mmu_prepare_zap_page() argument
2370 __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); in kvm_mmu_prepare_zap_page()
2375 struct list_head *invalid_list) in kvm_mmu_commit_zap_page() argument
2379 if (list_empty(invalid_list)) in kvm_mmu_commit_zap_page()
2393 list_for_each_entry_safe(sp, nsp, invalid_list, link) { in kvm_mmu_commit_zap_page()
2404 LIST_HEAD(invalid_list); in kvm_mmu_zap_oldest_mmu_pages()
2420 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, in kvm_mmu_zap_oldest_mmu_pages()
2430 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_zap_oldest_mmu_pages()
2482 LIST_HEAD(invalid_list); in kvm_mmu_unprotect_page()
2492 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2494 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_unprotect_page()
3146 struct list_head *invalid_list) in mmu_free_root_page() argument
3159 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_free_root_page()
3171 LIST_HEAD(invalid_list); in kvm_mmu_free_roots()
3192 &invalid_list); in kvm_mmu_free_roots()
3197 mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); in kvm_mmu_free_roots()
3203 &invalid_list); in kvm_mmu_free_roots()
3209 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_free_roots()
4964 LIST_HEAD(invalid_list); in kvm_mmu_pte_write()
4997 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in kvm_mmu_pte_write()
5021 kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); in kvm_mmu_pte_write()
5666 LIST_HEAD(invalid_list); in kvm_mmu_zap_all()
5674 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) in kvm_mmu_zap_all()
5680 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_zap_all()
5724 LIST_HEAD(invalid_list); in mmu_shrink_scan()
5972 LIST_HEAD(invalid_list); in kvm_recover_nx_lpages()
5997 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_recover_nx_lpages()
6002 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_recover_nx_lpages()
6006 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_recover_nx_lpages()
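
Taken together, the references above show a consistent two-phase teardown: each caller declares a local LIST_HEAD(invalid_list), pages chosen for removal are queued with kvm_mmu_prepare_zap_page() or __kvm_mmu_prepare_zap_page(), and kvm_mmu_commit_zap_page() later walks the list and frees the queued pages after the required remote TLB flush (kvm_mmu_remote_flush_or_zap() appears to skip both steps when the list is empty and no flush is pending). The sketch below is a minimal, self-contained user-space model of that prepare/commit shape only; every name in it (struct shadow_page, zap_prepare, zap_commit, the hand-rolled list) is a hypothetical stand-in and is not the kernel's API.

/*
 * Toy user-space model of the two-phase "prepare then commit" pattern
 * visible in the references above.  zap_prepare()/zap_commit() stand in
 * for kvm_mmu_prepare_zap_page()/kvm_mmu_commit_zap_page(); this is a
 * sketch of the calling pattern, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct list_node {
	struct list_node *next;
};

struct shadow_page {
	int id;
	struct list_node link;	/* threads the page onto invalid_list */
};

/* Phase 1: take the page out of service and queue it on invalid_list. */
static void zap_prepare(struct shadow_page *sp, struct list_node *invalid_list)
{
	sp->link.next = invalid_list->next;
	invalid_list->next = &sp->link;
	printf("prepared page %d for zapping\n", sp->id);
}

/* Phase 2: after the (modelled) TLB flush, free everything queued. */
static void zap_commit(struct list_node *invalid_list)
{
	struct list_node *n = invalid_list->next;

	while (n) {
		struct shadow_page *sp = (struct shadow_page *)
			((char *)n - offsetof(struct shadow_page, link));
		n = n->next;
		printf("freeing page %d\n", sp->id);
		free(sp);
	}
	invalid_list->next = NULL;
}

int main(void)
{
	/* Mirrors the callers' local "LIST_HEAD(invalid_list)". */
	struct list_node invalid_list = { .next = NULL };

	for (int i = 0; i < 3; i++) {
		struct shadow_page *sp = malloc(sizeof(*sp));

		if (!sp)
			return 1;
		sp->id = i;
		zap_prepare(sp, &invalid_list);
	}

	/* In the real code a remote TLB flush happens between the phases. */
	zap_commit(&invalid_list);
	return 0;
}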