Lines Matching refs:invalid_list
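
All hits below are in KVM's x86 MMU (apparently arch/x86/kvm/mmu/mmu.c). invalid_list implements a two-phase zap: a caller declares a local list, queues shadow pages onto it with kvm_mmu_prepare_zap_page() while holding mmu_lock, then calls kvm_mmu_commit_zap_page(), which issues one remote TLB flush and frees everything queued. A minimal caller-side sketch of the idiom, assuming a kernel where mmu_lock is still a spinlock and that sp was looked up elsewhere:

        LIST_HEAD(invalid_list);

        spin_lock(&kvm->mmu_lock);
        /* Phase 1: unlink the page and queue it; no TLB flush yet. */
        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
        /* Phase 2: one remote TLB flush, then free every queued page. */
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
        spin_unlock(&kvm->mmu_lock);

The kvm_mmu_unprotect_page() hits at 2546-2558 show exactly this shape.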

1895 struct list_head *invalid_list);
1897 struct list_head *invalid_list);
1910 struct list_head *invalid_list) in kvm_sync_page() argument
1913 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in kvm_sync_page()
1921 struct list_head *invalid_list, in kvm_mmu_remote_flush_or_zap() argument
1924 if (!remote_flush && list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
1927 if (!list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
1928 kvm_mmu_commit_zap_page(kvm, invalid_list); in kvm_mmu_remote_flush_or_zap()
1935 struct list_head *invalid_list, in kvm_mmu_flush_or_zap() argument
1938 if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) in kvm_mmu_flush_or_zap()
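
The fragments at 1921-1938 above are the two flush helpers. A sketch of how they appear to fit together (the list_empty() checks and the call at 1938 are verbatim from the hits; the else branch and the local-flush request are reconstructed and may differ by kernel version):

static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
                                        struct list_head *invalid_list,
                                        bool remote_flush)
{
        if (!remote_flush && list_empty(invalid_list))
                return false;   /* nothing zapped, no flush forced */

        if (!list_empty(invalid_list))
                /* Commit flushes remote TLBs before freeing pages. */
                kvm_mmu_commit_zap_page(kvm, invalid_list);
        else
                kvm_flush_remote_tlbs(kvm);
        return true;
}

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
                                 struct list_head *invalid_list,
                                 bool remote_flush, bool local_flush)
{
        if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush))
                return;         /* a remote flush covers this vCPU too */

        if (local_flush)
                /* Exact request name varies across versions. */
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

kvm_mmu_pte_write() (5212-5265 below) is the main caller that drives both the remote_flush and local_flush flags.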
2037 LIST_HEAD(invalid_list); in mmu_sync_children()
2053 flush |= kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
2057 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2068 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
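
In mmu_sync_children() (2037-2068), kvm_sync_page() returns whether a TLB flush is needed; the results are OR-ed into a local flush flag and handed to kvm_mmu_flush_or_zap() at batch boundaries (2057) and once more after the loop (2068). A compressed sketch, with invalid_list declared as at 2037 and the page walker elided:

        bool flush = false;

        for_each_sp(pages, sp, parents, i)      /* walker macro as in mmu.c */
                flush |= kvm_sync_page(vcpu, sp, &invalid_list);
        /* remote_flush=false: pages zapped onto invalid_list get their
         * remote flush from the commit path; 'flush' only requests the
         * local flush that resyncing requires. */
        kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);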
2095 LIST_HEAD(invalid_list); in kvm_mmu_get_page()
2128 &invalid_list); in kvm_mmu_get_page()
2148 if (!kvm_sync_page(vcpu, sp, &invalid_list)) in kvm_mmu_get_page()
2151 WARN_ON(!list_empty(&invalid_list)); in kvm_mmu_get_page()
2176 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_mmu_get_page()
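
The kvm_mmu_get_page() hits (2095-2176) show the sync-or-zap side of the API: a stale unsync page for the same gfn is queued for zapping (2128), while a matching unsync page is synced in place and only queued if the sync fails. Roughly, reconstructed around the hits at 2148-2151 (the branch sits inside the page-lookup loop):

        if (sp->unsync) {
                /* Sync failed: the page was queued on invalid_list and the
                 * caller breaks out to rebuild it; the commit at 2176 frees it. */
                if (!kvm_sync_page(vcpu, sp, &invalid_list))
                        break;

                /* A successful sync must not have queued anything. */
                WARN_ON(!list_empty(&invalid_list));
        }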
2287 u64 *spte, struct list_head *invalid_list) in mmu_page_zap_pte() argument
2305 if (tdp_enabled && invalid_list && in mmu_page_zap_pte()
2308 invalid_list); in mmu_page_zap_pte()
2318 struct list_head *invalid_list) in kvm_mmu_page_unlink_children() argument
2324 zapped += mmu_page_zap_pte(kvm, sp, sp->spt + i, invalid_list); in kvm_mmu_page_unlink_children()
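
mmu_page_zap_pte() (2287-2308) takes invalid_list as an optional argument, and kvm_mmu_page_unlink_children() (2318-2324) threads it through per SPTE. The hit at 2305 guards a recursive kvm_mmu_prepare_zap_page() on the child page, presumably so a child whose last parent SPTE is being dropped under TDP is zapped along with it; the condition is only partially visible in the hits, so treat this fragment as hedged:

        if (tdp_enabled && invalid_list
            /* && remaining checks on the child, elided in the hits */)
                return kvm_mmu_prepare_zap_page(kvm, child, invalid_list);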
2340 struct list_head *invalid_list) in mmu_zap_unsync_children() argument
2353 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2364 struct list_head *invalid_list, in __kvm_mmu_prepare_zap_page() argument
2371 *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); in __kvm_mmu_prepare_zap_page()
2372 *nr_zapped += kvm_mmu_page_unlink_children(kvm, sp, invalid_list); in __kvm_mmu_prepare_zap_page()
2393 list_add(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2395 list_move(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2421 struct list_head *invalid_list) in kvm_mmu_prepare_zap_page() argument
2425 __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); in kvm_mmu_prepare_zap_page()
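
The list_add()/list_move() pair at 2393/2395 is the subtle part of the prepare phase: a page still on the active-pages list is moved onto invalid_list, while a page that was already unlinked (an invalid former root) must be added fresh, since list_move() would touch its stale list pointers. A hedged sketch of that branch; the root_count/role.invalid tests are reconstructed from memory of mmu.c and may differ:

        if (!sp->root_count) {
                (*nr_zapped)++;         /* count sp itself, on top of 2371-2372 */

                if (sp->role.invalid)
                        /* Already off the active list: insert, don't move. */
                        list_add(&sp->link, invalid_list);
                else
                        list_move(&sp->link, invalid_list);
        }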
2430 struct list_head *invalid_list) in kvm_mmu_commit_zap_page() argument
2434 if (list_empty(invalid_list)) in kvm_mmu_commit_zap_page()
2448 list_for_each_entry_safe(sp, nsp, invalid_list, link) { in kvm_mmu_commit_zap_page()
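
kvm_mmu_commit_zap_page() (2430-2448) is the second phase. The empty-list fast path and the safe iteration are visible above; the flush and the free call are filled in here as this function's widely documented behavior, so treat the body as a sketch:

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list)
{
        struct kvm_mmu_page *sp, *nsp;

        if (list_empty(invalid_list))
                return;

        /* One remote TLB flush covers every page queued in phase 1; no
         * vCPU may retain a stale translation once the pages are freed. */
        kvm_flush_remote_tlbs(kvm);

        list_for_each_entry_safe(sp, nsp, invalid_list, link)
                kvm_mmu_free_page(sp);  /* unlinks sp and frees it */
}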
2459 LIST_HEAD(invalid_list); in kvm_mmu_zap_oldest_mmu_pages()
2475 unstable = __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, in kvm_mmu_zap_oldest_mmu_pages()
2485 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_zap_oldest_mmu_pages()
2546 LIST_HEAD(invalid_list); in kvm_mmu_unprotect_page()
2556 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2558 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_unprotect_page()
3309 struct list_head *invalid_list) in mmu_free_root_page() argument
3321 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_free_root_page()
3332 LIST_HEAD(invalid_list); in kvm_mmu_free_roots()
3353 &invalid_list); in kvm_mmu_free_roots()
3358 mmu_free_root_page(kvm, &mmu->root_hpa, &invalid_list); in kvm_mmu_free_roots()
3365 &invalid_list); in kvm_mmu_free_roots()
3373 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_free_roots()
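
In the root-freeing path (3309-3373), invalid_list interacts with root_count: mmu_free_root_page() drops a reference and only queues the page once the last root reference is gone. A hedged sketch of the hit at 3321 in context, with the surrounding tests reconstructed:

        --sp->root_count;
        if (!sp->root_count && sp->role.invalid)
                kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

        *root_hpa = INVALID_PAGE;

kvm_mmu_free_roots() then commits once for all freed roots (3373).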
5212 LIST_HEAD(invalid_list); in kvm_mmu_pte_write()
5245 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in kvm_mmu_pte_write()
5265 kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); in kvm_mmu_pte_write()
5924 LIST_HEAD(invalid_list); in kvm_mmu_zap_all()
5932 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) in kvm_mmu_zap_all()
5938 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_zap_all()
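
__kvm_mmu_prepare_zap_page() returns true when zapping destabilized the walk, e.g. because zapping unsync children (2353) removed other entries from the list being iterated. That is why both kvm_mmu_zap_oldest_mmu_pages (2475) and kvm_mmu_zap_all (5932) capture the return value and restart. The kvm_mmu_zap_all shape, sketched:

        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);
        int ign;

restart:
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
                if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign))
                        goto restart;   /* list changed under us; re-walk */

        kvm_mmu_commit_zap_page(kvm, &invalid_list);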
5982 LIST_HEAD(invalid_list); in mmu_shrink_scan()
6210 LIST_HEAD(invalid_list); in kvm_recover_nx_lpages()
6235 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_recover_nx_lpages()
6240 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); in kvm_recover_nx_lpages()
6245 kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush); in kvm_recover_nx_lpages()
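
The doubled kvm_mmu_remote_flush_or_zap() calls at 6240/6245 are the batching idiom used when many pages are zapped under mmu_lock: the pending batch is drained before the lock is dropped to reschedule, so the remote TLB flush happens before anyone else can take mmu_lock and observe the zapped pages. A sketch of that shape, again assuming a spinlock-era mmu_lock, with the zap-candidate selection left illustrative:

        LIST_HEAD(invalid_list);
        bool flush = false;

        spin_lock(&kvm->mmu_lock);
        while (have_pages_to_zap()) {   /* illustrative condition; sp from it */
                kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

                if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                        /* Drain the batch before briefly releasing the lock. */
                        kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
                        cond_resched_lock(&kvm->mmu_lock);
                        flush = false;
                }
        }
        /* Final drain for the last partial batch (cf. 6245). */
        kvm_mmu_remote_flush_or_zap(kvm, &invalid_list, flush);
        spin_unlock(&kvm->mmu_lock);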