Lines Matching refs:invalid_list

2330 				     struct list_head *invalid_list);
2332 				    struct list_head *invalid_list);
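These two continuation lines are the forward declarations of the prepare/commit pair that every hit below funnels through: kvm_mmu_prepare_zap_page() detaches a shadow page and queues it on a caller-supplied invalid_list, and kvm_mmu_commit_zap_page() frees the queued batch after a single remote TLB flush. The search shows only the lines containing invalid_list, so the leading lines of the prototypes below are reconstructed from the upstream tree of this vintage and may differ slightly by kernel version:

	static bool kvm_mmu_prepare_zap_page(struct kvm *kvm,
					     struct kvm_mmu_page *sp,
					     struct list_head *invalid_list);
	static void kvm_mmu_commit_zap_page(struct kvm *kvm,
					    struct list_head *invalid_list);
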
2352 struct list_head *invalid_list) in __kvm_sync_page() argument
2356 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
2364 struct list_head *invalid_list, in kvm_mmu_remote_flush_or_zap() argument
2367 if (!remote_flush && list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
2370 if (!list_empty(invalid_list)) in kvm_mmu_remote_flush_or_zap()
2371 kvm_mmu_commit_zap_page(kvm, invalid_list); in kvm_mmu_remote_flush_or_zap()
2378 struct list_head *invalid_list, in kvm_mmu_flush_or_zap() argument
2381 if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list, remote_flush)) in kvm_mmu_flush_or_zap()
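The fragments at 2364-2381 belong to the two flush-or-zap helpers. Reconstructed from those fragments plus the upstream kernel of this era (minor details vary between versions), the pair looks roughly like this; an empty invalid_list degrades to a plain TLB flush, so one helper serves both cases:

	static bool kvm_mmu_remote_flush_or_zap(struct kvm *kvm,
						struct list_head *invalid_list,
						bool remote_flush)
	{
		if (!remote_flush && list_empty(invalid_list))
			return false;

		if (!list_empty(invalid_list))
			kvm_mmu_commit_zap_page(kvm, invalid_list);
		else
			kvm_flush_remote_tlbs(kvm);
		return true;
	}

	static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
					 struct list_head *invalid_list,
					 bool remote_flush, bool local_flush)
	{
		if (kvm_mmu_remote_flush_or_zap(vcpu->kvm, invalid_list,
						remote_flush))
			return;

		if (local_flush)
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
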
2402 struct list_head *invalid_list) in kvm_sync_page() argument
2405 return __kvm_sync_page(vcpu, sp, invalid_list); in kvm_sync_page()
2410 struct list_head *invalid_list) in kvm_sync_pages() argument
2420 ret |= kvm_sync_page(vcpu, s, invalid_list); in kvm_sync_pages()
2505 LIST_HEAD(invalid_list); in mmu_sync_children()
2520 flush |= kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
2524 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2530 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2558 LIST_HEAD(invalid_list); in kvm_mmu_get_page()
2588 if (!__kvm_sync_page(vcpu, sp, &invalid_list)) in kvm_mmu_get_page()
2591 WARN_ON(!list_empty(&invalid_list)); in kvm_mmu_get_page()
2623 flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); in kvm_mmu_get_page()
2628 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in kvm_mmu_get_page()
2788 struct list_head *invalid_list) in mmu_zap_unsync_children() argument
2801 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2812 struct list_head *invalid_list, in __kvm_mmu_prepare_zap_page() argument
2819 *nr_zapped = mmu_zap_unsync_children(kvm, sp, invalid_list); in __kvm_mmu_prepare_zap_page()
2834 list_move(&sp->link, invalid_list); in __kvm_mmu_prepare_zap_page()
2856 struct list_head *invalid_list) in kvm_mmu_prepare_zap_page() argument
2860 __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list, &nr_zapped); in kvm_mmu_prepare_zap_page()
2865 struct list_head *invalid_list) in kvm_mmu_commit_zap_page() argument
2869 if (list_empty(invalid_list)) in kvm_mmu_commit_zap_page()
2883 list_for_each_entry_safe(sp, nsp, invalid_list, link) { in kvm_mmu_commit_zap_page()
2890 struct list_head *invalid_list) in prepare_zap_oldest_mmu_page() argument
2899 return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in prepare_zap_oldest_mmu_page()
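Lines 2865-2885 are the commit side, and the ordering there is the whole point: the remote TLB flush must complete before any page on invalid_list is freed, otherwise another vCPU could still be walking a page table whose backing memory has been reused. A reconstruction from these fragments and the upstream source of this vintage (the WARN_ON condition comes from upstream, not from the matches above):

	static void kvm_mmu_commit_zap_page(struct kvm *kvm,
					    struct list_head *invalid_list)
	{
		struct kvm_mmu_page *sp, *nsp;

		if (list_empty(invalid_list))
			return;

		/* No vCPU may still be walking the zapped tables after this. */
		kvm_flush_remote_tlbs(kvm);

		list_for_each_entry_safe(sp, nsp, invalid_list, link) {
			WARN_ON(!sp->role.invalid || sp->root_count);
			kvm_mmu_free_page(sp);
		}
	}
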
2908 LIST_HEAD(invalid_list); in kvm_mmu_change_mmu_pages()
2915 if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in kvm_mmu_change_mmu_pages()
2918 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_change_mmu_pages()
2930 LIST_HEAD(invalid_list); in kvm_mmu_unprotect_page()
2940 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2942 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_unprotect_page()
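kvm_mmu_unprotect_page() (2930-2942) shows the canonical caller shape: a stack-allocated LIST_HEAD, any number of prepare calls under mmu_lock, and one commit before the lock is dropped. A sketch assembled from the matches plus upstream; for_each_gfn_indirect_valid_sp is the iterator of this era, so treat exact names as version-dependent:

	int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
	{
		struct kvm_mmu_page *sp;
		LIST_HEAD(invalid_list);
		int r = 0;

		spin_lock(&kvm->mmu_lock);
		for_each_gfn_indirect_valid_sp(kvm, sp, gfn) {
			r = 1;
			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
		}
		/* One flush covers every page unprotected above. */
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
		spin_unlock(&kvm->mmu_lock);

		return r;
	}
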
3695 struct list_head *invalid_list) in mmu_free_root_page() argument
3705 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_free_root_page()
3715 LIST_HEAD(invalid_list); in kvm_mmu_free_roots()
3736 &invalid_list); in kvm_mmu_free_roots()
3742 &invalid_list); in kvm_mmu_free_roots()
3748 &invalid_list); in kvm_mmu_free_roots()
3754 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_mmu_free_roots()
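mmu_free_root_page() (3695-3705) is why the API takes a list rather than freeing eagerly: kvm_mmu_free_roots() calls it for multiple roots across the three sites at 3736, 3742, and 3748 (previous roots, the active root, or the PAE roots), then pays for a single commit at 3754. A hedged reconstruction; page_header() and PT64_BASE_ADDR_MASK are internals of this vintage and were renamed in later kernels:

	static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
				       struct list_head *invalid_list)
	{
		struct kvm_mmu_page *sp;

		if (!VALID_PAGE(*root_hpa))
			return;

		sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
		--sp->root_count;
		/* Only zap a root once the last reference is gone. */
		if (!sp->root_count && sp->role.invalid)
			kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

		*root_hpa = INVALID_PAGE;
	}
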
5421 LIST_HEAD(invalid_list); in kvm_mmu_pte_write()
5454 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in kvm_mmu_pte_write()
5478 kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); in kvm_mmu_pte_write()
5501 LIST_HEAD(invalid_list); in make_mmu_pages_available()
5507 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) in make_mmu_pages_available()
5512 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in make_mmu_pages_available()
6108 LIST_HEAD(invalid_list); in kvm_mmu_zap_all()
6116 if (__kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list, &ign)) in kvm_mmu_zap_all()
6122 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_zap_all()
6162 LIST_HEAD(invalid_list); in mmu_shrink_scan()
6191 if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in mmu_shrink_scan()
6193 kvm_mmu_commit_zap_page(kvm, &invalid_list); in mmu_shrink_scan()
6421 LIST_HEAD(invalid_list); in kvm_recover_nx_lpages()
6439 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_recover_nx_lpages()
6443 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_recover_nx_lpages()
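
Taken together, every site above follows the same two-phase pattern, and it exists to batch TLB flushes: zapping N pages one at a time would cost N remote flushes, while prepare-then-commit costs exactly one. A generic sketch of the eviction loop shared by the page-limit (2908-2918), shrinker (6162-6193), and NX-recovery (6421-6443) paths; the predicate name is hypothetical, the other calls appear in the matches above:

	/* Illustrative caller loop; assumes mmu_lock is held. */
	LIST_HEAD(invalid_list);

	while (too_many_mmu_pages(kvm))		/* hypothetical predicate */
		if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
			break;			/* nothing left to evict */

	/* One remote TLB flush covers the whole batch. */
	kvm_mmu_commit_zap_page(kvm, &invalid_list);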