Lines matching refs: invalid_list

2157 struct list_head *invalid_list); in kvm_mmu_prepare_zap_page() prototype
2159 struct list_head *invalid_list); in kvm_mmu_commit_zap_page() prototype
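
Judging by the function names and line numbers, every hit in this listing is in arch/x86/kvm/mmu.c. invalid_list is the caller-local list behind KVM's two-phase shadow-page zap: kvm_mmu_prepare_zap_page() unlinks pages onto the list while mmu_lock is held, and kvm_mmu_commit_zap_page() later flushes TLBs and frees the whole batch. The two forward declarations at 2157/2159 correspond to signatures like the sketch below, reconstructed from the call sites in this listing; the prepare side's return type has varied between int and bool across kernel versions, so treat it as an assumption:

/*
 * Sketch of the declarations behind the 2157/2159 hits. The int return
 * (number of pages zapped, including unsync children) is assumed from
 * this era of mmu.c.
 */
static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list);
static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list);
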
2181 struct list_head *invalid_list) in __kvm_sync_page() argument
2185 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list); in __kvm_sync_page()
2193 struct list_head *invalid_list, in kvm_mmu_flush_or_zap() argument
2196 if (!list_empty(invalid_list)) { in kvm_mmu_flush_or_zap()
2197 kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list); in kvm_mmu_flush_or_zap()
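
kvm_mmu_flush_or_zap() (2193-2197) lets the sync and write paths treat "zapped something" and "need a TLB flush" uniformly: a non-empty invalid_list makes the commit, which performs its own remote TLB flush, stand in for any explicit flush. A minimal sketch; the list_empty() check and the commit call are in the hits above, while the two flush branches are reconstructed from memory of this era's mmu.c:

static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
				 struct list_head *invalid_list,
				 bool remote_flush, bool local_flush)
{
	if (!list_empty(invalid_list)) {
		/* The commit performs a remote TLB flush of its own. */
		kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
		return;
	}

	if (remote_flush)
		kvm_flush_remote_tlbs(vcpu->kvm);
	else if (local_flush)
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}

The mmu_sync_children() (2342/2348), kvm_mmu_get_page() (2447) and kvm_mmu_pte_write() (5187) hits below are its callers.
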
2220 struct list_head *invalid_list) in kvm_sync_page() argument
2223 return __kvm_sync_page(vcpu, sp, invalid_list); in kvm_sync_page()
2228 struct list_head *invalid_list) in kvm_sync_pages() argument
2238 ret |= kvm_sync_page(vcpu, s, invalid_list); in kvm_sync_pages()
2323 LIST_HEAD(invalid_list); in mmu_sync_children()
2338 flush |= kvm_sync_page(vcpu, sp, &invalid_list); in mmu_sync_children()
2342 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
2348 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in mmu_sync_children()
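
mmu_sync_children() (2323-2348) shows the flush-accumulator idiom the sync paths share: OR together each kvm_sync_page() result and hand the aggregate to kvm_mmu_flush_or_zap(), both at the resched point mid-walk (2342) and at the end (2348). A condensed sketch of that shape; sync_page_array() is a hypothetical stand-in for the real pvec-based walk of unsync children:

/* Hypothetical helper illustrating the accumulate-then-flush idiom. */
static bool sync_page_array(struct kvm_vcpu *vcpu,
			    struct kvm_mmu_page **pages, int nr)
{
	LIST_HEAD(invalid_list);
	bool flush = false;
	int i;

	for (i = 0; i < nr; i++)
		flush |= kvm_sync_page(vcpu, pages[i], &invalid_list);

	/* remote_flush=false: a non-empty invalid_list flushes by itself. */
	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
	return flush;
}
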
2376 LIST_HEAD(invalid_list); in kvm_mmu_get_page()
2406 if (!__kvm_sync_page(vcpu, sp, &invalid_list)) in kvm_mmu_get_page()
2409 WARN_ON(!list_empty(&invalid_list)); in kvm_mmu_get_page()
2441 flush |= kvm_sync_pages(vcpu, gfn, &invalid_list); in kvm_mmu_get_page()
2447 kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush); in kvm_mmu_get_page()
2607 struct list_head *invalid_list) in mmu_zap_unsync_children() argument
2620 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_zap_unsync_children()
2630 struct list_head *invalid_list) in kvm_mmu_prepare_zap_page() argument
2636 ret = mmu_zap_unsync_children(kvm, sp, invalid_list); in kvm_mmu_prepare_zap_page()
2648 list_move(&sp->link, invalid_list); in kvm_mmu_prepare_zap_page()
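
On the prepare side (2630-2648), everything happens under mmu_lock: mmu_zap_unsync_children() (2607-2620) recursively prepares unsync children first, then the page itself is unlinked. The key step is the list_move() at 2648: the page is moved onto the caller's invalid_list rather than freed, because vCPUs may still hold TLB entries pointing into it. A condensed sketch; the elided middle (dropping parent sptes, unaccounting, and the root_count != 0 special case) follows this era's mmu.c from memory:

static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
				    struct list_head *invalid_list)
{
	int ret;

	ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
	/* ... drop parent sptes, unaccount the page, etc. (elided) ... */
	sp->role.invalid = 1;
	/* Defer the free: the page is still reachable via stale TLBs. */
	list_move(&sp->link, invalid_list);
	return ret;
}
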
2666 struct list_head *invalid_list) in kvm_mmu_commit_zap_page() argument
2670 if (list_empty(invalid_list)) in kvm_mmu_commit_zap_page()
2684 list_for_each_entry_safe(sp, nsp, invalid_list, link) { in kvm_mmu_commit_zap_page()
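
The commit side (2666-2684) is where the batching pays off: a single remote TLB flush covers every page collected on invalid_list, after which the pages can be freed with the _safe iterator seen at 2684. Sketch; the list_empty() check and the iterator are in the hits above, while the flush call and free helper are from memory of this era:

static void kvm_mmu_commit_zap_page(struct kvm *kvm,
				    struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp, *nsp;

	if (list_empty(invalid_list))
		return;

	/*
	 * One flush for the whole batch: after this returns, no vCPU
	 * can still be using any of the prepared-for-zap pages.
	 */
	kvm_flush_remote_tlbs(kvm);

	list_for_each_entry_safe(sp, nsp, invalid_list, link)
		kvm_mmu_free_page(sp);
}
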
2691 struct list_head *invalid_list) in prepare_zap_oldest_mmu_page() argument
2700 return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in prepare_zap_oldest_mmu_page()
2709 LIST_HEAD(invalid_list); in kvm_mmu_change_mmu_pages()
2716 if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in kvm_mmu_change_mmu_pages()
2719 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_change_mmu_pages()
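
kvm_mmu_change_mmu_pages() (2709-2719) shows the canonical caller shape, which repeats almost verbatim in kvm_mmu_unprotect_page() (2731-2743), make_mmu_pages_available() (5210-5221) and mmu_shrink_scan() (5809-5840) below: declare an on-stack list with LIST_HEAD(), prepare-zap in a loop under mmu_lock, commit once at the end. A sketch of that shape; trim_mmu_pages() and its goal parameter are hypothetical stand-ins:

/* Hypothetical wrapper showing the LIST_HEAD / prepare-loop / commit shape. */
static void trim_mmu_pages(struct kvm *kvm, unsigned long goal)
{
	LIST_HEAD(invalid_list);	/* on-stack list head, starts empty */

	spin_lock(&kvm->mmu_lock);
	while (kvm->arch.n_used_mmu_pages > goal) {
		if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
			break;		/* nothing left that can be zapped */
	}
	kvm_mmu_commit_zap_page(kvm, &invalid_list);
	spin_unlock(&kvm->mmu_lock);
}

Committing once outside the loop amortizes the remote TLB flush over the whole batch instead of paying it per page.
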
2731 LIST_HEAD(invalid_list); in kvm_mmu_unprotect_page()
2741 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list); in kvm_mmu_unprotect_page()
2743 kvm_mmu_commit_zap_page(kvm, &invalid_list); in kvm_mmu_unprotect_page()
3467 struct list_head *invalid_list) in mmu_free_root_page() argument
3477 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list); in mmu_free_root_page()
3486 LIST_HEAD(invalid_list); in kvm_mmu_free_roots()
3508 &invalid_list); in kvm_mmu_free_roots()
3514 &invalid_list); in kvm_mmu_free_roots()
3520 &invalid_list); in kvm_mmu_free_roots()
3525 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in kvm_mmu_free_roots()
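
In kvm_mmu_free_roots() (3486-3525) the same pattern handles root pages: mmu_free_root_page() (3467-3477) drops a reference and only prepares the zap once the last reference is gone and the root was already invalidated, with one commit at 3525 covering all freed roots. A sketch of the helper, reconstructed from memory of this era's mmu.c (the masking macro and page_header() lookup in particular):

static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
			       struct list_head *invalid_list)
{
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(*root_hpa))
		return;

	sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
	--sp->root_count;
	/* Zap only when the last reference drops on an invalidated root. */
	if (!sp->root_count && sp->role.invalid)
		kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);

	*root_hpa = INVALID_PAGE;
}
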
5133 LIST_HEAD(invalid_list); in kvm_mmu_pte_write()
5165 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list); in kvm_mmu_pte_write()
5187 kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush); in kvm_mmu_pte_write()
5210 LIST_HEAD(invalid_list); in make_mmu_pages_available()
5216 if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list)) in make_mmu_pages_available()
5221 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list); in make_mmu_pages_available()
5809 LIST_HEAD(invalid_list); in mmu_shrink_scan()
5838 if (prepare_zap_oldest_mmu_page(kvm, &invalid_list)) in mmu_shrink_scan()
5840 kvm_mmu_commit_zap_page(kvm, &invalid_list); in mmu_shrink_scan()