
Searched refs: gfn_end (Results 1 – 7 of 7), sorted by relevance

/Linux-v5.10/arch/mips/kvm/
mmu.c:446  gpa_t gfn_end, in handle_hva_to_gpa() argument
460 gfn_t gfn, gfn_end; in handle_hva_to_gpa() local
473 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in handle_hva_to_gpa()
475 ret |= handler(kvm, gfn, gfn_end, memslot, data); in handle_hva_to_gpa()
482 static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_unmap_hva_handler() argument
485 kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end); in kvm_unmap_hva_handler()
498 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_set_spte_handler() argument
540 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_age_hva_handler() argument
543 return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end); in kvm_age_hva_handler()
546 static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_test_age_hva_handler() argument
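The MIPS hits above all follow one pattern: walk the memslots, clamp the incoming HVA range to each slot, convert the clamped bounds into a guest-frame range [gfn, gfn_end) with hva_to_gfn_memslot(), and hand that range to a per-purpose handler (unmap, set-spte, age, test-age). A minimal sketch of that dispatch loop, simplified from the handle_hva_to_gpa() context shown above and not the verbatim kernel code:

    /*
     * Illustrative sketch only: simplified from the handle_hva_to_gpa()
     * hits above (arch/mips/kvm/mmu.c, Linux v5.10), not verbatim code.
     */
    static int handle_hva_to_gpa(struct kvm *kvm, unsigned long start,
                                 unsigned long end,
                                 int (*handler)(struct kvm *kvm, gfn_t gfn,
                                                gpa_t gfn_end,
                                                struct kvm_memory_slot *memslot,
                                                void *data),
                                 void *data)
    {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot;
        int ret = 0;

        kvm_for_each_memslot(memslot, slots) {
            unsigned long hva_start, hva_end;
            gfn_t gfn, gfn_end;

            /* Clamp the HVA range to this memslot's userspace mapping. */
            hva_start = max(start, memslot->userspace_addr);
            hva_end = min(end, memslot->userspace_addr +
                               (memslot->npages << PAGE_SHIFT));
            if (hva_start >= hva_end)
                continue;

            /* [gfn, gfn_end) covers every page touching [hva_start, hva_end). */
            gfn = hva_to_gfn_memslot(hva_start, memslot);
            gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

            ret |= handler(kvm, gfn, gfn_end, memslot, data);
        }

        return ret;
    }

The handlers at lines 482, 498, 540 and 546 then operate on the whole [gfn, gfn_end) span, e.g. kvm_unmap_hva_handler() flushing the GPA page tables via kvm_mips_flush_gpa_pt().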
/Linux-v5.10/arch/powerpc/kvm/
e500_mmu_host.c:406  unsigned long gfn_start, gfn_end; in kvmppc_e500_shadow_map() local
410 gfn_end = gfn_start + tsize_pages; in kvmppc_e500_shadow_map()
414 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
book3s_pr.c:439  gfn_t gfn, gfn_end; in do_kvm_unmap_hva() local
451 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in do_kvm_unmap_hva()
454 gfn_end << PAGE_SHIFT); in do_kvm_unmap_hva()
book3s_64_mmu_hv.c:770  gfn_t gfn, gfn_end; in kvm_handle_hva_range() local
782 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in kvm_handle_hva_range()
784 for (; gfn < gfn_end; ++gfn) { in kvm_handle_hva_range()
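The Book3S paths compute gfn and gfn_end the same way, but differ in dispatch: the PR path at book3s_pr.c:439-454 converts the range once and passes the whole GPA span on, whereas the HV path at book3s_64_mmu_hv.c:770-784 loops and invokes its handler once per guest frame. A hedged sketch of that per-GFN loop, abridged from the kvm_handle_hva_range() context above (the handler signature here is simplified, not verbatim kernel code):

    /*
     * Abridged sketch of the per-GFN dispatch seen in kvm_handle_hva_range()
     * above (arch/powerpc/kvm/book3s_64_mmu_hv.c); simplified, not verbatim.
     */
    static int kvm_handle_hva_range(struct kvm *kvm, unsigned long start,
                                    unsigned long end,
                                    int (*handler)(struct kvm *kvm,
                                                   struct kvm_memory_slot *memslot,
                                                   unsigned long gfn))
    {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot;
        int ret = 0;

        kvm_for_each_memslot(memslot, slots) {
            unsigned long hva_start, hva_end;
            gfn_t gfn, gfn_end;

            hva_start = max(start, memslot->userspace_addr);
            hva_end = min(end, memslot->userspace_addr +
                               (memslot->npages << PAGE_SHIFT));
            if (hva_start >= hva_end)
                continue;

            gfn = hva_to_gfn_memslot(hva_start, memslot);
            gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

            /* One handler call per guest frame, not per range. */
            for (; gfn < gfn_end; ++gfn)
                ret |= handler(kvm, memslot, gfn);
        }

        return ret;
    }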
/Linux-v5.10/arch/x86/kvm/
mmu.h:216  void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
/Linux-v5.10/arch/x86/kvm/mmu/
tdp_mmu.c:623  gfn_t gfn_start, gfn_end; in kvm_tdp_mmu_handle_hva_range() local
635 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in kvm_tdp_mmu_handle_hva_range()
638 gfn_end, data); in kvm_tdp_mmu_handle_hva_range()
mmu.c:1472  gfn_t gfn_start, gfn_end; in kvm_handle_hva_range() local
1484 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); in kvm_handle_hva_range()
1488 gfn_start, gfn_end - 1, in kvm_handle_hva_range()
5473 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) in kvm_zap_gfn_range() argument
5487 end = min(gfn_end, memslot->base_gfn + memslot->npages); in kvm_zap_gfn_range()
5499 flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end); in kvm_zap_gfn_range()
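On x86 the same clamping idiom shows up in the zap path: kvm_zap_gfn_range() (declared at mmu.h:216, defined at mmu.c:5473) intersects the requested [gfn_start, gfn_end) span with each memslot before zapping, then lets the TDP MMU zap the full range itself via kvm_tdp_mmu_zap_gfn_range(). A hedged sketch of that shape; locking, the iteration over multiple address spaces, and the TDP-MMU-enabled check are omitted, and zap_slot_gfn_range() below is a hypothetical stand-in for the real per-slot zap helper:

    /*
     * Hedged sketch of the clamping idiom in kvm_zap_gfn_range() above
     * (arch/x86/kvm/mmu/mmu.c, Linux v5.10). Locking, multiple address
     * spaces, and the TDP-MMU-enabled check are omitted;
     * zap_slot_gfn_range() is a hypothetical placeholder.
     */
    void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
    {
        struct kvm_memslots *slots = kvm_memslots(kvm);
        struct kvm_memory_slot *memslot;
        bool flush;

        kvm_for_each_memslot(memslot, slots) {
            gfn_t start, end;

            /* Intersect the requested span with this memslot (end exclusive). */
            start = max(gfn_start, memslot->base_gfn);
            end = min(gfn_end, memslot->base_gfn + memslot->npages);
            if (start >= end)
                continue;

            zap_slot_gfn_range(kvm, memslot, start, end); /* hypothetical helper */
        }

        /* The TDP MMU zaps the whole [gfn_start, gfn_end) range itself. */
        flush = kvm_tdp_mmu_zap_gfn_range(kvm, gfn_start, gfn_end);
        if (flush)
            kvm_flush_remote_tlbs(kvm);
    }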