
Searched refs: base_gfn (Results 1 – 25 of 26), sorted by relevance


/Linux-v5.10/arch/arm64/kvm/
mmu.c
147 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_flush_memslot()
407 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; in stage2_unmap_memslot()
567 start = memslot->base_gfn << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
568 end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; in kvm_mmu_wp_memory_region()
591 phys_addr_t base_gfn = slot->base_gfn + gfn_offset; in kvm_mmu_write_protect_pt_masked() local
592 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; in kvm_mmu_write_protect_pt_masked()
593 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; in kvm_mmu_write_protect_pt_masked()
641 gpa_start = memslot->base_gfn << PAGE_SHIFT; in fault_supports_stage2_huge_mapping()
1312 if (memslot->base_gfn + memslot->npages >= in kvm_arch_prepare_memory_region()
1395 gpa_t gpa = slot->base_gfn << PAGE_SHIFT; in kvm_arch_flush_shadow_memslot()
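The arm64 fragments at mmu.c:591-593 show the standard dirty-logging pattern: a 64-page dirty mask is turned into a guest-physical range by offsetting from the slot's base_gfn and shifting by PAGE_SHIFT. A standalone sketch of that arithmetic (not kernel code; __builtin_ctzll/__builtin_clzll stand in for the kernel's __ffs/__fls, and the mask is assumed non-zero):

/* Sketch: dirty-bitmap mask plus base gfn -> guest-physical address range,
 * mirroring kvm_mmu_write_protect_pt_masked() at mmu.c:591-593 above. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

static int lowest_set_bit(uint64_t mask)  { return __builtin_ctzll(mask); }       /* like __ffs */
static int highest_set_bit(uint64_t mask) { return 63 - __builtin_clzll(mask); }  /* like __fls */

int main(void)
{
	uint64_t base_gfn = 0x100;   /* slot->base_gfn + gfn_offset */
	uint64_t mask = 0x00f0;      /* pages 4..7 of this 64-page block are dirty */

	uint64_t start = (base_gfn + lowest_set_bit(mask)) << PAGE_SHIFT;
	uint64_t end   = (base_gfn + highest_set_bit(mask) + 1) << PAGE_SHIFT;

	printf("write-protect GPA range [0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}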
/Linux-v5.10/arch/powerpc/kvm/
trace_hv.h
285 __field(u64, base_gfn)
297 __entry->base_gfn = memslot ? memslot->base_gfn : -1UL;
305 __entry->base_gfn, __entry->slot_flags)
book3s_hv_uvmem.c
259 p->base_pfn = slot->base_gfn; in kvmppc_uvmem_slot_init()
277 if (p->base_pfn == slot->base_gfn) { in kvmppc_uvmem_slot_free()
390 unsigned long gfn = memslot->base_gfn; in kvmppc_memslot_page_merge()
441 memslot->base_gfn << PAGE_SHIFT, in __kvmppc_uvmem_memslot_create()
612 gfn = slot->base_gfn; in kvmppc_uvmem_drop_pages()
786 unsigned long gfn = memslot->base_gfn; in kvmppc_uv_migrate_mem_slot()
book3s_64_mmu_hv.c
568 if (gfn_base < memslot->base_gfn) in kvmppc_book3s_hv_page_fault()
682 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; in kvmppc_book3s_hv_page_fault()
849 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_unmap_rmapp()
895 gfn = memslot->base_gfn; in kvmppc_core_flush_memslot_hv()
924 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_rmapp()
987 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_test_age_rmapp()
1126 if (gfn < memslot->base_gfn || in kvmppc_harvest_vpa_dirty()
1127 gfn >= memslot->base_gfn + memslot->npages) in kvmppc_harvest_vpa_dirty()
1132 __set_bit_le(gfn - memslot->base_gfn, map); in kvmppc_harvest_vpa_dirty()
1207 set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap); in kvmppc_unpin_guest_page()
[all …]
book3s_64_mmu_radix.c
1034 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_age_radix()
1065 unsigned long gfn = memslot->base_gfn + pagenum; in kvm_radix_test_clear_dirty()
1108 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in kvm_radix_test_clear_dirty()
1156 gpa = memslot->base_gfn << PAGE_SHIFT; in kvmppc_radix_flush_memslot()
book3s_hv_rm_mmu.c
117 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
155 rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); in revmap_for_hpte()
246 slot_fn = gfn - memslot->base_gfn; in kvmppc_do_h_enter()
e500_mmu_host.c
381 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
book3s_hv_nested.c
907 gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn; in kvmhv_remove_nest_rmap_range()
1435 rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; in __kvmhv_nested_page_fault()
book3s_pr.c
1899 ga = memslot->base_gfn << PAGE_SHIFT; in kvm_vm_ioctl_get_dirty_log_pr()
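Several of the powerpc hits above index per-slot metadata as arch.rmap[gfn - memslot->base_gfn] (book3s_64_mmu_hv.c:682, 849, 924, 987; book3s_64_mmu_radix.c:1034, 1108; book3s_hv_nested.c:1435). A minimal sketch of that idea, with illustrative types and a hypothetical allocator, showing why the array needs exactly npages entries:

/* Illustrative sketch of the "rmap[gfn - base_gfn]" indexing pattern; not kernel code. */
#include <stdint.h>
#include <stdlib.h>

typedef uint64_t gfn_t;

struct slot {
	gfn_t base_gfn;          /* first guest frame covered by the slot */
	unsigned long npages;    /* number of guest pages in the slot */
	unsigned long *rmap;     /* one reverse-map entry per guest page */
};

static int slot_init(struct slot *s, gfn_t base_gfn, unsigned long npages)
{
	s->base_gfn = base_gfn;
	s->npages = npages;
	s->rmap = calloc(npages, sizeof(*s->rmap));
	return s->rmap ? 0 : -1;
}

/* Caller must ensure base_gfn <= gfn < base_gfn + npages. */
static unsigned long *gfn_to_rmap(struct slot *s, gfn_t gfn)
{
	return &s->rmap[gfn - s->base_gfn];
}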
/Linux-v5.10/arch/x86/kvm/mmu/
tdp_mmu.c
840 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_wrprot_slot()
841 slot->base_gfn + slot->npages, min_level); in kvm_tdp_mmu_wrprot_slot()
908 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_clear_dirty_slot()
909 slot->base_gfn + slot->npages); in kvm_tdp_mmu_clear_dirty_slot()
1031 spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_slot_set_dirty()
1032 slot->base_gfn + slot->npages); in kvm_tdp_mmu_slot_set_dirty()
1091 zap_collapsible_spte_range(kvm, root, slot->base_gfn, in kvm_tdp_mmu_zap_collapsible_sptes()
1092 slot->base_gfn + slot->npages); in kvm_tdp_mmu_zap_collapsible_sptes()
page_track.c
64 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in update_gfn_track()
154 index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K); in kvm_page_track_is_active()
paging_tmpl.h
640 gfn_t base_gfn = gw->gfn; in FNAME() local
701 base_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in FNAME()
710 sp = kvm_mmu_get_page(vcpu, base_gfn, addr, in FNAME()
719 it.level, base_gfn, pfn, prefault, map_writable); in FNAME()
mmu.c
733 idx = gfn_to_index(gfn, slot->base_gfn, level); in lpage_info_slot()
946 idx = gfn_to_index(gfn, slot->base_gfn, level); in __gfn_to_rmap()
1230 slot->base_gfn + gfn_offset, mask, true); in kvm_mmu_write_protect_pt_masked()
1232 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_write_protect_pt_masked()
1259 slot->base_gfn + gfn_offset, mask, false); in kvm_mmu_clear_dirty_pt_masked()
1261 rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask), in kvm_mmu_clear_dirty_pt_masked()
2873 gfn_t base_gfn = gfn; in __direct_map() local
2891 base_gfn = gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1); in __direct_map()
2897 sp = kvm_mmu_get_page(vcpu, base_gfn, it.addr, in __direct_map()
2908 write, level, base_gfn, pfn, prefault, in __direct_map()
[all …]
/Linux-v5.10/include/linux/
kvm_host.h
342 gfn_t base_gfn; member
1076 if (gfn >= memslots[slot].base_gfn && in search_memslots()
1077 gfn < memslots[slot].base_gfn + memslots[slot].npages) in search_memslots()
1083 if (gfn >= memslots[slot].base_gfn) in search_memslots()
1089 if (start < slots->used_slots && gfn >= memslots[start].base_gfn && in search_memslots()
1090 gfn < memslots[start].base_gfn + memslots[start].npages) { in search_memslots()
1107 return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; in __gfn_to_hva_memslot()
1120 return slot->base_gfn + gfn_offset; in hva_to_gfn_memslot()
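The two kvm_host.h fragments at lines 1107 and 1120 are the inverse gfn/hva conversions: both are simple offsets from the (base_gfn, userspace_addr) pair. A self-contained sketch, with the memslot struct reduced to the fields those conversions use (the rest of the field set is not shown above and is assumed):

/* Sketch of the gfn <-> hva conversions quoted at kvm_host.h:1107 and :1120. */
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

typedef uint64_t gfn_t;

struct memslot {
	gfn_t base_gfn;               /* first guest frame covered by the slot */
	unsigned long npages;         /* number of guest pages in the slot */
	unsigned long userspace_addr; /* host VA that backs base_gfn */
};

static unsigned long gfn_to_hva_memslot(const struct memslot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static gfn_t hva_to_gfn_memslot(unsigned long hva, const struct memslot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}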
/Linux-v5.10/virt/kvm/
kvm_main.c
997 if (memslot->base_gfn > mslots[i + 1].base_gfn) in kvm_memslot_move_backward()
1000 WARN_ON_ONCE(memslot->base_gfn == mslots[i + 1].base_gfn); in kvm_memslot_move_backward()
1024 if (memslot->base_gfn < mslots[i - 1].base_gfn) in kvm_memslot_move_forward()
1027 WARN_ON_ONCE(memslot->base_gfn == mslots[i - 1].base_gfn); in kvm_memslot_move_forward()
1321 new.base_gfn = mem->guest_phys_addr >> PAGE_SHIFT; in __kvm_set_memory_region()
1339 if (new.base_gfn != old.base_gfn) in __kvm_set_memory_region()
1356 if (!((new.base_gfn + new.npages <= tmp->base_gfn) || in __kvm_set_memory_region()
1357 (new.base_gfn >= tmp->base_gfn + tmp->npages))) in __kvm_set_memory_region()
1736 *nr_pages = slot->npages - (gfn - slot->base_gfn); in __gfn_to_hva_many()
2649 unsigned long rel_gfn = gfn - memslot->base_gfn; in mark_page_dirty_in_slot()
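The check quoted at kvm_main.c:1356-1357 rejects a new slot whose [base_gfn, base_gfn + npages) range intersects an existing one: two half-open ranges are disjoint iff one ends at or before the other begins. A standalone sketch of that test (struct name is illustrative, not the kernel's):

/* Standalone sketch of the memslot overlap test at kvm_main.c:1356-1357. */
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t gfn_t;

struct slot_range {
	gfn_t base_gfn;
	unsigned long npages;
};

static bool slots_overlap(const struct slot_range *a, const struct slot_range *b)
{
	return !((a->base_gfn + a->npages <= b->base_gfn) ||
		 (a->base_gfn >= b->base_gfn + b->npages));
}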
/Linux-v5.10/arch/mips/kvm/
mmu.c
419 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked() local
420 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
421 gfn_t end = base_gfn + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
mips.c
222 kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, in kvm_arch_flush_shadow_memslot()
223 slot->base_gfn + slot->npages - 1); in kvm_arch_flush_shadow_memslot()
263 needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, in kvm_arch_commit_memory_region()
264 new->base_gfn + new->npages - 1); in kvm_arch_commit_memory_region()
/Linux-v5.10/arch/s390/kvm/
kvm-s390.c
601 cur_gfn = memslot->base_gfn; in kvm_arch_sync_dirty_log()
602 last_gfn = memslot->base_gfn + memslot->npages; in kvm_arch_sync_dirty_log()
1930 if (gfn >= memslots[slot].base_gfn && in gfn_to_memslot_approx()
1931 gfn < memslots[slot].base_gfn + memslots[slot].npages) in gfn_to_memslot_approx()
1937 if (gfn >= memslots[slot].base_gfn) in gfn_to_memslot_approx()
1946 if (gfn >= memslots[start].base_gfn && in gfn_to_memslot_approx()
1947 gfn < memslots[start].base_gfn + memslots[start].npages) { in gfn_to_memslot_approx()
1982 unsigned long ofs = cur_gfn - ms->base_gfn; in kvm_s390_next_dirty_cmma()
1984 if (ms->base_gfn + ms->npages <= cur_gfn) { in kvm_s390_next_dirty_cmma()
1999 return ms->base_gfn + ofs; in kvm_s390_next_dirty_cmma()
[all …]
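gfn_to_memslot_approx() at kvm-s390.c:1930-1947, like search_memslots() in kvm_host.h above, binary-searches an array of memslots kept sorted by descending base_gfn for the slot covering a gfn. A standalone sketch of that lookup under the same sort-order assumption (types and function name are illustrative):

/* Sketch: find the slot covering gfn in an array sorted by descending base_gfn. */
#include <stddef.h>
#include <stdint.h>

typedef uint64_t gfn_t;

struct slot {
	gfn_t base_gfn;
	unsigned long npages;
};

static struct slot *find_memslot(struct slot *slots, int used_slots, gfn_t gfn)
{
	int start = 0, end = used_slots;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;          /* candidate is at mid or at a lower index */
		else
			start = mid + 1;    /* slot must have a smaller base_gfn, i.e. higher index */
	}

	if (start < used_slots && gfn >= slots[start].base_gfn &&
	    gfn < slots[start].base_gfn + slots[start].npages)
		return &slots[start];

	return NULL;                        /* no slot covers this gfn */
}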
pv.c
135 npages = memslot->base_gfn + memslot->npages; in kvm_s390_pv_alloc_vm()
priv.c
1181 if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms))) in __do_essa()
/Linux-v5.10/include/trace/events/
kvm.h
272 __entry->hva = ((gfn - slot->base_gfn) <<
/Linux-v5.10/arch/powerpc/include/asm/
kvm_book3s_64.h
494 return !(memslot->base_gfn & mask) && !(memslot->npages & mask); in slot_is_aligned()
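slot_is_aligned() at kvm_book3s_64.h:494 accepts a slot for a given large-page size only if both its starting gfn and its length are multiples of that size. A sketch of the check; only the return expression is quoted above, so the mask construction here is an assumption:

/* Sketch of the large-page alignment check; mask construction is assumed. */
#include <stdbool.h>
#include <stdint.h>

#define PAGE_SHIFT 12

typedef uint64_t gfn_t;

struct slot {
	gfn_t base_gfn;
	unsigned long npages;
};

static bool slot_is_aligned(const struct slot *memslot, unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}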
/Linux-v5.10/arch/x86/include/asm/
kvm_host.h
123 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index() argument
127 (base_gfn >> KVM_HPAGE_GFN_SHIFT(level)); in gfn_to_index()
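gfn_to_index() at kvm_host.h:123-127 computes a gfn's index within its slot at a given hugepage level by shifting both gfn and base_gfn down to that level's granularity before subtracting. A standalone sketch; the KVM_HPAGE_GFN_SHIFT definition (9 bits per level above 4K) is an assumption here, not quoted above:

/* Sketch of gfn_to_index(): the page's index within the slot at 'level'. */
typedef unsigned long long gfn_t;

#define KVM_HPAGE_GFN_SHIFT(level) (((level) - 1) * 9)  /* assumed: 0 for 4K, 9 for 2M, 18 for 1G */

static gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
	       (base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}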
/Linux-v5.10/arch/x86/kvm/
x86.c
10477 lpages = gfn_to_index(slot->base_gfn + npages - 1, in kvm_alloc_memslot_metadata()
10478 slot->base_gfn, level) + 1; in kvm_alloc_memslot_metadata()
10494 if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10496 if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1)) in kvm_alloc_memslot_metadata()
10503 if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) { in kvm_alloc_memslot_metadata()
/Linux-v5.10/drivers/gpu/drm/i915/gvt/
kvmgt.c
1777 gfn = slot->base_gfn + i; in kvmgt_page_track_flush_slot()
