Home
last modified time | relevance | path

Searched refs: gfn_t (Results 1 – 25 of 30) sorted by relevance

12

/Linux-v5.4/include/linux/
kvm_host.h:344 gfn_t base_gfn;
702 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
705 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
706 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
707 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
708 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
709 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
715 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
716 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
717 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
[all …]
kvm_types.h:36 typedef u64 gfn_t; typedef
/Linux-v5.4/arch/mips/kvm/
mmu.c:299 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) in kvm_mips_flush_gpa_pt()
423 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) in BUILD_PTE_RANGE_OP()
443 gfn_t gfn_offset, unsigned long mask) in kvm_arch_mmu_enable_log_dirty_pt_masked()
445 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
446 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
447 gfn_t end = base_gfn + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
460 static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn, in BUILD_PTE_RANGE_OP()
461 gfn_t end_gfn) in BUILD_PTE_RANGE_OP()
471 int (*handler)(struct kvm *kvm, gfn_t gfn, in handle_hva_to_gpa()
486 gfn_t gfn, gfn_end; in handle_hva_to_gpa()
[all …]
/Linux-v5.4/arch/x86/include/asm/
kvm_page_track.h:58 struct kvm_memory_slot *slot, gfn_t gfn,
61 struct kvm_memory_slot *slot, gfn_t gfn,
63 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
kvm_host.h:120 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index()
327 gfn_t gfn;
331 gfn_t *gfns;
718 gfn_t mmio_gfn;
735 gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
1106 u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1160 gfn_t offset, unsigned long mask);
1223 gfn_t gfn;
1276 gfn_t gfn_offset, unsigned long mask);
1404 gfn_t gfn, void *data, int offset, int len,
[all …]
/Linux-v5.4/virt/kvm/
kvm_main.c:147 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
989 gfn_t base_gfn; in __kvm_set_memory_region()
1266 gfn_t offset; in kvm_get_dirty_log_protect()
1301 gfn_t offset; in kvm_clear_dirty_log_protect()
1374 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot()
1380 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot()
1385 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn()
1397 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size()
1426 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in __gfn_to_hva_many()
1427 gfn_t *nr_pages, bool write) in __gfn_to_hva_many()
[all …]
/Linux-v5.4/arch/x86/kvm/
mmu.h:206 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
208 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
209 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
page_track.c:63 static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_track()
91 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_add_page()
126 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_remove_page()
145 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_page_track_is_active()
mmu.c:471 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn()
486 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte()
633 static gfn_t pse36_gfn_delta(u32 gpte) in pse36_gfn_delta()
1150 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
1158 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn()
1176 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot()
1187 gfn_t gfn, int count) in update_gfn_disallow_lpage_count()
1199 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_disallow_lpage()
1204 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_allow_lpage()
1213 gfn_t gfn; in account_shadowed()
[all …]
paging_tmpl.h:87 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
95 gfn_t gfn;
99 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) in gpte_to_gfn_lvl()
228 gfn_t table_gfn; in FNAME()
303 gfn_t table_gfn; in FNAME()
316 gfn_t gfn; in FNAME()
348 gfn_t real_gfn; in FNAME()
520 gfn_t gfn; in FNAME()
624 gfn_t gfn, base_gfn; in FNAME()
646 gfn_t table_gfn; in FNAME()
[all …]
mmutrace.h:205 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
210 __field(gfn_t, gfn)
228 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
233 __field(gfn_t, gfn)
331 TP_PROTO(int level, gfn_t gfn, u64 *sptep),
x86.h:187 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info()
282 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
286 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
mmu_audit.c:96 gfn_t gfn; in audit_mappings()
133 gfn_t gfn; in inspect_spte_has_rmap()
mtrr.c:309 gfn_t start, end; in update_mtrr()
615 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_mtrr_get_guest_memory_type()
691 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_mtrr_check_gfn_range_consistency()
x86.c:653 gfn_t ngfn, void *data, int offset, int len, in kvm_read_guest_page_mmu()
657 gfn_t real_gfn; in kvm_read_guest_page_mmu()
671 static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_read_nested_guest_page()
689 gfn_t pdpt_gfn = cr3 >> PAGE_SHIFT; in load_pdptrs()
727 gfn_t gfn; in pdptrs_changed()
9972 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn) in kvm_async_pf_hash_fn()
9982 static void kvm_add_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_add_async_pf_gfn()
9992 static u32 kvm_async_pf_gfn_slot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_async_pf_gfn_slot()
10005 bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_find_async_pf_gfn()
10010 static void kvm_del_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_del_async_pf_gfn()
hyperv.c:569 gfn_t msg_page_gfn; in synic_deliver_msg()
/Linux-v5.4/drivers/gpu/drm/i915/gvt/
kvmgt.c:87 gfn_t gfn;
105 gfn_t gfn;
238 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) in __gvt_cache_find_gfn()
256 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, in __gvt_cache_add()
359 __kvmgt_protect_table_find(struct kvmgt_guest_info *info, gfn_t gfn) in __kvmgt_protect_table_find()
374 gfn_t gfn) in kvmgt_gfn_is_write_protected()
382 static void kvmgt_protect_table_add(struct kvmgt_guest_info *info, gfn_t gfn) in kvmgt_protect_table_add()
398 gfn_t gfn) in kvmgt_protect_table_del()
1719 gfn_t gfn; in kvmgt_page_track_flush_slot()
/Linux-v5.4/arch/powerpc/kvm/
e500_mmu_host.c:323 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map()
560 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_tlb1_map()
612 gfn_t gfn = gpaddr >> PAGE_SHIFT; in kvmppc_mmu_map()
book3s.c:430 gfn_t gfn = gpa >> PAGE_SHIFT; in kvmppc_gpa_to_pfn()
booke.c:1239 gfn_t gfn; in kvmppc_handle_exit()
1296 gfn_t gfn; in kvmppc_handle_exit()
book3s_xive_native.c:547 gfn_t gfn; in kvmppc_xive_native_set_queue_config()
book3s_64_mmu_hv.c:782 gfn_t gfn, gfn_end; in kvm_handle_hva_range()
book3s_pr.c:403 gfn_t gfn, gfn_end; in do_kvm_unmap_hva()
/Linux-v5.4/arch/mips/include/asm/
kvm_host.h:915 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
916 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
/Linux-v5.4/virt/kvm/arm/
mmu.c:1374 gfn_t gfn = *ipap >> PAGE_SHIFT; in transparent_hugepage_adjust()
1561 gfn_t gfn_offset, unsigned long mask) in kvm_mmu_write_protect_pt_masked()
1579 gfn_t gfn_offset, unsigned long mask) in kvm_arch_mmu_enable_log_dirty_pt_masked()
1672 gfn_t gfn = fault_ipa >> PAGE_SHIFT; in user_mem_abort()
1906 gfn_t gfn; in kvm_handle_guest_abort()
2017 gfn_t gpa; in handle_hva_to_gpa()

12