Home
last modified time | relevance | path

Searched refs:gfn_t (Results 1 – 25 of 42) sorted by relevance

12

/Linux-v6.6/arch/x86/kvm/mmu/
tdp_mmu.h:23 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
43 gfn_t gfn, unsigned long mask,
49 struct kvm_memory_slot *slot, gfn_t gfn,
54 gfn_t start, gfn_t end,
mmu_internal.h:80 gfn_t gfn;
160 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level) in gfn_round_for_level()
166 gfn_t gfn, bool can_unsync, bool prefetch);
168 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
169 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
175 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level) in kvm_flush_remote_tlbs_gfn()
231 gfn_t gfn;
340 const struct kvm_memory_slot *slot, gfn_t gfn,
tdp_mmu.c:192 gfn_t gfn, union kvm_mmu_page_role role) in tdp_mmu_init_sp()
258 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
324 gfn_t base_gfn = sp->gfn; in handle_removed_pt()
333 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level); in handle_removed_pt()
419 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn, in handle_changed_spte()
604 u64 old_spte, u64 new_spte, gfn_t gfn, int level) in tdp_mmu_set_spte()
690 static inline gfn_t tdp_mmu_max_gfn_exclusive(void) in tdp_mmu_max_gfn_exclusive()
706 gfn_t end = tdp_mmu_max_gfn_exclusive(); in __tdp_mmu_zap_root()
707 gfn_t start = 0; in __tdp_mmu_zap_root()
792 gfn_t start, gfn_t end, bool can_yield, bool flush) in tdp_mmu_zap_leafs()
[all …]
tdp_iter.h:83 gfn_t next_last_level_gfn;
89 gfn_t yielded_gfn;
95 gfn_t gfn;
134 int min_level, gfn_t next_last_level_gfn);
page_track.c:62 static void update_gfn_write_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_write_track()
78 gfn_t gfn) in __kvm_write_track_add_gfn()
101 struct kvm_memory_slot *slot, gfn_t gfn) in __kvm_write_track_remove_gfn()
124 const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_gfn_is_write_tracked()
255 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn) in kvm_write_track_add_gfn()
285 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn) in kvm_write_track_remove_gfn()
page_track.h:19 gfn_t gfn);
21 struct kvm_memory_slot *slot, gfn_t gfn);
24 const struct kvm_memory_slot *slot, gfn_t gfn);
mmu.c:277 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_arch_flush_remote_tlbs_range()
285 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index);
291 gfn_t gfn = kvm_mmu_page_get_gfn(sp, spte_index(sptep)); in kvm_flush_remote_tlbs_sptep()
305 static gfn_t get_mmio_spte_gfn(u64 spte) in get_mmio_spte_gfn()
721 static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index) in kvm_mmu_page_get_gfn()
759 gfn_t gfn, unsigned int access) in kvm_mmu_page_set_translation()
780 gfn_t gfn = kvm_mmu_page_get_gfn(sp, index); in kvm_mmu_page_set_access()
789 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot()
799 gfn_t gfn, int count) in update_gfn_disallow_lpage_count()
811 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn) in kvm_mmu_gfn_disallow_lpage()
[all …]
paging_tmpl.h:83 gfn_t table_gfn[PT_MAX_FULL_LEVELS];
91 gfn_t gfn;
96 static inline gfn_t pse36_gfn_delta(u32 gpte) in pse36_gfn_delta()
104 static gfn_t gpte_to_gfn_lvl(pt_element_t gpte, int lvl) in gpte_to_gfn_lvl()
205 gfn_t table_gfn; in FNAME()
309 gfn_t table_gfn; in FNAME()
322 gfn_t gfn; in FNAME()
538 gfn_t gfn; in FNAME()
634 gfn_t base_gfn = fault->gfn; in FNAME()
666 gfn_t table_gfn; in FNAME()
[all …]
mmutrace.h:212 TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
217 __field(gfn_t, gfn)
235 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
240 __field(gfn_t, gfn)
335 TP_PROTO(int level, gfn_t gfn, u64 *sptep),
393 TP_PROTO(int as_id, gfn_t gfn, int level, u64 old_spte, u64 new_spte),
tdp_iter.c:40 int min_level, gfn_t next_last_level_gfn) in tdp_iter_start()
/Linux-v6.6/include/linux/
kvm_host.h:263 gfn_t start;
264 gfn_t end;
583 gfn_t base_gfn;
1054 gfn_t start) in kvm_memslot_iter_start()
1112 static inline bool kvm_memslot_iter_is_valid(struct kvm_memslot_iter *iter, gfn_t end) in kvm_memslot_iter_is_valid()
1168 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
1171 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1172 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1173 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1174 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
[all …]
kvm_types.h:41 typedef u64 gfn_t; typedef
/Linux-v6.6/arch/x86/include/asm/
kvm_page_track.h:43 void (*track_remove_region)(gfn_t gfn, unsigned long nr_pages,
52 int kvm_write_track_add_gfn(struct kvm *kvm, gfn_t gfn);
53 int kvm_write_track_remove_gfn(struct kvm *kvm, gfn_t gfn);
kvm_host.h:922 gfn_t mmio_gfn;
941 gfn_t gfns[ASYNC_PF_PER_VCPU];
1606 int (*flush_remote_tlbs_range)(struct kvm *kvm, gfn_t gfn,
1607 gfn_t nr_pages);
1666 u8 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
1786 gfn_t gfn;
2033 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
2174 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
/Linux-v6.6/arch/mips/kvm/
mmu.c:270 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) in kvm_mips_flush_gpa_pt()
396 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) in BUILD_PTE_RANGE_OP()
416 gfn_t gfn_offset, unsigned long mask) in kvm_arch_mmu_enable_log_dirty_pt_masked()
418 gfn_t base_gfn = slot->base_gfn + gfn_offset; in kvm_arch_mmu_enable_log_dirty_pt_masked()
419 gfn_t start = base_gfn + __ffs(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
420 gfn_t end = base_gfn + __fls(mask); in kvm_arch_mmu_enable_log_dirty_pt_masked()
433 static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn, in BUILD_PTE_RANGE_OP()
434 gfn_t end_gfn) in BUILD_PTE_RANGE_OP()
515 gfn_t gfn = gpa >> PAGE_SHIFT; in _kvm_mips_map_page_fast()
592 gfn_t gfn = gpa >> PAGE_SHIFT; in kvm_mips_map_page()
/Linux-v6.6/arch/x86/kvm/
mmu.h:66 static inline gfn_t kvm_mmu_max_gfn(void) in kvm_mmu_max_gfn()
240 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
269 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level) in gfn_to_index()
kvm_onhyperv.h:10 int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, gfn_t nr_pages);
kvm_onhyperv.c:95 int hv_flush_remote_tlbs_range(struct kvm *kvm, gfn_t start_gfn, gfn_t nr_pages) in hv_flush_remote_tlbs_range()
x86.h:219 gva_t gva, gfn_t gfn, unsigned access) in vcpu_cache_mmio_info()
311 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
314 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
mtrr.c:321 gfn_t start, end; in update_mtrr()
614 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_mtrr_get_guest_memory_type()
690 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_mtrr_check_gfn_range_consistency()
/Linux-v6.6/virt/kvm/
kvm_main.c:369 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages) in kvm_flush_remote_tlbs_range()
1929 gfn_t start, gfn_t end) in kvm_check_memslot_overlap()
1956 gfn_t base_gfn; in __kvm_set_memory_region()
2192 gfn_t offset; in kvm_get_dirty_log_protect()
2261 gfn_t offset; in kvm_clear_dirty_log_protect()
2345 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot()
2351 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot()
2384 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn()
2392 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_is_visible_gfn()
2400 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_host_page_size()
[all …]
/Linux-v6.6/drivers/gpu/drm/i915/gvt/
kvmgt.c:92 gfn_t gfn;
100 gfn_t gfn;
111 static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages,
228 static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn) in __gvt_cache_find_gfn()
246 static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, in __gvt_cache_add()
349 __kvmgt_protect_table_find(struct intel_vgpu *info, gfn_t gfn) in __kvmgt_protect_table_find()
365 static bool kvmgt_gfn_is_write_protected(struct intel_vgpu *info, gfn_t gfn) in kvmgt_gfn_is_write_protected()
373 static void kvmgt_protect_table_add(struct intel_vgpu *info, gfn_t gfn) in kvmgt_protect_table_add()
388 static void kvmgt_protect_table_del(struct intel_vgpu *info, gfn_t gfn) in kvmgt_protect_table_del()
1596 static void kvmgt_page_track_remove_region(gfn_t gfn, unsigned long nr_pages, in kvmgt_page_track_remove_region()
/Linux-v6.6/arch/mips/include/asm/
kvm_host.h:808 bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
809 int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn);
/Linux-v6.6/arch/powerpc/kvm/
e500_mmu_host.c:323 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map()
560 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_tlb1_map()
612 gfn_t gfn = gpaddr >> PAGE_SHIFT; in kvmppc_mmu_map()
/Linux-v6.6/arch/riscv/kvm/
vcpu_exit.c:19 gfn_t gfn; in gstage_page_fault()

12