Searched refs:gfn (Results 1 – 25 of 59) sorted by relevance

/Linux-v4.19/drivers/gpu/drm/i915/gvt/
page_track.c
35 struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_find_page_track() argument
37 return radix_tree_lookup(&vgpu->page_track_tree, gfn); in intel_vgpu_find_page_track()
48 int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn, in intel_vgpu_register_page_track() argument
54 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_register_page_track()
65 ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track); in intel_vgpu_register_page_track()
81 unsigned long gfn) in intel_vgpu_unregister_page_track() argument
85 track = radix_tree_delete(&vgpu->page_track_tree, gfn); in intel_vgpu_unregister_page_track()
88 intel_gvt_hypervisor_disable_page_track(vgpu, gfn); in intel_vgpu_unregister_page_track()
101 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_enable_page_track() argument
106 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_enable_page_track()
[all …]
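
The hits above show that each vGPU's page-track table is a radix tree indexed by gfn. A minimal sketch of how the register path fits together, following the signatures visible in the hits (the handler and priv_data field names of struct intel_vgpu_page_track are assumptions based on the page_track.h declarations below):

static int example_register_track(struct intel_vgpu *vgpu, unsigned long gfn,
                                  gvt_page_track_handler_t handler, void *priv)
{
        struct intel_vgpu_page_track *track;
        int ret;

        /* one tracker per gfn: bail out if the gfn is already tracked */
        if (radix_tree_lookup(&vgpu->page_track_tree, gfn))
                return -EEXIST;

        track = kzalloc(sizeof(*track), GFP_KERNEL);
        if (!track)
                return -ENOMEM;

        track->handler = handler;       /* assumed field names */
        track->priv_data = priv;

        ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
        if (ret)
                kfree(track);           /* insertion failed, don't leak */
        return ret;
}
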
mpt.h
165 struct intel_vgpu *vgpu, unsigned long gfn) in intel_gvt_hypervisor_enable_page_track() argument
167 return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn); in intel_gvt_hypervisor_enable_page_track()
179 struct intel_vgpu *vgpu, unsigned long gfn) in intel_gvt_hypervisor_disable_page_track() argument
181 return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn); in intel_gvt_hypervisor_disable_page_track()
225 struct intel_vgpu *vgpu, unsigned long gfn) in intel_gvt_hypervisor_gfn_to_mfn() argument
227 return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn); in intel_gvt_hypervisor_gfn_to_mfn()
241 struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size, in intel_gvt_hypervisor_dma_map_guest_page() argument
244 return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size, in intel_gvt_hypervisor_dma_map_guest_page()
271 struct intel_vgpu *vgpu, unsigned long gfn, in intel_gvt_hypervisor_map_gfn_to_mfn() argument
279 return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr, in intel_gvt_hypervisor_map_gfn_to_mfn()
[all …]
hypercall.h
47 int (*enable_page_track)(unsigned long handle, u64 gfn);
48 int (*disable_page_track)(unsigned long handle, u64 gfn);
53 unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
55 int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
59 int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
66 bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
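
The mpt.h wrappers above all dispatch through this table of hypervisor callbacks. A hedged sketch of how a backend might wire the gfn hooks (the example_ names and the identity translation are placeholders, not the real implementation):

/* placeholder backend callback matching the gfn_to_mfn slot above */
static unsigned long example_gfn_to_mfn(unsigned long handle,
                                        unsigned long gfn)
{
        return gfn;     /* real backends translate via the hypervisor */
}

static const struct intel_gvt_mpt example_mpt_ops = {
        .gfn_to_mfn = example_gfn_to_mfn,
        /* .enable_page_track, .dma_map_guest_page, ... as declared above */
};
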
page_track.h
42 struct intel_vgpu *vgpu, unsigned long gfn);
45 unsigned long gfn, gvt_page_track_handler_t handler,
48 unsigned long gfn);
50 int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
51 int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
kvmgt.c
80 gfn_t gfn; member
98 gfn_t gfn; member
113 static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_unpin_guest_page() argument
123 unsigned long cur_gfn = gfn + npage; in gvt_unpin_guest_page()
131 static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_pin_guest_page() argument
145 unsigned long cur_gfn = gfn + npage; in gvt_pin_guest_page()
176 gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE); in gvt_pin_guest_page()
180 static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn, in gvt_dma_map_page() argument
187 ret = gvt_pin_guest_page(vgpu, gfn, size, &page); in gvt_dma_map_page()
196 gvt_unpin_guest_page(vgpu, gfn, size); in gvt_dma_map_page()
[all …]
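
The kvmgt.c hits sketch a pin-then-map flow: the guest pages backing the gfn range are pinned first, then DMA-mapped, and unpinned again on failure. A condensed sketch under that reading (the dev parameter and error values are assumptions):

static int example_dma_map(struct intel_vgpu *vgpu, struct device *dev,
                           unsigned long gfn, unsigned long size,
                           dma_addr_t *dma_addr)
{
        struct page *page;
        int ret;

        ret = gvt_pin_guest_page(vgpu, gfn, size, &page);  /* pin first */
        if (ret)
                return ret;

        *dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *dma_addr)) {
                gvt_unpin_guest_page(vgpu, gfn, size);     /* undo the pin */
                return -ENOMEM;
        }
        return 0;
}
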
gtt.c
655 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, in ppgtt_spt_get_entry()
684 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT, in ppgtt_spt_set_entry()
740 if (spt->guest_page.gfn) { in ppgtt_free_spt()
744 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn); in ppgtt_free_spt()
786 struct intel_vgpu *vgpu, unsigned long gfn) in intel_vgpu_find_spt_by_gfn() argument
790 track = intel_vgpu_find_page_track(vgpu, gfn); in intel_vgpu_find_spt_by_gfn()
859 unsigned long gfn, bool guest_pde_ips) in ppgtt_alloc_spt_gfn() argument
871 ret = intel_vgpu_register_page_track(vgpu, gfn, in ppgtt_alloc_spt_gfn()
879 spt->guest_page.gfn = gfn; in ppgtt_alloc_spt_gfn()
882 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn); in ppgtt_alloc_spt_gfn()
[all …]
/Linux-v4.19/arch/x86/kvm/
page_track.c
65 static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn, in update_gfn_track() argument
70 index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL); in update_gfn_track()
93 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_add_page() argument
100 update_gfn_track(slot, gfn, mode, 1); in kvm_slot_page_track_add_page()
106 kvm_mmu_gfn_disallow_lpage(slot, gfn); in kvm_slot_page_track_add_page()
109 if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn)) in kvm_slot_page_track_add_page()
128 struct kvm_memory_slot *slot, gfn_t gfn, in kvm_slot_page_track_remove_page() argument
134 update_gfn_track(slot, gfn, mode, -1); in kvm_slot_page_track_remove_page()
140 kvm_mmu_gfn_allow_lpage(slot, gfn); in kvm_slot_page_track_remove_page()
147 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, in kvm_page_track_is_active() argument
[all …]
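
kvm_slot_page_track_add_page() above bumps a per-slot counter for the gfn and write-protects it, so guest writes fault back into the tracker. A minimal consumer sketch, assuming the caller already holds kvm->srcu for the memslot lookup:

static void example_track_gfn(struct kvm *kvm, gfn_t gfn)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

        if (!slot)
                return;

        spin_lock(&kvm->mmu_lock);
        kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        spin_unlock(&kvm->mmu_lock);
}
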
mmutrace.h
13 __field(__u64, gfn) \
20 __entry->gfn = sp->gfn; \
37 __entry->gfn, role.level, \
205 TP_PROTO(u64 *sptep, gfn_t gfn, unsigned access, unsigned int gen),
206 TP_ARGS(sptep, gfn, access, gen),
210 __field(gfn_t, gfn)
217 __entry->gfn = gfn;
223 __entry->gfn, __entry->access, __entry->gen)
228 TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
229 TP_ARGS(addr, gfn, access),
[all …]
mmu.c
347 static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn, in mark_mmio_spte() argument
352 u64 gpa = gfn << PAGE_SHIFT; in mark_mmio_spte()
360 trace_mark_mmio_spte(sptep, gfn, access, gen); in mark_mmio_spte()
385 static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn, in set_mmio_spte() argument
389 mark_mmio_spte(vcpu, sptep, gfn, access); in set_mmio_spte()
1025 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS)); in kvm_mmu_page_get_gfn()
1028 static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn) in kvm_mmu_page_set_gfn() argument
1031 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index)); in kvm_mmu_page_set_gfn()
1033 sp->gfns[index] = gfn; in kvm_mmu_page_set_gfn()
1040 static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn, in lpage_info_slot() argument
[all …]
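
kvm_mmu_page_get_gfn() above recovers the gfn covered by a given entry of a shadow page: each entry of a level-N page covers 2^((N-1)*9) 4 KiB pages. A standalone model of that arithmetic (values invented for illustration):

#include <stdio.h>
#include <stdint.h>

#define PT64_LEVEL_BITS 9

/* gfn covered by entry 'index' of a shadow page rooted at base_gfn */
static uint64_t entry_gfn(uint64_t base_gfn, int index, int level)
{
        return base_gfn + ((uint64_t)index << ((level - 1) * PT64_LEVEL_BITS));
}

int main(void)
{
        /* entry 3 of a level-2 page at gfn 0x1000 covers 0x1000 + 3*512 */
        printf("gfn = %#llx\n", (unsigned long long)entry_gfn(0x1000, 3, 2));
        return 0;
}
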
mmu_audit.c
99 gfn_t gfn; in audit_mappings() local
116 gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt); in audit_mappings()
117 pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn); in audit_mappings()
136 gfn_t gfn; in inspect_spte_has_rmap() local
139 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt); in inspect_spte_has_rmap()
142 slot = __gfn_to_memslot(slots, gfn); in inspect_spte_has_rmap()
146 audit_printk(kvm, "no memslot for gfn %llx\n", gfn); in inspect_spte_has_rmap()
148 (long int)(sptep - rev_sp->spt), rev_sp->gfn); in inspect_spte_has_rmap()
153 rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot); in inspect_spte_has_rmap()
205 slot = __gfn_to_memslot(slots, sp->gfn); in audit_write_protection()
[all …]
paging_tmpl.h
98 gfn_t gfn; member
298 gfn_t gfn; in FNAME() local
403 gfn = gpte_to_gfn_lvl(pte, walker->level); in FNAME()
404 gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT; in FNAME()
407 gfn += pse36_gfn_delta(pte); in FNAME()
409 real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault); in FNAME()
413 walker->gfn = real_gpa >> PAGE_SHIFT; in FNAME()
502 gfn_t gfn; in FNAME() local
510 gfn = gpte_to_gfn(gpte); in FNAME()
513 pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn, in FNAME()
[all …]
mmu.h
214 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
215 void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
217 struct kvm_memory_slot *slot, u64 gfn);
/Linux-v4.19/include/linux/
kvm_host.h
654 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
657 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
658 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
659 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
660 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
661 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
667 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
668 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
669 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
671 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
[all …]
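
The declarations above form a translation chain: a gfn resolves to a memslot, the memslot yields a host virtual address, and the hva can be pinned down to a host pfn. A hedged sketch of the usual composition (error handling condensed; assumes kvm->srcu is held):

static kvm_pfn_t example_resolve(struct kvm *kvm, gfn_t gfn,
                                 unsigned long *hva)
{
        struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

        if (!slot)
                return KVM_PFN_NOSLOT;

        *hva = gfn_to_hva_memslot(slot, gfn);   /* gfn -> host VA */
        return gfn_to_pfn_memslot(slot, gfn);   /* gfn -> host pfn */
}
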
/Linux-v4.19/include/trace/events/
kvm.h
259 TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
260 TP_ARGS(gfn, level, slot, ref),
264 __field( u64, gfn )
270 __entry->gfn = gfn;
272 __entry->hva = ((gfn - slot->base_gfn) <<
278 __entry->hva, __entry->gfn, __entry->level,
285 TP_PROTO(u64 gva, u64 gfn),
287 TP_ARGS(gva, gfn),
291 __field(u64, gfn)
296 __entry->gfn = gfn;
[all …]
/Linux-v4.19/virt/kvm/
kvm_main.c
130 static void mark_page_dirty_in_slot(struct kvm_memory_slot *memslot, gfn_t gfn);
1221 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
1223 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
1227 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn) in kvm_vcpu_gfn_to_memslot() argument
1229 return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn); in kvm_vcpu_gfn_to_memslot()
1232 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
1234 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
1244 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size() argument
1251 addr = gfn_to_hva(kvm, gfn); in kvm_host_page_size()
1273 static unsigned long __gfn_to_hva_many(struct kvm_memory_slot *slot, gfn_t gfn, in __gfn_to_hva_many() argument
[all …]
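
Behind __gfn_to_hva_many() above, the hva is just the slot's userspace base plus the gfn's page offset within the slot. A standalone model with invented values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

struct memslot {
        uint64_t base_gfn;
        uint64_t userspace_addr;
};

static uint64_t slot_gfn_to_hva(const struct memslot *slot, uint64_t gfn)
{
        return slot->userspace_addr + ((gfn - slot->base_gfn) << PAGE_SHIFT);
}

int main(void)
{
        struct memslot slot = {
                .base_gfn = 0x100,
                .userspace_addr = 0x7f0000000000ULL,
        };
        /* gfn 0x104 is 4 pages into the slot: 0x7f0000004000 */
        printf("hva = %#llx\n",
               (unsigned long long)slot_gfn_to_hva(&slot, 0x104));
        return 0;
}
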
/Linux-v4.19/drivers/xen/
xlate_mmu.c
44 typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
83 static void setup_hparams(unsigned long gfn, void *data) in setup_hparams() argument
88 info->h_gpfns[info->h_iter] = gfn; in setup_hparams()
145 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
158 data.fgfn = gfn; in xen_xlate_remap_gfn_array()
174 static void unmap_gfn(unsigned long gfn, void *data) in unmap_gfn() argument
179 xrp.gpfn = gfn; in unmap_gfn()
197 static void setup_balloon_gfn(unsigned long gfn, void *data) in setup_balloon_gfn() argument
201 info->pfns[info->idx++] = gfn; in setup_balloon_gfn()
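
The xlate_mmu.c hits use a per-gfn callback (xen_gfn_fn_t) so one walker can feed several consumers. A small sketch of a callback that batches gfns for a single hypercall (the struct and its size are invented):

struct example_batch {
        xen_pfn_t gfns[16];
        int n;
};

/* matches the xen_gfn_fn_t signature declared above */
static void example_collect_gfn(unsigned long gfn, void *data)
{
        struct example_batch *b = data;

        if (b->n < 16)
                b->gfns[b->n++] = gfn;
}
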
/Linux-v4.19/arch/powerpc/kvm/
book3s_64_mmu_hv.c
494 unsigned long gpa, gfn, hva, pfn; in kvmppc_book3s_hv_page_fault() local
556 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_hv_page_fault()
557 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_hv_page_fault()
585 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_hv_page_fault()
758 unsigned long gfn);
773 gfn_t gfn, gfn_end; in kvm_handle_hva_range() local
784 gfn = hva_to_gfn_memslot(hva_start, memslot); in kvm_handle_hva_range()
787 for (; gfn < gfn_end; ++gfn) { in kvm_handle_hva_range()
788 ret = handler(kvm, memslot, gfn); in kvm_handle_hva_range()
805 unsigned long *rmapp, unsigned long gfn) in kvmppc_unmap_hpte() argument
[all …]
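
The book3s fault-path hits reduce a faulting guest physical address to a gfn, resolve it to a memslot, and take the backing hva. A hedged sketch of that chain (validity checks condensed):

static unsigned long example_gpa_to_hva(struct kvm *kvm, unsigned long gpa)
{
        unsigned long gfn = gpa >> PAGE_SHIFT;
        struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);

        if (!memslot)
                return KVM_HVA_ERR_BAD; /* no memory backs this gpa */

        return gfn_to_hva_memslot(memslot, gfn);
}
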
book3s_64_mmu_radix.c
208 unsigned long gfn = gpa >> PAGE_SHIFT; in kvmppc_unmap_pte() local
211 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_unmap_pte()
213 kvmppc_update_dirty_map(memslot, gfn, page_size); in kvmppc_unmap_pte()
529 unsigned long gpa, gfn, hva; in kvmppc_book3s_radix_page_fault() local
555 gfn = gpa >> PAGE_SHIFT; in kvmppc_book3s_radix_page_fault()
558 memslot = gfn_to_memslot(kvm, gfn); in kvmppc_book3s_radix_page_fault()
624 hva = gfn_to_hva_memslot(memslot, gfn); in kvmppc_book3s_radix_page_fault()
631 pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL, in kvmppc_book3s_radix_page_fault()
708 unsigned long gfn) in kvm_unmap_radix() argument
711 unsigned long gpa = gfn << PAGE_SHIFT; in kvm_unmap_radix()
[all …]
e500_mmu_host.c
326 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, in kvmppc_e500_shadow_map() argument
356 slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn); in kvmppc_e500_shadow_map()
357 hva = gfn_to_hva_memslot(slot, gfn); in kvmppc_e500_shadow_map()
384 slot_start = pfn - (gfn - slot->base_gfn); in kvmppc_e500_shadow_map()
412 gfn_start = gfn & ~(tsize_pages - 1); in kvmppc_e500_shadow_map()
415 if (gfn_start + pfn - gfn < start) in kvmppc_e500_shadow_map()
417 if (gfn_end + pfn - gfn > end) in kvmppc_e500_shadow_map()
419 if ((gfn & (tsize_pages - 1)) != in kvmppc_e500_shadow_map()
452 pfn = gfn_to_pfn_memslot(slot, gfn); in kvmppc_e500_shadow_map()
456 __func__, (long)gfn); in kvmppc_e500_shadow_map()
[all …]
book3s_hv_rm_mmu.c
111 unsigned long gfn, unsigned long psize) in kvmppc_update_dirty_map() argument
118 gfn -= memslot->base_gfn; in kvmppc_update_dirty_map()
119 set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); in kvmppc_update_dirty_map()
127 unsigned long gfn; in kvmppc_set_dirty_from_hpte() local
131 gfn = hpte_rpn(hpte_gr, psize); in kvmppc_set_dirty_from_hpte()
132 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in kvmppc_set_dirty_from_hpte()
134 kvmppc_update_dirty_map(memslot, gfn, psize); in kvmppc_set_dirty_from_hpte()
145 unsigned long gfn; in revmap_for_hpte() local
147 gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr)); in revmap_for_hpte()
148 memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); in revmap_for_hpte()
[all …]
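
kvmppc_update_dirty_map() above rebases the gfn against the memslot's base_gfn because the dirty bitmap is indexed slot-relative. A standalone model of that bit arithmetic (invented values):

#include <stdio.h>
#include <stdint.h>

static void set_dirty(uint64_t *bitmap, uint64_t base_gfn,
                      uint64_t gfn, unsigned long npages)
{
        uint64_t rel = gfn - base_gfn;  /* slot-relative page index */

        while (npages--) {
                bitmap[rel / 64] |= 1ULL << (rel % 64);
                rel++;
        }
}

int main(void)
{
        uint64_t bitmap[2] = { 0, 0 };

        set_dirty(bitmap, 0x100, 0x13e, 4);     /* marks bits 62..65 */
        printf("%#llx %#llx\n", (unsigned long long)bitmap[0],
               (unsigned long long)bitmap[1]);
        return 0;
}
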
/Linux-v4.19/arch/x86/include/asm/
kvm_page_track.h
58 struct kvm_memory_slot *slot, gfn_t gfn,
61 struct kvm_memory_slot *slot, gfn_t gfn,
63 bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
/Linux-v4.19/arch/x86/xen/
mmu.c
179 xen_pfn_t gfn, int nr, in xen_remap_domain_gfn_range() argument
186 return do_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false, in xen_remap_domain_gfn_range()
193 xen_pfn_t *gfn, int nr, in xen_remap_domain_gfn_array() argument
198 return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, in xen_remap_domain_gfn_array()
206 return do_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid, in xen_remap_domain_gfn_array()
/Linux-v4.19/arch/mips/kvm/
mmu.c
471 int (*handler)(struct kvm *kvm, gfn_t gfn, in handle_hva_to_gpa() argument
486 gfn_t gfn, gfn_end; in handle_hva_to_gpa() local
498 gfn = hva_to_gfn_memslot(hva_start, memslot); in handle_hva_to_gpa()
501 ret |= handler(kvm, gfn, gfn_end, memslot, data); in handle_hva_to_gpa()
508 static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_unmap_hva_handler() argument
511 kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end); in kvm_unmap_hva_handler()
523 static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_set_spte_handler() argument
526 gpa_t gpa = gfn << PAGE_SHIFT; in kvm_set_spte_handler()
564 static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, in kvm_age_hva_handler() argument
567 return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end); in kvm_age_hva_handler()
[all …]
/Linux-v4.19/include/xen/
xen-ops.h
84 xen_pfn_t *gfn, int nr,
125 xen_pfn_t gfn, int nr,
134 xen_pfn_t *gfn, int nr,
147 xen_pfn_t *gfn, int nr, in xen_xlate_remap_gfn_array() argument
/Linux-v4.19/Documentation/virtual/kvm/
locking.txt
42 - SPTE_HOST_WRITEABLE means the gfn is writable on host.
43 - SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when
44 the gfn is writable on guest mmu and it is not write-protected by shadow
53 1): The mapping from gfn to pfn
54 The mapping from gfn to pfn may be changed since we can only ensure the pfn
84 to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
85 to pin gfn to pfn, because after gfn_to_pfn_atomic():
87 be reused for another gfn.
91 Then, we can ensure the dirty bitmap is correctly set for a gfn.
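
A hedged sketch of the rule the excerpt describes: pin the gfn's pfn with gfn_to_pfn_atomic() before the cmpxchg, so the pfn cannot be recycled for another gfn while the writable spte is published locklessly (the function name and the PT_WRITABLE_MASK update are illustrative, not the actual fast-fault code):

static bool example_fast_make_writable(struct kvm *kvm, gfn_t gfn,
                                       u64 *sptep, u64 old_spte)
{
        u64 new_spte = old_spte | PT_WRITABLE_MASK;
        kvm_pfn_t pfn;
        bool ok;

        pfn = gfn_to_pfn_atomic(kvm, gfn);      /* pin first */
        if (is_error_pfn(pfn))
                return false;

        /* install only if no one zapped or changed the spte meanwhile */
        ok = (cmpxchg64(sptep, old_spte, new_spte) == old_spte);

        kvm_release_pfn_clean(pfn);             /* drop the temporary pin */
        return ok;
}
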
