/Linux-v5.10/drivers/gpu/drm/i915/gvt/
D | page_track.c
    35   struct intel_vgpu *vgpu, unsigned long gfn)  in intel_vgpu_find_page_track() argument
    37   return radix_tree_lookup(&vgpu->page_track_tree, gfn);  in intel_vgpu_find_page_track()
    50   int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,  in intel_vgpu_register_page_track() argument
    56   track = intel_vgpu_find_page_track(vgpu, gfn);  in intel_vgpu_register_page_track()
    67   ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);  in intel_vgpu_register_page_track()
    83   unsigned long gfn)  in intel_vgpu_unregister_page_track() argument
    87   track = radix_tree_delete(&vgpu->page_track_tree, gfn);  in intel_vgpu_unregister_page_track()
    90   intel_gvt_hypervisor_disable_page_track(vgpu, gfn);  in intel_vgpu_unregister_page_track()
   103   int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)  in intel_vgpu_enable_page_track()
   108   track = intel_vgpu_find_page_track(vgpu, gfn);  in intel_vgpu_enable_page_track()
   [all …]
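Taken together, these hits outline GVT-g's per-vGPU page-tracking bookkeeping: one tracker per guest frame number, stored in a radix tree keyed by gfn. A minimal sketch of that pattern, assuming a simplified vGPU layout (the demo_* names are illustrative stand-ins, not the real i915 types):

#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_page_track {
	void *priv_data;	/* handler payload, trimmed for brevity */
};

struct demo_vgpu {
	struct radix_tree_root page_track_tree;	/* keyed by gfn */
};

static struct demo_page_track *demo_find_page_track(struct demo_vgpu *vgpu,
						    unsigned long gfn)
{
	return radix_tree_lookup(&vgpu->page_track_tree, gfn);
}

static int demo_register_page_track(struct demo_vgpu *vgpu, unsigned long gfn)
{
	struct demo_page_track *track;
	int ret;

	if (demo_find_page_track(vgpu, gfn))
		return -EEXIST;		/* at most one tracker per gfn */

	track = kzalloc(sizeof(*track), GFP_KERNEL);
	if (!track)
		return -ENOMEM;

	ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
	if (ret)
		kfree(track);
	return ret;
}

static void demo_unregister_page_track(struct demo_vgpu *vgpu,
				       unsigned long gfn)
{
	struct demo_page_track *track;

	/* the real code also tells the hypervisor to stop tracking here */
	track = radix_tree_delete(&vgpu->page_track_tree, gfn);
	kfree(track);	/* kfree(NULL) is a no-op */
}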
D | mpt.h
   163   struct intel_vgpu *vgpu, unsigned long gfn)  in intel_gvt_hypervisor_enable_page_track() argument
   165   return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);  in intel_gvt_hypervisor_enable_page_track()
   177   struct intel_vgpu *vgpu, unsigned long gfn)  in intel_gvt_hypervisor_disable_page_track() argument
   179   return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);  in intel_gvt_hypervisor_disable_page_track()
   223   struct intel_vgpu *vgpu, unsigned long gfn)  in intel_gvt_hypervisor_gfn_to_mfn() argument
   225   return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);  in intel_gvt_hypervisor_gfn_to_mfn()
   239   struct intel_vgpu *vgpu, unsigned long gfn, unsigned long size,  in intel_gvt_hypervisor_dma_map_guest_page() argument
   242   return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn, size,  in intel_gvt_hypervisor_dma_map_guest_page()
   284   struct intel_vgpu *vgpu, unsigned long gfn,  in intel_gvt_hypervisor_map_gfn_to_mfn() argument
   292   return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,  in intel_gvt_hypervisor_map_gfn_to_mfn()
   [all …]
D | hypercall.h
    57   int (*enable_page_track)(unsigned long handle, u64 gfn);
    58   int (*disable_page_track)(unsigned long handle, u64 gfn);
    63   unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
    65   int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
    71   int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
    79   bool (*is_valid_gfn)(unsigned long handle, unsigned long gfn);
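The mpt.h wrappers above and this hypercall.h ops table are two halves of one indirection: GVT-g never calls a hypervisor directly, it dispatches through intel_gvt_host.mpt so a KVM backend (kvmgt.c below) or another hypervisor can plug in. A stripped-down sketch of that shape, with demo_* names standing in for the real intel_gvt_mpt plumbing:

#include <linux/types.h>

struct demo_mpt_ops {
	int (*enable_page_track)(unsigned long handle, u64 gfn);
	int (*disable_page_track)(unsigned long handle, u64 gfn);
	unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
};

struct demo_gvt_host {
	const struct demo_mpt_ops *mpt;	/* filled in by the backend */
};

static struct demo_gvt_host demo_gvt_host;

/* thin inline wrapper, same shape as intel_gvt_hypervisor_enable_page_track() */
static inline int demo_hypervisor_enable_page_track(unsigned long handle,
						    u64 gfn)
{
	return demo_gvt_host.mpt->enable_page_track(handle, gfn);
}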
D | page_track.h
    45   struct intel_vgpu *vgpu, unsigned long gfn);
    48   unsigned long gfn, gvt_page_track_handler_t handler,
    51   unsigned long gfn);
    53   int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
    54   int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
D | kvmgt.c
    87   gfn_t gfn;  member
   105   gfn_t gfn;  member
   151   static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,  in gvt_unpin_guest_page() argument
   163   unsigned long cur_gfn = gfn + npage;  in gvt_unpin_guest_page()
   171   static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,  in gvt_pin_guest_page() argument
   186   unsigned long cur_gfn = gfn + npage;  in gvt_pin_guest_page()
   217   gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);  in gvt_pin_guest_page()
   221   static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,  in gvt_dma_map_page() argument
   228   ret = gvt_pin_guest_page(vgpu, gfn, size, &page);  in gvt_dma_map_page()
   237   gvt_unpin_guest_page(vgpu, gfn, size);  in gvt_dma_map_page()
   [all …]
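The kvmgt.c hits trace the KVM backend's pin path: a guest range is pinned one 4 KiB frame at a time (cur_gfn = gfn + npage), and on failure the already-pinned prefix is unpinned again, as hit 217 shows. A standalone sketch of that rollback pattern, with hypothetical demo_pin_one()/demo_unpin_one() standing in for the vfio pin/unpin helpers:

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

/* hypothetical one-frame primitives; stubs here */
static int demo_pin_one(unsigned long gfn) { (void)gfn; return 0; }
static void demo_unpin_one(unsigned long gfn) { (void)gfn; }

static void demo_unpin_range(unsigned long gfn, unsigned long size)
{
	unsigned long npage;

	for (npage = 0; npage < (size >> DEMO_PAGE_SHIFT); npage++)
		demo_unpin_one(gfn + npage);	/* cur_gfn = gfn + npage */
}

static int demo_pin_range(unsigned long gfn, unsigned long size)
{
	unsigned long npage;
	int ret;

	for (npage = 0; npage < (size >> DEMO_PAGE_SHIFT); npage++) {
		ret = demo_pin_one(gfn + npage);
		if (ret) {
			/* roll back the already-pinned prefix */
			demo_unpin_range(gfn, npage * DEMO_PAGE_SIZE);
			return ret;
		}
	}
	return 0;
}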
/Linux-v5.10/arch/x86/kvm/mmu/
D | mmutrace.h
    13   __field(__u64, gfn) \
    20   __entry->gfn = sp->gfn; \
    37   __entry->gfn, role.level, \
   205   TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
   206   TP_ARGS(sptep, gfn, spte),
   210   __field(gfn_t, gfn)
   217   __entry->gfn = gfn;
   223   __entry->gfn, __entry->access, __entry->gen)
   228   TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
   229   TP_ARGS(addr, gfn, access),
   [all …]
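Both this header and include/trace/events/kvm.h further down follow the same TRACE_EVENT() recipe: declare the prototype, copy the gfn into a ring-buffer record in TP_fast_assign(), format it later in TP_printk(). A hedged sketch of a gfn-carrying tracepoint (demo name, not a real kernel event; the TRACE_SYSTEM / CREATE_TRACE_POINTS header plumbing a real trace header needs is omitted):

#include <linux/tracepoint.h>

TRACE_EVENT(demo_mark_mmio_spte,
	TP_PROTO(u64 *sptep, u64 gfn, u64 spte),
	TP_ARGS(sptep, gfn, spte),

	TP_STRUCT__entry(
		__field(void *, sptep)
		__field(u64, gfn)
		__field(u64, spte)
	),

	TP_fast_assign(
		/* runs at trace time: capture into the ring buffer */
		__entry->sptep = sptep;
		__entry->gfn = gfn;
		__entry->spte = spte;
	),

	/* runs at read time: format the captured record */
	TP_printk("sptep:%p gfn %llx spte %llx",
		  __entry->sptep, __entry->gfn, __entry->spte)
);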
D | tdp_mmu.c
    98   static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,  in alloc_tdp_mmu_page() argument
   108   sp->gfn = gfn;  in alloc_tdp_mmu_page()
   154   static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
   174   static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,  in handle_changed_spte_dirty_log() argument
   187   slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);  in handle_changed_spte_dirty_log()
   188   mark_page_dirty_in_slot(slot, gfn);  in handle_changed_spte_dirty_log()
   204   static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,  in __handle_changed_spte() argument
   219   WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));  in __handle_changed_spte()
   235   as_id, gfn, old_spte, new_spte, level);  in __handle_changed_spte()
   264   as_id, gfn, old_spte, new_spte, level);  in __handle_changed_spte()
   [all …]
D | tdp_iter.c
    14   SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level);  in tdp_iter_refresh_sptep()
    18   static gfn_t round_gfn_for_level(gfn_t gfn, int level)  in round_gfn_for_level() argument
    20   return gfn & -KVM_PAGES_PER_HPAGE(level);  in round_gfn_for_level()
    39   iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);  in tdp_iter_start()
    85   iter->gfn = round_gfn_for_level(iter->goal_gfn, iter->level);  in try_step_down()
   104   if (SHADOW_PT_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==  in try_step_side()
   108   iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);  in try_step_side()
   109   iter->goal_gfn = iter->gfn;  in try_step_side()
   127   iter->gfn = round_gfn_for_level(iter->gfn, iter->level);  in try_step_up()
   171   if (iter->gfn > goal_gfn)  in tdp_iter_refresh_walk()
   [all …]
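round_gfn_for_level() here and the WARN_ON in tdp_mmu.c above are the same power-of-two trick used from opposite ends: since KVM_PAGES_PER_HPAGE(level) is a power of two, `gfn & -pages` rounds down to a level-aligned boundary, while `gfn & (pages - 1)` extracts the misalignment. A standalone demo (the 9-bits-per-level shift assumes x86-style 512-entry page tables):

#include <stdio.h>

/* 4 KiB base pages, 512 entries per level: level 1 spans 1 gfn,
 * level 2 spans 512, level 3 spans 512 * 512, ... */
static unsigned long pages_per_hpage(int level)
{
	return 1UL << ((level - 1) * 9);
}

static unsigned long round_gfn_for_level(unsigned long gfn, int level)
{
	return gfn & -pages_per_hpage(level);	/* clear the low bits */
}

int main(void)
{
	/* 0x12345 rounded to its 2 MiB (level 2) base: 0x12200 */
	printf("%#lx\n", round_gfn_for_level(0x12345UL, 2));
	/* misalignment within the region, the tdp_mmu.c WARN_ON check: 0x145 */
	printf("%#lx\n", 0x12345UL & (pages_per_hpage(2) - 1));
	return 0;
}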
D | page_track.c
    59   static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,  in update_gfn_track() argument
    64   index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in update_gfn_track()
    87   struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_add_page() argument
    94   update_gfn_track(slot, gfn, mode, 1);  in kvm_slot_page_track_add_page()
   100   kvm_mmu_gfn_disallow_lpage(slot, gfn);  in kvm_slot_page_track_add_page()
   103   if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))  in kvm_slot_page_track_add_page()
   122   struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_remove_page() argument
   128   update_gfn_track(slot, gfn, mode, -1);  in kvm_slot_page_track_remove_page()
   134   kvm_mmu_gfn_allow_lpage(slot, gfn);  in kvm_slot_page_track_remove_page()
   141   bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,  in kvm_page_track_is_active() argument
   [all …]
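These hits sketch KVM's write-tracking ledger: each memslot keeps a short counter per 4 KiB gfn, bumped by +1 in kvm_slot_page_track_add_page() and -1 on removal, and the page stays write-protected while any counter is nonzero. A minimal sketch of the counting core, assuming an illustrative slot layout (not the real kvm_memory_slot):

#include <assert.h>

struct demo_track_slot {
	unsigned long base_gfn;
	unsigned short *gfn_track;	/* one counter per 4 KiB page */
};

/* mirrors update_gfn_track(): count is +1 on add, -1 on remove */
static void demo_update_gfn_track(struct demo_track_slot *slot,
				  unsigned long gfn, short count)
{
	unsigned long index = gfn - slot->base_gfn;	/* 4K-level index */
	int val = slot->gfn_track[index] + count;

	assert(val >= 0 && val <= 0xffff);	/* catch over/underflow */
	slot->gfn_track[index] = (unsigned short)val;
}

/* the gfn is "active" (kept write-protected) while its count is nonzero */
static int demo_gfn_is_tracked(struct demo_track_slot *slot, unsigned long gfn)
{
	return slot->gfn_track[gfn - slot->base_gfn] != 0;
}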
D | mmu.c
   215   static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,  in mark_mmio_spte() argument
   218   u64 mask = make_mmio_spte(vcpu, gfn, access);  in mark_mmio_spte()
   220   trace_mark_mmio_spte(sptep, gfn, mask);  in mark_mmio_spte()
   239   static bool set_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, gfn_t gfn,  in set_mmio_spte() argument
   243   mark_mmio_spte(vcpu, sptep, gfn, access);  in set_mmio_spte()
   706   return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));  in kvm_mmu_page_get_gfn()
   709   static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)  in kvm_mmu_page_set_gfn() argument
   712   sp->gfns[index] = gfn;  in kvm_mmu_page_set_gfn()
   716   if (WARN_ON(gfn != kvm_mmu_page_get_gfn(sp, index)))  in kvm_mmu_page_set_gfn()
   719   sp->gfn,  in kvm_mmu_page_set_gfn()
   [all …]
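The hit at line 706 is the shadow-page geometry in one expression: the gfn mapped by entry `index` of a shadow page is the page's base gfn plus the index scaled by the span of one entry at the next-lower level (PT64_LEVEL_BITS == 9). A quick standalone check of that arithmetic:

#include <stdio.h>

#define PT64_LEVEL_BITS 9	/* 512 entries per page-table page */

/* mirrors the arithmetic in kvm_mmu_page_get_gfn() */
static unsigned long demo_page_get_gfn(unsigned long base_gfn, int index,
				       int level)
{
	return base_gfn +
	       ((unsigned long)index << ((level - 1) * PT64_LEVEL_BITS));
}

int main(void)
{
	/* entry 3 of a level-2 page based at gfn 0x10000: each entry
	 * spans 512 gfns, so it starts at 0x10000 + 3 * 512 = 0x10600 */
	printf("%#lx\n", demo_page_get_gfn(0x10000UL, 3, 2));
	return 0;
}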
D | mmu_internal.h
    38   gfn_t gfn;  member
    88   bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
    91   void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
    92   void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
    94   struct kvm_memory_slot *slot, u64 gfn);
   136   int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
   139   void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
D | mmu_audit.c
    96   gfn_t gfn;  in audit_mappings() local
   113   gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);  in audit_mappings()
   114   pfn = kvm_vcpu_gfn_to_pfn_atomic(vcpu, gfn);  in audit_mappings()
   133   gfn_t gfn;  in inspect_spte_has_rmap() local
   136   gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);  in inspect_spte_has_rmap()
   139   slot = __gfn_to_memslot(slots, gfn);  in inspect_spte_has_rmap()
   143   audit_printk(kvm, "no memslot for gfn %llx\n", gfn);  in inspect_spte_has_rmap()
   145   (long int)(sptep - rev_sp->spt), rev_sp->gfn);  in inspect_spte_has_rmap()
   150   rmap_head = __gfn_to_rmap(gfn, rev_sp->role.level, slot);  in inspect_spte_has_rmap()
   202   slot = __gfn_to_memslot(slots, sp->gfn);  in audit_write_protection()
   [all …]
D | paging_tmpl.h
    95   gfn_t gfn;  member
   331   gfn_t gfn;  in FNAME() local
   433   gfn = gpte_to_gfn_lvl(pte, walker->level);  in FNAME()
   434   gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;  in FNAME()
   437   gfn += pse36_gfn_delta(pte);  in FNAME()
   439   real_gpa = mmu->translate_gpa(vcpu, gfn_to_gpa(gfn), access, &walker->fault);  in FNAME()
   443   walker->gfn = real_gpa >> PAGE_SHIFT;  in FNAME()
   533   gfn_t gfn;  in FNAME() local
   541   gfn = gpte_to_gfn(gpte);  in FNAME()
   544   pfn = pte_prefetch_gfn_to_pfn(vcpu, gfn,  in FNAME()
   [all …]
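Hits 433–434 show how the guest page-table walker resolves an address through a large guest PTE: take the large page's base gfn from the gpte, then add the address's offset within the large page, expressed in 4 KiB frames. A standalone sketch with 2 MiB (level 2) pages and made-up values:

#include <stdio.h>

#define DEMO_PAGE_SHIFT 12

int main(void)
{
	unsigned long base_gfn = 0x40000;	/* large-page base, from the gpte */
	unsigned long addr = 0x00123456;	/* faulting guest address */
	unsigned long offset_mask = (1UL << 21) - 1;	/* within a 2 MiB page */
	unsigned long gfn;

	gfn = base_gfn + ((addr & offset_mask) >> DEMO_PAGE_SHIFT);
	printf("gfn = %#lx\n", gfn);	/* 0x40000 + 0x123 = 0x40123 */
	return 0;
}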
D | spte.c
    48   u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)  in make_mmio_spte() argument
    52   u64 gpa = gfn << PAGE_SHIFT;  in make_mmio_spte()
    85   gfn_t gfn, kvm_pfn_t pfn, u64 old_spte, bool speculative,  in make_spte() argument
   123   spte |= kvm_x86_ops.get_mt_mask(vcpu, gfn,  in make_spte()
   148   if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {  in make_spte()
   150   __func__, gfn);  in make_spte()
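make_mmio_spte() starts from `gpa = gfn << PAGE_SHIFT`, the conversion every file in this list leans on: a gfn is simply a guest physical address with the 12 page-offset bits dropped. As a pair of helpers (mirroring KVM's gfn_to_gpa()/gpa_to_gfn(), simplified to fixed 4 KiB pages):

#define DEMO_PAGE_SHIFT 12

static inline unsigned long long demo_gfn_to_gpa(unsigned long long gfn)
{
	return gfn << DEMO_PAGE_SHIFT;	/* e.g. gfn 0x100 -> gpa 0x100000 */
}

static inline unsigned long long demo_gpa_to_gfn(unsigned long long gpa)
{
	return gpa >> DEMO_PAGE_SHIFT;	/* the 12 offset bits are discarded */
}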
/Linux-v5.10/arch/powerpc/kvm/
D | book3s_hv_uvmem.c
   287   static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,  in kvmppc_mark_gfn() argument
   293   if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {  in kvmppc_mark_gfn()
   294   unsigned long index = gfn - p->base_pfn;  in kvmppc_mark_gfn()
   306   static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,  in kvmppc_gfn_secure_uvmem_pfn() argument
   309   kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);  in kvmppc_gfn_secure_uvmem_pfn()
   313   static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)  in kvmppc_gfn_secure_mem_pfn() argument
   315   kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);  in kvmppc_gfn_secure_mem_pfn()
   319   static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)  in kvmppc_gfn_shared() argument
   321   kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);  in kvmppc_gfn_shared()
   325   static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)  in kvmppc_gfn_remove() argument
   [all …]
D | book3s_64_mmu_hv.c
   491   unsigned long gpa, gfn, hva, pfn, hpa;  in kvmppc_book3s_hv_page_fault() local
   554   gfn = gpa >> PAGE_SHIFT;  in kvmppc_book3s_hv_page_fault()
   555   memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_book3s_hv_page_fault()
   580   hva = gfn_to_hva_memslot(memslot, gfn);  in kvmppc_book3s_hv_page_fault()
   592   pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,  in kvmppc_book3s_hv_page_fault()
   755   unsigned long gfn);
   770   gfn_t gfn, gfn_end;  in kvm_handle_hva_range() local
   781   gfn = hva_to_gfn_memslot(hva_start, memslot);  in kvm_handle_hva_range()
   784   for (; gfn < gfn_end; ++gfn) {  in kvm_handle_hva_range()
   785   ret = handler(kvm, memslot, gfn);  in kvm_handle_hva_range()
   [all …]
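kvm_handle_hva_range() here, like handle_hva_to_gpa() in the MIPS mmu.c entry near the end of this list, walks a host-virtual-address range one guest frame at a time: clamp the hva range against the memslot, convert the start to a gfn, and invoke a per-gfn handler. A simplified standalone version, with demo_hva_to_gfn() modelling hva_to_gfn_memslot() and an illustrative slot layout:

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

struct demo_memslot {
	unsigned long userspace_addr;	/* hva where the slot is mapped */
	unsigned long base_gfn;
	unsigned long npages;
};

/* models hva_to_gfn_memslot(): offset into the slot, counted in frames */
static unsigned long demo_hva_to_gfn(unsigned long hva,
				     const struct demo_memslot *slot)
{
	return slot->base_gfn +
	       ((hva - slot->userspace_addr) >> DEMO_PAGE_SHIFT);
}

static int demo_handle_hva_range(const struct demo_memslot *slot,
				 unsigned long hva_start,
				 unsigned long hva_end,	/* exclusive */
				 int (*handler)(unsigned long gfn))
{
	unsigned long gfn = demo_hva_to_gfn(hva_start, slot);
	unsigned long gfn_end =
		demo_hva_to_gfn(hva_end + DEMO_PAGE_SIZE - 1, slot);
	int ret = 0;

	for (; gfn < gfn_end; ++gfn)
		ret |= handler(gfn);	/* one callback per guest frame */
	return ret;
}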
D | e500_mmu_host.c
   323   u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,  in kvmppc_e500_shadow_map() argument
   353   slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);  in kvmppc_e500_shadow_map()
   354   hva = gfn_to_hva_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
   381   slot_start = pfn - (gfn - slot->base_gfn);  in kvmppc_e500_shadow_map()
   409   gfn_start = gfn & ~(tsize_pages - 1);  in kvmppc_e500_shadow_map()
   412   if (gfn_start + pfn - gfn < start)  in kvmppc_e500_shadow_map()
   414   if (gfn_end + pfn - gfn > end)  in kvmppc_e500_shadow_map()
   416   if ((gfn & (tsize_pages - 1)) !=  in kvmppc_e500_shadow_map()
   449   pfn = gfn_to_pfn_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
   453   __func__, (long)gfn);  in kvmppc_e500_shadow_map()
   [all …]
D | book3s_hv_rm_mmu.c
   110   unsigned long gfn, unsigned long psize)  in kvmppc_update_dirty_map() argument
   117   gfn -= memslot->base_gfn;  in kvmppc_update_dirty_map()
   118   set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);  in kvmppc_update_dirty_map()
   126   unsigned long gfn;  in kvmppc_set_dirty_from_hpte() local
   130   gfn = hpte_rpn(hpte_gr, psize);  in kvmppc_set_dirty_from_hpte()
   131   memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);  in kvmppc_set_dirty_from_hpte()
   133   kvmppc_update_dirty_map(memslot, gfn, psize);  in kvmppc_set_dirty_from_hpte()
   144   unsigned long gfn;  in revmap_for_hpte() local
   146   gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));  in revmap_for_hpte()
   147   memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);  in revmap_for_hpte()
   [all …]
/Linux-v5.10/include/linux/
D | kvm_host.h
   240   kvm_pfn_t gfn;  member
   704   int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
   707   struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
   708   unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
   709   unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
   710   unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
   711   unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
   717   kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
   718   kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
   720   kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
   [all …]
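kvm_host.h gathers the conversion family the rest of this list implements: gfn to memslot, to host virtual address, to host page-frame number. A hedged sketch of the typical call sequence inside KVM code, using the v5.10 signatures listed above (error handling trimmed; demo_touch_gfn() is illustrative, not a real helper):

#include <linux/kvm_host.h>

static int demo_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long hva;
	kvm_pfn_t pfn;

	hva = gfn_to_hva(kvm, gfn);	/* gfn -> host virtual address */
	if (kvm_is_error_hva(hva))
		return -EFAULT;		/* gfn falls outside every memslot */

	pfn = gfn_to_pfn(kvm, gfn);	/* gfn -> host page frame */
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	kvm_release_pfn_clean(pfn);	/* gfn_to_pfn() took a reference */
	return 0;
}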
/Linux-v5.10/include/trace/events/
D | kvm.h
   259   TP_PROTO(ulong gfn, int level, struct kvm_memory_slot *slot, int ref),
   260   TP_ARGS(gfn, level, slot, ref),
   264   __field( u64, gfn )
   270   __entry->gfn = gfn;
   272   __entry->hva = ((gfn - slot->base_gfn) <<
   278   __entry->hva, __entry->gfn, __entry->level,
   285   TP_PROTO(u64 gva, u64 gfn),
   287   TP_ARGS(gva, gfn),
   291   __field(u64, gfn)
   296   __entry->gfn = gfn;
   [all …]
/Linux-v5.10/virt/kvm/
D | kvm_main.c
  1669   struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)  in gfn_to_memslot() argument
  1671   return __gfn_to_memslot(kvm_memslots(kvm), gfn);  in gfn_to_memslot()
  1675   struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)  in kvm_vcpu_gfn_to_memslot() argument
  1677   return __gfn_to_memslot(kvm_vcpu_memslots(vcpu), gfn);  in kvm_vcpu_gfn_to_memslot()
  1681   bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)  in kvm_is_visible_gfn() argument
  1683   struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);  in kvm_is_visible_gfn()
  1689   bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)  in kvm_vcpu_is_visible_gfn() argument
  1691   struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);  in kvm_vcpu_is_visible_gfn()
  1697   unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)  in kvm_host_page_size() argument
  1704   addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL);  in kvm_host_page_size()
   [all …]
/Linux-v5.10/include/xen/
D | xen-ops.h
    84   xen_pfn_t *gfn, int nr,
    97   xen_pfn_t *gfn, int nr,  in xen_xlate_remap_gfn_array() argument
   134   xen_pfn_t *gfn, int nr,  in xen_remap_domain_gfn_array() argument
   140   return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,  in xen_remap_domain_gfn_array()
   148   return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,  in xen_remap_domain_gfn_array()
   196   xen_pfn_t gfn, int nr,  in xen_remap_domain_gfn_range() argument
   203   return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false,  in xen_remap_domain_gfn_range()
/Linux-v5.10/drivers/xen/
D | xlate_mmu.c
    45   typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
    84   static void setup_hparams(unsigned long gfn, void *data)  in setup_hparams() argument
    89   info->h_gpfns[info->h_iter] = gfn;  in setup_hparams()
   145   xen_pfn_t *gfn, int nr,  in xen_xlate_remap_gfn_array() argument
   158   data.fgfn = gfn;  in xen_xlate_remap_gfn_array()
   174   static void unmap_gfn(unsigned long gfn, void *data)  in unmap_gfn() argument
   179   xrp.gpfn = gfn;  in unmap_gfn()
   197   static void setup_balloon_gfn(unsigned long gfn, void *data)  in setup_balloon_gfn() argument
   201   info->pfns[info->idx++] = gfn;  in setup_balloon_gfn()
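The xlate_mmu.c hits all revolve around one callback type, xen_gfn_fn_t: a walker visits a batch of frames and invokes the callback once per gfn, while each user (mapping, unmapping, ballooning) supplies its own accumulator via the data pointer. A simplified standalone sketch of that pattern (demo_* names are stand-ins; demo_collect_gfn() mirrors setup_balloon_gfn()):

#include <stdio.h>

typedef void (*demo_gfn_fn_t)(unsigned long gfn, void *data);

struct demo_collect {
	unsigned long gfns[8];
	int idx;
};

/* stash each visited gfn into the caller's array */
static void demo_collect_gfn(unsigned long gfn, void *data)
{
	struct demo_collect *c = data;

	c->gfns[c->idx++] = gfn;
}

static void demo_for_each_gfn(unsigned long start, int nr,
			      demo_gfn_fn_t fn, void *data)
{
	int i;

	for (i = 0; i < nr; i++)
		fn(start + i, data);
}

int main(void)
{
	struct demo_collect c = { .idx = 0 };

	demo_for_each_gfn(0x1000, 4, demo_collect_gfn, &c);
	printf("collected %d gfns starting at %#lx\n", c.idx, c.gfns[0]);
	return 0;
}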
/Linux-v5.10/arch/x86/include/asm/
D | kvm_page_track.h
    57   struct kvm_memory_slot *slot, gfn_t gfn,
    60   struct kvm_memory_slot *slot, gfn_t gfn,
    62   bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
/Linux-v5.10/arch/mips/kvm/
D | mmu.c
   445   int (*handler)(struct kvm *kvm, gfn_t gfn,  in handle_hva_to_gpa() argument
   460   gfn_t gfn, gfn_end;  in handle_hva_to_gpa() local
   472   gfn = hva_to_gfn_memslot(hva_start, memslot);  in handle_hva_to_gpa()
   475   ret |= handler(kvm, gfn, gfn_end, memslot, data);  in handle_hva_to_gpa()
   482   static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,  in kvm_unmap_hva_handler() argument
   485   kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);  in kvm_unmap_hva_handler()
   498   static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,  in kvm_set_spte_handler() argument
   501   gpa_t gpa = gfn << PAGE_SHIFT;  in kvm_set_spte_handler()
   540   static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,  in kvm_age_hva_handler() argument
   543   return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);  in kvm_age_hva_handler()
   [all …]