| /Linux-v6.1/arch/x86/kvm/mmu/ |
| D | mmutrace.h |
|    13  __field(__u64, gfn) \
|    20  __entry->gfn = sp->gfn; \
|    37  __entry->gfn, role.level, \
|   212  TP_PROTO(u64 *sptep, gfn_t gfn, u64 spte),
|   213  TP_ARGS(sptep, gfn, spte),
|   217  __field(gfn_t, gfn)
|   224  __entry->gfn = gfn;
|   230  __entry->gfn, __entry->access, __entry->gen)
|   235  TP_PROTO(u64 addr, gfn_t gfn, unsigned access),
|   236  TP_ARGS(addr, gfn, access),
|   [all …]
|
| D | tdp_iter.c |
|    14  SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level);  in tdp_iter_refresh_sptep()
|    18  static gfn_t round_gfn_for_level(gfn_t gfn, int level)  in round_gfn_for_level() argument
|    20  return gfn & -KVM_PAGES_PER_HPAGE(level);  in round_gfn_for_level()
|    33  iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);  in tdp_iter_restart()
|   100  iter->gfn = round_gfn_for_level(iter->next_last_level_gfn, iter->level);  in try_step_down()
|   119  if (SPTE_INDEX(iter->gfn << PAGE_SHIFT, iter->level) ==  in try_step_side()
|   123  iter->gfn += KVM_PAGES_PER_HPAGE(iter->level);  in try_step_side()
|   124  iter->next_last_level_gfn = iter->gfn;  in try_step_side()
|   142  iter->gfn = round_gfn_for_level(iter->gfn, iter->level);  in try_step_up()
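round_gfn_for_level() at line 18 clamps a gfn down to the first page its mapping level covers; `gfn & -KVM_PAGES_PER_HPAGE(level)` works because the page count is a power of two, so its two's-complement negation is exactly the alignment mask. A runnable sketch of the same arithmetic, assuming x86-64's 9 translation bits per level (level 1 = 4K, 2 = 2M, 3 = 1G); names are local to the example:

    #include <stdint.h>
    #include <stdio.h>

    #define LEVEL_BITS 9
    /* Pages covered by one entry at `level`, the equivalent of
     * KVM_PAGES_PER_HPAGE() under the assumption above. */
    #define PAGES_PER_HPAGE(level) (1ULL << (((level) - 1) * LEVEL_BITS))

    /* Mirror of round_gfn_for_level(): negating the power-of-two page
     * count yields the alignment mask. */
    static uint64_t round_gfn_for_level(uint64_t gfn, int level)
    {
            return gfn & -PAGES_PER_HPAGE(level);
    }

    int main(void)
    {
            for (int level = 1; level <= 3; level++)
                    printf("level %d: %#llx -> %#llx\n", level, 0x12345ULL,
                           (unsigned long long)round_gfn_for_level(0x12345, level));
            return 0;
    }

For gfn 0x12345 this prints 0x12345 (4K), 0x12200 (2M) and 0x0 (1G), which is what the iterator's try_step_down()/try_step_up() hits rely on.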
|
| D | tdp_mmu.c |
|   285  gfn_t gfn, union kvm_mmu_page_role role)  in tdp_mmu_init_sp() argument
|   290  sp->gfn = gfn;  in tdp_mmu_init_sp()
|   308  tdp_mmu_init_sp(child_sp, iter->sptep, iter->gfn, role);  in tdp_mmu_init_child_sp()
|   342  static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
|   357  static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,  in handle_changed_spte_dirty_log() argument
|   370  slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);  in handle_changed_spte_dirty_log()
|   371  mark_page_dirty_in_slot(kvm, slot, gfn);  in handle_changed_spte_dirty_log()
|   432  gfn_t base_gfn = sp->gfn;  in handle_removed_pt()
|   441  gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);  in handle_removed_pt() local
|   504  handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,  in handle_removed_pt()
|   [all …]
|
| D | page_track.c |
|    87  static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,  in update_gfn_track() argument
|    92  index = gfn_to_index(gfn, slot->base_gfn, PG_LEVEL_4K);  in update_gfn_track()
|   115  struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_add_page() argument
|   126  update_gfn_track(slot, gfn, mode, 1);  in kvm_slot_page_track_add_page()
|   132  kvm_mmu_gfn_disallow_lpage(slot, gfn);  in kvm_slot_page_track_add_page()
|   135  if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn, PG_LEVEL_4K))  in kvm_slot_page_track_add_page()
|   154  struct kvm_memory_slot *slot, gfn_t gfn,  in kvm_slot_page_track_remove_page() argument
|   164  update_gfn_track(slot, gfn, mode, -1);  in kvm_slot_page_track_remove_page()
|   170  kvm_mmu_gfn_allow_lpage(slot, gfn);  in kvm_slot_page_track_remove_page()
|   179  gfn_t gfn, enum kvm_page_track_mode mode)  in kvm_slot_page_track_is_active() argument
|   [all …]
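Taken together, the add_page hits bump a per-gfn counter, forbid large-page mappings of the gfn, and write-protect its 4K spte. A minimal caller sketch, assuming the v6.1 signatures visible here and in kvm_page_track.h further down; start_write_tracking() is a hypothetical helper, and the locking the real callers arrange (kvm->slots_lock / SRCU) is omitted:

    #include <linux/kvm_host.h>
    #include <asm/kvm_page_track.h>

    /* Hypothetical helper: start write-tracking one guest page. */
    static void start_write_tracking(struct kvm *kvm, gfn_t gfn)
    {
            struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

            if (!slot)
                    return;

            /* Bumps the per-gfn counter, disallows large pages, and
             * write-protects the 4K mapping, per the hits above. */
            kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);

            WARN_ON(!kvm_slot_page_track_is_active(kvm, slot, gfn,
                                                   KVM_PAGE_TRACK_WRITE));
    }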
|
| D | mmu.c |
|   263  static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,  in mark_mmio_spte() argument
|   266  u64 spte = make_mmio_spte(vcpu, gfn, access);  in mark_mmio_spte()
|   268  trace_mark_mmio_spte(sptep, gfn, spte);  in mark_mmio_spte()
|   686  return sp->gfn;  in kvm_mmu_page_get_gfn()
|   691  return sp->gfn + (index << ((sp->role.level - 1) * SPTE_LEVEL_BITS));  in kvm_mmu_page_get_gfn()
|   721  gfn_t gfn, unsigned int access)  in kvm_mmu_page_set_translation() argument
|   724  sp->shadowed_translation[index] = (gfn << PAGE_SHIFT) | access;  in kvm_mmu_page_set_translation()
|   731  sp->gfn, kvm_mmu_page_get_access(sp, index), access);  in kvm_mmu_page_set_translation()
|   733  WARN_ONCE(gfn != kvm_mmu_page_get_gfn(sp, index),  in kvm_mmu_page_set_translation()
|   736  sp->gfn, kvm_mmu_page_get_gfn(sp, index), gfn);  in kvm_mmu_page_set_translation()
|   [all …]
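Line 724 packs a gfn and its access bits into one u64 slot of sp->shadowed_translation, relying on the access bits fitting below PAGE_SHIFT (the kernel masks them with its ACC_* constants, which do). A userspace illustration of that encoding and its inverse, assuming only access < 2^PAGE_SHIFT:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    /* Encode as kvm_mmu_page_set_translation() does: gfn in the high
     * bits, access bits in the low PAGE_SHIFT bits of one u64. */
    static uint64_t pack_translation(uint64_t gfn, unsigned int access)
    {
            return (gfn << PAGE_SHIFT) | access;
    }

    static uint64_t entry_gfn(uint64_t entry)
    {
            return entry >> PAGE_SHIFT;
    }

    static unsigned int entry_access(uint64_t entry)
    {
            return entry & ((1u << PAGE_SHIFT) - 1);
    }

    int main(void)
    {
            uint64_t e = pack_translation(0xfeed0, 0x7);

            printf("gfn=%#llx access=%#x\n",
                   (unsigned long long)entry_gfn(e), entry_access(e));
            return 0;
    }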
|
| D | paging_tmpl.h |
|    91  gfn_t gfn;  member
|   322  gfn_t gfn;  in FNAME() local
|   425  gfn = gpte_to_gfn_lvl(pte, walker->level);  in FNAME()
|   426  gfn += (addr & PT_LVL_OFFSET_MASK(walker->level)) >> PAGE_SHIFT;  in FNAME()
|   430  gfn += pse36_gfn_delta(pte);  in FNAME()
|   433  real_gpa = kvm_translate_gpa(vcpu, mmu, gfn_to_gpa(gfn), access, &walker->fault);  in FNAME()
|   437  walker->gfn = real_gpa >> PAGE_SHIFT;  in FNAME()
|   526  gfn_t gfn;  in FNAME() local
|   534  gfn = gpte_to_gfn(gpte);  in FNAME()
|   538  slot = gfn_to_memslot_dirty_bitmap(vcpu, gfn,  in FNAME()
|   [all …]
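Lines 425–426 compute the final gfn when the walk ends at a large guest page: the frame named by the gpte plus the low address bits the remaining levels would have translated. A runnable sketch of that arithmetic for 64-bit paging (masks assume 4K pages and 9 bits per level; gpte_to_gfn_lvl()'s pte masking is assumed already done):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define LEVEL_BITS 9
    /* Address bits translated below `level` but above the 4K page
     * offset, i.e. the walker's PT_LVL_OFFSET_MASK() for this layout. */
    #define LVL_OFFSET_MASK(level) \
            (((1ULL << (((level) - 1) * LEVEL_BITS)) - 1) << PAGE_SHIFT)

    /* gfn of a large-page mapping at `level`, plus the part of the
     * faulting address the missing lower levels would translate. */
    static uint64_t walker_gfn(uint64_t gpte_frame, uint64_t addr, int level)
    {
            return gpte_frame + ((addr & LVL_OFFSET_MASK(level)) >> PAGE_SHIFT);
    }

    int main(void)
    {
            /* 2M guest page (level 2), frame 0x400, addr offset 0x155000:
             * yields gfn 0x555. */
            printf("gfn = %#llx\n",
                   (unsigned long long)walker_gfn(0x400, 0x155000, 2));
            return 0;
    }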
|
| D | mmu_internal.h |
|    67  gfn_t gfn;  member
|   159  gfn_t gfn, bool can_unsync, bool prefetch);
|   161  void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
|   162  void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
|   164  struct kvm_memory_slot *slot, u64 gfn,
|   218  gfn_t gfn;  member
|   311  const struct kvm_memory_slot *slot, gfn_t gfn,
|
| D | spte.c |
|    71  u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)  in make_mmio_spte() argument
|    75  u64 gpa = gfn << PAGE_SHIFT;  in make_mmio_spte()
|   139  unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,  in make_spte() argument
|   181  spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,  in make_spte()
|   211  if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {  in make_spte()
|   213  __func__, gfn);  in make_spte()
|   234  mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);  in make_spte()
|
| /Linux-v6.1/drivers/gpu/drm/i915/gvt/ |
| D | page_track.c |
|    35  struct intel_vgpu *vgpu, unsigned long gfn)  in intel_vgpu_find_page_track() argument
|    37  return radix_tree_lookup(&vgpu->page_track_tree, gfn);  in intel_vgpu_find_page_track()
|    50  int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,  in intel_vgpu_register_page_track() argument
|    56  track = intel_vgpu_find_page_track(vgpu, gfn);  in intel_vgpu_register_page_track()
|    67  ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);  in intel_vgpu_register_page_track()
|    83  unsigned long gfn)  in intel_vgpu_unregister_page_track() argument
|    87  track = radix_tree_delete(&vgpu->page_track_tree, gfn);  in intel_vgpu_unregister_page_track()
|    90  intel_gvt_page_track_remove(vgpu, gfn);  in intel_vgpu_unregister_page_track()
|   103  int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)  in intel_vgpu_enable_page_track() argument
|   108  track = intel_vgpu_find_page_track(vgpu, gfn);  in intel_vgpu_enable_page_track()
|   [all …]
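GVT keys its page trackers by gfn in a per-vGPU radix tree. A stripped-down kernel-context sketch of the same register pattern, using only the radix-tree calls visible above; my_track, track_tree and register_track() are hypothetical, and the real code additionally stores a handler and runs under the vGPU lock:

    #include <linux/radix-tree.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct my_track { unsigned long gfn; };     /* hypothetical payload */

    static RADIX_TREE(track_tree, GFP_KERNEL);

    /* Mirror of intel_vgpu_register_page_track()'s shape: refuse double
     * registration, then insert the tracker keyed by gfn. */
    static int register_track(unsigned long gfn)
    {
            struct my_track *t;
            int ret;

            if (radix_tree_lookup(&track_tree, gfn))
                    return -EEXIST;

            t = kzalloc(sizeof(*t), GFP_KERNEL);
            if (!t)
                    return -ENOMEM;
            t->gfn = gfn;

            ret = radix_tree_insert(&track_tree, gfn, t);
            if (ret)
                    kfree(t);
            return ret;
    }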
|
| D | kvmgt.c |
|    92  gfn_t gfn;  member
|   100  gfn_t gfn;  member
|   130  static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,  in gvt_unpin_guest_page() argument
|   133  vfio_unpin_pages(&vgpu->vfio_device, gfn << PAGE_SHIFT,  in gvt_unpin_guest_page()
|   138  static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,  in gvt_pin_guest_page() argument
|   151  dma_addr_t cur_iova = (gfn + npage) << PAGE_SHIFT;  in gvt_pin_guest_page()
|   175  gvt_unpin_guest_page(vgpu, gfn, npage * PAGE_SIZE);  in gvt_pin_guest_page()
|   179  static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,  in gvt_dma_map_page() argument
|   186  ret = gvt_pin_guest_page(vgpu, gfn, size, &page);  in gvt_dma_map_page()
|   195  gvt_unpin_guest_page(vgpu, gfn, size);  in gvt_dma_map_page()
|   [all …]
|
| D | page_track.h |
|    45  struct intel_vgpu *vgpu, unsigned long gfn);
|    48  unsigned long gfn, gvt_page_track_handler_t handler,
|    51  unsigned long gfn);
|    53  int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
|    54  int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
|
| /Linux-v6.1/arch/powerpc/kvm/ |
| D | book3s_hv_uvmem.c |
|   289  static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,  in kvmppc_mark_gfn() argument
|   295  if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {  in kvmppc_mark_gfn()
|   296  unsigned long index = gfn - p->base_pfn;  in kvmppc_mark_gfn()
|   308  static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,  in kvmppc_gfn_secure_uvmem_pfn() argument
|   311  kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);  in kvmppc_gfn_secure_uvmem_pfn()
|   315  static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)  in kvmppc_gfn_secure_mem_pfn() argument
|   317  kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);  in kvmppc_gfn_secure_mem_pfn()
|   321  static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)  in kvmppc_gfn_shared() argument
|   323  kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);  in kvmppc_gfn_shared()
|   327  static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)  in kvmppc_gfn_remove() argument
|   [all …]
|
| D | book3s_64_mmu_hv.c |
|   500  unsigned long gpa, gfn, hva, pfn, hpa;  in kvmppc_book3s_hv_page_fault() local
|   563  gfn = gpa >> PAGE_SHIFT;  in kvmppc_book3s_hv_page_fault()
|   564  memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_book3s_hv_page_fault()
|   589  hva = gfn_to_hva_memslot(memslot, gfn);  in kvmppc_book3s_hv_page_fault()
|   601  pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,  in kvmppc_book3s_hv_page_fault()
|   766  unsigned long *rmapp, unsigned long gfn)  in kvmppc_unmap_hpte() argument
|   790  hpte_rpn(ptel, psize) == gfn) {  in kvmppc_unmap_hpte()
|   798  kvmppc_update_dirty_map(memslot, gfn, psize);  in kvmppc_unmap_hpte()
|   807  unsigned long gfn)  in kvm_unmap_rmapp() argument
|   813  rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];  in kvm_unmap_rmapp()
|   [all …]
|
| D | e500_mmu_host.c |
|   323  u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,  in kvmppc_e500_shadow_map() argument
|   353  slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);  in kvmppc_e500_shadow_map()
|   354  hva = gfn_to_hva_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
|   381  slot_start = pfn - (gfn - slot->base_gfn);  in kvmppc_e500_shadow_map()
|   409  gfn_start = gfn & ~(tsize_pages - 1);  in kvmppc_e500_shadow_map()
|   412  if (gfn_start + pfn - gfn < start)  in kvmppc_e500_shadow_map()
|   414  if (gfn_end + pfn - gfn > end)  in kvmppc_e500_shadow_map()
|   416  if ((gfn & (tsize_pages - 1)) !=  in kvmppc_e500_shadow_map()
|   449  pfn = gfn_to_pfn_memslot(slot, gfn);  in kvmppc_e500_shadow_map()
|   453  __func__, (long)gfn);  in kvmppc_e500_shadow_map()
|   [all …]
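The truncated condition at line 416 pairs gfn alignment against pfn alignment: a large TLB entry spanning tsize_pages can translate gfn to pfn only if both share the same offset within that window. A minimal check expressing that constraint (the right-hand side of the comparison is inferred, since the excerpt cuts it off):

    #include <stdio.h>

    /* A large page of tsize_pages (a power of two) can map gfn -> pfn
     * only when both are equally aligned within the large page. */
    static int can_map_large(unsigned long gfn, unsigned long pfn,
                             unsigned long tsize_pages)
    {
            return (gfn & (tsize_pages - 1)) == (pfn & (tsize_pages - 1));
    }

    int main(void)
    {
            printf("%d\n", can_map_large(0x1200, 0x5200, 0x400)); /* 1: offsets match */
            printf("%d\n", can_map_large(0x1200, 0x5300, 0x400)); /* 0: misaligned */
            return 0;
    }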
|
| D | book3s_hv_rm_mmu.c |
|    97  unsigned long gfn, unsigned long psize)  in kvmppc_update_dirty_map() argument
|   104  gfn -= memslot->base_gfn;  in kvmppc_update_dirty_map()
|   105  set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);  in kvmppc_update_dirty_map()
|   113  unsigned long gfn;  in kvmppc_set_dirty_from_hpte() local
|   117  gfn = hpte_rpn(hpte_gr, psize);  in kvmppc_set_dirty_from_hpte()
|   118  memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);  in kvmppc_set_dirty_from_hpte()
|   120  kvmppc_update_dirty_map(memslot, gfn, psize);  in kvmppc_set_dirty_from_hpte()
|   131  unsigned long gfn;  in revmap_for_hpte() local
|   133  gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));  in revmap_for_hpte()
|   134  memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);  in revmap_for_hpte()
|   [all …]
|
| D | book3s_64_mmu_radix.c |
|   424  unsigned long gfn = gpa >> PAGE_SHIFT;  in kvmppc_unmap_pte() local
|   436  memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_unmap_pte()
|   453  kvmppc_update_dirty_map(memslot, gfn, page_size);  in kvmppc_unmap_pte()
|   824  unsigned long hva, gfn = gpa >> PAGE_SHIFT;  in kvmppc_book3s_instantiate_page() local
|   842  hva = gfn_to_hva_memslot(memslot, gfn);  in kvmppc_book3s_instantiate_page()
|   849  pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,  in kvmppc_book3s_instantiate_page()
|   943  unsigned long gpa, gfn;  in kvmppc_book3s_radix_page_fault() local
|   964  gfn = gpa >> PAGE_SHIFT;  in kvmppc_book3s_radix_page_fault()
|   969  return kvmppc_send_page_to_uv(kvm, gfn);  in kvmppc_book3s_radix_page_fault()
|   972  memslot = gfn_to_memslot(kvm, gfn);  in kvmppc_book3s_radix_page_fault()
|   [all …]
|
| /Linux-v6.1/include/linux/ |
| D | kvm_host.h |
|    282  kvm_pfn_t gfn;  member
|   1126  int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
|   1129  struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
|   1130  unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
|   1131  unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
|   1132  unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
|   1133  unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
|   1138  kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
|   1139  kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
|   1141  kvm_pfn_t gfn_to_pfn_memslot(const struct kvm_memory_slot *slot, gfn_t gfn);
|   [all …]
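These declarations form the usual translation chain gfn → memslot → hva/pfn. A minimal sketch chaining two of them, together with gfn_to_memslot() from kvm_main.c (excerpted below); error handling beyond the missing-slot case is elided, and real fault paths also check kvm_is_error_hva() and error pfns:

    #include <linux/kvm_host.h>

    /* Sketch: resolve a guest frame to a host frame via its memslot. */
    static kvm_pfn_t sketch_gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
    {
            struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

            if (!slot)
                    return KVM_PFN_NOSLOT;

            return gfn_to_pfn_memslot(slot, gfn);
    }

The riscv gstage_page_fault() hit at the end of this listing walks the same chain with gfn_to_hva_memslot_prot().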
|
| /Linux-v6.1/include/xen/ |
| D | xen-ops.h |
|    66  xen_pfn_t *gfn, int nr,
|    79  xen_pfn_t *gfn, int nr,  in xen_xlate_remap_gfn_array() argument
|   116  xen_pfn_t *gfn, int nr,  in xen_remap_domain_gfn_array() argument
|   122  return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,  in xen_remap_domain_gfn_array()
|   130  return xen_remap_pfn(vma, addr, gfn, nr, err_ptr, prot, domid,  in xen_remap_domain_gfn_array()
|   176  xen_pfn_t gfn, int nr,  in xen_remap_domain_gfn_range() argument
|   183  return xen_remap_pfn(vma, addr, &gfn, nr, NULL, prot, domid, false);  in xen_remap_domain_gfn_range()
|
| /Linux-v6.1/drivers/xen/ |
| D | xlate_mmu.c |
|    45  typedef void (*xen_gfn_fn_t)(unsigned long gfn, void *data);
|    84  static void setup_hparams(unsigned long gfn, void *data)  in setup_hparams() argument
|    89  info->h_gpfns[info->h_iter] = gfn;  in setup_hparams()
|   145  xen_pfn_t *gfn, int nr,  in xen_xlate_remap_gfn_array() argument
|   158  data.fgfn = gfn;  in xen_xlate_remap_gfn_array()
|   174  static void unmap_gfn(unsigned long gfn, void *data)  in unmap_gfn() argument
|   179  xrp.gpfn = gfn;  in unmap_gfn()
|   197  static void setup_balloon_gfn(unsigned long gfn, void *data)  in setup_balloon_gfn() argument
|   201  info->pfns[info->idx++] = gfn;  in setup_balloon_gfn()
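xlate_mmu.c drives everything through the xen_gfn_fn_t callback at line 45: one generic walker hands each gfn to setup_hparams(), unmap_gfn(), or setup_balloon_gfn(). A standalone, userspace rendering of that pattern (all names here are local to the example):

    #include <stdio.h>
    #include <stddef.h>

    /* Same shape as xen_gfn_fn_t at line 45. */
    typedef void (*gfn_fn_t)(unsigned long gfn, void *data);

    /* One generic walker, many consumers. */
    static void for_each_gfn(const unsigned long *gfns, int nr,
                             gfn_fn_t fn, void *data)
    {
            for (int i = 0; i < nr; i++)
                    fn(gfns[i], data);
    }

    struct collect { unsigned long *out; int idx; };

    static void collect_gfn(unsigned long gfn, void *data)
    {
            struct collect *c = data;

            c->out[c->idx++] = gfn;     /* like setup_balloon_gfn() */
    }

    int main(void)
    {
            unsigned long in[] = { 0x100, 0x101, 0x102 }, out[3];
            struct collect c = { out, 0 };

            for_each_gfn(in, 3, collect_gfn, &c);
            printf("collected %d gfns, last %#lx\n", c.idx, out[2]);
            return 0;
    }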
|
| /Linux-v6.1/virt/kvm/ |
| D | dirty_ring.c |
|    75  static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)  in kvm_dirty_gfn_set_invalid() argument
|    77  smp_store_release(&gfn->flags, 0);  in kvm_dirty_gfn_set_invalid()
|    80  static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)  in kvm_dirty_gfn_set_dirtied() argument
|    82  gfn->flags = KVM_DIRTY_GFN_F_DIRTY;  in kvm_dirty_gfn_set_dirtied()
|    85  static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)  in kvm_dirty_gfn_harvested() argument
|    87  return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;  in kvm_dirty_gfn_harvested()
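These helpers are the kernel half of the dirty-ring handshake; userspace is expected to use matching acquire/release ordering on the shared flags word. A hedged sketch of a userspace harvester (GCC/Clang atomic builtins; struct kvm_dirty_gfn and the flag names come from the uapi <linux/kvm.h>, and harvest_one() is a hypothetical helper):

    #include <linux/kvm.h>      /* struct kvm_dirty_gfn, KVM_DIRTY_GFN_F_* */

    /* Consume one ring entry if KVM has published it; returns 1 when
     * harvested. The acquire on flags orders the reads of slot and
     * offset; the release store of KVM_DIRTY_GFN_F_RESET pairs with
     * kvm_dirty_gfn_harvested()'s smp_load_acquire() above. */
    static int harvest_one(struct kvm_dirty_gfn *e)
    {
            __u32 flags = __atomic_load_n(&e->flags, __ATOMIC_ACQUIRE);

            if (!(flags & KVM_DIRTY_GFN_F_DIRTY))
                    return 0;   /* not yet published by KVM */

            /* ... record (e->slot, e->offset) in the caller's bitmap ... */

            __atomic_store_n(&e->flags, KVM_DIRTY_GFN_F_RESET,
                             __ATOMIC_RELEASE);
            return 1;
    }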
|
| D | kvm_main.c |
|   2316  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)  in gfn_to_memslot() argument
|   2318  return __gfn_to_memslot(kvm_memslots(kvm), gfn);  in gfn_to_memslot()
|   2322  struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)  in kvm_vcpu_gfn_to_memslot() argument
|   2337  slot = try_get_memslot(vcpu->last_used_slot, gfn);  in kvm_vcpu_gfn_to_memslot()
|   2346  slot = search_memslots(slots, gfn, false);  in kvm_vcpu_gfn_to_memslot()
|   2355  bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)  in kvm_is_visible_gfn() argument
|   2357  struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn);  in kvm_is_visible_gfn()
|   2363  bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)  in kvm_vcpu_is_visible_gfn() argument
|   2365  struct kvm_memory_slot *memslot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);  in kvm_vcpu_is_visible_gfn()
|   2371  unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)  in kvm_host_page_size() argument
|   [all …]
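kvm_vcpu_gfn_to_memslot() tries vcpu->last_used_slot before falling back to search_memslots(), because consecutive faults usually hit the same slot. A self-contained illustration of that caching pattern (a linear scan stands in for KVM's real slot search; the unsigned subtraction handles gfn below base_gfn by wrapping large):

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified memslot: covers [base_gfn, base_gfn + npages). */
    struct slot {
            unsigned long base_gfn;
            unsigned long npages;
    };

    /* Fast path analogous to try_get_memslot(): reuse the cached slot
     * when the gfn still falls inside it, else search and re-cache. */
    static struct slot *lookup(struct slot *slots, int n,
                               struct slot **cache, unsigned long gfn)
    {
            struct slot *s = *cache;

            if (s && gfn - s->base_gfn < s->npages)
                    return s;

            for (int i = 0; i < n; i++) {
                    s = &slots[i];
                    if (gfn - s->base_gfn < s->npages) {
                            *cache = s;         /* remember for next time */
                            return s;
                    }
            }
            return NULL;
    }

    int main(void)
    {
            struct slot slots[] = { { 0x0, 0x100 }, { 0x1000, 0x800 } };
            struct slot *cache = NULL;

            printf("%p\n", (void *)lookup(slots, 2, &cache, 0x1400));
            printf("%p\n", (void *)lookup(slots, 2, &cache, 0x1401)); /* cache hit */
            return 0;
    }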
|
| /Linux-v6.1/include/trace/events/ |
| D | kvm.h |
|   261  TP_PROTO(u64 gva, u64 gfn),
|   263  TP_ARGS(gva, gfn),
|   267  __field(u64, gfn)
|   272  __entry->gfn = gfn;
|   275  TP_printk("gva = %#llx, gfn = %#llx", __entry->gva, __entry->gfn)
|   280  TP_PROTO(u64 gva, u64 gfn),
|   282  TP_ARGS(gva, gfn)
|   287  TP_PROTO(u64 gva, u64 gfn),
|   289  TP_ARGS(gva, gfn)
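These fragments belong to an event class taking (gva, gfn) and two events defined on it. For reference, a complete tracepoint of the same shape looks like this; the event name is hypothetical, and the usual TRACE_SYSTEM / include-guard plumbing of a trace header is omitted:

    #include <linux/tracepoint.h>

    /* Hypothetical event with the same (gva, gfn) shape as the class
     * excerpted above. */
    TRACE_EVENT(demo_gfn_fault,
            TP_PROTO(u64 gva, u64 gfn),
            TP_ARGS(gva, gfn),

            TP_STRUCT__entry(
                    __field(u64, gva)
                    __field(u64, gfn)
            ),

            TP_fast_assign(
                    __entry->gva = gva;
                    __entry->gfn = gfn;
            ),

            TP_printk("gva = %#llx, gfn = %#llx",
                      __entry->gva, __entry->gfn)
    );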
|
| /Linux-v6.1/arch/x86/include/asm/ |
| D | kvm_page_track.h |
|    61  struct kvm_memory_slot *slot, gfn_t gfn,
|    64  struct kvm_memory_slot *slot, gfn_t gfn,
|    68  gfn_t gfn, enum kvm_page_track_mode mode);
|
| D | sev-common.h |
|    86  #define GHCB_MSR_PSC_REQ_GFN(gfn, op) \  argument
|    90  ((u64)((gfn) & GENMASK_ULL(39, 0)) << 12) | \
|   120  gfn : 40,  member
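GHCB_MSR_PSC_REQ_GFN packs a 40-bit gfn at bit 12 of the MSR protocol value (shifting by PAGE_SHIFT turns it back into a gpa), matching the 40-bit gfn bitfield at line 120. A small demo of just that field placement; the op and request-code bits the full macro also sets are left out:

    #include <stdint.h>
    #include <stdio.h>

    /* Low 40 bits, i.e. GENMASK_ULL(39, 0). */
    #define PSC_GFN_MASK ((1ULL << 40) - 1)

    /* gfn field of a GHCB MSR page-state-change request. */
    static uint64_t psc_req_gfn_bits(uint64_t gfn)
    {
            return (gfn & PSC_GFN_MASK) << 12;
    }

    int main(void)
    {
            printf("%#llx\n", (unsigned long long)psc_req_gfn_bits(0x123456789aULL));
            return 0;
    }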
|
| /Linux-v6.1/arch/riscv/kvm/ |
| D | vcpu_exit.c |
|    19  gfn_t gfn;  in gstage_page_fault() local
|    23  gfn = fault_addr >> PAGE_SHIFT;  in gstage_page_fault()
|    24  memslot = gfn_to_memslot(vcpu->kvm, gfn);  in gstage_page_fault()
|    25  hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);  in gstage_page_fault()
|