Lines Matching refs:gfn

268 	gfn_t gfn;  member
843 int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
846 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
847 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
848 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
849 unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
850 unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
856 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
857 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
859 kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
860 kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
861 kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
871 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
879 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
891 #define __kvm_get_guest(kvm, gfn, offset, v) \ argument
893 unsigned long __addr = gfn_to_hva(kvm, gfn); \
911 #define __kvm_put_guest(kvm, gfn, offset, v) \ argument
913 unsigned long __addr = gfn_to_hva(kvm, gfn); \
920 mark_page_dirty(kvm, gfn); \
934 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
935 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
936 bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
937 unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
938 void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
939 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
942 struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
943 kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
944 kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
946 int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
948 struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
952 unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
953 unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
954 int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
960 int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
964 void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
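The declarations above (file lines 843-964) make up the gfn-based guest-memory access API: a gfn is translated to a host virtual address, host page or pfn, and kvm_read_guest_page()/kvm_write_guest_page() copy data at a byte offset within that guest page. A minimal sketch of how a KVM-internal caller might combine them follows; demo_patch_guest_u64() is a hypothetical illustration, not a function from this header, and real callers typically hold the kvm->srcu read lock so the memslot lookup behind these helpers stays valid.

/*
 * Hypothetical example (not part of kvm_host.h): update a 64-bit value
 * stored at 'offset' within the guest page identified by 'gfn'.
 */
static int demo_patch_guest_u64(struct kvm *kvm, gfn_t gfn,
				int offset, u64 new_val)
{
	u64 old_val;
	int r;

	/* gfn + byte offset -> copy sizeof(u64) bytes out of guest memory */
	r = kvm_read_guest_page(kvm, gfn, &old_val, offset, sizeof(old_val));
	if (r)
		return r;

	if (old_val == new_val)
		return 0;

	/*
	 * Copy the replacement value back into the same guest page; this
	 * helper also marks the page dirty in its memslot.
	 */
	return kvm_write_guest_page(kvm, gfn, &new_val, offset, sizeof(new_val));
}

The __kvm_put_guest() macro at line 911, by contrast, writes through a raw hva obtained from gfn_to_hva() and therefore calls mark_page_dirty() itself (line 920).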
1202 try_get_memslot(struct kvm_memslots *slots, int slot_index, gfn_t gfn) in try_get_memslot() argument
1217 if (gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages) in try_get_memslot()
1230 search_memslots(struct kvm_memslots *slots, gfn_t gfn, int *index) in search_memslots() argument
1242 if (gfn >= memslots[slot].base_gfn) in search_memslots()
1248 slot = try_get_memslot(slots, start, gfn); in search_memslots()
1263 __gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn) in __gfn_to_memslot() argument
1268 slot = try_get_memslot(slots, slot_index, gfn); in __gfn_to_memslot()
1272 slot = search_memslots(slots, gfn, &slot_index); in __gfn_to_memslot()
1282 __gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn) in __gfn_to_hva_memslot() argument
1290 unsigned long offset = gfn - slot->base_gfn; in __gfn_to_hva_memslot()
1295 static inline int memslot_id(struct kvm *kvm, gfn_t gfn) in memslot_id() argument
1297 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
1308 static inline gpa_t gfn_to_gpa(gfn_t gfn) in gfn_to_gpa() argument
1310 return (gpa_t)gfn << PAGE_SHIFT; in gfn_to_gpa()
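The static inline helpers in the second half of the listing (file lines 1202-1310) reduce to a small amount of arithmetic: a memslot covers gfns in [base_gfn, base_gfn + npages); search_memslots() locates the covering slot with a binary search over an array sorted by descending base_gfn, and __gfn_to_memslot() first retries the last-used slot index via try_get_memslot() before falling back to that search; __gfn_to_hva_memslot() returns userspace_addr plus the in-slot offset in bytes; and gfn_to_gpa() shifts the frame number by PAGE_SHIFT. The self-contained userspace sketch below restates that arithmetic; the demo_* names, types and struct layout are illustrative only, deliberately simpler than the kernel definitions, and assume a 64-bit host.

#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (1UL << DEMO_PAGE_SHIFT)

struct demo_memslot {
	uint64_t base_gfn;             /* first guest frame in the slot */
	uint64_t npages;               /* number of guest pages in the slot */
	unsigned long userspace_addr;  /* hva backing the slot's first page */
};

/* containment test as in try_get_memslot(): [base_gfn, base_gfn + npages) */
static int demo_slot_contains(const struct demo_memslot *slot, uint64_t gfn)
{
	return gfn >= slot->base_gfn && gfn < slot->base_gfn + slot->npages;
}

/*
 * Shape of search_memslots(): binary search over slots sorted by
 * descending base_gfn, followed by a containment check on the candidate.
 */
static const struct demo_memslot *
demo_search_memslots(const struct demo_memslot *slots, int nslots, uint64_t gfn)
{
	int start = 0, end = nslots;

	while (start < end) {
		int mid = start + (end - start) / 2;

		if (gfn >= slots[mid].base_gfn)
			end = mid;
		else
			start = mid + 1;
	}

	if (start < nslots && demo_slot_contains(&slots[start], gfn))
		return &slots[start];
	return NULL;
}

/* __gfn_to_hva_memslot(): hva = userspace_addr + in-slot offset in bytes */
static unsigned long demo_gfn_to_hva(const struct demo_memslot *slot, uint64_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * DEMO_PAGE_SIZE;
}

/* gfn_to_gpa(): a guest physical address is the frame number shifted up */
static uint64_t demo_gfn_to_gpa(uint64_t gfn)
{
	return gfn << DEMO_PAGE_SHIFT;
}

int main(void)
{
	/* two slots, sorted by descending base_gfn as in kvm_memslots */
	struct demo_memslot slots[] = {
		{ .base_gfn = 0x100, .npages = 16, .userspace_addr = 0x7f0000200000UL },
		{ .base_gfn = 0x000, .npages = 32, .userspace_addr = 0x7f0000000000UL },
	};
	const struct demo_memslot *slot = demo_search_memslots(slots, 2, 0x105);

	if (slot)
		printf("gfn 0x105 -> gpa 0x%llx, hva 0x%lx\n",
		       (unsigned long long)demo_gfn_to_gpa(0x105),
		       demo_gfn_to_hva(slot, 0x105));
	return 0;
}

Built and run on its own, the sketch resolves gfn 0x105 to the first demo slot and prints the corresponding gpa and hva.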