Lines Matching refs:kvm

188 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
190 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
191 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
228 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
230 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
232 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
268 bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
269 bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
270 bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
271 bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
323 struct kvm *kvm; member
638 struct kvm *kvm, int irq_source_id, int level,
671 bool kvm_arch_irqchip_in_kernel(struct kvm *kvm);
704 struct kvm { struct
839 static inline void kvm_vm_dead(struct kvm *kvm) in kvm_vm_dead() argument
841 kvm->vm_dead = true; in kvm_vm_dead()
842 kvm_make_all_cpus_request(kvm, KVM_REQ_VM_DEAD); in kvm_vm_dead()
845 static inline void kvm_vm_bugged(struct kvm *kvm) in kvm_vm_bugged() argument
847 kvm->vm_bugged = true; in kvm_vm_bugged()
848 kvm_vm_dead(kvm); in kvm_vm_bugged()
852 #define KVM_BUG(cond, kvm, fmt...) \ argument
856 if (WARN_ONCE(__ret && !(kvm)->vm_bugged, fmt)) \
857 kvm_vm_bugged(kvm); \
861 #define KVM_BUG_ON(cond, kvm) \ argument
865 if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
866 kvm_vm_bugged(kvm); \
878 #define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \ argument
884 else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
885 kvm_vm_bugged(kvm); \
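
The KVM_BUG*() macros above evaluate to the (unlikely) condition after warning once, marking the VM bugged, and kicking every vCPU with KVM_REQ_VM_DEAD via kvm_vm_dead(). A minimal caller-side sketch; the handler and its invariant are hypothetical:

static int my_handle_fault(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        /*
         * If the invariant is violated, WARN once, mark the VM bugged
         * and bail out; no vCPU will re-enter the guest once
         * KVM_REQ_VM_DEAD is pending.
         */
        if (KVM_BUG_ON(gpa & (PAGE_SIZE - 1), vcpu->kvm))
                return -EIO;

        /* ... normal fault handling ... */
        return 0;
}
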
895 vcpu->____srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_srcu_read_lock()
900 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->____srcu_idx); in kvm_vcpu_srcu_read_unlock()
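
kvm_vcpu_srcu_read_lock()/kvm_vcpu_srcu_read_unlock() wrap the per-VM SRCU that protects memslot and I/O bus lookups. A hedged sketch of a helper reading guest memory from vCPU context; the helper itself is hypothetical:

static int my_read_guest_u32(struct kvm_vcpu *vcpu, gpa_t gpa, u32 *val)
{
        int ret;

        /* Memslot lookups must run inside kvm->srcu's read side. */
        kvm_vcpu_srcu_read_lock(vcpu);
        ret = kvm_read_guest(vcpu->kvm, gpa, val, sizeof(*val));
        kvm_vcpu_srcu_read_unlock(vcpu);

        return ret;
}
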
908 static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm) in kvm_dirty_log_manual_protect_and_init_set() argument
910 return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET); in kvm_dirty_log_manual_protect_and_init_set()
913 static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) in kvm_get_bus() argument
915 return srcu_dereference_check(kvm->buses[idx], &kvm->srcu, in kvm_get_bus()
916 lockdep_is_held(&kvm->slots_lock) || in kvm_get_bus()
917 !refcount_read(&kvm->users_count)); in kvm_get_bus()
920 static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) in kvm_get_vcpu() argument
922 int num_vcpus = atomic_read(&kvm->online_vcpus); in kvm_get_vcpu()
927 return xa_load(&kvm->vcpu_array, i); in kvm_get_vcpu()
930 #define kvm_for_each_vcpu(idx, vcpup, kvm) \ argument
931 xa_for_each_range(&kvm->vcpu_array, idx, vcpup, 0, \
932 (atomic_read(&kvm->online_vcpus) - 1))
934 static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id) in kvm_get_vcpu_by_id() argument
942 vcpu = kvm_get_vcpu(kvm, id); in kvm_get_vcpu_by_id()
945 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_vcpu_by_id()
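
kvm_get_vcpu() indexes the vcpu_array xarray directly, while kvm_get_vcpu_by_id() tries that fast path first and only then scans all vCPUs, since a vcpu_id need not equal its array index. Iteration sketch; the kick-everyone-else helper is hypothetical:

static void my_kick_other_vcpus(struct kvm *kvm, struct kvm_vcpu *self)
{
        struct kvm_vcpu *vcpu;
        unsigned long i;        /* xarray index, must be unsigned long */

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (vcpu != self)
                        kvm_vcpu_kick(vcpu);
        }
}
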
951 void kvm_destroy_vcpus(struct kvm *kvm);
957 void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
958 void kvm_arch_post_irq_routing_update(struct kvm *kvm);
960 static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm) in kvm_arch_post_irq_ack_notifier_list_update() argument
963 static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm) in kvm_arch_post_irq_routing_update() argument
984 void kvm_get_kvm(struct kvm *kvm);
985 bool kvm_get_kvm_safe(struct kvm *kvm);
986 void kvm_put_kvm(struct kvm *kvm);
988 void kvm_put_kvm_no_destroy(struct kvm *kvm);
990 static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) in __kvm_memslots() argument
993 return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, in __kvm_memslots()
994 lockdep_is_held(&kvm->slots_lock) || in __kvm_memslots()
995 !refcount_read(&kvm->users_count)); in __kvm_memslots()
998 static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm) in kvm_memslots() argument
1000 return __kvm_memslots(kvm, 0); in kvm_memslots()
1007 return __kvm_memslots(vcpu->kvm, as_id); in kvm_vcpu_memslots()
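
The srcu_dereference_check() in __kvm_memslots() documents the three legal contexts: inside kvm->srcu, holding kvm->slots_lock, or after the last user reference is gone. A hypothetical reader that only needs the SRCU side:

static bool my_gfn_is_backed(struct kvm *kvm, gfn_t gfn)
{
        bool backed;
        int idx;

        idx = srcu_read_lock(&kvm->srcu);
        backed = gfn_to_memslot(kvm, gfn) != NULL;
        srcu_read_unlock(&kvm->srcu, idx);

        return backed;
}
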
1015 bool kvm_are_all_memslots_empty(struct kvm *kvm);
1148 int kvm_set_memory_region(struct kvm *kvm,
1150 int __kvm_set_memory_region(struct kvm *kvm,
1152 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
1153 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
1154 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1158 void kvm_arch_commit_memory_region(struct kvm *kvm,
1163 void kvm_arch_flush_shadow_all(struct kvm *kvm);
1165 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1171 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
1172 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
1173 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
1180 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
1181 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
1195 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1197 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
1198 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1200 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1203 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1205 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1207 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1209 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
1212 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
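
For guest memory the host touches repeatedly (shared pages, per-vCPU records and the like), the cached variants avoid re-resolving gfn->hva on every access: initialize a gfn_to_hva_cache once, then use kvm_read_guest_cached()/kvm_write_guest_cached(). A hedged sketch; the record layout is made up:

struct my_shared_rec {
        u64 seq;
        u64 flags;
};

static int my_publish_rec(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                          gpa_t gpa)
{
        struct my_shared_rec rec = { .seq = 1 };
        int ret;

        /* Resolve and remember the hva for [gpa, gpa + sizeof(rec)). */
        ret = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(rec));
        if (ret)
                return ret;

        return kvm_write_guest_cached(kvm, ghc, &rec, sizeof(rec));
}
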
1215 #define __kvm_get_guest(kvm, gfn, offset, v) \ argument
1217 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1226 #define kvm_get_guest(kvm, gpa, v) \ argument
1229 struct kvm *__kvm = kvm; \
1235 #define __kvm_put_guest(kvm, gfn, offset, v) \ argument
1237 unsigned long __addr = gfn_to_hva(kvm, gfn); \
1244 mark_page_dirty(kvm, gfn); \
1248 #define kvm_put_guest(kvm, gpa, v) \ argument
1251 struct kvm *__kvm = kvm; \
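
kvm_get_guest()/kvm_put_guest() access a single value whose size comes from the type of 'v'; the put variant also marks the page dirty on success. A hypothetical read-modify-write of a guest counter (the caller still needs an SRCU read-side section for the gfn->hva lookup):

static int my_bump_guest_counter(struct kvm *kvm, gpa_t gpa)
{
        u32 val;
        int ret;

        ret = kvm_get_guest(kvm, gpa, val);
        if (ret)
                return ret;

        val++;
        return kvm_put_guest(kvm, gpa, val);    /* marks the page dirty */
}
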
1257 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
1258 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
1259 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
1262 void mark_page_dirty_in_slot(struct kvm *kvm, const struct kvm_memory_slot *memslot, gfn_t gfn);
1263 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
1303 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm,
1382 void kvm_flush_remote_tlbs(struct kvm *kvm);
1383 void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1384 void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
1395 void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start,
1397 void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start,
1406 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);
1408 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1412 void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
1415 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
1416 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
1420 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
1422 int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
1452 int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
1458 int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state);
1476 int kvm_arch_post_init_vm(struct kvm *kvm);
1477 void kvm_arch_pre_destroy_vm(struct kvm *kvm);
1478 int kvm_arch_create_vm_debugfs(struct kvm *kvm);
1485 static inline struct kvm *kvm_arch_alloc_vm(void) in kvm_arch_alloc_vm()
1487 return kzalloc(sizeof(struct kvm), GFP_KERNEL_ACCOUNT); in kvm_arch_alloc_vm()
1491 static inline void __kvm_arch_free_vm(struct kvm *kvm) in __kvm_arch_free_vm() argument
1493 kvfree(kvm); in __kvm_arch_free_vm()
1497 static inline void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
1499 __kvm_arch_free_vm(kvm); in kvm_arch_free_vm()
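
kvm_arch_alloc_vm()/kvm_arch_free_vm() have these generic kzalloc()/kvfree() fallbacks; an architecture that embeds struct kvm in a larger vendor structure supplies its own allocator instead (guarded by __KVM_HAVE_ARCH_VM_ALLOC). A hypothetical override, assuming that guard is defined by the arch header and that the wrapper struct is invented here:

struct my_arch_vm {
        struct kvm kvm;         /* must stay first */
        unsigned long arch_state;
};

static inline struct kvm *kvm_arch_alloc_vm(void)
{
        /* kvfree() in __kvm_arch_free_vm() copes with either allocator. */
        return kzalloc(sizeof(struct my_arch_vm), GFP_KERNEL_ACCOUNT);
}
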
1504 static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm) in kvm_arch_flush_remote_tlbs() argument
1509 int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
1513 static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, in kvm_arch_flush_remote_tlbs_range() argument
1519 int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
1523 void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
1524 void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
1525 bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
1527 static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm) in kvm_arch_register_noncoherent_dma() argument
1531 static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm) in kvm_arch_unregister_noncoherent_dma() argument
1535 static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm) in kvm_arch_has_noncoherent_dma() argument
1541 void kvm_arch_start_assignment(struct kvm *kvm);
1542 void kvm_arch_end_assignment(struct kvm *kvm);
1543 bool kvm_arch_has_assigned_device(struct kvm *kvm);
1545 static inline void kvm_arch_start_assignment(struct kvm *kvm) in kvm_arch_start_assignment() argument
1549 static inline void kvm_arch_end_assignment(struct kvm *kvm) in kvm_arch_end_assignment() argument
1553 static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm) in kvm_arch_has_assigned_device() argument
1588 bool kvm_arch_intc_initialized(struct kvm *kvm);
1590 static inline bool kvm_arch_intc_initialized(struct kvm *kvm) in kvm_arch_intc_initialized() argument
1606 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
1607 void kvm_arch_destroy_vm(struct kvm *kvm);
1608 void kvm_arch_sync_events(struct kvm *kvm);
1621 int kvm_irq_map_gsi(struct kvm *kvm,
1623 int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);
1625 int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
1627 int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
1630 struct kvm *kvm, int irq_source_id,
1632 bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
1633 void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
1634 void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
1635 void kvm_register_irq_ack_notifier(struct kvm *kvm,
1637 void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
1639 int kvm_request_irq_source_id(struct kvm *kvm);
1640 void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
1641 bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
1731 static inline int memslot_id(struct kvm *kvm, gfn_t gfn) in memslot_id() argument
1733 return gfn_to_memslot(kvm, gfn)->id; in memslot_id()
1759 static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa) in kvm_is_error_gpa() argument
1761 unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa)); in kvm_is_error_gpa()
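
kvm_is_error_gpa() simply checks whether any memslot provides a host mapping for the gpa, which makes it a cheap sanity check on guest-supplied addresses. Hypothetical use before zeroing a guest buffer:

static int my_clear_guest_buf(struct kvm *kvm, gpa_t gpa, unsigned long len)
{
        if (kvm_is_error_gpa(kvm, gpa))
                return -EINVAL;

        return kvm_clear_guest(kvm, gpa, len);
}
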
1772 struct kvm *kvm; member
1951 static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) in mmu_invalidate_retry() argument
1953 if (unlikely(kvm->mmu_invalidate_in_progress)) in mmu_invalidate_retry()
1968 if (kvm->mmu_invalidate_seq != mmu_seq) in mmu_invalidate_retry()
1973 static inline int mmu_invalidate_retry_hva(struct kvm *kvm, in mmu_invalidate_retry_hva() argument
1977 lockdep_assert_held(&kvm->mmu_lock); in mmu_invalidate_retry_hva()
1984 if (unlikely(kvm->mmu_invalidate_in_progress) && in mmu_invalidate_retry_hva()
1985 hva >= kvm->mmu_invalidate_range_start && in mmu_invalidate_retry_hva()
1986 hva < kvm->mmu_invalidate_range_end) in mmu_invalidate_retry_hva()
1988 if (kvm->mmu_invalidate_seq != mmu_seq) in mmu_invalidate_retry_hva()
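
mmu_invalidate_retry() implements the classic MMU-notifier race check: a fault handler samples mmu_invalidate_seq, resolves the pfn outside mmu_lock, then rechecks under mmu_lock and retries if an invalidation ran or is still in progress. A simplified sketch of that pattern; helpers other than those listed above are hypothetical, and mmu_lock is taken as a rwlock here, which depends on the architecture:

static int my_map_gfn(struct kvm *kvm, gfn_t gfn)
{
        unsigned long mmu_seq;
        kvm_pfn_t pfn;

retry:
        mmu_seq = kvm->mmu_invalidate_seq;
        smp_rmb();

        /* May sleep; must not hold mmu_lock. */
        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_pfn(pfn))
                return -EFAULT;

        write_lock(&kvm->mmu_lock);
        if (mmu_invalidate_retry(kvm, mmu_seq)) {
                /* An invalidation raced with us; drop the pfn and retry. */
                write_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                goto retry;
        }

        /* ... install the mapping for pfn ... */

        write_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return 0;
}
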
1998 bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
1999 int kvm_set_irq_routing(struct kvm *kvm,
2003 int kvm_set_routing_entry(struct kvm *kvm,
2006 void kvm_free_irq_routing(struct kvm *kvm);
2010 static inline void kvm_free_irq_routing(struct kvm *kvm) {} in kvm_free_irq_routing() argument
2014 int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
2018 void kvm_eventfd_init(struct kvm *kvm);
2019 int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);
2022 int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
2023 void kvm_irqfd_release(struct kvm *kvm);
2024 bool kvm_notify_irqfd_resampler(struct kvm *kvm,
2027 void kvm_irq_routing_update(struct kvm *);
2029 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
2034 static inline void kvm_irqfd_release(struct kvm *kvm) {} in kvm_irqfd_release() argument
2036 static inline bool kvm_notify_irqfd_resampler(struct kvm *kvm, in kvm_notify_irqfd_resampler() argument
2046 static inline void kvm_eventfd_init(struct kvm *kvm) {} in kvm_eventfd_init() argument
2048 static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args) in kvm_irqfd() argument
2053 static inline void kvm_irqfd_release(struct kvm *kvm) {} in kvm_irqfd_release() argument
2056 static inline void kvm_irq_routing_update(struct kvm *kvm) in kvm_irq_routing_update() argument
2061 static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args) in kvm_ioeventfd() argument
2068 void kvm_arch_irq_routing_update(struct kvm *kvm);
2135 struct kvm *kvm; member
2231 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
2273 void kvm_arch_guest_memory_reclaimed(struct kvm *kvm);
2284 typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
2286 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,