Lines matching refs:kvm

Identifier cross-reference output: every line of KVM's core VM code (virt/kvm/kvm_main.c) that references the identifier kvm. Each entry shows the source line number, the matching code, and the enclosing function; "argument" and "local" mark the kind of reference, and mid-signature truncation is the indexer's, not the source's.

154 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
158 __weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, in kvm_arch_mmu_notifier_invalidate_range() argument
254 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, in kvm_make_vcpus_request_mask() argument
264 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_vcpus_request_mask()
306 bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req, in kvm_make_all_cpus_request_except() argument
314 called = kvm_make_vcpus_request_mask(kvm, req, except, NULL, cpus); in kvm_make_all_cpus_request_except()
320 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument
322 return kvm_make_all_cpus_request_except(kvm, req, NULL); in kvm_make_all_cpus_request()
327 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
329 ++kvm->stat.generic.remote_tlb_flush_requests; in kvm_flush_remote_tlbs()
342 if (!kvm_arch_flush_remote_tlb(kvm) in kvm_flush_remote_tlbs()
343 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs()
344 ++kvm->stat.generic.remote_tlb_flush; in kvm_flush_remote_tlbs()
349 void kvm_reload_remote_mmus(struct kvm *kvm) in kvm_reload_remote_mmus() argument
351 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); in kvm_reload_remote_mmus()
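The request machinery above is one half of a producer/consumer pair: kvm_flush_remote_tlbs() and kvm_reload_remote_mmus() post a KVM_REQ_* bit to every vCPU and kick the ones that are running. The consuming half lives in each arch's run loop, which drains pending requests before entering the guest. A minimal sketch of that consumer side, using the real kvm_request_pending()/kvm_check_request() helpers but with hypothetical arch hooks:

#include <linux/kvm_host.h>

/* Sketch: how an arch vcpu_run loop typically consumes posted requests.
 * kvm_check_request() clears the bit and returns true if it was set.
 */
static void example_service_requests(struct kvm_vcpu *vcpu)
{
	if (!kvm_request_pending(vcpu))
		return;

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		example_flush_local_tlb(vcpu);	/* hypothetical arch hook */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
		example_reload_mmu(vcpu);	/* hypothetical arch hook */
}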
409 static void kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
413 vcpu->kvm = kvm; in kvm_vcpu_init()
448 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) in mmu_notifier_to_kvm()
450 return container_of(mn, struct kvm, mmu_notifier); in mmu_notifier_to_kvm()
457 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range() local
460 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_range()
461 kvm_arch_mmu_notifier_invalidate_range(kvm, start, end); in kvm_mmu_notifier_invalidate_range()
462 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_range()
465 typedef bool (*hva_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range);
467 typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start,
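__kvm_handle_hva_range() below walks every memslot overlapping a host virtual address range, converts each overlap into a gfn range, and invokes a handler of type hva_handler_t. A conforming arch handler receives a struct kvm_gfn_range and returns whether a remote TLB flush is needed. A minimal sketch, assuming the 5.14-era struct fields (slot, start, end) and with a hypothetical per-gfn zap helper:

#include <linux/kvm_host.h>

/* Sketch of an hva_handler_t implementation, as arch code plugs in for
 * unmap-style notifier callbacks. Returning true requests a TLB flush.
 */
bool example_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	bool flush = false;
	gfn_t gfn;

	for (gfn = range->start; gfn < range->end; gfn++)
		flush |= example_zap_one_gfn(kvm, range->slot, gfn); /* hypothetical */

	return flush;
}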
493 static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, in __kvm_handle_hva_range() argument
507 idx = srcu_read_lock(&kvm->srcu); in __kvm_handle_hva_range()
510 slots = __kvm_memslots(kvm, i); in __kvm_handle_hva_range()
539 KVM_MMU_LOCK(kvm); in __kvm_handle_hva_range()
541 range->on_lock(kvm, range->start, range->end); in __kvm_handle_hva_range()
545 ret |= range->handler(kvm, &gfn_range); in __kvm_handle_hva_range()
550 kvm_flush_remote_tlbs(kvm); in __kvm_handle_hva_range()
553 KVM_MMU_UNLOCK(kvm); in __kvm_handle_hva_range()
555 srcu_read_unlock(&kvm->srcu, idx); in __kvm_handle_hva_range()
567 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_handle_hva_range() local
578 return __kvm_handle_hva_range(kvm, &range); in kvm_handle_hva_range()
586 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_handle_hva_range_no_flush() local
597 return __kvm_handle_hva_range(kvm, &range); in kvm_handle_hva_range_no_flush()
604 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_change_pte() local
615 WARN_ON_ONCE(!READ_ONCE(kvm->mn_active_invalidate_count)); in kvm_mmu_notifier_change_pte()
616 if (!READ_ONCE(kvm->mmu_notifier_count)) in kvm_mmu_notifier_change_pte()
622 void kvm_inc_notifier_count(struct kvm *kvm, unsigned long start, in kvm_inc_notifier_count() argument
630 kvm->mmu_notifier_count++; in kvm_inc_notifier_count()
631 if (likely(kvm->mmu_notifier_count == 1)) { in kvm_inc_notifier_count()
632 kvm->mmu_notifier_range_start = start; in kvm_inc_notifier_count()
633 kvm->mmu_notifier_range_end = end; in kvm_inc_notifier_count()
644 kvm->mmu_notifier_range_start = in kvm_inc_notifier_count()
645 min(kvm->mmu_notifier_range_start, start); in kvm_inc_notifier_count()
646 kvm->mmu_notifier_range_end = in kvm_inc_notifier_count()
647 max(kvm->mmu_notifier_range_end, end); in kvm_inc_notifier_count()
654 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_start() local
675 spin_lock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_start()
676 kvm->mn_active_invalidate_count++; in kvm_mmu_notifier_invalidate_range_start()
677 spin_unlock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_start()
679 __kvm_handle_hva_range(kvm, &hva_range); in kvm_mmu_notifier_invalidate_range_start()
684 void kvm_dec_notifier_count(struct kvm *kvm, unsigned long start, in kvm_dec_notifier_count() argument
692 kvm->mmu_notifier_seq++; in kvm_dec_notifier_count()
699 kvm->mmu_notifier_count--; in kvm_dec_notifier_count()
705 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_end() local
717 __kvm_handle_hva_range(kvm, &hva_range); in kvm_mmu_notifier_invalidate_range_end()
720 spin_lock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_end()
721 wake = (--kvm->mn_active_invalidate_count == 0); in kvm_mmu_notifier_invalidate_range_end()
722 spin_unlock(&kvm->mn_invalidate_lock); in kvm_mmu_notifier_invalidate_range_end()
729 rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); in kvm_mmu_notifier_invalidate_range_end()
731 BUG_ON(kvm->mmu_notifier_count < 0); in kvm_mmu_notifier_invalidate_range_end()
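kvm_inc_notifier_count()/kvm_dec_notifier_count() implement the classic invalidate_range_start/end protocol: mmu_notifier_count is non-zero while an invalidation is in flight, and mmu_notifier_seq is bumped when one completes. Arch page-fault handlers pair with this by sampling the sequence before translating a gfn and re-checking it under the MMU lock before installing the mapping. A sketch of that retry pattern, assuming the 5.15-era mmu_notifier_retry() helper and the same KVM_MMU_LOCK macros the listing uses:

#include <linux/kvm_host.h>

/* Sketch: the fault-side retry pattern that pairs with the notifier
 * count/seq updates above.
 */
static int example_map_fault(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long mmu_seq;
	kvm_pfn_t pfn;

	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();			/* read the seq before translating */

	pfn = gfn_to_pfn(kvm, gfn);	/* may sleep */
	if (is_error_pfn(pfn))
		return -EFAULT;

	KVM_MMU_LOCK(kvm);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		KVM_MMU_UNLOCK(kvm);
		kvm_release_pfn_clean(pfn);
		return -EAGAIN;		/* invalidation raced; retry the fault */
	}
	/* ... install the mapping ... */
	KVM_MMU_UNLOCK(kvm);
	kvm_release_pfn_clean(pfn);
	return 0;
}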
780 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_release() local
783 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_release()
784 kvm_arch_flush_shadow_all(kvm); in kvm_mmu_notifier_release()
785 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_release()
799 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
801 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; in kvm_init_mmu_notifier()
802 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); in kvm_init_mmu_notifier()
807 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
819 struct kvm *kvm = container_of(bl, struct kvm, pm_notifier); in kvm_pm_notifier_call() local
821 return kvm_arch_pm_notifier(kvm, state); in kvm_pm_notifier_call()
824 static void kvm_init_pm_notifier(struct kvm *kvm) in kvm_init_pm_notifier() argument
826 kvm->pm_notifier.notifier_call = kvm_pm_notifier_call; in kvm_init_pm_notifier()
828 kvm->pm_notifier.priority = INT_MAX; in kvm_init_pm_notifier()
829 register_pm_notifier(&kvm->pm_notifier); in kvm_init_pm_notifier()
832 static void kvm_destroy_pm_notifier(struct kvm *kvm) in kvm_destroy_pm_notifier() argument
834 unregister_pm_notifier(&kvm->pm_notifier); in kvm_destroy_pm_notifier()
837 static void kvm_init_pm_notifier(struct kvm *kvm) in kvm_init_pm_notifier() argument
841 static void kvm_destroy_pm_notifier(struct kvm *kvm) in kvm_destroy_pm_notifier() argument
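With CONFIG_HAVE_KVM_PM_NOTIFIER, kvm_init_pm_notifier() registers a maximum-priority PM notifier whose callback forwards suspend/hibernate transitions to kvm_arch_pm_notifier(), per the container_of() plumbing at line 819. An arch could implement the hook roughly as below; the quiesce helper is hypothetical:

#include <linux/kvm_host.h>
#include <linux/notifier.h>
#include <linux/suspend.h>

/* Sketch of the arch callback invoked from kvm_pm_notifier_call(). */
int kvm_arch_pm_notifier(struct kvm *kvm, unsigned long state)
{
	switch (state) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		example_quiesce_guest_clocks(kvm);	/* hypothetical */
		break;
	}
	return NOTIFY_DONE;
}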
870 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) in kvm_free_memslot() argument
874 kvm_arch_free_memslot(kvm, slot); in kvm_free_memslot()
880 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) in kvm_free_memslots() argument
888 kvm_free_memslot(kvm, memslot); in kvm_free_memslots()
906 static void kvm_destroy_vm_debugfs(struct kvm *kvm) in kvm_destroy_vm_debugfs() argument
912 if (!kvm->debugfs_dentry) in kvm_destroy_vm_debugfs()
915 debugfs_remove_recursive(kvm->debugfs_dentry); in kvm_destroy_vm_debugfs()
917 if (kvm->debugfs_stat_data) { in kvm_destroy_vm_debugfs()
919 kfree(kvm->debugfs_stat_data[i]); in kvm_destroy_vm_debugfs()
920 kfree(kvm->debugfs_stat_data); in kvm_destroy_vm_debugfs()
924 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) in kvm_create_vm_debugfs() argument
952 kvm->debugfs_dentry = dent; in kvm_create_vm_debugfs()
953 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, in kvm_create_vm_debugfs()
954 sizeof(*kvm->debugfs_stat_data), in kvm_create_vm_debugfs()
956 if (!kvm->debugfs_stat_data) in kvm_create_vm_debugfs()
965 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
968 kvm->debugfs_stat_data[i] = stat_data; in kvm_create_vm_debugfs()
970 kvm->debugfs_dentry, stat_data, in kvm_create_vm_debugfs()
980 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
983 kvm->debugfs_stat_data[i + kvm_vm_stats_header.num_desc] = stat_data; in kvm_create_vm_debugfs()
985 kvm->debugfs_dentry, stat_data, in kvm_create_vm_debugfs()
989 ret = kvm_arch_create_vm_debugfs(kvm); in kvm_create_vm_debugfs()
991 kvm_destroy_vm_debugfs(kvm); in kvm_create_vm_debugfs()
1002 int __weak kvm_arch_post_init_vm(struct kvm *kvm) in kvm_arch_post_init_vm() argument
1011 void __weak kvm_arch_pre_destroy_vm(struct kvm *kvm) in kvm_arch_pre_destroy_vm() argument
1021 int __weak kvm_arch_create_vm_debugfs(struct kvm *kvm) in kvm_arch_create_vm_debugfs() argument
1026 static struct kvm *kvm_create_vm(unsigned long type) in kvm_create_vm()
1028 struct kvm *kvm = kvm_arch_alloc_vm(); in kvm_create_vm() local
1032 if (!kvm) in kvm_create_vm()
1035 KVM_MMU_LOCK_INIT(kvm); in kvm_create_vm()
1037 kvm->mm = current->mm; in kvm_create_vm()
1038 kvm_eventfd_init(kvm); in kvm_create_vm()
1039 mutex_init(&kvm->lock); in kvm_create_vm()
1040 mutex_init(&kvm->irq_lock); in kvm_create_vm()
1041 mutex_init(&kvm->slots_lock); in kvm_create_vm()
1042 mutex_init(&kvm->slots_arch_lock); in kvm_create_vm()
1043 spin_lock_init(&kvm->mn_invalidate_lock); in kvm_create_vm()
1044 rcuwait_init(&kvm->mn_memslots_update_rcuwait); in kvm_create_vm()
1046 INIT_LIST_HEAD(&kvm->devices); in kvm_create_vm()
1050 if (init_srcu_struct(&kvm->srcu)) in kvm_create_vm()
1052 if (init_srcu_struct(&kvm->irq_srcu)) in kvm_create_vm()
1055 refcount_set(&kvm->users_count, 1); in kvm_create_vm()
1063 rcu_assign_pointer(kvm->memslots[i], slots); in kvm_create_vm()
1067 rcu_assign_pointer(kvm->buses[i], in kvm_create_vm()
1069 if (!kvm->buses[i]) in kvm_create_vm()
1073 kvm->max_halt_poll_ns = halt_poll_ns; in kvm_create_vm()
1075 r = kvm_arch_init_vm(kvm, type); in kvm_create_vm()
1084 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); in kvm_create_vm()
1087 r = kvm_init_mmu_notifier(kvm); in kvm_create_vm()
1091 r = kvm_arch_post_init_vm(kvm); in kvm_create_vm()
1096 list_add(&kvm->vm_list, &vm_list); in kvm_create_vm()
1100 kvm_init_pm_notifier(kvm); in kvm_create_vm()
1102 return kvm; in kvm_create_vm()
1106 if (kvm->mmu_notifier.ops) in kvm_create_vm()
1107 mmu_notifier_unregister(&kvm->mmu_notifier, current->mm); in kvm_create_vm()
1112 kvm_arch_destroy_vm(kvm); in kvm_create_vm()
1114 WARN_ON_ONCE(!refcount_dec_and_test(&kvm->users_count)); in kvm_create_vm()
1116 kfree(kvm_get_bus(kvm, i)); in kvm_create_vm()
1118 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); in kvm_create_vm()
1119 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_create_vm()
1121 cleanup_srcu_struct(&kvm->srcu); in kvm_create_vm()
1123 kvm_arch_free_vm(kvm); in kvm_create_vm()
1128 static void kvm_destroy_devices(struct kvm *kvm) in kvm_destroy_devices() argument
1137 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { in kvm_destroy_devices()
1143 static void kvm_destroy_vm(struct kvm *kvm) in kvm_destroy_vm() argument
1146 struct mm_struct *mm = kvm->mm; in kvm_destroy_vm()
1148 kvm_destroy_pm_notifier(kvm); in kvm_destroy_vm()
1149 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); in kvm_destroy_vm()
1150 kvm_destroy_vm_debugfs(kvm); in kvm_destroy_vm()
1151 kvm_arch_sync_events(kvm); in kvm_destroy_vm()
1153 list_del(&kvm->vm_list); in kvm_destroy_vm()
1155 kvm_arch_pre_destroy_vm(kvm); in kvm_destroy_vm()
1157 kvm_free_irq_routing(kvm); in kvm_destroy_vm()
1159 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); in kvm_destroy_vm()
1163 kvm->buses[i] = NULL; in kvm_destroy_vm()
1165 kvm_coalesced_mmio_free(kvm); in kvm_destroy_vm()
1167 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); in kvm_destroy_vm()
1176 WARN_ON(rcuwait_active(&kvm->mn_memslots_update_rcuwait)); in kvm_destroy_vm()
1177 kvm->mn_active_invalidate_count = 0; in kvm_destroy_vm()
1179 kvm_arch_flush_shadow_all(kvm); in kvm_destroy_vm()
1181 kvm_arch_destroy_vm(kvm); in kvm_destroy_vm()
1182 kvm_destroy_devices(kvm); in kvm_destroy_vm()
1184 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); in kvm_destroy_vm()
1185 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_destroy_vm()
1186 cleanup_srcu_struct(&kvm->srcu); in kvm_destroy_vm()
1187 kvm_arch_free_vm(kvm); in kvm_destroy_vm()
1193 void kvm_get_kvm(struct kvm *kvm) in kvm_get_kvm() argument
1195 refcount_inc(&kvm->users_count); in kvm_get_kvm()
1203 bool kvm_get_kvm_safe(struct kvm *kvm) in kvm_get_kvm_safe() argument
1205 return refcount_inc_not_zero(&kvm->users_count); in kvm_get_kvm_safe()
1209 void kvm_put_kvm(struct kvm *kvm) in kvm_put_kvm() argument
1211 if (refcount_dec_and_test(&kvm->users_count)) in kvm_put_kvm()
1212 kvm_destroy_vm(kvm); in kvm_put_kvm()
1223 void kvm_put_kvm_no_destroy(struct kvm *kvm) in kvm_put_kvm_no_destroy() argument
1225 WARN_ON(refcount_dec_and_test(&kvm->users_count)); in kvm_put_kvm_no_destroy()
1231 struct kvm *kvm = filp->private_data; in kvm_vm_release() local
1233 kvm_irqfd_release(kvm); in kvm_vm_release()
1235 kvm_put_kvm(kvm); in kvm_vm_release()
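kvm_get_kvm()/kvm_put_kvm() maintain users_count, and the last put runs kvm_destroy_vm(). kvm_get_kvm_safe() exists for contexts that can race with teardown: it fails rather than resurrecting a zero refcount. kvm_put_kvm_no_destroy() undoes a get that is known not to be the final reference. A minimal sketch of the safe-get pattern:

#include <linux/kvm_host.h>

/* Sketch: taking a temporary reference from a context that can race
 * with VM destruction, e.g. a debugfs or async worker path.
 */
static void example_poke_vm(struct kvm *kvm)
{
	if (!kvm_get_kvm_safe(kvm))
		return;		/* VM is already being torn down */

	example_do_work(kvm);	/* hypothetical */

	kvm_put_kvm(kvm);	/* may destroy the VM if we were the last user */
}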
1430 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, in install_new_memslots() argument
1433 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); in install_new_memslots()
1444 spin_lock(&kvm->mn_invalidate_lock); in install_new_memslots()
1445 prepare_to_rcuwait(&kvm->mn_memslots_update_rcuwait); in install_new_memslots()
1446 while (kvm->mn_active_invalidate_count) { in install_new_memslots()
1448 spin_unlock(&kvm->mn_invalidate_lock); in install_new_memslots()
1450 spin_lock(&kvm->mn_invalidate_lock); in install_new_memslots()
1452 finish_rcuwait(&kvm->mn_memslots_update_rcuwait); in install_new_memslots()
1453 rcu_assign_pointer(kvm->memslots[as_id], slots); in install_new_memslots()
1454 spin_unlock(&kvm->mn_invalidate_lock); in install_new_memslots()
1461 mutex_unlock(&kvm->slots_arch_lock); in install_new_memslots()
1463 synchronize_srcu_expedited(&kvm->srcu); in install_new_memslots()
1482 kvm_arch_memslots_updated(kvm, gen); in install_new_memslots()
1524 static int kvm_set_memslot(struct kvm *kvm, in kvm_set_memslot() argument
1548 mutex_lock(&kvm->slots_arch_lock); in kvm_set_memslot()
1550 slots = kvm_dup_memslots(__kvm_memslots(kvm, as_id), change); in kvm_set_memslot()
1552 mutex_unlock(&kvm->slots_arch_lock); in kvm_set_memslot()
1569 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1578 kvm_arch_flush_shadow_memslot(kvm, slot); in kvm_set_memslot()
1581 mutex_lock(&kvm->slots_arch_lock); in kvm_set_memslot()
1589 kvm_copy_memslots(slots, __kvm_memslots(kvm, as_id)); in kvm_set_memslot()
1592 r = kvm_arch_prepare_memory_region(kvm, new, mem, change); in kvm_set_memslot()
1597 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1599 kvm_arch_commit_memory_region(kvm, mem, old, new, change); in kvm_set_memslot()
1608 slots = install_new_memslots(kvm, as_id, slots); in kvm_set_memslot()
1610 mutex_unlock(&kvm->slots_arch_lock); in kvm_set_memslot()
1616 static int kvm_delete_memslot(struct kvm *kvm, in kvm_delete_memslot() argument
1634 r = kvm_set_memslot(kvm, mem, old, &new, as_id, KVM_MR_DELETE); in kvm_delete_memslot()
1638 kvm_free_memslot(kvm, old); in kvm_delete_memslot()
1650 int __kvm_set_memory_region(struct kvm *kvm, in __kvm_set_memory_region() argument
1688 tmp = id_to_memslot(__kvm_memslots(kvm, as_id), id); in __kvm_set_memory_region()
1698 return kvm_delete_memslot(kvm, mem, &old, as_id); in __kvm_set_memory_region()
1734 kvm_for_each_memslot(tmp, __kvm_memslots(kvm, as_id)) { in __kvm_set_memory_region()
1746 else if (!new.dirty_bitmap && !kvm->dirty_ring_size) { in __kvm_set_memory_region()
1751 if (kvm_dirty_log_manual_protect_and_init_set(kvm)) in __kvm_set_memory_region()
1755 r = kvm_set_memslot(kvm, mem, &old, &new, as_id, change); in __kvm_set_memory_region()
1770 int kvm_set_memory_region(struct kvm *kvm, in kvm_set_memory_region() argument
1775 mutex_lock(&kvm->slots_lock); in kvm_set_memory_region()
1776 r = __kvm_set_memory_region(kvm, mem); in kvm_set_memory_region()
1777 mutex_unlock(&kvm->slots_lock); in kvm_set_memory_region()
1782 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, in kvm_vm_ioctl_set_memory_region() argument
1788 return kvm_set_memory_region(kvm, mem); in kvm_vm_ioctl_set_memory_region()
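__kvm_set_memory_region() is the single entry point for creating, moving, resizing, and deleting memslots (a zero memory_size deletes, via kvm_delete_memslot()); kvm_set_memory_region() is the wrapper that takes slots_lock, and the KVM_SET_USER_MEMORY_REGION ioctl funnels through it. A sketch of an in-kernel caller; the slot number, address, and size are illustrative:

#include <linux/kvm_host.h>

/* Sketch: mapping a host buffer into guest physical address space. */
static int example_add_slot(struct kvm *kvm, unsigned long uaddr)
{
	struct kvm_userspace_memory_region mem = {
		.slot            = 0,
		.flags           = 0,
		.guest_phys_addr = 0x100000000ULL,	/* illustrative gpa */
		.memory_size     = 2UL << 20,		/* 2 MiB */
		.userspace_addr  = uaddr,
	};

	return kvm_set_memory_region(kvm, &mem);	/* takes slots_lock */
}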
1799 int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log, in kvm_get_dirty_log() argument
1808 if (kvm->dirty_ring_size) in kvm_get_dirty_log()
1819 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log()
1824 kvm_arch_sync_dirty_log(kvm, *memslot); in kvm_get_dirty_log()
1862 static int kvm_get_dirty_log_protect(struct kvm *kvm, struct kvm_dirty_log *log) in kvm_get_dirty_log_protect() argument
1873 if (kvm->dirty_ring_size) in kvm_get_dirty_log_protect()
1881 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log_protect()
1888 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_get_dirty_log_protect()
1892 if (kvm->manual_dirty_log_protect) { in kvm_get_dirty_log_protect()
1906 KVM_MMU_LOCK(kvm); in kvm_get_dirty_log_protect()
1919 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_get_dirty_log_protect()
1922 KVM_MMU_UNLOCK(kvm); in kvm_get_dirty_log_protect()
1926 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); in kvm_get_dirty_log_protect()
1953 static int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_get_dirty_log() argument
1958 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
1960 r = kvm_get_dirty_log_protect(kvm, log); in kvm_vm_ioctl_get_dirty_log()
1962 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
1972 static int kvm_clear_dirty_log_protect(struct kvm *kvm, in kvm_clear_dirty_log_protect() argument
1985 if (kvm->dirty_ring_size) in kvm_clear_dirty_log_protect()
1996 slots = __kvm_memslots(kvm, as_id); in kvm_clear_dirty_log_protect()
2010 kvm_arch_sync_dirty_log(kvm, memslot); in kvm_clear_dirty_log_protect()
2017 KVM_MMU_LOCK(kvm); in kvm_clear_dirty_log_protect()
2036 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_clear_dirty_log_protect()
2040 KVM_MMU_UNLOCK(kvm); in kvm_clear_dirty_log_protect()
2043 kvm_arch_flush_remote_tlbs_memslot(kvm, memslot); in kvm_clear_dirty_log_protect()
2048 static int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, in kvm_vm_ioctl_clear_dirty_log() argument
2053 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
2055 r = kvm_clear_dirty_log_protect(kvm, log); in kvm_vm_ioctl_clear_dirty_log()
2057 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_clear_dirty_log()
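kvm_vm_ioctl_get_dirty_log() backs the KVM_GET_DIRTY_LOG ioctl: it syncs the arch dirty state and, unless manual protection is enabled, write-protects the slot before copying the bitmap out; KVM_CLEAR_DIRTY_LOG re-protects only the pages userspace has harvested. The userspace side is roughly the following sketch, where the caller supplies a bitmap with one bit per page in the slot:

/* Userspace sketch: fetch the dirty bitmap for slot 0 of a VM fd. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

int example_get_dirty(int vm_fd, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot         = 0,
		.dirty_bitmap = bitmap,	/* one bit per page, sized by caller */
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}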
2062 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
2064 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
2093 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
2095 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
2166 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
2168 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
2197 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
2199 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
2465 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, in gfn_to_pfn_prot() argument
2468 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, in gfn_to_pfn_prot()
2491 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn() argument
2493 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn()
2533 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
2537 pfn = gfn_to_pfn(kvm, gfn); in gfn_to_page()
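gfn_to_memslot(), gfn_to_hva(), and gfn_to_pfn() form the translation ladder from guest frame number to memslot to host virtual address to host page frame, with gfn_to_page() as a convenience on top. A sketch of the common lookup-and-release pattern; error checks use the real helpers:

#include <linux/kvm_host.h>

/* Sketch: translate a gfn and drop the pin that gfn_to_pfn() takes. */
static int example_touch_gfn(struct kvm *kvm, gfn_t gfn)
{
	unsigned long hva = gfn_to_hva(kvm, gfn);
	kvm_pfn_t pfn;

	if (kvm_is_error_hva(hva))
		return -EFAULT;		/* gfn is not backed by a memslot */

	pfn = gfn_to_pfn(kvm, gfn);	/* pins the backing page */
	if (is_error_pfn(pfn))
		return -EFAULT;

	/* ... use the pfn ... */

	kvm_release_pfn_clean(pfn);
	return 0;
}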
2626 return __kvm_map_gfn(kvm_memslots(vcpu->kvm), gfn, map, in kvm_map_gfn()
2638 static void __kvm_unmap_gfn(struct kvm *kvm, in __kvm_unmap_gfn() argument
2664 mark_page_dirty_in_slot(kvm, memslot, map->gfn); in __kvm_unmap_gfn()
2678 __kvm_unmap_gfn(vcpu->kvm, gfn_to_memslot(vcpu->kvm, map->gfn), map, in kvm_unmap_gfn()
2686 __kvm_unmap_gfn(vcpu->kvm, kvm_vcpu_gfn_to_memslot(vcpu, map->gfn), in kvm_vcpu_unmap()
2768 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
2771 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
2786 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) in kvm_read_guest() argument
2794 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
2854 static int __kvm_write_guest_page(struct kvm *kvm, in __kvm_write_guest_page() argument
2867 mark_page_dirty_in_slot(kvm, memslot, gfn); in __kvm_write_guest_page()
2871 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
2874 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
2876 return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len); in kvm_write_guest_page()
2885 return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len); in kvm_vcpu_write_guest_page()
2889 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, in kvm_write_guest() argument
2898 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
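kvm_read_guest() and kvm_write_guest() copy arbitrary byte ranges across page boundaries by splitting the gpa into per-page segments and calling the _page variants; the write path marks each touched page dirty via mark_page_dirty_in_slot(). A minimal round-trip sketch:

#include <linux/kvm_host.h>

/* Sketch: round-trip a small value through guest physical memory. */
static int example_rw_guest(struct kvm *kvm, gpa_t gpa)
{
	u64 in = 0x1234, out = 0;
	int r;

	r = kvm_write_guest(kvm, gpa, &in, sizeof(in));	/* marks pages dirty */
	if (r)
		return r;

	return kvm_read_guest(kvm, gpa, &out, sizeof(out));
}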
2972 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_gfn_to_hva_cache_init() argument
2975 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init()
2980 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_offset_cached() argument
2984 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_offset_cached()
2999 return kvm_write_guest(kvm, gpa, data, len); in kvm_write_guest_offset_cached()
3004 mark_page_dirty_in_slot(kvm, ghc->memslot, gpa >> PAGE_SHIFT); in kvm_write_guest_offset_cached()
3010 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_cached() argument
3013 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_write_guest_cached()
3017 int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_offset_cached() argument
3021 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_offset_cached()
3036 return kvm_read_guest(kvm, gpa, data, len); in kvm_read_guest_offset_cached()
3046 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_cached() argument
3049 return kvm_read_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_read_guest_cached()
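The gfn_to_hva_cache helpers avoid a memslot lookup on every access to a hot guest address (steal time, pvclock, and similar shared pages): kvm_gfn_to_hva_cache_init() resolves and caches the translation, and the _cached read/write helpers fall back to the slow kvm_read_guest()/kvm_write_guest() path when the cached memslot generation is stale. A sketch:

#include <linux/kvm_host.h>

/* Sketch: cache a frequently-written guest address, then write through it. */
static int example_cached_write(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				gpa_t gpa)
{
	u32 val = 1;
	int r;

	r = kvm_gfn_to_hva_cache_init(kvm, ghc, gpa, sizeof(val));
	if (r)
		return r;

	/* fast path while the cached memslot generation is still valid */
	return kvm_write_guest_cached(kvm, ghc, &val, sizeof(val));
}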
3053 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) in kvm_clear_guest() argument
3062 ret = kvm_write_guest_page(kvm, gfn, zero_page, offset, len); in kvm_clear_guest()
3073 void mark_page_dirty_in_slot(struct kvm *kvm, in mark_page_dirty_in_slot() argument
3081 if (kvm->dirty_ring_size) in mark_page_dirty_in_slot()
3082 kvm_dirty_ring_push(kvm_dirty_ring_get(kvm), in mark_page_dirty_in_slot()
3090 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
3094 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
3095 mark_page_dirty_in_slot(kvm, memslot, gfn); in mark_page_dirty()
3104 mark_page_dirty_in_slot(vcpu->kvm, memslot, gfn); in kvm_vcpu_mark_page_dirty()
3145 if (val > vcpu->kvm->max_halt_poll_ns) in grow_halt_poll_ns()
3146 val = vcpu->kvm->max_halt_poll_ns; in grow_halt_poll_ns()
3175 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_check_block()
3190 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_check_block()
3273 } else if (vcpu->kvm->max_halt_poll_ns) { in kvm_vcpu_block()
3278 block_ns > vcpu->kvm->max_halt_poll_ns) in kvm_vcpu_block()
3281 else if (vcpu->halt_poll_ns < vcpu->kvm->max_halt_poll_ns && in kvm_vcpu_block()
3282 block_ns < vcpu->kvm->max_halt_poll_ns) in kvm_vcpu_block()
3427 struct kvm *kvm = me->kvm; in kvm_vcpu_on_spin() local
3429 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; in kvm_vcpu_on_spin()
3444 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
3466 kvm->last_boosted_vcpu = i; in kvm_vcpu_on_spin()
3482 static bool kvm_page_in_dirty_ring(struct kvm *kvm, unsigned long pgoff) in kvm_page_in_dirty_ring() argument
3487 kvm->dirty_ring_size / PAGE_SIZE); in kvm_page_in_dirty_ring()
3506 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
3508 else if (kvm_page_in_dirty_ring(vcpu->kvm, vmf->pgoff)) in kvm_vcpu_fault()
3528 if ((kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff) || in kvm_vcpu_mmap()
3529 kvm_page_in_dirty_ring(vcpu->kvm, vma->vm_pgoff + pages - 1)) && in kvm_vcpu_mmap()
3541 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
3575 vcpu->kvm->debugfs_dentry); in kvm_create_vcpu_debugfs()
3584 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) in kvm_vm_ioctl_create_vcpu() argument
3593 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3594 if (kvm->created_vcpus == KVM_MAX_VCPUS) { in kvm_vm_ioctl_create_vcpu()
3595 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3599 kvm->created_vcpus++; in kvm_vm_ioctl_create_vcpu()
3600 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3602 r = kvm_arch_vcpu_precreate(kvm, id); in kvm_vm_ioctl_create_vcpu()
3620 kvm_vcpu_init(vcpu, kvm, id); in kvm_vm_ioctl_create_vcpu()
3626 if (kvm->dirty_ring_size) { in kvm_vm_ioctl_create_vcpu()
3628 id, kvm->dirty_ring_size); in kvm_vm_ioctl_create_vcpu()
3633 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3634 if (kvm_get_vcpu_by_id(kvm, id)) { in kvm_vm_ioctl_create_vcpu()
3639 vcpu->vcpu_idx = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
3640 BUG_ON(kvm->vcpus[vcpu->vcpu_idx]); in kvm_vm_ioctl_create_vcpu()
3647 kvm_get_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
3650 kvm_put_kvm_no_destroy(kvm); in kvm_vm_ioctl_create_vcpu()
3654 kvm->vcpus[vcpu->vcpu_idx] = vcpu; in kvm_vm_ioctl_create_vcpu()
3661 atomic_inc(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
3663 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3669 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3678 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
3679 kvm->created_vcpus--; in kvm_vm_ioctl_create_vcpu()
3680 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
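kvm_vm_ioctl_create_vcpu() backs KVM_CREATE_VCPU: it reserves a created_vcpus slot under kvm->lock, allocates and initializes the vCPU, creates its fd, and only then publishes it in kvm->vcpus[] and bumps online_vcpus. The matching userspace sequence is roughly the following sketch (error checks on mmap elided); note KVM_GET_VCPU_MMAP_SIZE is issued on the /dev/kvm system fd, not the VM fd:

/* Userspace sketch: create vCPU 0 and map its shared kvm_run area. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stddef.h>

struct kvm_run *example_create_vcpu(int kvm_fd, int vm_fd, int *vcpu_fd)
{
	int mmap_size;

	*vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0UL);
	if (*vcpu_fd < 0)
		return NULL;

	mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0UL);
	return mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, *vcpu_fd, 0);
}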
3742 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) in kvm_vcpu_ioctl()
3952 if (vcpu->kvm->mm != current->mm || vcpu->kvm->vm_bugged) in kvm_vcpu_compat_ioctl()
4018 if (dev->kvm->mm != current->mm || dev->kvm->vm_bugged) in kvm_device_ioctl()
4039 struct kvm *kvm = dev->kvm; in kvm_device_release() local
4042 mutex_lock(&kvm->lock); in kvm_device_release()
4045 mutex_unlock(&kvm->lock); in kvm_device_release()
4048 kvm_put_kvm(kvm); in kvm_device_release()
4092 static int kvm_ioctl_create_device(struct kvm *kvm, in kvm_ioctl_create_device() argument
4117 dev->kvm = kvm; in kvm_ioctl_create_device()
4119 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
4122 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4126 list_add(&dev->vm_node, &kvm->devices); in kvm_ioctl_create_device()
4127 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4132 kvm_get_kvm(kvm); in kvm_ioctl_create_device()
4135 kvm_put_kvm_no_destroy(kvm); in kvm_ioctl_create_device()
4136 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
4138 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
4147 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) in kvm_vm_ioctl_check_extension_generic() argument
4197 return kvm_vm_ioctl_check_extension(kvm, arg); in kvm_vm_ioctl_check_extension_generic()
4200 static int kvm_vm_ioctl_enable_dirty_log_ring(struct kvm *kvm, u32 size) in kvm_vm_ioctl_enable_dirty_log_ring() argument
4221 if (kvm->dirty_ring_size) in kvm_vm_ioctl_enable_dirty_log_ring()
4224 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_dirty_log_ring()
4226 if (kvm->created_vcpus) { in kvm_vm_ioctl_enable_dirty_log_ring()
4230 kvm->dirty_ring_size = size; in kvm_vm_ioctl_enable_dirty_log_ring()
4234 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_dirty_log_ring()
4238 static int kvm_vm_ioctl_reset_dirty_pages(struct kvm *kvm) in kvm_vm_ioctl_reset_dirty_pages() argument
4244 if (!kvm->dirty_ring_size) in kvm_vm_ioctl_reset_dirty_pages()
4247 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_reset_dirty_pages()
4249 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_vm_ioctl_reset_dirty_pages()
4250 cleared += kvm_dirty_ring_reset(vcpu->kvm, &vcpu->dirty_ring); in kvm_vm_ioctl_reset_dirty_pages()
4252 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_reset_dirty_pages()
4255 kvm_flush_remote_tlbs(kvm); in kvm_vm_ioctl_reset_dirty_pages()
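kvm_vm_ioctl_enable_dirty_log_ring() only accepts the ring size before any vCPU exists, and KVM_RESET_DIRTY_RINGS later collects harvested entries per vCPU, flushing remote TLBs if anything was cleared. Enabling the ring from userspace is a plain KVM_ENABLE_CAP on the VM fd; a sketch:

/* Userspace sketch: enable the dirty ring before creating vCPUs. */
#include <linux/kvm.h>
#include <sys/ioctl.h>

int example_enable_dirty_ring(int vm_fd, __u32 bytes)
{
	struct kvm_enable_cap cap = {
		.cap     = KVM_CAP_DIRTY_LOG_RING,
		.args[0] = bytes,  /* power of two, multiple of page size */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}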
4260 int __attribute__((weak)) kvm_vm_ioctl_enable_cap(struct kvm *kvm, in kvm_vm_ioctl_enable_cap() argument
4266 static int kvm_vm_ioctl_enable_cap_generic(struct kvm *kvm, in kvm_vm_ioctl_enable_cap_generic() argument
4279 kvm->manual_dirty_log_protect = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
4287 kvm->max_halt_poll_ns = cap->args[0]; in kvm_vm_ioctl_enable_cap_generic()
4291 return kvm_vm_ioctl_enable_dirty_log_ring(kvm, cap->args[0]); in kvm_vm_ioctl_enable_cap_generic()
4293 return kvm_vm_ioctl_enable_cap(kvm, cap); in kvm_vm_ioctl_enable_cap_generic()
4300 struct kvm *kvm = file->private_data; in kvm_vm_stats_read() local
4302 return kvm_stats_read(kvm->stats_id, &kvm_vm_stats_header, in kvm_vm_stats_read()
4303 &kvm_vm_stats_desc[0], &kvm->stat, in kvm_vm_stats_read()
4304 sizeof(kvm->stat), user_buffer, size, offset); in kvm_vm_stats_read()
4312 static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) in kvm_vm_ioctl_get_stats_fd() argument
4322 &kvm_vm_stats_fops, kvm, O_RDONLY); in kvm_vm_ioctl_get_stats_fd()
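kvm_vm_ioctl_get_stats_fd() returns an anonymous read-only fd whose contents are the binary stats format served by kvm_stats_read(): a header, then descriptors, then the data block, at offsets the header declares. A userspace sketch, assuming the 5.14-era struct kvm_stats_header layout from linux/kvm.h:

/* Userspace sketch: open the binary stats fd and read its header. */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>

int example_open_stats(int vm_fd, struct kvm_stats_header *hdr)
{
	int stats_fd = ioctl(vm_fd, KVM_GET_STATS_FD, NULL);

	if (stats_fd < 0)
		return stats_fd;

	if (pread(stats_fd, hdr, sizeof(*hdr), 0) != sizeof(*hdr)) {
		close(stats_fd);
		return -1;
	}
	return stats_fd;	/* descriptors and data follow per the header */
}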
4336 struct kvm *kvm = filp->private_data; in kvm_vm_ioctl() local
4340 if (kvm->mm != current->mm || kvm->vm_bugged) in kvm_vm_ioctl()
4344 r = kvm_vm_ioctl_create_vcpu(kvm, arg); in kvm_vm_ioctl()
4352 r = kvm_vm_ioctl_enable_cap_generic(kvm, &cap); in kvm_vm_ioctl()
4363 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); in kvm_vm_ioctl()
4372 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_ioctl()
4382 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_ioctl()
4393 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
4402 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
4412 r = kvm_irqfd(kvm, &data); in kvm_vm_ioctl()
4421 r = kvm_ioeventfd(kvm, &data); in kvm_vm_ioctl()
4431 r = kvm_send_userspace_msi(kvm, &msi); in kvm_vm_ioctl()
4444 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, in kvm_vm_ioctl()
4469 if (!kvm_arch_can_set_irq_routing(kvm)) in kvm_vm_ioctl()
4485 r = kvm_set_irq_routing(kvm, entries, routing.nr, in kvm_vm_ioctl()
4498 r = kvm_ioctl_create_device(kvm, &cd); in kvm_vm_ioctl()
4510 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); in kvm_vm_ioctl()
4513 r = kvm_vm_ioctl_reset_dirty_pages(kvm); in kvm_vm_ioctl()
4516 r = kvm_vm_ioctl_get_stats_fd(kvm); in kvm_vm_ioctl()
4548 struct kvm *kvm = filp->private_data; in kvm_vm_compat_ioctl() local
4551 if (kvm->mm != current->mm || kvm->vm_bugged) in kvm_vm_compat_ioctl()
4568 r = kvm_vm_ioctl_clear_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
4584 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
4610 struct kvm *kvm; in kvm_dev_ioctl_create_vm() local
4613 kvm = kvm_create_vm(type); in kvm_dev_ioctl_create_vm()
4614 if (IS_ERR(kvm)) in kvm_dev_ioctl_create_vm()
4615 return PTR_ERR(kvm); in kvm_dev_ioctl_create_vm()
4617 r = kvm_coalesced_mmio_init(kvm); in kvm_dev_ioctl_create_vm()
4625 snprintf(kvm->stats_id, sizeof(kvm->stats_id), in kvm_dev_ioctl_create_vm()
4628 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); in kvm_dev_ioctl_create_vm()
4641 if (kvm_create_vm_debugfs(kvm, r) < 0) { in kvm_dev_ioctl_create_vm()
4646 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); in kvm_dev_ioctl_create_vm()
4652 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
4912 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
4932 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
4983 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
4991 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
4998 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_register_dev()
5026 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_register_dev()
5027 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_register_dev()
5033 int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_unregister_dev() argument
5039 lockdep_assert_held(&kvm->slots_lock); in kvm_io_bus_unregister_dev()
5041 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_unregister_dev()
5063 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_unregister_dev()
5064 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_unregister_dev()
5080 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_get_dev() argument
5087 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_io_bus_get_dev()
5089 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); in kvm_io_bus_get_dev()
5100 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_io_bus_get_dev()
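kvm_io_bus_register_dev() copies the bus array, inserts the device sorted by address, publishes the new array with RCU, and runs synchronize_srcu_expedited() so in-flight vCPU accesses drain; as the lockdep assertion in the unregister path shows, slots_lock must be held. A device is a struct kvm_io_device bound to ops via kvm_iodevice_init(); a minimal sketch, assuming the post-4.1 ops signatures that take the vcpu:

#include <linux/kvm_host.h>
#include <linux/string.h>
#include <kvm/iodev.h>

/* Sketch: a trivial MMIO device that ignores writes and reads zeros. */
struct example_dev {
	struct kvm_io_device dev;
};

static int example_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			gpa_t addr, int len, void *val)
{
	memset(val, 0, len);
	return 0;
}

static int example_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
			 gpa_t addr, int len, const void *val)
{
	return 0;	/* swallow writes */
}

static const struct kvm_io_device_ops example_ops = {
	.read  = example_read,
	.write = example_write,
};

static int example_register(struct kvm *kvm, struct example_dev *d, gpa_t addr)
{
	int r;

	kvm_iodevice_init(&d->dev, &example_ops);

	mutex_lock(&kvm->slots_lock);	/* required by register/unregister */
	r = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, PAGE_SIZE,
				    &d->dev);
	mutex_unlock(&kvm->slots_lock);
	return r;
}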
5118 if (!kvm_get_kvm_safe(stat_data->kvm)) in kvm_debugfs_open()
5125 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_open()
5138 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_release()
5143 static int kvm_get_stat_per_vm(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vm() argument
5145 *val = *(u64 *)((void *)(&kvm->stat) + offset); in kvm_get_stat_per_vm()
5150 static int kvm_clear_stat_per_vm(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vm() argument
5152 *(u64 *)((void *)(&kvm->stat) + offset) = 0; in kvm_clear_stat_per_vm()
5157 static int kvm_get_stat_per_vcpu(struct kvm *kvm, size_t offset, u64 *val) in kvm_get_stat_per_vcpu() argument
5164 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_get_stat_per_vcpu()
5170 static int kvm_clear_stat_per_vcpu(struct kvm *kvm, size_t offset) in kvm_clear_stat_per_vcpu() argument
5175 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_clear_stat_per_vcpu()
5188 r = kvm_get_stat_per_vm(stat_data->kvm, in kvm_stat_data_get()
5192 r = kvm_get_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_get()
5210 r = kvm_clear_stat_per_vm(stat_data->kvm, in kvm_stat_data_clear()
5214 r = kvm_clear_stat_per_vcpu(stat_data->kvm, in kvm_stat_data_clear()
5241 struct kvm *kvm; in vm_stat_get() local
5246 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_get()
5247 kvm_get_stat_per_vm(kvm, offset, &tmp_val); in vm_stat_get()
5257 struct kvm *kvm; in vm_stat_clear() local
5263 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_clear()
5264 kvm_clear_stat_per_vm(kvm, offset); in vm_stat_clear()
5277 struct kvm *kvm; in vcpu_stat_get() local
5282 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_get()
5283 kvm_get_stat_per_vcpu(kvm, offset, &tmp_val); in vcpu_stat_get()
5293 struct kvm *kvm; in vcpu_stat_clear() local
5299 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_clear()
5300 kvm_clear_stat_per_vcpu(kvm, offset); in vcpu_stat_clear()
5311 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) in kvm_uevent_notify_change() argument
5316 if (!kvm_dev.this_device || !kvm) in kvm_uevent_notify_change()
5339 kvm->userspace_pid = task_pid_nr(current); in kvm_uevent_notify_change()
5343 add_uevent_var(env, "PID=%d", kvm->userspace_pid); in kvm_uevent_notify_change()
5345 if (kvm->debugfs_dentry) { in kvm_uevent_notify_change()
5349 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); in kvm_uevent_notify_change()
5611 struct kvm *kvm; member
5626 struct kvm *kvm = init_context->kvm; in kvm_vm_worker_thread() local
5658 err = thread_fn(kvm, data); in kvm_vm_worker_thread()
5663 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn, in kvm_vm_create_worker_thread() argument
5671 init_context.kvm = kvm; in kvm_vm_create_worker_thread()
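kvm_vm_create_worker_thread() spawns a kthread that runs a kvm_vm_thread_fn_t against the VM, with the init_context handshake above ensuring setup completes before the caller returns. A heavily hedged sketch; the trailing parameters (opaque data, thread name, returned task pointer) are assumptions inferred from the init_context plumbing, not confirmed by the listing:

#include <linux/kvm_host.h>
#include <linux/sched.h>

/* Sketch: a per-VM worker body; the return value is handed back to KVM. */
static int example_worker(struct kvm *kvm, uintptr_t data)
{
	/* runs until the kthread is stopped */
	return 0;
}

static int example_start_worker(struct kvm *kvm, struct task_struct **task)
{
	/* assumed parameter order: fn, data, name, task out-pointer */
	return kvm_vm_create_worker_thread(kvm, example_worker, 0,
					   "example-worker", task);
}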