Lines matching refs: kvm

139 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
143 __weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, in kvm_arch_mmu_notifier_invalidate_range() argument
212 bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req, in kvm_make_vcpus_request_mask() argument
221 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_make_vcpus_request_mask()
242 bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req) in kvm_make_all_cpus_request() argument
251 called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap, cpus); in kvm_make_all_cpus_request()
258 void kvm_flush_remote_tlbs(struct kvm *kvm) in kvm_flush_remote_tlbs() argument
264 long dirty_count = smp_load_acquire(&kvm->tlbs_dirty); in kvm_flush_remote_tlbs()
277 if (!kvm_arch_flush_remote_tlb(kvm) in kvm_flush_remote_tlbs()
278 || kvm_make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH)) in kvm_flush_remote_tlbs()
279 ++kvm->stat.remote_tlb_flush; in kvm_flush_remote_tlbs()
280 cmpxchg(&kvm->tlbs_dirty, dirty_count, 0); in kvm_flush_remote_tlbs()
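The flush path above pairs an acquire load of tlbs_dirty (line 264) with a cmpxchg reset (line 280): the counter is cleared only if no CPU dirtied more shadow entries while the flush ran. Below is a minimal user-space sketch of that read-then-conditionally-reset shape using C11 atomics; the names are stand-ins, not KVM's.

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long tlbs_dirty;

    static void flush_remote_tlbs(void)
    {
        long dirty_count = atomic_load_explicit(&tlbs_dirty,
                                                memory_order_acquire);

        /* ... the actual remote flush would happen here ... */

        /* Equivalent of cmpxchg(&kvm->tlbs_dirty, dirty_count, 0):
         * reset only if nothing was dirtied during the flush. */
        atomic_compare_exchange_strong(&tlbs_dirty, &dirty_count, 0);
    }

    int main(void)
    {
        atomic_store(&tlbs_dirty, 3);
        flush_remote_tlbs();
        printf("tlbs_dirty after flush: %ld\n",
               (long)atomic_load(&tlbs_dirty));   /* prints 0 */
        return 0;
    }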
285 void kvm_reload_remote_mmus(struct kvm *kvm) in kvm_reload_remote_mmus() argument
287 kvm_make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD); in kvm_reload_remote_mmus()
290 int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id) in kvm_vcpu_init() argument
297 vcpu->kvm = kvm; in kvm_vcpu_init()
343 static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) in mmu_notifier_to_kvm()
345 return container_of(mn, struct kvm, mmu_notifier); in mmu_notifier_to_kvm()
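mmu_notifier_to_kvm() recovers the enclosing struct kvm from its embedded mmu_notifier member via container_of. A standalone sketch with stand-in types, showing the offsetof arithmetic behind the macro:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct mmu_notifier { int ops; };

    struct kvm_demo {                          /* stand-in for struct kvm */
        long mmu_notifier_seq;
        struct mmu_notifier mmu_notifier;      /* embedded member */
    };

    int main(void)
    {
        struct kvm_demo vm = { .mmu_notifier_seq = 42 };
        struct mmu_notifier *mn = &vm.mmu_notifier;

        /* Recover the enclosing object, as mmu_notifier_to_kvm() does. */
        struct kvm_demo *kvm = container_of(mn, struct kvm_demo, mmu_notifier);
        printf("seq = %ld\n", kvm->mmu_notifier_seq);
        return 0;
    }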
353 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_change_pte() local
356 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_change_pte()
357 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
358 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_change_pte()
359 kvm_set_spte_hva(kvm, address, pte); in kvm_mmu_notifier_change_pte()
360 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_change_pte()
361 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_change_pte()
370 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_start() local
374 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_invalidate_range_start()
375 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
381 kvm->mmu_notifier_count++; in kvm_mmu_notifier_invalidate_range_start()
382 need_tlb_flush = kvm_unmap_hva_range(kvm, start, end); in kvm_mmu_notifier_invalidate_range_start()
383 need_tlb_flush |= kvm->tlbs_dirty; in kvm_mmu_notifier_invalidate_range_start()
386 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_invalidate_range_start()
388 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_start()
390 ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, blockable); in kvm_mmu_notifier_invalidate_range_start()
392 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_invalidate_range_start()
402 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_invalidate_range_end() local
404 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
410 kvm->mmu_notifier_seq++; in kvm_mmu_notifier_invalidate_range_end()
417 kvm->mmu_notifier_count--; in kvm_mmu_notifier_invalidate_range_end()
418 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_invalidate_range_end()
420 BUG_ON(kvm->mmu_notifier_count < 0); in kvm_mmu_notifier_invalidate_range_end()
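The start/end pair above forms KVM's invalidation window: invalidate_range_start raises mmu_notifier_count (line 381), and invalidate_range_end bumps mmu_notifier_seq before dropping the count (lines 410 and 417). A page-fault path samples the sequence first and retries if a window is open or the sequence moved, which is what mmu_notifier_retry() in kvm_host.h checks. A single-threaded sketch of that consumer-side test, with the mmu_lock serialization elided:

    #include <stdio.h>

    static long mmu_notifier_seq;
    static long mmu_notifier_count;

    /* Sketch of the check a fault handler performs under mmu_lock:
     * retry if an invalidation is in progress, or one completed since
     * @seq was sampled. */
    static int mmu_notifier_retry_demo(long seq)
    {
        if (mmu_notifier_count)          /* start() ran, end() has not */
            return 1;
        if (mmu_notifier_seq != seq)     /* an invalidation completed */
            return 1;
        return 0;
    }

    int main(void)
    {
        long seq = mmu_notifier_seq;     /* sample before the slow work */

        /* Simulate a racing invalidate_range_start()/end() pair. */
        mmu_notifier_count++;
        mmu_notifier_seq++;
        mmu_notifier_count--;

        printf("retry? %d\n", mmu_notifier_retry_demo(seq));   /* 1 */
        return 0;
    }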
428 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_clear_flush_young() local
431 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_clear_flush_young()
432 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
434 young = kvm_age_hva(kvm, start, end); in kvm_mmu_notifier_clear_flush_young()
436 kvm_flush_remote_tlbs(kvm); in kvm_mmu_notifier_clear_flush_young()
438 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_flush_young()
439 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_clear_flush_young()
449 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_clear_young() local
452 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_clear_young()
453 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_young()
467 young = kvm_age_hva(kvm, start, end); in kvm_mmu_notifier_clear_young()
468 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_clear_young()
469 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_clear_young()
478 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_test_young() local
481 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_test_young()
482 spin_lock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
483 young = kvm_test_age_hva(kvm, address); in kvm_mmu_notifier_test_young()
484 spin_unlock(&kvm->mmu_lock); in kvm_mmu_notifier_test_young()
485 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_test_young()
493 struct kvm *kvm = mmu_notifier_to_kvm(mn); in kvm_mmu_notifier_release() local
496 idx = srcu_read_lock(&kvm->srcu); in kvm_mmu_notifier_release()
497 kvm_arch_flush_shadow_all(kvm); in kvm_mmu_notifier_release()
498 srcu_read_unlock(&kvm->srcu, idx); in kvm_mmu_notifier_release()
512 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
514 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops; in kvm_init_mmu_notifier()
515 return mmu_notifier_register(&kvm->mmu_notifier, current->mm); in kvm_init_mmu_notifier()
520 static int kvm_init_mmu_notifier(struct kvm *kvm) in kvm_init_mmu_notifier() argument
554 static void kvm_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, in kvm_free_memslot() argument
560 kvm_arch_free_memslot(kvm, free, dont); in kvm_free_memslot()
565 static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots) in kvm_free_memslots() argument
573 kvm_free_memslot(kvm, memslot, NULL); in kvm_free_memslots()
578 static void kvm_destroy_vm_debugfs(struct kvm *kvm) in kvm_destroy_vm_debugfs() argument
582 if (!kvm->debugfs_dentry) in kvm_destroy_vm_debugfs()
585 debugfs_remove_recursive(kvm->debugfs_dentry); in kvm_destroy_vm_debugfs()
587 if (kvm->debugfs_stat_data) { in kvm_destroy_vm_debugfs()
589 kfree(kvm->debugfs_stat_data[i]); in kvm_destroy_vm_debugfs()
590 kfree(kvm->debugfs_stat_data); in kvm_destroy_vm_debugfs()
594 static int kvm_create_vm_debugfs(struct kvm *kvm, int fd) in kvm_create_vm_debugfs() argument
604 kvm->debugfs_dentry = debugfs_create_dir(dir_name, kvm_debugfs_dir); in kvm_create_vm_debugfs()
606 kvm->debugfs_stat_data = kcalloc(kvm_debugfs_num_entries, in kvm_create_vm_debugfs()
607 sizeof(*kvm->debugfs_stat_data), in kvm_create_vm_debugfs()
609 if (!kvm->debugfs_stat_data) in kvm_create_vm_debugfs()
617 stat_data->kvm = kvm; in kvm_create_vm_debugfs()
619 kvm->debugfs_stat_data[p - debugfs_entries] = stat_data; in kvm_create_vm_debugfs()
620 debugfs_create_file(p->name, 0644, kvm->debugfs_dentry, in kvm_create_vm_debugfs()
626 static struct kvm *kvm_create_vm(unsigned long type) in kvm_create_vm()
629 struct kvm *kvm = kvm_arch_alloc_vm(); in kvm_create_vm() local
631 if (!kvm) in kvm_create_vm()
634 spin_lock_init(&kvm->mmu_lock); in kvm_create_vm()
636 kvm->mm = current->mm; in kvm_create_vm()
637 kvm_eventfd_init(kvm); in kvm_create_vm()
638 mutex_init(&kvm->lock); in kvm_create_vm()
639 mutex_init(&kvm->irq_lock); in kvm_create_vm()
640 mutex_init(&kvm->slots_lock); in kvm_create_vm()
641 refcount_set(&kvm->users_count, 1); in kvm_create_vm()
642 INIT_LIST_HEAD(&kvm->devices); in kvm_create_vm()
644 r = kvm_arch_init_vm(kvm, type); in kvm_create_vm()
653 INIT_HLIST_HEAD(&kvm->irq_ack_notifier_list); in kvm_create_vm()
669 rcu_assign_pointer(kvm->memslots[i], slots); in kvm_create_vm()
672 if (init_srcu_struct(&kvm->srcu)) in kvm_create_vm()
674 if (init_srcu_struct(&kvm->irq_srcu)) in kvm_create_vm()
677 rcu_assign_pointer(kvm->buses[i], in kvm_create_vm()
679 if (!kvm->buses[i]) in kvm_create_vm()
683 r = kvm_init_mmu_notifier(kvm); in kvm_create_vm()
688 list_add(&kvm->vm_list, &vm_list); in kvm_create_vm()
693 return kvm; in kvm_create_vm()
696 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_create_vm()
698 cleanup_srcu_struct(&kvm->srcu); in kvm_create_vm()
702 refcount_set(&kvm->users_count, 0); in kvm_create_vm()
704 kfree(kvm_get_bus(kvm, i)); in kvm_create_vm()
706 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); in kvm_create_vm()
707 kvm_arch_free_vm(kvm); in kvm_create_vm()
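The error path at lines 696-707 is the kernel's label-based unwind idiom: each failure jumps to a cleanup label that releases only what was already initialized, in reverse order. A generic runnable sketch of the idiom with hypothetical resources:

    #include <stdio.h>
    #include <stdlib.h>

    static int create_object(void)
    {
        char *a, *b;

        /* Acquire in order; on failure, jump to the label that frees
         * exactly what has been set up so far. */
        a = malloc(64);
        if (!a)
            goto out_err_no_a;
        b = malloc(64);
        if (!b)
            goto out_err_no_b;

        printf("fully initialized\n");
        free(b);
        free(a);
        return 0;

    out_err_no_b:
        free(a);
    out_err_no_a:
        return -1;
    }

    int main(void)
    {
        return create_object();
    }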
712 static void kvm_destroy_devices(struct kvm *kvm) in kvm_destroy_devices() argument
721 list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) { in kvm_destroy_devices()
727 static void kvm_destroy_vm(struct kvm *kvm) in kvm_destroy_vm() argument
730 struct mm_struct *mm = kvm->mm; in kvm_destroy_vm()
732 kvm_uevent_notify_change(KVM_EVENT_DESTROY_VM, kvm); in kvm_destroy_vm()
733 kvm_destroy_vm_debugfs(kvm); in kvm_destroy_vm()
734 kvm_arch_sync_events(kvm); in kvm_destroy_vm()
736 list_del(&kvm->vm_list); in kvm_destroy_vm()
738 kvm_free_irq_routing(kvm); in kvm_destroy_vm()
740 struct kvm_io_bus *bus = kvm_get_bus(kvm, i); in kvm_destroy_vm()
744 kvm->buses[i] = NULL; in kvm_destroy_vm()
746 kvm_coalesced_mmio_free(kvm); in kvm_destroy_vm()
748 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm); in kvm_destroy_vm()
750 kvm_arch_flush_shadow_all(kvm); in kvm_destroy_vm()
752 kvm_arch_destroy_vm(kvm); in kvm_destroy_vm()
753 kvm_destroy_devices(kvm); in kvm_destroy_vm()
755 kvm_free_memslots(kvm, __kvm_memslots(kvm, i)); in kvm_destroy_vm()
756 cleanup_srcu_struct(&kvm->irq_srcu); in kvm_destroy_vm()
757 cleanup_srcu_struct(&kvm->srcu); in kvm_destroy_vm()
758 kvm_arch_free_vm(kvm); in kvm_destroy_vm()
764 void kvm_get_kvm(struct kvm *kvm) in kvm_get_kvm() argument
766 refcount_inc(&kvm->users_count); in kvm_get_kvm()
770 void kvm_put_kvm(struct kvm *kvm) in kvm_put_kvm() argument
772 if (refcount_dec_and_test(&kvm->users_count)) in kvm_put_kvm()
773 kvm_destroy_vm(kvm); in kvm_put_kvm()
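kvm_get_kvm()/kvm_put_kvm() are the standard last-put-destroys reference-count pattern over users_count, which kvm_create_vm() initializes to 1 (line 641). A C11 sketch of the same lifecycle; the obj type and output are illustrative only:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_uint users_count;
    };

    static void obj_get(struct obj *o)
    {
        atomic_fetch_add(&o->users_count, 1);   /* kvm_get_kvm() */
    }

    static void obj_put(struct obj *o)
    {
        /* kvm_put_kvm(): whoever drops the last reference tears the
         * object down. */
        if (atomic_fetch_sub(&o->users_count, 1) == 1) {
            printf("destroying\n");
            free(o);
        }
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->users_count, 1);   /* creator holds one ref */
        obj_get(o);   /* e.g. a vcpu fd taking a reference */
        obj_put(o);   /* vcpu released */
        obj_put(o);   /* final put destroys */
        return 0;
    }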
780 struct kvm *kvm = filp->private_data; in kvm_vm_release() local
782 kvm_irqfd_release(kvm); in kvm_vm_release()
784 kvm_put_kvm(kvm); in kvm_vm_release()
872 static struct kvm_memslots *install_new_memslots(struct kvm *kvm, in install_new_memslots() argument
875 struct kvm_memslots *old_memslots = __kvm_memslots(kvm, as_id); in install_new_memslots()
884 rcu_assign_pointer(kvm->memslots[as_id], slots); in install_new_memslots()
885 synchronize_srcu_expedited(&kvm->srcu); in install_new_memslots()
900 kvm_arch_memslots_updated(kvm, slots); in install_new_memslots()
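install_new_memslots() is a copy-and-publish update: the new array becomes visible through rcu_assign_pointer() (line 884), and synchronize_srcu_expedited() (line 885) then waits out any reader still using the old one before it can be freed. A user-space sketch of the publish step with a release store; the grace-period wait has no small user-space equivalent and is only marked by a comment:

    #include <stdatomic.h>
    #include <stdio.h>

    struct memslots_demo { int generation; };

    static struct memslots_demo slots_a = { .generation = 1 };
    static struct memslots_demo slots_b = { .generation = 2 };
    static _Atomic(struct memslots_demo *) memslots = &slots_a;

    static struct memslots_demo *install_new_memslots_demo(struct memslots_demo *new)
    {
        struct memslots_demo *old = atomic_load(&memslots);

        /* rcu_assign_pointer(): release semantics so readers that see
         * the new pointer also see its fully initialized contents. */
        atomic_store_explicit(&memslots, new, memory_order_release);

        /* synchronize_srcu_expedited(&kvm->srcu) would go here: wait
         * until every reader that might still hold @old is done. */
        return old;
    }

    int main(void)
    {
        struct memslots_demo *old = install_new_memslots_demo(&slots_b);

        printf("old gen %d, new gen %d\n", old->generation,
               atomic_load(&memslots)->generation);
        return 0;
    }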
913 int __kvm_set_memory_region(struct kvm *kvm, in __kvm_set_memory_region() argument
950 slot = id_to_memslot(__kvm_memslots(kvm, as_id), id); in __kvm_set_memory_region()
994 kvm_for_each_memslot(slot, __kvm_memslots(kvm, as_id)) { in __kvm_set_memory_region()
1011 if (kvm_arch_create_memslot(kvm, &new, npages)) in __kvm_set_memory_region()
1024 memcpy(slots, __kvm_memslots(kvm, as_id), sizeof(struct kvm_memslots)); in __kvm_set_memory_region()
1030 old_memslots = install_new_memslots(kvm, as_id, slots); in __kvm_set_memory_region()
1039 kvm_arch_flush_shadow_memslot(kvm, slot); in __kvm_set_memory_region()
1049 r = kvm_arch_prepare_memory_region(kvm, &new, mem, change); in __kvm_set_memory_region()
1060 old_memslots = install_new_memslots(kvm, as_id, slots); in __kvm_set_memory_region()
1062 kvm_arch_commit_memory_region(kvm, mem, &old, &new, change); in __kvm_set_memory_region()
1064 kvm_free_memslot(kvm, &old, &new); in __kvm_set_memory_region()
1071 kvm_free_memslot(kvm, &new, &old); in __kvm_set_memory_region()
1077 int kvm_set_memory_region(struct kvm *kvm, in kvm_set_memory_region() argument
1082 mutex_lock(&kvm->slots_lock); in kvm_set_memory_region()
1083 r = __kvm_set_memory_region(kvm, mem); in kvm_set_memory_region()
1084 mutex_unlock(&kvm->slots_lock); in kvm_set_memory_region()
1089 static int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, in kvm_vm_ioctl_set_memory_region() argument
1095 return kvm_set_memory_region(kvm, mem); in kvm_vm_ioctl_set_memory_region()
1098 int kvm_get_dirty_log(struct kvm *kvm, in kvm_get_dirty_log() argument
1112 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log()
1154 int kvm_get_dirty_log_protect(struct kvm *kvm, in kvm_get_dirty_log_protect() argument
1169 slots = __kvm_memslots(kvm, as_id); in kvm_get_dirty_log_protect()
1181 spin_lock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
1197 kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, in kvm_get_dirty_log_protect()
1202 spin_unlock(&kvm->mmu_lock); in kvm_get_dirty_log_protect()
1221 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn) in gfn_to_memslot() argument
1223 return __gfn_to_memslot(kvm_memslots(kvm), gfn); in gfn_to_memslot()
1232 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn) in kvm_is_visible_gfn() argument
1234 struct kvm_memory_slot *memslot = gfn_to_memslot(kvm, gfn); in kvm_is_visible_gfn()
1244 unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn) in kvm_host_page_size() argument
1251 addr = gfn_to_hva(kvm, gfn); in kvm_host_page_size()
1301 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn) in gfn_to_hva() argument
1303 return gfn_to_hva_many(gfn_to_memslot(kvm, gfn), gfn, NULL); in gfn_to_hva()
1328 unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable) in gfn_to_hva_prot() argument
1330 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in gfn_to_hva_prot()
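gfn_to_hva() resolves a guest frame number by finding the memslot that covers it and adding the page offset to the slot's userspace mapping. A simplified sketch of that arithmetic; KVM's real lookup is a sorted search over kvm_memslots, reduced here to a linear scan:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef unsigned long long gfn_t;

    struct memslot_demo {
        gfn_t base_gfn;
        unsigned long npages;
        unsigned long long userspace_addr;   /* host VA of slot start */
    };

    /* Simplified gfn_to_hva(): locate the covering slot, then offset
     * into its host mapping. */
    static unsigned long long gfn_to_hva_demo(struct memslot_demo *slots,
                                              int n, gfn_t gfn)
    {
        for (int i = 0; i < n; i++) {
            struct memslot_demo *s = &slots[i];

            if (gfn >= s->base_gfn && gfn < s->base_gfn + s->npages)
                return s->userspace_addr +
                       ((gfn - s->base_gfn) << PAGE_SHIFT);
        }
        return 0;   /* stands in for the bad-hva error value */
    }

    int main(void)
    {
        struct memslot_demo slots[] = {
            { .base_gfn = 0x100, .npages = 16,
              .userspace_addr = 0x7f0000000000ULL },
        };

        printf("hva = %#llx\n", gfn_to_hva_demo(slots, 1, 0x105));
        return 0;
    }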
1571 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault, in gfn_to_pfn_prot() argument
1574 return __gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn, false, NULL, in gfn_to_pfn_prot()
1591 kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn_atomic() argument
1593 return gfn_to_pfn_memslot_atomic(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn_atomic()
1603 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn) in gfn_to_pfn() argument
1605 return gfn_to_pfn_memslot(gfn_to_memslot(kvm, gfn), gfn); in gfn_to_pfn()
1645 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn) in gfn_to_page() argument
1649 pfn = gfn_to_pfn(kvm, gfn); in gfn_to_page()
1743 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, in kvm_read_guest_page() argument
1746 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_page()
1761 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len) in kvm_read_guest() argument
1769 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg); in kvm_read_guest()
1818 int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, in kvm_read_guest_atomic() argument
1822 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_read_guest_atomic()
1856 int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, in kvm_write_guest_page() argument
1859 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn); in kvm_write_guest_page()
1874 int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, in kvm_write_guest() argument
1883 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg); in kvm_write_guest()
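kvm_read_guest() and kvm_write_guest() (lines 1761-1769 and 1874-1883) split a guest-physical range into segments of at most one page, so every chunk stays within a single gfn. A runnable sketch of that loop against a flat buffer standing in for guest memory:

    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096UL

    static unsigned char guest_mem[4 * PAGE_SIZE];   /* fake guest RAM */

    /* Mimics the kvm_write_guest() loop: peel off at most one page per
     * iteration, starting at the offset within the first page. */
    static int write_guest_demo(unsigned long gpa, const void *data,
                                unsigned long len)
    {
        const char *p = data;

        while (len) {
            unsigned long offset = gpa & (PAGE_SIZE - 1);
            unsigned long seg = PAGE_SIZE - offset;

            if (seg > len)
                seg = len;
            if (gpa + seg > sizeof(guest_mem))
                return -1;   /* no memslot covers this gpa */

            memcpy(&guest_mem[gpa], p, seg);   /* one-page write */
            p += seg;
            gpa += seg;
            len -= seg;
        }
        return 0;
    }

    int main(void)
    {
        const char msg[] = "crosses a page boundary";

        /* Start 8 bytes before a boundary to force two segments. */
        int r = write_guest_demo(PAGE_SIZE - 8, msg, sizeof(msg));

        printf("r=%d data=%s\n", r, &guest_mem[PAGE_SIZE - 8]);
        return 0;
    }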
1953 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_gfn_to_hva_cache_init() argument
1956 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_gfn_to_hva_cache_init()
1961 int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_offset_cached() argument
1964 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_write_guest_offset_cached()
1974 return kvm_write_guest(kvm, gpa, data, len); in kvm_write_guest_offset_cached()
1988 int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_write_guest_cached() argument
1991 return kvm_write_guest_offset_cached(kvm, ghc, data, 0, len); in kvm_write_guest_cached()
1995 int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, in kvm_read_guest_cached() argument
1998 struct kvm_memslots *slots = kvm_memslots(kvm); in kvm_read_guest_cached()
2007 return kvm_read_guest(kvm, ghc->gpa, data, len); in kvm_read_guest_cached()
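The *_cached variants avoid repeating the memslot walk on every access: kvm_gfn_to_hva_cache_init() records the translation together with the current memslots generation, and the access paths fall back to the uncached kvm_write_guest()/kvm_read_guest() (lines 1974 and 2007) when the cached translation cannot be used. A sketch of a generation-checked cache; the translation itself is faked:

    #include <stdio.h>

    struct hva_cache_demo {
        unsigned long gpa;
        unsigned long long hva;
        unsigned long generation;   /* memslots generation at init */
    };

    static unsigned long slots_generation;

    static void cache_init(struct hva_cache_demo *c, unsigned long gpa)
    {
        c->gpa = gpa;
        c->hva = 0x7f0000000000ULL + gpa;   /* stand-in translation */
        c->generation = slots_generation;
    }

    /* Like kvm_write_guest_cached(): use the cached hva while the
     * memslot layout is unchanged, otherwise take the slow path. */
    static const char *write_cached(struct hva_cache_demo *c)
    {
        if (c->generation != slots_generation)
            return "slow path: re-translate / kvm_write_guest()";
        return "fast path: copy to cached hva";
    }

    int main(void)
    {
        struct hva_cache_demo c;

        cache_init(&c, 0x1000);
        printf("%s\n", write_cached(&c));

        slots_generation++;   /* a memslot update invalidates the cache */
        printf("%s\n", write_cached(&c));
        return 0;
    }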
2020 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len) in kvm_clear_guest_page() argument
2024 return kvm_write_guest_page(kvm, gfn, zero_page, offset, len); in kvm_clear_guest_page()
2028 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len) in kvm_clear_guest() argument
2036 ret = kvm_clear_guest_page(kvm, gfn, offset, seg); in kvm_clear_guest()
2057 void mark_page_dirty(struct kvm *kvm, gfn_t gfn) in mark_page_dirty() argument
2061 memslot = gfn_to_memslot(kvm, gfn); in mark_page_dirty()
2135 int idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_vcpu_check_block()
2148 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_vcpu_check_block()
2318 struct kvm *kvm = me->kvm; in kvm_vcpu_on_spin() local
2320 int last_boosted_vcpu = me->kvm->last_boosted_vcpu; in kvm_vcpu_on_spin()
2335 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_vcpu_on_spin()
2354 kvm->last_boosted_vcpu = i; in kvm_vcpu_on_spin()
2383 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring); in kvm_vcpu_fault()
2407 kvm_put_kvm(vcpu->kvm); in kvm_vcpu_release()
2443 vcpu->kvm->debugfs_dentry); in kvm_create_vcpu_debugfs()
2459 static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id) in kvm_vm_ioctl_create_vcpu() argument
2467 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2468 if (kvm->created_vcpus == KVM_MAX_VCPUS) { in kvm_vm_ioctl_create_vcpu()
2469 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2473 kvm->created_vcpus++; in kvm_vm_ioctl_create_vcpu()
2474 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2476 vcpu = kvm_arch_vcpu_create(kvm, id); in kvm_vm_ioctl_create_vcpu()
2492 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2493 if (kvm_get_vcpu_by_id(kvm, id)) { in kvm_vm_ioctl_create_vcpu()
2498 BUG_ON(kvm->vcpus[atomic_read(&kvm->online_vcpus)]); in kvm_vm_ioctl_create_vcpu()
2501 kvm_get_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
2504 kvm_put_kvm(kvm); in kvm_vm_ioctl_create_vcpu()
2508 kvm->vcpus[atomic_read(&kvm->online_vcpus)] = vcpu; in kvm_vm_ioctl_create_vcpu()
2515 atomic_inc(&kvm->online_vcpus); in kvm_vm_ioctl_create_vcpu()
2517 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2522 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2527 mutex_lock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
2528 kvm->created_vcpus--; in kvm_vm_ioctl_create_vcpu()
2529 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_create_vcpu()
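kvm_vm_ioctl_create_vcpu() reserves capacity by bumping created_vcpus under kvm->lock, runs the expensive kvm_arch_vcpu_create() unlocked, then retakes the lock to publish into kvm->vcpus[] before raising online_vcpus; on failure the count is rolled back (lines 2527-2529). A pthread sketch of that reserve/construct/publish shape with stand-in types (the duplicate-id recheck is elided):

    #include <pthread.h>
    #include <stdio.h>

    #define MAX_VCPUS 4

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int created_vcpus;
    static int online_vcpus;
    static int *vcpus[MAX_VCPUS];

    static int create_vcpu(int *vcpu)
    {
        /* Reserve a slot cheaply under the lock... */
        pthread_mutex_lock(&lock);
        if (created_vcpus == MAX_VCPUS) {
            pthread_mutex_unlock(&lock);
            return -1;
        }
        created_vcpus++;
        pthread_mutex_unlock(&lock);

        /* ...do the expensive construction unlocked
         * (kvm_arch_vcpu_create() in the real code)... */

        /* ...then publish under the lock; the count goes up last. */
        pthread_mutex_lock(&lock);
        vcpus[online_vcpus] = vcpu;
        online_vcpus++;
        pthread_mutex_unlock(&lock);
        return 0;
    }

    int main(void)
    {
        int v0 = 0;
        int r = create_vcpu(&v0);

        printf("create: %d, online: %d\n", r, online_vcpus);
        return 0;
    }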
2553 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_ioctl()
2759 if (vcpu->kvm->mm != current->mm) in kvm_vcpu_compat_ioctl()
2832 struct kvm *kvm = dev->kvm; in kvm_device_release() local
2834 kvm_put_kvm(kvm); in kvm_device_release()
2877 static int kvm_ioctl_create_device(struct kvm *kvm, in kvm_ioctl_create_device() argument
2900 dev->kvm = kvm; in kvm_ioctl_create_device()
2902 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
2905 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
2909 list_add(&dev->vm_node, &kvm->devices); in kvm_ioctl_create_device()
2910 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
2917 mutex_lock(&kvm->lock); in kvm_ioctl_create_device()
2919 mutex_unlock(&kvm->lock); in kvm_ioctl_create_device()
2924 kvm_get_kvm(kvm); in kvm_ioctl_create_device()
2929 static long kvm_vm_ioctl_check_extension_generic(struct kvm *kvm, long arg) in kvm_vm_ioctl_check_extension_generic() argument
2963 return kvm_vm_ioctl_check_extension(kvm, arg); in kvm_vm_ioctl_check_extension_generic()
2969 struct kvm *kvm = filp->private_data; in kvm_vm_ioctl() local
2973 if (kvm->mm != current->mm) in kvm_vm_ioctl()
2977 r = kvm_vm_ioctl_create_vcpu(kvm, arg); in kvm_vm_ioctl()
2987 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem); in kvm_vm_ioctl()
2996 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_ioctl()
3006 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
3015 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone); in kvm_vm_ioctl()
3025 r = kvm_irqfd(kvm, &data); in kvm_vm_ioctl()
3034 r = kvm_ioeventfd(kvm, &data); in kvm_vm_ioctl()
3044 r = kvm_send_userspace_msi(kvm, &msi); in kvm_vm_ioctl()
3057 r = kvm_vm_ioctl_irq_line(kvm, &irq_event, in kvm_vm_ioctl()
3082 if (!kvm_arch_can_set_irq_routing(kvm)) in kvm_vm_ioctl()
3100 r = kvm_set_irq_routing(kvm, entries, routing.nr, in kvm_vm_ioctl()
3114 r = kvm_ioctl_create_device(kvm, &cd); in kvm_vm_ioctl()
3126 r = kvm_vm_ioctl_check_extension_generic(kvm, arg); in kvm_vm_ioctl()
3148 struct kvm *kvm = filp->private_data; in kvm_vm_compat_ioctl() local
3151 if (kvm->mm != current->mm) in kvm_vm_compat_ioctl()
3166 r = kvm_vm_ioctl_get_dirty_log(kvm, &log); in kvm_vm_compat_ioctl()
3186 struct kvm *kvm; in kvm_dev_ioctl_create_vm() local
3189 kvm = kvm_create_vm(type); in kvm_dev_ioctl_create_vm()
3190 if (IS_ERR(kvm)) in kvm_dev_ioctl_create_vm()
3191 return PTR_ERR(kvm); in kvm_dev_ioctl_create_vm()
3193 r = kvm_coalesced_mmio_init(kvm); in kvm_dev_ioctl_create_vm()
3201 file = anon_inode_getfile("kvm-vm", &kvm_vm_fops, kvm, O_RDWR); in kvm_dev_ioctl_create_vm()
3214 if (kvm_create_vm_debugfs(kvm, r) < 0) { in kvm_dev_ioctl_create_vm()
3219 kvm_uevent_notify_change(KVM_EVENT_CREATE_VM, kvm); in kvm_dev_ioctl_create_vm()
3225 kvm_put_kvm(kvm); in kvm_dev_ioctl_create_vm()
3485 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write()
3504 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_write_cookie()
3556 bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu); in kvm_io_bus_read()
3565 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
3572 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_register_dev()
3600 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_register_dev()
3601 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_register_dev()
3608 void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_unregister_dev() argument
3614 bus = kvm_get_bus(kvm, bus_idx); in kvm_io_bus_unregister_dev()
3639 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); in kvm_io_bus_unregister_dev()
3640 synchronize_srcu_expedited(&kvm->srcu); in kvm_io_bus_unregister_dev()
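Bus updates are copy-on-write: register/unregister build a replacement kvm_io_bus with one more or one fewer device range, publish it (lines 3600 and 3639), wait for SRCU readers, then free the old bus. A sketch of the copy-and-swap step; sorted insertion and the grace period are elided, and freeing the old bus immediately is only safe here because the demo is single-threaded:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct bus_demo {
        int dev_count;
        int devs[];   /* flexible array, like kvm_io_bus::range[] */
    };

    static struct bus_demo *bus;

    /* Copy-on-write insert, as kvm_io_bus_register_dev() does: never
     * modify a bus readers may be using; build a replacement instead. */
    static int bus_register_dev(int dev)
    {
        int n = bus ? bus->dev_count : 0;
        struct bus_demo *old = bus;
        struct bus_demo *new_bus;

        new_bus = malloc(sizeof(*new_bus) + (n + 1) * sizeof(int));
        if (!new_bus)
            return -1;
        if (old)
            memcpy(new_bus->devs, old->devs, n * sizeof(int));
        new_bus->devs[n] = dev;
        new_bus->dev_count = n + 1;

        /* rcu_assign_pointer() + synchronize_srcu_expedited() here. */
        bus = new_bus;
        free(old);   /* only after the grace period in the kernel */
        return 0;
    }

    int main(void)
    {
        bus_register_dev(1);
        bus_register_dev(2);
        printf("devices: %d\n", bus->dev_count);
        free(bus);
        return 0;
    }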
3645 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx, in kvm_io_bus_get_dev() argument
3652 srcu_idx = srcu_read_lock(&kvm->srcu); in kvm_io_bus_get_dev()
3654 bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu); in kvm_io_bus_get_dev()
3665 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvm_io_bus_get_dev()
3683 if (!refcount_inc_not_zero(&stat_data->kvm->users_count)) in kvm_debugfs_open()
3687 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_open()
3700 kvm_put_kvm(stat_data->kvm); in kvm_debugfs_release()
3709 *val = *(ulong *)((void *)stat_data->kvm + stat_data->offset); in vm_stat_get_per_vm()
3721 *(ulong *)((void *)stat_data->kvm + stat_data->offset) = 0; in vm_stat_clear_per_vm()
3750 kvm_for_each_vcpu(i, vcpu, stat_data->kvm) in vcpu_stat_get_per_vm()
3765 kvm_for_each_vcpu(i, vcpu, stat_data->kvm) in vcpu_stat_clear_per_vm()
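The per-VM debugfs handlers are generic: stat_data carries only a struct kvm pointer plus a byte offset, and vm_stat_get_per_vm()/vm_stat_clear_per_vm() compute each counter's address as kvm + offset (lines 3709 and 3721). An offsetof() sketch of the same trick with a stand-in structure:

    #include <stddef.h>
    #include <stdio.h>

    struct kvm_demo {
        unsigned long remote_tlb_flush;
        unsigned long mmu_cache_miss;
    };

    struct stat_data_demo {
        struct kvm_demo *kvm;
        size_t offset;   /* offsetof(struct kvm_demo, <counter>) */
    };

    /* Same trick as vm_stat_get_per_vm(): one handler serves every
     * counter by adding a per-file offset to the object's base. */
    static unsigned long stat_get(struct stat_data_demo *sd)
    {
        return *(unsigned long *)((char *)sd->kvm + sd->offset);
    }

    int main(void)
    {
        struct kvm_demo vm = { .remote_tlb_flush = 7, .mmu_cache_miss = 3 };
        struct stat_data_demo sd = {
            .kvm = &vm,
            .offset = offsetof(struct kvm_demo, mmu_cache_miss),
        };

        printf("mmu_cache_miss = %lu\n", stat_get(&sd));
        return 0;
    }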
3795 struct kvm *kvm; in vm_stat_get() local
3801 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_get()
3802 stat_tmp.kvm = kvm; in vm_stat_get()
3813 struct kvm *kvm; in vm_stat_clear() local
3820 list_for_each_entry(kvm, &vm_list, vm_list) { in vm_stat_clear()
3821 stat_tmp.kvm = kvm; in vm_stat_clear()
3834 struct kvm *kvm; in vcpu_stat_get() local
3840 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_get()
3841 stat_tmp.kvm = kvm; in vcpu_stat_get()
3852 struct kvm *kvm; in vcpu_stat_clear() local
3859 list_for_each_entry(kvm, &vm_list, vm_list) { in vcpu_stat_clear()
3860 stat_tmp.kvm = kvm; in vcpu_stat_clear()
3876 static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm) in kvm_uevent_notify_change() argument
3881 if (!kvm_dev.this_device || !kvm) in kvm_uevent_notify_change()
3904 kvm->userspace_pid = task_pid_nr(current); in kvm_uevent_notify_change()
3908 add_uevent_var(env, "PID=%d", kvm->userspace_pid); in kvm_uevent_notify_change()
3910 if (kvm->debugfs_dentry) { in kvm_uevent_notify_change()
3914 tmp = dentry_path_raw(kvm->debugfs_dentry, p, PATH_MAX); in kvm_uevent_notify_change()