Lines matching refs:kvm — one hit per source line, shown with the file line number, the matching code, and the enclosing function; hits tagged "argument" or "local" are the lines that declare kvm as a function parameter or local variable.
119 int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) in kvm_arch_init_vm() argument
126 kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran)); in kvm_arch_init_vm()
127 if (!kvm->arch.last_vcpu_ran) in kvm_arch_init_vm()
131 *per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1; in kvm_arch_init_vm()
133 ret = kvm_alloc_stage2_pgd(kvm); in kvm_arch_init_vm()
137 ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP); in kvm_arch_init_vm()
141 kvm_vgic_early_init(kvm); in kvm_arch_init_vm()
144 kvm->arch.vmid_gen = 0; in kvm_arch_init_vm()
147 kvm->arch.max_vcpus = vgic_present ? in kvm_arch_init_vm()
152 kvm_free_stage2_pgd(kvm); in kvm_arch_init_vm()
154 free_percpu(kvm->arch.last_vcpu_ran); in kvm_arch_init_vm()
155 kvm->arch.last_vcpu_ran = NULL; in kvm_arch_init_vm()
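The hits above all fall in kvm_arch_init_vm(). A minimal sketch of how they fit together, reconstructed from the fragments; the error-label names, the return values, and the kvm_vgic_get_max_vcpus() fallback are assumptions rather than verbatim source:

```c
/* Reconstruction of kvm_arch_init_vm() from the matches above; label names,
 * return values and the max_vcpus fallback are assumptions. */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret, cpu;

	/* Track, per physical CPU, which vCPU of this VM ran there last. */
	kvm->arch.last_vcpu_ran = alloc_percpu(typeof(*kvm->arch.last_vcpu_ran));
	if (!kvm->arch.last_vcpu_ran)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(kvm->arch.last_vcpu_ran, cpu) = -1;

	/* Allocate the stage-2 page tables that translate guest physical addresses. */
	ret = kvm_alloc_stage2_pgd(kvm);
	if (ret)
		goto out_fail_alloc;

	/* Make struct kvm visible to the EL2/hyp code. */
	ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
	if (ret)
		goto out_free_stage2_pgd;

	kvm_vgic_early_init(kvm);

	/* Generation 0 forces a fresh VMID the first time the VM runs. */
	kvm->arch.vmid_gen = 0;

	kvm->arch.max_vcpus = vgic_present ?
				kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;

	return ret;

out_free_stage2_pgd:
	kvm_free_stage2_pgd(kvm);
out_fail_alloc:
	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;
	return ret;
}
```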
179 void kvm_arch_destroy_vm(struct kvm *kvm) in kvm_arch_destroy_vm() argument
183 kvm_vgic_destroy(kvm); in kvm_arch_destroy_vm()
185 free_percpu(kvm->arch.last_vcpu_ran); in kvm_arch_destroy_vm()
186 kvm->arch.last_vcpu_ran = NULL; in kvm_arch_destroy_vm()
189 if (kvm->vcpus[i]) { in kvm_arch_destroy_vm()
190 kvm_arch_vcpu_free(kvm->vcpus[i]); in kvm_arch_destroy_vm()
191 kvm->vcpus[i] = NULL; in kvm_arch_destroy_vm()
194 atomic_set(&kvm->online_vcpus, 0); in kvm_arch_destroy_vm()
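The teardown path mirrors the init path. A sketch assembled from lines 179-194; the KVM_MAX_VCPUS loop bound is an assumption:

```c
/* Reconstruction of kvm_arch_destroy_vm(); the loop bound is an assumption. */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	int i;

	kvm_vgic_destroy(kvm);

	free_percpu(kvm->arch.last_vcpu_ran);
	kvm->arch.last_vcpu_ran = NULL;

	/* Free every vCPU that was created for this VM. */
	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
	atomic_set(&kvm->online_vcpus, 0);
}
```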
197 int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) in kvm_vm_ioctl_check_extension() argument
230 if (!kvm) in kvm_vm_ioctl_check_extension()
233 r = kvm->arch.vgic.msis_require_devid; in kvm_vm_ioctl_check_extension()
243 r = kvm_arch_dev_ioctl_check_extension(kvm, ext); in kvm_vm_ioctl_check_extension()
255 struct kvm *kvm_arch_alloc_vm(void) in kvm_arch_alloc_vm()
258 return kzalloc(sizeof(struct kvm), GFP_KERNEL); in kvm_arch_alloc_vm()
260 return vzalloc(sizeof(struct kvm)); in kvm_arch_alloc_vm()
263 void kvm_arch_free_vm(struct kvm *kvm) in kvm_arch_free_vm() argument
266 kfree(kvm); in kvm_arch_free_vm()
268 vfree(kvm); in kvm_arch_free_vm()
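Lines 255-268 show two allocation strategies for struct kvm. A sketch of the pair, assuming the choice is keyed on has_vhe(): without VHE the structure must be physically contiguous so it can be mapped into the hyp address space (kzalloc), while with VHE a vmalloc'ed allocation suffices:

```c
/* Sketch only: the has_vhe() test is an assumption about what selects
 * between the kzalloc and vzalloc paths seen in the listing. */
struct kvm *kvm_arch_alloc_vm(void)
{
	if (!has_vhe())
		return kzalloc(sizeof(struct kvm), GFP_KERNEL);

	return vzalloc(sizeof(struct kvm));
}

void kvm_arch_free_vm(struct kvm *kvm)
{
	if (!has_vhe())
		kfree(kvm);
	else
		vfree(kvm);
}
```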
271 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) in kvm_arch_vcpu_create() argument
276 if (irqchip_in_kernel(kvm) && vgic_initialized(kvm)) { in kvm_arch_vcpu_create()
281 if (id >= kvm->arch.max_vcpus) { in kvm_arch_vcpu_create()
292 err = kvm_vcpu_init(vcpu, kvm, id); in kvm_arch_vcpu_create()
315 if (vcpu->arch.has_run_once && unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_free()
365 last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran); in kvm_arch_vcpu_load()
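Line 365 is where the per-CPU last_vcpu_ran tracking set up in kvm_arch_init_vm() pays off: if a different vCPU of the same VM last ran on this physical CPU, its VMID-tagged TLB entries must be flushed locally before entering the guest. A sketch of that check; the wrapper function name is hypothetical and the __kvm_tlb_flush_local_vmid hyp call is an assumption:

```c
/* Fragment of the kvm_arch_vcpu_load() logic around line 365, wrapped in a
 * hypothetical helper for illustration; the hyp call is an assumption. */
static void flush_if_vcpu_migrated(struct kvm_vcpu *vcpu)
{
	int *last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);

	/* A different vCPU of this VM ran here last: drop its stale
	 * VMID-tagged entries so they cannot leak into this vCPU. */
	if (*last_ran != vcpu->vcpu_id) {
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
		*last_ran = vcpu->vcpu_id;
	}
}
```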
483 static bool need_new_vmid_gen(struct kvm *kvm) in need_new_vmid_gen() argument
485 return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); in need_new_vmid_gen()
496 static void update_vttbr(struct kvm *kvm) in update_vttbr() argument
503 new_gen = need_new_vmid_gen(kvm); in update_vttbr()
516 if (!need_new_vmid_gen(kvm)) { in update_vttbr()
540 kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); in update_vttbr()
541 kvm->arch.vmid = kvm_next_vmid; in update_vttbr()
546 pgd_phys = virt_to_phys(kvm->arch.pgd); in update_vttbr()
548 vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits); in update_vttbr()
549 kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid; in update_vttbr()
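need_new_vmid_gen() and update_vttbr() implement VMID allocation: each VM caches the generation of the global VMID space its VMID came from and re-checks it before every guest entry. A sketch assembled from lines 483-549; the lock type, the rollover handling, and the kvm_next_vmid bookkeeping are assumptions, while the VTTBR composition at the end is taken directly from lines 546-549:

```c
static bool need_new_vmid_gen(struct kvm *kvm)
{
	/* A stale generation means this VMID may have been reused: reallocate. */
	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
}

static void update_vttbr(struct kvm *kvm)
{
	phys_addr_t pgd_phys;
	u64 vmid;
	bool new_gen;

	/* Cheap read-side check first; the lock type is an assumption. */
	read_lock(&kvm_vmid_lock);
	new_gen = need_new_vmid_gen(kvm);
	read_unlock(&kvm_vmid_lock);

	if (!new_gen)
		return;

	write_lock(&kvm_vmid_lock);

	/* Re-check: another vCPU may already have allocated a valid VMID. */
	if (!need_new_vmid_gen(kvm)) {
		write_unlock(&kvm_vmid_lock);
		return;
	}

	/* VMID-space exhaustion handling (bump kvm_vmid_gen, flush TLBs) elided. */

	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
	kvm->arch.vmid = kvm_next_vmid++;

	/* VTTBR = stage-2 pgd base address | VMID field (lines 546-549). */
	pgd_phys = virt_to_phys(kvm->arch.pgd);
	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) &
	       VTTBR_VMID_MASK(kvm_vmid_bits);
	kvm->arch.vttbr = kvm_phys_to_vttbr(pgd_phys) | vmid;

	write_unlock(&kvm_vmid_lock);
}
```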
556 struct kvm *kvm = vcpu->kvm; in kvm_vcpu_first_run_init() local
564 if (likely(irqchip_in_kernel(kvm))) { in kvm_vcpu_first_run_init()
569 if (unlikely(!vgic_ready(kvm))) { in kvm_vcpu_first_run_init()
570 ret = kvm_vgic_map_resources(kvm); in kvm_vcpu_first_run_init()
591 bool kvm_arch_intc_initialized(struct kvm *kvm) in kvm_arch_intc_initialized() argument
593 return vgic_initialized(kvm); in kvm_arch_intc_initialized()
596 void kvm_arm_halt_guest(struct kvm *kvm) in kvm_arm_halt_guest() argument
601 kvm_for_each_vcpu(i, vcpu, kvm) in kvm_arm_halt_guest()
603 kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP); in kvm_arm_halt_guest()
606 void kvm_arm_resume_guest(struct kvm *kvm) in kvm_arm_resume_guest() argument
611 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_arm_resume_guest()
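kvm_arm_halt_guest() and kvm_arm_resume_guest() bracket operations that must not race with vCPUs running in the guest (lines 596-611, and the irq-bypass callbacks at the end of the listing). A sketch of the pair; the per-vCPU pause flag and the wait-queue wake-up are assumptions about how the KVM_REQ_SLEEP request is paired with resumption:

```c
void kvm_arm_halt_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;		/* assumption: per-vCPU pause flag */
	/* Kick every vCPU out of the guest and park it. */
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;		/* assumption: per-vCPU pause flag */
		swake_up(kvm_arch_vcpu_wq(vcpu));	/* assumption: wake the vCPU wait queue */
	}
}
```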
694 update_vttbr(vcpu->kvm); in kvm_arch_vcpu_ioctl_run()
743 if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || in kvm_arch_vcpu_ioctl_run()
836 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_arch_vcpu_ioctl_run()
881 int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level, in kvm_vm_ioctl_irq_line() argument
886 int nrcpus = atomic_read(&kvm->online_vcpus); in kvm_vm_ioctl_irq_line()
898 if (irqchip_in_kernel(kvm)) in kvm_vm_ioctl_irq_line()
904 vcpu = kvm_get_vcpu(kvm, vcpu_idx); in kvm_vm_ioctl_irq_line()
913 if (!irqchip_in_kernel(kvm)) in kvm_vm_ioctl_irq_line()
919 vcpu = kvm_get_vcpu(kvm, vcpu_idx); in kvm_vm_ioctl_irq_line()
926 return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL); in kvm_vm_ioctl_irq_line()
928 if (!irqchip_in_kernel(kvm)) in kvm_vm_ioctl_irq_line()
934 return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL); in kvm_vm_ioctl_irq_line()
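The kvm_vm_ioctl_irq_line() hits (lines 881-934) show the three injection paths behind KVM_IRQ_LINE: CPU-level IRQ/FIQ lines when the irqchip is emulated in userspace, and PPI/SPI injection through the in-kernel vGIC otherwise. A condensed sketch; the field decoding, bounds checks, and vcpu_interrupt_line() are assumptions beyond what the listing shows:

```c
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_idx, irq_num;
	int nrcpus = atomic_read(&kvm->online_vcpus);
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	/* The irq field encodes the line type, target vCPU, and interrupt number. */
	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	irq_num  = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		/* Only valid with a userspace irqchip. */
		if (irqchip_in_kernel(kvm))
			return -ENXIO;
		if (vcpu_idx >= nrcpus)
			return -EINVAL;
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;
		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		/* Private interrupt, routed to one vCPU via the vGIC. */
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;
		if (vcpu_idx >= nrcpus)
			return -EINVAL;
		vcpu = kvm_get_vcpu(kvm, vcpu_idx);
		if (!vcpu)
			return -EINVAL;
		return kvm_vgic_inject_irq(kvm, vcpu->vcpu_id, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		/* Shared interrupt, vCPU 0 stands in as the source. */
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;
		return kvm_vgic_inject_irq(kvm, 0, irq_num, level, NULL);
	}

	return -EINVAL;
}
```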
996 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
1204 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) in kvm_vm_ioctl_get_dirty_log() argument
1209 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
1211 r = kvm_get_dirty_log_protect(kvm, log, &is_dirty); in kvm_vm_ioctl_get_dirty_log()
1214 kvm_flush_remote_tlbs(kvm); in kvm_vm_ioctl_get_dirty_log()
1216 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log()
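The dirty-log ioctl (lines 1204-1216) can be reconstructed almost entirely from the hits: take slots_lock, let the generic helper copy and write-protect the dirty bitmap, and flush guest TLBs only if something was actually dirty. The is_dirty local and the return value are the only assumptions:

```c
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

	/* Newly write-protected pages need their stale translations dropped. */
	if (is_dirty)
		kvm_flush_remote_tlbs(kvm);

	mutex_unlock(&kvm->slots_lock);
	return r;
}
```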
1220 static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, in kvm_vm_ioctl_set_device_addr() argument
1234 return kvm_vgic_addr(kvm, type, &dev_addr->addr, true); in kvm_vm_ioctl_set_device_addr()
1243 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl() local
1251 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
1252 ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); in kvm_arch_vm_ioctl()
1253 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
1261 return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); in kvm_arch_vm_ioctl()
1578 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) in kvm_mpidr_to_vcpu() argument
1584 kvm_for_each_vcpu(i, vcpu, kvm) { in kvm_mpidr_to_vcpu()
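kvm_mpidr_to_vcpu() (lines 1578-1584) maps an MPIDR affinity value, as used by PSCI and the vGIC, back to the vCPU that owns it. A sketch; the MPIDR_HWID_BITMASK masking and the kvm_vcpu_get_mpidr_aff() comparison are assumptions about what the loop body matches on:

```c
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
{
	struct kvm_vcpu *vcpu;
	int i;

	mpidr &= MPIDR_HWID_BITMASK;		/* keep only the affinity fields */

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu))
			return vcpu;
	}
	return NULL;
}
```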
1602 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_add_producer()
1611 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_del_producer()
1620 kvm_arm_halt_guest(irqfd->kvm); in kvm_arch_irq_bypass_stop()
1628 kvm_arm_resume_guest(irqfd->kvm); in kvm_arch_irq_bypass_start()