Lines matching "+full:gpa +full:- +full:1" in arch/x86/kvm/xen.c (KVM's x86 Xen
hypercall and event-channel emulation). Matches are grouped below by the
function they fall in, each prefixed with its line number in the file.
    1  // SPDX-License-Identifier: GPL-2.0

in kvm_xen_shared_info_init():
   35  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
   37  gpa_t gpa = gfn_to_gpa(gfn);
   42  int idx = srcu_read_lock(&kvm->srcu);
   50  ret = kvm_gpc_activate(kvm, gpc, NULL, KVM_HOST_USES_PFN, gpa,
   59  wall_nsec = ktime_get_real_ns() - get_kvmclock_ns(kvm);
   62  read_lock_irq(&gpc->lock);
   64  if (gpc->valid)
   67  read_unlock_irq(&gpc->lock);
   68  } while (1);
   70  /* Paranoia checks on the 32-bit struct layout */
   76  /* Paranoia checks on the 64-bit struct layout */
   80  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
   81  struct shared_info *shinfo = gpc->khva;
   83  wc_sec_hi = &shinfo->wc_sec_hi;
   84  wc = &shinfo->wc;
   88  struct compat_shared_info *shinfo = gpc->khva;
   90  wc_sec_hi = &shinfo->arch.wc_sec_hi;
   91  wc = &shinfo->wc;
   95  wc_version = wc->version = (wc->version + 1) | 1;
   98  wc->nsec = do_div(wall_nsec, 1000000000);
   99  wc->sec = (u32)wall_nsec;
  103  wc->version = wc_version + 1;
  104  read_unlock_irq(&gpc->lock);
  109  srcu_read_unlock(&kvm->srcu, idx);
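
Note: the wc->version arithmetic above (lines 95 and 103) is the Xen wallclock
seqcount: the version is forced odd before sec/nsec are rewritten and bumped
back to even afterwards. A guest reader retries until it observes a stable,
even version — a minimal sketch of the consumer side, not code from this file:

        uint32_t ver;
        uint64_t sec, nsec;
        do {
                ver = READ_ONCE(wc->version);
                smp_rmb();                      /* version read before data */
                sec  = wc->sec;
                nsec = wc->nsec;
                smp_rmb();                      /* data read before re-check */
        } while ((ver & 1) || ver != READ_ONCE(wc->version));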
in kvm_xen_inject_timer_irqs():
  115  if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) {
  118  e.vcpu_id = vcpu->vcpu_id;
  119  e.vcpu_idx = vcpu->vcpu_idx;
  120  e.port = vcpu->arch.xen.timer_virq;
  123  kvm_xen_set_evtchn(&e, vcpu->kvm);
  125  vcpu->arch.xen.timer_expires = 0;
  126  atomic_set(&vcpu->arch.xen.timer_pending, 0);

in xen_timer_callback():
  134  if (atomic_read(&vcpu->arch.xen.timer_pending))
  137  atomic_inc(&vcpu->arch.xen.timer_pending);

in kvm_xen_start_timer():
  146  atomic_set(&vcpu->arch.xen.timer_pending, 0);
  147  vcpu->arch.xen.timer_expires = guest_abs;
  150  xen_timer_callback(&vcpu->arch.xen.timer);
  153  hrtimer_start(&vcpu->arch.xen.timer,

in kvm_xen_stop_timer():
  161  hrtimer_cancel(&vcpu->arch.xen.timer);
  162  vcpu->arch.xen.timer_expires = 0;
  163  atomic_set(&vcpu->arch.xen.timer_pending, 0);

in kvm_xen_init_timer():
  168  hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC,
  170  vcpu->arch.xen.timer.function = xen_timer_callback;

in kvm_xen_update_runstate():
  175  struct kvm_vcpu_xen *vx = &v->arch.xen;
  176  u64 now = get_kvmclock_ns(v->kvm);
  177  u64 delta_ns = now - vx->runstate_entry_time;
  178  u64 run_delay = current->sched_info.run_delay;
  180  if (unlikely(!vx->runstate_entry_time))
  181  vx->current_runstate = RUNSTATE_offline;
  187  if (vx->current_runstate == RUNSTATE_running) {
  188  u64 steal_ns = run_delay - vx->last_steal;
  190  delta_ns -= steal_ns;
  192  vx->runstate_times[RUNSTATE_runnable] += steal_ns;
  194  vx->last_steal = run_delay;
  196  vx->runstate_times[vx->current_runstate] += delta_ns;
  197  vx->current_runstate = state;
  198  vx->runstate_entry_time = now;
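
Note: a worked example of the steal-time split above, with illustrative
numbers. If 10,000,000 ns of kvmclock time elapsed since the last update
(delta_ns) and the host scheduler delayed the vCPU task by 2,000,000 ns of it
(run_delay - last_steal), then while current_runstate == RUNSTATE_running:

        runstate_times[RUNSTATE_runnable] += 2000000;             /* steal_ns */
        runstate_times[RUNSTATE_running]  += 10000000 - 2000000;  /* delta_ns - steal_ns */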
in kvm_xen_update_runstate_guest():
  203  struct kvm_vcpu_xen *vx = &v->arch.xen;
  204  struct gfn_to_pfn_cache *gpc = &vx->runstate_cache;
  212  if (!vx->runstate_cache.active)
  215  if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
  220  read_lock_irqsave(&gpc->lock, flags);
  221  while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
  223  read_unlock_irqrestore(&gpc->lock, flags);
  229  if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa, user_len))
  232  read_lock_irqsave(&gpc->lock, flags);
  236  * The only difference between 32-bit and 64-bit versions of the
  237  * runstate struct is the alignment of uint64_t in 32-bit, which
  238  * means that the 64-bit version has an additional 4 bytes of
  243  * the actual array of time[] in each state starts at user_times[1].
  255  user_state = gpc->khva;
  257  if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode)
  258  user_times = gpc->khva + offsetof(struct vcpu_runstate_info,
  261  user_times = gpc->khva + offsetof(struct compat_vcpu_runstate_info,
  273  user_times[0] = vx->runstate_entry_time | XEN_RUNSTATE_UPDATE;
  278  * for 32-bit and 64-bit guests, asserted here for paranoia.
  283  sizeof(vx->current_runstate));
  285  sizeof(vx->current_runstate));
  287  *user_state = vx->current_runstate;
  294  offsetof(struct vcpu_runstate_info, time) - sizeof(u64));
  296  offsetof(struct compat_vcpu_runstate_info, time) - sizeof(u64));
  300  sizeof(vx->runstate_times));
  302  memcpy(user_times + 1, vx->runstate_times, sizeof(vx->runstate_times));
  312  read_unlock_irqrestore(&gpc->lock, flags);
  314  mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
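
Note: the layout the comments above (lines 236-243) rely on, sketched from the
guest ABI with offsets in bytes:

        struct vcpu_runstate_info {        /* 64-bit */
                int      state;            /* offset  0          */
                                           /* 4 bytes of padding */
                uint64_t state_entry_time; /* offset  8          */
                uint64_t time[4];          /* offset 16          */
        };
        /* compat (32-bit): uint64_t is only 4-byte aligned, so
         * state_entry_time sits at offset 4 and time[] at 12.  */

Since user_times points at state_entry_time, user_times[0] carries the
XEN_RUNSTATE_UPDATE flag (line 273) and user_times[1..4] shadow time[0..3],
which is why the memcpy at line 302 targets user_times + 1.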
in kvm_xen_inject_vcpu_vector():
  322  irq.dest_id = v->vcpu_id;
  323  irq.vector = v->arch.xen.upcall_vector;
  327  irq.level = 1;
  330  WARN_ON_ONCE(!kvm_irq_delivery_to_apic_fast(v->kvm, NULL, &irq, &r, NULL));

(file-level comment preceding kvm_xen_inject_pending_events)
  335  * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which

in kvm_xen_inject_pending_events():
  342  unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel);
  343  struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
  350  * Yes, this is an open-coded loop. But that's just what put_user()
  354  read_lock_irqsave(&gpc->lock, flags);
  355  while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
  357  read_unlock_irqrestore(&gpc->lock, flags);
  359  if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
  363  read_lock_irqsave(&gpc->lock, flags);
  366  /* Now gpc->khva is a valid kernel address for the vcpu_info */
  367  if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) {
  368  struct vcpu_info *vi = gpc->khva;
  370  asm volatile(LOCK_PREFIX "orq %0, %1\n"
  374  "+m" (vi->evtchn_pending_sel),
  375  "+m" (v->arch.xen.evtchn_pending_sel)
  377  WRITE_ONCE(vi->evtchn_upcall_pending, 1);
  380  struct compat_vcpu_info *vi = gpc->khva;
  382  asm volatile(LOCK_PREFIX "orl %0, %1\n"
  386  "+m" (vi->evtchn_pending_sel),
  387  "+m" (v->arch.xen.evtchn_pending_sel)
  389  WRITE_ONCE(vi->evtchn_upcall_pending, 1);
  391  read_unlock_irqrestore(&gpc->lock, flags);
  393  /* For the per-vCPU lapic vector, deliver it as MSI. */
  394  if (v->arch.xen.upcall_vector)
  397  mark_page_dirty_in_slot(v->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
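
Note: the LOCK-prefixed asm above is only partially captured by the match;
judging from its two "+m" operands, its net effect, written as plain C with
the atomicity dropped, is roughly:

        /* transfer the latched selector bits into the guest-visible vcpu_info */
        vi->evtchn_pending_sel |= evtchn_pending_sel;           /* lock or  */
        /* and clear exactly those bits from the kernel-side shadow copy    */
        v->arch.xen.evtchn_pending_sel &= ~evtchn_pending_sel;  /* lock and */
        WRITE_ONCE(vi->evtchn_upcall_pending, 1);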
in __kvm_xen_has_interrupt():
  402  struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache;
  419  read_lock_irqsave(&gpc->lock, flags);
  420  while (!kvm_gfn_to_pfn_cache_check(v->kvm, gpc, gpc->gpa,
  422  read_unlock_irqrestore(&gpc->lock, flags);
  428  * at that point, just return 1 in order to trigger an immediate wake,
  433  return 1;
  435  if (kvm_gfn_to_pfn_cache_refresh(v->kvm, gpc, gpc->gpa,
  443  read_lock_irqsave(&gpc->lock, flags);
  446  rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending;
  447  read_unlock_irqrestore(&gpc->lock, flags);
in kvm_xen_hvm_set_attr():
  453  int r = -ENOENT;
  456  switch (data->type) {
  458  if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) {
  459  r = -EINVAL;
  461  mutex_lock(&kvm->lock);
  462  kvm->arch.xen.long_mode = !!data->u.long_mode;
  463  mutex_unlock(&kvm->lock);
  469  mutex_lock(&kvm->lock);
  470  r = kvm_xen_shared_info_init(kvm, data->u.shared_info.gfn);
  471  mutex_unlock(&kvm->lock);
  475  if (data->u.vector && data->u.vector < 0x10)
  476  r = -EINVAL;
  478  mutex_lock(&kvm->lock);
  479  kvm->arch.xen.upcall_vector = data->u.vector;
  480  mutex_unlock(&kvm->lock);
  490  mutex_lock(&kvm->lock);
  491  kvm->arch.xen.xen_version = data->u.xen_version;
  492  mutex_unlock(&kvm->lock);
in kvm_xen_hvm_get_attr():
  505  int r = -ENOENT;
  507  mutex_lock(&kvm->lock);
  509  switch (data->type) {
  511  data->u.long_mode = kvm->arch.xen.long_mode;
  516  if (kvm->arch.xen.shinfo_cache.active)
  517  data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa);
  519  data->u.shared_info.gfn = GPA_INVALID;
  524  data->u.vector = kvm->arch.xen.upcall_vector;
  529  data->u.xen_version = kvm->arch.xen.xen_version;
  537  mutex_unlock(&kvm->lock);
in kvm_xen_vcpu_set_attr():
  543  int idx, r = -ENOENT;
  545  mutex_lock(&vcpu->kvm->lock);
  546  idx = srcu_read_lock(&vcpu->kvm->srcu);
  548  switch (data->type) {
  556  if (data->u.gpa == GPA_INVALID) {
  557  kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
  562  r = kvm_gpc_activate(vcpu->kvm,
  563  &vcpu->arch.xen.vcpu_info_cache, NULL,
  564  KVM_HOST_USES_PFN, data->u.gpa,
  572  if (data->u.gpa == GPA_INVALID) {
  573  kvm_gpc_deactivate(vcpu->kvm,
  574  &vcpu->arch.xen.vcpu_time_info_cache);
  579  r = kvm_gpc_activate(vcpu->kvm,
  580  &vcpu->arch.xen.vcpu_time_info_cache,
  581  NULL, KVM_HOST_USES_PFN, data->u.gpa,
  589  r = -EOPNOTSUPP;
  592  if (data->u.gpa == GPA_INVALID) {
  593  kvm_gpc_deactivate(vcpu->kvm,
  594  &vcpu->arch.xen.runstate_cache);
  599  r = kvm_gpc_activate(vcpu->kvm, &vcpu->arch.xen.runstate_cache,
  600  NULL, KVM_HOST_USES_PFN, data->u.gpa,
  606  r = -EOPNOTSUPP;
  609  if (data->u.runstate.state > RUNSTATE_offline) {
  610  r = -EINVAL;
  614  kvm_xen_update_runstate(vcpu, data->u.runstate.state);
  620  r = -EOPNOTSUPP;
  623  if (data->u.runstate.state > RUNSTATE_offline) {
  624  r = -EINVAL;
  627  if (data->u.runstate.state_entry_time !=
  628  (data->u.runstate.time_running +
  629  data->u.runstate.time_runnable +
  630  data->u.runstate.time_blocked +
  631  data->u.runstate.time_offline)) {
  632  r = -EINVAL;
  635  if (get_kvmclock_ns(vcpu->kvm) <
  636  data->u.runstate.state_entry_time) {
  637  r = -EINVAL;
  641  vcpu->arch.xen.current_runstate = data->u.runstate.state;
  642  vcpu->arch.xen.runstate_entry_time =
  643  data->u.runstate.state_entry_time;
  644  vcpu->arch.xen.runstate_times[RUNSTATE_running] =
  645  data->u.runstate.time_running;
  646  vcpu->arch.xen.runstate_times[RUNSTATE_runnable] =
  647  data->u.runstate.time_runnable;
  648  vcpu->arch.xen.runstate_times[RUNSTATE_blocked] =
  649  data->u.runstate.time_blocked;
  650  vcpu->arch.xen.runstate_times[RUNSTATE_offline] =
  651  data->u.runstate.time_offline;
  652  vcpu->arch.xen.last_steal = current->sched_info.run_delay;
  658  r = -EOPNOTSUPP;
  661  if (data->u.runstate.state > RUNSTATE_offline &&
  662  data->u.runstate.state != (u64)-1) {
  663  r = -EINVAL;
  667  if (data->u.runstate.state_entry_time !=
  668  (data->u.runstate.time_running +
  669  data->u.runstate.time_runnable +
  670  data->u.runstate.time_blocked +
  671  data->u.runstate.time_offline)) {
  672  r = -EINVAL;
  676  if (get_kvmclock_ns(vcpu->kvm) <
  677  (vcpu->arch.xen.runstate_entry_time +
  678  data->u.runstate.state_entry_time)) {
  679  r = -EINVAL;
  683  vcpu->arch.xen.runstate_entry_time +=
  684  data->u.runstate.state_entry_time;
  685  vcpu->arch.xen.runstate_times[RUNSTATE_running] +=
  686  data->u.runstate.time_running;
  687  vcpu->arch.xen.runstate_times[RUNSTATE_runnable] +=
  688  data->u.runstate.time_runnable;
  689  vcpu->arch.xen.runstate_times[RUNSTATE_blocked] +=
  690  data->u.runstate.time_blocked;
  691  vcpu->arch.xen.runstate_times[RUNSTATE_offline] +=
  692  data->u.runstate.time_offline;
  694  if (data->u.runstate.state <= RUNSTATE_offline)
  695  kvm_xen_update_runstate(vcpu, data->u.runstate.state);
  700  if (data->u.vcpu_id >= KVM_MAX_VCPUS)
  701  r = -EINVAL;
  703  vcpu->arch.xen.vcpu_id = data->u.vcpu_id;
  709  if (data->u.timer.port &&
  710  data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) {
  711  r = -EINVAL;
  715  if (!vcpu->arch.xen.timer.function)
  720  vcpu->arch.xen.timer_virq = data->u.timer.port;
  723  if (data->u.timer.port && data->u.timer.expires_ns)
  724  kvm_xen_start_timer(vcpu, data->u.timer.expires_ns,
  725  data->u.timer.expires_ns -
  726  get_kvmclock_ns(vcpu->kvm));
  732  if (data->u.vector && data->u.vector < 0x10)
  733  r = -EINVAL;
  735  vcpu->arch.xen.upcall_vector = data->u.vector;
  744  srcu_read_unlock(&vcpu->kvm->srcu, idx);
  745  mutex_unlock(&vcpu->kvm->lock);
in kvm_xen_vcpu_get_attr():
  751  int r = -ENOENT;
  753  mutex_lock(&vcpu->kvm->lock);
  755  switch (data->type) {
  757  if (vcpu->arch.xen.vcpu_info_cache.active)
  758  data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa;
  760  data->u.gpa = GPA_INVALID;
  765  if (vcpu->arch.xen.vcpu_time_info_cache.active)
  766  data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa;
  768  data->u.gpa = GPA_INVALID;
  774  r = -EOPNOTSUPP;
  777  if (vcpu->arch.xen.runstate_cache.active) {
  778  data->u.gpa = vcpu->arch.xen.runstate_cache.gpa;
  785  r = -EOPNOTSUPP;
  788  data->u.runstate.state = vcpu->arch.xen.current_runstate;
  794  r = -EOPNOTSUPP;
  797  data->u.runstate.state = vcpu->arch.xen.current_runstate;
  798  data->u.runstate.state_entry_time =
  799  vcpu->arch.xen.runstate_entry_time;
  800  data->u.runstate.time_running =
  801  vcpu->arch.xen.runstate_times[RUNSTATE_running];
  802  data->u.runstate.time_runnable =
  803  vcpu->arch.xen.runstate_times[RUNSTATE_runnable];
  804  data->u.runstate.time_blocked =
  805  vcpu->arch.xen.runstate_times[RUNSTATE_blocked];
  806  data->u.runstate.time_offline =
  807  vcpu->arch.xen.runstate_times[RUNSTATE_offline];
  812  r = -EINVAL;
  816  data->u.vcpu_id = vcpu->arch.xen.vcpu_id;
  821  data->u.timer.port = vcpu->arch.xen.timer_virq;
  822  data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL;
  823  data->u.timer.expires_ns = vcpu->arch.xen.timer_expires;
  828  data->u.vector = vcpu->arch.xen.upcall_vector;
  836  mutex_unlock(&vcpu->kvm->lock);
in kvm_xen_write_hypercall_page():
  842  struct kvm *kvm = vcpu->kvm;
  848  vcpu->kvm->arch.xen.long_mode = lm;
  861  return 1;
  873  memset(instructions + 9, 0xcc, sizeof(instructions) - 9);
  876  *(u32 *)&instructions[1] = i;
  880  return 1;
  884  * Note, truncation is a non-issue as 'lm' is guaranteed to be
  885  * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes.
  887  hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64
  888  : kvm->arch.xen_hvm_config.blob_addr_32;
  889  u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
  890  : kvm->arch.xen_hvm_config.blob_size_32;
  894  return 1;
  904  return 1;
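
Note: assuming the usual 32-byte hypercall stub (the memset at line 873 pads
from byte 9 onwards, and line 876 patches the hypercall number into the mov
immediate), each slot of the page ends up as:

        /* b8 NN NN NN NN   mov  $hypercall_nr, %eax                */
        /* 0f 01 c1         vmcall  (0f 01 d9, vmmcall, on AMD)     */
        /* c3               ret                                     */
        /* cc cc .. cc      int3 padding up to the 32-byte boundary */

so the guest invokes hypercall N by calling hypercall_page + N * 32, mirroring
the page native Xen provides.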
in kvm_xen_hvm_config():
  916  if (xhc->flags & ~permitted_flags)
  917  return -EINVAL;
  923  if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) &&
  924  (xhc->blob_addr_32 || xhc->blob_addr_64 ||
  925  xhc->blob_size_32 || xhc->blob_size_64))
  926  return -EINVAL;
  928  mutex_lock(&kvm->lock);
  930  if (xhc->msr && !kvm->arch.xen_hvm_config.msr)
  932  else if (!xhc->msr && kvm->arch.xen_hvm_config.msr)
  935  memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc));
  937  mutex_unlock(&kvm->lock);

in kvm_xen_hypercall_complete_userspace():
  949  struct kvm_run *run = vcpu->run;
  951  if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip)))
  952  return 1;
  954  return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result);

in max_evtchn_port():
  959  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode)
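
Note: only the condition of max_evtchn_port() matched the search. For the
2-level event channel ABI the port limit is the guest word width squared (the
evtchn_pending bitmap is word_bits words of word_bits bits), so the two
branches presumably resolve to:

        return EVTCHN_2L_NR_CHANNELS;           /* 64 * 64 = 4096, long mode    */
        /* else */
        return COMPAT_EVTCHN_2L_NR_CHANNELS;    /* 32 * 32 = 1024, 32-bit guest */

(macro names as recalled from the surrounding kernel sources, not from the
matches above).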
in wait_pending_event():
  968  struct kvm *kvm = vcpu->kvm;
  969  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
  975  read_lock_irqsave(&gpc->lock, flags);
  976  idx = srcu_read_lock(&kvm->srcu);
  977  if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
  981  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
  982  struct shared_info *shinfo = gpc->khva;
  983  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
  985  struct compat_shared_info *shinfo = gpc->khva;
  986  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
  997  srcu_read_unlock(&kvm->srcu, idx);
  998  read_unlock_irqrestore(&gpc->lock, flags);

in kvm_xen_schedop_poll():
 1009  gpa_t gpa;
 1012  !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND))
 1015  idx = srcu_read_lock(&vcpu->kvm->srcu);
 1016  gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
 1017  srcu_read_unlock(&vcpu->kvm->srcu, idx);
 1019  if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &sched_poll,
 1021  *r = -EFAULT;
 1025  if (unlikely(sched_poll.nr_ports > 1)) {
 1028  *r = -EINVAL;
 1035  *r = -ENOMEM;
 1042  idx = srcu_read_lock(&vcpu->kvm->srcu);
 1043  gpa = kvm_mmu_gva_to_gpa_system(vcpu,
 1046  srcu_read_unlock(&vcpu->kvm->srcu, idx);
 1048  if (!gpa || kvm_vcpu_read_guest(vcpu, gpa,
 1050  *r = -EFAULT;
 1053  if (ports[i] >= max_evtchn_port(vcpu->kvm)) {
 1054  *r = -EINVAL;
 1059  if (sched_poll.nr_ports == 1)
 1060  vcpu->arch.xen.poll_evtchn = port;
 1062  vcpu->arch.xen.poll_evtchn = -1;
 1064  set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
 1067  vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 1070  mod_timer(&vcpu->arch.xen.poll_timer,
 1076  del_timer(&vcpu->arch.xen.poll_timer);
 1078  vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 1081  vcpu->arch.xen.poll_evtchn = 0;
 1085  clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask);
 1087  if (unlikely(sched_poll.nr_ports > 1))
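
Note: the guest argument copied in at line 1019 is the SCHEDOP_poll
descriptor; a sketch of that public Xen ABI struct, with the guest handle
shown as a plain pointer:

        struct sched_poll {
                evtchn_port_t *ports;   /* guest address of the port array  */
                unsigned int nr_ports;
                uint64_t timeout;       /* absolute timeout in ns, 0 = none */
        };

The single-port case is handled inline; for nr_ports > 1 the port array itself
is pulled in by the second kvm_vcpu_read_guest() at line 1048, after an
allocation whose failure evidently produces the -ENOMEM at line 1035.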
in kvm_xen_hcall_vcpu_op():
 1129  gpa_t gpa;
 1137  if (vcpu->arch.xen.vcpu_id != vcpu_id) {
 1138  *r = -EINVAL;
 1141  idx = srcu_read_lock(&vcpu->kvm->srcu);
 1142  gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
 1143  srcu_read_unlock(&vcpu->kvm->srcu, idx);
 1146  * The only difference for 32-bit compat is the 4 bytes of
 1149  * the padding and return -EFAULT if we can't. Otherwise we
 1150  * might as well just have copied the 12-byte 32-bit struct.
 1161  if (!gpa ||
 1162  kvm_vcpu_read_guest(vcpu, gpa, &oneshot, longmode ? sizeof(oneshot) :
 1164  *r = -EFAULT;
 1168  delta = oneshot.timeout_abs_ns - get_kvmclock_ns(vcpu->kvm);
 1170  *r = -ETIME;
 1179  if (vcpu->arch.xen.vcpu_id != vcpu_id) {
 1180  *r = -EINVAL;
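
Note: the "12-byte 32-bit struct" mentioned in the comment above is the
singleshot-timer argument; sketched from the Xen ABI:

        struct vcpu_set_singleshot_timer {
                uint64_t timeout_abs_ns;   /* absolute expiry on the kvmclock */
                uint32_t flags;
                /* 64-bit: 4 bytes of tail padding, sizeof == 16;
                 * 32-bit: uint64_t is 4-byte aligned, sizeof == 12 */
        };

That size difference is what the longmode ? sizeof(oneshot) : ... selector at
line 1162 copes with.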
in kvm_xen_hcall_set_timer_op():
 1198  uint64_t guest_now = get_kvmclock_ns(vcpu->kvm);
 1199  int64_t delta = timeout - guest_now;

in kvm_xen_hypercall():
 1228  u64 input, params[6], r = -ENOSYS;
 1234  /* Hyper-V hypercalls get bit 31 set in EAX */
 1242  params[1] = (u32)kvm_rcx_read(vcpu);
 1251  params[1] = (u64)kvm_rsi_read(vcpu);
 1259  trace_kvm_xen_hypercall(input, params[0], params[1], params[2],
 1271  if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) {
 1272  r = vcpu->kvm->arch.xen.xen_version;
 1278  handled = kvm_xen_hcall_evtchn_send(vcpu, params[1], &r);
 1282  params[1], &r);
 1285  handled = kvm_xen_hcall_vcpu_op(vcpu, longmode, params[0], params[1],
 1290  /* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */
 1292  timeout |= params[1] << 32;
 1304  vcpu->run->exit_reason = KVM_EXIT_XEN;
 1305  vcpu->run->xen.type = KVM_EXIT_XEN_HCALL;
 1306  vcpu->run->xen.u.hcall.longmode = longmode;
 1307  vcpu->run->xen.u.hcall.cpl = cpl;
 1308  vcpu->run->xen.u.hcall.input = input;
 1309  vcpu->run->xen.u.hcall.params[0] = params[0];
 1310  vcpu->run->xen.u.hcall.params[1] = params[1];
 1311  vcpu->run->xen.u.hcall.params[2] = params[2];
 1312  vcpu->run->xen.u.hcall.params[3] = params[3];
 1313  vcpu->run->xen.u.hcall.params[4] = params[4];
 1314  vcpu->run->xen.u.hcall.params[5] = params[5];
 1315  vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu);
 1316  vcpu->arch.complete_userspace_io =
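
Note: lines 1304-1316 bounce an unhandled hypercall out to the VMM. A minimal
sketch of the userspace side, using the kvm_run fields populated above (the
function name and error policy are illustrative, not from this file):

        void handle_xen_exit(struct kvm_run *run)
        {
                if (run->exit_reason == KVM_EXIT_XEN &&
                    run->xen.type == KVM_EXIT_XEN_HCALL) {
                        /* emulate run->xen.u.hcall.input using its params[] ... */
                        run->xen.u.hcall.result = -ENOSYS;      /* e.g. unhandled */
                }
                /* on the next KVM_RUN, kvm_xen_hypercall_complete_userspace()
                 * (line 954 above) propagates .result back to the guest */
        }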
in kvm_xen_check_poller():
 1324  int poll_evtchn = vcpu->arch.xen.poll_evtchn;
 1326  if ((poll_evtchn == port || poll_evtchn == -1) &&
 1327  test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) {

(file-level comment preceding kvm_xen_set_evtchn_fast)
 1341  * only check on its return value is a comparison with -EWOULDBLOCK.

in kvm_xen_set_evtchn_fast():
 1345  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
 1353  vcpu_idx = READ_ONCE(xe->vcpu_idx);
 1357  vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id);
 1359  return -EINVAL;
 1360  WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx);
 1363  if (!vcpu->arch.xen.vcpu_info_cache.active)
 1364  return -EINVAL;
 1366  if (xe->port >= max_evtchn_port(kvm))
 1367  return -EINVAL;
 1369  rc = -EWOULDBLOCK;
 1371  idx = srcu_read_lock(&kvm->srcu);
 1373  read_lock_irqsave(&gpc->lock, flags);
 1374  if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, PAGE_SIZE))
 1377  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
 1378  struct shared_info *shinfo = gpc->khva;
 1379  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
 1380  mask_bits = (unsigned long *)&shinfo->evtchn_mask;
 1381  port_word_bit = xe->port / 64;
 1383  struct compat_shared_info *shinfo = gpc->khva;
 1384  pending_bits = (unsigned long *)&shinfo->evtchn_pending;
 1385  mask_bits = (unsigned long *)&shinfo->evtchn_mask;
 1386  port_word_bit = xe->port / 32;
 1391  * we try to set the corresponding bit in the in-kernel shadow of
 1396  if (test_and_set_bit(xe->port, pending_bits)) {
 1398  } else if (test_bit(xe->port, mask_bits)) {
 1399  rc = -ENOTCONN; /* Masked */
 1400  kvm_xen_check_poller(vcpu, xe->port);
 1402  rc = 1; /* Delivered to the bitmap in shared_info. */
 1404  read_unlock_irqrestore(&gpc->lock, flags);
 1405  gpc = &vcpu->arch.xen.vcpu_info_cache;
 1407  read_lock_irqsave(&gpc->lock, flags);
 1408  if (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, sizeof(struct vcpu_info))) {
 1410  * Could not access the vcpu_info. Set the bit in-kernel
 1413  if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel))
 1418  if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) {
 1419  struct vcpu_info *vcpu_info = gpc->khva;
 1420  if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) {
 1421  WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
 1425  struct compat_vcpu_info *vcpu_info = gpc->khva;
 1427  (unsigned long *)&vcpu_info->evtchn_pending_sel)) {
 1428  WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
 1433  /* For the per-vCPU lapic vector, deliver it as MSI. */
 1434  if (kick_vcpu && vcpu->arch.xen.upcall_vector) {
 1441  read_unlock_irqrestore(&gpc->lock, flags);
 1442  srcu_read_unlock(&kvm->srcu, idx);
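
Note: the fast path above condenses to the classic 2-level delivery algorithm;
roughly, with 64-bit words (compat guests divide by 32 instead):

        if (test_and_set_bit(port, shinfo->evtchn_pending))
                rc = 0;                 /* already pending: coalesced          */
        else if (test_bit(port, shinfo->evtchn_mask))
                rc = -ENOTCONN;         /* masked, but may satisfy a poller    */
        else {
                rc = 1;                 /* delivered to the shared_info bitmap */
                if (!test_and_set_bit(port / 64,
                                      &vcpu_info->evtchn_pending_sel)) {
                        WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1);
                        kick_vcpu = true;   /* wake the vCPU or send the MSI   */
                }
        }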
in kvm_xen_set_evtchn():
 1458  if (rc != -EWOULDBLOCK)
 1461  if (current->mm != kvm->mm) {
 1466  if (WARN_ON_ONCE(current->mm))
 1467  return -EINVAL;
 1469  kthread_use_mm(kvm->mm);
 1474  * For the irqfd workqueue, using the main kvm->lock mutex is
 1478  * then it may need to switch to using a leaf-node mutex for
 1481  mutex_lock(&kvm->lock);
 1498  struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache;
 1502  if (rc != -EWOULDBLOCK)
 1505  idx = srcu_read_lock(&kvm->srcu);
 1506  rc = kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, PAGE_SIZE);
 1507  srcu_read_unlock(&kvm->srcu, idx);
 1510  mutex_unlock(&kvm->lock);
 1513  kthread_unuse_mm(kvm->mm);

in evtchn_set_fn():
 1523  return -EINVAL;
 1525  return kvm_xen_set_evtchn(&e->xen_evtchn, kvm);
in kvm_xen_setup_evtchn():
 1539  if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm))
 1540  return -EINVAL;
 1543  if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
 1544  return -EINVAL;
 1554  vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu);
 1556  e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx;
 1558  e->xen_evtchn.vcpu_idx = -1;
 1560  e->xen_evtchn.port = ue->u.xen_evtchn.port;
 1561  e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu;
 1562  e->xen_evtchn.priority = ue->u.xen_evtchn.priority;
 1563  e->set = evtchn_set_fn;

in kvm_xen_hvm_evtchn_send():
 1576  if (!uxe->port || uxe->port >= max_evtchn_port(kvm))
 1577  return -EINVAL;
 1580  if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
 1581  return -EINVAL;
 1583  e.port = uxe->port;
 1584  e.vcpu_id = uxe->vcpu;
 1585  e.vcpu_idx = -1;
 1586  e.priority = uxe->priority;
 1591  * None of that 'return 1 if it actually got delivered' nonsense.
 1592  * We don't care if it was masked (-ENOTCONN) either.
 1594  if (ret > 0 || ret == -ENOTCONN)

in kvm_xen_eventfd_update():
 1621  u32 port = data->u.evtchn.send_port;
 1625  return -EINVAL;
 1627  mutex_lock(&kvm->lock);
 1628  evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port);
 1629  mutex_unlock(&kvm->lock);
 1632  return -ENOENT;
 1635  if (evtchnfd->type != data->u.evtchn.type)
 1636  return -EINVAL;
 1642  if (!evtchnfd->deliver.port.port ||
 1643  evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port)
 1644  return -EINVAL;
 1647  if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
 1648  return -EINVAL;
 1650  mutex_lock(&kvm->lock);
 1651  evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
 1652  if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) {
 1653  evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
 1654  evtchnfd->deliver.port.vcpu_idx = -1;
 1656  mutex_unlock(&kvm->lock);
in kvm_xen_eventfd_assign():
 1667  u32 port = data->u.evtchn.send_port;
 1670  int ret = -EINVAL;
 1673  return -EINVAL;
 1677  return -ENOMEM;
 1679  switch (data->u.evtchn.type) {
 1682  if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port)
 1683  goto out_noeventfd; /* -EINVAL */
 1687  if (data->u.evtchn.deliver.port.port) {
 1688  if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm))
 1689  goto out_noeventfd; /* -EINVAL */
 1691  eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd);
 1704  goto out; /* -EINVAL */
 1707  evtchnfd->send_port = data->u.evtchn.send_port;
 1708  evtchnfd->type = data->u.evtchn.type;
 1710  evtchnfd->deliver.eventfd.ctx = eventfd;
 1713  if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL)
 1714  goto out; /* -EINVAL */
 1716  evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port;
 1717  evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu;
 1718  evtchnfd->deliver.port.vcpu_idx = -1;
 1719  evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority;
 1722  mutex_lock(&kvm->lock);
 1723  ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1,
 1725  mutex_unlock(&kvm->lock);
 1729  if (ret == -ENOSPC)
 1730  ret = -EEXIST;
in kvm_xen_eventfd_deassign():
 1743  mutex_lock(&kvm->lock);
 1744  evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port);
 1745  mutex_unlock(&kvm->lock);
 1748  return -ENOENT;
 1751  synchronize_srcu(&kvm->srcu);
 1752  if (!evtchnfd->deliver.port.port)
 1753  eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
in kvm_xen_eventfd_reset():
 1763  mutex_lock(&kvm->lock);
 1764  idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
 1765  idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port);
 1766  synchronize_srcu(&kvm->srcu);
 1767  if (!evtchnfd->deliver.port.port)
 1768  eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
 1771  mutex_unlock(&kvm->lock);

in kvm_xen_setattr_evtchn():
 1778  u32 port = data->u.evtchn.send_port;
 1780  if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET)
 1784  return -EINVAL;
 1786  if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN)
 1788  if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE)
 1790  if (data->u.evtchn.flags)
 1791  return -EINVAL;

in kvm_xen_hcall_evtchn_send():
 1800  gpa_t gpa;
 1803  idx = srcu_read_lock(&vcpu->kvm->srcu);
 1804  gpa = kvm_mmu_gva_to_gpa_system(vcpu, param, NULL);
 1805  srcu_read_unlock(&vcpu->kvm->srcu, idx);
 1807  if (!gpa || kvm_vcpu_read_guest(vcpu, gpa, &send, sizeof(send))) {
 1808  *r = -EFAULT;
 1812  /* The evtchn_ports idr is protected by vcpu->kvm->srcu */
 1813  evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port);
 1817  if (evtchnfd->deliver.port.port) {
 1818  int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm);
 1819  if (ret < 0 && ret != -ENOTCONN)
 1822  eventfd_signal(evtchnfd->deliver.eventfd.ctx, 1);
in kvm_xen_init_vcpu():
 1831  vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx;
 1832  vcpu->arch.xen.poll_evtchn = 0;
 1834  timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0);
 1836  kvm_gpc_init(&vcpu->arch.xen.runstate_cache);
 1837  kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache);
 1838  kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache);

in kvm_xen_destroy_vcpu():
 1846  kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.runstate_cache);
 1847  kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_info_cache);
 1848  kvm_gpc_deactivate(vcpu->kvm, &vcpu->arch.xen.vcpu_time_info_cache);
 1850  del_timer_sync(&vcpu->arch.xen.poll_timer);

in kvm_xen_init_vm():
 1855  idr_init(&kvm->arch.xen.evtchn_ports);
 1856  kvm_gpc_init(&kvm->arch.xen.shinfo_cache);

in kvm_xen_destroy_vm():
 1864  kvm_gpc_deactivate(kvm, &kvm->arch.xen.shinfo_cache);
 1866  idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) {
 1867  if (!evtchnfd->deliver.port.port)
 1868  eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx);
 1871  idr_destroy(&kvm->arch.xen.evtchn_ports);
 1873  if (kvm->arch.xen_hvm_config.msr)