Lines Matching full:vcpu
Identifier search hits for vcpu in KVM's arm64 arch timer code (arch/arm64/kvm/arch_timer.c in mainline). Each entry gives the source line number, the matching source line, and the enclosing function reported by the indexer.

44 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
47 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu,
51 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu,
57 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_ctl() local
61 return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0); in timer_get_ctl()
63 return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0); in timer_get_ctl()
72 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_cval() local
76 return __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0); in timer_get_cval()
78 return __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0); in timer_get_cval()
87 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_get_offset() local
91 return __vcpu_sys_reg(vcpu, CNTVOFF_EL2); in timer_get_offset()
99 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_set_ctl() local
103 __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl; in timer_set_ctl()
106 __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl; in timer_set_ctl()
115 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_set_cval() local
119 __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = cval; in timer_set_cval()
122 __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = cval; in timer_set_cval()
131 struct kvm_vcpu *vcpu = ctxt->vcpu; in timer_set_offset() local
135 __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset; in timer_set_offset()
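The hits at lines 57 through 135 all fall in the per-timer register accessors: each arch_timer_context carries a back-pointer to its vcpu, and the accessor dispatches on the timer index to the matching shadow sysreg. A minimal sketch of the CTL pair, reconstructed from the fragments above; arch_timer_ctx_index(), TIMER_VTIMER and TIMER_PTIMER are assumed from the surrounding file and do not appear in this listing:

static u32 timer_get_ctl(struct arch_timer_context *ctxt)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        /* Read the guest's view of CNT{V,P}_CTL_EL0 from the shadow sysreg file */
        switch (arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
                return __vcpu_sys_reg(vcpu, CNTV_CTL_EL0);
        case TIMER_PTIMER:
                return __vcpu_sys_reg(vcpu, CNTP_CTL_EL0);
        default:
                WARN_ON(1);
                return 0;
        }
}

static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl)
{
        struct kvm_vcpu *vcpu = ctxt->vcpu;

        /* Write the shadow copy; hardware is reloaded on the next vcpu_load */
        switch (arch_timer_ctx_index(ctxt)) {
        case TIMER_VTIMER:
                __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = ctl;
                break;
        case TIMER_PTIMER:
                __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = ctl;
                break;
        default:
                WARN_ON(1);
        }
}

The CVAL and offset accessors at lines 72 to 135 follow the same shape, only targeting CNT{V,P}_CVAL_EL0 and CNTVOFF_EL2.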
147 static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map) in get_timer_map() argument
150 map->direct_vtimer = vcpu_vtimer(vcpu); in get_timer_map()
151 map->direct_ptimer = vcpu_ptimer(vcpu); in get_timer_map()
154 map->direct_vtimer = vcpu_vtimer(vcpu); in get_timer_map()
156 map->emul_ptimer = vcpu_ptimer(vcpu); in get_timer_map()
159 trace_kvm_get_timer_map(vcpu->vcpu_id, map); in get_timer_map()
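Lines 147 to 159 are get_timer_map(), which records per vcpu which timers are backed directly by hardware and which must be emulated with a hrtimer. A sketch consistent with the fragments above, assuming the direct/emulated split is made on has_vhe() as in mainline (nested-virt refinements omitted):

static void get_timer_map(struct kvm_vcpu *vcpu, struct timer_map *map)
{
        if (has_vhe()) {
                /* VHE: both EL1 timers can be handed to the guest directly */
                map->direct_vtimer = vcpu_vtimer(vcpu);
                map->direct_ptimer = vcpu_ptimer(vcpu);
                map->emul_ptimer = NULL;
        } else {
                /* nVHE: only the virtual timer is direct, the physical one is emulated */
                map->direct_vtimer = vcpu_vtimer(vcpu);
                map->direct_ptimer = NULL;
                map->emul_ptimer = vcpu_ptimer(vcpu);
        }

        trace_kvm_get_timer_map(vcpu->vcpu_id, map);
}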
181 struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; in kvm_arch_timer_handler() local
187 * sets the CPU's vcpu pointer to NULL, because even though the timer in kvm_arch_timer_handler()
191 if (!vcpu) in kvm_arch_timer_handler()
194 get_timer_map(vcpu, &map); in kvm_arch_timer_handler()
202 kvm_timer_update_irq(vcpu, true, ctx); in kvm_arch_timer_handler()
204 if (userspace_irqchip(vcpu->kvm) && in kvm_arch_timer_handler()
242 static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu) in vcpu_has_wfit_active() argument
245 vcpu_get_flag(vcpu, IN_WFIT)); in vcpu_has_wfit_active()
248 static u64 wfit_delay_ns(struct kvm_vcpu *vcpu) in wfit_delay_ns() argument
250 struct arch_timer_context *ctx = vcpu_vtimer(vcpu); in wfit_delay_ns()
251 u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu)); in wfit_delay_ns()
260 static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu) in kvm_timer_earliest_exp() argument
266 struct arch_timer_context *ctx = &vcpu->arch.timer_cpu.timers[i]; in kvm_timer_earliest_exp()
273 if (vcpu_has_wfit_active(vcpu)) in kvm_timer_earliest_exp()
274 min_delta = min(min_delta, wfit_delay_ns(vcpu)); in kvm_timer_earliest_exp()
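Lines 242 through 274 add WFIT support to the wakeup path: a vcpu blocked in WFIT contributes an extra deadline, taken from the timeout register of the trapped instruction, to kvm_timer_earliest_exp(). A sketch of the two helpers; kvm_counter_compute_delta() (turning a compare value on the virtual counter into nanoseconds) and the ARM64_HAS_WFXT capability check are assumed from the surrounding file:

static bool vcpu_has_wfit_active(struct kvm_vcpu *vcpu)
{
        /* Only relevant if the CPU has WFxT and the vcpu is sitting in WFIT */
        return (cpus_have_final_cap(ARM64_HAS_WFXT) &&
                vcpu_get_flag(vcpu, IN_WFIT));
}

static u64 wfit_delay_ns(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *ctx = vcpu_vtimer(vcpu);
        u64 val = vcpu_get_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu));

        /* The WFIT timeout is expressed on the virtual counter, like a CVAL */
        return kvm_counter_compute_delta(ctx, val);
}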
286 struct kvm_vcpu *vcpu; in kvm_bg_timer_expire() local
290 vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); in kvm_bg_timer_expire()
297 ns = kvm_timer_earliest_exp(vcpu); in kvm_bg_timer_expire()
303 kvm_vcpu_wake_up(vcpu); in kvm_bg_timer_expire()
310 struct kvm_vcpu *vcpu; in kvm_hrtimer_expire() local
314 vcpu = ctx->vcpu; in kvm_hrtimer_expire()
329 kvm_timer_update_irq(vcpu, true, ctx); in kvm_hrtimer_expire()
373 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) in kvm_cpu_has_pending_timer() argument
375 return vcpu_has_wfit_active(vcpu) && wfit_delay_ns(vcpu) == 0; in kvm_cpu_has_pending_timer()
381 void kvm_timer_update_run(struct kvm_vcpu *vcpu) in kvm_timer_update_run() argument
383 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); in kvm_timer_update_run()
384 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); in kvm_timer_update_run()
385 struct kvm_sync_regs *regs = &vcpu->run->s.regs; in kvm_timer_update_run()
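Lines 381 to 385 belong to the userspace-irqchip path: with no in-kernel GIC, the timer output levels are reported to userspace via the sync regs in kvm_run. A sketch of kvm_timer_update_run() consistent with those fragments, assuming the KVM_ARM_DEV_EL1_{V,P}TIMER bits of device_irq_level carry the two lines:

void kvm_timer_update_run(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the device bitmap with the current timer output levels */
        regs->device_irq_level &= ~(KVM_ARM_DEV_EL1_VTIMER |
                                    KVM_ARM_DEV_EL1_PTIMER);
        if (kvm_timer_should_fire(vtimer))
                regs->device_irq_level |= KVM_ARM_DEV_EL1_VTIMER;
        if (kvm_timer_should_fire(ptimer))
                regs->device_irq_level |= KVM_ARM_DEV_EL1_PTIMER;
}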
396 static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, in kvm_timer_update_irq() argument
402 trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, in kvm_timer_update_irq()
405 if (!userspace_irqchip(vcpu->kvm)) { in kvm_timer_update_irq()
406 ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id, in kvm_timer_update_irq()
422 kvm_timer_update_irq(ctx->vcpu, should_fire, ctx); in timer_emulate()
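kvm_timer_update_irq() (lines 396 to 406, also called from timer_emulate() at line 422 and from the interrupt and hrtimer handlers at lines 202 and 329) latches the new line level and, when the irqchip lives in the kernel, pushes it into the vgic. A sketch matching the fragments; the WARN_ON on the injection result is an assumption:

static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level,
                                 struct arch_timer_context *timer_ctx)
{
        int ret;

        timer_ctx->irq.level = new_level;
        trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq,
                                   timer_ctx->irq.level);

        if (!userspace_irqchip(vcpu->kvm)) {
                /* Level-triggered mapped IRQ, owned by this timer context */
                ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                          timer_ctx->irq.irq,
                                          timer_ctx->irq.level,
                                          timer_ctx);
                WARN_ON(ret);
        }
}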
441 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); in timer_save_state()
488 static void kvm_timer_blocking(struct kvm_vcpu *vcpu) in kvm_timer_blocking() argument
490 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_blocking()
493 get_timer_map(vcpu, &map); in kvm_timer_blocking()
502 !vcpu_has_wfit_active(vcpu)) in kvm_timer_blocking()
509 soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); in kvm_timer_blocking()
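Lines 488 to 509 are kvm_timer_blocking(), which arms the background hrtimer before the vcpu sleeps so that a firing guest timer (or an expiring WFIT deadline) wakes it back up. A sketch consistent with those fragments; kvm_timer_irq_can_fire() is assumed from the surrounding file:

static void kvm_timer_blocking(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = vcpu_timer(vcpu);
        struct timer_map map;

        get_timer_map(vcpu, &map);

        /*
         * If none of the timers can raise an interrupt (all disabled or
         * masked) and no WFIT deadline is pending, there is nothing to arm.
         */
        if (!kvm_timer_irq_can_fire(map.direct_vtimer) &&
            !kvm_timer_irq_can_fire(map.direct_ptimer) &&
            !kvm_timer_irq_can_fire(map.emul_ptimer) &&
            !vcpu_has_wfit_active(vcpu))
                return;

        /* Arm the background timer for the earliest expiry among them */
        soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu));
}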
512 static void kvm_timer_unblocking(struct kvm_vcpu *vcpu) in kvm_timer_unblocking() argument
514 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_unblocking()
521 struct arch_timer_cpu *timer = vcpu_timer(ctx->vcpu); in timer_restore_state()
569 struct kvm_vcpu *vcpu = ctx->vcpu; in kvm_timer_vcpu_load_gic() local
578 kvm_timer_update_irq(ctx->vcpu, kvm_timer_should_fire(ctx), ctx); in kvm_timer_vcpu_load_gic()
580 if (irqchip_in_kernel(vcpu->kvm)) in kvm_timer_vcpu_load_gic()
581 phys_active = kvm_vgic_map_is_active(vcpu, ctx->irq.irq); in kvm_timer_vcpu_load_gic()
588 static void kvm_timer_vcpu_load_nogic(struct kvm_vcpu *vcpu) in kvm_timer_vcpu_load_nogic() argument
590 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); in kvm_timer_vcpu_load_nogic()
598 kvm_timer_update_irq(vcpu, kvm_timer_should_fire(vtimer), vtimer); in kvm_timer_vcpu_load_nogic()
616 void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) in kvm_timer_vcpu_load() argument
618 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_vcpu_load()
624 get_timer_map(vcpu, &map); in kvm_timer_vcpu_load()
631 kvm_timer_vcpu_load_nogic(vcpu); in kvm_timer_vcpu_load()
636 kvm_timer_unblocking(vcpu); in kvm_timer_vcpu_load()
646 bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) in kvm_timer_should_notify_user() argument
648 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); in kvm_timer_should_notify_user()
649 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); in kvm_timer_should_notify_user()
650 struct kvm_sync_regs *sregs = &vcpu->run->s.regs; in kvm_timer_should_notify_user()
653 if (likely(irqchip_in_kernel(vcpu->kvm))) in kvm_timer_should_notify_user()
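kvm_timer_should_notify_user() (lines 646 to 653) decides whether a return to userspace is needed because a timer line changed while the irqchip is emulated there. A sketch consistent with the fragments, comparing the freshly computed levels against what was last reported in the sync regs:

bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
        struct arch_timer_context *ptimer = vcpu_ptimer(vcpu);
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool vlevel, plevel;

        /* With an in-kernel GIC there is nothing for userspace to do */
        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        vlevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_VTIMER;
        plevel = sregs->device_irq_level & KVM_ARM_DEV_EL1_PTIMER;

        return kvm_timer_should_fire(vtimer) != vlevel ||
               kvm_timer_should_fire(ptimer) != plevel;
}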
663 void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) in kvm_timer_vcpu_put() argument
665 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_vcpu_put()
671 get_timer_map(vcpu, &map); in kvm_timer_vcpu_put()
679 * need it after a vcpu_put is in the context of a sleeping VCPU, and in kvm_timer_vcpu_put()
684 * coming back to the VCPU thread in kvm_timer_vcpu_load(). in kvm_timer_vcpu_put()
689 if (kvm_vcpu_is_blocking(vcpu)) in kvm_timer_vcpu_put()
690 kvm_timer_blocking(vcpu); in kvm_timer_vcpu_put()
707 static void unmask_vtimer_irq_user(struct kvm_vcpu *vcpu) in unmask_vtimer_irq_user() argument
709 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); in unmask_vtimer_irq_user()
712 kvm_timer_update_irq(vcpu, false, vtimer); in unmask_vtimer_irq_user()
720 void kvm_timer_sync_user(struct kvm_vcpu *vcpu) in kvm_timer_sync_user() argument
722 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_sync_user()
727 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_timer_sync_user()
728 unmask_vtimer_irq_user(vcpu); in kvm_timer_sync_user()
731 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) in kvm_timer_vcpu_reset() argument
733 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_vcpu_reset()
736 get_timer_map(vcpu, &map); in kvm_timer_vcpu_reset()
744 timer_set_ctl(vcpu_vtimer(vcpu), 0); in kvm_timer_vcpu_reset()
745 timer_set_ctl(vcpu_ptimer(vcpu), 0); in kvm_timer_vcpu_reset()
748 kvm_timer_update_irq(vcpu, false, vcpu_vtimer(vcpu)); in kvm_timer_vcpu_reset()
749 kvm_timer_update_irq(vcpu, false, vcpu_ptimer(vcpu)); in kvm_timer_vcpu_reset()
751 if (irqchip_in_kernel(vcpu->kvm)) { in kvm_timer_vcpu_reset()
752 kvm_vgic_reset_mapped_irq(vcpu, map.direct_vtimer->irq.irq); in kvm_timer_vcpu_reset()
754 kvm_vgic_reset_mapped_irq(vcpu, map.direct_ptimer->irq.irq); in kvm_timer_vcpu_reset()
765 static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) in update_vtimer_cntvoff() argument
768 struct kvm *kvm = vcpu->kvm; in update_vtimer_cntvoff()
776 * When called from the vcpu create path, the CPU being created is not in update_vtimer_cntvoff()
779 timer_set_offset(vcpu_vtimer(vcpu), cntvoff); in update_vtimer_cntvoff()
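Lines 765 to 779 are update_vtimer_cntvoff(), which keeps CNTVOFF consistent across the whole VM by writing the same offset into every vcpu's vtimer, plus the vcpu being created (not yet visible to the iterator, as the comment at line 776 notes). A sketch consistent with the fragments; the lock taken around the loop is assumed to be kvm->lock:

static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff)
{
        unsigned long i;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_vcpu *tmp;

        mutex_lock(&kvm->lock);
        kvm_for_each_vcpu(i, tmp, kvm)
                timer_set_offset(vcpu_vtimer(tmp), cntvoff);

        /*
         * When called from the vcpu create path, the CPU being created is
         * not included in the loop above, so set it here as well.
         */
        timer_set_offset(vcpu_vtimer(vcpu), cntvoff);
        mutex_unlock(&kvm->lock);
}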
783 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) in kvm_timer_vcpu_init() argument
785 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_vcpu_init()
786 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); in kvm_timer_vcpu_init()
787 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); in kvm_timer_vcpu_init()
789 vtimer->vcpu = vcpu; in kvm_timer_vcpu_init()
790 ptimer->vcpu = vcpu; in kvm_timer_vcpu_init()
793 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read()); in kvm_timer_vcpu_init()
820 int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) in kvm_arm_timer_set_reg() argument
826 timer = vcpu_vtimer(vcpu); in kvm_arm_timer_set_reg()
827 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); in kvm_arm_timer_set_reg()
830 timer = vcpu_vtimer(vcpu); in kvm_arm_timer_set_reg()
831 update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value); in kvm_arm_timer_set_reg()
834 timer = vcpu_vtimer(vcpu); in kvm_arm_timer_set_reg()
835 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); in kvm_arm_timer_set_reg()
838 timer = vcpu_ptimer(vcpu); in kvm_arm_timer_set_reg()
839 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value); in kvm_arm_timer_set_reg()
842 timer = vcpu_ptimer(vcpu); in kvm_arm_timer_set_reg()
843 kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value); in kvm_arm_timer_set_reg()
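kvm_arm_timer_set_reg() (lines 820 to 843) is the KVM_SET_ONE_REG backend: CTL and CVAL writes go through kvm_arm_timer_write(), while setting the virtual counter is implemented by recomputing CNTVOFF against the host physical counter. A sketch consistent with the fragments; the KVM_REG_ARM_* case labels are assumed from the UAPI:

int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value)
{
        struct arch_timer_context *timer;

        switch (regid) {
        case KVM_REG_ARM_TIMER_CTL:
                timer = vcpu_vtimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
        case KVM_REG_ARM_TIMER_CNT:
                /* "Setting the counter" really means adjusting the offset */
                timer = vcpu_vtimer(vcpu);
                update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value);
                break;
        case KVM_REG_ARM_TIMER_CVAL:
                timer = vcpu_vtimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
                break;
        case KVM_REG_ARM_PTIMER_CTL:
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CTL, value);
                break;
        case KVM_REG_ARM_PTIMER_CVAL:
                timer = vcpu_ptimer(vcpu);
                kvm_arm_timer_write(vcpu, timer, TIMER_REG_CVAL, value);
                break;
        default:
                return -1;
        }

        return 0;
}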
869 u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) in kvm_arm_timer_get_reg() argument
873 return kvm_arm_timer_read(vcpu, in kvm_arm_timer_get_reg()
874 vcpu_vtimer(vcpu), TIMER_REG_CTL); in kvm_arm_timer_get_reg()
876 return kvm_arm_timer_read(vcpu, in kvm_arm_timer_get_reg()
877 vcpu_vtimer(vcpu), TIMER_REG_CNT); in kvm_arm_timer_get_reg()
879 return kvm_arm_timer_read(vcpu, in kvm_arm_timer_get_reg()
880 vcpu_vtimer(vcpu), TIMER_REG_CVAL); in kvm_arm_timer_get_reg()
882 return kvm_arm_timer_read(vcpu, in kvm_arm_timer_get_reg()
883 vcpu_ptimer(vcpu), TIMER_REG_CTL); in kvm_arm_timer_get_reg()
885 return kvm_arm_timer_read(vcpu, in kvm_arm_timer_get_reg()
886 vcpu_ptimer(vcpu), TIMER_REG_CNT); in kvm_arm_timer_get_reg()
888 return kvm_arm_timer_read(vcpu, in kvm_arm_timer_get_reg()
889 vcpu_ptimer(vcpu), TIMER_REG_CVAL); in kvm_arm_timer_get_reg()
894 static u64 kvm_arm_timer_read(struct kvm_vcpu *vcpu, in kvm_arm_timer_read() argument
925 u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu, in kvm_arm_timer_read_sysreg() argument
932 kvm_timer_vcpu_put(vcpu); in kvm_arm_timer_read_sysreg()
934 val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg); in kvm_arm_timer_read_sysreg()
936 kvm_timer_vcpu_load(vcpu); in kvm_arm_timer_read_sysreg()
942 static void kvm_arm_timer_write(struct kvm_vcpu *vcpu, in kvm_arm_timer_write() argument
965 void kvm_arm_timer_write_sysreg(struct kvm_vcpu *vcpu, in kvm_arm_timer_write_sysreg() argument
971 kvm_timer_vcpu_put(vcpu); in kvm_arm_timer_write_sysreg()
973 kvm_arm_timer_write(vcpu, vcpu_get_timer(vcpu, tmr), treg, val); in kvm_arm_timer_write_sysreg()
975 kvm_timer_vcpu_load(vcpu); in kvm_arm_timer_write_sysreg()
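kvm_arm_timer_read_sysreg() and kvm_arm_timer_write_sysreg() (lines 925 to 975) bracket the register access with a put/load pair so the backing state sits in memory rather than live in hardware while it is read or modified. A sketch of the read side consistent with the fragments; the preempt_disable()/preempt_enable() bracket is an assumption:

u64 kvm_arm_timer_read_sysreg(struct kvm_vcpu *vcpu,
                              enum kvm_arch_timers tmr,
                              enum kvm_arch_timer_regs treg)
{
        u64 val;

        preempt_disable();
        /* Sync the hardware timer state back into the vcpu context */
        kvm_timer_vcpu_put(vcpu);

        val = kvm_arm_timer_read(vcpu, vcpu_get_timer(vcpu, tmr), treg);

        /* Reload the (possibly unchanged) state onto the hardware timer */
        kvm_timer_vcpu_load(vcpu);
        preempt_enable();

        return val;
}

The write side mirrors this, calling kvm_arm_timer_write() between the put and the load.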
991 static int timer_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) in timer_irq_set_vcpu_affinity() argument
993 if (vcpu) in timer_irq_set_vcpu_affinity()
1151 kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); in kvm_timer_hyp_init()
1175 kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); in kvm_timer_hyp_init()
1197 void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) in kvm_timer_vcpu_terminate() argument
1199 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_vcpu_terminate()
1204 static bool timer_irqs_are_valid(struct kvm_vcpu *vcpu) in timer_irqs_are_valid() argument
1209 vtimer_irq = vcpu_vtimer(vcpu)->irq.irq; in timer_irqs_are_valid()
1210 ret = kvm_vgic_set_owner(vcpu, vtimer_irq, vcpu_vtimer(vcpu)); in timer_irqs_are_valid()
1214 ptimer_irq = vcpu_ptimer(vcpu)->irq.irq; in timer_irqs_are_valid()
1215 ret = kvm_vgic_set_owner(vcpu, ptimer_irq, vcpu_ptimer(vcpu)); in timer_irqs_are_valid()
1219 kvm_for_each_vcpu(i, vcpu, vcpu->kvm) { in timer_irqs_are_valid()
1220 if (vcpu_vtimer(vcpu)->irq.irq != vtimer_irq || in timer_irqs_are_valid()
1221 vcpu_ptimer(vcpu)->irq.irq != ptimer_irq) in timer_irqs_are_valid()
1230 struct kvm_vcpu *vcpu = kvm_get_running_vcpu(); in kvm_arch_timer_get_input_level() local
1233 if (WARN(!vcpu, "No vcpu context!\n")) in kvm_arch_timer_get_input_level()
1236 if (vintid == vcpu_vtimer(vcpu)->irq.irq) in kvm_arch_timer_get_input_level()
1237 timer = vcpu_vtimer(vcpu); in kvm_arch_timer_get_input_level()
1238 else if (vintid == vcpu_ptimer(vcpu)->irq.irq) in kvm_arch_timer_get_input_level()
1239 timer = vcpu_ptimer(vcpu); in kvm_arch_timer_get_input_level()
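kvm_arch_timer_get_input_level() (lines 1230 to 1239) is the vgic's line-level callback for the mapped timer interrupts: it resolves the INTID against the running vcpu's vtimer or ptimer and reports whether that timer should currently fire. A sketch consistent with the fragments; the final kvm_timer_should_fire() call is assumed from the surrounding file:

bool kvm_arch_timer_get_input_level(int vintid)
{
        struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
        struct arch_timer_context *timer;

        if (WARN(!vcpu, "No vcpu context!\n"))
                return false;

        /* Match the INTID against this vcpu's configured timer PPIs */
        if (vintid == vcpu_vtimer(vcpu)->irq.irq)
                timer = vcpu_vtimer(vcpu);
        else if (vintid == vcpu_ptimer(vcpu)->irq.irq)
                timer = vcpu_ptimer(vcpu);
        else
                BUG();

        return kvm_timer_should_fire(timer);
}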
1246 int kvm_timer_enable(struct kvm_vcpu *vcpu) in kvm_timer_enable() argument
1248 struct arch_timer_cpu *timer = vcpu_timer(vcpu); in kvm_timer_enable()
1256 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_timer_enable()
1263 if (!timer_irqs_are_valid(vcpu)) { in kvm_timer_enable()
1268 get_timer_map(vcpu, &map); in kvm_timer_enable()
1270 ret = kvm_vgic_map_phys_irq(vcpu, in kvm_timer_enable()
1278 ret = kvm_vgic_map_phys_irq(vcpu, in kvm_timer_enable()
1316 struct kvm_vcpu *vcpu; in set_timer_irqs() local
1319 kvm_for_each_vcpu(i, vcpu, kvm) { in set_timer_irqs()
1320 vcpu_vtimer(vcpu)->irq.irq = vtimer_irq; in set_timer_irqs()
1321 vcpu_ptimer(vcpu)->irq.irq = ptimer_irq; in set_timer_irqs()
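set_timer_irqs() (lines 1316 to 1321) backs the timer IRQ device attributes handled just below in kvm_arm_timer_set_attr(): the chosen PPI numbers are applied uniformly to every vcpu of the VM. A sketch consistent with the fragments:

static void set_timer_irqs(struct kvm *kvm, int vtimer_irq, int ptimer_irq)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        /* All vcpus must use the same PPIs for their timers */
        kvm_for_each_vcpu(i, vcpu, kvm) {
                vcpu_vtimer(vcpu)->irq.irq = vtimer_irq;
                vcpu_ptimer(vcpu)->irq.irq = ptimer_irq;
        }
}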
1325 int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_timer_set_attr() argument
1328 struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); in kvm_arm_timer_set_attr()
1329 struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); in kvm_arm_timer_set_attr()
1332 if (!irqchip_in_kernel(vcpu->kvm)) in kvm_arm_timer_set_attr()
1341 if (vcpu->arch.timer_cpu.enabled) in kvm_arm_timer_set_attr()
1346 set_timer_irqs(vcpu->kvm, irq, ptimer->irq.irq); in kvm_arm_timer_set_attr()
1349 set_timer_irqs(vcpu->kvm, vtimer->irq.irq, irq); in kvm_arm_timer_set_attr()
1358 int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_timer_get_attr() argument
1366 timer = vcpu_vtimer(vcpu); in kvm_arm_timer_get_attr()
1369 timer = vcpu_ptimer(vcpu); in kvm_arm_timer_get_attr()
1379 int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr) in kvm_arm_timer_has_attr() argument